CombinedText stringlengths 4 3.42M |
|---|
// Entry point — intentionally empty placeholder.
fn main() {
}
implement pwd
use std::env;
/// Print the current working directory, like the `pwd` shell builtin.
fn main() {
    // `current_dir` can fail (e.g. the CWD was removed or is unreadable);
    // `expect` gives a clear diagnostic instead of a bare unwrap panic.
    let cwd = env::current_dir().expect("failed to read current working directory");
    println!("{}", cwd.display());
}
|
extern crate lapack;
use self::lapack::fortran::*;
/// Precision-dispatch layer over the LAPACK FORTRAN bindings.
///
/// Implementations forward each method to the matching precision-specific
/// routine (`d*` for `f64`, `s*` for `f32`). Argument names follow LAPACK
/// conventions (`lda` = leading dimension of `a`, `lwork` = workspace
/// length, `info` = status out-parameter).
pub trait LapackBinding: Sized {
/// xSYEV: eigenvalues (and optionally eigenvectors) of a symmetric matrix.
fn _syev(jobz: u8,
uplo: u8,
n: i32,
a: &mut Vec<Self>,
lda: i32,
w: &mut Vec<Self>,
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32);
/// xGETRF: LU factorization with partial pivoting; pivots returned in `ipiv`.
fn _getrf(m: i32, n: i32, a: &mut Vec<Self>, lda: i32, ipiv: &mut [i32], info: &mut i32);
/// xGETRI: matrix inverse from an LU factorization produced by `_getrf`.
fn _getri(n: i32,
a: &mut Vec<Self>,
lda: i32,
ipiv: &[i32],
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32);
/// xLANGE: matrix norm selected by the `norm` code byte.
fn _lange(norm: u8, m: i32, n: i32, a: &Vec<Self>, lda: i32, work: &mut Vec<Self>) -> Self;
}
/// Double precision: forward every binding to the LAPACK `d*` routines.
impl LapackBinding for f64 {
fn _syev(jobz: u8,
uplo: u8,
n: i32,
a: &mut Vec<Self>,
lda: i32,
w: &mut Vec<Self>,
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32) {
dsyev(jobz, uplo, n, a, lda, w, work, lwork, info);
}
fn _getrf(m: i32, n: i32, a: &mut Vec<Self>, lda: i32, ipiv: &mut [i32], info: &mut i32) {
dgetrf(m, n, a, lda, ipiv, info);
}
fn _getri(n: i32,
a: &mut Vec<Self>,
lda: i32,
ipiv: &[i32],
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32) {
dgetri(n, a, lda, ipiv, work, lwork, info);
}
fn _lange(norm: u8, m: i32, n: i32, a: &Vec<Self>, lda: i32, work: &mut Vec<Self>) -> Self {
dlange(norm, m, n, a, lda, work)
}
}
/// Single precision: forward every binding to the LAPACK `s*` routines.
impl LapackBinding for f32 {
fn _syev(jobz: u8,
uplo: u8,
n: i32,
a: &mut Vec<Self>,
lda: i32,
w: &mut Vec<Self>,
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32) {
ssyev(jobz, uplo, n, a, lda, w, work, lwork, info);
}
fn _getrf(m: i32, n: i32, a: &mut Vec<Self>, lda: i32, ipiv: &mut [i32], info: &mut i32) {
sgetrf(m, n, a, lda, ipiv, info);
}
fn _getri(n: i32,
a: &mut Vec<Self>,
lda: i32,
ipiv: &[i32],
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32) {
sgetri(n, a, lda, ipiv, work, lwork, info);
}
fn _lange(norm: u8, m: i32, n: i32, a: &Vec<Self>, lda: i32, work: &mut Vec<Self>) -> Self {
slange(norm, m, n, a, lda, work)
}
}
Add _gesvd
extern crate lapack;
use self::lapack::fortran::*;
/// Precision-dispatch layer over the LAPACK FORTRAN bindings.
///
/// Implementations forward each method to the matching precision-specific
/// routine (`d*` for `f64`, `s*` for `f32`). Argument names follow LAPACK
/// conventions (`lda` = leading dimension of `a`, `lwork` = workspace
/// length, `info` = status out-parameter).
pub trait LapackBinding: Sized {
/// xSYEV: eigenvalues (and optionally eigenvectors) of a symmetric matrix.
fn _syev(jobz: u8,
uplo: u8,
n: i32,
a: &mut Vec<Self>,
lda: i32,
w: &mut Vec<Self>,
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32);
/// xGETRF: LU factorization with partial pivoting; pivots returned in `ipiv`.
fn _getrf(m: i32, n: i32, a: &mut Vec<Self>, lda: i32, ipiv: &mut [i32], info: &mut i32);
/// xGETRI: matrix inverse from an LU factorization produced by `_getrf`.
fn _getri(n: i32,
a: &mut Vec<Self>,
lda: i32,
ipiv: &[i32],
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32);
/// xLANGE: matrix norm selected by the `norm` code byte.
fn _lange(norm: u8, m: i32, n: i32, a: &Vec<Self>, lda: i32, work: &mut Vec<Self>) -> Self;
/// xGESVD: singular value decomposition A = U * S * V^T.
/// `jobu`/`jobvt` select how much of U and V^T to compute; singular
/// values are returned in `s`.
fn _gesvd(jobu: u8,
jobvt: u8,
m: i32,
n: i32,
a: &mut [Self],
lda: i32,
s: &mut [Self],
u: &mut [Self],
ldu: i32,
vt: &mut [Self],
ldvt: i32,
work: &mut [Self],
lwork: i32,
info: &mut i32);
}
/// Double precision: forward every binding to the LAPACK `d*` routines.
impl LapackBinding for f64 {
fn _syev(jobz: u8,
uplo: u8,
n: i32,
a: &mut Vec<Self>,
lda: i32,
w: &mut Vec<Self>,
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32) {
dsyev(jobz, uplo, n, a, lda, w, work, lwork, info);
}
fn _getrf(m: i32, n: i32, a: &mut Vec<Self>, lda: i32, ipiv: &mut [i32], info: &mut i32) {
dgetrf(m, n, a, lda, ipiv, info);
}
fn _getri(n: i32,
a: &mut Vec<Self>,
lda: i32,
ipiv: &[i32],
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32) {
dgetri(n, a, lda, ipiv, work, lwork, info);
}
fn _lange(norm: u8, m: i32, n: i32, a: &Vec<Self>, lda: i32, work: &mut Vec<Self>) -> Self {
dlange(norm, m, n, a, lda, work)
}
fn _gesvd(jobu: u8,
jobvt: u8,
m: i32,
n: i32,
a: &mut [Self],
lda: i32,
s: &mut [Self],
u: &mut [Self],
ldu: i32,
vt: &mut [Self],
ldvt: i32,
work: &mut [Self],
lwork: i32,
info: &mut i32) {
dgesvd(jobu,
jobvt,
m,
n,
a,
lda,
s,
u,
ldu,
vt,
ldvt,
work,
lwork,
info);
}
}
/// Single precision: forward every binding to the LAPACK `s*` routines.
impl LapackBinding for f32 {
fn _syev(jobz: u8,
uplo: u8,
n: i32,
a: &mut Vec<Self>,
lda: i32,
w: &mut Vec<Self>,
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32) {
ssyev(jobz, uplo, n, a, lda, w, work, lwork, info);
}
fn _getrf(m: i32, n: i32, a: &mut Vec<Self>, lda: i32, ipiv: &mut [i32], info: &mut i32) {
sgetrf(m, n, a, lda, ipiv, info);
}
fn _getri(n: i32,
a: &mut Vec<Self>,
lda: i32,
ipiv: &[i32],
work: &mut Vec<Self>,
lwork: i32,
info: &mut i32) {
sgetri(n, a, lda, ipiv, work, lwork, info);
}
fn _lange(norm: u8, m: i32, n: i32, a: &Vec<Self>, lda: i32, work: &mut Vec<Self>) -> Self {
slange(norm, m, n, a, lda, work)
}
fn _gesvd(jobu: u8,
jobvt: u8,
m: i32,
n: i32,
a: &mut [Self],
lda: i32,
s: &mut [Self],
u: &mut [Self],
ldu: i32,
vt: &mut [Self],
ldvt: i32,
work: &mut [Self],
lwork: i32,
info: &mut i32) {
sgesvd(jobu,
jobvt,
m,
n,
a,
lda,
s,
u,
ldu,
vt,
ldvt,
work,
lwork,
info);
}
}
|
use std::collections::HashMap;
use super::metric::{Metric, MetricKind};
use clock_ticks;
/// Buckets are the primary internal storage type.
///
/// Each bucket contains a set of hashmaps containing
/// each set of metrics received by clients.
///
pub struct Buckets {
// Counter metrics: name -> rate-adjusted running sum.
counters: HashMap<String, f64>,
// Gauge metrics: name -> last reported value.
gauges: HashMap<String, f64>,
// Timer metrics: name -> every recorded value, in arrival order.
timers: HashMap<String, Vec<f64>>,
// Millisecond timestamp captured at construction.
server_start_time: u64,
// Millisecond timestamp of the most recent `add`.
last_message: u64,
// Count of malformed messages reported via `add_bad_message`.
bad_messages: usize,
// NOTE(review): declared but never updated in this version — presumably
// meant to count metrics accepted via `add`; confirm intended use.
total_messages: usize,
}
impl Buckets {
    /// Create a new `Buckets` with empty metric maps; `last_message` and
    /// `server_start_time` are initialised to the current time.
    ///
    /// ```ignore
    /// let bucket = Buckets::new();
    /// ```
    pub fn new() -> Buckets {
        Buckets {
            counters: HashMap::new(),
            gauges: HashMap::new(),
            timers: HashMap::new(),
            bad_messages: 0,
            total_messages: 0,
            last_message: clock_ticks::precise_time_ms(),
            server_start_time: clock_ticks::precise_time_ms(),
        }
    }

    /// Adds a metric to the bucket storage.
    ///
    /// Counters accumulate (scaled by the inverse sample rate), gauges are
    /// replaced by the newest value, and timers record every observation.
    /// Also refreshes `last_message` and bumps `total_messages`.
    ///
    /// # Examples
    ///
    /// ```ignore
    /// use std::str::FromStr;
    ///
    /// let metric = metric::Metric::from_str("foo:1|c").unwrap();
    /// let mut bucket = Buckets::new();
    /// bucket.add(&metric);
    /// ```
    pub fn add(&mut self, value: &Metric) {
        let name = value.name.to_owned();
        match value.kind {
            MetricKind::Counter(rate) => {
                // Scale by the inverse sample rate so sampled counters
                // estimate the true total.
                let counter = self.counters.entry(name).or_insert(0.0);
                *counter += value.value * (1.0 / rate);
            }
            MetricKind::Gauge => {
                self.gauges.insert(name, value.value);
            }
            MetricKind::Timer => {
                self.timers.entry(name).or_insert_with(Vec::new).push(value.value);
            }
        }
        self.last_message = clock_ticks::precise_time_ms();
        // Bug fix: `total_messages` was declared on the struct but never
        // incremented, so it always read 0.
        self.total_messages += 1;
    }

    /// Increment the bad message count by one.
    pub fn add_bad_message(&mut self) {
        self.bad_messages += 1;
    }

    /// Get the count of bad messages.
    pub fn bad_messages(&self) -> usize {
        self.bad_messages
    }
}
//
// Tests
//
#[cfg(test)]
mod test {
// Unit tests exercise `Buckets` through the sibling `metric` module's types.
use super::*;
use super::super::metric::{Metric, MetricKind};
#[test]
fn test_bad_messages() {
let mut buckets = Buckets::new();
buckets.add_bad_message();
assert_eq!(1, buckets.bad_messages());
buckets.add_bad_message();
assert_eq!(2, buckets.bad_messages());
}
#[test]
fn test_add_increments_last_message_timer() {
let mut buckets = Buckets::new();
// duff value to ensure it changes.
let original = 10;
buckets.last_message = original;
let metric = Metric::new("some.metric", 1.0, MetricKind::Counter(1.0));
buckets.add(&metric);
assert!(buckets.last_message > original);
}
#[test]
fn test_add_counter_metric() {
let mut buckets = Buckets::new();
let metric = Metric::new("some.metric", 1.0, MetricKind::Counter(1.0));
buckets.add(&metric);
assert!(buckets.counters.contains_key("some.metric"),
"Should contain the metric key");
assert_eq!(Some(&1.0), buckets.counters.get("some.metric"));
// Increment counter
buckets.add(&metric);
assert_eq!(Some(&2.0), buckets.counters.get("some.metric"));
}
#[test]
fn test_add_counter_metric_sampled() {
// A sample rate of 0.1 scales the value by 1/0.1 = 10; 0.5 scales by 2.
let mut buckets = Buckets::new();
let metric = Metric::new("some.metric", 1.0, MetricKind::Counter(0.1));
buckets.add(&metric);
assert_eq!(Some(&10.0), buckets.counters.get("some.metric"));
let metric_two = Metric::new("some.metric", 1.0, MetricKind::Counter(0.5));
buckets.add(&metric_two);
assert_eq!(Some(&12.0), buckets.counters.get("some.metric"));
}
#[test]
fn test_add_gauge_metric() {
let mut buckets = Buckets::new();
let metric = Metric::new("some.metric", 11.5, MetricKind::Gauge);
buckets.add(&metric);
assert!(buckets.gauges.contains_key("some.metric"),
"Should contain the metric key");
assert_eq!(Some(&11.5), buckets.gauges.get("some.metric"));
}
#[test]
fn test_add_timer_metric() {
// Timers append every observation under the metric name.
let mut buckets = Buckets::new();
let metric = Metric::new("some.metric", 11.5, MetricKind::Timer);
buckets.add(&metric);
assert!(buckets.timers.contains_key("some.metric"),
"Should contain the metric key");
assert_eq!(Some(&vec![11.5]), buckets.timers.get("some.metric"));
let metric_two = Metric::new("some.metric", 99.5, MetricKind::Timer);
buckets.add(&metric_two);
let metric_three = Metric::new("other.metric", 811.5, MetricKind::Timer);
buckets.add(&metric_three);
assert!(buckets.timers.contains_key("some.metric"),
"Should contain the metric key");
assert!(buckets.timers.contains_key("other.metric"),
"Should contain the metric key");
assert_eq!(Some(&vec![11.5, 99.5]), buckets.timers.get("some.metric"));
assert_eq!(Some(&vec![811.5]), buckets.timers.get("other.metric"));
}
}
Track total messages sent.
use std::collections::HashMap;
use super::metric::{Metric, MetricKind};
use clock_ticks;
/// Buckets are the primary internal storage type.
///
/// Each bucket contains a set of hashmaps containing
/// each set of metrics received by clients.
///
pub struct Buckets {
// Counter metrics: name -> rate-adjusted running sum.
counters: HashMap<String, f64>,
// Gauge metrics: name -> last reported value.
gauges: HashMap<String, f64>,
// Timer metrics: name -> every recorded value, in arrival order.
timers: HashMap<String, Vec<f64>>,
// Millisecond timestamp captured at construction.
server_start_time: u64,
// Millisecond timestamp of the most recent `add`.
last_message: u64,
// Count of malformed messages reported via `add_bad_message`.
bad_messages: usize,
// Total metrics accepted via `add`.
total_messages: usize,
}
impl Buckets {
    /// Create a new `Buckets` with empty metric maps; `last_message` and
    /// `server_start_time` are initialised to the current time.
    ///
    /// ```ignore
    /// let bucket = Buckets::new();
    /// ```
    pub fn new() -> Buckets {
        Buckets {
            counters: HashMap::new(),
            gauges: HashMap::new(),
            timers: HashMap::new(),
            bad_messages: 0,
            total_messages: 0,
            last_message: clock_ticks::precise_time_ms(),
            server_start_time: clock_ticks::precise_time_ms(),
        }
    }

    /// Adds a metric to the bucket storage.
    ///
    /// Counters accumulate (scaled by the inverse sample rate), gauges are
    /// replaced by the newest value, and timers record every observation.
    /// Also refreshes `last_message` and bumps `total_messages`.
    ///
    /// # Examples
    ///
    /// ```ignore
    /// use std::str::FromStr;
    ///
    /// let metric = metric::Metric::from_str("foo:1|c").unwrap();
    /// let mut bucket = Buckets::new();
    /// bucket.add(&metric);
    /// ```
    pub fn add(&mut self, value: &Metric) {
        let name = value.name.to_owned();
        match value.kind {
            MetricKind::Counter(rate) => {
                // Scale by the inverse sample rate so sampled counters
                // estimate the true total.
                let counter = self.counters.entry(name).or_insert(0.0);
                *counter += value.value * (1.0 / rate);
            }
            MetricKind::Gauge => {
                self.gauges.insert(name, value.value);
            }
            MetricKind::Timer => {
                self.timers.entry(name).or_insert_with(Vec::new).push(value.value);
            }
        }
        self.last_message = clock_ticks::precise_time_ms();
        self.total_messages += 1;
    }

    /// Increment the bad message count by one.
    pub fn add_bad_message(&mut self) {
        self.bad_messages += 1;
    }

    /// Get the count of bad messages.
    pub fn bad_messages(&self) -> usize {
        self.bad_messages
    }
}
//
// Tests
//
#[cfg(test)]
mod test {
    use super::*;
    use super::super::metric::{Metric, MetricKind};

    #[test]
    fn test_bad_messages() {
        let mut buckets = Buckets::new();
        buckets.add_bad_message();
        assert_eq!(1, buckets.bad_messages());
        buckets.add_bad_message();
        assert_eq!(2, buckets.bad_messages());
    }

    // Bug fix: this test was misnamed `test_add_increments_total_messages`
    // although its body asserts that `last_message` advances.
    #[test]
    fn test_add_increments_last_message_timer() {
        let mut buckets = Buckets::new();
        // duff value to ensure it changes.
        let original = 10;
        buckets.last_message = original;
        let metric = Metric::new("some.metric", 1.0, MetricKind::Counter(1.0));
        buckets.add(&metric);
        assert!(buckets.last_message > original);
    }

    // Bug fix: this test was misnamed `test_add_increments_last_message_timer`
    // although its body asserts the `total_messages` counter.
    #[test]
    fn test_add_increments_total_messages() {
        let mut buckets = Buckets::new();
        let metric = Metric::new("some.metric", 1.0, MetricKind::Counter(1.0));
        buckets.add(&metric);
        assert_eq!(1, buckets.total_messages);
        buckets.add(&metric);
        assert_eq!(2, buckets.total_messages);
    }

    #[test]
    fn test_add_counter_metric() {
        let mut buckets = Buckets::new();
        let metric = Metric::new("some.metric", 1.0, MetricKind::Counter(1.0));
        buckets.add(&metric);
        assert!(buckets.counters.contains_key("some.metric"),
                "Should contain the metric key");
        assert_eq!(Some(&1.0), buckets.counters.get("some.metric"));
        // Increment counter
        buckets.add(&metric);
        assert_eq!(Some(&2.0), buckets.counters.get("some.metric"));
    }

    #[test]
    fn test_add_counter_metric_sampled() {
        // A sample rate of 0.1 scales the value by 1/0.1 = 10; 0.5 scales by 2.
        let mut buckets = Buckets::new();
        let metric = Metric::new("some.metric", 1.0, MetricKind::Counter(0.1));
        buckets.add(&metric);
        assert_eq!(Some(&10.0), buckets.counters.get("some.metric"));
        let metric_two = Metric::new("some.metric", 1.0, MetricKind::Counter(0.5));
        buckets.add(&metric_two);
        assert_eq!(Some(&12.0), buckets.counters.get("some.metric"));
    }

    #[test]
    fn test_add_gauge_metric() {
        let mut buckets = Buckets::new();
        let metric = Metric::new("some.metric", 11.5, MetricKind::Gauge);
        buckets.add(&metric);
        assert!(buckets.gauges.contains_key("some.metric"),
                "Should contain the metric key");
        assert_eq!(Some(&11.5), buckets.gauges.get("some.metric"));
    }

    #[test]
    fn test_add_timer_metric() {
        let mut buckets = Buckets::new();
        let metric = Metric::new("some.metric", 11.5, MetricKind::Timer);
        buckets.add(&metric);
        assert!(buckets.timers.contains_key("some.metric"),
                "Should contain the metric key");
        assert_eq!(Some(&vec![11.5]), buckets.timers.get("some.metric"));
        let metric_two = Metric::new("some.metric", 99.5, MetricKind::Timer);
        buckets.add(&metric_two);
        let metric_three = Metric::new("other.metric", 811.5, MetricKind::Timer);
        buckets.add(&metric_three);
        assert!(buckets.timers.contains_key("some.metric"),
                "Should contain the metric key");
        assert!(buckets.timers.contains_key("other.metric"),
                "Should contain the metric key");
        assert_eq!(Some(&vec![11.5, 99.5]), buckets.timers.get("some.metric"));
        assert_eq!(Some(&vec![811.5]), buckets.timers.get("other.metric"));
    }
}
|
use wpilib::wpilib_hal::*;
use wpilib::hal_call::*;
use wpilib::sensor;
use wpilib::usage::*;
/// A digital output used to control lights, etc from the RoboRIO.
pub struct DigitalOutput {
// The DIO channel number this output was created on.
channel: i32,
// HAL handle backing this digital output; freed on Drop.
handle: HAL_DigitalHandle,
}
impl DigitalOutput {
/// Create a new digital output on the specified channel, returning an error if initialization
/// fails.
///
/// Returns `Err(0)` when the channel number is out of range.
pub fn new(channel: i32) -> HalResult<DigitalOutput> {
if !sensor::check_digital_channel(channel) {
return Err(0);
}
// `false as i32` selects output mode for the DIO port — TODO confirm
// against the HAL_InitializeDIOPort signature.
let handle = hal_call!(HAL_InitializeDIOPort(HAL_GetPort(channel), false as i32))?;
report_usage(ResourceType::DigitalOutput, channel);
Ok(DigitalOutput {
channel: channel,
handle: handle,
})
}
/// Set the value to output.
pub fn set(&mut self, value: bool) -> HalResult<()> {
hal_call!(HAL_SetDIO(self.handle, value as i32))
}
/// Get the previously-written output.
pub fn get(&self) -> HalResult<bool> {
Ok(hal_call!(HAL_GetDIO(self.handle))? != 0)
}
/// Get the channel for this DIO.
pub fn get_channel(&self) -> i32 {
self.channel
}
/// Get a handle to this DIO.
pub fn get_handle(&self) -> HAL_DigitalHandle {
self.handle
}
/// Write a pulse to this output.
pub fn pulse(&mut self, length: f64) -> HalResult<()> {
hal_call!(HAL_Pulse(self.handle, length))
}
/// Is this output currently in the middle of a pulse?
pub fn is_pulsing(&self) -> HalResult<bool> {
Ok(hal_call!(HAL_IsPulsing(self.handle))? != 0)
}
}
impl Drop for DigitalOutput {
fn drop(&mut self) {
// Return the DIO port handle to the HAL so the channel can be reused.
unsafe {
HAL_FreeDIOPort(self.handle);
}
}
}
Add back in DO PWM code
use wpilib::wpilib_hal::*;
use wpilib::hal_call::*;
use wpilib::sensor;
use wpilib::usage::*;
/// A digital output used to control lights, etc from the RoboRIO.
pub struct DigitalOutput {
channel: i32,
handle: HAL_DigitalHandle,
pwm: Option<HAL_DigitalPWMHandle>,
}
impl DigitalOutput {
    /// Create a new digital output on the specified channel, returning an error if initialization
    /// fails.
    ///
    /// Returns `Err(0)` when the channel number is out of range.
    pub fn new(channel: i32) -> HalResult<DigitalOutput> {
        if !sensor::check_digital_channel(channel) {
            return Err(0);
        }
        let handle = hal_call!(HAL_InitializeDIOPort(HAL_GetPort(channel), false as i32))?;
        report_usage(ResourceType::DigitalOutput, channel);
        Ok(DigitalOutput {
            channel: channel,
            handle: handle,
            pwm: None,
        })
    }

    /// Set the PWM rate for this output, from 0.6Hz to 19kHz. Will return an error if PWM has not
    /// been enabled. All digital channels will use the same PWM rate.
    pub fn set_pwm_rate(rate: f64) -> HalResult<()> {
        hal_call!(HAL_SetDigitalPWMRate(rate))
    }

    /// Set the value to output.
    pub fn set(&mut self, value: bool) -> HalResult<()> {
        hal_call!(HAL_SetDIO(self.handle, value as i32))
    }

    /// Get the previously-written output.
    pub fn get(&self) -> HalResult<bool> {
        Ok(hal_call!(HAL_GetDIO(self.handle))? != 0)
    }

    /// Get the channel for this DIO.
    pub fn get_channel(&self) -> i32 {
        self.channel
    }

    /// Get a handle to this DIO.
    pub fn get_handle(&self) -> HAL_DigitalHandle {
        self.handle
    }

    /// Write a pulse to this output.
    pub fn pulse(&mut self, length: f64) -> HalResult<()> {
        hal_call!(HAL_Pulse(self.handle, length))
    }

    /// Is this output currently in the middle of a pulse?
    pub fn is_pulsing(&self) -> HalResult<bool> {
        Ok(hal_call!(HAL_IsPulsing(self.handle))? != 0)
    }

    /// Enable PWM for this output, starting at the given duty cycle.
    pub fn enable_pwm(&mut self, initial_duty_cycle: f64) -> HalResult<()> {
        // Bug fix: release any generator we already own first, so calling
        // enable_pwm twice does not leak a HAL digital PWM handle.
        self.disable_pwm()?;
        let pwm = hal_call!(HAL_AllocateDigitalPWM())?;
        hal_call!(HAL_SetDigitalPWMDutyCycle(pwm, initial_duty_cycle))?;
        hal_call!(HAL_SetDigitalPWMOutputChannel(pwm, self.channel))?;
        self.pwm = Some(pwm);
        Ok(())
    }

    /// Turn off PWM for this output. A no-op when PWM is not enabled.
    pub fn disable_pwm(&mut self) -> HalResult<()> {
        if let Some(pwm) = self.pwm {
            // Re-point the generator at num_digital_channels() before freeing
            // it — presumably the HAL's "no channel" sentinel; confirm against
            // the HAL digital PWM docs.
            hal_call!(HAL_SetDigitalPWMOutputChannel(pwm, sensor::num_digital_channels()))?;
            hal_call!(HAL_FreeDigitalPWM(pwm))?;
            self.pwm = None;
        }
        Ok(())
    }

    /// Set a new duty cycle to use in PWM on this output.
    /// Silently does nothing when PWM has not been enabled.
    pub fn update_duty_cycle(&mut self, duty_cycle: f64) -> HalResult<()> {
        if let Some(pwm) = self.pwm {
            hal_call!(HAL_SetDigitalPWMDutyCycle(pwm, duty_cycle))
        } else {
            Ok(())
        }
    }
}
impl Drop for DigitalOutput {
fn drop(&mut self) {
// Best-effort PWM teardown; the error is ignored because drop cannot fail.
let _ = self.disable_pwm();
// Return the DIO port handle to the HAL so the channel can be reused.
unsafe {
HAL_FreeDIOPort(self.handle);
}
}
}
|
use std::iter::IntoIterator;
use std::str::FromStr;
use crate::error::Result;
use crate::map::Map;
use crate::source::AsyncSource;
use crate::{config::Config, path::Expression, source::Source, value::Value};
/// A configuration builder
///
/// It registers ordered sources of configuration to later build consistent [`Config`] from them.
/// Configuration sources it defines are defaults, [`Source`]s and overrides.
///
/// Defaults are always loaded first and can be overwritten by either of the two other sources.
/// Overrides are always loaded last, thus cannot be overridden.
/// Both can be only set explicitly key by key in code
/// using [`set_default`](Self::set_default) or [`set_override`](Self::set_override).
///
/// An intermediate category, [`Source`], set groups of keys at once implicitly using data coming from external sources
/// like files, environment variables or others that one implements. Defining a [`Source`] is as simple as implementing
/// a trait for a struct.
///
/// Adding sources, setting defaults and overrides does not invoke any I/O nor builds a config.
/// It happens on demand when [`build`](Self::build) (or its alternative) is called.
/// Therefore all errors, related to any of the [`Source`] will only show up then.
///
/// # Sync and async builder
///
/// [`ConfigBuilder`] uses type parameter to keep track of builder state.
///
/// In [`DefaultState`] builder only supports [`Source`]s
///
/// In [`AsyncState`] it supports both [`Source`]s and [`AsyncSource`]s at the price of building using `async fn`.
///
/// # Examples
///
/// ```rust
/// # use config::*;
/// # use std::error::Error;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// let mut builder = Config::builder()
/// .set_default("default", "1")?
/// .add_source(File::new("config/settings", FileFormat::Json))
/// // .add_async_source(...)
/// .set_override("override", "1")?;
///
/// match builder.build() {
/// Ok(config) => {
/// // use your config
/// },
/// Err(e) => {
/// // something went wrong
/// }
/// }
/// # Ok(())
/// # }
/// ```
///
/// If any [`AsyncSource`] is used, the builder will transition to [`AsyncState`].
/// In such case, it is required to _await_ calls to [`build`](Self::build) and its non-consuming sibling.
///
/// Calls can be not chained as well
/// ```rust
/// # use std::error::Error;
/// # use config::*;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// let mut builder = Config::builder();
/// builder = builder.set_default("default", "1")?;
/// builder = builder.add_source(File::new("config/settings", FileFormat::Json));
/// builder = builder.add_source(File::new("config/settings.prod", FileFormat::Json));
/// builder = builder.set_override("override", "1")?;
/// # Ok(())
/// # }
/// ```
///
/// Calling [`Config::builder`](Config::builder) yields builder in the default state.
/// If having an asynchronous state as the initial state is desired, _turbofish_ notation needs to be used.
/// ```rust
/// # use config::{*, builder::AsyncState};
/// let mut builder = ConfigBuilder::<AsyncState>::default();
/// ```
///
/// If for some reason acquiring builder in default state is required without calling [`Config::builder`](Config::builder)
/// it can also be achieved.
/// ```rust
/// # use config::{*, builder::DefaultState};
/// let mut builder = ConfigBuilder::<DefaultState>::default();
/// ```
#[derive(Debug, Clone, Default)]
pub struct ConfigBuilder<St: BuilderState> {
// Explicitly-set default values, applied before any source.
defaults: Map<Expression, Value>,
// Explicitly-set override values, applied after all sources.
overrides: Map<Expression, Value>,
// State-specific data: the registered sources for this builder state.
state: St,
}
/// Represents [`ConfigBuilder`] state.
///
/// Marker trait implemented by [`DefaultState`] and [`AsyncState`].
pub trait BuilderState {}
/// Represents data specific to builder in default, synchronous state, without support for async.
#[derive(Debug, Default)]
pub struct DefaultState {
// Registered synchronous sources, kept in registration order.
sources: Vec<Box<dyn Source + Send + Sync>>,
}
/// The asynchronous configuration builder.
///
/// Similar to a [`ConfigBuilder`] it maintains a set of defaults, a set of sources, and overrides.
///
/// Defaults do not override anything, sources override defaults, and overrides override anything else.
/// Within those three groups order of adding them at call site matters - entities added later take precedence.
///
/// For more detailed description and examples see [`ConfigBuilder`].
/// [`AsyncConfigBuilder`] is just an extension of it that takes async functions into account.
///
/// To obtain a [`Config`] call [`build`](AsyncConfigBuilder::build) or [`build_cloned`](AsyncConfigBuilder::build_cloned)
///
/// # Example
/// Since this library does not implement any [`AsyncSource`] an example in rustdocs cannot be given.
/// Detailed explanation about why such a source is not implemented is in [`AsyncSource`]'s documentation.
///
/// Refer to [`ConfigBuilder`] for similar API sample usage or to the examples folder of the crate, where such a source is implemented.
#[derive(Debug, Clone, Default)]
pub struct AsyncConfigBuilder {
// Explicitly-set default values, applied before any source.
defaults: Map<Expression, Value>,
// Explicitly-set override values, applied after all sources.
overrides: Map<Expression, Value>,
}
/// Represents data specific to builder in asynchronous state, with support for async.
#[derive(Debug, Default)]
pub struct AsyncState {
// Registered sources (sync or async), kept in registration order.
sources: Vec<SourceType>,
}
/// Internal tag distinguishing sync from async sources so both can live
/// in one ordered list.
#[derive(Debug, Clone)]
enum SourceType {
Sync(Box<dyn Source + Send + Sync>),
Async(Box<dyn AsyncSource + Send + Sync>),
}
// Register the two builder states as valid `BuilderState` markers.
impl BuilderState for DefaultState {}
impl BuilderState for AsyncState {}
impl<St: BuilderState> ConfigBuilder<St> {
    // Operations permitted in every builder state.

    /// Set a default `value` at `key`
    ///
    /// This value can be overwritten by any [`Source`], [`AsyncSource`] or override.
    ///
    /// # Errors
    ///
    /// Fails if `Expression::from_str(key)` fails.
    pub fn set_default<S, T>(mut self, key: S, value: T) -> Result<ConfigBuilder<St>>
    where
        S: AsRef<str>,
        T: Into<Value>,
    {
        let expr = Expression::from_str(key.as_ref())?;
        self.defaults.insert(expr, value.into());
        Ok(self)
    }

    /// Set an override
    ///
    /// This function sets an overwrite value. It will not be altered by any default, [`Source`] nor [`AsyncSource`]
    ///
    /// # Errors
    ///
    /// Fails if `Expression::from_str(key)` fails.
    pub fn set_override<S, T>(mut self, key: S, value: T) -> Result<ConfigBuilder<St>>
    where
        S: AsRef<str>,
        T: Into<Value>,
    {
        let expr = Expression::from_str(key.as_ref())?;
        self.overrides.insert(expr, value.into());
        Ok(self)
    }
}
impl ConfigBuilder<DefaultState> {
    // Operations allowed only in the synchronous (default) state.

    /// Registers new [`Source`] in this builder.
    ///
    /// Calling this method does not invoke any I/O. [`Source`] is only saved in internal register for later use.
    pub fn add_source<T>(mut self, source: T) -> Self
    where
        T: Source + Send + Sync + 'static,
    {
        self.state.sources.push(Box::new(source));
        self
    }

    /// Registers new [`AsyncSource`] in this builder and forces transition to [`AsyncState`].
    ///
    /// Calling this method does not invoke any I/O. [`AsyncSource`] is only saved in internal register for later use.
    pub fn add_async_source<T>(self, source: T) -> ConfigBuilder<AsyncState>
    where
        T: AsyncSource + Send + Sync + 'static,
    {
        // Re-wrap every already-registered sync source so registration order
        // is preserved across the state transition.
        let async_state = ConfigBuilder {
            state: AsyncState {
                sources: self
                    .state
                    .sources
                    .into_iter()
                    // clippy(redundant_closure): pass the variant constructor directly.
                    .map(SourceType::Sync)
                    .collect(),
            },
            defaults: self.defaults,
            overrides: self.overrides,
        };
        async_state.add_async_source(source)
    }

    /// Reads all registered [`Source`]s.
    ///
    /// This is the method that invokes all I/O operations.
    /// For a non consuming alternative see [`build_cloned`](Self::build_cloned)
    ///
    /// # Errors
    /// If source collection fails, be it technical reasons or related to inability to read data as `Config` for different reasons,
    /// this method returns error.
    pub fn build(self) -> Result<Config> {
        Self::build_internal(self.defaults, self.overrides, &self.state.sources)
    }

    /// Reads all registered [`Source`]s.
    ///
    /// Similar to [`build`](Self::build), but it does not take ownership of `ConfigBuilder` to allow later reuse.
    /// Internally it clones data to achieve it.
    ///
    /// # Errors
    /// If source collection fails, be it technical reasons or related to inability to read data as `Config` for different reasons,
    /// this method returns error.
    pub fn build_cloned(&self) -> Result<Config> {
        Self::build_internal(
            self.defaults.clone(),
            self.overrides.clone(),
            &self.state.sources,
        )
    }

    // Shared build pipeline: defaults, then sources, then overrides, so each
    // later layer wins over the previous one.
    fn build_internal(
        defaults: Map<Expression, Value>,
        overrides: Map<Expression, Value>,
        sources: &[Box<dyn Source + Send + Sync>],
    ) -> Result<Config> {
        let mut cache: Value = Map::<String, Value>::new().into();
        // Add defaults
        for (key, val) in defaults.into_iter() {
            key.set(&mut cache, val);
        }
        // Add sources
        sources.collect_to(&mut cache)?;
        // Add overrides
        for (key, val) in overrides.into_iter() {
            key.set(&mut cache, val);
        }
        Ok(Config::new(cache))
    }
}
impl ConfigBuilder<AsyncState> {
// operations allowed in async state
/// Registers new [`Source`] in this builder.
///
/// Calling this method does not invoke any I/O. [`Source`] is only saved in internal register for later use.
pub fn add_source<T>(mut self, source: T) -> ConfigBuilder<AsyncState>
where
T: Source + Send + Sync + 'static,
{
self.state.sources.push(SourceType::Sync(Box::new(source)));
self
}
/// Registers new [`AsyncSource`] in this builder.
///
/// Calling this method does not invoke any I/O. [`AsyncSource`] is only saved in internal register for later use.
pub fn add_async_source<T>(mut self, source: T) -> ConfigBuilder<AsyncState>
where
T: AsyncSource + Send + Sync + 'static,
{
self.state.sources.push(SourceType::Async(Box::new(source)));
self
}
/// Reads all registered defaults, [`Source`]s, [`AsyncSource`]s and overrides.
///
/// This is the method that invokes all I/O operations.
/// For a non consuming alternative see [`build_cloned`](Self::build_cloned)
///
/// # Errors
/// If source collection fails, be it technical reasons or related to inability to read data as `Config` for different reasons,
/// this method returns error.
pub async fn build(self) -> Result<Config> {
Self::build_internal(self.defaults, self.overrides, &self.state.sources).await
}
/// Reads all registered defaults, [`Source`]s, [`AsyncSource`]s and overrides.
///
/// Similar to [`build`](Self::build), but it does not take ownership of `ConfigBuilder` to allow later reuse.
/// Internally it clones data to achieve it.
///
/// # Errors
/// If source collection fails, be it technical reasons or related to inability to read data as `Config` for different reasons,
/// this method returns error.
pub async fn build_cloned(&self) -> Result<Config> {
Self::build_internal(
self.defaults.clone(),
self.overrides.clone(),
&self.state.sources,
)
.await
}
// Shared build pipeline: defaults, then sources, then overrides, so each
// later layer wins over the previous one.
async fn build_internal(
defaults: Map<Expression, Value>,
overrides: Map<Expression, Value>,
sources: &[SourceType],
) -> Result<Config> {
let mut cache: Value = Map::<String, Value>::new().into();
// Add defaults
for (key, val) in defaults.into_iter() {
key.set(&mut cache, val);
}
// Sources are applied strictly in registration order; sync sources run
// inline while async ones are awaited one at a time.
for source in sources.iter() {
match source {
SourceType::Sync(source) => source.collect_to(&mut cache)?,
SourceType::Async(source) => source.collect_to(&mut cache).await?,
}
}
// Add overrides
for (key, val) in overrides.into_iter() {
key.set(&mut cache, val);
}
Ok(Config::new(cache))
}
}
Fix clippy: Remove unused field: AsyncConfigBuilder::overrides
Signed-off-by: Matthias Beyer <1d6e1cf70ec6f9ab28d3ea4b27a49a77654d370e@beyermatthias.de>
use std::iter::IntoIterator;
use std::str::FromStr;
use crate::error::Result;
use crate::map::Map;
use crate::source::AsyncSource;
use crate::{config::Config, path::Expression, source::Source, value::Value};
/// A configuration builder
///
/// It registers ordered sources of configuration to later build consistent [`Config`] from them.
/// Configuration sources it defines are defaults, [`Source`]s and overrides.
///
/// Defaults are always loaded first and can be overwritten by either of the two other sources.
/// Overrides are always loaded last, thus cannot be overridden.
/// Both can be only set explicitly key by key in code
/// using [`set_default`](Self::set_default) or [`set_override`](Self::set_override).
///
/// An intermediate category, [`Source`], set groups of keys at once implicitly using data coming from external sources
/// like files, environment variables or others that one implements. Defining a [`Source`] is as simple as implementing
/// a trait for a struct.
///
/// Adding sources, setting defaults and overrides does not invoke any I/O nor builds a config.
/// It happens on demand when [`build`](Self::build) (or its alternative) is called.
/// Therefore all errors, related to any of the [`Source`] will only show up then.
///
/// # Sync and async builder
///
/// [`ConfigBuilder`] uses type parameter to keep track of builder state.
///
/// In [`DefaultState`] builder only supports [`Source`]s
///
/// In [`AsyncState`] it supports both [`Source`]s and [`AsyncSource`]s at the price of building using `async fn`.
///
/// # Examples
///
/// ```rust
/// # use config::*;
/// # use std::error::Error;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// let mut builder = Config::builder()
/// .set_default("default", "1")?
/// .add_source(File::new("config/settings", FileFormat::Json))
/// // .add_async_source(...)
/// .set_override("override", "1")?;
///
/// match builder.build() {
/// Ok(config) => {
/// // use your config
/// },
/// Err(e) => {
/// // something went wrong
/// }
/// }
/// # Ok(())
/// # }
/// ```
///
/// If any [`AsyncSource`] is used, the builder will transition to [`AsyncState`].
/// In such case, it is required to _await_ calls to [`build`](Self::build) and its non-consuming sibling.
///
/// Calls can be not chained as well
/// ```rust
/// # use std::error::Error;
/// # use config::*;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// let mut builder = Config::builder();
/// builder = builder.set_default("default", "1")?;
/// builder = builder.add_source(File::new("config/settings", FileFormat::Json));
/// builder = builder.add_source(File::new("config/settings.prod", FileFormat::Json));
/// builder = builder.set_override("override", "1")?;
/// # Ok(())
/// # }
/// ```
///
/// Calling [`Config::builder`](Config::builder) yields builder in the default state.
/// If having an asynchronous state as the initial state is desired, _turbofish_ notation needs to be used.
/// ```rust
/// # use config::{*, builder::AsyncState};
/// let mut builder = ConfigBuilder::<AsyncState>::default();
/// ```
///
/// If for some reason acquiring builder in default state is required without calling [`Config::builder`](Config::builder)
/// it can also be achieved.
/// ```rust
/// # use config::{*, builder::DefaultState};
/// let mut builder = ConfigBuilder::<DefaultState>::default();
/// ```
#[derive(Debug, Clone, Default)]
pub struct ConfigBuilder<St: BuilderState> {
    // Weakest layer: applied first, replaced by any source or override with the same key.
    defaults: Map<Expression, Value>,
    // Strongest layer: applied last, wins over defaults and sources.
    overrides: Map<Expression, Value>,
    // State-specific data; the registered sources live inside the state type.
    state: St,
}
/// Represents [`ConfigBuilder`] state.
///
/// Implemented by [`DefaultState`] and [`AsyncState`]; used as a type-level
/// marker so the builder exposes a different API in each state.
pub trait BuilderState {}
/// Represents data specific to builder in default, synchronous state, without support for async.
#[derive(Debug, Default)]
pub struct DefaultState {
    // Synchronous sources, kept in registration order.
    sources: Vec<Box<dyn Source + Send + Sync>>,
}
/// The asynchronous configuration builder.
///
/// Similar to a [`ConfigBuilder`] it maintains a set of defaults, a set of sources, and overrides.
///
/// Defaults do not override anything, sources override defaults, and overrides override anything else.
/// Within those three groups order of adding them at call site matters - entities added later take precedence.
///
/// For more detailed description and examples see [`ConfigBuilder`].
/// [`AsyncConfigBuilder`] is just an extension of it that takes async functions into account.
///
/// To obtain a [`Config`] call [`build`](AsyncConfigBuilder::build) or [`build_cloned`](AsyncConfigBuilder::build_cloned)
///
/// # Example
/// Since this library does not implement any [`AsyncSource`] an example in rustdocs cannot be given.
/// Detailed explanation about why such a source is not implemented is in [`AsyncSource`]'s documentation.
///
/// Refer to [`ConfigBuilder`] for similar API sample usage or to the examples folder of the crate, where such a source is implemented.
#[derive(Debug, Clone, Default)]
pub struct AsyncConfigBuilder {
    // NOTE(review): only defaults are stored here, while sources and overrides
    // live on `ConfigBuilder<AsyncState>` — confirm whether this type is still
    // actively used or is a leftover from an earlier design.
    defaults: Map<Expression, Value>,
}
/// Represents data specific to builder in asynchronous state, with support for async.
#[derive(Debug, Default)]
pub struct AsyncState {
    // Mixed list of sync and async sources, kept in registration order.
    sources: Vec<SourceType>,
}
// Internal tag distinguishing synchronous from asynchronous sources so both
// kinds can share one ordered list inside `AsyncState`.
#[derive(Debug, Clone)]
enum SourceType {
    /// A blocking [`Source`]; collected with a plain call during build.
    Sync(Box<dyn Source + Send + Sync>),
    /// An [`AsyncSource`]; collection must be awaited during build.
    Async(Box<dyn AsyncSource + Send + Sync>),
}
// Marker impls wiring the two state types into the builder's type parameter.
impl BuilderState for DefaultState {}
impl BuilderState for AsyncState {}
impl<St: BuilderState> ConfigBuilder<St> {
    // operations available regardless of the builder's state

    /// Registers a default `value` under `key`.
    ///
    /// Defaults form the weakest layer: any [`Source`], [`AsyncSource`] or
    /// override with the same key replaces them.
    ///
    /// # Errors
    ///
    /// Fails if `Expression::from_str(key)` fails.
    pub fn set_default<S, T>(mut self, key: S, value: T) -> Result<ConfigBuilder<St>>
    where
        S: AsRef<str>,
        T: Into<Value>,
    {
        let expr = Expression::from_str(key.as_ref())?;
        self.defaults.insert(expr, value.into());
        Ok(self)
    }

    /// Registers an override `value` under `key`.
    ///
    /// Overrides form the strongest layer: no default, [`Source`] nor
    /// [`AsyncSource`] can alter them.
    ///
    /// # Errors
    ///
    /// Fails if `Expression::from_str(key)` fails.
    pub fn set_override<S, T>(mut self, key: S, value: T) -> Result<ConfigBuilder<St>>
    where
        S: AsRef<str>,
        T: Into<Value>,
    {
        let expr = Expression::from_str(key.as_ref())?;
        self.overrides.insert(expr, value.into());
        Ok(self)
    }
}
impl ConfigBuilder<DefaultState> {
    // operations allowed in sync state

    /// Registers new [`Source`] in this builder.
    ///
    /// Calling this method does not invoke any I/O. [`Source`] is only saved in internal register for later use.
    pub fn add_source<T>(mut self, source: T) -> Self
    where
        T: Source + Send + Sync + 'static,
    {
        self.state.sources.push(Box::new(source));
        self
    }

    /// Registers new [`AsyncSource`] in this builder and forces transition to [`AsyncState`].
    ///
    /// Calling this method does not invoke any I/O. [`AsyncSource`] is only saved in internal register for later use.
    pub fn add_async_source<T>(self, source: T) -> ConfigBuilder<AsyncState>
    where
        T: AsyncSource + Send + Sync + 'static,
    {
        // Re-wrap the already-registered sync sources so they keep their
        // relative order in the async builder's mixed source list.
        // (`map(SourceType::Sync)` instead of a redundant closure — clippy::redundant_closure.)
        let async_state = ConfigBuilder {
            state: AsyncState {
                sources: self
                    .state
                    .sources
                    .into_iter()
                    .map(SourceType::Sync)
                    .collect(),
            },
            defaults: self.defaults,
            overrides: self.overrides,
        };
        async_state.add_async_source(source)
    }

    /// Reads all registered [`Source`]s.
    ///
    /// This is the method that invokes all I/O operations.
    /// For a non consuming alternative see [`build_cloned`](Self::build_cloned)
    ///
    /// # Errors
    /// If source collection fails, be it technical reasons or related to inability to read data as `Config` for different reasons,
    /// this method returns error.
    pub fn build(self) -> Result<Config> {
        Self::build_internal(self.defaults, self.overrides, &self.state.sources)
    }

    /// Reads all registered [`Source`]s.
    ///
    /// Similar to [`build`](Self::build), but it does not take ownership of `ConfigBuilder` to allow later reuse.
    /// Internally it clones data to achieve it.
    ///
    /// # Errors
    /// If source collection fails, be it technical reasons or related to inability to read data as `Config` for different reasons,
    /// this method returns error.
    pub fn build_cloned(&self) -> Result<Config> {
        Self::build_internal(
            self.defaults.clone(),
            self.overrides.clone(),
            &self.state.sources,
        )
    }

    // Merges the three layers in precedence order: defaults, then sources,
    // then overrides — later writes to the same key win.
    fn build_internal(
        defaults: Map<Expression, Value>,
        overrides: Map<Expression, Value>,
        sources: &[Box<dyn Source + Send + Sync>],
    ) -> Result<Config> {
        let mut cache: Value = Map::<String, Value>::new().into();
        // Add defaults
        for (key, val) in defaults.into_iter() {
            key.set(&mut cache, val);
        }
        // Add sources
        sources.collect_to(&mut cache)?;
        // Add overrides
        for (key, val) in overrides.into_iter() {
            key.set(&mut cache, val);
        }
        Ok(Config::new(cache))
    }
}
impl ConfigBuilder<AsyncState> {
    // operations allowed in async state

    /// Registers new [`Source`] in this builder.
    ///
    /// Calling this method does not invoke any I/O. [`Source`] is only saved in internal register for later use.
    pub fn add_source<T>(mut self, source: T) -> ConfigBuilder<AsyncState>
    where
        T: Source + Send + Sync + 'static,
    {
        let entry = SourceType::Sync(Box::new(source));
        self.state.sources.push(entry);
        self
    }

    /// Registers new [`AsyncSource`] in this builder.
    ///
    /// Calling this method does not invoke any I/O. [`AsyncSource`] is only saved in internal register for later use.
    pub fn add_async_source<T>(mut self, source: T) -> ConfigBuilder<AsyncState>
    where
        T: AsyncSource + Send + Sync + 'static,
    {
        let entry = SourceType::Async(Box::new(source));
        self.state.sources.push(entry);
        self
    }

    /// Reads all registered defaults, [`Source`]s, [`AsyncSource`]s and overrides.
    ///
    /// This is the method that invokes all I/O operations.
    /// For a non consuming alternative see [`build_cloned`](Self::build_cloned)
    ///
    /// # Errors
    /// If source collection fails, be it technical reasons or related to inability to read data as `Config` for different reasons,
    /// this method returns error.
    pub async fn build(self) -> Result<Config> {
        Self::build_internal(self.defaults, self.overrides, &self.state.sources).await
    }

    /// Reads all registered defaults, [`Source`]s, [`AsyncSource`]s and overrides.
    ///
    /// Similar to [`build`](Self::build), but it does not take ownership of `ConfigBuilder` to allow later reuse.
    /// Internally it clones data to achieve it.
    ///
    /// # Errors
    /// If source collection fails, be it technical reasons or related to inability to read data as `Config` for different reasons,
    /// this method returns error.
    pub async fn build_cloned(&self) -> Result<Config> {
        Self::build_internal(
            self.defaults.clone(),
            self.overrides.clone(),
            &self.state.sources,
        )
        .await
    }

    // Merges the three layers in precedence order: defaults, then sources
    // (awaiting async ones inline, in registration order), then overrides.
    async fn build_internal(
        defaults: Map<Expression, Value>,
        overrides: Map<Expression, Value>,
        sources: &[SourceType],
    ) -> Result<Config> {
        let mut cache: Value = Map::<String, Value>::new().into();
        // Layer 1: defaults.
        for (key, val) in defaults {
            key.set(&mut cache, val);
        }
        // Layer 2: sources, in the order they were registered.
        for entry in sources {
            match entry {
                SourceType::Sync(src) => src.collect_to(&mut cache)?,
                SourceType::Async(src) => src.collect_to(&mut cache).await?,
            }
        }
        // Layer 3: overrides.
        for (key, val) in overrides {
            key.set(&mut cache, val);
        }
        Ok(Config::new(cache))
    }
}
|
#![deny(
non_camel_case_types,
non_snake_case,
path_statements,
trivial_numeric_casts,
unstable_features,
unused_allocation,
unused_import_braces,
unused_imports,
unused_mut,
unused_qualifications,
while_true,
)]
extern crate clap;
extern crate log;
extern crate interactor;
extern crate libimagstore;
#[macro_use]
extern crate libimagerror;
pub mod ui;
Add modules: error, result
#![deny(
non_camel_case_types,
non_snake_case,
path_statements,
trivial_numeric_casts,
unstable_features,
unused_allocation,
unused_import_braces,
unused_imports,
unused_mut,
unused_qualifications,
while_true,
)]
extern crate clap;
extern crate log;
extern crate interactor;
extern crate libimagstore;
#[macro_use]
extern crate libimagerror;
pub mod error;
pub mod result;
pub mod ui;
|
//! Implementations of things like `Eq` for fixed-length arrays
//! up to a certain length. Eventually, we should be able to generalize
//! to all lengths.
//!
//! *[See also the array primitive type](../../std/primitive.array.html).*
#![stable(feature = "core_array", since = "1.36.0")]
use crate::borrow::{Borrow, BorrowMut};
use crate::cmp::Ordering;
use crate::convert::{Infallible, TryFrom};
use crate::fmt;
use crate::hash::{self, Hash};
use crate::marker::Unsize;
use crate::slice::{Iter, IterMut};
mod iter;
#[unstable(feature = "array_value_iter", issue = "65798")]
pub use iter::IntoIter;
/// Converts a reference to `T` into a reference to an array of length 1 (without copying).
#[unstable(feature = "array_from_ref", issue = "77101")]
pub fn from_ref<T>(s: &T) -> &[T; 1] {
    // SAFETY: Converting `&T` to `&[T; 1]` is sound.
    unsafe { &*(s as *const T).cast::<[T; 1]>() }
}

/// Converts a mutable reference to `T` into a mutable reference to an array of length 1 (without copying).
#[unstable(feature = "array_from_ref", issue = "77101")]
pub fn from_mut<T>(s: &mut T) -> &mut [T; 1] {
    // SAFETY: Converting `&mut T` to `&mut [T; 1]` is sound.
    unsafe { &mut *(s as *mut T).cast::<[T; 1]>() }
}

/// Utility trait implemented only on arrays of fixed size
///
/// This trait can be used to implement other traits on fixed-size arrays
/// without causing much metadata bloat.
///
/// The trait is marked unsafe in order to restrict implementors to fixed-size
/// arrays. User of this trait can assume that implementors have the exact
/// layout in memory of a fixed size array (for example, for unsafe
/// initialization).
///
/// Note that the traits [`AsRef`] and [`AsMut`] provide similar methods for types that
/// may not be fixed-size arrays. Implementors should prefer those traits
/// instead.
#[unstable(feature = "fixed_size_array", issue = "27778")]
pub unsafe trait FixedSizeArray<T> {
    /// Converts the array to immutable slice
    #[unstable(feature = "fixed_size_array", issue = "27778")]
    fn as_slice(&self) -> &[T];
    /// Converts the array to mutable slice
    #[unstable(feature = "fixed_size_array", issue = "27778")]
    fn as_mut_slice(&mut self) -> &mut [T];
}

// Blanket implementation: every type that unsizes to `[T]` (in particular each
// `[T; N]`) gets the slice conversions via the ordinary unsizing coercion.
#[unstable(feature = "fixed_size_array", issue = "27778")]
unsafe impl<T, A: Unsize<[T]>> FixedSizeArray<T> for A {
    #[inline]
    fn as_slice(&self) -> &[T] {
        self
    }
    #[inline]
    fn as_mut_slice(&mut self) -> &mut [T] {
        self
    }
}
/// The error type returned when a conversion from a slice to an array fails.
#[stable(feature = "try_from", since = "1.34.0")]
#[derive(Debug, Copy, Clone)]
pub struct TryFromSliceError(());

#[stable(feature = "core_array", since = "1.36.0")]
impl fmt::Display for TryFromSliceError {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self.__description(), f)
    }
}

impl TryFromSliceError {
    #[unstable(
        feature = "array_error_internals",
        reason = "available through Error trait and this method should not \
                  be exposed publicly",
        issue = "none"
    )]
    #[inline]
    #[doc(hidden)]
    pub fn __description(&self) -> &str {
        "could not convert slice to array"
    }
}

#[stable(feature = "try_from_slice_error", since = "1.36.0")]
impl From<Infallible> for TryFromSliceError {
    fn from(x: Infallible) -> TryFromSliceError {
        // `Infallible` has no values, so this conversion can never actually run.
        match x {}
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, const N: usize> AsRef<[T]> for [T; N] {
    #[inline]
    fn as_ref(&self) -> &[T] {
        &self[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, const N: usize> AsMut<[T]> for [T; N] {
    #[inline]
    fn as_mut(&mut self) -> &mut [T] {
        &mut self[..]
    }
}

#[stable(feature = "array_borrow", since = "1.4.0")]
impl<T, const N: usize> Borrow<[T]> for [T; N] {
    fn borrow(&self) -> &[T] {
        self
    }
}

#[stable(feature = "array_borrow", since = "1.4.0")]
impl<T, const N: usize> BorrowMut<[T]> for [T; N] {
    fn borrow_mut(&mut self) -> &mut [T] {
        self
    }
}

// Fallible conversion from a slice to an owned array; requires `T: Copy`
// because it dereferences the borrowed-array result of the impl below.
#[stable(feature = "try_from", since = "1.34.0")]
impl<T, const N: usize> TryFrom<&[T]> for [T; N]
where
    T: Copy,
{
    type Error = TryFromSliceError;
    fn try_from(slice: &[T]) -> Result<[T; N], TryFromSliceError> {
        <&Self>::try_from(slice).map(|r| *r)
    }
}

#[stable(feature = "try_from", since = "1.34.0")]
impl<'a, T, const N: usize> TryFrom<&'a [T]> for &'a [T; N] {
    type Error = TryFromSliceError;
    fn try_from(slice: &[T]) -> Result<&[T; N], TryFromSliceError> {
        if slice.len() == N {
            let ptr = slice.as_ptr() as *const [T; N];
            // SAFETY: ok because we just checked that the length fits
            unsafe { Ok(&*ptr) }
        } else {
            Err(TryFromSliceError(()))
        }
    }
}

#[stable(feature = "try_from", since = "1.34.0")]
impl<'a, T, const N: usize> TryFrom<&'a mut [T]> for &'a mut [T; N] {
    type Error = TryFromSliceError;
    fn try_from(slice: &mut [T]) -> Result<&mut [T; N], TryFromSliceError> {
        if slice.len() == N {
            let ptr = slice.as_mut_ptr() as *mut [T; N];
            // SAFETY: ok because we just checked that the length fits
            unsafe { Ok(&mut *ptr) }
        } else {
            Err(TryFromSliceError(()))
        }
    }
}
// The following impls delegate to the corresponding slice implementations
// by reborrowing the array as `&self[..]`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Hash, const N: usize> Hash for [T; N] {
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        Hash::hash(&self[..], state)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug, const N: usize> fmt::Debug for [T; N] {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&&self[..], f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, const N: usize> IntoIterator for &'a [T; N] {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;
    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, const N: usize> IntoIterator for &'a mut [T; N] {
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T>;
    fn into_iter(self) -> IterMut<'a, T> {
        self.iter_mut()
    }
}

// Equality between arrays, slices and (mutable) slice references, in every
// direction; all of them compare element-wise through the slice impls.
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B, const N: usize> PartialEq<[B; N]> for [A; N]
where
    A: PartialEq<B>,
{
    #[inline]
    fn eq(&self, other: &[B; N]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &[B; N]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B, const N: usize> PartialEq<[B]> for [A; N]
where
    A: PartialEq<B>,
{
    #[inline]
    fn eq(&self, other: &[B]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &[B]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B, const N: usize> PartialEq<[A; N]> for [B]
where
    B: PartialEq<A>,
{
    #[inline]
    fn eq(&self, other: &[A; N]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &[A; N]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'b, A, B, const N: usize> PartialEq<&'b [B]> for [A; N]
where
    A: PartialEq<B>,
{
    #[inline]
    fn eq(&self, other: &&'b [B]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &&'b [B]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'b, A, B, const N: usize> PartialEq<[A; N]> for &'b [B]
where
    B: PartialEq<A>,
{
    #[inline]
    fn eq(&self, other: &[A; N]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &[A; N]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'b, A, B, const N: usize> PartialEq<&'b mut [B]> for [A; N]
where
    A: PartialEq<B>,
{
    #[inline]
    fn eq(&self, other: &&'b mut [B]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &&'b mut [B]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'b, A, B, const N: usize> PartialEq<[A; N]> for &'b mut [B]
where
    B: PartialEq<A>,
{
    #[inline]
    fn eq(&self, other: &[A; N]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &[A; N]) -> bool {
        self[..] != other[..]
    }
}

// NOTE: some less important impls are omitted to reduce code bloat
// __impl_slice_eq2! { [A; $N], &'b [B; $N] }
// __impl_slice_eq2! { [A; $N], &'b mut [B; $N] }
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq, const N: usize> Eq for [T; N] {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd, const N: usize> PartialOrd for [T; N] {
    #[inline]
    fn partial_cmp(&self, other: &[T; N]) -> Option<Ordering> {
        PartialOrd::partial_cmp(&&self[..], &&other[..])
    }
    #[inline]
    fn lt(&self, other: &[T; N]) -> bool {
        PartialOrd::lt(&&self[..], &&other[..])
    }
    #[inline]
    fn le(&self, other: &[T; N]) -> bool {
        PartialOrd::le(&&self[..], &&other[..])
    }
    #[inline]
    fn ge(&self, other: &[T; N]) -> bool {
        PartialOrd::ge(&&self[..], &&other[..])
    }
    #[inline]
    fn gt(&self, other: &[T; N]) -> bool {
        PartialOrd::gt(&&self[..], &&other[..])
    }
}

/// Implements comparison of arrays lexicographically.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord, const N: usize> Ord for [T; N] {
    #[inline]
    fn cmp(&self, other: &[T; N]) -> Ordering {
        Ord::cmp(&&self[..], &&other[..])
    }
}

// The Default impls cannot be done with const generics because `[T; 0]` doesn't
// require Default to be implemented, and having different impl blocks for
// different numbers isn't supported yet.
macro_rules! array_impl_default {
    {$n:expr, $t:ident $($ts:ident)*} => {
        #[stable(since = "1.4.0", feature = "array_default")]
        impl<T> Default for [T; $n] where T: Default {
            fn default() -> [T; $n] {
                [$t::default(), $($ts::default()),*]
            }
        }
        array_impl_default!{($n - 1), $($ts)*}
    };
    {$n:expr,} => {
        #[stable(since = "1.4.0", feature = "array_default")]
        impl<T> Default for [T; $n] {
            fn default() -> [T; $n] { [] }
        }
    };
}
array_impl_default! {32, T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T}
#[lang = "array"]
impl<T, const N: usize> [T; N] {
    /// Returns an array of the same size as `self`, with function `f` applied to each element
    /// in order.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(array_map)]
    /// let x = [1, 2, 3];
    /// let y = x.map(|v| v + 1);
    /// assert_eq!(y, [2, 3, 4]);
    ///
    /// let x = [1, 2, 3];
    /// let mut temp = 0;
    /// let y = x.map(|v| { temp += 1; v * temp });
    /// assert_eq!(y, [1, 4, 9]);
    ///
    /// let x = ["Ferris", "Bueller's", "Day", "Off"];
    /// let y = x.map(|v| v.len());
    /// assert_eq!(y, [6, 9, 3, 3]);
    /// ```
    #[unstable(feature = "array_map", issue = "75243")]
    pub fn map<F, U>(self, mut f: F) -> [U; N]
    where
        F: FnMut(T) -> U,
    {
        use crate::mem::MaybeUninit;
        // Drop guard: if `f` panics partway through, dropping this guard drops
        // exactly the destination elements that have already been written.
        struct Guard<T, const N: usize> {
            dst: *mut T,
            initialized: usize,
        }
        impl<T, const N: usize> Drop for Guard<T, N> {
            fn drop(&mut self) {
                debug_assert!(self.initialized <= N);
                let initialized_part =
                    crate::ptr::slice_from_raw_parts_mut(self.dst, self.initialized);
                // SAFETY: this raw slice will contain only initialized objects
                // that's why, it is allowed to drop it.
                unsafe {
                    crate::ptr::drop_in_place(initialized_part);
                }
            }
        }
        let mut dst = MaybeUninit::uninit_array::<N>();
        let mut guard: Guard<U, N> =
            Guard { dst: MaybeUninit::slice_as_mut_ptr(&mut dst), initialized: 0 };
        for (src, dst) in IntoIter::new(self).zip(&mut dst) {
            dst.write(f(src));
            guard.initialized += 1;
        }
        // FIXME: Convert to crate::mem::transmute once it works with generics.
        // unsafe { crate::mem::transmute::<[MaybeUninit<U>; N], [U; N]>(dst) }
        crate::mem::forget(guard);
        // SAFETY: At this point we've properly initialized the whole array
        // and we just need to cast it to the correct type.
        unsafe { crate::mem::transmute_copy::<_, [U; N]>(&dst) }
    }
    /// Returns a slice containing the entire array. Equivalent to `&s[..]`.
    #[unstable(feature = "array_methods", issue = "76118")]
    pub fn as_slice(&self) -> &[T] {
        self
    }
    /// Returns a mutable slice containing the entire array. Equivalent to
    /// `&mut s[..]`.
    #[unstable(feature = "array_methods", issue = "76118")]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        self
    }
}
Fix outdated comment next to array_impl_default
The comment has become outdated as the array_impl macro
has been removed.
//! Implementations of things like `Eq` for fixed-length arrays
//! up to a certain length. Eventually, we should be able to generalize
//! to all lengths.
//!
//! *[See also the array primitive type](../../std/primitive.array.html).*
#![stable(feature = "core_array", since = "1.36.0")]
use crate::borrow::{Borrow, BorrowMut};
use crate::cmp::Ordering;
use crate::convert::{Infallible, TryFrom};
use crate::fmt;
use crate::hash::{self, Hash};
use crate::marker::Unsize;
use crate::slice::{Iter, IterMut};
mod iter;
#[unstable(feature = "array_value_iter", issue = "65798")]
pub use iter::IntoIter;
/// Converts a reference to `T` into a reference to an array of length 1 (without copying).
#[unstable(feature = "array_from_ref", issue = "77101")]
pub fn from_ref<T>(s: &T) -> &[T; 1] {
    // SAFETY: Converting `&T` to `&[T; 1]` is sound.
    unsafe { &*(s as *const T).cast::<[T; 1]>() }
}

/// Converts a mutable reference to `T` into a mutable reference to an array of length 1 (without copying).
#[unstable(feature = "array_from_ref", issue = "77101")]
pub fn from_mut<T>(s: &mut T) -> &mut [T; 1] {
    // SAFETY: Converting `&mut T` to `&mut [T; 1]` is sound.
    unsafe { &mut *(s as *mut T).cast::<[T; 1]>() }
}

/// Utility trait implemented only on arrays of fixed size
///
/// This trait can be used to implement other traits on fixed-size arrays
/// without causing much metadata bloat.
///
/// The trait is marked unsafe in order to restrict implementors to fixed-size
/// arrays. User of this trait can assume that implementors have the exact
/// layout in memory of a fixed size array (for example, for unsafe
/// initialization).
///
/// Note that the traits [`AsRef`] and [`AsMut`] provide similar methods for types that
/// may not be fixed-size arrays. Implementors should prefer those traits
/// instead.
#[unstable(feature = "fixed_size_array", issue = "27778")]
pub unsafe trait FixedSizeArray<T> {
    /// Converts the array to immutable slice
    #[unstable(feature = "fixed_size_array", issue = "27778")]
    fn as_slice(&self) -> &[T];
    /// Converts the array to mutable slice
    #[unstable(feature = "fixed_size_array", issue = "27778")]
    fn as_mut_slice(&mut self) -> &mut [T];
}

// Blanket implementation: every type that unsizes to `[T]` (in particular each
// `[T; N]`) gets the slice conversions via the ordinary unsizing coercion.
#[unstable(feature = "fixed_size_array", issue = "27778")]
unsafe impl<T, A: Unsize<[T]>> FixedSizeArray<T> for A {
    #[inline]
    fn as_slice(&self) -> &[T] {
        self
    }
    #[inline]
    fn as_mut_slice(&mut self) -> &mut [T] {
        self
    }
}
/// The error type returned when a conversion from a slice to an array fails.
#[stable(feature = "try_from", since = "1.34.0")]
#[derive(Debug, Copy, Clone)]
pub struct TryFromSliceError(());

#[stable(feature = "core_array", since = "1.36.0")]
impl fmt::Display for TryFromSliceError {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self.__description(), f)
    }
}

impl TryFromSliceError {
    #[unstable(
        feature = "array_error_internals",
        reason = "available through Error trait and this method should not \
                  be exposed publicly",
        issue = "none"
    )]
    #[inline]
    #[doc(hidden)]
    pub fn __description(&self) -> &str {
        "could not convert slice to array"
    }
}

#[stable(feature = "try_from_slice_error", since = "1.36.0")]
impl From<Infallible> for TryFromSliceError {
    fn from(x: Infallible) -> TryFromSliceError {
        // `Infallible` has no values, so this conversion can never actually run.
        match x {}
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, const N: usize> AsRef<[T]> for [T; N] {
    #[inline]
    fn as_ref(&self) -> &[T] {
        &self[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, const N: usize> AsMut<[T]> for [T; N] {
    #[inline]
    fn as_mut(&mut self) -> &mut [T] {
        &mut self[..]
    }
}

#[stable(feature = "array_borrow", since = "1.4.0")]
impl<T, const N: usize> Borrow<[T]> for [T; N] {
    fn borrow(&self) -> &[T] {
        self
    }
}

#[stable(feature = "array_borrow", since = "1.4.0")]
impl<T, const N: usize> BorrowMut<[T]> for [T; N] {
    fn borrow_mut(&mut self) -> &mut [T] {
        self
    }
}

// Fallible conversion from a slice to an owned array; requires `T: Copy`
// because it dereferences the borrowed-array result of the impl below.
#[stable(feature = "try_from", since = "1.34.0")]
impl<T, const N: usize> TryFrom<&[T]> for [T; N]
where
    T: Copy,
{
    type Error = TryFromSliceError;
    fn try_from(slice: &[T]) -> Result<[T; N], TryFromSliceError> {
        <&Self>::try_from(slice).map(|r| *r)
    }
}

#[stable(feature = "try_from", since = "1.34.0")]
impl<'a, T, const N: usize> TryFrom<&'a [T]> for &'a [T; N] {
    type Error = TryFromSliceError;
    fn try_from(slice: &[T]) -> Result<&[T; N], TryFromSliceError> {
        if slice.len() == N {
            let ptr = slice.as_ptr() as *const [T; N];
            // SAFETY: ok because we just checked that the length fits
            unsafe { Ok(&*ptr) }
        } else {
            Err(TryFromSliceError(()))
        }
    }
}

#[stable(feature = "try_from", since = "1.34.0")]
impl<'a, T, const N: usize> TryFrom<&'a mut [T]> for &'a mut [T; N] {
    type Error = TryFromSliceError;
    fn try_from(slice: &mut [T]) -> Result<&mut [T; N], TryFromSliceError> {
        if slice.len() == N {
            let ptr = slice.as_mut_ptr() as *mut [T; N];
            // SAFETY: ok because we just checked that the length fits
            unsafe { Ok(&mut *ptr) }
        } else {
            Err(TryFromSliceError(()))
        }
    }
}
// The following impls delegate to the corresponding slice implementations
// by reborrowing the array as `&self[..]`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Hash, const N: usize> Hash for [T; N] {
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        Hash::hash(&self[..], state)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug, const N: usize> fmt::Debug for [T; N] {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&&self[..], f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, const N: usize> IntoIterator for &'a [T; N] {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;
    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, const N: usize> IntoIterator for &'a mut [T; N] {
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T>;
    fn into_iter(self) -> IterMut<'a, T> {
        self.iter_mut()
    }
}

// Equality between arrays, slices and (mutable) slice references, in every
// direction; all of them compare element-wise through the slice impls.
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B, const N: usize> PartialEq<[B; N]> for [A; N]
where
    A: PartialEq<B>,
{
    #[inline]
    fn eq(&self, other: &[B; N]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &[B; N]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B, const N: usize> PartialEq<[B]> for [A; N]
where
    A: PartialEq<B>,
{
    #[inline]
    fn eq(&self, other: &[B]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &[B]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B, const N: usize> PartialEq<[A; N]> for [B]
where
    B: PartialEq<A>,
{
    #[inline]
    fn eq(&self, other: &[A; N]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &[A; N]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'b, A, B, const N: usize> PartialEq<&'b [B]> for [A; N]
where
    A: PartialEq<B>,
{
    #[inline]
    fn eq(&self, other: &&'b [B]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &&'b [B]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'b, A, B, const N: usize> PartialEq<[A; N]> for &'b [B]
where
    B: PartialEq<A>,
{
    #[inline]
    fn eq(&self, other: &[A; N]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &[A; N]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'b, A, B, const N: usize> PartialEq<&'b mut [B]> for [A; N]
where
    A: PartialEq<B>,
{
    #[inline]
    fn eq(&self, other: &&'b mut [B]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &&'b mut [B]) -> bool {
        self[..] != other[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'b, A, B, const N: usize> PartialEq<[A; N]> for &'b mut [B]
where
    B: PartialEq<A>,
{
    #[inline]
    fn eq(&self, other: &[A; N]) -> bool {
        self[..] == other[..]
    }
    #[inline]
    fn ne(&self, other: &[A; N]) -> bool {
        self[..] != other[..]
    }
}

// NOTE: some less important impls are omitted to reduce code bloat
// __impl_slice_eq2! { [A; $N], &'b [B; $N] }
// __impl_slice_eq2! { [A; $N], &'b mut [B; $N] }
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq, const N: usize> Eq for [T; N] {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd, const N: usize> PartialOrd for [T; N] {
    #[inline]
    fn partial_cmp(&self, other: &[T; N]) -> Option<Ordering> {
        PartialOrd::partial_cmp(&&self[..], &&other[..])
    }
    #[inline]
    fn lt(&self, other: &[T; N]) -> bool {
        PartialOrd::lt(&&self[..], &&other[..])
    }
    #[inline]
    fn le(&self, other: &[T; N]) -> bool {
        PartialOrd::le(&&self[..], &&other[..])
    }
    #[inline]
    fn ge(&self, other: &[T; N]) -> bool {
        PartialOrd::ge(&&self[..], &&other[..])
    }
    #[inline]
    fn gt(&self, other: &[T; N]) -> bool {
        PartialOrd::gt(&&self[..], &&other[..])
    }
}

/// Implements comparison of arrays lexicographically.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord, const N: usize> Ord for [T; N] {
    #[inline]
    fn cmp(&self, other: &[T; N]) -> Ordering {
        Ord::cmp(&&self[..], &&other[..])
    }
}

// The Default impls cannot be done with const generics because `[T; 0]` doesn't
// require Default to be implemented, and having different impl blocks for
// different numbers isn't supported yet.
macro_rules! array_impl_default {
    {$n:expr, $t:ident $($ts:ident)*} => {
        #[stable(since = "1.4.0", feature = "array_default")]
        impl<T> Default for [T; $n] where T: Default {
            fn default() -> [T; $n] {
                [$t::default(), $($ts::default()),*]
            }
        }
        array_impl_default!{($n - 1), $($ts)*}
    };
    {$n:expr,} => {
        #[stable(since = "1.4.0", feature = "array_default")]
        impl<T> Default for [T; $n] {
            fn default() -> [T; $n] { [] }
        }
    };
}
array_impl_default! {32, T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T}
#[lang = "array"]
impl<T, const N: usize> [T; N] {
    /// Returns an array of the same size as `self`, with function `f` applied to each element
    /// in order.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(array_map)]
    /// let x = [1, 2, 3];
    /// let y = x.map(|v| v + 1);
    /// assert_eq!(y, [2, 3, 4]);
    ///
    /// let x = [1, 2, 3];
    /// let mut temp = 0;
    /// let y = x.map(|v| { temp += 1; v * temp });
    /// assert_eq!(y, [1, 4, 9]);
    ///
    /// let x = ["Ferris", "Bueller's", "Day", "Off"];
    /// let y = x.map(|v| v.len());
    /// assert_eq!(y, [6, 9, 3, 3]);
    /// ```
    #[unstable(feature = "array_map", issue = "75243")]
    pub fn map<F, U>(self, mut f: F) -> [U; N]
    where
        F: FnMut(T) -> U,
    {
        use crate::mem::MaybeUninit;
        // Drop guard: if `f` panics partway through, dropping this guard drops
        // exactly the destination elements that have already been written.
        struct Guard<T, const N: usize> {
            dst: *mut T,
            initialized: usize,
        }
        impl<T, const N: usize> Drop for Guard<T, N> {
            fn drop(&mut self) {
                debug_assert!(self.initialized <= N);
                let initialized_part =
                    crate::ptr::slice_from_raw_parts_mut(self.dst, self.initialized);
                // SAFETY: this raw slice will contain only initialized objects
                // that's why, it is allowed to drop it.
                unsafe {
                    crate::ptr::drop_in_place(initialized_part);
                }
            }
        }
        let mut dst = MaybeUninit::uninit_array::<N>();
        let mut guard: Guard<U, N> =
            Guard { dst: MaybeUninit::slice_as_mut_ptr(&mut dst), initialized: 0 };
        for (src, dst) in IntoIter::new(self).zip(&mut dst) {
            dst.write(f(src));
            guard.initialized += 1;
        }
        // FIXME: Convert to crate::mem::transmute once it works with generics.
        // unsafe { crate::mem::transmute::<[MaybeUninit<U>; N], [U; N]>(dst) }
        crate::mem::forget(guard);
        // SAFETY: At this point we've properly initialized the whole array
        // and we just need to cast it to the correct type.
        unsafe { crate::mem::transmute_copy::<_, [U; N]>(&dst) }
    }
    /// Returns a slice containing the entire array. Equivalent to `&s[..]`.
    #[unstable(feature = "array_methods", issue = "76118")]
    pub fn as_slice(&self) -> &[T] {
        self
    }
    /// Returns a mutable slice containing the entire array. Equivalent to
    /// `&mut s[..]`.
    #[unstable(feature = "array_methods", issue = "76118")]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        self
    }
}
|
use libc::*;
use std::old_io::process::{Command, ProcessOutput, ProcessExit,
Process, StdioContainer};
use std::old_io::process::StdioContainer::*;
use std::old_io::{IoError, IoErrorKind, IoResult};
use std::old_io::IoErrorKind::*;
use std::collections::VecMap;
use std::os::unix::prelude::*;
use std::old_io::{File, Append, Open, Read, Write};
use controls::*;
use constants::*;
use termios::*;
use signal::*;
// start off as null pointer
// Signal handlers receive no user data, so the handler below reaches the
// active `TermState` through this global raw pointer; it stays null while no
// terminal is active.
static mut uglobal_term:*mut TermState = 0 as *mut TermState;

// Signal handler installed while the terminal is active.
// On SIGCHLD it records the child's exit status in the matching job-table
// entry; unexpected signals are reported on the error stream.
unsafe extern fn term_signal(signo:c_int, u_info:*const SigInfo,
                             _:*const c_void) {
    let term:&mut TermState = match uglobal_term.as_mut() {
        Some(v) => v,
        None => {
            // this handler shouldn't be called when Term isn't active
            panic!("Term signal interrupt called when Term not active");
        }
    };
    let info:&SigInfo = match u_info.as_ref() {
        Some(v) => v,
        None => panic!("Given a null pointer for signal info")
    };
    match signo {
        SIGCHLD => {
            let fields = match info.determine_sigfields() {
                SigFields::SigChld(f) => f,
                _ => panic!("Signal wasn't a SIGCHLD")
            };
            // find the child by pid
            for (_, ref mut job) in term.jobs.iter_mut() {
                if job.process.id() == fields.pid {
                    // CLD_EXITED marks a normal exit; any other code is
                    // treated as termination by signal.
                    let exit = match info.code {
                        CLD_EXITED => ProcessExit::ExitStatus(fields.status as isize),
                        _ => ProcessExit::ExitSignal(fields.status as isize)
                    };
                    job.exit = Some(exit);
                    return;
                }
            }
            // child was not found
            term.controls.errf(format_args!("\nSent SIGCHLD for process not found in job table: {}\n", fields.pid));
        },
        SIGTSTP => {
            // ignore background SIGCHLDS
            // NOTE(review): the comment above says SIGCHLD but this arm
            // matches SIGTSTP — confirm which signal was intended.
        },
        _ => term.controls.errf(format_args!("\nTerm caught unexpected signal: {}\n", signo))
    }
}
pub struct Job {
pub command: String,
pub process: Process,
pub files: Vec<File>,
pub exit: Option<ProcessExit>
}
impl Job {
pub fn wait(&mut self, timeout:Option<usize>) -> IoResult<ProcessExit> {
if self.check_exit() {
// we're already dead
return Ok(self.exit.clone().unwrap());
}
let mut info; let mut fields;
loop {
info = try!(signal_wait(SIGCHLD, timeout));
fields = match info.determine_sigfields() {
SigFields::SigChld(f) => f,
_ => return Err(IoError {kind: IoErrorKind::OtherIoError,
desc: "Didn't catch SIGCHLD",
detail: Some(format!("Caught signal {} instead", info.signo))
})
};
if fields.pid == self.process.id() {
// we're dead (or stopped, but that comes later)
let exit = match info.code {
CLD_EXITED => ProcessExit::ExitStatus(fields.status as isize),
_ => ProcessExit::ExitSignal(fields.status as isize)
};
self.exit = Some(exit.clone());
return Ok(exit);
}
}
}
pub fn check_exit(&self) -> bool {
match self.exit {
Some(ProcessExit::ExitSignal(v))
if v == SIGTSTP as isize ||
v == SIGSTOP as isize ||
v == SIGCONT as isize => return false,
Some(_) => return true,
None => return false
}
}
}
impl Drop for Job {
fn drop(&mut self) {
match self.wait(Some(0)) {
Ok(_) => return,
Err(_) => {/* continue */}
}
match self.process.signal_exit() {
Err(e) => println!("Could not signal {} to exit: {}", self.process.id(), e),
_ => {/* ok */}
}
match self.wait(Some(1000)) {
Ok(_) => return,
Err(_) => {/* continue */}
}
match self.process.signal_kill() {
Err(e) => println!("Could not kill {}: {}", self.process.id(), e),
_ => {/* ok */}
}
}
}
pub struct TermState {
pub controls: Controls,
tios: Termios,
old_tios: Termios,
pub jobs: VecMap<Job>,
files: VecMap<File>,
jobstack: Vec<usize>
}
impl Drop for TermState {
fn drop (&mut self) {
// drop our signal handlers
self.unhandle_signals();
self.unset_pointer();
// then drop all of our jobs
let ids:Vec<usize> = self.jobs.keys().collect();
for id in ids.iter() {
self.jobs.remove(id);
}
}
}
impl TermState {
pub fn new() -> TermState {
let mut controls = Controls::new();
let mut tios = match Termios::get() {
Some(t) => t,
None => {
controls.err("Warning: Could not get terminal mode\n");
Termios::new()
}
};
let old_tios = tios.clone();
tios.fdisable(0, 0, ICANON|ECHO, 0);
TermState {
controls: controls,
tios: tios,
old_tios: old_tios,
jobs: VecMap::new(),
files: VecMap::new(),
jobstack: vec![]
}
}
pub fn update_terminal(&mut self) {
if !Termios::set(&self.tios) {
self.controls.err("Warning: Could not set terminal mode\n");
}
}
pub fn restore_terminal(&mut self) {
if !Termios::set(&self.old_tios) {
self.controls.err("Warning: Could not set terminal mode\n");
}
}
pub fn set_pointer(&mut self) {
unsafe {
uglobal_term = self as *mut TermState;
}
}
pub fn unset_pointer(&mut self) {
unsafe {
uglobal_term = 0 as *mut TermState;
}
}
pub fn handle_signals(&mut self) {
let sa = SigAction::handler(term_signal);
match signal_handle(SIGCHLD, &sa) {
Err(e) => self.controls.errf(format_args!("Could not set handler for SIGCHLD: {}\n", e)),
_ => {}
}
match signal_handle(SIGTSTP, &sa) {
Err(e) => self.controls.errf(format_args!("Could not set handler for SIGTSTP: {}\n", e)),
_ => {}
}
}
pub fn unhandle_signals(&mut self) {
match signal_default(SIGCHLD) {
Err(e) => self.controls.errf(format_args!("Could not unset handler for SIGCHLD: {}\n", e)),
_ => {}
}
match signal_default(SIGTSTP) {
Err(e) => self.controls.errf(format_args!("Could not unset handler for SIGTSTP: {}\n", e)),
_ => {}
}
}
pub fn remove_if_done(&mut self, id:&usize) -> Result<bool, String> {
if !self.jobs.contains_key(id) {
return Err(format!("Job not found"));
}
if self.jobs.get(id).unwrap().check_exit() {
self.jobs.remove(id);
return Ok(true);
} else {
return Ok(false);
}
}
fn find_jobs_hole(&self) -> usize {
// find a hole in the job map
let mut last = 0;
for key in self.jobs.keys() {
if key - last != 1 {
// we've found a hole
return key - 1;
} else {
last = key;
}
}
// job list is full
return last + 1;
}
pub fn output_file(&mut self, path:&Path) -> IoResult<Fd> {
// files are opened before they are attached to processes
// this allows the next *job function to attach to the file
// index, so it can be freed when the job is pruned.
let file = try!(File::open_mode(path, Append, Write));
let fid = file.as_raw_fd();
self.files.insert(fid as usize, file);
return Ok(fid);
}
pub fn input_file(&mut self, path:&Path) -> IoResult<Fd> {
let file = try!(File::open_mode(path, Open, Read));
let fid = file.as_raw_fd();
self.files.insert(fid as usize, file);
return Ok(fid);
}
pub fn get_job(&self, id:&usize) -> Result<&Job, String> {
match self.jobs.get(id) {
None => Err("Job not found".to_string()),
Some(job) => Ok(job)
}
}
pub fn front_job(&mut self) -> Option<usize> {
self.jobstack.pop()
}
pub fn restart_job(&mut self, id:&usize) -> Result<(), String> {
let mut job = match self.jobs.get_mut(id) {
None => return Err(format!("Job not found")),
Some(job) => job
};
match job.process.signal(SIGCONT as isize) {
Err(e) => return Err(format!("{}", e)),
Ok(_) => return Ok(())
}
}
pub fn start_job(&mut self, stdin:StdioContainer, stdout:StdioContainer, stderr:StdioContainer,
name:&String, args:&Vec<String>,
envs:&Vec<(String, Option<String>)>) -> Result<usize, String> {
let mut process = Command::new(name);
process.args(args.as_slice());
process.stdin(stdin);
process.stdout(stdout);
process.stderr(stderr);
for &(ref env, ref val) in envs.iter() {
match val {
&None => process.env_remove(env),
&Some(ref val) => process.env(env, val)
};
}
let child = match process.spawn() {
Err(e) => {
return Err(format!("Couldn't spawn {}: {}", name, e));
},
Ok(v) => v
};
let id = self.find_jobs_hole();
let mut job = Job {
command: name.clone(),
process: child,
files: vec![],
exit: None
};
// claim file descriptors if they exist in the file table
match stdin {
InheritFd(fd) if self.files.contains_key(&(fd as usize)) =>
job.files.push(self.files.remove(&(fd as usize)).unwrap()),
_ => {}
}
match stdout {
InheritFd(fd) if self.files.contains_key(&(fd as usize)) =>
job.files.push(self.files.remove(&(fd as usize)).unwrap()),
_ => {}
}
match stderr {
InheritFd(fd) if self.files.contains_key(&(fd as usize)) =>
job.files.push(self.files.remove(&(fd as usize)).unwrap()),
_ => {}
}
match self.jobs.insert(id.clone(), job) {
Some(_) => panic!("Overwrote job"),
_ => {/* nothing */}
}
return Ok(id);
}
fn wait_job_signal(&mut self, id:&usize, set:&SigSet) -> Result<ProcessExit, String> {
let mut info; let mut fields;
loop {
info = match signal_wait_set(set, None) {
Ok(i) => i,
Err(IoError{kind:OtherIoError, desc:_, ref detail})
if *detail == Some("interrupted system call".to_string()) => {
// our waiting was interrupted, try again
continue;
},
Err(e) => return Err(format!("Couldn't wait for child to exit: {}", e))
};
match info.signo {
SIGINT => {
// delete "^C"
self.controls.outc(BS);
self.controls.outc(BS);
self.controls.outc(SPC);
self.controls.outc(SPC);
self.controls.outc(BS);
self.controls.outc(BS);
self.controls.outs("\nInterrupt\n");
continue;
},
SIGTSTP => {
// delete "^Z"
self.controls.outc(BS);
self.controls.outc(BS);
self.controls.outc(SPC);
self.controls.outc(SPC);
self.controls.outc(BS);
self.controls.outc(BS);
self.controls.outs("\nStop\n");
continue;
},
SIGCHLD => {
fields = match info.determine_sigfields() {
SigFields::SigChld(f) => f,
_ => return Err(format!("Caught signal {} instead of SIGCHLD", info.signo))
};
if fields.pid == self.jobs.get_mut(id).unwrap().process.id() {
// process of interest died
let exit = match info.code {
CLD_EXITED => ProcessExit::ExitStatus(fields.status as isize),
_ => ProcessExit::ExitSignal(fields.status as isize)
};
self.jobs.get_mut(id).unwrap().exit = Some(exit.clone());
if info.code != CLD_EXITED && fields.status == SIGCONT {
continue;
} else {
return Ok(exit);
}
} else {
// some other job finished
// find the child by pid
for (_, ref mut job) in self.jobs.iter_mut() {
if job.process.id() == fields.pid {
let exit = match info.code {
CLD_EXITED => ProcessExit::ExitStatus(fields.status as isize),
_ => ProcessExit::ExitSignal(fields.status as isize)
};
job.exit = Some(exit);
break;
}
}
}
}, _ => return Err(format!("Caught unexpected signal: {}", info.signo))
}
}
}
pub fn wait_job(&mut self, id:&usize) -> Result<ProcessExit, String> {
if !self.jobs.contains_key(id) {
return Err("Job not found".to_string());
}
if self.jobs.get(id).unwrap().check_exit() {
// child has already exited
return Ok(self.jobs.get(id).unwrap().exit.clone().unwrap());
}
let mut set = match empty_sigset() {
Ok(s) => s,
Err(e) => return Err(format!("Couldn't get empty sigset: {}", e))
};
match sigset_add(&mut set, SIGCHLD) {
Ok(_) => {/* ok */},
Err(e) => return Err(format!("Couldn't add SIGCHLD to sigset: {}", e))
}
match sigset_add(&mut set, SIGINT) {
Ok(_) => {/* ok */},
Err(e) => return Err(format!("Couldn't add SIGINT to sigset: {}", e))
}
match sigset_add(&mut set, SIGTSTP) {
Ok(_) => {/* ok */},
Err(e) => return Err(format!("Couldn't add SIGTSTP to sigset: {}", e))
}
// set a process mask
let old_set = match signal_proc_mask(SIG_BLOCK, &set) {
Ok(set) => set,
Err(e) => return Err(format!("{}", e))
};
let out = self.wait_job_signal(id, &set);
// unset the mask
match signal_proc_mask(SIG_SETMASK, &old_set) {
Ok(_) => return out,
Err(e) => return Err(format!("{}", e))
}
}
pub fn job_output(&mut self, id:&usize) -> Result<ProcessOutput, String> {
// set the foreground job (before borrowing self)
let status = try!(self.wait_job(id));
let mut child = self.jobs.remove(id).unwrap();
let stdout = match child.process.stdout.as_mut() {
None => return Err("Child had no stdout".to_string()),
Some(st) => match st.read_to_end() {
Err(e) => return Err(format!("Could not read stdout: {}", e)),
Ok(v) => v
}
};
let stderr = match child.process.stderr.as_mut() {
None => return Err("Child had no stderr".to_string()),
Some(st) => match st.read_to_end() {
Err(e) => return Err(format!("Could not read stderr: {}", e)),
Ok(v) => v
}
};
return Ok(ProcessOutput {
status: status,
output: stdout,
error: stderr
});
}
pub fn start_command(&mut self, stdin:StdioContainer, stdout:StdioContainer, stderr:StdioContainer,
name:&String, args:&Vec<String>,
envs:&Vec<(String, Option<String>)>) -> Result<ProcessExit, String> {
// set terminal settings for process
// do this before we spawn the process
self.restore_terminal();
// start job
let id = match self.start_job(stdin, stdout, stderr, name, args, envs) {
Err(e) => {
// reset terminal to original state if spawning failed
self.update_terminal();
return Err(e);
},
Ok(id) => id
};
// wait for job to finish
let out = self.wait_job(&id);
if self.jobs.get(&id).unwrap().check_exit() {
// job is done
self.jobs.remove(&id);
} else {
// job is stopped
self.jobstack.push(id);
}
// restore settings for Wash
self.update_terminal();
return out;
}
pub fn run_job_fd(&mut self, stdin:Option<Fd>, stdout:Option<Fd>, stderr:Option<Fd>,
name:&String, args:&Vec<String>,
envs:&Vec<(String, Option<String>)>) -> Result<usize, String> {
let stdin_o = match stdin {
Some(fd) => InheritFd(fd),
None => CreatePipe(true, false)
};
let stdout_o = match stdout {
Some(fd) => InheritFd(fd),
None => CreatePipe(false, true)
};
let stderr_o = match stderr {
Some(fd) => InheritFd(fd),
None => CreatePipe(false, true)
};
self.start_job(stdin_o, stdout_o, stderr_o, name, args, envs)
}
pub fn run_job(&mut self, name:&String, args:&Vec<String>) -> Result<usize, String> {
// run the job directed
self.run_job_fd(None, None, None, name, args, &vec![])
}
pub fn run_command_fd(&mut self, stdin:Option<Fd>, stdout:Option<Fd>, stderr:Option<Fd>,
name:&String, args:&Vec<String>,
envs:&Vec<(String, Option<String>)>) -> Result<ProcessExit, String> {
// commands can only run on existing pipes
// to run a command on a new one, use a job
let stdin_o = match stdin {
Some(fd) => InheritFd(fd),
None => InheritFd(STDIN)
};
let stdout_o = match stdout {
Some(fd) => InheritFd(fd),
None => InheritFd(STDOUT)
};
let stderr_o = match stderr {
Some(fd) => InheritFd(fd),
None => InheritFd(STDERR)
};
self.start_command(stdin_o, stdout_o, stderr_o, name, args, envs)
}
pub fn run_command(&mut self, name:&String, args:&Vec<String>) -> Result<ProcessExit, String> {
// run the command on stdin/out/err
self.run_command_fd(None, None, None, name, args, &vec![])
}
pub fn clean_jobs(&mut self) -> Vec<(usize, Job)> {
let mut remove = vec![];
for (id, child) in self.jobs.iter_mut() {
if child.check_exit() {
remove.push(id);
}
}
let mut out = vec![];
for id in remove.iter() {
out.push((id.clone(), self.jobs.remove(id).unwrap()));
}
return out;
}
}
Don't warn about bad SIGCHLDs
Caused by failed process spawns, just ignore them
use libc::*;
use std::old_io::process::{Command, ProcessOutput, ProcessExit,
Process, StdioContainer};
use std::old_io::process::StdioContainer::*;
use std::old_io::{IoError, IoErrorKind, IoResult};
use std::old_io::IoErrorKind::*;
use std::collections::VecMap;
use std::os::unix::prelude::*;
use std::old_io::{File, Append, Open, Read, Write};
use controls::*;
use constants::*;
use termios::*;
use signal::*;
// start off as null pointer
static mut uglobal_term:*mut TermState = 0 as *mut TermState;
unsafe extern fn term_signal(signo:c_int, u_info:*const SigInfo,
_:*const c_void) {
let term:&mut TermState = match uglobal_term.as_mut() {
Some(v) => v,
None => {
// this handler shouldn't be called when Term isn't active
panic!("Term signal interrupt called when Term not active");
}
};
let info:&SigInfo = match u_info.as_ref() {
Some(v) => v,
None => panic!("Given a null pointer for signal info")
};
match signo {
SIGCHLD => {
let fields = match info.determine_sigfields() {
SigFields::SigChld(f) => f,
_ => panic!("Signal wasn't a SIGCHLD")
};
// find the child by pid
for (_, ref mut job) in term.jobs.iter_mut() {
if job.process.id() == fields.pid {
let exit = match info.code {
CLD_EXITED => ProcessExit::ExitStatus(fields.status as isize),
_ => ProcessExit::ExitSignal(fields.status as isize)
};
job.exit = Some(exit);
return;
}
}
// child was not found, most likely a failed process spawn.
},
SIGTSTP => {
// ignore background SIGCHLDS
},
_ => term.controls.errf(format_args!("\nTerm caught unexpected signal: {}\n", signo))
}
}
pub struct Job {
pub command: String,
pub process: Process,
pub files: Vec<File>,
pub exit: Option<ProcessExit>
}
impl Job {
pub fn wait(&mut self, timeout:Option<usize>) -> IoResult<ProcessExit> {
if self.check_exit() {
// we're already dead
return Ok(self.exit.clone().unwrap());
}
let mut info; let mut fields;
loop {
info = try!(signal_wait(SIGCHLD, timeout));
fields = match info.determine_sigfields() {
SigFields::SigChld(f) => f,
_ => return Err(IoError {kind: IoErrorKind::OtherIoError,
desc: "Didn't catch SIGCHLD",
detail: Some(format!("Caught signal {} instead", info.signo))
})
};
if fields.pid == self.process.id() {
// we're dead (or stopped, but that comes later)
let exit = match info.code {
CLD_EXITED => ProcessExit::ExitStatus(fields.status as isize),
_ => ProcessExit::ExitSignal(fields.status as isize)
};
self.exit = Some(exit.clone());
return Ok(exit);
}
}
}
pub fn check_exit(&self) -> bool {
match self.exit {
Some(ProcessExit::ExitSignal(v))
if v == SIGTSTP as isize ||
v == SIGSTOP as isize ||
v == SIGCONT as isize => return false,
Some(_) => return true,
None => return false
}
}
}
impl Drop for Job {
fn drop(&mut self) {
match self.wait(Some(0)) {
Ok(_) => return,
Err(_) => {/* continue */}
}
match self.process.signal_exit() {
Err(e) => println!("Could not signal {} to exit: {}", self.process.id(), e),
_ => {/* ok */}
}
match self.wait(Some(1000)) {
Ok(_) => return,
Err(_) => {/* continue */}
}
match self.process.signal_kill() {
Err(e) => println!("Could not kill {}: {}", self.process.id(), e),
_ => {/* ok */}
}
}
}
pub struct TermState {
pub controls: Controls,
tios: Termios,
old_tios: Termios,
pub jobs: VecMap<Job>,
files: VecMap<File>,
jobstack: Vec<usize>
}
impl Drop for TermState {
fn drop (&mut self) {
// drop our signal handlers
self.unhandle_signals();
self.unset_pointer();
// then drop all of our jobs
let ids:Vec<usize> = self.jobs.keys().collect();
for id in ids.iter() {
self.jobs.remove(id);
}
}
}
impl TermState {
pub fn new() -> TermState {
let mut controls = Controls::new();
let mut tios = match Termios::get() {
Some(t) => t,
None => {
controls.err("Warning: Could not get terminal mode\n");
Termios::new()
}
};
let old_tios = tios.clone();
tios.fdisable(0, 0, ICANON|ECHO, 0);
TermState {
controls: controls,
tios: tios,
old_tios: old_tios,
jobs: VecMap::new(),
files: VecMap::new(),
jobstack: vec![]
}
}
pub fn update_terminal(&mut self) {
if !Termios::set(&self.tios) {
self.controls.err("Warning: Could not set terminal mode\n");
}
}
pub fn restore_terminal(&mut self) {
if !Termios::set(&self.old_tios) {
self.controls.err("Warning: Could not set terminal mode\n");
}
}
pub fn set_pointer(&mut self) {
unsafe {
uglobal_term = self as *mut TermState;
}
}
pub fn unset_pointer(&mut self) {
unsafe {
uglobal_term = 0 as *mut TermState;
}
}
pub fn handle_signals(&mut self) {
let sa = SigAction::handler(term_signal);
match signal_handle(SIGCHLD, &sa) {
Err(e) => self.controls.errf(format_args!("Could not set handler for SIGCHLD: {}\n", e)),
_ => {}
}
match signal_handle(SIGTSTP, &sa) {
Err(e) => self.controls.errf(format_args!("Could not set handler for SIGTSTP: {}\n", e)),
_ => {}
}
}
pub fn unhandle_signals(&mut self) {
match signal_default(SIGCHLD) {
Err(e) => self.controls.errf(format_args!("Could not unset handler for SIGCHLD: {}\n", e)),
_ => {}
}
match signal_default(SIGTSTP) {
Err(e) => self.controls.errf(format_args!("Could not unset handler for SIGTSTP: {}\n", e)),
_ => {}
}
}
pub fn remove_if_done(&mut self, id:&usize) -> Result<bool, String> {
if !self.jobs.contains_key(id) {
return Err(format!("Job not found"));
}
if self.jobs.get(id).unwrap().check_exit() {
self.jobs.remove(id);
return Ok(true);
} else {
return Ok(false);
}
}
fn find_jobs_hole(&self) -> usize {
// find a hole in the job map
let mut last = 0;
for key in self.jobs.keys() {
if key - last != 1 {
// we've found a hole
return key - 1;
} else {
last = key;
}
}
// job list is full
return last + 1;
}
pub fn output_file(&mut self, path:&Path) -> IoResult<Fd> {
// files are opened before they are attached to processes
// this allows the next *job function to attach to the file
// index, so it can be freed when the job is pruned.
let file = try!(File::open_mode(path, Append, Write));
let fid = file.as_raw_fd();
self.files.insert(fid as usize, file);
return Ok(fid);
}
pub fn input_file(&mut self, path:&Path) -> IoResult<Fd> {
let file = try!(File::open_mode(path, Open, Read));
let fid = file.as_raw_fd();
self.files.insert(fid as usize, file);
return Ok(fid);
}
pub fn get_job(&self, id:&usize) -> Result<&Job, String> {
match self.jobs.get(id) {
None => Err("Job not found".to_string()),
Some(job) => Ok(job)
}
}
pub fn front_job(&mut self) -> Option<usize> {
self.jobstack.pop()
}
pub fn restart_job(&mut self, id:&usize) -> Result<(), String> {
let mut job = match self.jobs.get_mut(id) {
None => return Err(format!("Job not found")),
Some(job) => job
};
match job.process.signal(SIGCONT as isize) {
Err(e) => return Err(format!("{}", e)),
Ok(_) => return Ok(())
}
}
pub fn start_job(&mut self, stdin:StdioContainer, stdout:StdioContainer, stderr:StdioContainer,
name:&String, args:&Vec<String>,
envs:&Vec<(String, Option<String>)>) -> Result<usize, String> {
let mut process = Command::new(name);
process.args(args.as_slice());
process.stdin(stdin);
process.stdout(stdout);
process.stderr(stderr);
for &(ref env, ref val) in envs.iter() {
match val {
&None => process.env_remove(env),
&Some(ref val) => process.env(env, val)
};
}
let child = match process.spawn() {
Err(e) => return Err(format!("Couldn't spawn {}: {}", name, e)),
Ok(v) => v
};
let id = self.find_jobs_hole();
let mut job = Job {
command: name.clone(),
process: child,
files: vec![],
exit: None
};
// claim file descriptors if they exist in the file table
match stdin {
InheritFd(fd) if self.files.contains_key(&(fd as usize)) =>
job.files.push(self.files.remove(&(fd as usize)).unwrap()),
_ => {}
}
match stdout {
InheritFd(fd) if self.files.contains_key(&(fd as usize)) =>
job.files.push(self.files.remove(&(fd as usize)).unwrap()),
_ => {}
}
match stderr {
InheritFd(fd) if self.files.contains_key(&(fd as usize)) =>
job.files.push(self.files.remove(&(fd as usize)).unwrap()),
_ => {}
}
match self.jobs.insert(id.clone(), job) {
Some(_) => panic!("Overwrote job"),
_ => {/* nothing */}
}
return Ok(id);
}
fn wait_job_signal(&mut self, id:&usize, set:&SigSet) -> Result<ProcessExit, String> {
let mut info; let mut fields;
loop {
info = match signal_wait_set(set, None) {
Ok(i) => i,
Err(IoError{kind:OtherIoError, desc:_, ref detail})
if *detail == Some("interrupted system call".to_string()) => {
// our waiting was interrupted, try again
continue;
},
Err(e) => return Err(format!("Couldn't wait for child to exit: {}", e))
};
match info.signo {
SIGINT => {
// delete "^C"
self.controls.outc(BS);
self.controls.outc(BS);
self.controls.outc(SPC);
self.controls.outc(SPC);
self.controls.outc(BS);
self.controls.outc(BS);
self.controls.outs("\nInterrupt\n");
continue;
},
SIGTSTP => {
// delete "^Z"
self.controls.outc(BS);
self.controls.outc(BS);
self.controls.outc(SPC);
self.controls.outc(SPC);
self.controls.outc(BS);
self.controls.outc(BS);
self.controls.outs("\nStop\n");
continue;
},
SIGCHLD => {
fields = match info.determine_sigfields() {
SigFields::SigChld(f) => f,
_ => return Err(format!("Caught signal {} instead of SIGCHLD", info.signo))
};
if fields.pid == self.jobs.get_mut(id).unwrap().process.id() {
// process of interest died
let exit = match info.code {
CLD_EXITED => ProcessExit::ExitStatus(fields.status as isize),
_ => ProcessExit::ExitSignal(fields.status as isize)
};
self.jobs.get_mut(id).unwrap().exit = Some(exit.clone());
if info.code != CLD_EXITED && fields.status == SIGCONT {
continue;
} else {
return Ok(exit);
}
} else {
// some other job finished
// find the child by pid
for (_, ref mut job) in self.jobs.iter_mut() {
if job.process.id() == fields.pid {
let exit = match info.code {
CLD_EXITED => ProcessExit::ExitStatus(fields.status as isize),
_ => ProcessExit::ExitSignal(fields.status as isize)
};
job.exit = Some(exit);
break;
}
}
}
}, _ => return Err(format!("Caught unexpected signal: {}", info.signo))
}
}
}
pub fn wait_job(&mut self, id:&usize) -> Result<ProcessExit, String> {
if !self.jobs.contains_key(id) {
return Err("Job not found".to_string());
}
if self.jobs.get(id).unwrap().check_exit() {
// child has already exited
return Ok(self.jobs.get(id).unwrap().exit.clone().unwrap());
}
let mut set = match empty_sigset() {
Ok(s) => s,
Err(e) => return Err(format!("Couldn't get empty sigset: {}", e))
};
match sigset_add(&mut set, SIGCHLD) {
Ok(_) => {/* ok */},
Err(e) => return Err(format!("Couldn't add SIGCHLD to sigset: {}", e))
}
match sigset_add(&mut set, SIGINT) {
Ok(_) => {/* ok */},
Err(e) => return Err(format!("Couldn't add SIGINT to sigset: {}", e))
}
match sigset_add(&mut set, SIGTSTP) {
Ok(_) => {/* ok */},
Err(e) => return Err(format!("Couldn't add SIGTSTP to sigset: {}", e))
}
// set a process mask
let old_set = match signal_proc_mask(SIG_BLOCK, &set) {
Ok(set) => set,
Err(e) => return Err(format!("Couldn't set process mask: {}", e))
};
let out = self.wait_job_signal(id, &set);
// unset the mask
match signal_proc_mask(SIG_SETMASK, &old_set) {
Ok(_) => return out,
Err(e) => return Err(format!("Couldn't unset process mask: {}", e))
}
}
pub fn job_output(&mut self, id:&usize) -> Result<ProcessOutput, String> {
// set the foreground job (before borrowing self)
let status = try!(self.wait_job(id));
let mut child = self.jobs.remove(id).unwrap();
let stdout = match child.process.stdout.as_mut() {
None => return Err("Child had no stdout".to_string()),
Some(st) => match st.read_to_end() {
Err(e) => return Err(format!("Could not read stdout: {}", e)),
Ok(v) => v
}
};
let stderr = match child.process.stderr.as_mut() {
None => return Err("Child had no stderr".to_string()),
Some(st) => match st.read_to_end() {
Err(e) => return Err(format!("Could not read stderr: {}", e)),
Ok(v) => v
}
};
return Ok(ProcessOutput {
status: status,
output: stdout,
error: stderr
});
}
pub fn start_command(&mut self, stdin:StdioContainer, stdout:StdioContainer, stderr:StdioContainer,
name:&String, args:&Vec<String>,
envs:&Vec<(String, Option<String>)>) -> Result<ProcessExit, String> {
// set terminal settings for process
// do this before we spawn the process
self.restore_terminal();
// start job
let id = match self.start_job(stdin, stdout, stderr, name, args, envs) {
Err(e) => {
// reset terminal to original state if spawning failed
self.update_terminal();
return Err(e);
},
Ok(id) => id
};
// wait for job to finish
let out = self.wait_job(&id);
if self.jobs.get(&id).unwrap().check_exit() {
// job is done
self.jobs.remove(&id);
} else {
// job is stopped
self.jobstack.push(id);
}
// restore settings for Wash
self.update_terminal();
return out;
}
pub fn run_job_fd(&mut self, stdin:Option<Fd>, stdout:Option<Fd>, stderr:Option<Fd>,
name:&String, args:&Vec<String>,
envs:&Vec<(String, Option<String>)>) -> Result<usize, String> {
let stdin_o = match stdin {
Some(fd) => InheritFd(fd),
None => CreatePipe(true, false)
};
let stdout_o = match stdout {
Some(fd) => InheritFd(fd),
None => CreatePipe(false, true)
};
let stderr_o = match stderr {
Some(fd) => InheritFd(fd),
None => CreatePipe(false, true)
};
self.start_job(stdin_o, stdout_o, stderr_o, name, args, envs)
}
pub fn run_job(&mut self, name:&String, args:&Vec<String>) -> Result<usize, String> {
// run the job directed
self.run_job_fd(None, None, None, name, args, &vec![])
}
pub fn run_command_fd(&mut self, stdin:Option<Fd>, stdout:Option<Fd>, stderr:Option<Fd>,
name:&String, args:&Vec<String>,
envs:&Vec<(String, Option<String>)>) -> Result<ProcessExit, String> {
// commands can only run on existing pipes
// to run a command on a new one, use a job
let stdin_o = match stdin {
Some(fd) => InheritFd(fd),
None => InheritFd(STDIN)
};
let stdout_o = match stdout {
Some(fd) => InheritFd(fd),
None => InheritFd(STDOUT)
};
let stderr_o = match stderr {
Some(fd) => InheritFd(fd),
None => InheritFd(STDERR)
};
let out = self.start_command(stdin_o, stdout_o, stderr_o, name, args, envs);
// clean jobs so we don't get a bunch of "job finished" messages
self.clean_jobs();
return out;
}
pub fn run_command(&mut self, name:&String, args:&Vec<String>) -> Result<ProcessExit, String> {
// run the command on stdin/out/err
self.run_command_fd(None, None, None, name, args, &vec![])
}
pub fn clean_jobs(&mut self) -> Vec<(usize, Job)> {
let mut remove = vec![];
for (id, child) in self.jobs.iter_mut() {
if child.check_exit() {
remove.push(id);
}
}
let mut out = vec![];
for id in remove.iter() {
out.push((id.clone(), self.jobs.remove(id).unwrap()));
}
return out;
}
}
|
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod builder;
use {
crate::{
pdisplay::LayoutDisplay,
utils::{breadth_first_search, Name},
},
bongo_helper_derive::derive_unbounded,
codefmt::Layout,
std::{
cmp,
collections::{BTreeMap, BTreeSet},
fmt, ops,
},
};
fn ref_eq<T>(a: &T, b: &T) -> bool {
(a as *const T) == (b as *const T)
}
fn ref_cmp<T>(a: &T, b: &T) -> cmp::Ordering {
(a as *const T).cmp(&(b as *const T))
}
/// A trait which carries the underlying types for a grammar.
///
/// This allows us to specify a family of types at once as a type parameter
/// instead of forcing us to provide a number of type variables with a long list
/// of bounds.
///
/// This type is not instantiated, and will typically be a zero-sized type. It's
/// constrained by the standard set of derivable operations in order to make
/// derivations of types that use it simple.
pub trait ElementTypes: 'static {
/// The type used to identify each possible terminal.
///
/// Terminals must be cloneable, and must be Ord to be used as a key in a map.
type Term: Clone
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ LayoutDisplay
+ std::fmt::Debug
+ 'static;
// The type used to identify each possible non-terminal.
type NonTerm: Clone
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ LayoutDisplay
+ std::fmt::Debug
+ 'static;
// The type used to identify each production.
type ActionKey: Clone
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ std::fmt::Debug
+ 'static;
type ActionValue: Clone + std::fmt::Debug + 'static;
}
/// A terminal element.
///
/// This is a simple terminal type compatible with `ElementTypes`.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct Terminal(Name);
impl Terminal {
pub fn new(s: &str) -> Self {
Terminal(Name::new(s))
}
}
impl LayoutDisplay for Terminal {
fn disp(&self) -> codefmt::Layout {
let name = self.0.str();
Layout::text(name)
}
}
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct NonTerminal(Name);
impl NonTerminal {
pub fn new(s: &str) -> Self {
NonTerminal(Name::new(s))
}
}
impl LayoutDisplay for NonTerminal {
fn disp(&self) -> codefmt::Layout {
Layout::juxtapose(&[
Layout::text("<"),
Layout::text(self.0.str()),
Layout::text(">"),
])
}
}
pub struct BaseElementTypes;
impl ElementTypes for BaseElementTypes {
type Term = Terminal;
type NonTerm = NonTerminal;
type ActionKey = Name;
type ActionValue = ();
}
/// A single element (terminal or non-terminal).
pub enum Element<E: ElementTypes> {
  Term(E::Term),
  NonTerm(E::NonTerm),
}
// Manual definition of common traits
//
// Hand-written so the bounds fall on `E::Term`/`E::NonTerm` rather than on
// `E` itself, which a plain `#[derive(...)]` would require.
impl<E: ElementTypes> Clone for Element<E> {
  fn clone(&self) -> Self {
    match self {
      Element::Term(t) => Element::Term(t.clone()),
      Element::NonTerm(nt) => Element::NonTerm(nt.clone()),
    }
  }
}
impl<E: ElementTypes> PartialEq for Element<E> {
  fn eq(&self, other: &Self) -> bool {
    match (self, other) {
      (Element::Term(t1), Element::Term(t2)) => t1 == t2,
      (Element::NonTerm(nt1), Element::NonTerm(nt2)) => nt1 == nt2,
      // Variants of different kinds are never equal.
      _ => false,
    }
  }
}
impl<E: ElementTypes> Eq for Element<E> {}
impl<E: ElementTypes> Ord for Element<E> {
  fn cmp(&self, other: &Self) -> cmp::Ordering {
    match (self, other) {
      (Element::Term(t1), Element::Term(t2)) => t1.cmp(t2),
      (Element::NonTerm(nt1), Element::NonTerm(nt2)) => nt1.cmp(nt2),
      // Every terminal orders before every nonterminal.
      (Element::Term(_), Element::NonTerm(_)) => cmp::Ordering::Less,
      (Element::NonTerm(_), Element::Term(_)) => cmp::Ordering::Greater,
    }
  }
}
impl<E: ElementTypes> PartialOrd for Element<E> {
  fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
    Some(self.cmp(other))
  }
}
impl<E: ElementTypes> fmt::Debug for Element<E> {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    match self {
      Element::Term(t) => f.debug_tuple("Element::Term").field(t).finish(),
      Element::NonTerm(nt) => {
        f.debug_tuple("Element::NonTerm").field(nt).finish()
      }
    }
  }
}
impl<E: ElementTypes> Element<E> {
  /// If this element is a terminal, returns a `Some` value containing the
  /// terminal datum; returns `None` otherwise.
  pub fn as_term(&self) -> Option<&E::Term> {
    if let Element::Term(t) = self {
      Some(t)
    } else {
      None
    }
  }
  /// If this element is a nonterminal, returns a `Some` value containing the
  /// nonterminal datum; returns `None` otherwise.
  pub fn as_nonterm(&self) -> Option<&E::NonTerm> {
    if let Element::NonTerm(nt) = self {
      Some(nt)
    } else {
      None
    }
  }
  /// Clone this element into an element of another `ElementTypes` instance
  /// `E2` whose `Term` and `NonTerm` datum types are the same as `E`'s.
  pub fn clone_as_other<E2>(&self) -> Element<E2>
  where
    E2: ElementTypes<Term = E::Term, NonTerm = E::NonTerm>,
  {
    match self {
      Element::NonTerm(nt) => Element::NonTerm(nt.clone()),
      Element::Term(t) => Element::Term(t.clone()),
    }
  }
}
impl<E: ElementTypes> LayoutDisplay for Element<E> {
  /// Delegates display to whichever datum this element wraps.
  fn disp(&self) -> codefmt::Layout {
    match self {
      Element::NonTerm(nt) => nt.disp(),
      Element::Term(t) => t.disp(),
    }
  }
}
/// An element within a production. Includes an optional identifier.
#[derive_unbounded(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct ProductionElement<E: ElementTypes> {
  identifier: Option<Name>,
  element: Element<E>,
}
impl<E: ElementTypes> ProductionElement<E> {
  /// Builds a production element labeled with `name`.
  pub fn new_with_name(name: Name, e: Element<E>) -> Self {
    Self::new(Some(name), e)
  }
  /// Builds a production element with an optional label.
  pub fn new(name: Option<Name>, e: Element<E>) -> Self {
    ProductionElement {
      identifier: name,
      element: e,
    }
  }
  /// Builds an unlabeled production element.
  pub fn new_empty(e: Element<E>) -> Self {
    Self::new(None, e)
  }
  /// The element's identifier, if any.
  pub fn id(&self) -> Option<&Name> {
    self.identifier.as_ref()
  }
  /// The wrapped element.
  pub fn elem(&self) -> &Element<E> {
    &self.element
  }
  /// Clones into a `ProductionElement` of another `ElementTypes` instance
  /// whose `Term`/`NonTerm` datum types match `E`'s.
  pub fn clone_as_other<E2>(&self) -> ProductionElement<E2>
  where
    E2: ElementTypes<Term = E::Term, NonTerm = E::NonTerm>,
  {
    ProductionElement {
      identifier: self.identifier.clone(),
      element: self.element.clone_as_other(),
    }
  }
}
impl<E: ElementTypes> LayoutDisplay for ProductionElement<E> {
  /// Renders as `id: elem` when labeled, or just `elem` otherwise.
  fn disp(&self) -> codefmt::Layout {
    match &self.identifier {
      None => self.element.disp(),
      Some(name) => Layout::juxtapose(&[
        name.layout(),
        Layout::text(": "),
        self.element.disp(),
      ]),
    }
  }
}
impl<E: ElementTypes> From<Element<E>> for ProductionElement<E> {
  fn from(e: Element<E>) -> ProductionElement<E> {
    ProductionElement::new_empty(e)
  }
}
/// A production within a rule.
#[derive_unbounded(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Production<E: ElementTypes> {
  // Key identifying this production's action; the associated value lives in
  // the owning grammar's `action_map`.
  action_key: E::ActionKey,
  elements: Vec<ProductionElement<E>>,
}
impl<E: ElementTypes> Production<E> {
  /// Creates a production from its action key and element list.
  fn new(
    action_key: E::ActionKey,
    elements: Vec<ProductionElement<E>>,
  ) -> Production<E> {
    Production {
      action_key,
      elements,
    }
  }
  /// Returns the production's elements, with their optional identifiers.
  pub fn prod_elements(&self) -> &Vec<ProductionElement<E>> {
    &self.elements
  }
  /// Iterates over the bare elements, dropping identifiers.
  pub fn elements_iter(&self) -> impl Iterator<Item = &Element<E>> {
    self.elements.iter().map(|prod_elem| &prod_elem.element)
  }
  /// Returns the bare element at `index`, or `None` if out of range.
  pub fn element_at(&self, index: usize) -> Option<&Element<E>> {
    self.elements.get(index).map(|prod_elem| &prod_elem.element)
  }
  /// Returns the key identifying this production's action.
  pub fn action_key(&self) -> &E::ActionKey {
    &self.action_key
  }
}
impl<E: ElementTypes> LayoutDisplay for Production<E> {
  /// Renders as `elem elem ... => action_key`.
  fn disp(&self) -> Layout {
    let elements =
      Layout::wrap(self.elements.iter().map(|x| x.disp()).collect::<Vec<_>>());
    Layout::juxtapose(&[
      elements,
      Layout::text(" => "),
      Layout::text(format!("{:?}", self.action_key)),
    ])
  }
}
/// Uniquely identifies a production: a rule head plus the action key of one
/// of that rule's productions.
#[derive_unbounded(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct ProdKey<E: ElementTypes> {
  head: E::NonTerm,
  action_key: E::ActionKey,
}
/// A rule: a head nonterminal together with its alternative productions.
#[derive_unbounded(Clone, Debug)]
struct Rule<E: ElementTypes> {
  head: E::NonTerm,
  prods: Vec<Production<E>>,
}
impl<E: ElementTypes> Rule<E> {
  /// Creates a rule for `head` with the given productions.
  pub fn new(head: E::NonTerm, prods: Vec<Production<E>>) -> Self {
    Rule { head, prods }
  }
  /// Returns the head nonterminal.
  pub fn head(&self) -> &E::NonTerm {
    &self.head
  }
  /// Returns the rule's productions.
  pub fn prods(&self) -> &Vec<Production<E>> {
    &self.prods
  }
}
impl<E: ElementTypes> LayoutDisplay for Rule<E> {
  /// Renders the rule as a vertical stack, one entry per production.
  fn disp(&self) -> Layout {
    Layout::stack(self.prods.iter().map(|p| p.disp()).collect::<Vec<_>>())
  }
}
/// A grammar
///
/// Holds a start nonterminal, rules keyed by their head nonterminal, and the
/// values associated with each production's action key.
#[derive_unbounded(Clone)]
pub struct Grammar<E: ElementTypes> {
  // The grammar's start nonterminal.
  start_symbol: E::NonTerm,
  // Rules, keyed by head nonterminal.
  rule_set: BTreeMap<E::NonTerm, Rule<E>>,
  // Data associated with each production's action key.
  action_map: BTreeMap<E::ActionKey, E::ActionValue>,
}
impl<E: ElementTypes> std::fmt::Debug for Grammar<E> {
  /// Debug-formats the grammar's start symbol and the terminals it uses.
  ///
  /// Fix: this previously ended in `unimplemented!()`, so formatting any
  /// grammar with `{:?}` panicked; the `debug_struct` output is now
  /// completed with `finish()`.
  fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    let mut dbg_struct = f.debug_struct("Grammar");
    dbg_struct.field("start", &self.start_symbol);
    dbg_struct.field("Terms", &self.get_terminals().collect::<Vec<_>>());
    dbg_struct.finish()
  }
}
impl<E: ElementTypes> Grammar<E> {
  /// Builds a grammar from a start symbol, rules, and action data,
  /// validating it with `check_grammar` before returning it.
  fn new(
    start: E::NonTerm,
    rule_set: impl IntoIterator<Item = Rule<E>>,
    action_map: BTreeMap<E::ActionKey, E::ActionValue>,
  ) -> Result<Self, GrammarErrors<E>> {
    let g = Grammar {
      start_symbol: start,
      rule_set: rule_set
        .into_iter()
        .map(|r| (r.head().clone(), r))
        .collect(),
      action_map,
    };
    g.check_grammar().map(|_| g)
  }
  /// Returns the grammar's start nonterminal.
  pub fn start_nt(&self) -> &E::NonTerm {
    &self.start_symbol
  }
  /// Iterates over all rules in the grammar.
  pub fn rules<'a>(&'a self) -> impl Iterator<Item = RuleRef<'a, E>> + 'a {
    self.rule_set.iter().map(move |(_, rule)| RuleRef {
      grammar: ParentRef(self),
      rule: RefCompare(rule),
    })
  }
  /// Returns a map from each head nonterminal to a ref to its rule.
  pub fn rule_set<'a>(&'a self) -> BTreeMap<&'a E::NonTerm, RuleRef<'a, E>> {
    self
      .rule_set
      .iter()
      .map(|(k, rule)| {
        (
          k,
          RuleRef {
            grammar: ParentRef(self),
            rule: RefCompare(rule),
          },
        )
      })
      .collect()
  }
  /// Gets the rule headed by `nt`, if any.
  pub fn get_rule<'a>(&'a self, nt: &E::NonTerm) -> Option<RuleRef<'a, E>> {
    self.rule_set.get(nt).map(|rule| RuleRef {
      grammar: ParentRef(self),
      rule: RefCompare(rule),
    })
  }
  /// Iterates over every production of every rule.
  pub fn prods<'a>(&'a self) -> impl Iterator<Item = ProdRef<'a, E>> {
    self.rules().flat_map(|rule| rule.prods())
  }
  // Iterates over every element occurrence in every production body.
  fn get_elements(&self) -> impl Iterator<Item = &Element<E>> {
    self
      .rule_set
      .values()
      .flat_map(|r| &r.prods)
      .flat_map(|p| p.elements_iter())
  }
  // Terminal occurrences in production bodies (terminals may repeat).
  fn get_terminals(&self) -> impl Iterator<Item = &E::Term> {
    self.get_elements().filter_map(|e| e.as_term())
  }
  // Nonterminal occurrences in production bodies (may repeat).
  fn get_nonterminals(&self) -> impl Iterator<Item = &E::NonTerm> {
    self.get_elements().filter_map(|e| e.as_nonterm())
  }
  // Nonterminals referenced in some production that head no rule.
  fn nonterminals_without_rules(&self) -> BTreeSet<&E::NonTerm> {
    self
      .get_nonterminals()
      .filter(move |nt| !self.rule_set.contains_key(nt))
      .collect()
  }
  // Heads of rules whose production list is empty.
  fn rules_without_prods<'a>(&'a self) -> BTreeSet<&'a E::NonTerm> {
    let rules = self.rules();
    let prodless_rules = rules.filter(|r| r.prods().is_empty());
    let head_iter = prodless_rules.map(|r| r.head());
    head_iter.collect()
  }
  // Nonterminals reachable from the start symbol via rule bodies (BFS).
  fn reachable_nonterms(&self) -> BTreeSet<&E::NonTerm> {
    breadth_first_search(std::iter::once(&self.start_symbol), |nt| {
      match self.get_rule(nt) {
        Some(rule) => rule
          .prods()
          .iter()
          .flat_map(|p| p.elements_iter())
          .filter_map(|e| e.as_nonterm())
          .collect(),
        None => BTreeSet::new(),
      }
    })
  }
  // Nonterminals occurring in production bodies that are not reachable
  // from the start symbol.
  fn unreachable_nonterms(&self) -> BTreeSet<&E::NonTerm> {
    let reachable_nonterms = self.reachable_nonterms();
    self
      .get_nonterminals()
      .filter(|nt| !reachable_nonterms.contains(nt))
      .collect()
  }
}
/// The structural problems detected while validating a `Grammar`.
#[derive_unbounded(Clone, Debug)]
pub struct GrammarErrors<E: ElementTypes> {
  unreachable_nonterms: BTreeSet<E::NonTerm>,
  nonterms_without_rules: BTreeSet<E::NonTerm>,
  rules_without_prods: BTreeSet<E::NonTerm>,
}
impl<E: ElementTypes> GrammarErrors<E> {
  /// Returns `Ok(())` when no errors were recorded, `Err(self)` when any
  /// error set is non-empty.
  fn into_result(self) -> Result<(), Self> {
    let has_errors = !self.unreachable_nonterms.is_empty()
      || !self.nonterms_without_rules.is_empty()
      || !self.rules_without_prods.is_empty();
    if has_errors {
      Err(self)
    } else {
      Ok(())
    }
  }
}
impl<E: ElementTypes> Grammar<E> {
  /// Validates the grammar, collecting all structural problems at once:
  /// unreachable nonterminals, nonterminals with no rule, and rules with no
  /// productions.
  fn check_grammar(&self) -> Result<(), GrammarErrors<E>> {
    GrammarErrors {
      unreachable_nonterms: self
        .unreachable_nonterms()
        .into_iter()
        .cloned()
        .collect(),
      nonterms_without_rules: self
        .nonterminals_without_rules()
        .into_iter()
        .cloned()
        .collect(),
      rules_without_prods: self
        .rules_without_prods()
        .into_iter()
        .cloned()
        .collect(),
    }
    .into_result()
  }
}
impl<E: ElementTypes> LayoutDisplay for Grammar<E> {
  /// Renders each rule as `head:` followed by its indented productions; the
  /// start symbol's head is prefixed with `*`.
  fn disp(&self) -> Layout {
    let mut stack = Vec::new();
    for (k, v) in &self.rule_set {
      let name_layout = if &self.start_symbol == k {
        Layout::juxtapose(&[Layout::text("*"), k.disp()])
      } else {
        k.disp()
      };
      stack.push(Layout::juxtapose(&[name_layout, Layout::text(":")]));
      stack.push(Layout::juxtapose(&[Layout::text("  "), v.disp()]));
    }
    Layout::stack(stack)
  }
}
// ------------
/// A simple deref wrapper that ensures that two references _must_ be the same during
/// comparison. This ensures that we can't accidentally incorporate refs from different parents together.
#[derive(Debug)]
struct ParentRef<'a, T>(&'a T);
impl<'a, T> ParentRef<'a, T> {
  // Constructor; equivalent to the tuple constructor used elsewhere.
  fn new(r: &'a T) -> Self {
    ParentRef(r)
  }
}
impl<T> cmp::PartialEq for ParentRef<'_, T> {
  fn eq(&self, other: &Self) -> bool {
    // Comparing refs belonging to different parents is a bug; fail fast.
    assert!(ref_eq(self.0, other.0));
    true
  }
}
impl<T> cmp::Eq for ParentRef<'_, T> {}
impl<T> cmp::PartialOrd for ParentRef<'_, T> {
  fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
    Some(self.cmp(other))
  }
}
impl<T> cmp::Ord for ParentRef<'_, T> {
  fn cmp(&self, other: &Self) -> cmp::Ordering {
    // Same invariant as `eq`: both refs must name the same parent object.
    assert!(ref_eq(self.0, other.0));
    cmp::Ordering::Equal
  }
}
impl<'a, T> ops::Deref for ParentRef<'a, T> {
  type Target = &'a T;
  fn deref(&self) -> &&'a T {
    &self.0
  }
}
// Manual Clone/Copy avoid the `T: Clone`/`T: Copy` bounds a derive would
// add; only the reference itself is copied.
impl<T> Clone for ParentRef<'_, T> {
  fn clone(&self) -> Self {
    ParentRef(self.0)
  }
}
impl<T> Copy for ParentRef<'_, T> {}
// ------------
/// A wrapper that opts its contents out of comparison: all values compare
/// equal, so a wrapped field never influences its parent's Eq/Ord.
#[derive(Clone, Copy, Debug)]
struct NoCompare<T>(T);
impl<T> cmp::PartialEq for NoCompare<T> {
  fn eq(&self, _: &Self) -> bool {
    true
  }
}
impl<T> cmp::Eq for NoCompare<T> {}
impl<T> cmp::PartialOrd for NoCompare<T> {
  fn partial_cmp(&self, _: &Self) -> Option<cmp::Ordering> {
    Some(cmp::Ordering::Equal)
  }
}
impl<T> cmp::Ord for NoCompare<T> {
  fn cmp(&self, _: &Self) -> cmp::Ordering {
    cmp::Ordering::Equal
  }
}
impl<T> ops::Deref for NoCompare<T> {
  type Target = T;
  fn deref(&self) -> &T {
    &self.0
  }
}
// ------------
/// A reference wrapper that compares by address identity rather than by the
/// pointed-to value.
#[derive(Debug)]
struct RefCompare<'a, T>(&'a T);
impl<T> cmp::PartialEq for RefCompare<'_, T> {
  fn eq(&self, other: &Self) -> bool {
    ref_eq(self.0, other.0)
  }
}
impl<T> cmp::Eq for RefCompare<'_, T> {}
impl<T> cmp::PartialOrd for RefCompare<'_, T> {
  fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
    Some(self.cmp(other))
  }
}
impl<T> cmp::Ord for RefCompare<'_, T> {
  fn cmp(&self, other: &Self) -> cmp::Ordering {
    // Orders by pointer address: arbitrary but consistent for a given set
    // of live references.
    ref_cmp(self.0, other.0)
  }
}
impl<'a, T> ops::Deref for RefCompare<'a, T> {
  type Target = &'a T;
  fn deref(&self) -> &&'a T {
    &self.0
  }
}
impl<T> Clone for RefCompare<'_, T> {
  fn clone(&self) -> Self {
    RefCompare(self.0)
  }
}
impl<T> Copy for RefCompare<'_, T> {}
// ------------
#[derive_unbounded(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct RuleRef<'a, E: ElementTypes> {
  grammar: ParentRef<'a, Grammar<E>>,
  rule: RefCompare<'a, Rule<E>>,
}
impl<'a, E: ElementTypes> RuleRef<'a, E> {
  /// Returns the head nonterminal of the referenced rule.
  pub fn head(&self) -> &'a E::NonTerm {
    &self.rule.head
  }
  /// Returns refs to this rule's productions, each paired with its action
  /// value looked up in the owning grammar's action map.
  ///
  /// Panics with a diagnostic message if a production's action key has no
  /// entry in the action map (previously a bare `unwrap`), since that is a
  /// grammar-construction invariant violation.
  pub fn prods(&self) -> Vec<ProdRef<'a, E>> {
    self
      .rule
      .prods
      .iter()
      .map(|prod| ProdRef {
        grammar: self.grammar,
        head: &self.rule.head,
        prod: RefCompare(prod),
        action_value: NoCompare(
          self
            .grammar
            .action_map
            .get(&prod.action_key)
            .expect("production action_key missing from grammar action_map"),
        ),
      })
      .collect()
  }
}
// ------------
#[derive_unbounded(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct ProdRef<'a, E: ElementTypes> {
  grammar: ParentRef<'a, Grammar<E>>,
  head: &'a E::NonTerm,
  prod: RefCompare<'a, Production<E>>,
  // Excluded from comparison: the production ref already identifies it.
  action_value: NoCompare<&'a E::ActionValue>,
}
impl<'a, E: ElementTypes> ProdRef<'a, E> {
  /// Returns the head nonterminal of the rule this production belongs to.
  pub fn head(&self) -> &'a E::NonTerm {
    self.head
  }
  /// Returns the production's elements with their optional identifiers.
  pub fn prod_elements(&self) -> &'a Vec<ProductionElement<E>> {
    &self.prod.elements
  }
  /// Iterates over the production's bare elements.
  pub fn elements_iter(&self) -> impl Iterator<Item = &'a Element<E>> {
    self.prod.elements_iter()
  }
  /// Returns the identified element at `index`, or `None` if out of range.
  pub fn prod_element_at(
    &self,
    index: usize,
  ) -> Option<&'a ProductionElement<E>> {
    self.prod.elements.get(index)
  }
  /// Returns the bare element at `index`, or `None` if out of range.
  pub fn element_at(&self, index: usize) -> Option<&'a Element<E>> {
    self.prod.element_at(index)
  }
  /// Returns the production's action key.
  pub fn action_key(&self) -> &'a E::ActionKey {
    self.prod.action_key()
  }
  /// Returns the action value associated with this production's key.
  pub fn action_value(&self) -> &'a E::ActionValue {
    *self.action_value
  }
  /// Builds an owned `ProdKey` (head + action key) for this production.
  pub fn prod_key(&self) -> ProdKey<E> {
    ProdKey {
      head: self.head().clone(),
      action_key: self.action_key().clone(),
    }
  }
}
Adding more doc comments in base.rs
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod builder;
use {
crate::{
pdisplay::LayoutDisplay,
utils::{breadth_first_search, Name},
},
bongo_helper_derive::derive_unbounded,
codefmt::Layout,
std::{
cmp,
collections::{BTreeMap, BTreeSet},
fmt, ops,
},
};
/// Returns true iff `a` and `b` refer to the same object (address identity).
fn ref_eq<T>(a: &T, b: &T) -> bool {
  // `std::ptr::eq` compares addresses directly, making the identity intent
  // explicit instead of relying on raw-pointer `PartialEq`.
  std::ptr::eq(a, b)
}
/// Orders two references by address: an arbitrary but consistent total order
/// for refs whose pointed-to values have no meaningful order here.
fn ref_cmp<T>(a: &T, b: &T) -> cmp::Ordering {
  (a as *const T).cmp(&(b as *const T))
}
/// A trait which carries the underlying types for a grammar.
///
/// This allows us to specify a family of types at once as a type parameter
/// instead of forcing us to provide a number of type variables with a long list
/// of bounds.
///
/// This type is not instantiated, and will typically be a zero-sized type.
pub trait ElementTypes: 'static {
  /// The type used to identify each possible terminal.
  ///
  /// Terminals must be cloneable, and must be Ord to be used as a key in a map.
  type Term: Clone
    + PartialEq
    + Eq
    + PartialOrd
    + Ord
    + LayoutDisplay
    + std::fmt::Debug
    + 'static;
  /// The type used to identify each possible non-terminal.
  type NonTerm: Clone
    + PartialEq
    + Eq
    + PartialOrd
    + Ord
    + LayoutDisplay
    + std::fmt::Debug
    + 'static;
  /// The type used to identify each production.
  type ActionKey: Clone
    + PartialEq
    + Eq
    + PartialOrd
    + Ord
    + std::fmt::Debug
    + 'static;
  /// The data value associated with a production, looked up by `ActionKey`.
  type ActionValue: Clone + std::fmt::Debug + 'static;
}
/// A terminal element, identified by its name.
///
/// This is a simple terminal type compatible with `ElementTypes`.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct Terminal(Name);
impl Terminal {
  /// Creates a terminal named `s`.
  pub fn new(s: &str) -> Self {
    Self(Name::new(s))
  }
}
impl LayoutDisplay for Terminal {
  /// Renders the terminal as its bare name.
  fn disp(&self) -> codefmt::Layout {
    Layout::text(self.0.str())
  }
}
/// A nonterminal element, identified by its name.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct NonTerminal(Name);
impl NonTerminal {
  /// Creates a nonterminal named `s`.
  pub fn new(s: &str) -> Self {
    Self(Name::new(s))
  }
}
impl LayoutDisplay for NonTerminal {
  /// Renders the nonterminal as `<name>`.
  fn disp(&self) -> codefmt::Layout {
    let inner = Layout::text(self.0.str());
    Layout::juxtapose(&[Layout::text("<"), inner, Layout::text(">")])
  }
}
/// The simplest concrete `ElementTypes` instance: named terminals and
/// nonterminals, `Name` action keys, and no action data.
pub struct BaseElementTypes;
impl ElementTypes for BaseElementTypes {
  type Term = Terminal;
  type NonTerm = NonTerminal;
  type ActionKey = Name;
  type ActionValue = ();
}
/// A single element (terminal or non-terminal).
pub enum Element<E: ElementTypes> {
  Term(E::Term),
  NonTerm(E::NonTerm),
}
// Manual definition of common traits
//
// Note: This is still required with derive_unbounded, as we do not support enums at this time.
impl<E: ElementTypes> Clone for Element<E> {
  fn clone(&self) -> Self {
    match self {
      Element::Term(t) => Element::Term(t.clone()),
      Element::NonTerm(nt) => Element::NonTerm(nt.clone()),
    }
  }
}
impl<E: ElementTypes> PartialEq for Element<E> {
  fn eq(&self, other: &Self) -> bool {
    match (self, other) {
      (Element::Term(t1), Element::Term(t2)) => t1 == t2,
      (Element::NonTerm(nt1), Element::NonTerm(nt2)) => nt1 == nt2,
      // Variants of different kinds are never equal.
      _ => false,
    }
  }
}
impl<E: ElementTypes> Eq for Element<E> {}
impl<E: ElementTypes> Ord for Element<E> {
  fn cmp(&self, other: &Self) -> cmp::Ordering {
    match (self, other) {
      (Element::Term(t1), Element::Term(t2)) => t1.cmp(t2),
      (Element::NonTerm(nt1), Element::NonTerm(nt2)) => nt1.cmp(nt2),
      // Every terminal orders before every nonterminal.
      (Element::Term(_), Element::NonTerm(_)) => cmp::Ordering::Less,
      (Element::NonTerm(_), Element::Term(_)) => cmp::Ordering::Greater,
    }
  }
}
impl<E: ElementTypes> PartialOrd for Element<E> {
  fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
    Some(self.cmp(other))
  }
}
impl<E: ElementTypes> fmt::Debug for Element<E> {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    match self {
      Element::Term(t) => f.debug_tuple("Element::Term").field(t).finish(),
      Element::NonTerm(nt) => {
        f.debug_tuple("Element::NonTerm").field(nt).finish()
      }
    }
  }
}
impl<E: ElementTypes> Element<E> {
  /// If this element is a terminal, returns a `Some` value containing the
  /// terminal datum; returns `None` otherwise.
  pub fn as_term(&self) -> Option<&E::Term> {
    if let Element::Term(t) = self {
      Some(t)
    } else {
      None
    }
  }
  /// If this element is a nonterminal, returns a `Some` value containing the
  /// nonterminal datum; returns `None` otherwise.
  pub fn as_nonterm(&self) -> Option<&E::NonTerm> {
    if let Element::NonTerm(nt) = self {
      Some(nt)
    } else {
      None
    }
  }
  /// Clone this element into an element of another `ElementTypes` instance
  /// `E2` whose `Term` and `NonTerm` datum types are the same as `E`'s.
  pub fn clone_as_other<E2>(&self) -> Element<E2>
  where
    E2: ElementTypes<Term = E::Term, NonTerm = E::NonTerm>,
  {
    match self {
      Element::NonTerm(nt) => Element::NonTerm(nt.clone()),
      Element::Term(t) => Element::Term(t.clone()),
    }
  }
}
impl<E: ElementTypes> LayoutDisplay for Element<E> {
  /// Delegates display to whichever datum this element wraps.
  fn disp(&self) -> codefmt::Layout {
    match self {
      Element::NonTerm(nt) => nt.disp(),
      Element::Term(t) => t.disp(),
    }
  }
}
/// An element within a production. Includes an optional identifier.
#[derive_unbounded(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct ProductionElement<E: ElementTypes> {
  identifier: Option<Name>,
  element: Element<E>,
}
impl<E: ElementTypes> ProductionElement<E> {
  /// Builds a production element labeled with `name`.
  pub fn new_with_name(name: Name, e: Element<E>) -> Self {
    Self::new(Some(name), e)
  }
  /// Builds a production element with an optional label.
  pub fn new(name: Option<Name>, e: Element<E>) -> Self {
    ProductionElement {
      identifier: name,
      element: e,
    }
  }
  /// Builds an unlabeled production element.
  pub fn new_empty(e: Element<E>) -> Self {
    Self::new(None, e)
  }
  /// The element's identifier, if any.
  pub fn id(&self) -> Option<&Name> {
    self.identifier.as_ref()
  }
  /// The wrapped element.
  pub fn elem(&self) -> &Element<E> {
    &self.element
  }
  /// Clones into a `ProductionElement` of another `ElementTypes` instance
  /// whose `Term`/`NonTerm` datum types match `E`'s.
  pub fn clone_as_other<E2>(&self) -> ProductionElement<E2>
  where
    E2: ElementTypes<Term = E::Term, NonTerm = E::NonTerm>,
  {
    ProductionElement {
      identifier: self.identifier.clone(),
      element: self.element.clone_as_other(),
    }
  }
}
impl<E: ElementTypes> LayoutDisplay for ProductionElement<E> {
  /// Renders as `id: elem` when labeled, or just `elem` otherwise.
  fn disp(&self) -> codefmt::Layout {
    match &self.identifier {
      None => self.element.disp(),
      Some(name) => Layout::juxtapose(&[
        name.layout(),
        Layout::text(": "),
        self.element.disp(),
      ]),
    }
  }
}
impl<E: ElementTypes> From<Element<E>> for ProductionElement<E> {
  fn from(e: Element<E>) -> ProductionElement<E> {
    ProductionElement::new_empty(e)
  }
}
/// A production within a rule.
#[derive_unbounded(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Production<E: ElementTypes> {
  // Key identifying this production's action; the associated value lives in
  // the owning grammar's `action_map`.
  action_key: E::ActionKey,
  elements: Vec<ProductionElement<E>>,
}
impl<E: ElementTypes> Production<E> {
  /// Creates a production from its action key and element list.
  fn new(
    action_key: E::ActionKey,
    elements: Vec<ProductionElement<E>>,
  ) -> Production<E> {
    Production {
      action_key,
      elements,
    }
  }
  /// Returns the production's elements, with their optional identifiers.
  pub fn prod_elements(&self) -> &Vec<ProductionElement<E>> {
    &self.elements
  }
  /// Iterates over the bare elements, dropping identifiers.
  pub fn elements_iter(&self) -> impl Iterator<Item = &Element<E>> {
    self.elements.iter().map(|prod_elem| &prod_elem.element)
  }
  /// Returns the bare element at `index`, or `None` if out of range.
  pub fn element_at(&self, index: usize) -> Option<&Element<E>> {
    self.elements.get(index).map(|prod_elem| &prod_elem.element)
  }
  /// Returns the key identifying this production's action.
  pub fn action_key(&self) -> &E::ActionKey {
    &self.action_key
  }
}
impl<E: ElementTypes> LayoutDisplay for Production<E> {
  /// Renders as `elem elem ... => action_key`.
  fn disp(&self) -> Layout {
    let elements =
      Layout::wrap(self.elements.iter().map(|x| x.disp()).collect::<Vec<_>>());
    Layout::juxtapose(&[
      elements,
      Layout::text(" => "),
      Layout::text(format!("{:?}", self.action_key)),
    ])
  }
}
/// Uniquely identifies a production: a rule head plus the action key of one
/// of that rule's productions.
#[derive_unbounded(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct ProdKey<E: ElementTypes> {
  head: E::NonTerm,
  action_key: E::ActionKey,
}
/// A rule: a head nonterminal together with its alternative productions.
#[derive_unbounded(Clone, Debug)]
struct Rule<E: ElementTypes> {
  head: E::NonTerm,
  prods: Vec<Production<E>>,
}
impl<E: ElementTypes> Rule<E> {
  /// Creates a rule for `head` with the given productions.
  pub fn new(head: E::NonTerm, prods: Vec<Production<E>>) -> Self {
    Rule { head, prods }
  }
  /// Returns the head nonterminal.
  pub fn head(&self) -> &E::NonTerm {
    &self.head
  }
  /// Returns the rule's productions.
  pub fn prods(&self) -> &Vec<Production<E>> {
    &self.prods
  }
}
impl<E: ElementTypes> LayoutDisplay for Rule<E> {
  /// Renders the rule as a vertical stack, one entry per production.
  fn disp(&self) -> Layout {
    Layout::stack(self.prods.iter().map(|p| p.disp()).collect::<Vec<_>>())
  }
}
/// A context-free language grammar.
///
/// This is a context-free grammar consisting of
///
/// - A start nonterminal
/// - A set of rules, each which consist of
///   - A head nonterminal
///   - A set of productions, where each production consist of
///     - A list of production elements, which may have an identifier,
///       and is either a terminal or nonterminal (an element).
///     - An action key (identifier unique to that production)
///     - An action value (Data associated with that action key)
///
/// Grammars are read-only, and the accessors use the lifetime of the
/// grammar object.
#[derive_unbounded(Clone)]
pub struct Grammar<E: ElementTypes> {
  // The grammar's start nonterminal.
  start_symbol: E::NonTerm,
  // Rules, keyed by head nonterminal.
  rule_set: BTreeMap<E::NonTerm, Rule<E>>,
  // Data associated with each production's action key.
  action_map: BTreeMap<E::ActionKey, E::ActionValue>,
}
impl<E: ElementTypes> std::fmt::Debug for Grammar<E> {
  /// Debug-formats the grammar's start symbol and the terminals it uses.
  ///
  /// Fix: this previously ended in `unimplemented!()`, so formatting any
  /// grammar with `{:?}` panicked; the `debug_struct` output is now
  /// completed with `finish()`.
  fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    let mut dbg_struct = f.debug_struct("Grammar");
    dbg_struct.field("start", &self.start_symbol);
    dbg_struct.field("Terms", &self.get_terminals().collect::<Vec<_>>());
    dbg_struct.finish()
  }
}
impl<E: ElementTypes> Grammar<E> {
  /// Builds a grammar from a start symbol, rules, and action data,
  /// validating it with `check_grammar` before returning it.
  fn new(
    start: E::NonTerm,
    rule_set: impl IntoIterator<Item = Rule<E>>,
    action_map: BTreeMap<E::ActionKey, E::ActionValue>,
  ) -> Result<Self, GrammarErrors<E>> {
    let g = Grammar {
      start_symbol: start,
      rule_set: rule_set
        .into_iter()
        .map(|r| (r.head().clone(), r))
        .collect(),
      action_map,
    };
    g.check_grammar().map(|_| g)
  }
  /// Returns the start nonterminal for this grammar.
  pub fn start_nt(&self) -> &E::NonTerm {
    &self.start_symbol
  }
  /// Returns an iterator over all of the rules for this grammar.
  pub fn rules<'a>(&'a self) -> impl Iterator<Item = RuleRef<'a, E>> {
    self.rule_set.iter().map(move |(_, rule)| RuleRef {
      grammar: ParentRef(self),
      rule: RefCompare(rule),
    })
  }
  /// Returns a map over rules of the grammar, keyed by the rule's head nonterminal.
  pub fn rule_set<'a>(&'a self) -> BTreeMap<&'a E::NonTerm, RuleRef<'a, E>> {
    self
      .rule_set
      .iter()
      .map(|(k, rule)| {
        (
          k,
          RuleRef {
            grammar: ParentRef(self),
            rule: RefCompare(rule),
          },
        )
      })
      .collect()
  }
  /// Gets the rule that has the given nonterminal as a head.
  pub fn get_rule<'a>(&'a self, nt: &E::NonTerm) -> Option<RuleRef<'a, E>> {
    self.rule_set.get(nt).map(|rule| RuleRef {
      grammar: ParentRef(self),
      rule: RefCompare(rule),
    })
  }
  /// Gets an iterator over all productions in the grammar.
  pub fn prods<'a>(&'a self) -> impl Iterator<Item = ProdRef<'a, E>> {
    self.rules().flat_map(|rule| rule.prods())
  }
  // Iterates over every element occurrence in every production body.
  fn get_elements(&self) -> impl Iterator<Item = &Element<E>> {
    self
      .rule_set
      .values()
      .flat_map(|r| &r.prods)
      .flat_map(|p| p.elements_iter())
  }
  // Terminal occurrences in production bodies (terminals may repeat).
  fn get_terminals(&self) -> impl Iterator<Item = &E::Term> {
    self.get_elements().filter_map(|e| e.as_term())
  }
  // Nonterminal occurrences in production bodies (may repeat).
  fn get_nonterminals(&self) -> impl Iterator<Item = &E::NonTerm> {
    self.get_elements().filter_map(|e| e.as_nonterm())
  }
  // Nonterminals referenced in some production that head no rule.
  fn nonterminals_without_rules(&self) -> BTreeSet<&E::NonTerm> {
    self
      .get_nonterminals()
      .filter(move |nt| !self.rule_set.contains_key(nt))
      .collect()
  }
  // Heads of rules whose production list is empty.
  fn rules_without_prods<'a>(&'a self) -> BTreeSet<&'a E::NonTerm> {
    let rules = self.rules();
    // `rules` is already an iterator, so the redundant `.into_iter()` that
    // was here has been removed.
    let prodless_rules = rules.filter(|r| r.prods().is_empty());
    let head_iter = prodless_rules.map(|r| r.head());
    head_iter.collect()
  }
  // Nonterminals reachable from the start symbol via rule bodies (BFS).
  fn reachable_nonterms(&self) -> BTreeSet<&E::NonTerm> {
    breadth_first_search(std::iter::once(&self.start_symbol), |nt| {
      match self.get_rule(nt) {
        Some(rule) => rule
          .prods()
          .iter()
          .flat_map(|p| p.elements_iter())
          .filter_map(|e| e.as_nonterm())
          .collect(),
        None => BTreeSet::new(),
      }
    })
  }
  // Nonterminals occurring in production bodies that are not reachable
  // from the start symbol.
  fn unreachable_nonterms(&self) -> BTreeSet<&E::NonTerm> {
    let reachable_nonterms = self.reachable_nonterms();
    self
      .get_nonterminals()
      .filter(|nt| !reachable_nonterms.contains(nt))
      .collect()
  }
}
/// The structural problems detected while validating a `Grammar`.
#[derive_unbounded(Clone, Debug)]
pub struct GrammarErrors<E: ElementTypes> {
  unreachable_nonterms: BTreeSet<E::NonTerm>,
  nonterms_without_rules: BTreeSet<E::NonTerm>,
  rules_without_prods: BTreeSet<E::NonTerm>,
}
impl<E: ElementTypes> GrammarErrors<E> {
  /// Returns `Ok(())` when no errors were recorded, `Err(self)` when any
  /// error set is non-empty.
  fn into_result(self) -> Result<(), Self> {
    let has_errors = !self.unreachable_nonterms.is_empty()
      || !self.nonterms_without_rules.is_empty()
      || !self.rules_without_prods.is_empty();
    if has_errors {
      Err(self)
    } else {
      Ok(())
    }
  }
}
impl<E: ElementTypes> Grammar<E> {
  /// Validates the grammar, collecting all structural problems at once:
  /// unreachable nonterminals, nonterminals with no rule, and rules with no
  /// productions.
  fn check_grammar(&self) -> Result<(), GrammarErrors<E>> {
    GrammarErrors {
      unreachable_nonterms: self
        .unreachable_nonterms()
        .into_iter()
        .cloned()
        .collect(),
      nonterms_without_rules: self
        .nonterminals_without_rules()
        .into_iter()
        .cloned()
        .collect(),
      rules_without_prods: self
        .rules_without_prods()
        .into_iter()
        .cloned()
        .collect(),
    }
    .into_result()
  }
}
impl<E: ElementTypes> LayoutDisplay for Grammar<E> {
  /// Renders each rule as `head:` followed by its indented productions; the
  /// start symbol's head is prefixed with `*`.
  fn disp(&self) -> Layout {
    let mut stack = Vec::new();
    for (k, v) in &self.rule_set {
      let name_layout = if &self.start_symbol == k {
        Layout::juxtapose(&[Layout::text("*"), k.disp()])
      } else {
        k.disp()
      };
      stack.push(Layout::juxtapose(&[name_layout, Layout::text(":")]));
      stack.push(Layout::juxtapose(&[Layout::text("  "), v.disp()]));
    }
    Layout::stack(stack)
  }
}
// ------------
/// A simple deref wrapper that ensures that two references _must_ be the same during
/// comparison. This ensures that we can't accidentally incorporate refs from different parents together.
#[derive(Debug)]
struct ParentRef<'a, T>(&'a T);
impl<'a, T> ParentRef<'a, T> {
  // Constructor; equivalent to the tuple constructor used elsewhere.
  fn new(r: &'a T) -> Self {
    ParentRef(r)
  }
}
impl<T> cmp::PartialEq for ParentRef<'_, T> {
  fn eq(&self, other: &Self) -> bool {
    // Comparing refs belonging to different parents is a bug; fail fast.
    assert!(ref_eq(self.0, other.0));
    true
  }
}
impl<T> cmp::Eq for ParentRef<'_, T> {}
impl<T> cmp::PartialOrd for ParentRef<'_, T> {
  fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
    Some(self.cmp(other))
  }
}
impl<T> cmp::Ord for ParentRef<'_, T> {
  fn cmp(&self, other: &Self) -> cmp::Ordering {
    // Same invariant as `eq`: both refs must name the same parent object.
    assert!(ref_eq(self.0, other.0));
    cmp::Ordering::Equal
  }
}
impl<'a, T> ops::Deref for ParentRef<'a, T> {
  type Target = &'a T;
  fn deref(&self) -> &&'a T {
    &self.0
  }
}
// Manual Clone/Copy avoid the `T: Clone`/`T: Copy` bounds a derive would
// add; only the reference itself is copied.
impl<T> Clone for ParentRef<'_, T> {
  fn clone(&self) -> Self {
    ParentRef(self.0)
  }
}
impl<T> Copy for ParentRef<'_, T> {}
// ------------
/// A wrapper that opts its contents out of comparison: all values compare
/// equal, so a wrapped field never influences its parent's Eq/Ord.
#[derive(Clone, Copy, Debug)]
struct NoCompare<T>(T);
impl<T> cmp::PartialEq for NoCompare<T> {
  fn eq(&self, _: &Self) -> bool {
    true
  }
}
impl<T> cmp::Eq for NoCompare<T> {}
impl<T> cmp::PartialOrd for NoCompare<T> {
  fn partial_cmp(&self, _: &Self) -> Option<cmp::Ordering> {
    Some(cmp::Ordering::Equal)
  }
}
impl<T> cmp::Ord for NoCompare<T> {
  fn cmp(&self, _: &Self) -> cmp::Ordering {
    cmp::Ordering::Equal
  }
}
impl<T> ops::Deref for NoCompare<T> {
  type Target = T;
  fn deref(&self) -> &T {
    &self.0
  }
}
// ------------
/// A reference wrapper that compares by address identity rather than by the
/// pointed-to value.
#[derive(Debug)]
struct RefCompare<'a, T>(&'a T);
impl<T> cmp::PartialEq for RefCompare<'_, T> {
  fn eq(&self, other: &Self) -> bool {
    ref_eq(self.0, other.0)
  }
}
impl<T> cmp::Eq for RefCompare<'_, T> {}
impl<T> cmp::PartialOrd for RefCompare<'_, T> {
  fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
    Some(self.cmp(other))
  }
}
impl<T> cmp::Ord for RefCompare<'_, T> {
  fn cmp(&self, other: &Self) -> cmp::Ordering {
    // Orders by pointer address: arbitrary but consistent for a given set
    // of live references.
    ref_cmp(self.0, other.0)
  }
}
impl<'a, T> ops::Deref for RefCompare<'a, T> {
  type Target = &'a T;
  fn deref(&self) -> &&'a T {
    &self.0
  }
}
impl<T> Clone for RefCompare<'_, T> {
  fn clone(&self) -> Self {
    RefCompare(self.0)
  }
}
impl<T> Copy for RefCompare<'_, T> {}
// ------------
#[derive_unbounded(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct RuleRef<'a, E: ElementTypes> {
  grammar: ParentRef<'a, Grammar<E>>,
  rule: RefCompare<'a, Rule<E>>,
}
impl<'a, E: ElementTypes> RuleRef<'a, E> {
  /// Returns the head nonterminal of the referenced rule.
  pub fn head(&self) -> &'a E::NonTerm {
    &self.rule.head
  }
  /// Returns refs to this rule's productions, each paired with its action
  /// value looked up in the owning grammar's action map.
  ///
  /// Panics with a diagnostic message if a production's action key has no
  /// entry in the action map (previously a bare `unwrap`), since that is a
  /// grammar-construction invariant violation.
  pub fn prods(&self) -> Vec<ProdRef<'a, E>> {
    self
      .rule
      .prods
      .iter()
      .map(|prod| ProdRef {
        grammar: self.grammar,
        head: &self.rule.head,
        prod: RefCompare(prod),
        action_value: NoCompare(
          self
            .grammar
            .action_map
            .get(&prod.action_key)
            .expect("production action_key missing from grammar action_map"),
        ),
      })
      .collect()
  }
}
// ------------
#[derive_unbounded(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct ProdRef<'a, E: ElementTypes> {
  grammar: ParentRef<'a, Grammar<E>>,
  head: &'a E::NonTerm,
  prod: RefCompare<'a, Production<E>>,
  // Excluded from comparison: the production ref already identifies it.
  action_value: NoCompare<&'a E::ActionValue>,
}
impl<'a, E: ElementTypes> ProdRef<'a, E> {
  /// Returns the head nonterminal of the rule this production belongs to.
  pub fn head(&self) -> &'a E::NonTerm {
    self.head
  }
  /// Returns the production's elements with their optional identifiers.
  pub fn prod_elements(&self) -> &'a Vec<ProductionElement<E>> {
    &self.prod.elements
  }
  /// Iterates over the production's bare elements.
  pub fn elements_iter(&self) -> impl Iterator<Item = &'a Element<E>> {
    self.prod.elements_iter()
  }
  /// Returns the identified element at `index`, or `None` if out of range.
  pub fn prod_element_at(
    &self,
    index: usize,
  ) -> Option<&'a ProductionElement<E>> {
    self.prod.elements.get(index)
  }
  /// Returns the bare element at `index`, or `None` if out of range.
  pub fn element_at(&self, index: usize) -> Option<&'a Element<E>> {
    self.prod.element_at(index)
  }
  /// Returns the production's action key.
  pub fn action_key(&self) -> &'a E::ActionKey {
    self.prod.action_key()
  }
  /// Returns the action value associated with this production's key.
  pub fn action_value(&self) -> &'a E::ActionValue {
    *self.action_value
  }
  /// Builds an owned `ProdKey` (head + action key) for this production.
  pub fn prod_key(&self) -> ProdKey<E> {
    ProdKey {
      head: self.head().clone(),
      action_key: self.action_key().clone(),
    }
  }
}
|
// ignore-tidy-filelength
//! Slice management and manipulation.
//!
//! For more details see [`std::slice`].
//!
//! [`std::slice`]: ../../std/slice/index.html
#![stable(feature = "rust1", since = "1.0.0")]
use crate::cmp::Ordering::{self, Greater, Less};
use crate::marker::Copy;
use crate::mem;
use crate::num::NonZeroUsize;
use crate::ops::{FnMut, Range, RangeBounds};
use crate::option::Option;
use crate::option::Option::{None, Some};
use crate::ptr;
use crate::result::Result;
use crate::result::Result::{Err, Ok};
use crate::slice;
#[unstable(
feature = "slice_internals",
issue = "none",
reason = "exposed from core to be reused in std; use the memchr crate"
)]
/// Pure rust memchr implementation, taken from rust-memchr
pub mod memchr;
mod ascii;
mod cmp;
mod index;
mod iter;
mod raw;
mod rotate;
mod sort;
mod specialize;
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Chunks, ChunksMut, Windows};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Iter, IterMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{RSplitN, RSplitNMut, Split, SplitMut, SplitN, SplitNMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use iter::{RSplit, RSplitMut};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use iter::{ChunksExact, ChunksExactMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use iter::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[unstable(feature = "array_chunks", issue = "74985")]
pub use iter::{ArrayChunks, ArrayChunksMut};
#[unstable(feature = "array_windows", issue = "75027")]
pub use iter::ArrayWindows;
#[unstable(feature = "slice_group_by", issue = "80552")]
pub use iter::{GroupBy, GroupByMut};
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use iter::{SplitInclusive, SplitInclusiveMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use raw::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "from_ref", since = "1.28.0")]
pub use raw::{from_mut, from_ref};
// This function is public only because there is no other way to unit test heapsort.
#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
pub use sort::heapsort;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use index::SliceIndex;
#[unstable(feature = "slice_range", issue = "76393")]
pub use index::range;
#[unstable(feature = "inherent_ascii_escape", issue = "77174")]
pub use ascii::EscapeAscii;
#[lang = "slice"]
#[cfg(not(test))]
impl<T> [T] {
/// Returns the number of elements in the slice.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert_eq!(a.len(), 3);
/// ```
#[lang = "slice_len_fn"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_len", since = "1.39.0")]
#[inline]
// SAFETY: const sound because we transmute out the length field as a usize (which it must be)
#[cfg_attr(bootstrap, rustc_allow_const_fn_unstable(const_fn_union))]
pub const fn len(&self) -> usize {
    // FIXME: Replace with `crate::ptr::metadata(self)` when that is const-stable.
    // As of this writing this causes a "Const-stable functions can only call other
    // const-stable functions" error.
    // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T
    // and PtrComponents<T> have the same memory layouts. Only std can make this
    // guarantee.
    // For a slice, the pointer metadata *is* the element count, so reading the
    // metadata component of the raw representation yields the length directly.
    unsafe { crate::ptr::PtrRepr { const_ptr: self }.components.metadata }
}
/// Returns `true` if the slice has a length of 0.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert!(!a.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_is_empty", since = "1.39.0")]
#[inline]
pub const fn is_empty(&self) -> bool {
    // Expressed via `len` so the body only uses operations that are already
    // const-stable (this fn is itself `rustc_const_stable`).
    self.len() == 0
}
/// Returns a reference to the first element of the slice, or `None` when
/// the slice is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&10), v.first());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.first());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn first(&self) -> Option<&T> {
    // Exhaustive slice-pattern match: every non-empty slice binds its
    // leading element; only the empty slice has none.
    match self {
        [first, ..] => Some(first),
        [] => None,
    }
}
/// Returns a mutable reference to the first element of the slice, or `None`
/// when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(first) = x.first_mut() {
///     *first = 5;
/// }
/// assert_eq!(x, &[5, 1, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn first_mut(&mut self) -> Option<&mut T> {
    // Mirror of `first`: the slice pattern reborrows only the head element.
    match self {
        [head, ..] => Some(head),
        [] => None,
    }
}
/// Splits off the first element, returning it together with the remaining
/// tail of the slice, or `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first() {
///     assert_eq!(first, &0);
///     assert_eq!(elements, &[1, 2]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn split_first(&self) -> Option<(&T, &[T])> {
    // The subslice pattern `rest @ ..` captures everything after the head.
    match self {
        [head, rest @ ..] => Some((head, rest)),
        [] => None,
    }
}
/// Splits off the first element mutably, returning it together with the
/// remaining tail of the slice, or `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first_mut() {
///     *first = 3;
///     elements[0] = 4;
///     elements[1] = 5;
/// }
/// assert_eq!(x, &[3, 4, 5]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
    // Head and tail are disjoint, so both mutable borrows can coexist.
    match self {
        [head, rest @ ..] => Some((head, rest)),
        [] => None,
    }
}
/// Splits off the last element, returning it together with the preceding
/// elements of the slice, or `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last() {
///     assert_eq!(last, &2);
///     assert_eq!(elements, &[0, 1]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn split_last(&self) -> Option<(&T, &[T])> {
    // Note the tuple order: the trailing element comes first in the result.
    match self {
        [rest @ .., tail] => Some((tail, rest)),
        [] => None,
    }
}
/// Splits off the last element mutably, returning it together with the
/// preceding elements of the slice, or `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last_mut() {
///     *last = 3;
///     elements[0] = 4;
///     elements[1] = 5;
/// }
/// assert_eq!(x, &[4, 5, 3]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
    // Tail element and the rest never overlap, so both may be borrowed mutably.
    match self {
        [rest @ .., tail] => Some((tail, rest)),
        [] => None,
    }
}
/// Returns a reference to the last element of the slice, or `None` when the
/// slice is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&30), v.last());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.last());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn last(&self) -> Option<&T> {
    // Exhaustive match on the trailing-element slice pattern.
    match self {
        [.., tail] => Some(tail),
        [] => None,
    }
}
/// Returns a mutable reference to the last element of the slice, or `None`
/// when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(last) = x.last_mut() {
///     *last = 10;
/// }
/// assert_eq!(x, &[0, 1, 10]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn last_mut(&mut self) -> Option<&mut T> {
    // Mirror of `last`: the pattern reborrows only the trailing element.
    match self {
        [.., tail] => Some(tail),
        [] => None,
    }
}
/// Returns a reference to an element or subslice depending on the type of
/// index.
///
/// - If given a position, returns a reference to the element at that
/// position or `None` if out of bounds.
/// - If given a range, returns the subslice corresponding to that range,
/// or `None` if out of bounds.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&40), v.get(1));
/// assert_eq!(Some(&[10, 40][..]), v.get(0..2));
/// assert_eq!(None, v.get(3));
/// assert_eq!(None, v.get(0..4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get<I>(&self, index: I) -> Option<&I::Output>
where
I: SliceIndex<Self>,
{
index.get(self)
}
/// Returns a mutable reference to an element or subslice depending on the
/// type of index (see [`get`]) or `None` if the index is out of bounds.
///
/// [`get`]: slice::get
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(elem) = x.get_mut(1) {
/// *elem = 42;
/// }
/// assert_eq!(x, &[0, 42, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
I: SliceIndex<Self>,
{
index.get_mut(self)
}
/// Returns a reference to an element or subslice, without doing bounds
/// checking.
///
/// For a safe alternative see [`get`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
/// [`get`]: slice::get
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
///
/// unsafe {
///     assert_eq!(x.get_unchecked(1), &2);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where
    I: SliceIndex<Self>,
{
    // Delegates to the index type's raw `get_unchecked`, which computes an
    // address with no bounds check; the dereference turns it back into a
    // reference with `self`'s lifetime.
    // SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`;
    // the slice is dereferencable because `self` is a safe reference.
    // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
    unsafe { &*index.get_unchecked(self) }
}
/// Returns a mutable reference to an element or subslice, without doing
/// bounds checking.
///
/// For a safe alternative see [`get_mut`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
/// [`get_mut`]: slice::get_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
///
/// unsafe {
///     let elem = x.get_unchecked_mut(1);
///     *elem = 13;
/// }
/// assert_eq!(x, &[1, 13, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where
    I: SliceIndex<Self>,
{
    // Mutable counterpart of `get_unchecked`: the index impl yields a raw
    // pointer without any bounds check, which is then reborrowed mutably.
    // SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
    // the slice is dereferencable because `self` is a safe reference.
    // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
    unsafe { &mut *index.get_unchecked_mut(self) }
}
/// Returns a raw pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// The caller must also ensure that the memory the pointer (non-transitively) points to
/// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
/// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let x_ptr = x.as_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
///     }
/// }
/// ```
///
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
#[inline]
pub const fn as_ptr(&self) -> *const T {
    // The `*const [T] -> *const T` cast keeps the data-pointer component of
    // the fat pointer and discards the length metadata.
    self as *const [T] as *const T
}
/// Returns an unsafe mutable pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// let x_ptr = x.as_mut_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         *x_ptr.add(i) += 2;
///     }
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_mut_ptr(&mut self) -> *mut T {
    // Same fat-pointer cast as `as_ptr`, but going through `*mut [T]` so the
    // resulting thin pointer retains write provenance from `&mut self`.
    self as *mut [T] as *mut T
}
/// Returns the two raw pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_ptr`] for warnings on using these pointers. The end pointer
/// requires extra caution, as it does not point to a valid element in the
/// slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// It can also be useful to check if a pointer to an element refers to an
/// element of this slice:
///
/// ```
/// let a = [1, 2, 3];
/// let x = &a[1] as *const _;
/// let y = &5 as *const _;
///
/// assert!(a.as_ptr_range().contains(&x));
/// assert!(!a.as_ptr_range().contains(&y));
/// ```
///
/// [`as_ptr`]: slice::as_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_ptr_range(&self) -> Range<*const T> {
    let start = self.as_ptr();
    // SAFETY: The `add` here is safe, because:
    //
    //   - Both pointers are part of the same object, as pointing directly
    //     past the object also counts.
    //
    //   - The size of the slice is never larger than isize::MAX bytes, as
    //     noted here:
    //       - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447
    //       - https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    //       - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety
    //     (This doesn't seem normative yet, but the very same assumption is
    //     made in many places, including the Index implementation of slices.)
    //
    //   - There is no wrapping around involved, as slices do not wrap past
    //     the end of the address space.
    //
    // See the documentation of pointer::add.
    let end = unsafe { start.add(self.len()) };
    // Half-open range: `end` is one-past-the-last element (equal to `start`
    // for an empty slice).
    start..end
}
/// Returns the two unsafe mutable pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_mut_ptr`] for warnings on using these pointers. The end
/// pointer requires extra caution, as it does not point to a valid element
/// in the slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
    // Mutable twin of `as_ptr_range`; the end pointer is one-past-the-last.
    let start = self.as_mut_ptr();
    // SAFETY: See as_ptr_range() above for why `add` here is safe.
    let end = unsafe { start.add(self.len()) };
    start..end
}
/// Swaps two elements in the slice.
///
/// # Arguments
///
/// * a - The index of the first element
/// * b - The index of the second element
///
/// # Panics
///
/// Panics if `a` or `b` are out of bounds.
///
/// # Examples
///
/// ```
/// let mut v = ["a", "b", "c", "d"];
/// v.swap(1, 3);
/// assert!(v == ["a", "d", "c", "b"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn swap(&mut self, a: usize, b: usize) {
    // Can't take two mutable loans from one vector, so instead use raw pointers.
    // The indexing in `addr_of_mut!(self[..])` is bounds-checked, so an
    // out-of-range `a` or `b` panics here, before any raw pointer is used.
    let pa = ptr::addr_of_mut!(self[a]);
    let pb = ptr::addr_of_mut!(self[b]);
    // SAFETY: `pa` and `pb` have been created from safe mutable references and refer
    // to elements in the slice and therefore are guaranteed to be valid and aligned.
    // Note that accessing the elements behind `a` and `b` is checked and will
    // panic when out of bounds.
    // `ptr::swap` is specified to handle `a == b` (overlapping pointers) correctly.
    unsafe {
        ptr::swap(pa, pb);
    }
}
/// Reverses the order of elements in the slice, in place.
///
/// # Examples
///
/// ```
/// let mut v = [1, 2, 3];
/// v.reverse();
/// assert!(v == [3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn reverse(&mut self) {
    // `i` counts how many elements at *each* end are already in their final
    // place; the fast paths below advance it a whole chunk at a time.
    let mut i: usize = 0;
    let ln = self.len();
    // For very small types, all the individual reads in the normal
    // path perform poorly.  We can do better, given efficient unaligned
    // load/store, by loading a larger chunk and reversing a register.
    // Ideally LLVM would do this for us, as it knows better than we do
    // whether unaligned reads are efficient (since that changes between
    // different ARM versions, for example) and what the best chunk size
    // would be.  Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls
    // the loop, so we need to do this ourselves.  (Hypothesis: reverse
    // is troublesome because the sides can be aligned differently --
    // will be, when the length is odd -- so there's no way of emitting
    // pre- and postludes to use fully-aligned SIMD in the middle.)
    let fast_unaligned = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
    if fast_unaligned && mem::size_of::<T>() == 1 {
        // Use the llvm.bswap intrinsic to reverse u8s in a usize
        let chunk = mem::size_of::<usize>();
        while i + chunk - 1 < ln / 2 {
            // SAFETY: There are several things to check here:
            //
            // - Note that `chunk` is either 4 or 8 due to the cfg check
            //   above. So `chunk - 1` is positive.
            // - Indexing with index `i` is fine as the loop check guarantees
            //   `i + chunk - 1 < ln / 2`
            //   <=> `i < ln / 2 - (chunk - 1) < ln / 2 < ln`.
            // - Indexing with index `ln - i - chunk = ln - (i + chunk)` is fine:
            //   - `i + chunk > 0` is trivially true.
            //   - The loop check guarantees:
            //     `i + chunk - 1 < ln / 2`
            //     <=> `i + chunk ≤ ln / 2 ≤ ln`, thus subtraction does not underflow.
            // - The `read_unaligned` and `write_unaligned` calls are fine:
            //   - `pa` points to index `i` where `i < ln / 2 - (chunk - 1)`
            //     (see above) and `pb` points to index `ln - i - chunk`, so
            //     both are at least `chunk`
            //     many bytes away from the end of `self`.
            //   - Any initialized memory is valid `usize`.
            unsafe {
                let ptr = self.as_mut_ptr();
                let pa = ptr.add(i);
                let pb = ptr.add(ln - i - chunk);
                let va = ptr::read_unaligned(pa as *mut usize);
                let vb = ptr::read_unaligned(pb as *mut usize);
                // Byte-reverse each word, then write them to the opposite ends.
                ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
                ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
            }
            i += chunk;
        }
    }
    if fast_unaligned && mem::size_of::<T>() == 2 {
        // Use rotate-by-16 to reverse u16s in a u32
        let chunk = mem::size_of::<u32>() / 2;
        while i + chunk - 1 < ln / 2 {
            // SAFETY: An unaligned u32 can be read from `i` if `i + 1 < ln`
            // (and obviously `i < ln`), because each element is 2 bytes and
            // we're reading 4.
            //
            // `i + chunk - 1 < ln / 2` # while condition
            // `i + 2 - 1 < ln / 2`
            // `i + 1 < ln / 2`
            //
            // Since it's less than the length divided by 2, then it must be
            // in bounds.
            //
            // This also means that the condition `0 < i + chunk <= ln` is
            // always respected, ensuring the `pb` pointer can be used
            // safely.
            unsafe {
                let ptr = self.as_mut_ptr();
                let pa = ptr.add(i);
                let pb = ptr.add(ln - i - chunk);
                let va = ptr::read_unaligned(pa as *mut u32);
                let vb = ptr::read_unaligned(pb as *mut u32);
                // Swapping the halves of a u32 reverses the two u16s it holds.
                ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
                ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
            }
            i += chunk;
        }
    }
    // Scalar tail: swap one pair per iteration until the two indices meet in
    // the middle (any middle element of an odd-length slice stays put).
    while i < ln / 2 {
        // SAFETY: `i` is inferior to half the length of the slice so
        // accessing `i` and `ln - i - 1` is safe (`i` starts at 0 and
        // will not go further than `ln / 2 - 1`).
        // The resulting pointers `pa` and `pb` are therefore valid and
        // aligned, and can be read from and written to.
        unsafe {
            // Unsafe swap to avoid the bounds check in safe swap.
            let ptr = self.as_mut_ptr();
            let pa = ptr.add(i);
            let pb = ptr.add(ln - i - 1);
            ptr::swap(pa, pb);
        }
        i += 1;
    }
}
/// Returns an iterator over the slice.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let mut iterator = x.iter();
///
/// assert_eq!(iterator.next(), Some(&1));
/// assert_eq!(iterator.next(), Some(&2));
/// assert_eq!(iterator.next(), Some(&4));
/// assert_eq!(iterator.next(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter(&self) -> Iter<'_, T> {
    // Thin delegation to the iterator constructor defined in the `iter` module.
    Iter::new(self)
}
/// Returns an iterator that allows modifying each value.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// for elem in x.iter_mut() {
///     *elem += 2;
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
    // Thin delegation to the mutable iterator constructor in the `iter` module.
    IterMut::new(self)
}
/// Returns an iterator over all contiguous windows of length
/// `size`. The windows overlap. If the slice is shorter than
/// `size`, the iterator returns no values.
///
/// # Panics
///
/// Panics if `size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['r', 'u', 's', 't'];
/// let mut iter = slice.windows(2);
/// assert_eq!(iter.next().unwrap(), &['r', 'u']);
/// assert_eq!(iter.next().unwrap(), &['u', 's']);
/// assert_eq!(iter.next().unwrap(), &['s', 't']);
/// assert!(iter.next().is_none());
/// ```
///
/// If the slice is shorter than `size`:
///
/// ```
/// let slice = ['f', 'o', 'o'];
/// let mut iter = slice.windows(4);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn windows(&self, size: usize) -> Windows<'_, T> {
    // `NonZeroUsize` encodes the `size != 0` precondition in the type;
    // the `expect` is the documented panic for a zero window size.
    let size = NonZeroUsize::new(size).expect("size is zero");
    Windows::new(self, size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`rchunks`] for the same iterator but starting at the end of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert_eq!(iter.next().unwrap(), &['m']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`chunks_exact`]: slice::chunks_exact
/// [`rchunks`]: slice::rchunks
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
    // A zero chunk size would make the iterator unable to advance; reject it
    // up front (this is the documented panic).
    assert_ne!(chunk_size, 0);
    Chunks::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`rchunks_mut`] for the same iterator but starting at
/// the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 3]);
/// ```
///
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
/// [`rchunks_mut`]: slice::rchunks_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
    // Same zero-size guard as `chunks`; this is the documented panic.
    assert_ne!(chunk_size, 0);
    ChunksMut::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`chunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`rchunks_exact`] for the same iterator but starting at the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks`]: slice::chunks
/// [`rchunks_exact`]: slice::rchunks_exact
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
    // Zero-size chunks can never advance the iterator; documented panic.
    assert_ne!(chunk_size, 0);
    ChunksExact::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`chunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`rchunks_exact_mut`] for the same iterator but starting at the end of
/// the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_exact_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_mut`]: slice::chunks_mut
/// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
    // Zero-size guard, mirroring `chunks_exact`; documented panic.
    assert_ne!(chunk_size, 0);
    ChunksExactMut::new(self, chunk_size)
}
/// Splits the slice into a slice of `N`-element arrays,
/// assuming that there's no remainder.
///
/// # Safety
///
/// This may only be called when
/// - The slice splits exactly into `N`-element chunks (aka `self.len() % N == 0`).
/// - `N != 0`.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice: &[char] = &['l', 'o', 'r', 'e', 'm', '!'];
/// let chunks: &[[char; 1]] =
///     // SAFETY: 1-element chunks never have remainder
///     unsafe { slice.as_chunks_unchecked() };
/// assert_eq!(chunks, &[['l'], ['o'], ['r'], ['e'], ['m'], ['!']]);
/// let chunks: &[[char; 3]] =
///     // SAFETY: The slice length (6) is a multiple of 3
///     unsafe { slice.as_chunks_unchecked() };
/// assert_eq!(chunks, &[['l', 'o', 'r'], ['e', 'm', '!']]);
///
/// // These would be unsound:
/// // let chunks: &[[_; 5]] = slice.as_chunks_unchecked() // The slice length is not a multiple of 5
/// // let chunks: &[[_; 0]] = slice.as_chunks_unchecked() // Zero-length chunks are never allowed
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
    // Debug-only checks of the caller's safety contract; compiled out in release.
    debug_assert_ne!(N, 0);
    debug_assert_eq!(self.len() % N, 0);
    let new_len =
        // SAFETY: Our precondition is exactly what's needed to call this
        unsafe { crate::intrinsics::exact_div(self.len(), N) };
    // SAFETY: We cast a slice of `new_len * N` elements into
    // a slice of `new_len` many `N` elements chunks.
    unsafe { from_raw_parts(self.as_ptr().cast(), new_len) }
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the beginning of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let (chunks, remainder) = slice.as_chunks();
/// assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
/// assert_eq!(remainder, &['m']);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) {
    assert_ne!(N, 0);
    // `len * N` is the largest multiple of `N` that fits; splitting there
    // leaves a remainder strictly shorter than `N`.
    let len = self.len() / N;
    let (multiple_of_n, remainder) = self.split_at(len * N);
    // SAFETY: We already panicked for zero, and ensured by construction
    // that the length of the subslice is a multiple of N.
    let array_slice = unsafe { multiple_of_n.as_chunks_unchecked() };
    (array_slice, remainder)
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the end of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let (remainder, chunks) = slice.as_rchunks();
/// assert_eq!(remainder, &['l']);
/// assert_eq!(chunks, &[['o', 'r'], ['e', 'm']]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]]) {
    assert_ne!(N, 0);
    // Unlike `as_chunks`, the remainder is taken from the *front* so the
    // `N`-sized chunks align with the end of the slice.
    let len = self.len() / N;
    let (remainder, multiple_of_n) = self.split_at(self.len() - len * N);
    // SAFETY: We already panicked for zero, and ensured by construction
    // that the length of the subslice is a multiple of N.
    let array_slice = unsafe { multiple_of_n.as_chunks_unchecked() };
    (remainder, array_slice)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are array references and do not overlap. If `N` does not divide the
/// length of the slice, then the last up to `N-1` elements will be omitted and can be
/// retrieved from the `remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.array_chunks();
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks_exact`]: slice::chunks_exact
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
    // Until a compile-time check exists, a zero `N` is rejected at runtime.
    assert_ne!(N, 0);
    ArrayChunks::new(self)
}
/// Splits the slice into a slice of `N`-element arrays,
/// assuming that there's no remainder.
///
/// # Safety
///
/// This may only be called when
/// - The slice splits exactly into `N`-element chunks (aka `self.len() % N == 0`).
/// - `N != 0`.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice: &mut [char] = &mut ['l', 'o', 'r', 'e', 'm', '!'];
/// let chunks: &mut [[char; 1]] =
///     // SAFETY: 1-element chunks never have remainder
///     unsafe { slice.as_chunks_unchecked_mut() };
/// chunks[0] = ['L'];
/// assert_eq!(chunks, &[['L'], ['o'], ['r'], ['e'], ['m'], ['!']]);
/// let chunks: &mut [[char; 3]] =
///     // SAFETY: The slice length (6) is a multiple of 3
///     unsafe { slice.as_chunks_unchecked_mut() };
/// chunks[1] = ['a', 'x', '?'];
/// assert_eq!(slice, &['L', 'o', 'r', 'a', 'x', '?']);
///
/// // These would be unsound:
/// // let chunks: &[[_; 5]] = slice.as_chunks_unchecked_mut() // The slice length is not a multiple of 5
/// // let chunks: &[[_; 0]] = slice.as_chunks_unchecked_mut() // Zero-length chunks are never allowed
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub unsafe fn as_chunks_unchecked_mut<const N: usize>(&mut self) -> &mut [[T; N]] {
    // Debug-only checks of the caller's safety contract; compiled out in release.
    debug_assert_ne!(N, 0);
    debug_assert_eq!(self.len() % N, 0);
    let new_len =
        // SAFETY: Our precondition is exactly what's needed to call this
        unsafe { crate::intrinsics::exact_div(self.len(), N) };
    // SAFETY: We cast a slice of `new_len * N` elements into
    // a slice of `new_len` many `N` elements chunks.
    unsafe { from_raw_parts_mut(self.as_mut_ptr().cast(), new_len) }
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the beginning of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// let (chunks, remainder) = v.as_chunks_mut();
/// remainder[0] = 9;
/// for chunk in chunks {
///     *chunk = [count; 2];
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 9]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_chunks_mut<const N: usize>(&mut self) -> (&mut [[T; N]], &mut [T]) {
    assert_ne!(N, 0);
    // Split at the largest multiple of `N`; the two halves are disjoint, so
    // both mutable borrows can be returned together.
    let len = self.len() / N;
    let (multiple_of_n, remainder) = self.split_at_mut(len * N);
    // SAFETY: We already panicked for zero, and ensured by construction
    // that the length of the subslice is a multiple of N.
    let array_slice = unsafe { multiple_of_n.as_chunks_unchecked_mut() };
    (array_slice, remainder)
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the end of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// let (remainder, chunks) = v.as_rchunks_mut();
/// remainder[0] = 9;
/// for chunk in chunks {
/// *chunk = [count; 2];
/// count += 1;
/// }
/// assert_eq!(v, &[9, 1, 1, 2, 2]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_rchunks_mut<const N: usize>(&mut self) -> (&mut [T], &mut [[T; N]]) {
    // Zero-sized chunks would make the arithmetic below meaningless.
    assert_ne!(N, 0);
    // Number of complete `N`-element chunks that fit in `self`.
    let full_chunks = self.len() / N;
    // Keep the shortfall (fewer than `N` elements) at the *front*; the back
    // part then has length `full_chunks * N`, an exact multiple of `N`.
    let split_point = self.len() - full_chunks * N;
    let (head, tail) = self.split_at_mut(split_point);
    // SAFETY: `tail.len()` is `full_chunks * N`, a multiple of the
    // (already checked to be non-zero) chunk size `N`.
    let chunks = unsafe { tail.as_chunks_unchecked_mut() };
    (head, chunks)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable array references and do not overlap. If `N` does not divide
/// the length of the slice, then the last up to `N-1` elements will be omitted and
/// can be retrieved from the `into_remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact_mut`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.array_chunks_mut() {
/// *chunk = [count; 2];
/// count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
    // The chunk length is a compile-time constant; only `N == 0` must be
    // rejected here — all iteration state lives in `ArrayChunksMut`.
    assert_ne!(N, 0);
    ArrayChunksMut::new(self)
}
/// Returns an iterator over overlapping windows of `N` elements of a slice,
/// starting at the beginning of the slice.
///
/// This is the const generic equivalent of [`windows`].
///
/// If `N` is greater than the size of the slice, it will return no windows.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_windows)]
/// let slice = [0, 1, 2, 3];
/// let mut iter = slice.array_windows();
/// assert_eq!(iter.next().unwrap(), &[0, 1]);
/// assert_eq!(iter.next().unwrap(), &[1, 2]);
/// assert_eq!(iter.next().unwrap(), &[2, 3]);
/// assert!(iter.next().is_none());
/// ```
///
/// [`windows`]: slice::windows
#[unstable(feature = "array_windows", issue = "74985")]
#[inline]
pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> {
    // Zero-length windows are rejected up front; the `ArrayWindows`
    // iterator handles the `N > self.len()` case by yielding nothing.
    assert_ne!(N, 0);
    ArrayWindows::new(self)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`chunks`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert_eq!(iter.next().unwrap(), &['l']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`rchunks_exact`]: slice::rchunks_exact
/// [`chunks`]: slice::chunks
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
    // A chunk size of zero would make the iterator never advance.
    assert!(chunk_size != 0);
    RChunks::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`chunks_mut`] for the same iterator but starting at the
/// beginning of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[3, 2, 2, 1, 1]);
/// ```
///
/// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
/// [`chunks_mut`]: slice::chunks_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
    // A chunk size of zero would make the iterator never advance.
    assert!(chunk_size != 0);
    RChunksMut::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// end of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`rchunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`chunks_exact`] for the same iterator but starting at the beginning of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['l']);
/// ```
///
/// [`chunks`]: slice::chunks
/// [`rchunks`]: slice::rchunks
/// [`chunks_exact`]: slice::chunks_exact
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
    // A chunk size of zero would make the iterator never advance; the
    // leftover front elements are tracked inside `RChunksExact`.
    assert!(chunk_size != 0);
    RChunksExact::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`rchunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`chunks_exact_mut`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_exact_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[0, 2, 2, 1, 1]);
/// ```
///
/// [`chunks_mut`]: slice::chunks_mut
/// [`rchunks_mut`]: slice::rchunks_mut
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
    // A chunk size of zero would make the iterator never advance; the
    // leftover front elements are tracked inside `RChunksExactMut`.
    assert!(chunk_size != 0);
    RChunksExactMut::new(self, chunk_size)
}
/// Returns an iterator over the slice producing non-overlapping runs
/// of elements using the predicate to separate them.
///
/// The predicate is called on two elements following themselves,
/// it means the predicate is called on `slice[0]` and `slice[1]`
/// then on `slice[1]` and `slice[2]` and so on.
///
/// # Examples
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &[1, 1, 1, 3, 3, 2, 2, 2];
///
/// let mut iter = slice.group_by(|a, b| a == b);
///
/// assert_eq!(iter.next(), Some(&[1, 1, 1][..]));
/// assert_eq!(iter.next(), Some(&[3, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 2, 2][..]));
/// assert_eq!(iter.next(), None);
/// ```
///
/// This method can be used to extract the sorted subslices:
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &[1, 1, 2, 3, 2, 3, 2, 3, 4];
///
/// let mut iter = slice.group_by(|a, b| a <= b);
///
/// assert_eq!(iter.next(), Some(&[1, 1, 2, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 3, 4][..]));
/// assert_eq!(iter.next(), None);
/// ```
#[unstable(feature = "slice_group_by", issue = "80552")]
#[inline]
pub fn group_by<F>(&self, pred: F) -> GroupBy<'_, T, F>
where
    F: FnMut(&T, &T) -> bool,
{
    // No validation needed here; the grouping logic (applying `pred` to
    // adjacent element pairs) lives entirely in the `GroupBy` iterator.
    GroupBy::new(self, pred)
}
/// Returns an iterator over the slice producing non-overlapping mutable
/// runs of elements using the predicate to separate them.
///
/// The predicate is called on two elements following themselves,
/// it means the predicate is called on `slice[0]` and `slice[1]`
/// then on `slice[1]` and `slice[2]` and so on.
///
/// # Examples
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &mut [1, 1, 1, 3, 3, 2, 2, 2];
///
/// let mut iter = slice.group_by_mut(|a, b| a == b);
///
/// assert_eq!(iter.next(), Some(&mut [1, 1, 1][..]));
/// assert_eq!(iter.next(), Some(&mut [3, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 2, 2][..]));
/// assert_eq!(iter.next(), None);
/// ```
///
/// This method can be used to extract the sorted subslices:
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &mut [1, 1, 2, 3, 2, 3, 2, 3, 4];
///
/// let mut iter = slice.group_by_mut(|a, b| a <= b);
///
/// assert_eq!(iter.next(), Some(&mut [1, 1, 2, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 3, 4][..]));
/// assert_eq!(iter.next(), None);
/// ```
#[unstable(feature = "slice_group_by", issue = "80552")]
#[inline]
pub fn group_by_mut<F>(&mut self, pred: F) -> GroupByMut<'_, T, F>
where
    F: FnMut(&T, &T) -> bool,
{
    // Mutable counterpart of `group_by`; the grouping logic lives
    // entirely in the `GroupByMut` iterator.
    GroupByMut::new(self, pred)
}
/// Divides one slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 4, 5, 6];
///
/// {
/// let (left, right) = v.split_at(0);
/// assert_eq!(left, []);
/// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
/// let (left, right) = v.split_at(2);
/// assert_eq!(left, [1, 2]);
/// assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// {
/// let (left, right) = v.split_at(6);
/// assert_eq!(left, [1, 2, 3, 4, 5, 6]);
/// assert_eq!(right, []);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
    assert!(mid <= self.len());
    // SAFETY: the assert above establishes `0 <= mid <= self.len()`, which
    // is exactly the caller contract of `split_at_unchecked`. (Unlike the
    // `&mut` variant, the shared split is implemented with `get_unchecked`,
    // not `from_raw_parts_mut`.)
    unsafe { self.split_at_unchecked(mid) }
}
/// Divides one mutable slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let mut v = [1, 0, 3, 0, 5, 6];
/// let (left, right) = v.split_at_mut(2);
/// assert_eq!(left, [1, 0]);
/// assert_eq!(right, [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    assert!(mid <= self.len());
    // SAFETY: the assert above establishes `0 <= mid <= self.len()`, the
    // caller contract of `split_at_mut_unchecked`. `[ptr; mid]` and
    // `[mid; len]` are inside `self` and do not overlap, which
    // fulfills the requirements of `from_raw_parts_mut`.
    unsafe { self.split_at_mut_unchecked(mid) }
}
/// Divides one slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at`]: slice::split_at
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```compile_fail
/// #![feature(slice_split_at_unchecked)]
///
/// let v = [1, 2, 3, 4, 5, 6];
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(0);
/// assert_eq!(left, []);
/// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(2);
/// assert_eq!(left, [1, 2]);
/// assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(6);
/// assert_eq!(left, [1, 2, 3, 4, 5, 6]);
/// assert_eq!(right, []);
/// }
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
    // Shared references may freely coexist, so plain unchecked range
    // indexing is all that's needed (no raw-pointer splitting as in the
    // `&mut` variant).
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`
    unsafe { (self.get_unchecked(..mid), self.get_unchecked(mid..)) }
}
/// Divides one mutable slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at_mut`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at_mut`]: slice::split_at_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```compile_fail
/// #![feature(slice_split_at_unchecked)]
///
/// let mut v = [1, 0, 3, 0, 5, 6];
/// // scoped to restrict the lifetime of the borrows
/// unsafe {
/// let (left, right) = v.split_at_mut_unchecked(2);
/// assert_eq!(left, [1, 0]);
/// assert_eq!(right, [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// }
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    let len = self.len();
    let ptr = self.as_mut_ptr();
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`.
    //
    // `[ptr; mid]` and `[mid; len]` are not overlapping, so returning a mutable reference
    // is fine. Note also that `len - mid` cannot underflow because the
    // caller guarantees `mid <= len`.
    unsafe { (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid)) }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the first element is matched, an empty slice will be the first item
/// returned by the iterator. Similarly, if the last element in the slice
/// is matched, an empty slice will be the last item returned by the
/// iterator:
///
/// ```
/// let slice = [10, 40, 33];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert!(iter.next().is_none());
/// ```
///
/// If two matched elements are directly adjacent, an empty slice will be
/// present between them:
///
/// ```
/// let slice = [10, 6, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split<F>(&self, pred: F) -> Split<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // All iteration state (the slice and the separator predicate) lives
    // in the `Split` iterator.
    Split::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_mut(|num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `split`; state lives in the `SplitMut` iterator.
    SplitMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is contained in the end of the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the last element of the slice is matched,
/// that element will be considered the terminator of the preceding slice.
/// That slice will be the last item returned by the iterator.
///
/// ```
/// let slice = [3, 10, 40, 33];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[3]);
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "split_inclusive", since = "1.51.0")]
#[inline]
pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Like `split`, but the iterator keeps each matched element at the end
    // of its subslice; that logic lives in `SplitInclusive`.
    SplitInclusive::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is contained in the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
/// let terminator_idx = group.len()-1;
/// group[terminator_idx] = 1;
/// }
/// assert_eq!(v, [10, 40, 1, 20, 1, 1]);
/// ```
#[stable(feature = "split_inclusive", since = "1.51.0")]
#[inline]
pub fn split_inclusive_mut<F>(&mut self, pred: F) -> SplitInclusiveMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `split_inclusive`.
    SplitInclusiveMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, starting at the end of the slice and working backwards.
/// The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [11, 22, 33, 0, 44, 55];
/// let mut iter = slice.rsplit(|num| *num == 0);
///
/// assert_eq!(iter.next().unwrap(), &[44, 55]);
/// assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
/// assert_eq!(iter.next(), None);
/// ```
///
/// As with `split()`, if the first or last element is matched, an empty
/// slice will be the first (or last) item returned by the iterator.
///
/// ```
/// let v = &[0, 1, 1, 2, 3, 5, 8];
/// let mut it = v.rsplit(|n| *n % 2 == 0);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next().unwrap(), &[3, 5]);
/// assert_eq!(it.next().unwrap(), &[1, 1]);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next(), None);
/// ```
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit<F>(&self, pred: F) -> RSplit<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Reverse-direction counterpart of `split`; state lives in `RSplit`.
    RSplit::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`, starting at the end of the slice and working
/// backwards. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [100, 400, 300, 200, 600, 500];
///
/// let mut count = 0;
/// for group in v.rsplit_mut(|num| *num % 3 == 0) {
/// count += 1;
/// group[0] = count;
/// }
/// assert_eq!(v, [3, 400, 300, 2, 600, 1]);
/// ```
///
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable, reverse-direction counterpart of `split`.
    RSplitMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once by numbers divisible by 3 (i.e., `[10, 40]`,
/// `[20, 60, 50]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Implemented as an unbounded `split` whose item count `SplitN` caps
    // at `n`, returning the untouched rest as the final item.
    SplitN::new(self.split(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 50]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `splitn`: an unbounded `split_mut` capped at
    // `n` items by `SplitNMut`.
    SplitNMut::new(self.split_mut(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once, starting from the end, by numbers divisible
/// by 3 (i.e., `[50]`, `[10, 40, 30, 20]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.rsplitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Reverse-direction counterpart of `splitn`: an unbounded `rsplit`
    // capped at `n` items by `RSplitN`.
    RSplitN::new(self.rsplit(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut s = [10, 40, 30, 20, 60, 50];
///
/// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(s, [1, 40, 30, 20, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable, reverse-direction counterpart of `splitn`.
    RSplitNMut::new(self.rsplit_mut(pred), n)
}
/// Returns `true` if the slice contains an element with the given value.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.contains(&30));
/// assert!(!v.contains(&50));
/// ```
///
/// If you do not have a `&T`, but some other value that you can compare
/// with one (for example, `String` implements `PartialEq<str>`), you can
/// use `iter().any`:
///
/// ```
/// let v = [String::from("hello"), String::from("world")]; // slice of `String`
/// assert!(v.iter().any(|e| e == "hello")); // search with `&str`
/// assert!(!v.iter().any(|e| e == "hi"));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn contains(&self, x: &T) -> bool
where
    T: PartialEq,
{
    // The search is dispatched through the `cmp::SliceContains` trait
    // rather than written inline here, so that individual element types
    // can provide their own implementation.
    cmp::SliceContains::slice_contains(x, self)
}
/// Returns `true` if `needle` is a prefix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.starts_with(&[10]));
/// assert!(v.starts_with(&[10, 40]));
/// assert!(!v.starts_with(&[50]));
/// assert!(!v.starts_with(&[10, 50]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.starts_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.starts_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn starts_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // `get` with a `..n` range yields `None` when the slice holds fewer
    // than `n` elements — exactly the "cannot be a prefix" case; otherwise
    // compare the needle against the leading subslice element-wise.
    self.get(..needle.len()).map_or(false, |head| needle == head)
}
/// Returns `true` if `needle` is a suffix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.ends_with(&[30]));
/// assert!(v.ends_with(&[40, 30]));
/// assert!(!v.ends_with(&[50]));
/// assert!(!v.ends_with(&[50, 30]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.ends_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.ends_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ends_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // If the slice is shorter than the needle the subtraction fails and
    // the answer is `false`; otherwise compare the needle against the
    // trailing subslice element-wise.
    match self.len().checked_sub(needle.len()) {
        Some(start) => needle == &self[start..],
        None => false,
    }
}
/// Returns a subslice with the prefix removed.
///
/// If the slice starts with `prefix`, returns the subslice after the prefix, wrapped in `Some`.
/// If `prefix` is empty, simply returns the original slice.
///
/// If the slice does not start with `prefix`, returns `None`.
///
/// # Examples
///
/// ```
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
/// assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
/// assert_eq!(v.strip_prefix(&[50]), None);
/// assert_eq!(v.strip_prefix(&[10, 50]), None);
///
/// let prefix : &str = "he";
/// assert_eq!(b"hello".strip_prefix(prefix.as_bytes()),
/// Some(b"llo".as_ref()));
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[stable(feature = "slice_strip", since = "1.51.0")]
pub fn strip_prefix<P: SlicePattern<Item = T> + ?Sized>(&self, prefix: &P) -> Option<&[T]>
where
    T: PartialEq,
{
    // This function will need rewriting if and when SlicePattern becomes more sophisticated.
    let prefix = prefix.as_slice();
    // Bail out via `?` when `self` is too short to contain `prefix` at all.
    let rest = self.get(prefix.len()..)?;
    // `rest` existing proves `prefix.len() <= self.len()`, so the head
    // indexing below cannot panic.
    if &self[..prefix.len()] == prefix { Some(rest) } else { None }
}
/// Returns a subslice with the suffix removed.
///
/// If the slice ends with `suffix`, returns the subslice before the suffix, wrapped in `Some`.
/// If `suffix` is empty, simply returns the original slice.
///
/// If the slice does not end with `suffix`, returns `None`.
///
/// # Examples
///
/// ```
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
/// assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
/// assert_eq!(v.strip_suffix(&[50]), None);
/// assert_eq!(v.strip_suffix(&[50, 30]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[stable(feature = "slice_strip", since = "1.51.0")]
pub fn strip_suffix<P: SlicePattern<Item = T> + ?Sized>(&self, suffix: &P) -> Option<&[T]>
where
    T: PartialEq,
{
    // This function will need rewriting if and when SlicePattern becomes more sophisticated.
    let suffix = suffix.as_slice();
    // Bail out via `?` when `self` is too short to contain `suffix` at all;
    // otherwise `start` marks where the candidate suffix begins.
    let start = self.len().checked_sub(suffix.len())?;
    if &self[start..] == suffix { Some(&self[..start]) } else { None }
}
/// Binary searches this sorted slice for a given element.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. The index is chosen
/// deterministically, but is subject to change in future versions of Rust.
/// If the value is not found then [`Result::Err`] is returned, containing
/// the index where a matching element could be inserted while maintaining
/// sorted order.
///
/// See also [`binary_search_by`], [`binary_search_by_key`], and [`partition_point`].
///
/// [`binary_search_by`]: slice::binary_search_by
/// [`binary_search_by_key`]: slice::binary_search_by_key
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// assert_eq!(s.binary_search(&13), Ok(9));
/// assert_eq!(s.binary_search(&4), Err(7));
/// assert_eq!(s.binary_search(&100), Err(13));
/// let r = s.binary_search(&1);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
///
/// If you want to insert an item to a sorted vector, while maintaining
/// sort order:
///
/// ```
/// let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
/// let num = 42;
/// let idx = s.binary_search(&num).unwrap_or_else(|x| x);
/// s.insert(idx, num);
/// assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where
    T: Ord,
{
    // Reuses the comparator-based search, ordering probes against `x`
    // with the element type's natural `Ord` ordering.
    self.binary_search_by(|p| p.cmp(x))
}
/// Binary searches this sorted slice with a comparator function.
///
/// The comparator function should implement an order consistent
/// with the sort order of the underlying slice, returning an
/// order code that indicates whether its argument is `Less`,
/// `Equal` or `Greater` than the desired target.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. The index is chosen
/// deterministically, but is subject to change in future versions of Rust.
/// If the value is not found then [`Result::Err`] is returned, containing
/// the index where a matching element could be inserted while maintaining
/// sorted order.
///
/// See also [`binary_search`], [`binary_search_by_key`], and [`partition_point`].
///
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by_key`]: slice::binary_search_by_key
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// let seek = 13;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
/// let seek = 4;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
/// let seek = 100;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
/// let seek = 1;
/// let r = s.binary_search_by(|probe| probe.cmp(&seek));
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> Ordering,
{
    // Classic binary search over the half-open window `[left, right)`.
    // Loop invariant: every index below `left` compared `Less` and every
    // index at or above `right` compared `Greater`.
    let mut size = self.len();
    let mut left = 0;
    let mut right = size;
    while left < right {
        // `size == right - left` here, so `mid` lies within `[left, right)`.
        let mid = left + size / 2;
        // SAFETY: the call is made safe by the following invariants:
        // - `mid >= 0`
        // - `mid < size`: `mid` is limited by `[left; right)` bound.
        let cmp = f(unsafe { self.get_unchecked(mid) });
        // The reason why we use if/else control flow rather than match
        // is because match reorders comparison operations, which is perf sensitive.
        // This is x86 asm for u8: https://rust.godbolt.org/z/8Y8Pra.
        if cmp == Less {
            left = mid + 1;
        } else if cmp == Greater {
            right = mid;
        } else {
            // Hint lets the caller elide a bounds check when indexing with `Ok(mid)`.
            // SAFETY: same as the `get_unchecked` above
            unsafe { crate::intrinsics::assume(mid < self.len()) };
            return Ok(mid);
        }
        size = right - left;
    }
    // Not found: `left` is the insertion point that keeps the slice sorted.
    Err(left)
}
/// Binary searches this sorted slice with a key extraction function.
///
/// The slice is assumed to be sorted ascending by the key, for instance by
/// a prior [`sort_by_key`] call using the same extraction function.
///
/// On success, [`Result::Ok`] holds the index of a matching element. When
/// several elements match, any one of them may be reported; the choice is
/// deterministic but may change between Rust versions. On failure,
/// [`Result::Err`] holds the index at which an element with the sought key
/// could be inserted while keeping the slice sorted.
///
/// See also [`binary_search`], [`binary_search_by`], and [`partition_point`].
///
/// [`sort_by_key`]: slice::sort_by_key
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by`]: slice::binary_search_by
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements in a slice of pairs sorted by
/// their second elements. The first is found, with a uniquely
/// determined position; the second and third are not found; the
/// fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
/// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
/// (1, 21), (2, 34), (4, 55)];
///
/// assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b), Ok(9));
/// assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b), Err(7));
/// assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13));
/// let r = s.binary_search_by_key(&1, |&(a, b)| b);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
// Lint rustdoc::broken_intra_doc_links is allowed because `slice::sort_by_key`
// lives in crate `alloc` and therefore doesn't exist yet while `core` is
// built; links to downstream crate: #74481. Since primitives are only
// documented in libstd (#73423), this never leads to broken links in practice.
#[allow(rustdoc::broken_intra_doc_links)]
#[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
#[inline]
pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> B,
    B: Ord,
{
    // Reduce to the comparator form: extract the probe's key and order it
    // against the sought key.
    self.binary_search_by(|probe| f(probe).cmp(b))
}
/// Sorts the slice without preserving the order of equal elements.
///
/// This sort is unstable (equal elements may be reordered), allocates no
/// auxiliary memory, and runs in *O*(*n* \* log(*n*)) in the worst case.
///
/// # Current implementation
///
/// The implementation is pattern-defeating quicksort ([pdqsort]) by Orson
/// Peters: it combines randomized quicksort's fast average case with
/// heapsort's worst-case guarantee, and handles slices with certain
/// patterns in linear time. The randomization uses a fixed seed, so the
/// result is always deterministic.
///
/// It is usually faster than stable sorting, except in a few special
/// cases, e.g., when the slice is made of several concatenated sorted
/// sequences.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort_unstable();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable(&mut self)
where
    T: Ord,
{
    // `quicksort` takes a strict "less than" predicate; `x < y` on `&T`
    // compares the pointed-to values via `PartialOrd`, which `Ord` implies.
    sort::quicksort(self, |x, y| x < y);
}
/// Sorts the slice with a comparator function, but might not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
/// total order if it is (for all `a`, `b` and `c`):
///
/// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
/// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
/// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
/// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
///
/// ```
/// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
/// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
/// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
/// ```
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_unstable_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_unstable_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by<F>(&mut self, mut compare: F)
where
F: FnMut(&T, &T) -> Ordering,
{
sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less);
}
/// Sorts the slice with a key extraction function, without preserving the
/// order of equal elements.
///
/// This sort is unstable (equal elements may be reordered), allocates no
/// auxiliary memory, and runs in *O*(*m* \* *n* \* log(*n*)) worst case,
/// where the key function is *O*(*m*).
///
/// # Current implementation
///
/// The implementation is pattern-defeating quicksort ([pdqsort]) by Orson
/// Peters: randomized quicksort's fast average case combined with
/// heapsort's worst-case guarantee, plus linear-time handling of slices
/// with certain patterns. Randomization uses a fixed seed, so the result
/// is always deterministic.
///
/// Because the key function is re-invoked for every comparison,
/// [`sort_unstable_by_key`](#method.sort_unstable_by_key) is likely to be
/// slower than [`sort_by_cached_key`](#method.sort_by_cached_key) when key
/// extraction is expensive.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_unstable_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by_key<K, F>(&mut self, mut f: F)
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Extract both keys (left operand first, as before) and compare them.
    sort::quicksort(self, |x, y| {
        let kx = f(x);
        let ky = f(y);
        kx.lt(&ky)
    });
}
/// Reorders the slice so that the element at `index` lands in its final
/// sorted position. Deprecated: superseded by `select_nth_unstable`.
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[rustc_deprecated(since = "1.49.0", reason = "use the select_nth_unstable() instead")]
#[inline]
pub fn partition_at_index(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    // Kept only for backward compatibility; forward to the stable name.
    self.select_nth_unstable(index)
}
/// Reorders the slice with a comparator function so that the element at
/// `index` lands in its final sorted position. Deprecated: superseded by
/// `select_nth_unstable_by`.
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[rustc_deprecated(since = "1.49.0", reason = "use select_nth_unstable_by() instead")]
#[inline]
pub fn partition_at_index_by<F>(
    &mut self,
    index: usize,
    compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Kept only for backward compatibility; forward to the stable name.
    self.select_nth_unstable_by(index, compare)
}
/// Reorders the slice with a key extraction function so that the element
/// at `index` lands in its final sorted position. Deprecated: superseded
/// by `select_nth_unstable_by_key`.
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[rustc_deprecated(since = "1.49.0", reason = "use the select_nth_unstable_by_key() instead")]
#[inline]
pub fn partition_at_index_by_key<K, F>(
    &mut self,
    index: usize,
    f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Kept only for backward compatibility; forward to the stable name.
    self.select_nth_unstable_by_key(index, f)
}
/// Reorders the slice so that the element at `index` ends up at its final
/// sorted position.
///
/// Afterwards, every value at a position `i < index` is less than or
/// equal to every value at a position `j > index`. The reordering is
/// unstable (any of several equal elements may end up at `index`),
/// in-place (no allocation), and *O*(*n*) worst-case. This operation is
/// also known as "kth element" in other libraries. The return value is a
/// triple: the elements before `index`, the element at `index`, and the
/// elements after `index`.
///
/// # Current implementation
///
/// The current algorithm is the quickselect portion of the same quicksort
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median
/// v.select_nth_unstable(2);
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [-3, -5, 1, 2, 4] ||
/// v == [-5, -3, 1, 2, 4] ||
/// v == [-3, -5, 1, 4, 2] ||
/// v == [-5, -3, 1, 4, 2]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    // Quickselect needs a strict "is less" predicate passed by reference.
    let mut is_less = |x: &T, y: &T| x.lt(y);
    sort::partition_at_index(self, index, &mut is_less)
}
/// Reorders the slice with a comparator function so that the element at
/// `index` ends up at its final sorted position.
///
/// Afterwards, every value at a position `i < index` is less than or
/// equal to every value at a position `j > index`, as judged by the
/// comparator. The reordering is unstable (any of several equal elements
/// may end up at `index`), in-place (no allocation), and *O*(*n*)
/// worst-case. This operation is also known as "kth element" in other
/// libraries. The return value is a triple: the elements before `index`,
/// the element at `index`, and the elements after `index`, all with
/// respect to the comparator.
///
/// # Current implementation
///
/// The current algorithm is the quickselect portion of the same quicksort
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median as if the slice were sorted in descending order.
/// v.select_nth_unstable_by(2, |a, b| b.cmp(a));
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [2, 4, 1, -5, -3] ||
/// v == [2, 4, 1, -3, -5] ||
/// v == [4, 2, 1, -5, -3] ||
/// v == [4, 2, 1, -3, -5]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable_by<F>(
    &mut self,
    index: usize,
    mut compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Adapt the three-way comparator to the strict "is less" predicate
    // that quickselect expects.
    let mut is_less = |x: &T, y: &T| compare(x, y) == Less;
    sort::partition_at_index(self, index, &mut is_less)
}
/// Reorders the slice with a key extraction function so that the element
/// at `index` ends up at its final sorted position.
///
/// Afterwards, every value at a position `i < index` is less than or
/// equal to every value at a position `j > index`, as judged by the
/// extracted keys. The reordering is unstable (any of several equal
/// elements may end up at `index`), in-place (no allocation), and
/// *O*(*n*) worst-case. This operation is also known as "kth element" in
/// other libraries. The return value is a triple: the elements before
/// `index`, the element at `index`, and the elements after `index`, all
/// with respect to the key function.
///
/// # Current implementation
///
/// The current algorithm is the quickselect portion of the same quicksort
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Return the median as if the array were sorted according to absolute value.
/// v.select_nth_unstable_by_key(2, |a| a.abs());
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [1, 2, -3, 4, -5] ||
/// v == [1, 2, -3, -5, 4] ||
/// v == [2, 1, -3, 4, -5] ||
/// v == [2, 1, -3, -5, 4]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable_by_key<K, F>(
    &mut self,
    index: usize,
    mut f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Note: the key function runs twice per comparison (left operand
    // first), matching the original behavior.
    let mut is_less = |x: &T, y: &T| f(x).lt(&f(y));
    sort::partition_at_index(self, index, &mut is_less)
}
/// Moves all consecutive repeated elements to the end of the slice,
/// judged by the [`PartialEq`] implementation.
///
/// Returns two slices: the first holds the retained elements with no
/// consecutive repeats; the second holds the removed duplicates in no
/// specified order.
///
/// If the slice is sorted, the first returned slice contains no
/// duplicates at all.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
///
/// let (dedup, duplicates) = slice.partition_dedup();
///
/// assert_eq!(dedup, [1, 2, 3, 2, 1]);
/// assert_eq!(duplicates, [2, 3, 1]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup(&mut self) -> (&mut [T], &mut [T])
where
    T: PartialEq,
{
    // Equality by `PartialEq` is just the general mechanism with `==`.
    self.partition_dedup_by(|lhs, rhs| lhs == rhs)
}
/// Moves all but the first of consecutive elements to the end of the slice satisfying
/// a given equality relation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// The `same_bucket` function is passed references to two elements from the slice and
/// must determine if the elements compare equal. The elements are passed in opposite order
/// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is moved
/// at the end of the slice.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
///
/// let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
///
/// assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
/// assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by<F>(&mut self, mut same_bucket: F) -> (&mut [T], &mut [T])
where
    F: FnMut(&mut T, &mut T) -> bool,
{
    // Although we have a mutable reference to `self`, we cannot make
    // *arbitrary* changes. The `same_bucket` calls could panic, so we
    // must ensure that the slice is in a valid state at all times.
    //
    // The way that we handle this is by using swaps; we iterate
    // over all the elements, swapping as we go so that at the end
    // the elements we wish to keep are in the front, and those we
    // wish to reject are at the back. We can then split the slice.
    // This operation is still `O(n)`.
    //
    // Example: We start in this state, where `r` represents "next
    // read" and `w` represents "next write".
    //
    // r
    // +---+---+---+---+---+---+
    // | 0 | 1 | 1 | 2 | 3 | 3 |
    // +---+---+---+---+---+---+
    // w
    //
    // Comparing self[r] against self[w-1], this is not a duplicate, so
    // we swap self[r] and self[w] (no effect as r==w) and then increment both
    // r and w, leaving us with:
    //
    // r
    // +---+---+---+---+---+---+
    // | 0 | 1 | 1 | 2 | 3 | 3 |
    // +---+---+---+---+---+---+
    // w
    //
    // Comparing self[r] against self[w-1], this value is a duplicate,
    // so we increment `r` but leave everything else unchanged:
    //
    // r
    // +---+---+---+---+---+---+
    // | 0 | 1 | 1 | 2 | 3 | 3 |
    // +---+---+---+---+---+---+
    // w
    //
    // Comparing self[r] against self[w-1], this is not a duplicate,
    // so swap self[r] and self[w] and advance r and w:
    //
    // r
    // +---+---+---+---+---+---+
    // | 0 | 1 | 2 | 1 | 3 | 3 |
    // +---+---+---+---+---+---+
    // w
    //
    // Not a duplicate, repeat:
    //
    // r
    // +---+---+---+---+---+---+
    // | 0 | 1 | 2 | 3 | 1 | 3 |
    // +---+---+---+---+---+---+
    // w
    //
    // Duplicate, advance r. End of slice. Split at w.
    // Slices of length 0 or 1 trivially contain no consecutive repeats.
    let len = self.len();
    if len <= 1 {
        return (self, &mut []);
    }
    let ptr = self.as_mut_ptr();
    let mut next_read: usize = 1;
    let mut next_write: usize = 1;
    // SAFETY: the `while` condition guarantees `next_read` and `next_write`
    // are less than `len`, thus are inside `self`. `prev_ptr_write` points to
    // one element before `ptr_write`, but `next_write` starts at 1, so
    // `prev_ptr_write` is never less than 0 and is inside the slice.
    // This fulfils the requirements for dereferencing `ptr_read`, `prev_ptr_write`
    // and `ptr_write`, and for using `ptr.add(next_read)`, `ptr.add(next_write - 1)`
    // and `prev_ptr_write.offset(1)`.
    //
    // `next_write` is incremented at most once per loop iteration, meaning
    // no element is skipped when it may need to be swapped.
    //
    // `ptr_read` and `prev_ptr_write` never point to the same element. This
    // is required for `&mut *ptr_read`, `&mut *prev_ptr_write` to be safe.
    // The explanation is simply that `next_read >= next_write` is always true,
    // thus `next_read > next_write - 1` is too.
    unsafe {
        // Avoid bounds checks by using raw pointers.
        while next_read < len {
            let ptr_read = ptr.add(next_read);
            let prev_ptr_write = ptr.add(next_write - 1);
            if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
                if next_read != next_write {
                    let ptr_write = prev_ptr_write.offset(1);
                    mem::swap(&mut *ptr_read, &mut *ptr_write);
                }
                next_write += 1;
            }
            next_read += 1;
        }
    }
    // Everything before `next_write` is kept; everything after is duplicates.
    self.split_at_mut(next_write)
}
/// Moves all but the first of consecutive elements sharing the same key
/// to the end of the slice.
///
/// Returns two slices: the first holds the retained elements with no
/// consecutive key repeats; the second holds the removed duplicates in no
/// specified order.
///
/// If the slice is sorted, the first returned slice contains no
/// duplicates at all.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
///
/// let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
///
/// assert_eq!(dedup, [10, 20, 30, 20, 11]);
/// assert_eq!(duplicates, [21, 30, 13]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by_key<K, F>(&mut self, mut key: F) -> (&mut [T], &mut [T])
where
    F: FnMut(&mut T) -> K,
    K: PartialEq,
{
    // Two elements fall in the same bucket exactly when their extracted
    // keys compare equal (left key computed first, as before).
    self.partition_dedup_by(|lhs, rhs| {
        let k_lhs = key(lhs);
        let k_rhs = key(rhs);
        k_lhs == k_rhs
    })
}
/// Rotates the slice in-place such that the first `mid` elements of the
/// slice move to the end while the last `self.len() - mid` elements move to
/// the front. After calling `rotate_left`, the element previously at index
/// `mid` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `mid` is greater than the length of the
/// slice. Note that `mid == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_left(2);
/// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
/// ```
///
/// Rotating a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_left(1);
/// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_left(&mut self, mid: usize) {
    // Documented contract: `mid > len` panics, `mid == len` is a no-op.
    assert!(mid <= self.len());
    // `k` elements follow the pivot; the rotation exchanges the leading
    // `mid` elements with the trailing `k` elements.
    let k = self.len() - mid;
    let p = self.as_mut_ptr();
    // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
    // valid for reading and writing, as required by `ptr_rotate`.
    unsafe {
        rotate::ptr_rotate(mid, p.add(mid), k);
    }
}
/// Rotates the slice in-place such that the first `self.len() - k`
/// elements of the slice move to the end while the last `k` elements move
/// to the front. After calling `rotate_right`, the element previously at
/// index `self.len() - k` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `k` is greater than the length of the
/// slice. Note that `k == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_right(2);
/// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
/// ```
///
/// Rotate a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_right(1);
/// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_right(&mut self, k: usize) {
    // Documented contract: `k > len` panics, `k == len` is a no-op.
    assert!(k <= self.len());
    // Rotating right by `k` is rotating left by `len - k`; compute that
    // pivot and reuse the same primitive as `rotate_left`.
    let mid = self.len() - k;
    let p = self.as_mut_ptr();
    // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
    // valid for reading and writing, as required by `ptr_rotate`.
    unsafe {
        rotate::ptr_rotate(mid, p.add(mid), k);
    }
}
/// Fills `self` with elements by cloning `value`.
///
/// # Examples
///
/// ```
/// let mut buf = vec![0; 10];
/// buf.fill(1);
/// assert_eq!(buf, vec![1; 10]);
/// ```
#[doc(alias = "memset")]
#[stable(feature = "slice_fill", since = "1.50.0")]
pub fn fill(&mut self, value: T)
where
    T: Clone,
{
    // Dispatch through the `SpecFill` specialization trait (defined in the
    // `specialize` module, not visible here); presumably the generic impl
    // clones `value` into each element while specialized impls may use a
    // faster path such as `memset` — confirm in `specialize`.
    specialize::SpecFill::spec_fill(self, value);
}
/// Fills `self` with values produced by repeatedly calling a closure.
///
/// The closure creates one fresh value per element. To fill with clones
/// of a single value use [`fill`] instead; to fill with default values,
/// pass [`Default::default`] as the argument.
///
/// [`fill`]: slice::fill
///
/// # Examples
///
/// ```
/// let mut buf = vec![1; 10];
/// buf.fill_with(Default::default);
/// assert_eq!(buf, vec![0; 10]);
/// ```
#[doc(alias = "memset")]
#[stable(feature = "slice_fill_with", since = "1.51.0")]
pub fn fill_with<F>(&mut self, mut f: F)
where
    F: FnMut() -> T,
{
    // Invoke the closure once per slot, front to back.
    self.iter_mut().for_each(|slot| *slot = f());
}
/// Copies the elements from `src` into `self`.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` implements `Copy`, it can be more performant to use
/// [`copy_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Cloning two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.clone_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `clone_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].clone_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.clone_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`copy_from_slice`]: slice::copy_from_slice
/// [`split_at_mut`]: slice::split_at_mut
#[stable(feature = "clone_from_slice", since = "1.7.0")]
pub fn clone_from_slice(&mut self, src: &[T])
where
    T: Clone,
{
    // Delegates to a specialization hook (impl not visible in this file
    // section); the length-mismatch panic documented above and the
    // per-element cloning strategy are presumably handled there — confirm
    // against the `spec_clone_from` impls.
    self.spec_clone_from(src);
}
/// Copies all elements from `src` into `self`, using a memcpy.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` does not implement `Copy`, use [`clone_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Copying two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.copy_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `copy_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].copy_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.copy_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`clone_from_slice`]: slice::clone_from_slice
/// [`split_at_mut`]: slice::split_at_mut
#[doc(alias = "memcpy")]
#[stable(feature = "copy_from_slice", since = "1.9.0")]
pub fn copy_from_slice(&mut self, src: &[T])
where
    T: Copy,
{
    // The panic code path was put into a cold function to not bloat the
    // call site. `#[track_caller]` makes the panic report the caller's
    // location rather than this helper's.
    #[inline(never)]
    #[cold]
    #[track_caller]
    fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
        panic!(
            "source slice length ({}) does not match destination slice length ({})",
            src_len, dst_len,
        );
    }
    if self.len() != src.len() {
        len_mismatch_fail(self.len(), src.len());
    }
    // SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was
    // checked to have the same length. The slices cannot overlap because
    // mutable references are exclusive.
    unsafe {
        ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
    }
}
/// Copies elements from one part of the slice to another part of itself,
/// using a memmove.
///
/// `src` is the range within `self` to copy from. `dest` is the starting
/// index of the range within `self` to copy to, which will have the same
/// length as `src`. The two ranges may overlap. The ends of the two ranges
/// must be less than or equal to `self.len()`.
///
/// # Panics
///
/// This function will panic if either range exceeds the end of the slice,
/// or if the end of `src` is before the start.
///
/// # Examples
///
/// Copying four bytes within a slice:
///
/// ```
/// let mut bytes = *b"Hello, World!";
///
/// bytes.copy_within(1..5, 8);
///
/// assert_eq!(&bytes, b"Hello, Wello!");
/// ```
#[stable(feature = "copy_within", since = "1.37.0")]
#[track_caller]
pub fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dest: usize)
where
    T: Copy,
{
    // Resolve the (possibly open-ended) range bounds against `self.len()`;
    // bounds validation happens inside `slice::range`.
    let Range { start: src_start, end: src_end } = slice::range(src, ..self.len());
    let count = src_end - src_start;
    // `src_end <= len` holds here, so `len - count` cannot underflow.
    assert!(dest <= self.len() - count, "dest is out of bounds");
    // SAFETY: the conditions for `ptr::copy` have all been checked above,
    // as have those for `ptr::add`.
    unsafe {
        // Derive both `src_ptr` and `dest_ptr` from the same loan
        let ptr = self.as_mut_ptr();
        let src_ptr = ptr.add(src_start);
        let dest_ptr = ptr.add(dest);
        // `ptr::copy` is the memmove-style primitive, so the two ranges
        // are allowed to overlap.
        ptr::copy(src_ptr, dest_ptr, count);
    }
}
/// Swaps all elements in `self` with those in `other`.
///
/// The length of `other` must be the same as `self`.
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Example
///
/// Swapping two elements across slices:
///
/// ```
/// let mut slice1 = [0, 0];
/// let mut slice2 = [1, 2, 3, 4];
///
/// slice1.swap_with_slice(&mut slice2[2..]);
///
/// assert_eq!(slice1, [3, 4]);
/// assert_eq!(slice2, [1, 2, 0, 0]);
/// ```
///
/// Rust enforces that there can only be one mutable reference to a
/// particular piece of data in a particular scope. Because of this,
/// attempting to use `swap_with_slice` on a single slice will result in
/// a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
/// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// mutable sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.swap_with_slice(&mut right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 1, 2]);
/// ```
///
/// [`split_at_mut`]: slice::split_at_mut
#[stable(feature = "swap_with_slice", since = "1.27.0")]
pub fn swap_with_slice(&mut self, other: &mut [T]) {
    assert!(self.len() == other.len(), "destination and source slices have different lengths");
    // SAFETY: `self` is valid for `self.len()` elements by definition, and `other` was
    // checked to have the same length. The slices cannot overlap because
    // mutable references are exclusive.
    unsafe {
        ptr::swap_nonoverlapping(self.as_mut_ptr(), other.as_mut_ptr(), self.len());
    }
}
/// Function to calculate lengths of the middle and trailing slice for `align_to{,_mut}`.
///
/// Returns `(us_len, ts_len)`: how many whole `U`s fit into the slice and
/// how many `T`s are left over at the end.
fn align_to_offsets<U>(&self) -> (usize, usize) {
    // What we gonna do about `rest` is figure out what multiple of `U`s we can put in a
    // lowest number of `T`s. And how many `T`s we need for each such "multiple".
    //
    // Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
    // for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
    // place of every 3 Ts in the `rest` slice. A bit more complicated.
    //
    // Formula to calculate this is:
    //
    // Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
    // Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
    //
    // Expanded and simplified:
    //
    // Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
    // Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
    //
    // Luckily since all this is constant-evaluated... performance here matters not!
    #[inline]
    fn gcd(a: usize, b: usize) -> usize {
        use crate::intrinsics;
        // iterative stein’s algorithm
        // We should still make this `const fn` (and revert to recursive algorithm if we do)
        // because relying on llvm to consteval all this is… well, it makes me uncomfortable.
        // SAFETY: `a` and `b` are checked to be non-zero values.
        let (ctz_a, mut ctz_b) = unsafe {
            if a == 0 {
                return b;
            }
            if b == 0 {
                return a;
            }
            (intrinsics::cttz_nonzero(a), intrinsics::cttz_nonzero(b))
        };
        // `k` = shared power of two; gcd(a, b) = 2^k * gcd(a >> ctz_a, b >> ctz_b).
        let k = ctz_a.min(ctz_b);
        let mut a = a >> ctz_a;
        let mut b = b;
        loop {
            // remove all factors of 2 from b
            b >>= ctz_b;
            if a > b {
                mem::swap(&mut a, &mut b);
            }
            b = b - a;
            // SAFETY: `b` is checked to be non-zero.
            unsafe {
                if b == 0 {
                    break;
                }
                ctz_b = intrinsics::cttz_nonzero(b);
            }
        }
        // Reattach the shared power of two stripped at the start.
        a << k
    }
    let gcd: usize = gcd(mem::size_of::<T>(), mem::size_of::<U>());
    let ts: usize = mem::size_of::<U>() / gcd;
    let us: usize = mem::size_of::<T>() / gcd;
    // Armed with this knowledge, we can find how many `U`s we can fit!
    let us_len = self.len() / ts * us;
    // And how many `T`s will be in the trailing slice!
    let ts_len = self.len() % ts;
    (us_len, ts_len)
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
///     let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
///     let (prefix, shorts, suffix) = bytes.align_to::<u16>();
///     // less_efficient_algorithm_for_bytes(prefix);
///     // more_efficient_algorithm_for_aligned_shorts(shorts);
///     // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
    // Note that most of this function will be constant-evaluated,
    if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
        // handle ZSTs specially, which is – don't handle them at all.
        return (self, &[], &[]);
    }
    // First, find at what point do we split between the first and 2nd slice. Easy with
    // ptr.align_offset.
    let ptr = self.as_ptr();
    // SAFETY: See the `align_to_mut` method for the detailed safety comment.
    let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
    if offset > self.len() {
        // `align_offset` may return `usize::MAX` when alignment is impossible;
        // in that case everything goes into the prefix.
        (self, &[], &[])
    } else {
        let (left, rest) = self.split_at(offset);
        let (us_len, ts_len) = rest.align_to_offsets::<U>();
        // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
        // since the caller guarantees that we can transmute `T` to `U` safely.
        unsafe {
            (
                left,
                from_raw_parts(rest.as_ptr() as *const U, us_len),
                from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
            )
        }
    }
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
///     let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
///     let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
///     // less_efficient_algorithm_for_bytes(prefix);
///     // more_efficient_algorithm_for_aligned_shorts(shorts);
///     // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
    // Note that most of this function will be constant-evaluated,
    if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
        // handle ZSTs specially, which is – don't handle them at all.
        return (self, &mut [], &mut []);
    }
    // First, find at what point do we split between the first and 2nd slice. Easy with
    // ptr.align_offset.
    let ptr = self.as_ptr();
    // SAFETY: Here we are ensuring we will use aligned pointers for U for the
    // rest of the method. This is done by passing a pointer to &[T] with an
    // alignment targeted for U.
    // `crate::ptr::align_offset` is called with a correctly aligned and
    // valid pointer `ptr` (it comes from a reference to `self`) and with
    // a size that is a power of two (since it comes from the alignment for U),
    // satisfying its safety constraints.
    let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
    if offset > self.len() {
        // `align_offset` may return `usize::MAX` when alignment is impossible;
        // in that case everything goes into the prefix.
        (self, &mut [], &mut [])
    } else {
        let (left, rest) = self.split_at_mut(offset);
        let (us_len, ts_len) = rest.align_to_offsets::<U>();
        let rest_len = rest.len();
        let mut_ptr = rest.as_mut_ptr();
        // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
        // SAFETY: see comments for `align_to`.
        unsafe {
            (
                left,
                from_raw_parts_mut(mut_ptr as *mut U, us_len),
                from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
            )
        }
    }
}
/// Checks if the elements of this slice are sorted.
///
/// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the
/// slice yields exactly zero or one element, `true` is returned.
///
/// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition
/// implies that this function returns `false` if any two consecutive items are not
/// comparable.
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
/// let empty: [i32; 0] = [];
///
/// assert!([1, 2, 2, 9].is_sorted());
/// assert!(![1, 3, 2, 4].is_sorted());
/// assert!([0].is_sorted());
/// assert!(empty.is_sorted());
/// assert!(![0.0, 1.0, f32::NAN].is_sorted());
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted(&self) -> bool
where
    T: PartialOrd,
{
    // Delegates to the comparator-based form using `partial_cmp`; incomparable
    // neighbors (e.g. NaN) yield `None` there and thus `false` here.
    self.is_sorted_by(|a, b| a.partial_cmp(b))
}
/// Checks if the elements of this slice are sorted using the given comparator function.
///
/// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare`
/// function to determine the ordering of two elements. Apart from that, it's equivalent to
/// [`is_sorted`]; see its documentation for more information.
///
/// [`is_sorted`]: slice::is_sorted
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
where
    F: FnMut(&T, &T) -> Option<Ordering>,
{
    // The iterator version receives `&&T`, so deref once before calling `compare`.
    self.iter().is_sorted_by(|a, b| compare(*a, *b))
}
/// Checks if the elements of this slice are sorted using the given key extraction function.
///
/// Instead of comparing the slice's elements directly, this function compares the keys of the
/// elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see its
/// documentation for more information.
///
/// [`is_sorted`]: slice::is_sorted
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
///
/// assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
/// assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
where
    F: FnMut(&T) -> K,
    K: PartialOrd,
{
    // Delegates to the iterator adapter; keys are computed once per element pair.
    self.iter().is_sorted_by_key(f)
}
/// Returns the index of the partition point according to the given predicate
/// (the index of the first element of the second partition).
///
/// The slice is assumed to be partitioned according to the given predicate.
/// This means that all elements for which the predicate returns true are at the start of the slice
/// and all elements for which the predicate returns false are at the end.
/// For example, `[7, 15, 3, 5, 4, 12, 6]` is partitioned under the predicate `x % 2 != 0`
/// (all odd numbers are at the start, all even at the end).
///
/// If this slice is not partitioned, the returned result is unspecified and meaningless,
/// as this method performs a kind of binary search.
///
/// See also [`binary_search`], [`binary_search_by`], and [`binary_search_by_key`].
///
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by`]: slice::binary_search_by
/// [`binary_search_by_key`]: slice::binary_search_by_key
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 3, 5, 6, 7];
/// let i = v.partition_point(|&x| x < 5);
///
/// assert_eq!(i, 4);
/// assert!(v[..i].iter().all(|&x| x < 5));
/// assert!(v[i..].iter().all(|&x| !(x < 5)));
/// ```
#[stable(feature = "partition_point", since = "1.52.0")]
pub fn partition_point<P>(&self, mut pred: P) -> usize
where
    P: FnMut(&T) -> bool,
{
    // `binary_search_by` never returns `Ok` here (we never return `Equal`),
    // so the `Err` index is exactly the partition point.
    self.binary_search_by(|x| if pred(x) { Less } else { Greater }).unwrap_or_else(|i| i)
}
}
/// Specialization trait backing `<[T]>::clone_from_slice`: the default impl clones
/// element-by-element, while the `T: Copy` impl below uses a bulk memcpy.
trait CloneFromSpec<T> {
    fn spec_clone_from(&mut self, src: &[T]);
}
impl<T> CloneFromSpec<T> for [T]
where
    T: Clone,
{
    // Fallback: clone each element in place via `clone_from`, which may reuse
    // the destination's existing allocations.
    default fn spec_clone_from(&mut self, src: &[T]) {
        assert!(self.len() == src.len(), "destination and source slices have different lengths");
        // NOTE: We need to explicitly slice them to the same length
        // to make it easier for the optimizer to elide bounds checking.
        // But since it can't be relied on we also have an explicit specialization for T: Copy.
        let len = self.len();
        let src = &src[..len];
        for i in 0..len {
            self[i].clone_from(&src[i]);
        }
    }
}
impl<T> CloneFromSpec<T> for [T]
where
    T: Copy,
{
    // Specialization: `Copy` types can be copied wholesale (compiles to memcpy).
    fn spec_clone_from(&mut self, src: &[T]) {
        self.copy_from_slice(src);
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for &[T] {
    /// Creates an empty slice.
    fn default() -> Self {
        // A shared reference to an empty array coerces to `&[T]` of length 0.
        &[]
    }
}
#[stable(feature = "mut_slice_default", since = "1.5.0")]
impl<T> Default for &mut [T] {
    /// Creates a mutable empty slice.
    fn default() -> Self {
        // Empty slices are zero-length, so a unique reference to one is always sound.
        &mut []
    }
}
#[unstable(feature = "slice_pattern", reason = "stopgap trait for slice patterns", issue = "56345")]
/// Patterns in slices - currently, only used by `strip_prefix` and `strip_suffix`. At a future
/// point, we hope to generalise `core::str::Pattern` (which at the time of writing is limited to
/// `str`) to slices, and then this trait will be replaced or abolished.
pub trait SlicePattern {
    /// The element type of the slice being matched on.
    type Item;
    /// Currently, the consumers of `SlicePattern` need a slice.
    fn as_slice(&self) -> &[Self::Item];
}
#[stable(feature = "slice_strip", since = "1.51.0")]
impl<T> SlicePattern for [T] {
    type Item = T;
    /// A slice is trivially its own slice view.
    #[inline]
    fn as_slice(&self) -> &[Self::Item] {
        self
    }
}
#[stable(feature = "slice_strip", since = "1.51.0")]
impl<T, const N: usize> SlicePattern for [T; N] {
    type Item = T;
    /// Arrays coerce to slices, so this is a no-op view of the array's elements.
    #[inline]
    fn as_slice(&self) -> &[Self::Item] {
        self
    }
}
Rollup merge of #86593 - jhpratt:stabilize-const_slice_first_last, r=m-ou-se
Partially stabilize `const_slice_first_last`
This stabilizes the non-`mut` methods of `const_slice_first_last` as `const`. These methods are trivial to implement, and I am not aware of any blockers.
`@rustbot` label +A-const-fn +S-waiting-on-review +T-libs-api
// ignore-tidy-filelength
//! Slice management and manipulation.
//!
//! For more details see [`std::slice`].
//!
//! [`std::slice`]: ../../std/slice/index.html
#![stable(feature = "rust1", since = "1.0.0")]
use crate::cmp::Ordering::{self, Greater, Less};
use crate::marker::Copy;
use crate::mem;
use crate::num::NonZeroUsize;
use crate::ops::{FnMut, Range, RangeBounds};
use crate::option::Option;
use crate::option::Option::{None, Some};
use crate::ptr;
use crate::result::Result;
use crate::result::Result::{Err, Ok};
use crate::slice;
#[unstable(
feature = "slice_internals",
issue = "none",
reason = "exposed from core to be reused in std; use the memchr crate"
)]
/// Pure rust memchr implementation, taken from rust-memchr
pub mod memchr;
mod ascii;
mod cmp;
mod index;
mod iter;
mod raw;
mod rotate;
mod sort;
mod specialize;
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Chunks, ChunksMut, Windows};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Iter, IterMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{RSplitN, RSplitNMut, Split, SplitMut, SplitN, SplitNMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use iter::{RSplit, RSplitMut};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use iter::{ChunksExact, ChunksExactMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use iter::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[unstable(feature = "array_chunks", issue = "74985")]
pub use iter::{ArrayChunks, ArrayChunksMut};
#[unstable(feature = "array_windows", issue = "75027")]
pub use iter::ArrayWindows;
#[unstable(feature = "slice_group_by", issue = "80552")]
pub use iter::{GroupBy, GroupByMut};
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use iter::{SplitInclusive, SplitInclusiveMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use raw::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "from_ref", since = "1.28.0")]
pub use raw::{from_mut, from_ref};
// This function is public only because there is no other way to unit test heapsort.
#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
pub use sort::heapsort;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use index::SliceIndex;
#[unstable(feature = "slice_range", issue = "76393")]
pub use index::range;
#[unstable(feature = "inherent_ascii_escape", issue = "77174")]
pub use ascii::EscapeAscii;
#[lang = "slice"]
#[cfg(not(test))]
impl<T> [T] {
/// Returns the number of elements in the slice.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert_eq!(a.len(), 3);
/// ```
#[lang = "slice_len_fn"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_len", since = "1.39.0")]
#[inline]
// SAFETY: const sound because we transmute out the length field as a usize (which it must be)
#[cfg_attr(bootstrap, rustc_allow_const_fn_unstable(const_fn_union))]
pub const fn len(&self) -> usize {
    // FIXME: Replace with `crate::ptr::metadata(self)` when that is const-stable.
    // As of this writing this causes a "Const-stable functions can only call other
    // const-stable functions" error.
    // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T
    // and PtrComponents<T> have the same memory layouts. Only std can make this
    // guarantee.
    unsafe { crate::ptr::PtrRepr { const_ptr: self }.components.metadata }
}
/// Returns `true` if the slice has a length of 0.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert!(!a.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_is_empty", since = "1.39.0")]
#[inline]
pub const fn is_empty(&self) -> bool {
    // Defined purely in terms of `len` so the two can never disagree.
    self.len() == 0
}
/// Returns the first element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&10), v.first());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.first());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn first(&self) -> Option<&T> {
    // Slice pattern rather than `get(0)`: usable in a `const fn`.
    if let [first, ..] = self { Some(first) } else { None }
}
/// Returns a mutable reference to the first element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(first) = x.first_mut() {
///     *first = 5;
/// }
/// assert_eq!(x, &[5, 1, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn first_mut(&mut self) -> Option<&mut T> {
    if let [first, ..] = self { Some(first) } else { None }
}
/// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first() {
///     assert_eq!(first, &0);
///     assert_eq!(elements, &[1, 2]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn split_first(&self) -> Option<(&T, &[T])> {
    // `tail @ ..` binds the remainder of the slice as a subslice.
    if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
/// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
///
/// Both returned references are mutable and disjoint.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first_mut() {
///     *first = 3;
///     elements[0] = 4;
///     elements[1] = 5;
/// }
/// assert_eq!(x, &[3, 4, 5]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
    if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
/// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last() {
///     assert_eq!(last, &2);
///     assert_eq!(elements, &[0, 1]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn split_last(&self) -> Option<(&T, &[T])> {
    // `init @ ..` binds everything before the final element.
    if let [init @ .., last] = self { Some((last, init)) } else { None }
}
/// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
///
/// Both returned references are mutable and disjoint.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last_mut() {
///     *last = 3;
///     elements[0] = 4;
///     elements[1] = 5;
/// }
/// assert_eq!(x, &[4, 5, 3]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
    if let [init @ .., last] = self { Some((last, init)) } else { None }
}
/// Returns the last element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&30), v.last());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.last());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn last(&self) -> Option<&T> {
    // Slice pattern rather than indexing: usable in a `const fn` and panic-free.
    if let [.., last] = self { Some(last) } else { None }
}
/// Returns a mutable reference to the last element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(last) = x.last_mut() {
///     *last = 10;
/// }
/// assert_eq!(x, &[0, 1, 10]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn last_mut(&mut self) -> Option<&mut T> {
    if let [.., last] = self { Some(last) } else { None }
}
/// Returns a reference to an element or subslice depending on the type of
/// index.
///
/// - If given a position, returns a reference to the element at that
///   position or `None` if out of bounds.
/// - If given a range, returns the subslice corresponding to that range,
///   or `None` if out of bounds.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&40), v.get(1));
/// assert_eq!(Some(&[10, 40][..]), v.get(0..2));
/// assert_eq!(None, v.get(3));
/// assert_eq!(None, v.get(0..4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get<I>(&self, index: I) -> Option<&I::Output>
where
    I: SliceIndex<Self>,
{
    // The `SliceIndex` impl (usize, Range, etc.) performs the bounds check.
    index.get(self)
}
/// Returns a mutable reference to an element or subslice depending on the
/// type of index (see [`get`]) or `None` if the index is out of bounds.
///
/// [`get`]: slice::get
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(elem) = x.get_mut(1) {
///     *elem = 42;
/// }
/// assert_eq!(x, &[0, 42, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
    I: SliceIndex<Self>,
{
    // The `SliceIndex` impl performs the bounds check.
    index.get_mut(self)
}
/// Returns a reference to an element or subslice, without doing bounds
/// checking.
///
/// For a safe alternative see [`get`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
/// [`get`]: slice::get
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
///
/// unsafe {
///     assert_eq!(x.get_unchecked(1), &2);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where
    I: SliceIndex<Self>,
{
    // SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`;
    // the slice is dereferenceable because `self` is a safe reference.
    // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
    unsafe { &*index.get_unchecked(self) }
}
/// Returns a mutable reference to an element or subslice, without doing
/// bounds checking.
///
/// For a safe alternative see [`get_mut`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
/// [`get_mut`]: slice::get_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
///
/// unsafe {
///     let elem = x.get_unchecked_mut(1);
///     *elem = 13;
/// }
/// assert_eq!(x, &[1, 13, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where
    I: SliceIndex<Self>,
{
    // SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
    // the slice is dereferenceable because `self` is a safe reference.
    // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
    unsafe { &mut *index.get_unchecked_mut(self) }
}
/// Returns a raw pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// The caller must also ensure that the memory the pointer (non-transitively) points to
/// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
/// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let x_ptr = x.as_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
///     }
/// }
/// ```
///
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
#[inline]
pub const fn as_ptr(&self) -> *const T {
    // Casting the wide `*const [T]` to `*const T` discards the length metadata,
    // leaving a thin pointer to the first element.
    self as *const [T] as *const T
}
/// Returns an unsafe mutable pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// let x_ptr = x.as_mut_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         *x_ptr.add(i) += 2;
///     }
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_mut_ptr(&mut self) -> *mut T {
    // Wide-to-thin pointer cast; the length metadata is dropped.
    self as *mut [T] as *mut T
}
/// Returns the two raw pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_ptr`] for warnings on using these pointers. The end pointer
/// requires extra caution, as it does not point to a valid element in the
/// slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// It can also be useful to check if a pointer to an element refers to an
/// element of this slice:
///
/// ```
/// let a = [1, 2, 3];
/// let x = &a[1] as *const _;
/// let y = &5 as *const _;
///
/// assert!(a.as_ptr_range().contains(&x));
/// assert!(!a.as_ptr_range().contains(&y));
/// ```
///
/// [`as_ptr`]: slice::as_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_ptr_range(&self) -> Range<*const T> {
    let start = self.as_ptr();
    // SAFETY: The `add` here is safe, because:
    //
    //   - Both pointers are part of the same object, as pointing directly
    //     past the object also counts.
    //
    //   - The size of the slice is never larger than isize::MAX bytes, as
    //     noted here:
    //       - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447
    //       - https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    //       - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety
    //     (This doesn't seem normative yet, but the very same assumption is
    //     made in many places, including the Index implementation of slices.)
    //
    //   - There is no wrapping around involved, as slices do not wrap past
    //     the end of the address space.
    //
    // See the documentation of pointer::add.
    let end = unsafe { start.add(self.len()) };
    start..end
}
/// Returns the two unsafe mutable pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_mut_ptr`] for warnings on using these pointers. The end
/// pointer requires extra caution, as it does not point to a valid element
/// in the slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
    let start = self.as_mut_ptr();
    // SAFETY: See as_ptr_range() above for why `add` here is safe.
    let end = unsafe { start.add(self.len()) };
    start..end
}
/// Swaps two elements in the slice.
///
/// # Arguments
///
/// * `a` - The index of the first element
/// * `b` - The index of the second element
///
/// # Panics
///
/// Panics if `a` or `b` are out of bounds.
///
/// # Examples
///
/// ```
/// let mut v = ["a", "b", "c", "d"];
/// v.swap(1, 3);
/// assert!(v == ["a", "d", "c", "b"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn swap(&mut self, a: usize, b: usize) {
    // Can't take two mutable loans from one vector, so instead use raw pointers.
    let pa = ptr::addr_of_mut!(self[a]);
    let pb = ptr::addr_of_mut!(self[b]);
    // SAFETY: `pa` and `pb` have been created from safe mutable references and refer
    // to elements in the slice and therefore are guaranteed to be valid and aligned.
    // Note that accessing the elements behind `a` and `b` is checked and will
    // panic when out of bounds. (`ptr::swap` also handles `a == b` correctly.)
    unsafe {
        ptr::swap(pa, pb);
    }
}
/// Reverses the order of elements in the slice, in place.
///
/// # Examples
///
/// ```
/// let mut v = [1, 2, 3];
/// v.reverse();
/// assert!(v == [3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn reverse(&mut self) {
    let mut i: usize = 0;
    let ln = self.len();
    // For very small types, all the individual reads in the normal
    // path perform poorly. We can do better, given efficient unaligned
    // load/store, by loading a larger chunk and reversing a register.
    // Ideally LLVM would do this for us, as it knows better than we do
    // whether unaligned reads are efficient (since that changes between
    // different ARM versions, for example) and what the best chunk size
    // would be. Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls
    // the loop, so we need to do this ourselves. (Hypothesis: reverse
    // is troublesome because the sides can be aligned differently --
    // will be, when the length is odd -- so there's no way of emitting
    // pre- and postludes to use fully-aligned SIMD in the middle.)
    let fast_unaligned = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
    // Fast path for 1-byte elements: swap `usize`-sized chunks at a time.
    if fast_unaligned && mem::size_of::<T>() == 1 {
        // Use the llvm.bswap intrinsic to reverse u8s in a usize
        let chunk = mem::size_of::<usize>();
        while i + chunk - 1 < ln / 2 {
            // SAFETY: There are several things to check here:
            //
            // - Note that `chunk` is either 4 or 8 due to the cfg check
            //   above. So `chunk - 1` is positive.
            // - Indexing with index `i` is fine as the loop check guarantees
            //   `i + chunk - 1 < ln / 2`
            //   <=> `i < ln / 2 - (chunk - 1) < ln / 2 < ln`.
            // - Indexing with index `ln - i - chunk = ln - (i + chunk)` is fine:
            //   - `i + chunk > 0` is trivially true.
            //   - The loop check guarantees:
            //     `i + chunk - 1 < ln / 2`
            //     <=> `i + chunk ≤ ln / 2 ≤ ln`, thus subtraction does not underflow.
            // - The `read_unaligned` and `write_unaligned` calls are fine:
            //   - `pa` points to index `i` where `i < ln / 2 - (chunk - 1)`
            //     (see above) and `pb` points to index `ln - i - chunk`, so
            //     both are at least `chunk`
            //     many bytes away from the end of `self`.
            //   - Any initialized memory is valid `usize`.
            unsafe {
                let ptr = self.as_mut_ptr();
                let pa = ptr.add(i);
                let pb = ptr.add(ln - i - chunk);
                let va = ptr::read_unaligned(pa as *mut usize);
                let vb = ptr::read_unaligned(pb as *mut usize);
                ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
                ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
            }
            i += chunk;
        }
    }
    // Fast path for 2-byte elements: swap two elements at a time via u32 rotates.
    if fast_unaligned && mem::size_of::<T>() == 2 {
        // Use rotate-by-16 to reverse u16s in a u32
        let chunk = mem::size_of::<u32>() / 2;
        while i + chunk - 1 < ln / 2 {
            // SAFETY: An unaligned u32 can be read from `i` if `i + 1 < ln`
            // (and obviously `i < ln`), because each element is 2 bytes and
            // we're reading 4.
            //
            // `i + chunk - 1 < ln / 2` # while condition
            // `i + 2 - 1 < ln / 2`
            // `i + 1 < ln / 2`
            //
            // Since it's less than the length divided by 2, then it must be
            // in bounds.
            //
            // This also means that the condition `0 < i + chunk <= ln` is
            // always respected, ensuring the `pb` pointer can be used
            // safely.
            unsafe {
                let ptr = self.as_mut_ptr();
                let pa = ptr.add(i);
                let pb = ptr.add(ln - i - chunk);
                let va = ptr::read_unaligned(pa as *mut u32);
                let vb = ptr::read_unaligned(pb as *mut u32);
                ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
                ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
            }
            i += chunk;
        }
    }
    // Generic tail loop: swap one pair of elements per iteration.
    while i < ln / 2 {
        // SAFETY: `i` is less than half the length of the slice so
        // accessing `i` and `ln - i - 1` is safe (`i` starts at 0 and
        // will not go further than `ln / 2 - 1`).
        // The resulting pointers `pa` and `pb` are therefore valid and
        // aligned, and can be read from and written to.
        unsafe {
            // Unsafe swap to avoid the bounds check in safe swap.
            let ptr = self.as_mut_ptr();
            let pa = ptr.add(i);
            let pb = ptr.add(ln - i - 1);
            ptr::swap(pa, pb);
        }
        i += 1;
    }
}
/// Returns an iterator over the slice.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let mut iterator = x.iter();
///
/// assert_eq!(iterator.next(), Some(&1));
/// assert_eq!(iterator.next(), Some(&2));
/// assert_eq!(iterator.next(), Some(&4));
/// assert_eq!(iterator.next(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter(&self) -> Iter<'_, T> {
    // All iteration state and logic live in the `Iter` type; this is a
    // thin constructor call borrowing `self` immutably.
    Iter::new(self)
}
/// Returns an iterator that allows modifying each value.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// for elem in x.iter_mut() {
///     *elem += 2;
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
    // Mutable counterpart of `iter`: `IterMut` holds the exclusive borrow
    // and yields `&mut T` items.
    IterMut::new(self)
}
/// Returns an iterator over all contiguous windows of length
/// `size`. The windows overlap. If the slice is shorter than
/// `size`, the iterator returns no values.
///
/// # Panics
///
/// Panics if `size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['r', 'u', 's', 't'];
/// let mut iter = slice.windows(2);
/// assert_eq!(iter.next().unwrap(), &['r', 'u']);
/// assert_eq!(iter.next().unwrap(), &['u', 's']);
/// assert_eq!(iter.next().unwrap(), &['s', 't']);
/// assert!(iter.next().is_none());
/// ```
///
/// If the slice is shorter than `size`:
///
/// ```
/// let slice = ['f', 'o', 'o'];
/// let mut iter = slice.windows(4);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn windows(&self, size: usize) -> Windows<'_, T> {
    // `Windows` stores the window width as a `NonZeroUsize`; the
    // conversion here is also what produces the documented panic when
    // `size == 0`.
    Windows::new(self, NonZeroUsize::new(size).expect("size is zero"))
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`rchunks`] for the same iterator but starting at the end of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert_eq!(iter.next().unwrap(), &['m']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`chunks_exact`]: slice::chunks_exact
/// [`rchunks`]: slice::rchunks
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
    // An explicit message beats `assert_ne!`'s generic "left != right"
    // output when a caller hits the documented zero-size panic.
    assert!(chunk_size != 0, "chunk size must be non-zero");
    Chunks::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`rchunks_mut`] for the same iterator but starting at
/// the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 3]);
/// ```
///
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
/// [`rchunks_mut`]: slice::rchunks_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
    // Explicit panic message so a zero chunk size is self-explanatory in
    // the backtrace, matching the other chunking methods.
    assert!(chunk_size != 0, "chunk size must be non-zero");
    ChunksMut::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`chunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`rchunks_exact`] for the same iterator but starting at the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks`]: slice::chunks
/// [`rchunks_exact`]: slice::rchunks_exact
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
    // Explicit panic message instead of `assert_ne!`'s generic output;
    // consistent with the rest of the chunking API.
    assert!(chunk_size != 0, "chunk size must be non-zero");
    ChunksExact::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`chunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`rchunks_exact_mut`] for the same iterator but starting at the end of
/// the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_exact_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_mut`]: slice::chunks_mut
/// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
    // Explicit panic message instead of `assert_ne!`'s generic output;
    // consistent with the rest of the chunking API.
    assert!(chunk_size != 0, "chunk size must be non-zero");
    ChunksExactMut::new(self, chunk_size)
}
/// Splits the slice into a slice of `N`-element arrays,
/// assuming that there's no remainder.
///
/// # Safety
///
/// This may only be called when
/// - The slice splits exactly into `N`-element chunks (aka `self.len() % N == 0`).
/// - `N != 0`.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice: &[char] = &['l', 'o', 'r', 'e', 'm', '!'];
/// let chunks: &[[char; 1]] =
///     // SAFETY: 1-element chunks never have remainder
///     unsafe { slice.as_chunks_unchecked() };
/// assert_eq!(chunks, &[['l'], ['o'], ['r'], ['e'], ['m'], ['!']]);
/// let chunks: &[[char; 3]] =
///     // SAFETY: The slice length (6) is a multiple of 3
///     unsafe { slice.as_chunks_unchecked() };
/// assert_eq!(chunks, &[['l', 'o', 'r'], ['e', 'm', '!']]);
///
/// // These would be unsound:
/// // let chunks: &[[_; 5]] = slice.as_chunks_unchecked() // The slice length is not a multiple of 5
/// // let chunks: &[[_; 0]] = slice.as_chunks_unchecked() // Zero-length chunks are never allowed
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
    // The contract is only debug-checked: release-mode callers must
    // uphold `N != 0` and `len % N == 0` themselves (see # Safety).
    debug_assert_ne!(N, 0);
    debug_assert_eq!(self.len() % N, 0);
    let new_len =
        // SAFETY: Our precondition is exactly what's needed to call this
        unsafe { crate::intrinsics::exact_div(self.len(), N) };
    // SAFETY: We cast a slice of `new_len * N` elements into
    // a slice of `new_len` many `N` elements chunks.
    unsafe { from_raw_parts(self.as_ptr().cast(), new_len) }
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the beginning of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let (chunks, remainder) = slice.as_chunks();
/// assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
/// assert_eq!(remainder, &['m']);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) {
    // Explicit panic message instead of `assert_ne!`'s generic output.
    assert!(N != 0, "chunk size must be non-zero");
    let len = self.len() / N;
    let (multiple_of_n, remainder) = self.split_at(len * N);
    // SAFETY: We already panicked for zero, and ensured by construction
    // that the length of the subslice is a multiple of N.
    let array_slice = unsafe { multiple_of_n.as_chunks_unchecked() };
    (array_slice, remainder)
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the end of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let (remainder, chunks) = slice.as_rchunks();
/// assert_eq!(remainder, &['l']);
/// assert_eq!(chunks, &[['o', 'r'], ['e', 'm']]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]]) {
    // Explicit panic message instead of `assert_ne!`'s generic output.
    assert!(N != 0, "chunk size must be non-zero");
    let len = self.len() / N;
    let (remainder, multiple_of_n) = self.split_at(self.len() - len * N);
    // SAFETY: We already panicked for zero, and ensured by construction
    // that the length of the subslice is a multiple of N.
    let array_slice = unsafe { multiple_of_n.as_chunks_unchecked() };
    (remainder, array_slice)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are array references and do not overlap. If `N` does not divide the
/// length of the slice, then the last up to `N-1` elements will be omitted and can be
/// retrieved from the `remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.array_chunks();
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks_exact`]: slice::chunks_exact
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
    // Explicit panic message instead of `assert_ne!`'s generic output.
    assert!(N != 0, "chunk size must be non-zero");
    ArrayChunks::new(self)
}
/// Splits the slice into a slice of `N`-element arrays,
/// assuming that there's no remainder.
///
/// # Safety
///
/// This may only be called when
/// - The slice splits exactly into `N`-element chunks (aka `self.len() % N == 0`).
/// - `N != 0`.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice: &mut [char] = &mut ['l', 'o', 'r', 'e', 'm', '!'];
/// let chunks: &mut [[char; 1]] =
///     // SAFETY: 1-element chunks never have remainder
///     unsafe { slice.as_chunks_unchecked_mut() };
/// chunks[0] = ['L'];
/// assert_eq!(chunks, &[['L'], ['o'], ['r'], ['e'], ['m'], ['!']]);
/// let chunks: &mut [[char; 3]] =
///     // SAFETY: The slice length (6) is a multiple of 3
///     unsafe { slice.as_chunks_unchecked_mut() };
/// chunks[1] = ['a', 'x', '?'];
/// assert_eq!(slice, &['L', 'o', 'r', 'a', 'x', '?']);
///
/// // These would be unsound:
/// // let chunks: &[[_; 5]] = slice.as_chunks_unchecked_mut() // The slice length is not a multiple of 5
/// // let chunks: &[[_; 0]] = slice.as_chunks_unchecked_mut() // Zero-length chunks are never allowed
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub unsafe fn as_chunks_unchecked_mut<const N: usize>(&mut self) -> &mut [[T; N]] {
    // The contract is only debug-checked: release-mode callers must
    // uphold `N != 0` and `len % N == 0` themselves (see # Safety).
    debug_assert_ne!(N, 0);
    debug_assert_eq!(self.len() % N, 0);
    let new_len =
        // SAFETY: Our precondition is exactly what's needed to call this
        unsafe { crate::intrinsics::exact_div(self.len(), N) };
    // SAFETY: We cast a slice of `new_len * N` elements into
    // a slice of `new_len` many `N` elements chunks.
    unsafe { from_raw_parts_mut(self.as_mut_ptr().cast(), new_len) }
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the beginning of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// let (chunks, remainder) = v.as_chunks_mut();
/// remainder[0] = 9;
/// for chunk in chunks {
///     *chunk = [count; 2];
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 9]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_chunks_mut<const N: usize>(&mut self) -> (&mut [[T; N]], &mut [T]) {
    // Explicit panic message instead of `assert_ne!`'s generic output.
    assert!(N != 0, "chunk size must be non-zero");
    let len = self.len() / N;
    let (multiple_of_n, remainder) = self.split_at_mut(len * N);
    // SAFETY: We already panicked for zero, and ensured by construction
    // that the length of the subslice is a multiple of N.
    let array_slice = unsafe { multiple_of_n.as_chunks_unchecked_mut() };
    (array_slice, remainder)
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the end of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// let (remainder, chunks) = v.as_rchunks_mut();
/// remainder[0] = 9;
/// for chunk in chunks {
///     *chunk = [count; 2];
///     count += 1;
/// }
/// assert_eq!(v, &[9, 1, 1, 2, 2]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_rchunks_mut<const N: usize>(&mut self) -> (&mut [T], &mut [[T; N]]) {
    // Explicit panic message instead of `assert_ne!`'s generic output.
    assert!(N != 0, "chunk size must be non-zero");
    let len = self.len() / N;
    let (remainder, multiple_of_n) = self.split_at_mut(self.len() - len * N);
    // SAFETY: We already panicked for zero, and ensured by construction
    // that the length of the subslice is a multiple of N.
    let array_slice = unsafe { multiple_of_n.as_chunks_unchecked_mut() };
    (remainder, array_slice)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable array references and do not overlap. If `N` does not divide
/// the length of the slice, then the last up to `N-1` elements will be omitted and
/// can be retrieved from the `into_remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact_mut`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.array_chunks_mut() {
///     *chunk = [count; 2];
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
    // Explicit panic message instead of `assert_ne!`'s generic output.
    assert!(N != 0, "chunk size must be non-zero");
    ArrayChunksMut::new(self)
}
/// Returns an iterator over overlapping windows of `N` elements of a slice,
/// starting at the beginning of the slice.
///
/// This is the const generic equivalent of [`windows`].
///
/// If `N` is greater than the size of the slice, it will return no windows.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_windows)]
/// let slice = [0, 1, 2, 3];
/// let mut iter = slice.array_windows();
/// assert_eq!(iter.next().unwrap(), &[0, 1]);
/// assert_eq!(iter.next().unwrap(), &[1, 2]);
/// assert_eq!(iter.next().unwrap(), &[2, 3]);
/// assert!(iter.next().is_none());
/// ```
///
/// [`windows`]: slice::windows
#[unstable(feature = "array_windows", issue = "75027")]
#[inline]
pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> {
    // Explicit panic message instead of `assert_ne!`'s generic output.
    assert!(N != 0, "window size must be non-zero");
    ArrayWindows::new(self)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`chunks`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert_eq!(iter.next().unwrap(), &['l']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`rchunks_exact`]: slice::rchunks_exact
/// [`chunks`]: slice::chunks
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
    // A bare `assert!` gives an unhelpful panic message; state the
    // violated invariant explicitly, consistent with `chunks`.
    assert!(chunk_size != 0, "chunk size must be non-zero");
    RChunks::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`chunks_mut`] for the same iterator but starting at the
/// beginning of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[3, 2, 2, 1, 1]);
/// ```
///
/// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
/// [`chunks_mut`]: slice::chunks_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
    // A bare `assert!` gives an unhelpful panic message; state the
    // violated invariant explicitly, consistent with `chunks_mut`.
    assert!(chunk_size != 0, "chunk size must be non-zero");
    RChunksMut::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// end of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`rchunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`chunks_exact`] for the same iterator but starting at the beginning of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['l']);
/// ```
///
/// [`chunks`]: slice::chunks
/// [`rchunks`]: slice::rchunks
/// [`chunks_exact`]: slice::chunks_exact
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
    // A bare `assert!` gives an unhelpful panic message; state the
    // violated invariant explicitly, consistent with `chunks_exact`.
    assert!(chunk_size != 0, "chunk size must be non-zero");
    RChunksExact::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`rchunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`chunks_exact_mut`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_exact_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[0, 2, 2, 1, 1]);
/// ```
///
/// [`chunks_mut`]: slice::chunks_mut
/// [`rchunks_mut`]: slice::rchunks_mut
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
    // A bare `assert!` gives an unhelpful panic message; state the
    // violated invariant explicitly, consistent with `chunks_exact_mut`.
    assert!(chunk_size != 0, "chunk size must be non-zero");
    RChunksExactMut::new(self, chunk_size)
}
/// Returns an iterator over the slice producing non-overlapping runs
/// of elements using the predicate to separate them.
///
/// The predicate is called on two elements following themselves,
/// it means the predicate is called on `slice[0]` and `slice[1]`
/// then on `slice[1]` and `slice[2]` and so on.
///
/// # Examples
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &[1, 1, 1, 3, 3, 2, 2, 2];
///
/// let mut iter = slice.group_by(|a, b| a == b);
///
/// assert_eq!(iter.next(), Some(&[1, 1, 1][..]));
/// assert_eq!(iter.next(), Some(&[3, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 2, 2][..]));
/// assert_eq!(iter.next(), None);
/// ```
///
/// This method can be used to extract the sorted subslices:
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &[1, 1, 2, 3, 2, 3, 2, 3, 4];
///
/// let mut iter = slice.group_by(|a, b| a <= b);
///
/// assert_eq!(iter.next(), Some(&[1, 1, 2, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 3, 4][..]));
/// assert_eq!(iter.next(), None);
/// ```
#[unstable(feature = "slice_group_by", issue = "80552")]
#[inline]
pub fn group_by<F>(&self, pred: F) -> GroupBy<'_, T, F>
where
    F: FnMut(&T, &T) -> bool,
{
    // Construction is free: `GroupBy` applies `pred` lazily as the
    // iterator is advanced.
    GroupBy::new(self, pred)
}
/// Returns an iterator over the slice producing non-overlapping mutable
/// runs of elements using the predicate to separate them.
///
/// The predicate is called on two elements following themselves,
/// it means the predicate is called on `slice[0]` and `slice[1]`
/// then on `slice[1]` and `slice[2]` and so on.
///
/// # Examples
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &mut [1, 1, 1, 3, 3, 2, 2, 2];
///
/// let mut iter = slice.group_by_mut(|a, b| a == b);
///
/// assert_eq!(iter.next(), Some(&mut [1, 1, 1][..]));
/// assert_eq!(iter.next(), Some(&mut [3, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 2, 2][..]));
/// assert_eq!(iter.next(), None);
/// ```
///
/// This method can be used to extract the sorted subslices:
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &mut [1, 1, 2, 3, 2, 3, 2, 3, 4];
///
/// let mut iter = slice.group_by_mut(|a, b| a <= b);
///
/// assert_eq!(iter.next(), Some(&mut [1, 1, 2, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 3, 4][..]));
/// assert_eq!(iter.next(), None);
/// ```
#[unstable(feature = "slice_group_by", issue = "80552")]
#[inline]
pub fn group_by_mut<F>(&mut self, pred: F) -> GroupByMut<'_, T, F>
where
    F: FnMut(&T, &T) -> bool,
{
    // Mutable counterpart of `group_by`: same lazy grouping, but the
    // iterator yields `&mut [T]` runs.
    GroupByMut::new(self, pred)
}
/// Divides one slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 4, 5, 6];
///
/// {
///     let (left, right) = v.split_at(0);
///     assert_eq!(left, []);
///     assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = v.split_at(2);
///     assert_eq!(left, [1, 2]);
///     assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = v.split_at(6);
///     assert_eq!(left, [1, 2, 3, 4, 5, 6]);
///     assert_eq!(right, []);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
    assert!(mid <= self.len());
    // SAFETY: `mid` was just asserted to be `<= self.len()`, which is
    // exactly the contract `split_at_unchecked` requires.
    unsafe { self.split_at_unchecked(mid) }
}
/// Divides one mutable slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let mut v = [1, 0, 3, 0, 5, 6];
/// let (left, right) = v.split_at_mut(2);
/// assert_eq!(left, [1, 0]);
/// assert_eq!(right, [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    assert!(mid <= self.len());
    // SAFETY: `mid` was just asserted to be `<= self.len()`, which is
    // exactly the contract `split_at_mut_unchecked` requires. The two
    // halves `[0, mid)` and `[mid, len)` do not overlap, so returning
    // two `&mut` references is sound.
    unsafe { self.split_at_mut_unchecked(mid) }
}
/// Divides one slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at`]: slice::split_at
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```compile_fail
/// #![feature(slice_split_at_unchecked)]
///
/// let v = [1, 2, 3, 4, 5, 6];
///
/// unsafe {
///    let (left, right) = v.split_at_unchecked(0);
///    assert_eq!(left, []);
///    assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// unsafe {
///     let (left, right) = v.split_at_unchecked(2);
///     assert_eq!(left, [1, 2]);
///     assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// unsafe {
///     let (left, right) = v.split_at_unchecked(6);
///     assert_eq!(left, [1, 2, 3, 4, 5, 6]);
///     assert_eq!(right, []);
/// }
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`, so both
    // subranges below are in bounds.
    unsafe {
        let head = self.get_unchecked(..mid);
        let tail = self.get_unchecked(mid..);
        (head, tail)
    }
}
/// Divides one mutable slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at_mut`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at_mut`]: slice::split_at_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```compile_fail
/// #![feature(slice_split_at_unchecked)]
///
/// let mut v = [1, 0, 3, 0, 5, 6];
/// // scoped to restrict the lifetime of the borrows
/// unsafe {
///     let (left, right) = v.split_at_mut_unchecked(2);
///     assert_eq!(left, [1, 0]);
///     assert_eq!(right, [3, 0, 5, 6]);
///     left[1] = 2;
///     right[1] = 4;
/// }
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    let total = self.len();
    let base = self.as_mut_ptr();
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`.
    //
    // The ranges `[0, mid)` and `[mid, total)` are disjoint, so handing
    // out two exclusive references simultaneously is sound.
    unsafe {
        let head = from_raw_parts_mut(base, mid);
        let tail = from_raw_parts_mut(base.add(mid), total - mid);
        (head, tail)
    }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the first element is matched, an empty slice will be the first item
/// returned by the iterator. Similarly, if the last element in the slice
/// is matched, an empty slice will be the last item returned by the
/// iterator:
///
/// ```
/// let slice = [10, 40, 33];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert!(iter.next().is_none());
/// ```
///
/// If two matched elements are directly adjacent, an empty slice will be
/// present between them:
///
/// ```
/// let slice = [10, 6, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split<F>(&self, pred: F) -> Split<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Construction is free: `Split` evaluates `pred` lazily while iterating.
    Split::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_mut(|num| *num % 3 == 0) {
///     group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `split`: the iterator yields `&mut [T]` groups.
    SplitMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is contained in the end of the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the last element of the slice is matched,
/// that element will be considered the terminator of the preceding slice.
/// That slice will be the last item returned by the iterator.
///
/// ```
/// let slice = [3, 10, 40, 33];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[3]);
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "split_inclusive", since = "1.51.0")]
#[inline]
pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Same lazy construction as `split`, but the iterator keeps each
    // matching element attached to the end of the preceding subslice.
    SplitInclusive::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is contained in the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
///     let terminator_idx = group.len()-1;
///     group[terminator_idx] = 1;
/// }
/// assert_eq!(v, [10, 40, 1, 20, 1, 1]);
/// ```
#[stable(feature = "split_inclusive", since = "1.51.0")]
#[inline]
pub fn split_inclusive_mut<F>(&mut self, pred: F) -> SplitInclusiveMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `split_inclusive`.
    SplitInclusiveMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, starting at the end of the slice and working backwards.
/// The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [11, 22, 33, 0, 44, 55];
/// let mut iter = slice.rsplit(|num| *num == 0);
///
/// assert_eq!(iter.next().unwrap(), &[44, 55]);
/// assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
/// assert_eq!(iter.next(), None);
/// ```
///
/// As with `split()`, if the first or last element is matched, an empty
/// slice will be the first (or last) item returned by the iterator.
///
/// ```
/// let v = &[0, 1, 1, 2, 3, 5, 8];
/// let mut it = v.rsplit(|n| *n % 2 == 0);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next().unwrap(), &[3, 5]);
/// assert_eq!(it.next().unwrap(), &[1, 1]);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next(), None);
/// ```
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit<F>(&self, pred: F) -> RSplit<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Reverse-direction variant of `split`; groups are produced back-to-front.
    RSplit::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`, starting at the end of the slice and working
/// backwards. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [100, 400, 300, 200, 600, 500];
///
/// let mut count = 0;
/// for group in v.rsplit_mut(|num| *num % 3 == 0) {
///     count += 1;
///     group[0] = count;
/// }
/// assert_eq!(v, [3, 400, 300, 2, 600, 1]);
/// ```
///
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `rsplit`.
    RSplitMut::new(self, pred)
}
/// Returns an iterator over at most `n` subslices of this slice, separated
/// by elements matching `pred`. The matched element is not contained in
/// the subslices.
///
/// The final subslice yielded, if any, contains everything that had not
/// yet been consumed when the limit was reached.
///
/// # Examples
///
/// Print the slice split once by numbers divisible by 3 (i.e., `[10, 40]`,
/// `[20, 60, 50]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn(2, |num| *num % 3 == 0) {
///     println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Build the unbounded splitter first, then cap it at `n` pieces.
    let unbounded = self.split(pred);
    SplitN::new(unbounded, n)
}
/// Returns an iterator over at most `n` mutable subslices of this slice,
/// separated by elements matching `pred`. The matched element is not
/// contained in the subslices.
///
/// The final subslice yielded, if any, contains everything that had not
/// yet been consumed when the limit was reached.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn_mut(2, |num| *num % 3 == 0) {
///     group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 50]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Build the unbounded mutable splitter first, then cap it at `n` pieces.
    let unbounded = self.split_mut(pred);
    SplitNMut::new(unbounded, n)
}
/// Returns an iterator over at most `n` subslices of this slice, separated
/// by elements matching `pred`, walking from the end of the slice towards
/// the front. The matched element is not contained in the subslices.
///
/// The final subslice yielded, if any, contains everything that had not
/// yet been consumed when the limit was reached.
///
/// # Examples
///
/// Print the slice split once, starting from the end, by numbers divisible
/// by 3 (i.e., `[50]`, `[10, 40, 30, 20]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.rsplitn(2, |num| *num % 3 == 0) {
///     println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Build the unbounded reverse splitter first, then cap it at `n` pieces.
    let unbounded = self.rsplit(pred);
    RSplitN::new(unbounded, n)
}
/// Returns an iterator over at most `n` mutable subslices of this slice,
/// separated by elements matching `pred`, walking from the end of the
/// slice towards the front. The matched element is not contained in the
/// subslices.
///
/// The final subslice yielded, if any, contains everything that had not
/// yet been consumed when the limit was reached.
///
/// # Examples
///
/// ```
/// let mut s = [10, 40, 30, 20, 60, 50];
///
/// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
///     group[0] = 1;
/// }
/// assert_eq!(s, [1, 40, 30, 20, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Build the unbounded mutable reverse splitter first, then cap it at `n` pieces.
    let unbounded = self.rsplit_mut(pred);
    RSplitNMut::new(unbounded, n)
}
/// Returns `true` if the slice contains an element with the given value.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.contains(&30));
/// assert!(!v.contains(&50));
/// ```
///
/// If you do not have a `&T`, but some other value that you can compare
/// with one (for example, `String` implements `PartialEq<str>`), you can
/// use `iter().any`:
///
/// ```
/// let v = [String::from("hello"), String::from("world")]; // slice of `String`
/// assert!(v.iter().any(|e| e == "hello")); // search with `&str`
/// assert!(!v.iter().any(|e| e == "hi"));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn contains(&self, x: &T) -> bool
where
    T: PartialEq,
{
    // Dispatch through the `SliceContains` helper trait rather than a plain
    // iterator scan, so that element types can supply a specialized search.
    cmp::SliceContains::slice_contains(x, self)
}
/// Returns `true` if `needle` is a prefix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.starts_with(&[10]));
/// assert!(v.starts_with(&[10, 40]));
/// assert!(!v.starts_with(&[50]));
/// assert!(!v.starts_with(&[10, 50]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.starts_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.starts_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn starts_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // `get(..len)` yields `None` when the needle is longer than the slice,
    // which is exactly the "cannot possibly be a prefix" case.
    match self.get(..needle.len()) {
        Some(head) => head == needle,
        None => false,
    }
}
/// Returns `true` if `needle` is a suffix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.ends_with(&[30]));
/// assert!(v.ends_with(&[40, 30]));
/// assert!(!v.ends_with(&[50]));
/// assert!(!v.ends_with(&[50, 30]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.ends_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.ends_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ends_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // `checked_sub` fails when the needle is longer than the slice, in which
    // case it cannot be a suffix; otherwise compare against the tail.
    match self.len().checked_sub(needle.len()) {
        Some(start) => &self[start..] == needle,
        None => false,
    }
}
/// Returns a subslice with the prefix removed.
///
/// If the slice starts with `prefix`, returns the subslice after the prefix, wrapped in `Some`.
/// If `prefix` is empty, simply returns the original slice.
///
/// If the slice does not start with `prefix`, returns `None`.
///
/// # Examples
///
/// ```
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
/// assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
/// assert_eq!(v.strip_prefix(&[50]), None);
/// assert_eq!(v.strip_prefix(&[10, 50]), None);
///
/// let prefix : &str = "he";
/// assert_eq!(b"hello".strip_prefix(prefix.as_bytes()),
///            Some(b"llo".as_ref()));
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[stable(feature = "slice_strip", since = "1.51.0")]
pub fn strip_prefix<P: SlicePattern<Item = T> + ?Sized>(&self, prefix: &P) -> Option<&[T]>
where
    T: PartialEq,
{
    // This function will need rewriting if and when SlicePattern becomes more sophisticated.
    let prefix = prefix.as_slice();
    let n = prefix.len();
    // A prefix longer than the slice can never match.
    if n > self.len() {
        return None;
    }
    let (head, tail) = self.split_at(n);
    if head == prefix { Some(tail) } else { None }
}
/// Returns a subslice with the suffix removed.
///
/// If the slice ends with `suffix`, returns the subslice before the suffix, wrapped in `Some`.
/// If `suffix` is empty, simply returns the original slice.
///
/// If the slice does not end with `suffix`, returns `None`.
///
/// # Examples
///
/// ```
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
/// assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
/// assert_eq!(v.strip_suffix(&[50]), None);
/// assert_eq!(v.strip_suffix(&[50, 30]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[stable(feature = "slice_strip", since = "1.51.0")]
pub fn strip_suffix<P: SlicePattern<Item = T> + ?Sized>(&self, suffix: &P) -> Option<&[T]>
where
    T: PartialEq,
{
    // This function will need rewriting if and when SlicePattern becomes more sophisticated.
    let suffix = suffix.as_slice();
    // `checked_sub` rejects suffixes longer than the slice up front.
    let split = self.len().checked_sub(suffix.len())?;
    let (head, tail) = self.split_at(split);
    if tail == suffix { Some(head) } else { None }
}
/// Binary searches this sorted slice for a given element.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. The index is chosen
/// deterministically, but is subject to change in future versions of Rust.
/// If the value is not found then [`Result::Err`] is returned, containing
/// the index where a matching element could be inserted while maintaining
/// sorted order.
///
/// See also [`binary_search_by`], [`binary_search_by_key`], and [`partition_point`].
///
/// [`binary_search_by`]: slice::binary_search_by
/// [`binary_search_by_key`]: slice::binary_search_by_key
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// assert_eq!(s.binary_search(&13),  Ok(9));
/// assert_eq!(s.binary_search(&4),   Err(7));
/// assert_eq!(s.binary_search(&100), Err(13));
/// let r = s.binary_search(&1);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
///
/// If you want to insert an item to a sorted vector, while maintaining
/// sort order:
///
/// ```
/// let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
/// let num = 42;
/// let idx = s.binary_search(&num).unwrap_or_else(|x| x);
/// s.insert(idx, num);
/// assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where
    T: Ord,
{
    // Delegate to the comparator-based search, probing each element
    // against the target with `Ord::cmp`.
    self.binary_search_by(|p| p.cmp(x))
}
/// Binary searches this sorted slice with a comparator function.
///
/// The comparator function should implement an order consistent
/// with the sort order of the underlying slice, returning an
/// order code that indicates whether its argument is `Less`,
/// `Equal` or `Greater` the desired target.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. The index is chosen
/// deterministically, but is subject to change in future versions of Rust.
/// If the value is not found then [`Result::Err`] is returned, containing
/// the index where a matching element could be inserted while maintaining
/// sorted order.
///
/// See also [`binary_search`], [`binary_search_by_key`], and [`partition_point`].
///
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by_key`]: slice::binary_search_by_key
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// let seek = 13;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
/// let seek = 4;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
/// let seek = 100;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
/// let seek = 1;
/// let r = s.binary_search_by(|probe| probe.cmp(&seek));
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> Ordering,
{
    // Loop invariant: the target, if present, lies within `[left, right)`,
    // and `size == right - left`.
    let mut size = self.len();
    let mut left = 0;
    let mut right = size;
    while left < right {
        // `left + size / 2` cannot overflow: both are bounded by `self.len()`.
        let mid = left + size / 2;
        // SAFETY: the call is made safe by the following invariants:
        // - `mid >= 0`
        // - `mid < size`: `mid` is limited by `[left; right)` bound.
        let cmp = f(unsafe { self.get_unchecked(mid) });
        // The reason why we use if/else control flow rather than match
        // is because match reorders comparison operations, which is perf sensitive.
        // This is x86 asm for u8: https://rust.godbolt.org/z/8Y8Pra.
        if cmp == Less {
            left = mid + 1;
        } else if cmp == Greater {
            right = mid;
        } else {
            // SAFETY: same as the `get_unchecked` above
            unsafe { crate::intrinsics::assume(mid < self.len()) };
            return Ok(mid);
        }
        size = right - left;
    }
    // Not found: `left` is the insertion point that keeps the slice sorted.
    Err(left)
}
/// Binary searches this sorted slice with a key extraction function.
///
/// Assumes that the slice is sorted by the key, for instance with
/// [`sort_by_key`] using the same key extraction function.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. The index is chosen
/// deterministically, but is subject to change in future versions of Rust.
/// If the value is not found then [`Result::Err`] is returned, containing
/// the index where a matching element could be inserted while maintaining
/// sorted order.
///
/// See also [`binary_search`], [`binary_search_by`], and [`partition_point`].
///
/// [`sort_by_key`]: slice::sort_by_key
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by`]: slice::binary_search_by
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements in a slice of pairs sorted by
/// their second elements. The first is found, with a uniquely
/// determined position; the second and third are not found; the
/// fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
///          (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
///          (1, 21), (2, 34), (4, 55)];
///
/// assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b),  Ok(9));
/// assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b),   Err(7));
/// assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13));
/// let r = s.binary_search_by_key(&1, |&(a, b)| b);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
// Lint rustdoc::broken_intra_doc_links is allowed as `slice::sort_by_key` is
// in crate `alloc`, and as such doesn't exists yet when building `core`.
// links to downstream crate: #74481. Since primitives are only documented in
// libstd (#73423), this never leads to broken links in practice.
#[allow(rustdoc::broken_intra_doc_links)]
#[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
#[inline]
pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> B,
    B: Ord,
{
    // Adapt to the comparator form: extract each probe's key with `f`,
    // then order it against the target key `b`.
    self.binary_search_by(|k| f(k).cmp(b))
}
/// Sorts the slice, but might not preserve the order of equal elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort_unstable();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable(&mut self)
where
    T: Ord,
{
    // `sort::quicksort` takes an "is less" predicate; derive it from `Ord::lt`.
    sort::quicksort(self, |a, b| a.lt(b));
}
/// Sorts the slice with a comparator function, but might not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
/// total order if it is (for all `a`, `b` and `c`):
///
/// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
/// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
/// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
/// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
///
/// ```
/// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
/// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
/// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
/// ```
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_unstable_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_unstable_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by<F>(&mut self, mut compare: F)
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Adapt the three-way comparator to the boolean "is less" form
    // that `sort::quicksort` expects.
    sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less);
}
/// Sorts the slice with a key extraction function, but might not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(m \* *n* \* log(*n*)) worst-case, where the key function is
/// *O*(*m*).
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// Due to its key calling strategy, [`sort_unstable_by_key`](#method.sort_unstable_by_key)
/// is likely to be slower than [`sort_by_cached_key`](#method.sort_by_cached_key) in
/// cases where the key function is expensive.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_unstable_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by_key<K, F>(&mut self, mut f: F)
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // `f` runs twice per comparison (once per operand); see the doc note
    // above recommending `sort_by_cached_key` for expensive key functions.
    sort::quicksort(self, |a, b| f(a).lt(&f(b)));
}
/// Reorder the slice such that the element at `index` is at its final sorted position.
///
/// Deprecated alias of [`select_nth_unstable`](#method.select_nth_unstable); see that
/// method for the full contract, panic conditions, and examples.
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
// Message fixed to match the sibling deprecations' phrasing ("use X() instead").
#[rustc_deprecated(since = "1.49.0", reason = "use select_nth_unstable() instead")]
#[inline]
pub fn partition_at_index(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    self.select_nth_unstable(index)
}
/// Reorder the slice with a comparator function such that the element at `index` is at its
/// final sorted position.
///
/// Deprecated alias of [`select_nth_unstable_by`](#method.select_nth_unstable_by).
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[rustc_deprecated(since = "1.49.0", reason = "use select_nth_unstable_by() instead")]
#[inline]
pub fn partition_at_index_by<F>(
    &mut self,
    index: usize,
    compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Forwards to the renamed method; kept only for backward compatibility.
    self.select_nth_unstable_by(index, compare)
}
/// Reorder the slice with a key extraction function such that the element at `index` is at its
/// final sorted position.
///
/// Deprecated alias of [`select_nth_unstable_by_key`](#method.select_nth_unstable_by_key);
/// see that method for the full contract, panic conditions, and examples.
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
// Message fixed to match the sibling deprecations' phrasing ("use X() instead").
#[rustc_deprecated(since = "1.49.0", reason = "use select_nth_unstable_by_key() instead")]
#[inline]
pub fn partition_at_index_by_key<K, F>(
    &mut self,
    index: usize,
    f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    self.select_nth_unstable_by_key(index, f)
}
/// Reorder the slice such that the element at `index` is at its final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index`. Additionally, this reordering is
/// unstable (i.e. any number of equal elements may end up at position `index`), in-place
/// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also known as "kth
/// element" in other libraries. It returns a triplet of the following values: all elements less
/// than the one at the given index, the value at the given index, and all elements greater than
/// the one at the given index.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median
/// v.select_nth_unstable(2);
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [-3, -5, 1, 2, 4] ||
///         v == [-5, -3, 1, 2, 4] ||
///         v == [-3, -5, 1, 4, 2] ||
///         v == [-5, -3, 1, 4, 2]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    // `partition_at_index` takes an "is less" predicate; derive it from `Ord::lt`.
    let mut f = |a: &T, b: &T| a.lt(b);
    sort::partition_at_index(self, index, &mut f)
}
/// Reorder the slice with a comparator function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the comparator function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index,
/// and all elements greater than the one at the given index, using the provided comparator
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median as if the slice were sorted in descending order.
/// v.select_nth_unstable_by(2, |a, b| b.cmp(a));
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [2, 4, 1, -5, -3] ||
///         v == [2, 4, 1, -3, -5] ||
///         v == [4, 2, 1, -5, -3] ||
///         v == [4, 2, 1, -3, -5]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable_by<F>(
    &mut self,
    index: usize,
    mut compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Adapt the three-way comparator to the boolean "is less" predicate
    // that `sort::partition_at_index` expects.
    let mut f = |a: &T, b: &T| compare(a, b) == Less;
    sort::partition_at_index(self, index, &mut f)
}
/// Reorder the slice with a key extraction function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the key extraction function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index, and
/// all elements greater than the one at the given index, using the provided key extraction
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Return the median as if the array were sorted according to absolute value.
/// v.select_nth_unstable_by_key(2, |a| a.abs());
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [1, 2, -3, 4, -5] ||
///         v == [1, 2, -3, -5, 4] ||
///         v == [2, 1, -3, 4, -5] ||
///         v == [2, 1, -3, -5, 4]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable_by_key<K, F>(
    &mut self,
    index: usize,
    mut f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Note: `f` runs twice per comparison, once for each operand.
    let mut g = |a: &T, b: &T| f(a).lt(&f(b));
    sort::partition_at_index(self, index, &mut g)
}
/// Moves all consecutive repeated elements to the end of the slice according to the
/// [`PartialEq`] trait implementation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
///
/// let (dedup, duplicates) = slice.partition_dedup();
///
/// assert_eq!(dedup, [1, 2, 3, 2, 1]);
/// assert_eq!(duplicates, [2, 3, 1]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup(&mut self) -> (&mut [T], &mut [T])
where
    T: PartialEq,
{
    // Delegate to the general form, treating `==` as the equality relation.
    self.partition_dedup_by(|a, b| a == b)
}
    /// Moves all but the first of consecutive elements to the end of the slice satisfying
    /// a given equality relation.
    ///
    /// Returns two slices. The first contains no consecutive repeated elements.
    /// The second contains all the duplicates in no specified order.
    ///
    /// The `same_bucket` function is passed references to two elements from the slice and
    /// must determine if the elements compare equal. The elements are passed in opposite order
    /// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is moved
    /// at the end of the slice.
    ///
    /// If the slice is sorted, the first returned slice contains no duplicates.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(slice_partition_dedup)]
    ///
    /// let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
    ///
    /// let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
    ///
    /// assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
    /// assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
    /// ```
    #[unstable(feature = "slice_partition_dedup", issue = "54279")]
    #[inline]
    pub fn partition_dedup_by<F>(&mut self, mut same_bucket: F) -> (&mut [T], &mut [T])
    where
        F: FnMut(&mut T, &mut T) -> bool,
    {
        // Although we have a mutable reference to `self`, we cannot make
        // *arbitrary* changes. The `same_bucket` calls could panic, so we
        // must ensure that the slice is in a valid state at all times.
        //
        // The way that we handle this is by using swaps; we iterate
        // over all the elements, swapping as we go so that at the end
        // the elements we wish to keep are in the front, and those we
        // wish to reject are at the back. We can then split the slice.
        // This operation is still `O(n)`.
        //
        // Example: We start in this state, where `r` represents "next
        // read" and `w` represents "next write".
        //
        //           r
        //     +---+---+---+---+---+---+
        //     | 0 | 1 | 1 | 2 | 3 | 3 |
        //     +---+---+---+---+---+---+
        //           w
        //
        // Comparing self[r] against self[w-1], this is not a duplicate, so
        // we swap self[r] and self[w] (no effect as r==w) and then increment both
        // r and w, leaving us with:
        //
        //               r
        //     +---+---+---+---+---+---+
        //     | 0 | 1 | 1 | 2 | 3 | 3 |
        //     +---+---+---+---+---+---+
        //               w
        //
        // Comparing self[r] against self[w-1], this value is a duplicate,
        // so we increment `r` but leave everything else unchanged:
        //
        //                   r
        //     +---+---+---+---+---+---+
        //     | 0 | 1 | 1 | 2 | 3 | 3 |
        //     +---+---+---+---+---+---+
        //               w
        //
        // Comparing self[r] against self[w-1], this is not a duplicate,
        // so swap self[r] and self[w] and advance r and w:
        //
        //                       r
        //     +---+---+---+---+---+---+
        //     | 0 | 1 | 2 | 1 | 3 | 3 |
        //     +---+---+---+---+---+---+
        //                   w
        //
        // Not a duplicate, repeat:
        //
        //                           r
        //     +---+---+---+---+---+---+
        //     | 0 | 1 | 2 | 3 | 1 | 3 |
        //     +---+---+---+---+---+---+
        //                       w
        //
        // Duplicate, advance r. End of slice. Split at w.
        // Slices of length 0 or 1 cannot contain duplicates; return early so
        // the pointer arithmetic below can assume `len >= 2`.
        let len = self.len();
        if len <= 1 {
            return (self, &mut []);
        }
        let ptr = self.as_mut_ptr();
        let mut next_read: usize = 1;
        let mut next_write: usize = 1;
        // SAFETY: the `while` condition guarantees `next_read` and `next_write`
        // are less than `len`, thus are inside `self`. `prev_ptr_write` points to
        // one element before `ptr_write`, but `next_write` starts at 1, so
        // `prev_ptr_write` is never less than 0 and is inside the slice.
        // This fulfils the requirements for dereferencing `ptr_read`, `prev_ptr_write`
        // and `ptr_write`, and for using `ptr.add(next_read)`, `ptr.add(next_write - 1)`
        // and `prev_ptr_write.offset(1)`.
        //
        // `next_write` is also incremented at most once per loop iteration, meaning
        // no element is skipped when it may need to be swapped.
        //
        // `ptr_read` and `prev_ptr_write` never point to the same element. This
        // is required for `&mut *ptr_read`, `&mut *prev_ptr_write` to be safe.
        // The explanation is simply that `next_read >= next_write` is always true,
        // thus `next_read > next_write - 1` is too.
        unsafe {
            // Avoid bounds checks by using raw pointers.
            while next_read < len {
                let ptr_read = ptr.add(next_read);
                let prev_ptr_write = ptr.add(next_write - 1);
                if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
                    if next_read != next_write {
                        let ptr_write = prev_ptr_write.offset(1);
                        mem::swap(&mut *ptr_read, &mut *ptr_write);
                    }
                    next_write += 1;
                }
                next_read += 1;
            }
        }
        self.split_at_mut(next_write)
    }
/// Moves all but the first of consecutive elements to the end of the slice that resolve
/// to the same key.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
///
/// let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
///
/// assert_eq!(dedup, [10, 20, 30, 20, 11]);
/// assert_eq!(duplicates, [21, 30, 13]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by_key<K, F>(&mut self, mut key: F) -> (&mut [T], &mut [T])
where
F: FnMut(&mut T) -> K,
K: PartialEq,
{
self.partition_dedup_by(|a, b| key(a) == key(b))
}
    /// Rotates the slice in-place such that the first `mid` elements of the
    /// slice move to the end while the last `self.len() - mid` elements move to
    /// the front. After calling `rotate_left`, the element previously at index
    /// `mid` will become the first element in the slice.
    ///
    /// # Panics
    ///
    /// This function will panic if `mid` is greater than the length of the
    /// slice. Note that `mid == self.len()` does _not_ panic and is a no-op
    /// rotation.
    ///
    /// # Complexity
    ///
    /// Takes linear (in `self.len()`) time.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
    /// a.rotate_left(2);
    /// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
    /// ```
    ///
    /// Rotating a subslice:
    ///
    /// ```
    /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
    /// a[1..5].rotate_left(1);
    /// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
    /// ```
    #[stable(feature = "slice_rotate", since = "1.26.0")]
    pub fn rotate_left(&mut self, mid: usize) {
        assert!(mid <= self.len());
        // `k` is the number of trailing elements that end up at the front;
        // `ptr_rotate` takes the split point as a pointer plus the two counts.
        let k = self.len() - mid;
        let p = self.as_mut_ptr();
        // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
        // valid for reading and writing, as required by `ptr_rotate`.
        unsafe {
            rotate::ptr_rotate(mid, p.add(mid), k);
        }
    }
    /// Rotates the slice in-place such that the first `self.len() - k`
    /// elements of the slice move to the end while the last `k` elements move
    /// to the front. After calling `rotate_right`, the element previously at
    /// index `self.len() - k` will become the first element in the slice.
    ///
    /// # Panics
    ///
    /// This function will panic if `k` is greater than the length of the
    /// slice. Note that `k == self.len()` does _not_ panic and is a no-op
    /// rotation.
    ///
    /// # Complexity
    ///
    /// Takes linear (in `self.len()`) time.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
    /// a.rotate_right(2);
    /// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
    /// ```
    ///
    /// Rotate a subslice:
    ///
    /// ```
    /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
    /// a[1..5].rotate_right(1);
    /// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
    /// ```
    #[stable(feature = "slice_rotate", since = "1.26.0")]
    pub fn rotate_right(&mut self, k: usize) {
        assert!(k <= self.len());
        // A right rotation by `k` is a left rotation by `len - k`; compute the
        // split point `mid` and reuse the same `ptr_rotate` primitive.
        let mid = self.len() - k;
        let p = self.as_mut_ptr();
        // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
        // valid for reading and writing, as required by `ptr_rotate`.
        unsafe {
            rotate::ptr_rotate(mid, p.add(mid), k);
        }
    }
    /// Fills `self` with elements by cloning `value`.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut buf = vec![0; 10];
    /// buf.fill(1);
    /// assert_eq!(buf, vec![1; 10]);
    /// ```
    #[doc(alias = "memset")]
    #[stable(feature = "slice_fill", since = "1.50.0")]
    pub fn fill(&mut self, value: T)
    where
        T: Clone,
    {
        // Dispatch through the `SpecFill` specialization trait (see the
        // `specialize` module) so particular element types can take a faster
        // path than generic element-by-element cloning — NOTE(review): the
        // `memset` doc alias suggests a byte-fill fast path; confirm in
        // `specialize`.
        specialize::SpecFill::spec_fill(self, value);
    }
/// Fills `self` with elements returned by calling a closure repeatedly.
///
/// This method uses a closure to create new values. If you'd rather
/// [`Clone`] a given value, use [`fill`]. If you want to use the [`Default`]
/// trait to generate values, you can pass [`Default::default`] as the
/// argument.
///
/// [`fill`]: slice::fill
///
/// # Examples
///
/// ```
/// let mut buf = vec![1; 10];
/// buf.fill_with(Default::default);
/// assert_eq!(buf, vec![0; 10]);
/// ```
#[doc(alias = "memset")]
#[stable(feature = "slice_fill_with", since = "1.51.0")]
pub fn fill_with<F>(&mut self, mut f: F)
where
F: FnMut() -> T,
{
for el in self {
*el = f();
}
}
    /// Copies the elements from `src` into `self`.
    ///
    /// The length of `src` must be the same as `self`.
    ///
    /// If `T` implements `Copy`, it can be more performant to use
    /// [`copy_from_slice`].
    ///
    /// # Panics
    ///
    /// This function will panic if the two slices have different lengths.
    ///
    /// # Examples
    ///
    /// Cloning two elements from a slice into another:
    ///
    /// ```
    /// let src = [1, 2, 3, 4];
    /// let mut dst = [0, 0];
    ///
    /// // Because the slices have to be the same length,
    /// // we slice the source slice from four elements
    /// // to two. It will panic if we don't do this.
    /// dst.clone_from_slice(&src[2..]);
    ///
    /// assert_eq!(src, [1, 2, 3, 4]);
    /// assert_eq!(dst, [3, 4]);
    /// ```
    ///
    /// Rust enforces that there can only be one mutable reference with no
    /// immutable references to a particular piece of data in a particular
    /// scope. Because of this, attempting to use `clone_from_slice` on a
    /// single slice will result in a compile failure:
    ///
    /// ```compile_fail
    /// let mut slice = [1, 2, 3, 4, 5];
    ///
    /// slice[..2].clone_from_slice(&slice[3..]); // compile fail!
    /// ```
    ///
    /// To work around this, we can use [`split_at_mut`] to create two distinct
    /// sub-slices from a slice:
    ///
    /// ```
    /// let mut slice = [1, 2, 3, 4, 5];
    ///
    /// {
    ///     let (left, right) = slice.split_at_mut(2);
    ///     left.clone_from_slice(&right[1..]);
    /// }
    ///
    /// assert_eq!(slice, [4, 5, 3, 4, 5]);
    /// ```
    ///
    /// [`copy_from_slice`]: slice::copy_from_slice
    /// [`split_at_mut`]: slice::split_at_mut
    #[stable(feature = "clone_from_slice", since = "1.7.0")]
    pub fn clone_from_slice(&mut self, src: &[T])
    where
        T: Clone,
    {
        // Dispatch through `CloneFromSpec` (defined later in this file): the
        // generic impl clones element by element, while the `T: Copy`
        // specialization forwards to `copy_from_slice` for a bulk copy.
        self.spec_clone_from(src);
    }
    /// Copies all elements from `src` into `self`, using a memcpy.
    ///
    /// The length of `src` must be the same as `self`.
    ///
    /// If `T` does not implement `Copy`, use [`clone_from_slice`].
    ///
    /// # Panics
    ///
    /// This function will panic if the two slices have different lengths.
    ///
    /// # Examples
    ///
    /// Copying two elements from a slice into another:
    ///
    /// ```
    /// let src = [1, 2, 3, 4];
    /// let mut dst = [0, 0];
    ///
    /// // Because the slices have to be the same length,
    /// // we slice the source slice from four elements
    /// // to two. It will panic if we don't do this.
    /// dst.copy_from_slice(&src[2..]);
    ///
    /// assert_eq!(src, [1, 2, 3, 4]);
    /// assert_eq!(dst, [3, 4]);
    /// ```
    ///
    /// Rust enforces that there can only be one mutable reference with no
    /// immutable references to a particular piece of data in a particular
    /// scope. Because of this, attempting to use `copy_from_slice` on a
    /// single slice will result in a compile failure:
    ///
    /// ```compile_fail
    /// let mut slice = [1, 2, 3, 4, 5];
    ///
    /// slice[..2].copy_from_slice(&slice[3..]); // compile fail!
    /// ```
    ///
    /// To work around this, we can use [`split_at_mut`] to create two distinct
    /// sub-slices from a slice:
    ///
    /// ```
    /// let mut slice = [1, 2, 3, 4, 5];
    ///
    /// {
    ///     let (left, right) = slice.split_at_mut(2);
    ///     left.copy_from_slice(&right[1..]);
    /// }
    ///
    /// assert_eq!(slice, [4, 5, 3, 4, 5]);
    /// ```
    ///
    /// [`clone_from_slice`]: slice::clone_from_slice
    /// [`split_at_mut`]: slice::split_at_mut
    #[doc(alias = "memcpy")]
    #[stable(feature = "copy_from_slice", since = "1.9.0")]
    pub fn copy_from_slice(&mut self, src: &[T])
    where
        T: Copy,
    {
        // The panic code path was put into a cold function to not bloat the
        // call site.
        #[inline(never)]
        #[cold]
        #[track_caller]
        fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
            panic!(
                "source slice length ({}) does not match destination slice length ({})",
                src_len, dst_len,
            );
        }
        if self.len() != src.len() {
            len_mismatch_fail(self.len(), src.len());
        }
        // SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was
        // checked to have the same length. The slices cannot overlap because
        // mutable references are exclusive.
        unsafe {
            ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
        }
    }
    /// Copies elements from one part of the slice to another part of itself,
    /// using a memmove.
    ///
    /// `src` is the range within `self` to copy from. `dest` is the starting
    /// index of the range within `self` to copy to, which will have the same
    /// length as `src`. The two ranges may overlap. The ends of the two ranges
    /// must be less than or equal to `self.len()`.
    ///
    /// # Panics
    ///
    /// This function will panic if either range exceeds the end of the slice,
    /// or if the end of `src` is before the start.
    ///
    /// # Examples
    ///
    /// Copying four bytes within a slice:
    ///
    /// ```
    /// let mut bytes = *b"Hello, World!";
    ///
    /// bytes.copy_within(1..5, 8);
    ///
    /// assert_eq!(&bytes, b"Hello, Wello!");
    /// ```
    #[stable(feature = "copy_within", since = "1.37.0")]
    #[track_caller]
    pub fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dest: usize)
    where
        T: Copy,
    {
        // `slice::range` resolves the bounds against `..self.len()` and panics
        // on an invalid range, so `src_start <= src_end <= self.len()` holds
        // below and both subtractions cannot underflow.
        let Range { start: src_start, end: src_end } = slice::range(src, ..self.len());
        let count = src_end - src_start;
        assert!(dest <= self.len() - count, "dest is out of bounds");
        // SAFETY: the conditions for `ptr::copy` have all been checked above,
        // as have those for `ptr::add`.
        unsafe {
            // Derive both `src_ptr` and `dest_ptr` from the same loan
            let ptr = self.as_mut_ptr();
            let src_ptr = ptr.add(src_start);
            let dest_ptr = ptr.add(dest);
            // `ptr::copy` is the overlap-tolerant (memmove-like) copy.
            ptr::copy(src_ptr, dest_ptr, count);
        }
    }
    /// Swaps all elements in `self` with those in `other`.
    ///
    /// The length of `other` must be the same as `self`.
    ///
    /// # Panics
    ///
    /// This function will panic if the two slices have different lengths.
    ///
    /// # Example
    ///
    /// Swapping two elements across slices:
    ///
    /// ```
    /// let mut slice1 = [0, 0];
    /// let mut slice2 = [1, 2, 3, 4];
    ///
    /// slice1.swap_with_slice(&mut slice2[2..]);
    ///
    /// assert_eq!(slice1, [3, 4]);
    /// assert_eq!(slice2, [1, 2, 0, 0]);
    /// ```
    ///
    /// Rust enforces that there can only be one mutable reference to a
    /// particular piece of data in a particular scope. Because of this,
    /// attempting to use `swap_with_slice` on a single slice will result in
    /// a compile failure:
    ///
    /// ```compile_fail
    /// let mut slice = [1, 2, 3, 4, 5];
    /// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
    /// ```
    ///
    /// To work around this, we can use [`split_at_mut`] to create two distinct
    /// mutable sub-slices from a slice:
    ///
    /// ```
    /// let mut slice = [1, 2, 3, 4, 5];
    ///
    /// {
    ///     let (left, right) = slice.split_at_mut(2);
    ///     left.swap_with_slice(&mut right[1..]);
    /// }
    ///
    /// assert_eq!(slice, [4, 5, 3, 1, 2]);
    /// ```
    ///
    /// [`split_at_mut`]: slice::split_at_mut
    #[stable(feature = "swap_with_slice", since = "1.27.0")]
    pub fn swap_with_slice(&mut self, other: &mut [T]) {
        assert!(self.len() == other.len(), "destination and source slices have different lengths");
        // SAFETY: `self` is valid for `self.len()` elements by definition, and `other` was
        // checked to have the same length. The slices cannot overlap because
        // mutable references are exclusive.
        unsafe {
            ptr::swap_nonoverlapping(self.as_mut_ptr(), other.as_mut_ptr(), self.len());
        }
    }
    /// Function to calculate lengths of the middle and trailing slice for `align_to{,_mut}`.
    ///
    /// Returns `(us_len, ts_len)`: how many whole `U`s fit into the slice's
    /// bytes, and how many `T`s are left over at the end. Callers guarantee
    /// neither `T` nor `U` is zero-sized (they special-case ZSTs first), so
    /// both sizes passed to `gcd` below are non-zero.
    fn align_to_offsets<U>(&self) -> (usize, usize) {
        // What we gonna do about `rest` is figure out what multiple of `U`s we can put in a
        // lowest number of `T`s. And how many `T`s we need for each such "multiple".
        //
        // Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
        // for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
        // place of every 3 Ts in the `rest` slice. A bit more complicated.
        //
        // Formula to calculate this is:
        //
        // Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
        // Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
        //
        // Expanded and simplified:
        //
        // Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
        // Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
        //
        // Luckily since all this is constant-evaluated... performance here matters not!
        #[inline]
        fn gcd(a: usize, b: usize) -> usize {
            use crate::intrinsics;
            // iterative stein’s algorithm
            // We should still make this `const fn` (and revert to recursive algorithm if we do)
            // because relying on llvm to consteval all this is… well, it makes me uncomfortable.
            // SAFETY: `a` and `b` are checked to be non-zero values.
            let (ctz_a, mut ctz_b) = unsafe {
                if a == 0 {
                    return b;
                }
                if b == 0 {
                    return a;
                }
                (intrinsics::cttz_nonzero(a), intrinsics::cttz_nonzero(b))
            };
            // gcd(a, b) = 2^k * gcd(a >> ctz_a, b >> ctz_b) where k = min(ctz_a, ctz_b).
            let k = ctz_a.min(ctz_b);
            let mut a = a >> ctz_a;
            let mut b = b;
            loop {
                // remove all factors of 2 from b
                b >>= ctz_b;
                if a > b {
                    mem::swap(&mut a, &mut b);
                }
                b = b - a;
                // SAFETY: `b` is checked to be non-zero.
                unsafe {
                    if b == 0 {
                        break;
                    }
                    ctz_b = intrinsics::cttz_nonzero(b);
                }
            }
            // Re-apply the shared factors of two that were divided out up front.
            a << k
        }
        let gcd: usize = gcd(mem::size_of::<T>(), mem::size_of::<U>());
        let ts: usize = mem::size_of::<U>() / gcd;
        let us: usize = mem::size_of::<T>() / gcd;
        // Armed with this knowledge, we can find how many `U`s we can fit!
        let us_len = self.len() / ts * us;
        // And how many `T`s will be in the trailing slice!
        let ts_len = self.len() % ts;
        (us_len, ts_len)
    }
    /// Transmute the slice to a slice of another type, ensuring alignment of the types is
    /// maintained.
    ///
    /// This method splits the slice into three distinct slices: prefix, correctly aligned middle
    /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
    /// length possible for a given type and input slice, but only your algorithm's performance
    /// should depend on that, not its correctness. It is permissible for all of the input data to
    /// be returned as the prefix or suffix slice.
    ///
    /// This method has no purpose when either input element `T` or output element `U` are
    /// zero-sized and will return the original slice without splitting anything.
    ///
    /// # Safety
    ///
    /// This method is essentially a `transmute` with respect to the elements in the returned
    /// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// unsafe {
    ///     let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
    ///     let (prefix, shorts, suffix) = bytes.align_to::<u16>();
    ///     // less_efficient_algorithm_for_bytes(prefix);
    ///     // more_efficient_algorithm_for_aligned_shorts(shorts);
    ///     // less_efficient_algorithm_for_bytes(suffix);
    /// }
    /// ```
    #[stable(feature = "slice_align_to", since = "1.30.0")]
    pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
        // Note that most of this function will be constant-evaluated,
        if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
            // handle ZSTs specially, which is – don't handle them at all.
            return (self, &[], &[]);
        }
        // First, find at what point do we split between the first and 2nd slice. Easy with
        // ptr.align_offset.
        let ptr = self.as_ptr();
        // SAFETY: See the `align_to_mut` method for the detailed safety comment.
        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
        if offset > self.len() {
            // Alignment cannot be reached within the slice: everything is prefix.
            (self, &[], &[])
        } else {
            let (left, rest) = self.split_at(offset);
            let (us_len, ts_len) = rest.align_to_offsets::<U>();
            // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
            // since the caller guarantees that we can transmute `T` to `U` safely.
            unsafe {
                (
                    left,
                    from_raw_parts(rest.as_ptr() as *const U, us_len),
                    from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
                )
            }
        }
    }
    /// Transmute the slice to a slice of another type, ensuring alignment of the types is
    /// maintained.
    ///
    /// This method splits the slice into three distinct slices: prefix, correctly aligned middle
    /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
    /// length possible for a given type and input slice, but only your algorithm's performance
    /// should depend on that, not its correctness. It is permissible for all of the input data to
    /// be returned as the prefix or suffix slice.
    ///
    /// This method has no purpose when either input element `T` or output element `U` are
    /// zero-sized and will return the original slice without splitting anything.
    ///
    /// # Safety
    ///
    /// This method is essentially a `transmute` with respect to the elements in the returned
    /// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// unsafe {
    ///     let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
    ///     let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
    ///     // less_efficient_algorithm_for_bytes(prefix);
    ///     // more_efficient_algorithm_for_aligned_shorts(shorts);
    ///     // less_efficient_algorithm_for_bytes(suffix);
    /// }
    /// ```
    #[stable(feature = "slice_align_to", since = "1.30.0")]
    pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
        // Note that most of this function will be constant-evaluated,
        if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
            // handle ZSTs specially, which is – don't handle them at all.
            return (self, &mut [], &mut []);
        }
        // First, find at what point do we split between the first and 2nd slice. Easy with
        // ptr.align_offset.
        let ptr = self.as_ptr();
        // SAFETY: Here we are ensuring we will use aligned pointers for U for the
        // rest of the method. This is done by passing a pointer to &[T] with an
        // alignment targeted for U.
        // `crate::ptr::align_offset` is called with a correctly aligned and
        // valid pointer `ptr` (it comes from a reference to `self`) and with
        // a size that is a power of two (since it comes from the alignment for U),
        // satisfying its safety constraints.
        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
        if offset > self.len() {
            // Alignment cannot be reached within the slice: everything is prefix.
            (self, &mut [], &mut [])
        } else {
            let (left, rest) = self.split_at_mut(offset);
            let (us_len, ts_len) = rest.align_to_offsets::<U>();
            let rest_len = rest.len();
            let mut_ptr = rest.as_mut_ptr();
            // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
            // SAFETY: see comments for `align_to`.
            unsafe {
                (
                    left,
                    from_raw_parts_mut(mut_ptr as *mut U, us_len),
                    from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
                )
            }
        }
    }
/// Checks if the elements of this slice are sorted.
///
/// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the
/// slice yields exactly zero or one element, `true` is returned.
///
/// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition
/// implies that this function returns `false` if any two consecutive items are not
/// comparable.
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
/// let empty: [i32; 0] = [];
///
/// assert!([1, 2, 2, 9].is_sorted());
/// assert!(![1, 3, 2, 4].is_sorted());
/// assert!([0].is_sorted());
/// assert!(empty.is_sorted());
/// assert!(![0.0, 1.0, f32::NAN].is_sorted());
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted(&self) -> bool
where
T: PartialOrd,
{
self.is_sorted_by(|a, b| a.partial_cmp(b))
}
    /// Checks if the elements of this slice are sorted using the given comparator function.
    ///
    /// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare`
    /// function to determine the ordering of two elements. Apart from that, it's equivalent to
    /// [`is_sorted`]; see its documentation for more information.
    ///
    /// [`is_sorted`]: slice::is_sorted
    #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
    pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
    where
        F: FnMut(&T, &T) -> Option<Ordering>,
    {
        // Forward to the iterator version; the iterator yields `&T`, so the
        // closure dereferences its `&&T` arguments before calling `compare`.
        self.iter().is_sorted_by(|a, b| compare(*a, *b))
    }
/// Checks if the elements of this slice are sorted using the given key extraction function.
///
/// Instead of comparing the slice's elements directly, this function compares the keys of the
/// elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see its
/// documentation for more information.
///
/// [`is_sorted`]: slice::is_sorted
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
///
/// assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
/// assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
where
F: FnMut(&T) -> K,
K: PartialOrd,
{
self.iter().is_sorted_by_key(f)
}
/// Returns the index of the partition point according to the given predicate
/// (the index of the first element of the second partition).
///
/// The slice is assumed to be partitioned according to the given predicate.
/// This means that all elements for which the predicate returns true are at the start of the slice
/// and all elements for which the predicate returns false are at the end.
/// For example, [7, 15, 3, 5, 4, 12, 6] is a partitioned under the predicate x % 2 != 0
/// (all odd numbers are at the start, all even at the end).
///
/// If this slice is not partitioned, the returned result is unspecified and meaningless,
/// as this method performs a kind of binary search.
///
/// See also [`binary_search`], [`binary_search_by`], and [`binary_search_by_key`].
///
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by`]: slice::binary_search_by
/// [`binary_search_by_key`]: slice::binary_search_by_key
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 3, 5, 6, 7];
/// let i = v.partition_point(|&x| x < 5);
///
/// assert_eq!(i, 4);
/// assert!(v[..i].iter().all(|&x| x < 5));
/// assert!(v[i..].iter().all(|&x| !(x < 5)));
/// ```
#[stable(feature = "partition_point", since = "1.52.0")]
pub fn partition_point<P>(&self, mut pred: P) -> usize
where
P: FnMut(&T) -> bool,
{
self.binary_search_by(|x| if pred(x) { Less } else { Greater }).unwrap_or_else(|i| i)
}
}
/// Private specialization hook behind `clone_from_slice`: the blanket impl
/// clones element by element, while the `T: Copy` impl below upgrades the
/// whole operation to a single bulk `copy_from_slice`.
trait CloneFromSpec<T> {
    /// Clones the elements of `src` into `self`; panics if the lengths differ.
    fn spec_clone_from(&mut self, src: &[T]);
}
// Generic fallback: element-wise `clone_from` for any `T: Clone`.
impl<T> CloneFromSpec<T> for [T]
where
    T: Clone,
{
    default fn spec_clone_from(&mut self, src: &[T]) {
        assert!(self.len() == src.len(), "destination and source slices have different lengths");
        // NOTE: We need to explicitly slice them to the same length
        // to make it easier for the optimizer to elide bounds checking.
        // But since it can't be relied on we also have an explicit specialization for T: Copy.
        let len = self.len();
        let src = &src[..len];
        for i in 0..len {
            // `clone_from` (rather than `clone` + assign) lets elements reuse
            // their existing resources where the `Clone` impl supports it.
            self[i].clone_from(&src[i]);
        }
    }
}
// Specialization: for `T: Copy`, cloning a slice is just a bulk copy, so
// forward to `copy_from_slice` (which also performs the length check).
impl<T> CloneFromSpec<T> for [T]
where
    T: Copy,
{
    fn spec_clone_from(&mut self, src: &[T]) {
        self.copy_from_slice(src);
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for &[T] {
    /// Creates an empty slice.
    fn default() -> Self {
        // A borrow of an empty array literal is valid for any lifetime.
        &[]
    }
}
#[stable(feature = "mut_slice_default", since = "1.5.0")]
impl<T> Default for &mut [T] {
    /// Creates a mutable empty slice.
    fn default() -> Self {
        // An empty mutable slice aliases nothing, so the 'static borrow is fine.
        &mut []
    }
}
#[unstable(feature = "slice_pattern", reason = "stopgap trait for slice patterns", issue = "56345")]
/// Patterns in slices - currently, only used by `strip_prefix` and `strip_suffix`. At a future
/// point, we hope to generalise `core::str::Pattern` (which at the time of writing is limited to
/// `str`) to slices, and then this trait will be replaced or abolished.
pub trait SlicePattern {
    /// The element type of the slice being matched on.
    type Item;
    /// Currently, the consumers of `SlicePattern` need a slice.
    fn as_slice(&self) -> &[Self::Item];
}
#[stable(feature = "slice_strip", since = "1.51.0")]
// Slices trivially pattern-match against themselves.
impl<T> SlicePattern for [T] {
    type Item = T;
    #[inline]
    fn as_slice(&self) -> &[Self::Item] {
        self
    }
}
#[stable(feature = "slice_strip", since = "1.51.0")]
// Arrays coerce to slices, so they can serve as slice patterns too.
impl<T, const N: usize> SlicePattern for [T; N] {
    type Item = T;
    #[inline]
    fn as_slice(&self) -> &[Self::Item] {
        self
    }
}
|
//! Slice management and manipulation.
//!
//! For more details see [`std::slice`].
//!
//! [`std::slice`]: ../../std/slice/index.html
#![stable(feature = "rust1", since = "1.0.0")]
use crate::cmp::Ordering::{self, Greater, Less};
use crate::marker::Copy;
use crate::mem;
use crate::num::NonZeroUsize;
use crate::ops::{FnMut, Range, RangeBounds};
use crate::option::Option;
use crate::option::Option::{None, Some};
use crate::ptr;
use crate::result::Result;
use crate::result::Result::{Err, Ok};
use crate::slice;
#[unstable(
feature = "slice_internals",
issue = "none",
reason = "exposed from core to be reused in std; use the memchr crate"
)]
/// Pure rust memchr implementation, taken from rust-memchr
pub mod memchr;
mod ascii;
mod cmp;
mod index;
mod iter;
mod raw;
mod rotate;
mod sort;
mod specialize;
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Chunks, ChunksMut, Windows};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Iter, IterMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{RSplitN, RSplitNMut, Split, SplitMut, SplitN, SplitNMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use iter::{RSplit, RSplitMut};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use iter::{ChunksExact, ChunksExactMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use iter::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[unstable(feature = "array_chunks", issue = "74985")]
pub use iter::{ArrayChunks, ArrayChunksMut};
#[unstable(feature = "array_windows", issue = "75027")]
pub use iter::ArrayWindows;
#[unstable(feature = "slice_group_by", issue = "80552")]
pub use iter::{GroupBy, GroupByMut};
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use iter::{SplitInclusive, SplitInclusiveMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use raw::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "from_ref", since = "1.28.0")]
pub use raw::{from_mut, from_ref};
// This function is public only because there is no other way to unit test heapsort.
#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
pub use sort::heapsort;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use index::SliceIndex;
#[unstable(feature = "slice_range", issue = "76393")]
pub use index::range;
#[unstable(feature = "inherent_ascii_escape", issue = "77174")]
pub use ascii::EscapeAscii;
#[lang = "slice"]
#[cfg(not(test))]
impl<T> [T] {
    /// Returns the number of elements in the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let a = [1, 2, 3];
    /// assert_eq!(a.len(), 3);
    /// ```
    #[lang = "slice_len_fn"]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_slice_len", since = "1.39.0")]
    #[inline]
    // SAFETY: const sound because we transmute out the length field as a usize (which it must be)
    pub const fn len(&self) -> usize {
        // FIXME: Replace with `crate::ptr::metadata(self)` when that is const-stable.
        // As of this writing this causes a "Const-stable functions can only call other
        // const-stable functions" error.
        // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T
        // and PtrComponents<T> have the same memory layouts. Only std can make this
        // guarantee.
        // For a slice pointer, the `metadata` component is the element count.
        unsafe { crate::ptr::PtrRepr { const_ptr: self }.components.metadata }
    }
/// Returns `true` if the slice has a length of 0.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert!(!a.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_is_empty", since = "1.39.0")]
#[inline]
pub const fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the first element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&10), v.first());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.first());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn first(&self) -> Option<&T> {
if let [first, ..] = self { Some(first) } else { None }
}
/// Returns a mutable pointer to the first element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(first) = x.first_mut() {
/// *first = 5;
/// }
/// assert_eq!(x, &[5, 1, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn first_mut(&mut self) -> Option<&mut T> {
if let [first, ..] = self { Some(first) } else { None }
}
/// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first() {
/// assert_eq!(first, &0);
/// assert_eq!(elements, &[1, 2]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn split_first(&self) -> Option<(&T, &[T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
/// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first_mut() {
/// *first = 3;
/// elements[0] = 4;
/// elements[1] = 5;
/// }
/// assert_eq!(x, &[3, 4, 5]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
/// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last() {
///     assert_eq!(last, &2);
///     assert_eq!(elements, &[0, 1]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn split_last(&self) -> Option<(&T, &[T])> {
    // Note the tuple order: the last element comes first, the
    // remaining prefix (`init`) second.
    if let [init @ .., last] = self { Some((last, init)) } else { None }
}
/// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last_mut() {
///     *last = 3;
///     elements[0] = 4;
///     elements[1] = 5;
/// }
/// assert_eq!(x, &[4, 5, 3]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
    // As in `split_last`, the returned tuple is (last, prefix); the
    // two mutable borrows are disjoint, so both can be handed out.
    if let [init @ .., last] = self { Some((last, init)) } else { None }
}
/// Returns the last element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&30), v.last());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.last());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn last(&self) -> Option<&T> {
    // `[.., last]` matches any non-empty slice and borrows its final element.
    if let [.., last] = self { Some(last) } else { None }
}
/// Returns a mutable reference to the last item in the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(last) = x.last_mut() {
///     *last = 10;
/// }
/// assert_eq!(x, &[0, 1, 10]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn last_mut(&mut self) -> Option<&mut T> {
    // Matching through `&mut self` yields a mutable borrow of the final element.
    if let [.., last] = self { Some(last) } else { None }
}
/// Returns a reference to an element or subslice depending on the type of
/// index.
///
/// - If given a position, returns a reference to the element at that
/// position or `None` if out of bounds.
/// - If given a range, returns the subslice corresponding to that range,
/// or `None` if out of bounds.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&40), v.get(1));
/// assert_eq!(Some(&[10, 40][..]), v.get(0..2));
/// assert_eq!(None, v.get(3));
/// assert_eq!(None, v.get(0..4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get<I>(&self, index: I) -> Option<&I::Output>
where
I: SliceIndex<Self>,
{
index.get(self)
}
/// Returns a mutable reference to an element or subslice depending on the
/// type of index (see [`get`]) or `None` if the index is out of bounds.
///
/// [`get`]: slice::get
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(elem) = x.get_mut(1) {
/// *elem = 42;
/// }
/// assert_eq!(x, &[0, 42, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
I: SliceIndex<Self>,
{
index.get_mut(self)
}
/// Returns a reference to an element or subslice, without doing bounds
/// checking.
///
/// For a safe alternative see [`get`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
/// [`get`]: slice::get
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
///
/// unsafe {
///     assert_eq!(x.get_unchecked(1), &2);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where
    I: SliceIndex<Self>,
{
    // SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`;
    // the slice is dereferenceable because `self` is a safe reference.
    // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
    unsafe { &*index.get_unchecked(self) }
}
/// Returns a mutable reference to an element or subslice, without doing
/// bounds checking.
///
/// For a safe alternative see [`get_mut`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
/// [`get_mut`]: slice::get_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
///
/// unsafe {
///     let elem = x.get_unchecked_mut(1);
///     *elem = 13;
/// }
/// assert_eq!(x, &[1, 13, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where
    I: SliceIndex<Self>,
{
    // SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
    // the slice is dereferenceable because `self` is a safe reference.
    // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
    unsafe { &mut *index.get_unchecked_mut(self) }
}
/// Returns a raw pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// The caller must also ensure that the memory the pointer (non-transitively) points to
/// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
/// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let x_ptr = x.as_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
///     }
/// }
/// ```
///
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
#[inline]
pub const fn as_ptr(&self) -> *const T {
    // Casting through `*const [T]` discards the length metadata; a slice
    // pointer's data address is the address of its first element.
    self as *const [T] as *const T
}
/// Returns an unsafe mutable pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// let x_ptr = x.as_mut_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         *x_ptr.add(i) += 2;
///     }
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_mut_ptr(&mut self) -> *mut T {
    // Casting through `*mut [T]` discards the length metadata; the data
    // address of the slice pointer is the address of its first element.
    self as *mut [T] as *mut T
}
/// Returns the two raw pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_ptr`] for warnings on using these pointers. The end pointer
/// requires extra caution, as it does not point to a valid element in the
/// slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// It can also be useful to check if a pointer to an element refers to an
/// element of this slice:
///
/// ```
/// let a = [1, 2, 3];
/// let x = &a[1] as *const _;
/// let y = &5 as *const _;
///
/// assert!(a.as_ptr_range().contains(&x));
/// assert!(!a.as_ptr_range().contains(&y));
/// ```
///
/// [`as_ptr`]: slice::as_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_ptr_range(&self) -> Range<*const T> {
    let start = self.as_ptr();
    // SAFETY: The `add` here is safe, because:
    //
    //   - Both pointers are part of the same object, as pointing directly
    //     past the object also counts.
    //
    //   - The size of the slice is never larger than isize::MAX bytes, as
    //     noted here:
    //       - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447
    //       - https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    //       - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety
    //     (This doesn't seem normative yet, but the very same assumption is
    //     made in many places, including the Index implementation of slices.)
    //
    //   - There is no wrapping around involved, as slices do not wrap past
    //     the end of the address space.
    //
    // See the documentation of pointer::add.
    let end = unsafe { start.add(self.len()) };
    // For an empty slice `start == end`, so the range is empty too.
    start..end
}
/// Returns the two unsafe mutable pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_mut_ptr`] for warnings on using these pointers. The end
/// pointer requires extra caution, as it does not point to a valid element
/// in the slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
    let start = self.as_mut_ptr();
    // SAFETY: See as_ptr_range() above for why `add` here is safe.
    let end = unsafe { start.add(self.len()) };
    // For an empty slice `start == end`, so the range is empty too.
    start..end
}
/// Swaps two elements in the slice.
///
/// # Arguments
///
/// * a - The index of the first element
/// * b - The index of the second element
///
/// # Panics
///
/// Panics if `a` or `b` are out of bounds.
///
/// # Examples
///
/// ```
/// let mut v = ["a", "b", "c", "d", "e"];
/// v.swap(2, 4);
/// assert!(v == ["a", "b", "e", "d", "c"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn swap(&mut self, a: usize, b: usize) {
    // These otherwise-unused indexing expressions perform the bounds
    // checks: `&self[_]` panics if the index is out of bounds.
    let _ = &self[a];
    let _ = &self[b];
    // SAFETY: we just checked that both `a` and `b` are in bounds
    unsafe { self.swap_unchecked(a, b) }
}
/// Swaps two elements in the slice, without doing bounds checking.
///
/// For a safe alternative see [`swap`].
///
/// # Arguments
///
/// * a - The index of the first element
/// * b - The index of the second element
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*.
/// The caller has to ensure that `a < self.len()` and `b < self.len()`.
///
/// # Examples
///
/// ```
/// #![feature(slice_swap_unchecked)]
///
/// let mut v = ["a", "b", "c", "d"];
/// // SAFETY: we know that 1 and 3 are both indices of the slice
/// unsafe { v.swap_unchecked(1, 3) };
/// assert!(v == ["a", "d", "c", "b"]);
/// ```
///
/// [`swap`]: slice::swap
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[unstable(feature = "slice_swap_unchecked", issue = "88539")]
pub unsafe fn swap_unchecked(&mut self, a: usize, b: usize) {
    // In debug builds, still bounds-check so contract violations are
    // caught as panics instead of undefined behavior.
    #[cfg(debug_assertions)]
    {
        let _ = &self[a];
        let _ = &self[b];
    }
    let ptr = self.as_mut_ptr();
    // SAFETY: caller has to guarantee that `a < self.len()` and `b < self.len()`
    unsafe {
        ptr::swap(ptr.add(a), ptr.add(b));
    }
}
/// Reverses the order of elements in the slice, in place.
///
/// # Examples
///
/// ```
/// let mut v = [1, 2, 3];
/// v.reverse();
/// assert!(v == [3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn reverse(&mut self) {
    // `i` counts how many elements have been reversed from each end.
    let mut i: usize = 0;
    let ln = self.len();
    // For very small types, all the individual reads in the normal
    // path perform poorly. We can do better, given efficient unaligned
    // load/store, by loading a larger chunk and reversing a register.
    // Ideally LLVM would do this for us, as it knows better than we do
    // whether unaligned reads are efficient (since that changes between
    // different ARM versions, for example) and what the best chunk size
    // would be. Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls
    // the loop, so we need to do this ourselves. (Hypothesis: reverse
    // is troublesome because the sides can be aligned differently --
    // will be, when the length is odd -- so there's no way of emitting
    // pre- and postludes to use fully-aligned SIMD in the middle.)
    let fast_unaligned = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
    if fast_unaligned && mem::size_of::<T>() == 1 {
        // Use the llvm.bswap intrinsic to reverse u8s in a usize
        let chunk = mem::size_of::<usize>();
        while i + chunk - 1 < ln / 2 {
            // SAFETY: There are several things to check here:
            //
            // - Note that `chunk` is either 4 or 8 due to the cfg check
            //   above. So `chunk - 1` is positive.
            // - Indexing with index `i` is fine as the loop check guarantees
            //   `i + chunk - 1 < ln / 2`
            //   <=> `i < ln / 2 - (chunk - 1) < ln / 2 < ln`.
            // - Indexing with index `ln - i - chunk = ln - (i + chunk)` is fine:
            //   - `i + chunk > 0` is trivially true.
            //   - The loop check guarantees:
            //     `i + chunk - 1 < ln / 2`
            //     <=> `i + chunk ≤ ln / 2 ≤ ln`, thus subtraction does not underflow.
            // - The `read_unaligned` and `write_unaligned` calls are fine:
            //   - `pa` points to index `i` where `i < ln / 2 - (chunk - 1)`
            //     (see above) and `pb` points to index `ln - i - chunk`, so
            //     both are at least `chunk`
            //     many bytes away from the end of `self`.
            //   - Any initialized memory is valid `usize`.
            unsafe {
                let ptr = self.as_mut_ptr();
                let pa = ptr.add(i);
                let pb = ptr.add(ln - i - chunk);
                let va = ptr::read_unaligned(pa as *mut usize);
                let vb = ptr::read_unaligned(pb as *mut usize);
                // `swap_bytes` reverses the byte order of each word, so
                // writing the words crosswise reverses the whole chunk pair.
                ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
                ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
            }
            i += chunk;
        }
    }
    if fast_unaligned && mem::size_of::<T>() == 2 {
        // Use rotate-by-16 to reverse u16s in a u32
        let chunk = mem::size_of::<u32>() / 2;
        while i + chunk - 1 < ln / 2 {
            // SAFETY: An unaligned u32 can be read from `i` if `i + 1 < ln`
            // (and obviously `i < ln`), because each element is 2 bytes and
            // we're reading 4.
            //
            // `i + chunk - 1 < ln / 2` # while condition
            // `i + 2 - 1 < ln / 2`
            // `i + 1 < ln / 2`
            //
            // Since it's less than the length divided by 2, then it must be
            // in bounds.
            //
            // This also means that the condition `0 < i + chunk <= ln` is
            // always respected, ensuring the `pb` pointer can be used
            // safely.
            unsafe {
                let ptr = self.as_mut_ptr();
                let pa = ptr.add(i);
                let pb = ptr.add(ln - i - chunk);
                let va = ptr::read_unaligned(pa as *mut u32);
                let vb = ptr::read_unaligned(pb as *mut u32);
                // Rotating a u32 by 16 bits swaps its two u16 halves.
                ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
                ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
            }
            i += chunk;
        }
    }
    // Scalar fallback: swap the remaining (or, for larger `T`, all)
    // symmetric pairs one element at a time.
    while i < ln / 2 {
        // SAFETY: `i` is inferior to half the length of the slice so
        // accessing `i` and `ln - i - 1` is safe (`i` starts at 0 and
        // will not go further than `ln / 2 - 1`).
        // The resulting pointers `pa` and `pb` are therefore valid and
        // aligned, and can be read from and written to.
        unsafe {
            self.swap_unchecked(i, ln - i - 1);
        }
        i += 1;
    }
}
/// Returns an iterator over the slice.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let mut iterator = x.iter();
///
/// assert_eq!(iterator.next(), Some(&1));
/// assert_eq!(iterator.next(), Some(&2));
/// assert_eq!(iterator.next(), Some(&4));
/// assert_eq!(iterator.next(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter(&self) -> Iter<'_, T> {
    // `Iter` borrows the slice and yields `&T` front to back.
    Iter::new(self)
}
/// Returns an iterator that allows modifying each value.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// for elem in x.iter_mut() {
///     *elem += 2;
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
    // `IterMut` mutably borrows the slice and yields `&mut T` front to back.
    IterMut::new(self)
}
/// Returns an iterator over all contiguous windows of length
/// `size`. The windows overlap. If the slice is shorter than
/// `size`, the iterator returns no values.
///
/// # Panics
///
/// Panics if `size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['r', 'u', 's', 't'];
/// let mut iter = slice.windows(2);
/// assert_eq!(iter.next().unwrap(), &['r', 'u']);
/// assert_eq!(iter.next().unwrap(), &['u', 's']);
/// assert_eq!(iter.next().unwrap(), &['s', 't']);
/// assert!(iter.next().is_none());
/// ```
///
/// If the slice is shorter than `size`:
///
/// ```
/// let slice = ['f', 'o', 'o'];
/// let mut iter = slice.windows(4);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn windows(&self, size: usize) -> Windows<'_, T> {
    // `NonZeroUsize::new` returns `None` for 0, so the `expect` here is
    // what implements the documented panic on a zero `size`.
    let size = NonZeroUsize::new(size).expect("size is zero");
    Windows::new(self, size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`rchunks`] for the same iterator but starting at the end of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert_eq!(iter.next().unwrap(), &['m']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`chunks_exact`]: slice::chunks_exact
/// [`rchunks`]: slice::rchunks
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
    // A zero chunk size could never make progress; reject it up front.
    assert_ne!(chunk_size, 0);
    Chunks::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`rchunks_mut`] for the same iterator but starting at
/// the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 3]);
/// ```
///
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
/// [`rchunks_mut`]: slice::rchunks_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
    // A zero chunk size could never make progress; reject it up front.
    assert_ne!(chunk_size, 0);
    ChunksMut::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`chunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`rchunks_exact`] for the same iterator but starting at the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks`]: slice::chunks
/// [`rchunks_exact`]: slice::rchunks_exact
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
    // A zero chunk size could never make progress; reject it up front.
    assert_ne!(chunk_size, 0);
    ChunksExact::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`chunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`rchunks_exact_mut`] for the same iterator but starting at the end of
/// the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_exact_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_mut`]: slice::chunks_mut
/// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
    // A zero chunk size could never make progress; reject it up front.
    assert_ne!(chunk_size, 0);
    ChunksExactMut::new(self, chunk_size)
}
/// Splits the slice into a slice of `N`-element arrays,
/// assuming that there's no remainder.
///
/// # Safety
///
/// This may only be called when
/// - The slice splits exactly into `N`-element chunks (aka `self.len() % N == 0`).
/// - `N != 0`.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice: &[char] = &['l', 'o', 'r', 'e', 'm', '!'];
/// let chunks: &[[char; 1]] =
///     // SAFETY: 1-element chunks never have remainder
///     unsafe { slice.as_chunks_unchecked() };
/// assert_eq!(chunks, &[['l'], ['o'], ['r'], ['e'], ['m'], ['!']]);
/// let chunks: &[[char; 3]] =
///     // SAFETY: The slice length (6) is a multiple of 3
///     unsafe { slice.as_chunks_unchecked() };
/// assert_eq!(chunks, &[['l', 'o', 'r'], ['e', 'm', '!']]);
///
/// // These would be unsound:
/// // let chunks: &[[_; 5]] = slice.as_chunks_unchecked() // The slice length is not a multiple of 5
/// // let chunks: &[[_; 0]] = slice.as_chunks_unchecked() // Zero-length chunks are never allowed
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
    // Debug-only checks of the caller's contract; release builds trust it.
    debug_assert_ne!(N, 0);
    debug_assert_eq!(self.len() % N, 0);
    let new_len =
        // SAFETY: Our precondition is exactly what's needed to call this
        unsafe { crate::intrinsics::exact_div(self.len(), N) };
    // SAFETY: We cast a slice of `new_len * N` elements into
    // a slice of `new_len` many `N` elements chunks.
    unsafe { from_raw_parts(self.as_ptr().cast(), new_len) }
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the beginning of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let (chunks, remainder) = slice.as_chunks();
/// assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
/// assert_eq!(remainder, &['m']);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) {
    assert_ne!(N, 0);
    // `len * N` cannot overflow because `len` was obtained by dividing
    // `self.len()` by `N`.
    let len = self.len() / N;
    let (multiple_of_n, remainder) = self.split_at(len * N);
    // SAFETY: We already panicked for zero, and ensured by construction
    // that the length of the subslice is a multiple of N.
    let array_slice = unsafe { multiple_of_n.as_chunks_unchecked() };
    (array_slice, remainder)
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the end of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let (remainder, chunks) = slice.as_rchunks();
/// assert_eq!(remainder, &['l']);
/// assert_eq!(chunks, &[['o', 'r'], ['e', 'm']]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]]) {
    assert_ne!(N, 0);
    let len = self.len() / N;
    // Splitting at `len() - len * N` leaves the remainder at the front
    // and a multiple-of-N suffix at the back.
    let (remainder, multiple_of_n) = self.split_at(self.len() - len * N);
    // SAFETY: We already panicked for zero, and ensured by construction
    // that the length of the subslice is a multiple of N.
    let array_slice = unsafe { multiple_of_n.as_chunks_unchecked() };
    (remainder, array_slice)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are array references and do not overlap. If `N` does not divide the
/// length of the slice, then the last up to `N-1` elements will be omitted and can be
/// retrieved from the `remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.array_chunks();
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks_exact`]: slice::chunks_exact
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
    // `N == 0` is rejected at runtime for now (see the panic note above).
    assert_ne!(N, 0);
    ArrayChunks::new(self)
}
/// Splits the slice into a slice of `N`-element arrays,
/// assuming that there's no remainder.
///
/// # Safety
///
/// This may only be called when
/// - The slice splits exactly into `N`-element chunks (aka `self.len() % N == 0`).
/// - `N != 0`.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice: &mut [char] = &mut ['l', 'o', 'r', 'e', 'm', '!'];
/// let chunks: &mut [[char; 1]] =
///     // SAFETY: 1-element chunks never have remainder
///     unsafe { slice.as_chunks_unchecked_mut() };
/// chunks[0] = ['L'];
/// assert_eq!(chunks, &[['L'], ['o'], ['r'], ['e'], ['m'], ['!']]);
/// let chunks: &mut [[char; 3]] =
///     // SAFETY: The slice length (6) is a multiple of 3
///     unsafe { slice.as_chunks_unchecked_mut() };
/// chunks[1] = ['a', 'x', '?'];
/// assert_eq!(slice, &['L', 'o', 'r', 'a', 'x', '?']);
///
/// // These would be unsound:
/// // let chunks: &[[_; 5]] = slice.as_chunks_unchecked_mut() // The slice length is not a multiple of 5
/// // let chunks: &[[_; 0]] = slice.as_chunks_unchecked_mut() // Zero-length chunks are never allowed
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub unsafe fn as_chunks_unchecked_mut<const N: usize>(&mut self) -> &mut [[T; N]] {
    // Debug-only checks of the caller's contract; release builds trust it.
    debug_assert_ne!(N, 0);
    debug_assert_eq!(self.len() % N, 0);
    let new_len =
        // SAFETY: Our precondition is exactly what's needed to call this
        unsafe { crate::intrinsics::exact_div(self.len(), N) };
    // SAFETY: We cast a slice of `new_len * N` elements into
    // a slice of `new_len` many `N` elements chunks.
    unsafe { from_raw_parts_mut(self.as_mut_ptr().cast(), new_len) }
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the beginning of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// let (chunks, remainder) = v.as_chunks_mut();
/// remainder[0] = 9;
/// for chunk in chunks {
///     *chunk = [count; 2];
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 9]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_chunks_mut<const N: usize>(&mut self) -> (&mut [[T; N]], &mut [T]) {
    assert_ne!(N, 0);
    // `len * N` cannot overflow because `len` was obtained by dividing
    // `self.len()` by `N`.
    let len = self.len() / N;
    let (multiple_of_n, remainder) = self.split_at_mut(len * N);
    // SAFETY: We already panicked for zero, and ensured by construction
    // that the length of the subslice is a multiple of N.
    let array_slice = unsafe { multiple_of_n.as_chunks_unchecked_mut() };
    (array_slice, remainder)
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the end of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// let (remainder, chunks) = v.as_rchunks_mut();
/// remainder[0] = 9;
/// for chunk in chunks {
///     *chunk = [count; 2];
///     count += 1;
/// }
/// assert_eq!(v, &[9, 1, 1, 2, 2]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_rchunks_mut<const N: usize>(&mut self) -> (&mut [T], &mut [[T; N]]) {
    assert_ne!(N, 0);
    let len = self.len() / N;
    // Splitting at `len() - len * N` leaves the remainder at the front
    // and a multiple-of-N suffix at the back.
    let (remainder, multiple_of_n) = self.split_at_mut(self.len() - len * N);
    // SAFETY: We already panicked for zero, and ensured by construction
    // that the length of the subslice is a multiple of N.
    let array_slice = unsafe { multiple_of_n.as_chunks_unchecked_mut() };
    (remainder, array_slice)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable array references and do not overlap. If `N` does not divide
/// the length of the slice, then the last up to `N-1` elements will be omitted and
/// can be retrieved from the `into_remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact_mut`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.array_chunks_mut() {
///     *chunk = [count; 2];
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
    // `N == 0` is rejected at runtime for now (see the panic note above).
    assert_ne!(N, 0);
    ArrayChunksMut::new(self)
}
/// Returns an iterator over overlapping windows of `N` elements of a slice,
/// starting at the beginning of the slice.
///
/// This is the const generic equivalent of [`windows`].
///
/// If `N` is greater than the size of the slice, it will return no windows.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_windows)]
/// let slice = [0, 1, 2, 3];
/// let mut iter = slice.array_windows();
/// assert_eq!(iter.next().unwrap(), &[0, 1]);
/// assert_eq!(iter.next().unwrap(), &[1, 2]);
/// assert_eq!(iter.next().unwrap(), &[2, 3]);
/// assert!(iter.next().is_none());
/// ```
///
/// [`windows`]: slice::windows
#[unstable(feature = "array_windows", issue = "75027")]
#[inline]
pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> {
    // Zero-width windows are meaningless and could never terminate; reject them.
    assert_ne!(N, 0);
    ArrayWindows::new(self)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`chunks`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert_eq!(iter.next().unwrap(), &['l']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`rchunks_exact`]: slice::rchunks_exact
/// [`chunks`]: slice::chunks
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
    // A chunk size of zero could never make progress, so it is rejected up front.
    assert!(chunk_size != 0);
    RChunks::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`chunks_mut`] for the same iterator but starting at the
/// beginning of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[3, 2, 2, 1, 1]);
/// ```
///
/// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
/// [`chunks_mut`]: slice::chunks_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
    // A chunk size of zero could never make progress, so it is rejected up front.
    assert!(chunk_size != 0);
    RChunksMut::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// end of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`rchunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`chunks_exact`] for the same iterator but starting at the beginning of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['l']);
/// ```
///
/// [`chunks`]: slice::chunks
/// [`rchunks`]: slice::rchunks
/// [`chunks_exact`]: slice::chunks_exact
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
    // A chunk size of zero could never make progress, so it is rejected up front.
    assert!(chunk_size != 0);
    RChunksExact::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`rchunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`chunks_exact_mut`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_exact_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[0, 2, 2, 1, 1]);
/// ```
///
/// [`chunks_mut`]: slice::chunks_mut
/// [`rchunks_mut`]: slice::rchunks_mut
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
    // A chunk size of zero could never make progress, so it is rejected up front.
    assert!(chunk_size != 0);
    RChunksExactMut::new(self, chunk_size)
}
/// Returns an iterator over the slice producing non-overlapping runs
/// of elements using the predicate to separate them.
///
/// The predicate is called on pairs of consecutive elements,
/// meaning that it is called on `slice[0]` and `slice[1]`,
/// then on `slice[1]` and `slice[2]`, and so on.
///
/// # Examples
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &[1, 1, 1, 3, 3, 2, 2, 2];
///
/// let mut iter = slice.group_by(|a, b| a == b);
///
/// assert_eq!(iter.next(), Some(&[1, 1, 1][..]));
/// assert_eq!(iter.next(), Some(&[3, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 2, 2][..]));
/// assert_eq!(iter.next(), None);
/// ```
///
/// This method can be used to extract the sorted subslices:
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &[1, 1, 2, 3, 2, 3, 2, 3, 4];
///
/// let mut iter = slice.group_by(|a, b| a <= b);
///
/// assert_eq!(iter.next(), Some(&[1, 1, 2, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 3, 4][..]));
/// assert_eq!(iter.next(), None);
/// ```
#[unstable(feature = "slice_group_by", issue = "80552")]
#[inline]
pub fn group_by<F>(&self, pred: F) -> GroupBy<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // All grouping logic lives in the `GroupBy` iterator adapter.
    GroupBy::new(self, pred)
}
/// Returns an iterator over the slice producing non-overlapping mutable
/// runs of elements using the predicate to separate them.
///
/// The predicate is called on pairs of consecutive elements,
/// meaning that it is called on `slice[0]` and `slice[1]`,
/// then on `slice[1]` and `slice[2]`, and so on.
///
/// # Examples
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &mut [1, 1, 1, 3, 3, 2, 2, 2];
///
/// let mut iter = slice.group_by_mut(|a, b| a == b);
///
/// assert_eq!(iter.next(), Some(&mut [1, 1, 1][..]));
/// assert_eq!(iter.next(), Some(&mut [3, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 2, 2][..]));
/// assert_eq!(iter.next(), None);
/// ```
///
/// This method can be used to extract the sorted subslices:
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &mut [1, 1, 2, 3, 2, 3, 2, 3, 4];
///
/// let mut iter = slice.group_by_mut(|a, b| a <= b);
///
/// assert_eq!(iter.next(), Some(&mut [1, 1, 2, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 3, 4][..]));
/// assert_eq!(iter.next(), None);
/// ```
#[unstable(feature = "slice_group_by", issue = "80552")]
#[inline]
pub fn group_by_mut<F>(&mut self, pred: F) -> GroupByMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // All grouping logic lives in the `GroupByMut` iterator adapter.
    GroupByMut::new(self, pred)
}
/// Divides one slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 4, 5, 6];
///
/// {
/// let (left, right) = v.split_at(0);
/// assert_eq!(left, []);
/// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
/// let (left, right) = v.split_at(2);
/// assert_eq!(left, [1, 2]);
/// assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// {
/// let (left, right) = v.split_at(6);
/// assert_eq!(left, [1, 2, 3, 4, 5, 6]);
/// assert_eq!(right, []);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
    assert!(mid <= self.len());
    // SAFETY: the assert above guarantees `0 <= mid <= self.len()`, which is
    // exactly the contract `split_at_unchecked` requires of its caller.
    unsafe { self.split_at_unchecked(mid) }
}
/// Divides one mutable slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let mut v = [1, 0, 3, 0, 5, 6];
/// let (left, right) = v.split_at_mut(2);
/// assert_eq!(left, [1, 0]);
/// assert_eq!(right, [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    assert!(mid <= self.len());
    // SAFETY: the assert above guarantees `0 <= mid <= self.len()`, which is
    // exactly the contract `split_at_mut_unchecked` requires of its caller.
    unsafe { self.split_at_mut_unchecked(mid) }
}
/// Divides one slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at`]: slice::split_at
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// #![feature(slice_split_at_unchecked)]
///
/// let v = [1, 2, 3, 4, 5, 6];
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(0);
/// assert_eq!(left, []);
/// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(2);
/// assert_eq!(left, [1, 2]);
/// assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(6);
/// assert_eq!(left, [1, 2, 3, 4, 5, 6]);
/// assert_eq!(right, []);
/// }
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`;
    // given that, both `..mid` and `mid..` are in-bounds ranges.
    unsafe { (self.get_unchecked(..mid), self.get_unchecked(mid..)) }
}
/// Divides one mutable slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at_mut`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at_mut`]: slice::split_at_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// #![feature(slice_split_at_unchecked)]
///
/// let mut v = [1, 0, 3, 0, 5, 6];
/// // scoped to restrict the lifetime of the borrows
/// unsafe {
/// let (left, right) = v.split_at_mut_unchecked(2);
/// assert_eq!(left, [1, 0]);
/// assert_eq!(right, [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// }
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
pub unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    let len = self.len();
    // Going through a raw pointer lets us hand out two disjoint `&mut` views
    // of the same slice without tripping the borrow checker.
    let ptr = self.as_mut_ptr();
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`.
    //
    // `[ptr; mid]` and `[mid; len]` are not overlapping, so returning a mutable reference
    // is fine.
    unsafe { (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid)) }
}
/// Divides one slice into an array and a remainder slice at an index.
///
/// The array will contain all indices from `[0, N)` (excluding
/// the index `N` itself) and the slice will contain all
/// indices from `[N, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `N > len`.
///
/// # Examples
///
/// ```
/// #![feature(split_array)]
///
/// let v = &[1, 2, 3, 4, 5, 6][..];
///
/// {
/// let (left, right) = v.split_array_ref::<0>();
/// assert_eq!(left, &[]);
/// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
/// let (left, right) = v.split_array_ref::<2>();
/// assert_eq!(left, &[1, 2]);
/// assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// {
/// let (left, right) = v.split_array_ref::<6>();
/// assert_eq!(left, &[1, 2, 3, 4, 5, 6]);
/// assert_eq!(right, []);
/// }
/// ```
#[unstable(feature = "split_array", reason = "new API", issue = "90091")]
#[inline]
pub fn split_array_ref<const N: usize>(&self) -> (&[T; N], &[T]) {
    // `split_at` panics when `N > self.len()`, so past this point `a` has exactly `N` elements.
    let (a, b) = self.split_at(N);
    // SAFETY: `a` is a `[T]` of length exactly `N` (enforced by `split_at` above),
    // so reinterpreting its data pointer as `&[T; N]` is valid.
    unsafe { (&*(a.as_ptr() as *const [T; N]), b) }
}
/// Divides one mutable slice into an array and a remainder slice at an index.
///
/// The array will contain all indices from `[0, N)` (excluding
/// the index `N` itself) and the slice will contain all
/// indices from `[N, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `N > len`.
///
/// # Examples
///
/// ```
/// #![feature(split_array)]
///
/// let mut v = &mut [1, 0, 3, 0, 5, 6][..];
/// let (left, right) = v.split_array_mut::<2>();
/// assert_eq!(left, &mut [1, 0]);
/// assert_eq!(right, [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[unstable(feature = "split_array", reason = "new API", issue = "90091")]
#[inline]
pub fn split_array_mut<const N: usize>(&mut self) -> (&mut [T; N], &mut [T]) {
    // `split_at_mut` panics when `N > self.len()`, so past this point `a` has exactly `N` elements.
    let (a, b) = self.split_at_mut(N);
    // SAFETY: `a` is a `[T]` of length exactly `N` (enforced by `split_at_mut` above),
    // so reinterpreting its data pointer as `&mut [T; N]` is valid.
    unsafe { (&mut *(a.as_mut_ptr() as *mut [T; N]), b) }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the first element is matched, an empty slice will be the first item
/// returned by the iterator. Similarly, if the last element in the slice
/// is matched, an empty slice will be the last item returned by the
/// iterator:
///
/// ```
/// let slice = [10, 40, 33];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert!(iter.next().is_none());
/// ```
///
/// If two matched elements are directly adjacent, an empty slice will be
/// present between them:
///
/// ```
/// let slice = [10, 6, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split<F>(&self, pred: F) -> Split<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // All splitting logic lives in the `Split` iterator adapter.
    Split::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_mut(|num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // All splitting logic lives in the `SplitMut` iterator adapter.
    SplitMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is contained in the end of the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the last element of the slice is matched,
/// that element will be considered the terminator of the preceding slice.
/// That slice will be the last item returned by the iterator.
///
/// ```
/// let slice = [3, 10, 40, 33];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[3]);
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "split_inclusive", since = "1.51.0")]
#[inline]
pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // All splitting logic lives in the `SplitInclusive` iterator adapter.
    SplitInclusive::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is contained in the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
/// let terminator_idx = group.len()-1;
/// group[terminator_idx] = 1;
/// }
/// assert_eq!(v, [10, 40, 1, 20, 1, 1]);
/// ```
#[stable(feature = "split_inclusive", since = "1.51.0")]
#[inline]
pub fn split_inclusive_mut<F>(&mut self, pred: F) -> SplitInclusiveMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // All splitting logic lives in the `SplitInclusiveMut` iterator adapter.
    SplitInclusiveMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, starting at the end of the slice and working backwards.
/// The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [11, 22, 33, 0, 44, 55];
/// let mut iter = slice.rsplit(|num| *num == 0);
///
/// assert_eq!(iter.next().unwrap(), &[44, 55]);
/// assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
/// assert_eq!(iter.next(), None);
/// ```
///
/// As with `split()`, if the first or last element is matched, an empty
/// slice will be the first (or last) item returned by the iterator.
///
/// ```
/// let v = &[0, 1, 1, 2, 3, 5, 8];
/// let mut it = v.rsplit(|n| *n % 2 == 0);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next().unwrap(), &[3, 5]);
/// assert_eq!(it.next().unwrap(), &[1, 1]);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next(), None);
/// ```
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit<F>(&self, pred: F) -> RSplit<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // All splitting logic lives in the `RSplit` iterator adapter.
    RSplit::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`, starting at the end of the slice and working
/// backwards. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [100, 400, 300, 200, 600, 500];
///
/// let mut count = 0;
/// for group in v.rsplit_mut(|num| *num % 3 == 0) {
/// count += 1;
/// group[0] = count;
/// }
/// assert_eq!(v, [3, 400, 300, 2, 600, 1]);
/// ```
///
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // All splitting logic lives in the `RSplitMut` iterator adapter.
    RSplitMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once by numbers divisible by 3 (i.e., `[10, 40]`,
/// `[20, 60, 50]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wraps the unbounded `split` iterator and caps it at `n` items.
    SplitN::new(self.split(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 50]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wraps the unbounded `split_mut` iterator and caps it at `n` items.
    SplitNMut::new(self.split_mut(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once, starting from the end, by numbers divisible
/// by 3 (i.e., `[50]`, `[10, 40, 30, 20]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.rsplitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wraps the unbounded `rsplit` iterator and caps it at `n` items.
    RSplitN::new(self.rsplit(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut s = [10, 40, 30, 20, 60, 50];
///
/// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(s, [1, 40, 30, 20, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wraps the unbounded `rsplit_mut` iterator and caps it at `n` items.
    RSplitNMut::new(self.rsplit_mut(pred), n)
}
/// Returns `true` if the slice contains an element with the given value.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.contains(&30));
/// assert!(!v.contains(&50));
/// ```
///
/// If you do not have a `&T`, but some other value that you can compare
/// with one (for example, `String` implements `PartialEq<str>`), you can
/// use `iter().any`:
///
/// ```
/// let v = [String::from("hello"), String::from("world")]; // slice of `String`
/// assert!(v.iter().any(|e| e == "hello")); // search with `&str`
/// assert!(!v.iter().any(|e| e == "hi"));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn contains(&self, x: &T) -> bool
where
    T: PartialEq,
{
    // Dispatches through the `cmp::SliceContains` helper trait rather than a
    // plain `iter().any(..)` — presumably so element types can provide
    // specialized search implementations.
    cmp::SliceContains::slice_contains(x, self)
}
/// Returns `true` if `needle` is a prefix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.starts_with(&[10]));
/// assert!(v.starts_with(&[10, 40]));
/// assert!(!v.starts_with(&[50]));
/// assert!(!v.starts_with(&[10, 50]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.starts_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.starts_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn starts_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // A prefix of `needle.len()` elements must exist (i.e. the slice is long
    // enough) and compare equal to `needle`. An empty needle always matches.
    match self.get(..needle.len()) {
        Some(head) => head == needle,
        None => false,
    }
}
/// Returns `true` if `needle` is a suffix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.ends_with(&[30]));
/// assert!(v.ends_with(&[40, 30]));
/// assert!(!v.ends_with(&[50]));
/// assert!(!v.ends_with(&[50, 30]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.ends_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.ends_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ends_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // The suffix starts `needle.len()` elements before the end; if the slice
    // is shorter than the needle, `checked_sub` yields `None` and we return
    // `false`. An empty needle always matches.
    match self.len().checked_sub(needle.len()) {
        Some(start) => &self[start..] == needle,
        None => false,
    }
}
/// Returns a subslice with the prefix removed.
///
/// If the slice starts with `prefix`, returns the subslice after the prefix, wrapped in `Some`.
/// If `prefix` is empty, simply returns the original slice.
///
/// If the slice does not start with `prefix`, returns `None`.
///
/// # Examples
///
/// ```
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
/// assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
/// assert_eq!(v.strip_prefix(&[50]), None);
/// assert_eq!(v.strip_prefix(&[10, 50]), None);
///
/// let prefix : &str = "he";
/// assert_eq!(b"hello".strip_prefix(prefix.as_bytes()),
/// Some(b"llo".as_ref()));
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[stable(feature = "slice_strip", since = "1.51.0")]
pub fn strip_prefix<P: SlicePattern<Item = T> + ?Sized>(&self, prefix: &P) -> Option<&[T]>
where
    T: PartialEq,
{
    // This function will need rewriting if and when SlicePattern becomes more sophisticated.
    let prefix = prefix.as_slice();
    let n = prefix.len();
    // `get(..n)` is `None` when the slice is shorter than the prefix; when the
    // head matches, `get(n..)` is always `Some` (the same bound was just checked).
    // An empty prefix matches and yields the whole slice back.
    match self.get(..n) {
        Some(head) if head == prefix => self.get(n..),
        _ => None,
    }
}
/// Returns a subslice with the suffix removed.
///
/// If the slice ends with `suffix`, returns the subslice before the suffix, wrapped in `Some`.
/// If `suffix` is empty, simply returns the original slice.
///
/// If the slice does not end with `suffix`, returns `None`.
///
/// # Examples
///
/// ```
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
/// assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
/// assert_eq!(v.strip_suffix(&[50]), None);
/// assert_eq!(v.strip_suffix(&[50, 30]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[stable(feature = "slice_strip", since = "1.51.0")]
pub fn strip_suffix<P: SlicePattern<Item = T> + ?Sized>(&self, suffix: &P) -> Option<&[T]>
where
    T: PartialEq,
{
    // This function will need rewriting if and when SlicePattern becomes more sophisticated.
    let suffix = suffix.as_slice();
    // `checked_sub` is `None` when the slice is shorter than the suffix; the
    // split point is otherwise in bounds, so the indexing below cannot panic.
    // An empty suffix matches and yields the whole slice back.
    match self.len().checked_sub(suffix.len()) {
        Some(split) if &self[split..] == suffix => Some(&self[..split]),
        _ => None,
    }
}
/// Binary searches this sorted slice for a given element.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. The index is chosen
/// deterministically, but is subject to change in future versions of Rust.
/// If the value is not found then [`Result::Err`] is returned, containing
/// the index where a matching element could be inserted while maintaining
/// sorted order.
///
/// See also [`binary_search_by`], [`binary_search_by_key`], and [`partition_point`].
///
/// [`binary_search_by`]: slice::binary_search_by
/// [`binary_search_by_key`]: slice::binary_search_by_key
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// assert_eq!(s.binary_search(&13), Ok(9));
/// assert_eq!(s.binary_search(&4), Err(7));
/// assert_eq!(s.binary_search(&100), Err(13));
/// let r = s.binary_search(&1);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
///
/// If you want to insert an item to a sorted vector, while maintaining
/// sort order:
///
/// ```
/// let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
/// let num = 42;
/// let idx = s.binary_search(&num).unwrap_or_else(|x| x);
/// s.insert(idx, num);
/// assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where
    T: Ord,
{
    // Thin wrapper: compare each probed element against `x` via its `Ord` impl.
    self.binary_search_by(|p| p.cmp(x))
}
/// Binary searches this sorted slice with a comparator function.
///
/// The comparator function should implement an order consistent
/// with the sort order of the underlying slice, returning an
/// order code that indicates whether its argument is `Less`,
/// `Equal` or `Greater` the desired target.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. The index is chosen
/// deterministically, but is subject to change in future versions of Rust.
/// If the value is not found then [`Result::Err`] is returned, containing
/// the index where a matching element could be inserted while maintaining
/// sorted order.
///
/// See also [`binary_search`], [`binary_search_by_key`], and [`partition_point`].
///
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by_key`]: slice::binary_search_by_key
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// let seek = 13;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
/// let seek = 4;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
/// let seek = 100;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
/// let seek = 1;
/// let r = s.binary_search_by(|probe| probe.cmp(&seek));
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> Ordering,
{
    let mut size = self.len();
    let mut left = 0;
    let mut right = size;
    // Loop invariant: a matching element, if one exists, lies within
    // `self[left..right]`, and `size == right - left`.
    while left < right {
        let mid = left + size / 2;
        // SAFETY: the call is made safe by the following invariants:
        // - `mid >= 0`
        // - `mid < size`: `mid` is limited by `[left; right)` bound.
        let cmp = f(unsafe { self.get_unchecked(mid) });
        // The reason why we use if/else control flow rather than match
        // is because match reorders comparison operations, which is perf sensitive.
        // This is x86 asm for u8: https://rust.godbolt.org/z/8Y8Pra.
        if cmp == Less {
            left = mid + 1;
        } else if cmp == Greater {
            right = mid;
        } else {
            // SAFETY: same as the `get_unchecked` above
            unsafe { crate::intrinsics::assume(mid < self.len()) };
            return Ok(mid);
        }
        size = right - left;
    }
    // Not found: `left` is where an equal element could be inserted
    // while keeping the slice sorted.
    Err(left)
}
/// Binary searches this sorted slice with a key extraction function.
///
/// Assumes that the slice is sorted by the key, for instance with
/// [`sort_by_key`] using the same key extraction function.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. The index is chosen
/// deterministically, but is subject to change in future versions of Rust.
/// If the value is not found then [`Result::Err`] is returned, containing
/// the index where a matching element could be inserted while maintaining
/// sorted order.
///
/// See also [`binary_search`], [`binary_search_by`], and [`partition_point`].
///
/// [`sort_by_key`]: slice::sort_by_key
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by`]: slice::binary_search_by
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements in a slice of pairs sorted by
/// their second elements. The first is found, with a uniquely
/// determined position; the second and third are not found; the
/// fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
/// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
/// (1, 21), (2, 34), (4, 55)];
///
/// assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b), Ok(9));
/// assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b), Err(7));
/// assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13));
/// let r = s.binary_search_by_key(&1, |&(a, b)| b);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
// Lint rustdoc::broken_intra_doc_links is allowed as `slice::sort_by_key` is
// in crate `alloc`, and as such doesn't exists yet when building `core`: #74481.
// This breaks links when slice is displayed in core, but changing it to use relative links
// would break when the item is re-exported. So allow the core links to be broken for now.
#[allow(rustdoc::broken_intra_doc_links)]
#[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
#[inline]
pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> B,
    B: Ord,
{
    // Reduce to `binary_search_by`: extract the key of each probed element
    // and compare it against the sought key `b`.
    self.binary_search_by(|probe| f(probe).cmp(b))
}
/// Sorts the slice, but might not preserve the order of equal elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort_unstable();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable(&mut self)
where
    T: Ord,
{
    // Delegate to the in-place quicksort, using the natural `<` ordering as
    // the "is less" predicate.
    sort::quicksort(self, |x, y| x < y);
}
/// Sorts the slice with a comparator function, but might not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
/// total order if it is (for all `a`, `b` and `c`):
///
/// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
/// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
/// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
/// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
///
/// ```
/// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
/// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
/// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
/// ```
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_unstable_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_unstable_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by<F>(&mut self, mut compare: F)
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Adapt the three-way comparator into the boolean "is less" predicate
    // expected by the quicksort implementation.
    sort::quicksort(self, |x, y| matches!(compare(x, y), Ordering::Less));
}
/// Sorts the slice with a key extraction function, but might not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(m \* *n* \* log(*n*)) worst-case, where the key function is
/// *O*(*m*).
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// Due to its key calling strategy, [`sort_unstable_by_key`](#method.sort_unstable_by_key)
/// is likely to be slower than [`sort_by_cached_key`](#method.sort_by_cached_key) in
/// cases where the key function is expensive.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_unstable_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by_key<K, F>(&mut self, mut f: F)
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Note: `f` runs for *both* operands of every comparison, so
    // `sort_by_cached_key` is preferable when the key function is expensive.
    sort::quicksort(self, |x, y| f(x) < f(y));
}
/// Reorder the slice such that the element at `index` is at its final sorted position.
///
/// Deprecated alias of [`select_nth_unstable`](#method.select_nth_unstable);
/// this simply forwards to it.
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[rustc_deprecated(since = "1.49.0", reason = "use the select_nth_unstable() instead")]
#[inline]
pub fn partition_at_index(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    self.select_nth_unstable(index)
}
/// Reorder the slice with a comparator function such that the element at `index` is at its
/// final sorted position.
///
/// Deprecated alias of [`select_nth_unstable_by`](#method.select_nth_unstable_by);
/// this simply forwards to it.
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[rustc_deprecated(since = "1.49.0", reason = "use select_nth_unstable_by() instead")]
#[inline]
pub fn partition_at_index_by<F>(
    &mut self,
    index: usize,
    compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    self.select_nth_unstable_by(index, compare)
}
/// Reorder the slice with a key extraction function such that the element at `index` is at its
/// final sorted position.
///
/// Deprecated alias of [`select_nth_unstable_by_key`](#method.select_nth_unstable_by_key);
/// this simply forwards to it.
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[rustc_deprecated(since = "1.49.0", reason = "use the select_nth_unstable_by_key() instead")]
#[inline]
pub fn partition_at_index_by_key<K, F>(
    &mut self,
    index: usize,
    f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    self.select_nth_unstable_by_key(index, f)
}
/// Reorder the slice such that the element at `index` is at its final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index`. Additionally, this reordering is
/// unstable (i.e. any number of equal elements may end up at position `index`), in-place
/// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also known as "kth
/// element" in other libraries. It returns a triplet of the following values: all elements less
/// than the one at the given index, the value at the given index, and all elements greater than
/// the one at the given index.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median
/// v.select_nth_unstable(2);
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [-3, -5, 1, 2, 4] ||
/// v == [-5, -3, 1, 2, 4] ||
/// v == [-3, -5, 1, 4, 2] ||
/// v == [-5, -3, 1, 4, 2]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    // Quickselect with the natural `<` ordering as the "is less" predicate.
    sort::partition_at_index(self, index, &mut |a: &T, b: &T| a < b)
}
/// Reorder the slice with a comparator function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the comparator function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index,
/// and all elements greater than the one at the given index, using the provided comparator
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median as if the slice were sorted in descending order.
/// v.select_nth_unstable_by(2, |a, b| b.cmp(a));
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [2, 4, 1, -5, -3] ||
/// v == [2, 4, 1, -3, -5] ||
/// v == [4, 2, 1, -5, -3] ||
/// v == [4, 2, 1, -3, -5]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable_by<F>(
    &mut self,
    index: usize,
    mut compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Adapt the three-way comparator into the boolean "is less" predicate
    // expected by the quickselect implementation.
    sort::partition_at_index(self, index, &mut |a: &T, b: &T| compare(a, b) == Less)
}
/// Reorder the slice with a key extraction function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the key extraction function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index, and
/// all elements greater than the one at the given index, using the provided key extraction
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Return the median as if the array were sorted according to absolute value.
/// v.select_nth_unstable_by_key(2, |a| a.abs());
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [1, 2, -3, 4, -5] ||
/// v == [1, 2, -3, -5, 4] ||
/// v == [2, 1, -3, 4, -5] ||
/// v == [2, 1, -3, -5, 4]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable_by_key<K, F>(
    &mut self,
    index: usize,
    mut f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Compare elements by their extracted keys; `f` runs for both operands
    // of every comparison.
    sort::partition_at_index(self, index, &mut |a: &T, b: &T| f(a) < f(b))
}
/// Moves all consecutive repeated elements to the end of the slice according to the
/// [`PartialEq`] trait implementation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
///
/// let (dedup, duplicates) = slice.partition_dedup();
///
/// assert_eq!(dedup, [1, 2, 3, 2, 1]);
/// assert_eq!(duplicates, [2, 3, 1]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup(&mut self) -> (&mut [T], &mut [T])
where
    T: PartialEq,
{
    // `PartialEq` equality is the bucketing relation.
    self.partition_dedup_by(|x, y| x == y)
}
/// Moves all but the first of consecutive elements to the end of the slice satisfying
/// a given equality relation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// The `same_bucket` function is passed references to two elements from the slice and
/// must determine if the elements compare equal. The elements are passed in opposite order
/// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is moved
/// at the end of the slice.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
///
/// let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
///
/// assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
/// assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by<F>(&mut self, mut same_bucket: F) -> (&mut [T], &mut [T])
where
    F: FnMut(&mut T, &mut T) -> bool,
{
    // Although we have a mutable reference to `self`, we cannot make
    // *arbitrary* changes. The `same_bucket` calls could panic, so we
    // must ensure that the slice is in a valid state at all times.
    //
    // The way that we handle this is by using swaps; we iterate
    // over all the elements, swapping as we go so that at the end
    // the elements we wish to keep are in the front, and those we
    // wish to reject are at the back. We can then split the slice.
    // This operation is still `O(n)`.
    //
    // Example: We start in this state, where `r` represents "next
    // read" and `w` represents "next_write".
    //
    //           r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //           w
    //
    // Comparing self[r] against self[w-1], this is not a duplicate, so
    // we swap self[r] and self[w] (no effect as r==w) and then increment both
    // r and w, leaving us with:
    //
    //               r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //               w
    //
    // Comparing self[r] against self[w-1], this value is a duplicate,
    // so we increment `r` but leave everything else unchanged:
    //
    //                   r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //               w
    //
    // Comparing self[r] against self[w-1], this is not a duplicate,
    // so swap self[r] and self[w] and advance r and w:
    //
    //                       r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 2 | 1 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //                   w
    //
    // Not a duplicate, repeat:
    //
    //                           r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 2 | 3 | 1 | 3 |
    //     +---+---+---+---+---+---+
    //                       w
    //
    // Duplicate, advance r. End of slice. Split at w.
    let len = self.len();
    if len <= 1 {
        // 0 or 1 elements: nothing can be a consecutive duplicate.
        return (self, &mut []);
    }
    let ptr = self.as_mut_ptr();
    let mut next_read: usize = 1;
    let mut next_write: usize = 1;
    // SAFETY: the `while` condition guarantees `next_read` and `next_write`
    // are less than `len`, thus are inside `self`. `prev_ptr_write` points to
    // one element before `ptr_write`, but `next_write` starts at 1, so
    // `prev_ptr_write` is never less than 0 and is inside the slice.
    // This fulfils the requirements for dereferencing `ptr_read`, `prev_ptr_write`
    // and `ptr_write`, and for using `ptr.add(next_read)`, `ptr.add(next_write - 1)`
    // and `prev_ptr_write.offset(1)`.
    //
    // `next_write` is also incremented at most once per loop at most meaning
    // no element is skipped when it may need to be swapped.
    //
    // `ptr_read` and `prev_ptr_write` never point to the same element. This
    // is required for `&mut *ptr_read`, `&mut *prev_ptr_write` to be safe.
    // The explanation is simply that `next_read >= next_write` is always true,
    // thus `next_read > next_write - 1` is too.
    unsafe {
        // Avoid bounds checks by using raw pointers.
        while next_read < len {
            let ptr_read = ptr.add(next_read);
            let prev_ptr_write = ptr.add(next_write - 1);
            if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
                if next_read != next_write {
                    let ptr_write = prev_ptr_write.offset(1);
                    mem::swap(&mut *ptr_read, &mut *ptr_write);
                }
                next_write += 1;
            }
            next_read += 1;
        }
    }
    // `[0, next_write)` holds the kept elements, the rest are the rejects.
    self.split_at_mut(next_write)
}
/// Moves all but the first of consecutive elements to the end of the slice that resolve
/// to the same key.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
///
/// let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
///
/// assert_eq!(dedup, [10, 20, 30, 20, 11]);
/// assert_eq!(duplicates, [21, 30, 13]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by_key<K, F>(&mut self, mut key: F) -> (&mut [T], &mut [T])
where
    F: FnMut(&mut T) -> K,
    K: PartialEq,
{
    // Two elements share a bucket when their extracted keys compare equal.
    self.partition_dedup_by(|x, y| key(x) == key(y))
}
/// Rotates the slice in-place such that the first `mid` elements of the
/// slice move to the end while the last `self.len() - mid` elements move to
/// the front. After calling `rotate_left`, the element previously at index
/// `mid` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `mid` is greater than the length of the
/// slice. Note that `mid == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_left(2);
/// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
/// ```
///
/// Rotating a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_left(1);
/// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_left(&mut self, mid: usize) {
    assert!(mid <= self.len());
    // `k` is the length of the tail that ends up at the front; the assert
    // above guarantees the subtraction cannot underflow.
    let k = self.len() - mid;
    let p = self.as_mut_ptr();
    // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
    // valid for reading and writing, as required by `ptr_rotate`.
    unsafe {
        rotate::ptr_rotate(mid, p.add(mid), k);
    }
}
/// Rotates the slice in-place such that the first `self.len() - k`
/// elements of the slice move to the end while the last `k` elements move
/// to the front. After calling `rotate_right`, the element previously at
/// index `self.len() - k` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `k` is greater than the length of the
/// slice. Note that `k == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_right(2);
/// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
/// ```
///
/// Rotate a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_right(1);
/// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_right(&mut self, k: usize) {
    assert!(k <= self.len());
    // `mid` is the length of the head that ends up at the back; the assert
    // above guarantees the subtraction cannot underflow.
    let mid = self.len() - k;
    let p = self.as_mut_ptr();
    // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
    // valid for reading and writing, as required by `ptr_rotate`.
    unsafe {
        rotate::ptr_rotate(mid, p.add(mid), k);
    }
}
/// Fills `self` with elements by cloning `value`.
///
/// # Examples
///
/// ```
/// let mut buf = vec![0; 10];
/// buf.fill(1);
/// assert_eq!(buf, vec![1; 10]);
/// ```
#[doc(alias = "memset")]
#[stable(feature = "slice_fill", since = "1.50.0")]
pub fn fill(&mut self, value: T)
where
    T: Clone,
{
    // Dispatch through the specialization trait so suitable element types
    // can take an optimized path (impls live in the `specialize` module,
    // not visible here).
    specialize::SpecFill::spec_fill(self, value);
}
/// Fills `self` with elements returned by calling a closure repeatedly.
///
/// This method uses a closure to create new values. If you'd rather
/// [`Clone`] a given value, use [`fill`]. If you want to use the [`Default`]
/// trait to generate values, you can pass [`Default::default`] as the
/// argument.
///
/// [`fill`]: slice::fill
///
/// # Examples
///
/// ```
/// let mut buf = vec![1; 10];
/// buf.fill_with(Default::default);
/// assert_eq!(buf, vec![0; 10]);
/// ```
#[doc(alias = "memset")]
#[stable(feature = "slice_fill_with", since = "1.51.0")]
pub fn fill_with<F>(&mut self, mut f: F)
where
    F: FnMut() -> T,
{
    // Overwrite every slot, front to back, with a freshly produced value.
    self.iter_mut().for_each(|slot| *slot = f());
}
/// Copies the elements from `src` into `self`.
///
/// The length of `src` must be the same as `self`.
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Cloning two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.clone_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `clone_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].clone_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.clone_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`copy_from_slice`]: slice::copy_from_slice
/// [`split_at_mut`]: slice::split_at_mut
#[stable(feature = "clone_from_slice", since = "1.7.0")]
pub fn clone_from_slice(&mut self, src: &[T])
where
    T: Clone,
{
    // Delegates to a specialization hook defined elsewhere in this module;
    // the length-mismatch panic documented above happens inside it.
    self.spec_clone_from(src);
}
/// Copies all elements from `src` into `self`, using a memcpy.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` does not implement `Copy`, use [`clone_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Copying two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.copy_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `copy_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].copy_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.copy_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`clone_from_slice`]: slice::clone_from_slice
/// [`split_at_mut`]: slice::split_at_mut
#[doc(alias = "memcpy")]
#[stable(feature = "copy_from_slice", since = "1.9.0")]
pub fn copy_from_slice(&mut self, src: &[T])
where
    T: Copy,
{
    // The panic code path was put into a cold function to not bloat the
    // call site.
    #[inline(never)]
    #[cold]
    #[track_caller]
    fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
        panic!(
            "source slice length ({}) does not match destination slice length ({})",
            src_len, dst_len,
        );
    }
    // Verify lengths up front so the raw copy below can assume equal sizes.
    if self.len() != src.len() {
        len_mismatch_fail(self.len(), src.len());
    }
    // SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was
    // checked to have the same length. The slices cannot overlap because
    // mutable references are exclusive.
    unsafe {
        ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
    }
}
/// Copies elements from one part of the slice to another part of itself,
/// using a memmove.
///
/// `src` is the range within `self` to copy from. `dest` is the starting
/// index of the range within `self` to copy to, which will have the same
/// length as `src`. The two ranges may overlap. The ends of the two ranges
/// must be less than or equal to `self.len()`.
///
/// # Panics
///
/// This function will panic if either range exceeds the end of the slice,
/// or if the end of `src` is before the start.
///
/// # Examples
///
/// Copying four bytes within a slice:
///
/// ```
/// let mut bytes = *b"Hello, World!";
///
/// bytes.copy_within(1..5, 8);
///
/// assert_eq!(&bytes, b"Hello, Wello!");
/// ```
#[stable(feature = "copy_within", since = "1.37.0")]
#[track_caller]
pub fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dest: usize)
where
    T: Copy,
{
    // `slice::range` panics if `src` is out of bounds or inverted, so after
    // this `count <= self.len()` holds and the subtraction below cannot
    // underflow.
    let Range { start: src_start, end: src_end } = slice::range(src, ..self.len());
    let count = src_end - src_start;
    assert!(dest <= self.len() - count, "dest is out of bounds");
    // SAFETY: the conditions for `ptr::copy` have all been checked above,
    // as have those for `ptr::add`.
    unsafe {
        // Derive both `src_ptr` and `dest_ptr` from the same loan
        let ptr = self.as_mut_ptr();
        let src_ptr = ptr.add(src_start);
        let dest_ptr = ptr.add(dest);
        // `ptr::copy` has memmove semantics: the two regions may overlap.
        ptr::copy(src_ptr, dest_ptr, count);
    }
}
/// Swaps all elements in `self` with those in `other`.
///
/// The length of `other` must be the same as `self`.
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Example
///
/// Swapping two elements across slices:
///
/// ```
/// let mut slice1 = [0, 0];
/// let mut slice2 = [1, 2, 3, 4];
///
/// slice1.swap_with_slice(&mut slice2[2..]);
///
/// assert_eq!(slice1, [3, 4]);
/// assert_eq!(slice2, [1, 2, 0, 0]);
/// ```
///
/// Rust enforces that there can only be one mutable reference to a
/// particular piece of data in a particular scope. Because of this,
/// attempting to use `swap_with_slice` on a single slice will result in
/// a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
/// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// mutable sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.swap_with_slice(&mut right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 1, 2]);
/// ```
///
/// [`split_at_mut`]: slice::split_at_mut
#[stable(feature = "swap_with_slice", since = "1.27.0")]
pub fn swap_with_slice(&mut self, other: &mut [T]) {
    assert!(self.len() == other.len(), "destination and source slices have different lengths");
    // SAFETY: `self` is valid for `self.len()` elements by definition, and `other`
    // was checked to have the same length. The slices cannot overlap because
    // mutable references are exclusive.
    unsafe {
        ptr::swap_nonoverlapping(self.as_mut_ptr(), other.as_mut_ptr(), self.len());
    }
}
/// Function to calculate lengths of the middle and trailing slice for `align_to{,_mut}`.
///
/// Returns `(us_len, ts_len)`: how many `U`s fit in the middle slice and how
/// many `T`s are left over for the trailing slice.
fn align_to_offsets<U>(&self) -> (usize, usize) {
    // What we gonna do about `rest` is figure out what multiple of `U`s we can put in a
    // lowest number of `T`s. And how many `T`s we need for each such "multiple".
    //
    // Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
    // for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
    // place of every 3 Ts in the `rest` slice. A bit more complicated.
    //
    // Formula to calculate this is:
    //
    // Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
    // Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
    //
    // Expanded and simplified:
    //
    // Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
    // Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
    //
    // Luckily since all this is constant-evaluated... performance here matters not!
    #[inline]
    fn gcd(a: usize, b: usize) -> usize {
        use crate::intrinsics;
        // iterative Stein's algorithm (binary GCD)
        // We should still make this `const fn` (and revert to recursive algorithm if we do)
        // because relying on llvm to consteval all this is… well, it makes me uncomfortable.
        // SAFETY: `a` and `b` are checked to be non-zero values.
        let (ctz_a, mut ctz_b) = unsafe {
            if a == 0 {
                return b;
            }
            if b == 0 {
                return a;
            }
            (intrinsics::cttz_nonzero(a), intrinsics::cttz_nonzero(b))
        };
        let k = ctz_a.min(ctz_b);
        let mut a = a >> ctz_a;
        let mut b = b;
        loop {
            // remove all factors of 2 from b
            b >>= ctz_b;
            if a > b {
                mem::swap(&mut a, &mut b);
            }
            b = b - a;
            // SAFETY: `b` is checked to be non-zero.
            unsafe {
                if b == 0 {
                    break;
                }
                ctz_b = intrinsics::cttz_nonzero(b);
            }
        }
        a << k
    }
    let gcd: usize = gcd(mem::size_of::<T>(), mem::size_of::<U>());
    let ts: usize = mem::size_of::<U>() / gcd;
    let us: usize = mem::size_of::<T>() / gcd;
    // Armed with this knowledge, we can find how many `U`s we can fit!
    let us_len = self.len() / ts * us;
    // And how many `T`s will be in the trailing slice!
    let ts_len = self.len() % ts;
    (us_len, ts_len)
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
///     let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
///     let (prefix, shorts, suffix) = bytes.align_to::<u16>();
///     // less_efficient_algorithm_for_bytes(prefix);
///     // more_efficient_algorithm_for_aligned_shorts(shorts);
///     // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
// Note that most of this function will be constant-evaluated,
if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
// handle ZSTs specially, which is – don't handle them at all.
return (self, &[], &[]);
}
// First, find at what point do we split between the first and 2nd slice. Easy with
// ptr.align_offset.
let ptr = self.as_ptr();
// SAFETY: See the `align_to_mut` method for the detailed safety comment.
let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
// `align_offset` may report that alignment is impossible (it then returns
// a value past the slice, e.g. `usize::MAX`); in that case everything is prefix.
if offset > self.len() {
(self, &[], &[])
} else {
let (left, rest) = self.split_at(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
// SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
// since the caller guarantees that we can transmute `T` to `U` safely.
unsafe {
(
left,
from_raw_parts(rest.as_ptr() as *const U, us_len),
from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
)
}
}
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
///     let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
///     let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
///     // less_efficient_algorithm_for_bytes(prefix);
///     // more_efficient_algorithm_for_aligned_shorts(shorts);
///     // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
// Note that most of this function will be constant-evaluated,
if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
// handle ZSTs specially, which is – don't handle them at all.
return (self, &mut [], &mut []);
}
// First, find at what point do we split between the first and 2nd slice. Easy with
// ptr.align_offset.
let ptr = self.as_ptr();
// SAFETY: Here we are ensuring we will use aligned pointers for U for the
// rest of the method. This is done by passing a pointer to &[T] with an
// alignment targeted for U.
// `crate::ptr::align_offset` is called with a correctly aligned and
// valid pointer `ptr` (it comes from a reference to `self`) and with
// a size that is a power of two (since it comes from the alignement for U),
// satisfying its safety constraints.
let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
// `align_offset` may report that alignment is impossible; in that case
// everything is returned as the (mutable) prefix.
if offset > self.len() {
(self, &mut [], &mut [])
} else {
let (left, rest) = self.split_at_mut(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
let rest_len = rest.len();
let mut_ptr = rest.as_mut_ptr();
// We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
// SAFETY: see comments for `align_to`.
unsafe {
(
left,
from_raw_parts_mut(mut_ptr as *mut U, us_len),
from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
)
}
}
}
/// Checks whether this slice is sorted in non-decreasing order.
///
/// Every adjacent pair `(a, b)` must satisfy `a <= b`; slices with zero or
/// one element are trivially sorted.
///
/// Because `T` only needs to implement `PartialOrd` (not `Ord`), two
/// consecutive elements that cannot be compared — such as a NaN float next
/// to any number — make this function return `false`.
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
/// let empty: [i32; 0] = [];
///
/// assert!([1, 2, 2, 9].is_sorted());
/// assert!(![1, 3, 2, 4].is_sorted());
/// assert!([0].is_sorted());
/// assert!(empty.is_sorted());
/// assert!(![0.0, 1.0, f32::NAN].is_sorted());
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted(&self) -> bool
where
    T: PartialOrd,
{
    // Delegate to the comparator-based variant, using `partial_cmp` itself
    // as the comparator.
    self.is_sorted_by(|lhs, rhs| lhs.partial_cmp(rhs))
}
/// Checks whether this slice is sorted according to `compare`.
///
/// The given closure replaces `PartialOrd::partial_cmp` as the pairwise
/// ordering test; in every other respect this behaves like [`is_sorted`]
/// (see its documentation for details).
///
/// [`is_sorted`]: slice::is_sorted
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
where
    F: FnMut(&T, &T) -> Option<Ordering>,
{
    // The slice iterator yields `&T`, and `Iterator::is_sorted_by` hands the
    // closure `&&T` pairs, so deref once before calling `compare`.
    self.iter().is_sorted_by(|lhs, rhs| compare(*lhs, *rhs))
}
/// Checks whether this slice is sorted when viewed through the key
/// extraction function `f`.
///
/// Each element is mapped to a key with `f`, and the resulting key sequence
/// is checked for non-decreasing order; in every other respect this behaves
/// like [`is_sorted`] (see its documentation for details).
///
/// [`is_sorted`]: slice::is_sorted
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
///
/// assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
/// assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
where
    F: FnMut(&T) -> K,
    K: PartialOrd,
{
    // Delegate to the iterator adapter, which computes each key exactly once.
    self.iter().is_sorted_by_key(f)
}
/// Returns the index of the partition point according to the given predicate
/// (the index of the first element of the second partition).
///
/// The slice is assumed to be partitioned according to the given predicate:
/// every element for which `pred` returns `true` must come before every
/// element for which it returns `false`. For example,
/// `[7, 15, 3, 5, 4, 12, 6]` is partitioned under `x % 2 != 0` (all odd
/// numbers first, all even numbers last).
///
/// If the slice is not partitioned, the returned result is unspecified and
/// meaningless, as this method performs a kind of binary search.
///
/// See also [`binary_search`], [`binary_search_by`], and [`binary_search_by_key`].
///
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by`]: slice::binary_search_by
/// [`binary_search_by_key`]: slice::binary_search_by_key
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 3, 5, 6, 7];
/// let i = v.partition_point(|&x| x < 5);
///
/// assert_eq!(i, 4);
/// assert!(v[..i].iter().all(|&x| x < 5));
/// assert!(v[i..].iter().all(|&x| !(x < 5)));
/// ```
#[stable(feature = "partition_point", since = "1.52.0")]
pub fn partition_point<P>(&self, mut pred: P) -> usize
where
    P: FnMut(&T) -> bool,
{
    // Binary-search for the boundary: elements satisfying `pred` compare as
    // "less" and the rest as "greater", so the comparator never reports an
    // exact match and the search always yields the insertion index — which
    // is precisely the first index of the second partition. The `Ok` arm is
    // therefore unreachable in practice, but handling both arms identically
    // keeps the match exhaustive without an `unwrap`.
    match self.binary_search_by(|elem| if pred(elem) { Less } else { Greater }) {
        Ok(idx) | Err(idx) => idx,
    }
}
}
/// Specialization trait used by `clone_from_slice` to choose an element-copy
/// strategy: a generic `Clone`-based loop by default, overridden with a bulk
/// copy for `T: Copy` (see the two impls below).
trait CloneFromSpec<T> {
/// Clones every element of `src` into the corresponding position of `self`.
/// Both impls require the two slices to have equal lengths (the default
/// impl asserts this explicitly).
fn spec_clone_from(&mut self, src: &[T]);
}
impl<T> CloneFromSpec<T> for [T]
where
    T: Clone,
{
    /// Generic fallback: element-wise `clone_from`.
    ///
    /// `#[track_caller]` makes the length-mismatch panic below point at the
    /// caller of `clone_from_slice` rather than at this internal helper.
    #[track_caller]
    default fn spec_clone_from(&mut self, src: &[T]) {
        assert!(self.len() == src.len(), "destination and source slices have different lengths");
        // NOTE: We need to explicitly slice them to the same length
        // to make it easier for the optimizer to elide bounds checking.
        // But since it can't be relied on we also have an explicit specialization for T: Copy.
        let len = self.len();
        let src = &src[..len];
        for i in 0..len {
            self[i].clone_from(&src[i]);
        }
    }
}
impl<T> CloneFromSpec<T> for [T]
where
    T: Copy,
{
    /// Specialized path for `T: Copy`: one bulk `copy_from_slice` instead of
    /// a per-element `clone_from` loop.
    ///
    /// `#[track_caller]` keeps the length-mismatch panic raised inside
    /// `copy_from_slice` attributed to the original caller.
    #[track_caller]
    fn spec_clone_from(&mut self, src: &[T]) {
        self.copy_from_slice(src);
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl<T> const Default for &[T] {
/// Creates an empty slice.
///
/// The result has length 0 and borrows nothing, so it is valid for any
/// lifetime and any element type.
fn default() -> Self {
&[]
}
}
#[stable(feature = "mut_slice_default", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl<T> const Default for &mut [T] {
/// Creates a mutable empty slice.
///
/// Like the shared-slice impl above, the result has length 0; mutability
/// is harmless because there are no elements to mutate.
fn default() -> Self {
&mut []
}
}
#[unstable(feature = "slice_pattern", reason = "stopgap trait for slice patterns", issue = "56345")]
/// Patterns in slices - currently, only used by `strip_prefix` and `strip_suffix`. At a future
/// point, we hope to generalise `core::str::Pattern` (which at the time of writing is limited to
/// `str`) to slices, and then this trait will be replaced or abolished.
pub trait SlicePattern {
/// The element type of the slice being matched on.
type Item;
/// Currently, the consumers of `SlicePattern` need a slice.
/// Implementors simply expose their contents as `&[Self::Item]`.
fn as_slice(&self) -> &[Self::Item];
}
#[stable(feature = "slice_strip", since = "1.51.0")]
// A slice is trivially its own pattern: return it unchanged.
impl<T> SlicePattern for [T] {
type Item = T;
#[inline]
fn as_slice(&self) -> &[Self::Item] {
self
}
}
#[stable(feature = "slice_strip", since = "1.51.0")]
// Arrays coerce to slices, so an array pattern is just its slice view.
impl<T, const N: usize> SlicePattern for [T; N] {
type Item = T;
#[inline]
fn as_slice(&self) -> &[Self::Item] {
self
}
}
Auto merge of #90306 - kornelski:slicecloneasset, r=joshtriplett
track_caller for slice length assertions
`clone_from_slice` was missing `#[track_caller]`, so the panic raised by its length assertion did not report the caller's location.
These are small generic methods, so `track_caller` should hopefully be inlined away entirely, but it may be worth running a benchmark to confirm.
//! Slice management and manipulation.
//!
//! For more details see [`std::slice`].
//!
//! [`std::slice`]: ../../std/slice/index.html
#![stable(feature = "rust1", since = "1.0.0")]
use crate::cmp::Ordering::{self, Greater, Less};
use crate::marker::Copy;
use crate::mem;
use crate::num::NonZeroUsize;
use crate::ops::{FnMut, Range, RangeBounds};
use crate::option::Option;
use crate::option::Option::{None, Some};
use crate::ptr;
use crate::result::Result;
use crate::result::Result::{Err, Ok};
use crate::slice;
#[unstable(
feature = "slice_internals",
issue = "none",
reason = "exposed from core to be reused in std; use the memchr crate"
)]
/// Pure rust memchr implementation, taken from rust-memchr
pub mod memchr;
mod ascii;
mod cmp;
mod index;
mod iter;
mod raw;
mod rotate;
mod sort;
mod specialize;
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Chunks, ChunksMut, Windows};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Iter, IterMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{RSplitN, RSplitNMut, Split, SplitMut, SplitN, SplitNMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use iter::{RSplit, RSplitMut};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use iter::{ChunksExact, ChunksExactMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use iter::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[unstable(feature = "array_chunks", issue = "74985")]
pub use iter::{ArrayChunks, ArrayChunksMut};
#[unstable(feature = "array_windows", issue = "75027")]
pub use iter::ArrayWindows;
#[unstable(feature = "slice_group_by", issue = "80552")]
pub use iter::{GroupBy, GroupByMut};
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use iter::{SplitInclusive, SplitInclusiveMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use raw::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "from_ref", since = "1.28.0")]
pub use raw::{from_mut, from_ref};
// This function is public only because there is no other way to unit test heapsort.
#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
pub use sort::heapsort;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use index::SliceIndex;
#[unstable(feature = "slice_range", issue = "76393")]
pub use index::range;
#[unstable(feature = "inherent_ascii_escape", issue = "77174")]
pub use ascii::EscapeAscii;
#[lang = "slice"]
#[cfg(not(test))]
impl<T> [T] {
/// Returns the number of elements in the slice.
///
/// This is O(1): the length is part of the slice reference itself and is
/// read without inspecting the pointed-to memory.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert_eq!(a.len(), 3);
/// ```
#[lang = "slice_len_fn"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_len", since = "1.39.0")]
#[inline]
// SAFETY: const sound because we transmute out the length field as a usize (which it must be)
pub const fn len(&self) -> usize {
// FIXME: Replace with `crate::ptr::metadata(self)` when that is const-stable.
// As of this writing this causes a "Const-stable functions can only call other
// const-stable functions" error.
// SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T
// and PtrComponents<T> have the same memory layouts. Only std can make this
// guarantee.
unsafe { crate::ptr::PtrRepr { const_ptr: self }.components.metadata }
}
/// Returns `true` if the slice contains no elements.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert!(!a.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_is_empty", since = "1.39.0")]
#[inline]
pub const fn is_empty(&self) -> bool {
    // An empty slice is exactly the one matching the empty slice pattern.
    matches!(self, [])
}
/// Returns a reference to the first element, or `None` if the slice is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&10), v.first());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.first());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn first(&self) -> Option<&T> {
    match self {
        [head, ..] => Some(head),
        [] => None,
    }
}
/// Returns a mutable reference to the first element, or `None` if the slice
/// is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(first) = x.first_mut() {
///     *first = 5;
/// }
/// assert_eq!(x, &[5, 1, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn first_mut(&mut self) -> Option<&mut T> {
    match self {
        [head, ..] => Some(head),
        [] => None,
    }
}
/// Splits off the first element, returning it together with the remaining
/// tail, or `None` if the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first() {
///     assert_eq!(first, &0);
///     assert_eq!(elements, &[1, 2]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn split_first(&self) -> Option<(&T, &[T])> {
    match self {
        [head, rest @ ..] => Some((head, rest)),
        [] => None,
    }
}
/// Splits off the first element mutably, returning it together with the
/// remaining tail, or `None` if the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first_mut() {
///     *first = 3;
///     elements[0] = 4;
///     elements[1] = 5;
/// }
/// assert_eq!(x, &[3, 4, 5]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
    match self {
        [head, rest @ ..] => Some((head, rest)),
        [] => None,
    }
}
/// Splits off the last element, returning it together with the preceding
/// elements, or `None` if the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last() {
///     assert_eq!(last, &2);
///     assert_eq!(elements, &[0, 1]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn split_last(&self) -> Option<(&T, &[T])> {
    match self {
        [rest @ .., end] => Some((end, rest)),
        [] => None,
    }
}
/// Splits off the last element mutably, returning it together with the
/// preceding elements, or `None` if the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last_mut() {
///     *last = 3;
///     elements[0] = 4;
///     elements[1] = 5;
/// }
/// assert_eq!(x, &[4, 5, 3]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
    match self {
        [rest @ .., end] => Some((end, rest)),
        [] => None,
    }
}
/// Returns a reference to the last element, or `None` if the slice is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&30), v.last());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.last());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
pub const fn last(&self) -> Option<&T> {
    match self {
        [.., end] => Some(end),
        [] => None,
    }
}
/// Returns a mutable reference to the last element, or `None` if the slice
/// is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(last) = x.last_mut() {
///     *last = 10;
/// }
/// assert_eq!(x, &[0, 1, 10]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
pub const fn last_mut(&mut self) -> Option<&mut T> {
    match self {
        [.., end] => Some(end),
        [] => None,
    }
}
/// Returns a reference to an element or subslice depending on the type of
/// index.
///
/// - If given a position, returns a reference to the element at that
///   position or `None` if out of bounds.
/// - If given a range, returns the subslice corresponding to that range,
///   or `None` if out of bounds.
///
/// Unlike indexing with `[]`, this never panics on an out-of-bounds index.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&40), v.get(1));
/// assert_eq!(Some(&[10, 40][..]), v.get(0..2));
/// assert_eq!(None, v.get(3));
/// assert_eq!(None, v.get(0..4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get<I>(&self, index: I) -> Option<&I::Output>
where
I: SliceIndex<Self>,
{
// The index type (usize, Range, …) carries the lookup logic via `SliceIndex`.
index.get(self)
}
/// Returns a mutable reference to an element or subslice depending on the
/// type of index (see [`get`]) or `None` if the index is out of bounds.
///
/// [`get`]: slice::get
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(elem) = x.get_mut(1) {
///     *elem = 42;
/// }
/// assert_eq!(x, &[0, 42, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
I: SliceIndex<Self>,
{
// Mirrors `get`; the `SliceIndex` impl performs the bounds check.
index.get_mut(self)
}
/// Returns a reference to an element or subslice, without doing bounds
/// checking.
///
/// For a safe alternative see [`get`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
/// [`get`]: slice::get
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
///
/// unsafe {
///     assert_eq!(x.get_unchecked(1), &2);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where
I: SliceIndex<Self>,
{
// SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`;
// the slice is dereferencable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &*index.get_unchecked(self) }
}
/// Returns a mutable reference to an element or subslice, without doing
/// bounds checking.
///
/// For a safe alternative see [`get_mut`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
/// [`get_mut`]: slice::get_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
///
/// unsafe {
///     let elem = x.get_unchecked_mut(1);
///     *elem = 13;
/// }
/// assert_eq!(x, &[1, 13, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where
I: SliceIndex<Self>,
{
// SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
// the slice is dereferencable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &mut *index.get_unchecked_mut(self) }
}
/// Returns a raw pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// The caller must also ensure that the memory the pointer (non-transitively) points to
/// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
/// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// The returned pointer equals the address of the slice's first element
/// (or its base address if the slice is empty).
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let x_ptr = x.as_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
///     }
/// }
/// ```
///
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
#[inline]
pub const fn as_ptr(&self) -> *const T {
// A plain cast: the fat slice pointer is narrowed to its data pointer.
self as *const [T] as *const T
}
/// Returns an unsafe mutable pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// let x_ptr = x.as_mut_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         *x_ptr.add(i) += 2;
///     }
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_mut_ptr(&mut self) -> *mut T {
// Same narrowing cast as `as_ptr`, preserving mutability.
self as *mut [T] as *mut T
}
/// Returns the two raw pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_ptr`] for warnings on using these pointers. The end pointer
/// requires extra caution, as it does not point to a valid element in the
/// slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// It can also be useful to check if a pointer to an element refers to an
/// element of this slice:
///
/// ```
/// let a = [1, 2, 3];
/// let x = &a[1] as *const _;
/// let y = &5 as *const _;
///
/// assert!(a.as_ptr_range().contains(&x));
/// assert!(!a.as_ptr_range().contains(&y));
/// ```
///
/// Neither pointer is dereferenced by this method itself.
///
/// [`as_ptr`]: slice::as_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_ptr_range(&self) -> Range<*const T> {
let start = self.as_ptr();
// SAFETY: The `add` here is safe, because:
//
// - Both pointers are part of the same object, as pointing directly
// past the object also counts.
//
// - The size of the slice is never larger than isize::MAX bytes, as
// noted here:
// - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447
// - https://doc.rust-lang.org/reference/behavior-considered-undefined.html
// - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety
// (This doesn't seem normative yet, but the very same assumption is
// made in many places, including the Index implementation of slices.)
//
// - There is no wrapping around involved, as slices do not wrap past
// the end of the address space.
//
// See the documentation of pointer::add.
let end = unsafe { start.add(self.len()) };
start..end
}
/// Returns the two unsafe mutable pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_mut_ptr`] for warnings on using these pointers. The end
/// pointer requires extra caution, as it does not point to a valid element
/// in the slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
let start = self.as_mut_ptr();
// SAFETY: See as_ptr_range() above for why `add` here is safe.
let end = unsafe { start.add(self.len()) };
start..end
}
/// Exchanges the elements at indices `a` and `b`.
///
/// # Arguments
///
/// * a - The index of the first element
/// * b - The index of the second element
///
/// # Panics
///
/// Panics if `a` or `b` are out of bounds.
///
/// # Examples
///
/// ```
/// let mut v = ["a", "b", "c", "d", "e"];
/// v.swap(2, 4);
/// assert!(v == ["a", "b", "e", "d", "c"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn swap(&mut self, a: usize, b: usize) {
    // Bounds-check both indices up front via the indexing operator, so an
    // out-of-bounds `a` (checked first) or `b` panics with the standard
    // index-out-of-bounds message.
    let _ = (&self[a], &self[b]);
    // SAFETY: both indices were just verified to be within bounds.
    unsafe { self.swap_unchecked(a, b) }
}
/// Swaps two elements in the slice, without doing bounds checking.
///
/// For a safe alternative see [`swap`].
///
/// # Arguments
///
/// * a - The index of the first element
/// * b - The index of the second element
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*.
/// The caller has to ensure that `a < self.len()` and `b < self.len()`.
///
/// # Examples
///
/// ```
/// #![feature(slice_swap_unchecked)]
///
/// let mut v = ["a", "b", "c", "d"];
/// // SAFETY: we know that 1 and 3 are both indices of the slice
/// unsafe { v.swap_unchecked(1, 3) };
/// assert!(v == ["a", "d", "c", "b"]);
/// ```
///
/// [`swap`]: slice::swap
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[unstable(feature = "slice_swap_unchecked", issue = "88539")]
pub unsafe fn swap_unchecked(&mut self, a: usize, b: usize) {
// In debug builds, still bounds-check via the indexing operator to catch
// contract violations early; release builds skip this entirely.
#[cfg(debug_assertions)]
{
let _ = &self[a];
let _ = &self[b];
}
let ptr = self.as_mut_ptr();
// SAFETY: caller has to guarantee that `a < self.len()` and `b < self.len()`
unsafe {
ptr::swap(ptr.add(a), ptr.add(b));
}
}
/// Reverses the order of elements in the slice, in place.
///
/// This runs in O(n) time and uses no extra allocation.
///
/// # Examples
///
/// ```
/// let mut v = [1, 2, 3];
/// v.reverse();
/// assert!(v == [3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn reverse(&mut self) {
// `i` counts how many elements have already been swapped from each end.
let mut i: usize = 0;
let ln = self.len();
// For very small types, all the individual reads in the normal
// path perform poorly. We can do better, given efficient unaligned
// load/store, by loading a larger chunk and reversing a register.
// Ideally LLVM would do this for us, as it knows better than we do
// whether unaligned reads are efficient (since that changes between
// different ARM versions, for example) and what the best chunk size
// would be. Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls
// the loop, so we need to do this ourselves. (Hypothesis: reverse
// is troublesome because the sides can be aligned differently --
// will be, when the length is odd -- so there's no way of emitting
// pre- and postludes to use fully-aligned SIMD in the middle.)
let fast_unaligned = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
if fast_unaligned && mem::size_of::<T>() == 1 {
// Use the llvm.bswap intrinsic to reverse u8s in a usize
let chunk = mem::size_of::<usize>();
while i + chunk - 1 < ln / 2 {
// SAFETY: There are several things to check here:
//
// - Note that `chunk` is either 4 or 8 due to the cfg check
// above. So `chunk - 1` is positive.
// - Indexing with index `i` is fine as the loop check guarantees
// `i + chunk - 1 < ln / 2`
// <=> `i < ln / 2 - (chunk - 1) < ln / 2 < ln`.
// - Indexing with index `ln - i - chunk = ln - (i + chunk)` is fine:
// - `i + chunk > 0` is trivially true.
// - The loop check guarantees:
// `i + chunk - 1 < ln / 2`
// <=> `i + chunk ≤ ln / 2 ≤ ln`, thus subtraction does not underflow.
// - The `read_unaligned` and `write_unaligned` calls are fine:
// - `pa` points to index `i` where `i < ln / 2 - (chunk - 1)`
// (see above) and `pb` points to index `ln - i - chunk`, so
// both are at least `chunk`
// many bytes away from the end of `self`.
// - Any initialized memory is valid `usize`.
unsafe {
let ptr = self.as_mut_ptr();
let pa = ptr.add(i);
let pb = ptr.add(ln - i - chunk);
let va = ptr::read_unaligned(pa as *mut usize);
let vb = ptr::read_unaligned(pb as *mut usize);
ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
}
i += chunk;
}
}
if fast_unaligned && mem::size_of::<T>() == 2 {
// Use rotate-by-16 to reverse u16s in a u32
let chunk = mem::size_of::<u32>() / 2;
while i + chunk - 1 < ln / 2 {
// SAFETY: An unaligned u32 can be read from `i` if `i + 1 < ln`
// (and obviously `i < ln`), because each element is 2 bytes and
// we're reading 4.
//
// `i + chunk - 1 < ln / 2` # while condition
// `i + 2 - 1 < ln / 2`
// `i + 1 < ln / 2`
//
// Since it's less than the length divided by 2, then it must be
// in bounds.
//
// This also means that the condition `0 < i + chunk <= ln` is
// always respected, ensuring the `pb` pointer can be used
// safely.
unsafe {
let ptr = self.as_mut_ptr();
let pa = ptr.add(i);
let pb = ptr.add(ln - i - chunk);
let va = ptr::read_unaligned(pa as *mut u32);
let vb = ptr::read_unaligned(pb as *mut u32);
ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
}
i += chunk;
}
}
// Scalar tail: swap the remaining mirrored pairs one at a time.
while i < ln / 2 {
// SAFETY: `i` is inferior to half the length of the slice so
// accessing `i` and `ln - i - 1` is safe (`i` starts at 0 and
// will not go further than `ln / 2 - 1`).
// The resulting pointers `pa` and `pb` are therefore valid and
// aligned, and can be read from and written to.
unsafe {
self.swap_unchecked(i, ln - i - 1);
}
i += 1;
}
}
/// Returns an iterator over the slice.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let mut iterator = x.iter();
///
/// assert_eq!(iterator.next(), Some(&1));
/// assert_eq!(iterator.next(), Some(&2));
/// assert_eq!(iterator.next(), Some(&4));
/// assert_eq!(iterator.next(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter(&self) -> Iter<'_, T> {
    // All iteration state lives in the returned `Iter`, which borrows
    // `self` for the anonymous output lifetime.
    Iter::new(self)
}
/// Returns an iterator that allows modifying each value.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// for elem in x.iter_mut() {
///     *elem += 2;
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
    // Mutable counterpart of `iter`: the `IterMut` exclusively borrows
    // `self`, so no other access is possible while it is alive.
    IterMut::new(self)
}
/// Returns an iterator over every contiguous window of `size` elements of
/// the slice. Successive windows overlap by `size - 1` elements. A slice
/// shorter than `size` yields no windows at all.
///
/// # Panics
///
/// Panics if `size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['r', 'u', 's', 't'];
/// let mut iter = slice.windows(2);
/// assert_eq!(iter.next().unwrap(), &['r', 'u']);
/// assert_eq!(iter.next().unwrap(), &['u', 's']);
/// assert_eq!(iter.next().unwrap(), &['s', 't']);
/// assert!(iter.next().is_none());
/// ```
///
/// If the slice is shorter than `size`:
///
/// ```
/// let slice = ['f', 'o', 'o'];
/// let mut iter = slice.windows(4);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn windows(&self, size: usize) -> Windows<'_, T> {
    // A zero-length window is rejected up front (see `# Panics`);
    // `Windows` only deals in non-zero sizes.
    Windows::new(self, NonZeroUsize::new(size).expect("size is zero"))
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`rchunks`] for the same iterator but starting at the end of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert_eq!(iter.next().unwrap(), &['m']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`chunks_exact`]: slice::chunks_exact
/// [`rchunks`]: slice::rchunks
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
    // Zero chunk size is rejected eagerly, as documented under `# Panics`.
    assert_ne!(chunk_size, 0);
    Chunks::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`rchunks_mut`] for the same iterator but starting at
/// the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 3]);
/// ```
///
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
/// [`rchunks_mut`]: slice::rchunks_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
    // Zero chunk size is rejected eagerly, as documented under `# Panics`.
    assert_ne!(chunk_size, 0);
    ChunksMut::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`chunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`rchunks_exact`] for the same iterator but starting at the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks`]: slice::chunks
/// [`rchunks_exact`]: slice::rchunks_exact
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
    // Zero chunk size is rejected eagerly, as documented under `# Panics`.
    assert_ne!(chunk_size, 0);
    ChunksExact::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`chunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`rchunks_exact_mut`] for the same iterator but starting at the end of
/// the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_exact_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_mut`]: slice::chunks_mut
/// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
    // Zero chunk size is rejected eagerly, as documented under `# Panics`.
    assert_ne!(chunk_size, 0);
    ChunksExactMut::new(self, chunk_size)
}
/// Splits the slice into a slice of `N`-element arrays,
/// assuming that there's no remainder.
///
/// # Safety
///
/// This may only be called when
/// - The slice splits exactly into `N`-element chunks (aka `self.len() % N == 0`).
/// - `N != 0`.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice: &[char] = &['l', 'o', 'r', 'e', 'm', '!'];
/// let chunks: &[[char; 1]] =
///     // SAFETY: 1-element chunks never have remainder
///     unsafe { slice.as_chunks_unchecked() };
/// assert_eq!(chunks, &[['l'], ['o'], ['r'], ['e'], ['m'], ['!']]);
/// let chunks: &[[char; 3]] =
///     // SAFETY: The slice length (6) is a multiple of 3
///     unsafe { slice.as_chunks_unchecked() };
/// assert_eq!(chunks, &[['l', 'o', 'r'], ['e', 'm', '!']]);
///
/// // These would be unsound:
/// // let chunks: &[[_; 5]] = slice.as_chunks_unchecked() // The slice length is not a multiple of 5
/// // let chunks: &[[_; 0]] = slice.as_chunks_unchecked() // Zero-length chunks are never allowed
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
    // The caller's contract is checked in debug builds only; in release
    // builds violating it is undefined behavior (see `# Safety`).
    debug_assert_ne!(N, 0);
    debug_assert_eq!(self.len() % N, 0);
    let new_len =
        // SAFETY: Our precondition (`N != 0` and `self.len() % N == 0`,
        // debug-asserted above) is exactly what's needed to call this
        unsafe { crate::intrinsics::exact_div(self.len(), N) };
    // SAFETY: We cast a slice of `new_len * N` elements into
    // a slice of `new_len` many `N` elements chunks.
    unsafe { from_raw_parts(self.as_ptr().cast(), new_len) }
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the beginning of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let (chunks, remainder) = slice.as_chunks();
/// assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
/// assert_eq!(remainder, &['m']);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) {
    assert_ne!(N, 0);
    let full_chunks = self.len() / N;
    let (head, tail) = self.split_at(full_chunks * N);
    // SAFETY: `head` was cut to `full_chunks * N` elements — an exact
    // multiple of `N` — and `N` was asserted non-zero above.
    let chunks = unsafe { head.as_chunks_unchecked() };
    (chunks, tail)
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the end of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let (remainder, chunks) = slice.as_rchunks();
/// assert_eq!(remainder, &['l']);
/// assert_eq!(chunks, &[['o', 'r'], ['e', 'm']]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]]) {
    assert_ne!(N, 0);
    let full_chunks = self.len() / N;
    // Keep the remainder at the front so the chunks line up with the end.
    let (head, tail) = self.split_at(self.len() - full_chunks * N);
    // SAFETY: `tail` holds exactly `full_chunks * N` elements — an exact
    // multiple of `N` — and `N` was asserted non-zero above.
    let chunks = unsafe { tail.as_chunks_unchecked() };
    (head, chunks)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are array references and do not overlap. If `N` does not divide the
/// length of the slice, then the last up to `N-1` elements will be omitted and can be
/// retrieved from the `remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.array_chunks();
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks_exact`]: slice::chunks_exact
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
    // `N` is a const generic, but zero must still be rejected at runtime
    // until a compile-time check exists (see `# Panics`).
    assert_ne!(N, 0);
    ArrayChunks::new(self)
}
/// Splits the slice into a slice of `N`-element arrays,
/// assuming that there's no remainder.
///
/// # Safety
///
/// This may only be called when
/// - The slice splits exactly into `N`-element chunks (aka `self.len() % N == 0`).
/// - `N != 0`.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let slice: &mut [char] = &mut ['l', 'o', 'r', 'e', 'm', '!'];
/// let chunks: &mut [[char; 1]] =
///     // SAFETY: 1-element chunks never have remainder
///     unsafe { slice.as_chunks_unchecked_mut() };
/// chunks[0] = ['L'];
/// assert_eq!(chunks, &[['L'], ['o'], ['r'], ['e'], ['m'], ['!']]);
/// let chunks: &mut [[char; 3]] =
///     // SAFETY: The slice length (6) is a multiple of 3
///     unsafe { slice.as_chunks_unchecked_mut() };
/// chunks[1] = ['a', 'x', '?'];
/// assert_eq!(slice, &['L', 'o', 'r', 'a', 'x', '?']);
///
/// // These would be unsound:
/// // let chunks: &[[_; 5]] = slice.as_chunks_unchecked_mut() // The slice length is not a multiple of 5
/// // let chunks: &[[_; 0]] = slice.as_chunks_unchecked_mut() // Zero-length chunks are never allowed
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub unsafe fn as_chunks_unchecked_mut<const N: usize>(&mut self) -> &mut [[T; N]] {
    // The caller's contract is checked in debug builds only; in release
    // builds violating it is undefined behavior (see `# Safety`).
    debug_assert_ne!(N, 0);
    debug_assert_eq!(self.len() % N, 0);
    let new_len =
        // SAFETY: Our precondition (`N != 0` and `self.len() % N == 0`,
        // debug-asserted above) is exactly what's needed to call this
        unsafe { crate::intrinsics::exact_div(self.len(), N) };
    // SAFETY: We cast a slice of `new_len * N` elements into
    // a slice of `new_len` many `N` elements chunks.
    unsafe { from_raw_parts_mut(self.as_mut_ptr().cast(), new_len) }
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the beginning of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// let (chunks, remainder) = v.as_chunks_mut();
/// remainder[0] = 9;
/// for chunk in chunks {
///     *chunk = [count; 2];
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 9]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_chunks_mut<const N: usize>(&mut self) -> (&mut [[T; N]], &mut [T]) {
    assert_ne!(N, 0);
    let full_chunks = self.len() / N;
    let (head, tail) = self.split_at_mut(full_chunks * N);
    // SAFETY: `head` was cut to `full_chunks * N` elements — an exact
    // multiple of `N` — and `N` was asserted non-zero above.
    let chunks = unsafe { head.as_chunks_unchecked_mut() };
    (chunks, tail)
}
/// Splits the slice into a slice of `N`-element arrays,
/// starting at the end of the slice,
/// and a remainder slice with length strictly less than `N`.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(slice_as_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// let (remainder, chunks) = v.as_rchunks_mut();
/// remainder[0] = 9;
/// for chunk in chunks {
///     *chunk = [count; 2];
///     count += 1;
/// }
/// assert_eq!(v, &[9, 1, 1, 2, 2]);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
pub fn as_rchunks_mut<const N: usize>(&mut self) -> (&mut [T], &mut [[T; N]]) {
    assert_ne!(N, 0);
    let full_chunks = self.len() / N;
    // Keep the remainder at the front so the chunks line up with the end.
    let (head, tail) = self.split_at_mut(self.len() - full_chunks * N);
    // SAFETY: `tail` holds exactly `full_chunks * N` elements — an exact
    // multiple of `N` — and `N` was asserted non-zero above.
    let chunks = unsafe { tail.as_chunks_unchecked_mut() };
    (head, chunks)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable array references and do not overlap. If `N` does not divide
/// the length of the slice, then the last up to `N-1` elements will be omitted and
/// can be retrieved from the `into_remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact_mut`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.array_chunks_mut() {
///     *chunk = [count; 2];
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
    // `N` is a const generic, but zero must still be rejected at runtime
    // until a compile-time check exists (see `# Panics`).
    assert_ne!(N, 0);
    ArrayChunksMut::new(self)
}
/// Returns an iterator over overlapping windows of `N` elements of a slice,
/// starting at the beginning of the slice.
///
/// This is the const generic equivalent of [`windows`].
///
/// If `N` is greater than the size of the slice, it will return no windows.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_windows)]
/// let slice = [0, 1, 2, 3];
/// let mut iter = slice.array_windows();
/// assert_eq!(iter.next().unwrap(), &[0, 1]);
/// assert_eq!(iter.next().unwrap(), &[1, 2]);
/// assert_eq!(iter.next().unwrap(), &[2, 3]);
/// assert!(iter.next().is_none());
/// ```
///
/// [`windows`]: slice::windows
#[unstable(feature = "array_windows", issue = "75027")]
#[inline]
pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> {
    // `N` is a const generic, but zero must still be rejected at runtime
    // until a compile-time check exists (see `# Panics`).
    assert_ne!(N, 0);
    ArrayWindows::new(self)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`chunks`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert_eq!(iter.next().unwrap(), &['l']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`rchunks_exact`]: slice::rchunks_exact
/// [`chunks`]: slice::chunks
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
    // Reject a zero chunk size eagerly, using the same `assert_ne!` form
    // as the forward `chunks`/`chunks_exact` family for consistency.
    assert_ne!(chunk_size, 0);
    RChunks::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`chunks_mut`] for the same iterator but starting at the
/// beginning of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[3, 2, 2, 1, 1]);
/// ```
///
/// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
/// [`chunks_mut`]: slice::chunks_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
    // Reject a zero chunk size eagerly, using the same `assert_ne!` form
    // as the forward `chunks`/`chunks_exact` family for consistency.
    assert_ne!(chunk_size, 0);
    RChunksMut::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// end of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`rchunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`chunks_exact`] for the same iterator but starting at the beginning of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['l']);
/// ```
///
/// [`chunks`]: slice::chunks
/// [`rchunks`]: slice::rchunks
/// [`chunks_exact`]: slice::chunks_exact
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
    // Reject a zero chunk size eagerly, using the same `assert_ne!` form
    // as the forward `chunks`/`chunks_exact` family for consistency.
    assert_ne!(chunk_size, 0);
    RChunksExact::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`rchunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`chunks_exact_mut`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_exact_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[0, 2, 2, 1, 1]);
/// ```
///
/// [`chunks_mut`]: slice::chunks_mut
/// [`rchunks_mut`]: slice::rchunks_mut
/// [`chunks_exact_mut`]: slice::chunks_exact_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
    // Reject a zero chunk size eagerly, using the same `assert_ne!` form
    // as the forward `chunks`/`chunks_exact` family for consistency.
    assert_ne!(chunk_size, 0);
    RChunksExactMut::new(self, chunk_size)
}
/// Returns an iterator over the slice producing non-overlapping runs
/// of elements using the predicate to separate them.
///
/// The predicate is called on two elements following themselves,
/// it means the predicate is called on `slice[0]` and `slice[1]`
/// then on `slice[1]` and `slice[2]` and so on.
///
/// # Examples
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &[1, 1, 1, 3, 3, 2, 2, 2];
///
/// let mut iter = slice.group_by(|a, b| a == b);
///
/// assert_eq!(iter.next(), Some(&[1, 1, 1][..]));
/// assert_eq!(iter.next(), Some(&[3, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 2, 2][..]));
/// assert_eq!(iter.next(), None);
/// ```
///
/// This method can be used to extract the sorted subslices:
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &[1, 1, 2, 3, 2, 3, 2, 3, 4];
///
/// let mut iter = slice.group_by(|a, b| a <= b);
///
/// assert_eq!(iter.next(), Some(&[1, 1, 2, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 3][..]));
/// assert_eq!(iter.next(), Some(&[2, 3, 4][..]));
/// assert_eq!(iter.next(), None);
/// ```
#[unstable(feature = "slice_group_by", issue = "80552")]
#[inline]
pub fn group_by<F>(&self, pred: F) -> GroupBy<'_, T, F>
where
    F: FnMut(&T, &T) -> bool,
{
    // All grouping logic lives in the `GroupBy` iterator; this method
    // only packages the slice together with the predicate.
    GroupBy::new(self, pred)
}
/// Returns an iterator over the slice producing non-overlapping mutable
/// runs of elements using the predicate to separate them.
///
/// The predicate is called on two elements following themselves,
/// it means the predicate is called on `slice[0]` and `slice[1]`
/// then on `slice[1]` and `slice[2]` and so on.
///
/// # Examples
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &mut [1, 1, 1, 3, 3, 2, 2, 2];
///
/// let mut iter = slice.group_by_mut(|a, b| a == b);
///
/// assert_eq!(iter.next(), Some(&mut [1, 1, 1][..]));
/// assert_eq!(iter.next(), Some(&mut [3, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 2, 2][..]));
/// assert_eq!(iter.next(), None);
/// ```
///
/// This method can be used to extract the sorted subslices:
///
/// ```
/// #![feature(slice_group_by)]
///
/// let slice = &mut [1, 1, 2, 3, 2, 3, 2, 3, 4];
///
/// let mut iter = slice.group_by_mut(|a, b| a <= b);
///
/// assert_eq!(iter.next(), Some(&mut [1, 1, 2, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 3][..]));
/// assert_eq!(iter.next(), Some(&mut [2, 3, 4][..]));
/// assert_eq!(iter.next(), None);
/// ```
#[unstable(feature = "slice_group_by", issue = "80552")]
#[inline]
pub fn group_by_mut<F>(&mut self, pred: F) -> GroupByMut<'_, T, F>
where
    F: FnMut(&T, &T) -> bool,
{
    // Mutable counterpart of `group_by`; all grouping logic lives in the
    // `GroupByMut` iterator.
    GroupByMut::new(self, pred)
}
/// Divides one slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 4, 5, 6];
///
/// {
///    let (left, right) = v.split_at(0);
///    assert_eq!(left, []);
///    assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = v.split_at(2);
///     assert_eq!(left, [1, 2]);
///     assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = v.split_at(6);
///     assert_eq!(left, [1, 2, 3, 4, 5, 6]);
///     assert_eq!(right, []);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
    assert!(mid <= self.len());
    // SAFETY: the assert above establishes `0 <= mid <= self.len()`,
    // which is exactly the contract of `split_at_unchecked`. (Unlike the
    // mutable variant, this path only narrows `self` with `get_unchecked`
    // ranges; no raw-parts reconstruction is involved.)
    unsafe { self.split_at_unchecked(mid) }
}
/// Divides one mutable slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let mut v = [1, 0, 3, 0, 5, 6];
/// let (left, right) = v.split_at_mut(2);
/// assert_eq!(left, [1, 0]);
/// assert_eq!(right, [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    assert!(mid <= self.len());
    // SAFETY: the assert above establishes `0 <= mid <= self.len()`;
    // the disjoint halves `[ptr; mid]` and `[mid; len]` are inside `self`,
    // which fulfills the requirements of `from_raw_parts_mut` as used by
    // `split_at_mut_unchecked`.
    unsafe { self.split_at_mut_unchecked(mid) }
}
/// Divides one slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at`]: slice::split_at
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// #![feature(slice_split_at_unchecked)]
///
/// let v = [1, 2, 3, 4, 5, 6];
///
/// unsafe {
///    let (left, right) = v.split_at_unchecked(0);
///    assert_eq!(left, []);
///    assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// unsafe {
///     let (left, right) = v.split_at_unchecked(2);
///     assert_eq!(left, [1, 2]);
///     assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// unsafe {
///     let (left, right) = v.split_at_unchecked(6);
///     assert_eq!(left, [1, 2, 3, 4, 5, 6]);
///     assert_eq!(right, []);
/// }
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`; with that
    // invariant both range subslices below are in bounds.
    unsafe { (self.get_unchecked(..mid), self.get_unchecked(mid..)) }
}
/// Divides one mutable slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at_mut`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at_mut`]: slice::split_at_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// #![feature(slice_split_at_unchecked)]
///
/// let mut v = [1, 0, 3, 0, 5, 6];
/// // scoped to restrict the lifetime of the borrows
/// unsafe {
///     let (left, right) = v.split_at_mut_unchecked(2);
///     assert_eq!(left, [1, 0]);
///     assert_eq!(right, [3, 0, 5, 6]);
///     left[1] = 2;
///     right[1] = 4;
/// }
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
pub unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    let len = self.len();
    let ptr = self.as_mut_ptr();
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`.
    //
    // `[ptr; mid]` and `[mid; len]` are not overlapping, so returning two
    // mutable references (one per disjoint half) is fine.
    unsafe { (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid)) }
}
/// Divides one slice into an array and a remainder slice at an index.
///
/// The array will contain all indices from `[0, N)` (excluding
/// the index `N` itself) and the slice will contain all
/// indices from `[N, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `N > len`.
///
/// # Examples
///
/// ```
/// #![feature(split_array)]
///
/// let v = &[1, 2, 3, 4, 5, 6][..];
///
/// {
///    let (left, right) = v.split_array_ref::<0>();
///    assert_eq!(left, &[]);
///    assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = v.split_array_ref::<2>();
///     assert_eq!(left, &[1, 2]);
///     assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = v.split_array_ref::<6>();
///     assert_eq!(left, &[1, 2, 3, 4, 5, 6]);
///     assert_eq!(right, []);
/// }
/// ```
#[unstable(feature = "split_array", reason = "new API", issue = "90091")]
#[inline]
pub fn split_array_ref<const N: usize>(&self) -> (&[T; N], &[T]) {
    let (head, tail) = self.split_at(N);
    // SAFETY: `split_at` guarantees that `head` is a `[T]` of length
    // exactly `N`, so it has the same layout as a `[T; N]`.
    let head = unsafe { &*(head.as_ptr() as *const [T; N]) };
    (head, tail)
}
/// Divides one mutable slice into an array and a remainder slice at an index.
///
/// The array will contain all indices from `[0, N)` (excluding
/// the index `N` itself) and the slice will contain all
/// indices from `[N, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `N > len`.
///
/// # Examples
///
/// ```
/// #![feature(split_array)]
///
/// let mut v = &mut [1, 0, 3, 0, 5, 6][..];
/// let (left, right) = v.split_array_mut::<2>();
/// assert_eq!(left, &mut [1, 0]);
/// assert_eq!(right, [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[unstable(feature = "split_array", reason = "new API", issue = "90091")]
#[inline]
pub fn split_array_mut<const N: usize>(&mut self) -> (&mut [T; N], &mut [T]) {
    let (head, tail) = self.split_at_mut(N);
    // SAFETY: `split_at_mut` guarantees that `head` is a `[T]` of length
    // exactly `N`, so it has the same layout as a `[T; N]`.
    let head = unsafe { &mut *(head.as_mut_ptr() as *mut [T; N]) };
    (head, tail)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the first element is matched, an empty slice will be the first item
/// returned by the iterator. Similarly, if the last element in the slice
/// is matched, an empty slice will be the last item returned by the
/// iterator:
///
/// ```
/// let slice = [10, 40, 33];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert!(iter.next().is_none());
/// ```
///
/// If two matched elements are directly adjacent, an empty slice will be
/// present between them:
///
/// ```
/// let slice = [10, 6, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split<F>(&self, pred: F) -> Split<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // All iteration state and splitting logic live in the `Split` adapter;
    // this method only constructs it (the traversal itself is lazy).
    Split::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_mut(|num| *num % 3 == 0) {
///     group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `split`; construction only, iteration is lazy.
    SplitMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is contained in the end of the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the last element of the slice is matched,
/// that element will be considered the terminator of the preceding slice.
/// That slice will be the last item returned by the iterator.
///
/// ```
/// let slice = [3, 10, 40, 33];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[3]);
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "split_inclusive", since = "1.51.0")]
#[inline]
pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Like `split`, but the adapter keeps each separator at the end of the
    // preceding subslice; construction only, iteration is lazy.
    SplitInclusive::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is contained in the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
///     let terminator_idx = group.len()-1;
///     group[terminator_idx] = 1;
/// }
/// assert_eq!(v, [10, 40, 1, 20, 1, 1]);
/// ```
#[stable(feature = "split_inclusive", since = "1.51.0")]
#[inline]
pub fn split_inclusive_mut<F>(&mut self, pred: F) -> SplitInclusiveMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `split_inclusive`; construction only.
    SplitInclusiveMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, starting at the end of the slice and working backwards.
/// The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [11, 22, 33, 0, 44, 55];
/// let mut iter = slice.rsplit(|num| *num == 0);
///
/// assert_eq!(iter.next().unwrap(), &[44, 55]);
/// assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
/// assert_eq!(iter.next(), None);
/// ```
///
/// As with `split()`, if the first or last element is matched, an empty
/// slice will be the first (or last) item returned by the iterator.
///
/// ```
/// let v = &[0, 1, 1, 2, 3, 5, 8];
/// let mut it = v.rsplit(|n| *n % 2 == 0);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next().unwrap(), &[3, 5]);
/// assert_eq!(it.next().unwrap(), &[1, 1]);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next(), None);
/// ```
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit<F>(&self, pred: F) -> RSplit<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Reverse-direction splitter; construction only, iteration is lazy.
    RSplit::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`, starting at the end of the slice and working
/// backwards. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [100, 400, 300, 200, 600, 500];
///
/// let mut count = 0;
/// for group in v.rsplit_mut(|num| *num % 3 == 0) {
///     count += 1;
///     group[0] = count;
/// }
/// assert_eq!(v, [3, 400, 300, 2, 600, 1]);
/// ```
///
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `rsplit`; construction only.
    RSplitMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once by numbers divisible by 3 (i.e., `[10, 40]`,
/// `[20, 60, 50]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn(2, |num| *num % 3 == 0) {
///     println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wrap the unlimited splitter in an adapter that caps it at `n` items.
    SplitN::new(self.split(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn_mut(2, |num| *num % 3 == 0) {
///     group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 50]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wrap the unlimited mutable splitter in an adapter capped at `n` items.
    SplitNMut::new(self.split_mut(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once, starting from the end, by numbers divisible
/// by 3 (i.e., `[50]`, `[10, 40, 30, 20]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.rsplitn(2, |num| *num % 3 == 0) {
///     println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wrap the unlimited reverse splitter in an adapter capped at `n` items.
    RSplitN::new(self.rsplit(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut s = [10, 40, 30, 20, 60, 50];
///
/// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
///     group[0] = 1;
/// }
/// assert_eq!(s, [1, 40, 30, 20, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wrap the unlimited mutable reverse splitter in an `n`-item cap.
    RSplitNMut::new(self.rsplit_mut(pred), n)
}
/// Returns `true` if the slice contains an element with the given value.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.contains(&30));
/// assert!(!v.contains(&50));
/// ```
///
/// If you do not have a `&T`, but some other value that you can compare
/// with one (for example, `String` implements `PartialEq<str>`), you can
/// use `iter().any`:
///
/// ```
/// let v = [String::from("hello"), String::from("world")]; // slice of `String`
/// assert!(v.iter().any(|e| e == "hello")); // search with `&str`
/// assert!(!v.iter().any(|e| e == "hi"));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn contains(&self, x: &T) -> bool
where
    T: PartialEq,
{
    // Note the argument order: `SliceContains` is implemented on the element
    // (needle) type, so `x` is the receiver and `self` is the haystack. This
    // lets specific element types provide specialized search implementations.
    cmp::SliceContains::slice_contains(x, self)
}
/// Returns `true` if `needle` is a prefix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.starts_with(&[10]));
/// assert!(v.starts_with(&[10, 40]));
/// assert!(!v.starts_with(&[50]));
/// assert!(!v.starts_with(&[10, 50]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.starts_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.starts_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn starts_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // `get(..n)` yields the length-`n` prefix, or `None` when the needle is
    // longer than the slice (in which case it cannot be a prefix).
    self.get(..needle.len()).map_or(false, |head| head == needle)
}
/// Returns `true` if `needle` is a suffix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.ends_with(&[30]));
/// assert!(v.ends_with(&[40, 30]));
/// assert!(!v.ends_with(&[50]));
/// assert!(!v.ends_with(&[50, 30]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.ends_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.ends_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ends_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // `checked_sub` is `None` exactly when the needle is longer than the
    // slice; otherwise `start` marks where a matching suffix would begin.
    self.len()
        .checked_sub(needle.len())
        .map_or(false, |start| &self[start..] == needle)
}
/// Returns a subslice with the prefix removed.
///
/// If the slice starts with `prefix`, returns the subslice after the prefix, wrapped in `Some`.
/// If `prefix` is empty, simply returns the original slice.
///
/// If the slice does not start with `prefix`, returns `None`.
///
/// # Examples
///
/// ```
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
/// assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
/// assert_eq!(v.strip_prefix(&[50]), None);
/// assert_eq!(v.strip_prefix(&[10, 50]), None);
///
/// let prefix : &str = "he";
/// assert_eq!(b"hello".strip_prefix(prefix.as_bytes()),
///            Some(b"llo".as_ref()));
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[stable(feature = "slice_strip", since = "1.51.0")]
pub fn strip_prefix<P: SlicePattern<Item = T> + ?Sized>(&self, prefix: &P) -> Option<&[T]>
where
    T: PartialEq,
{
    // This function will need rewriting if and when SlicePattern becomes more sophisticated.
    let prefix = prefix.as_slice();
    let n = prefix.len();
    // The prefix can only match when the slice is at least as long as it;
    // both indexing operations below are then in bounds.
    if n <= self.len() && &self[..n] == prefix {
        Some(&self[n..])
    } else {
        None
    }
}
/// Returns a subslice with the suffix removed.
///
/// If the slice ends with `suffix`, returns the subslice before the suffix, wrapped in `Some`.
/// If `suffix` is empty, simply returns the original slice.
///
/// If the slice does not end with `suffix`, returns `None`.
///
/// # Examples
///
/// ```
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
/// assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
/// assert_eq!(v.strip_suffix(&[50]), None);
/// assert_eq!(v.strip_suffix(&[50, 30]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[stable(feature = "slice_strip", since = "1.51.0")]
pub fn strip_suffix<P: SlicePattern<Item = T> + ?Sized>(&self, suffix: &P) -> Option<&[T]>
where
    T: PartialEq,
{
    // This function will need rewriting if and when SlicePattern becomes more sophisticated.
    let suffix = suffix.as_slice();
    // `checked_sub` is `None` when the suffix is longer than the slice;
    // otherwise `split` is where a matching suffix would begin.
    match self.len().checked_sub(suffix.len()) {
        Some(split) if &self[split..] == suffix => Some(&self[..split]),
        _ => None,
    }
}
/// Binary searches this sorted slice for a given element.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. The index is chosen
/// deterministically, but is subject to change in future versions of Rust.
/// If the value is not found then [`Result::Err`] is returned, containing
/// the index where a matching element could be inserted while maintaining
/// sorted order.
///
/// See also [`binary_search_by`], [`binary_search_by_key`], and [`partition_point`].
///
/// [`binary_search_by`]: slice::binary_search_by
/// [`binary_search_by_key`]: slice::binary_search_by_key
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// assert_eq!(s.binary_search(&13),  Ok(9));
/// assert_eq!(s.binary_search(&4),   Err(7));
/// assert_eq!(s.binary_search(&100), Err(13));
/// let r = s.binary_search(&1);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
///
/// If you want to insert an item to a sorted vector, while maintaining
/// sort order:
///
/// ```
/// let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
/// let num = 42;
/// let idx = s.binary_search(&num).unwrap_or_else(|x| x);
/// s.insert(idx, num);
/// assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where
    T: Ord,
{
    // Delegate to the comparator form, ordering each probe against `x`
    // via `Ord::cmp`.
    self.binary_search_by(|p| p.cmp(x))
}
/// Binary searches this sorted slice with a comparator function.
///
/// The comparator function should implement an order consistent
/// with the sort order of the underlying slice, returning an
/// order code that indicates whether its argument is `Less`,
/// `Equal` or `Greater` the desired target.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. The index is chosen
/// deterministically, but is subject to change in future versions of Rust.
/// If the value is not found then [`Result::Err`] is returned, containing
/// the index where a matching element could be inserted while maintaining
/// sorted order.
///
/// See also [`binary_search`], [`binary_search_by_key`], and [`partition_point`].
///
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by_key`]: slice::binary_search_by_key
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// let seek = 13;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
/// let seek = 4;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
/// let seek = 100;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
/// let seek = 1;
/// let r = s.binary_search_by(|probe| probe.cmp(&seek));
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> Ordering,
{
    let mut size = self.len();
    let mut left = 0;
    let mut right = size;
    // Loop invariant: `size == right - left`, and any element comparing
    // `Equal` must lie within `self[left..right]`.
    while left < right {
        // `size > 0` here, so `mid` lies in `[left, right)`.
        let mid = left + size / 2;
        // SAFETY: the call is made safe by the following invariants:
        // - `mid >= 0`
        // - `mid < size`: `mid` is limited by `[left; right)` bound.
        let cmp = f(unsafe { self.get_unchecked(mid) });
        // The reason why we use if/else control flow rather than match
        // is because match reorders comparison operations, which is perf sensitive.
        // This is x86 asm for u8: https://rust.godbolt.org/z/8Y8Pra.
        if cmp == Less {
            left = mid + 1;
        } else if cmp == Greater {
            right = mid;
        } else {
            // SAFETY: same as the `get_unchecked` above
            unsafe { crate::intrinsics::assume(mid < self.len()) };
            return Ok(mid);
        }
        size = right - left;
    }
    // Not found: `left` is the index where the target could be inserted
    // while keeping the slice sorted.
    Err(left)
}
/// Binary searches this sorted slice with a key extraction function.
///
/// Assumes that the slice is sorted by the key, for instance with
/// [`sort_by_key`] using the same key extraction function.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. The index is chosen
/// deterministically, but is subject to change in future versions of Rust.
/// If the value is not found then [`Result::Err`] is returned, containing
/// the index where a matching element could be inserted while maintaining
/// sorted order.
///
/// See also [`binary_search`], [`binary_search_by`], and [`partition_point`].
///
/// [`sort_by_key`]: slice::sort_by_key
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by`]: slice::binary_search_by
/// [`partition_point`]: slice::partition_point
///
/// # Examples
///
/// Looks up a series of four elements in a slice of pairs sorted by
/// their second elements. The first is found, with a uniquely
/// determined position; the second and third are not found; the
/// fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
///          (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
///          (1, 21), (2, 34), (4, 55)];
///
/// assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b),  Ok(9));
/// assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b),   Err(7));
/// assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13));
/// let r = s.binary_search_by_key(&1, |&(a, b)| b);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
// Lint rustdoc::broken_intra_doc_links is allowed as `slice::sort_by_key` is
// in crate `alloc`, and as such doesn't exists yet when building `core`: #74481.
// This breaks links when slice is displayed in core, but changing it to use relative links
// would break when the item is re-exported. So allow the core links to be broken for now.
#[allow(rustdoc::broken_intra_doc_links)]
#[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
#[inline]
pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> B,
    B: Ord,
{
    // Extract the key from each probed element and order it against `b`.
    // The key function runs once per probe (O(log n) invocations).
    self.binary_search_by(|k| f(k).cmp(b))
}
/// Sorts the slice, but might not preserve the order of equal elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort_unstable();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable(&mut self)
where
    T: Ord,
{
    // The internal quicksort takes an `is_less` predicate; `Ord::lt` gives
    // the natural strict ordering.
    sort::quicksort(self, |a, b| a.lt(b));
}
/// Sorts the slice with a comparator function, but might not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
/// total order if it is (for all `a`, `b` and `c`):
///
/// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
/// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
/// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
/// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
///
/// ```
/// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
/// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
/// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
/// ```
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_unstable_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_unstable_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by<F>(&mut self, mut compare: F)
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Adapt the `Ordering`-returning comparator to the boolean `is_less`
    // predicate expected by the internal quicksort.
    sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less);
}
/// Sorts the slice with a key extraction function, but might not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(m \* *n* \* log(*n*)) worst-case, where the key function is
/// *O*(*m*).
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// Due to its key calling strategy, [`sort_unstable_by_key`](#method.sort_unstable_by_key)
/// is likely to be slower than [`sort_by_cached_key`](#method.sort_by_cached_key) in
/// cases where the key function is expensive.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_unstable_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by_key<K, F>(&mut self, mut f: F)
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // The key function is invoked on both operands of every comparison
    // rather than cached — hence the *O*(m) factor documented above.
    sort::quicksort(self, |a, b| f(a).lt(&f(b)));
}
/// Reorder the slice such that the element at `index` is at its final sorted position.
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[rustc_deprecated(since = "1.49.0", reason = "use the select_nth_unstable() instead")]
#[inline]
pub fn partition_at_index(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    // Deprecated alias: forwards directly to `select_nth_unstable`.
    self.select_nth_unstable(index)
}
/// Reorder the slice with a comparator function such that the element at `index` is at its
/// final sorted position.
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[rustc_deprecated(since = "1.49.0", reason = "use select_nth_unstable_by() instead")]
#[inline]
pub fn partition_at_index_by<F>(
    &mut self,
    index: usize,
    compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Deprecated alias: forwards directly to `select_nth_unstable_by`.
    self.select_nth_unstable_by(index, compare)
}
/// Reorder the slice with a key extraction function such that the element at `index` is at its
/// final sorted position.
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[rustc_deprecated(since = "1.49.0", reason = "use the select_nth_unstable_by_key() instead")]
#[inline]
pub fn partition_at_index_by_key<K, F>(
    &mut self,
    index: usize,
    f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Deprecated alias: forwards directly to `select_nth_unstable_by_key`.
    self.select_nth_unstable_by_key(index, f)
}
/// Reorder the slice such that the element at `index` is at its final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index`. Additionally, this reordering is
/// unstable (i.e. any number of equal elements may end up at position `index`), in-place
/// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also known as "kth
/// element" in other libraries. It returns a triplet of the following values: all elements less
/// than the one at the given index, the value at the given index, and all elements greater than
/// the one at the given index.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median
/// v.select_nth_unstable(2);
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [-3, -5, 1, 2, 4] ||
///         v == [-5, -3, 1, 2, 4] ||
///         v == [-3, -5, 1, 4, 2] ||
///         v == [-5, -3, 1, 4, 2]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    // Quickselect with `Ord::lt` as the strict "is less" predicate.
    let mut f = |a: &T, b: &T| a.lt(b);
    sort::partition_at_index(self, index, &mut f)
}
/// Reorder the slice with a comparator function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the comparator function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index,
/// and all elements greater than the one at the given index, using the provided comparator
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median as if the slice were sorted in descending order.
/// v.select_nth_unstable_by(2, |a, b| b.cmp(a));
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [2, 4, 1, -5, -3] ||
///         v == [2, 4, 1, -3, -5] ||
///         v == [4, 2, 1, -5, -3] ||
///         v == [4, 2, 1, -3, -5]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable_by<F>(
    &mut self,
    index: usize,
    mut compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Quickselect; the `Ordering` comparator is adapted inline into the
    // boolean "is less" predicate that `partition_at_index` expects.
    sort::partition_at_index(self, index, &mut |a: &T, b: &T| compare(a, b) == Less)
}
/// Reorder the slice with a key extraction function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the key extraction function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index, and
/// all elements greater than the one at the given index, using the provided key extraction
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: slice::sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Return the median as if the array were sorted according to absolute value.
/// v.select_nth_unstable_by_key(2, |a| a.abs());
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [1, 2, -3, 4, -5] ||
///         v == [1, 2, -3, -5, 4] ||
///         v == [2, 1, -3, 4, -5] ||
///         v == [2, 1, -3, -5, 4]);
/// ```
#[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
#[inline]
pub fn select_nth_unstable_by_key<K, F>(
    &mut self,
    index: usize,
    mut f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Quickselect; keys are extracted inline for each comparison (the key
    // function runs on both operands every time, it is not cached).
    sort::partition_at_index(self, index, &mut |a: &T, b: &T| f(a).lt(&f(b)))
}
/// Moves all consecutive repeated elements to the end of the slice according to the
/// [`PartialEq`] trait implementation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
///
/// let (dedup, duplicates) = slice.partition_dedup();
///
/// assert_eq!(dedup, [1, 2, 3, 2, 1]);
/// assert_eq!(duplicates, [2, 3, 1]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup(&mut self) -> (&mut [T], &mut [T])
where
    T: PartialEq,
{
    // Delegate to the general form, with `==` as the equality relation.
    self.partition_dedup_by(|a, b| a == b)
}
/// Moves all but the first of consecutive elements to the end of the slice satisfying
/// a given equality relation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// The `same_bucket` function is passed references to two elements from the slice and
/// must determine if the elements compare equal. The elements are passed in opposite order
/// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is moved
/// at the end of the slice.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
///
/// let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
///
/// assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
/// assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by<F>(&mut self, mut same_bucket: F) -> (&mut [T], &mut [T])
where
    F: FnMut(&mut T, &mut T) -> bool,
{
    // Although we have a mutable reference to `self`, we cannot make
    // *arbitrary* changes. The `same_bucket` calls could panic, so we
    // must ensure that the slice is in a valid state at all times.
    //
    // The way that we handle this is by using swaps; we iterate
    // over all the elements, swapping as we go so that at the end
    // the elements we wish to keep are in the front, and those we
    // wish to reject are at the back. We can then split the slice.
    // This operation is still `O(n)`.
    //
    // Example: We start in this state, where `r` represents "next
    // read" and `w` represents "next_write`.
    //
    //           r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //           w
    //
    // Comparing self[r] against self[w-1], this is not a duplicate, so
    // we swap self[r] and self[w] (no effect as r==w) and then increment both
    // r and w, leaving us with:
    //
    //               r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //               w
    //
    // Comparing self[r] against self[w-1], this value is a duplicate,
    // so we increment `r` but leave everything else unchanged:
    //
    //                   r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //               w
    //
    // Comparing self[r] against self[w-1], this is not a duplicate,
    // so swap self[r] and self[w] and advance r and w:
    //
    //                       r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 2 | 1 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //                   w
    //
    // Not a duplicate, repeat:
    //
    //                           r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 2 | 3 | 1 | 3 |
    //     +---+---+---+---+---+---+
    //                       w
    //
    // Duplicate, advance r. End of slice. Split at w.
    let len = self.len();
    if len <= 1 {
        // Zero or one element: trivially deduplicated, no duplicates tail.
        return (self, &mut []);
    }
    let ptr = self.as_mut_ptr();
    // Element 0 is always kept, so both cursors start at index 1.
    let mut next_read: usize = 1;
    let mut next_write: usize = 1;
    // SAFETY: the `while` condition guarantees `next_read` and `next_write`
    // are less than `len`, thus are inside `self`. `prev_ptr_write` points to
    // one element before `ptr_write`, but `next_write` starts at 1, so
    // `prev_ptr_write` is never less than 0 and is inside the slice.
    // This fulfils the requirements for dereferencing `ptr_read`, `prev_ptr_write`
    // and `ptr_write`, and for using `ptr.add(next_read)`, `ptr.add(next_write - 1)`
    // and `prev_ptr_write.offset(1)`.
    //
    // `next_write` is also incremented at most once per loop at most meaning
    // no element is skipped when it may need to be swapped.
    //
    // `ptr_read` and `prev_ptr_write` never point to the same element. This
    // is required for `&mut *ptr_read`, `&mut *prev_ptr_write` to be safe.
    // The explanation is simply that `next_read >= next_write` is always true,
    // thus `next_read > next_write - 1` is too.
    unsafe {
        // Avoid bounds checks by using raw pointers.
        while next_read < len {
            let ptr_read = ptr.add(next_read);
            let prev_ptr_write = ptr.add(next_write - 1);
            if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
                if next_read != next_write {
                    let ptr_write = prev_ptr_write.offset(1);
                    mem::swap(&mut *ptr_read, &mut *ptr_write);
                }
                next_write += 1;
            }
            next_read += 1;
        }
    }
    // `[0, next_write)` holds the kept elements; the rest are duplicates.
    self.split_at_mut(next_write)
}
/// Moves all but the first of consecutive elements to the end of the slice that resolve
/// to the same key.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
///
/// let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
///
/// assert_eq!(dedup, [10, 20, 30, 20, 11]);
/// assert_eq!(duplicates, [21, 30, 13]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by_key<K, F>(&mut self, mut key: F) -> (&mut [T], &mut [T])
where
    F: FnMut(&mut T) -> K,
    K: PartialEq,
{
    // Two elements belong to the same bucket when their extracted keys match.
    self.partition_dedup_by(|a, b| {
        let ka = key(a);
        let kb = key(b);
        ka == kb
    })
}
/// Rotates the slice in-place such that the first `mid` elements of the
/// slice move to the end while the last `self.len() - mid` elements move to
/// the front. After calling `rotate_left`, the element previously at index
/// `mid` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `mid` is greater than the length of the
/// slice. Note that `mid == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_left(2);
/// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
/// ```
///
/// Rotating a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_left(1);
/// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_left(&mut self, mid: usize) {
    assert!(mid <= self.len());
    // `k` is the length of the right-hand block that rotates to the front.
    let k = self.len() - mid;
    let p = self.as_mut_ptr();
    // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
    // valid for reading and writing, as required by `ptr_rotate`.
    unsafe {
        // `ptr_rotate(left_len, pivot, right_len)` exchanges the two
        // adjacent blocks around the pivot pointer.
        rotate::ptr_rotate(mid, p.add(mid), k);
    }
}
/// Rotates the slice in-place such that the first `self.len() - k`
/// elements of the slice move to the end while the last `k` elements move
/// to the front. After calling `rotate_right`, the element previously at
/// index `self.len() - k` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `k` is greater than the length of the
/// slice. Note that `k == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_right(2);
/// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
/// ```
///
/// Rotate a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_right(1);
/// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_right(&mut self, k: usize) {
    assert!(k <= self.len());
    // A right-rotation by `k` is a left-rotation by `len - k`; reuse the
    // same primitive with the pivot at `mid = len - k`.
    let mid = self.len() - k;
    let p = self.as_mut_ptr();
    // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
    // valid for reading and writing, as required by `ptr_rotate`.
    unsafe {
        rotate::ptr_rotate(mid, p.add(mid), k);
    }
}
/// Fills `self` with elements by cloning `value`.
///
/// # Examples
///
/// ```
/// let mut buf = vec![0; 10];
/// buf.fill(1);
/// assert_eq!(buf, vec![1; 10]);
/// ```
#[doc(alias = "memset")]
#[stable(feature = "slice_fill", since = "1.50.0")]
pub fn fill(&mut self, value: T)
where
    T: Clone,
{
    // Dispatches through a private specialization trait so that favorable
    // element types can use a faster bulk-fill path instead of cloning
    // element by element.
    specialize::SpecFill::spec_fill(self, value);
}
/// Fills `self` with elements returned by calling a closure repeatedly.
///
/// This method uses a closure to create new values. If you'd rather
/// [`Clone`] a given value, use [`fill`]. If you want to use the [`Default`]
/// trait to generate values, you can pass [`Default::default`] as the
/// argument.
///
/// [`fill`]: slice::fill
///
/// # Examples
///
/// ```
/// let mut buf = vec![1; 10];
/// buf.fill_with(Default::default);
/// assert_eq!(buf, vec![0; 10]);
/// ```
#[doc(alias = "memset")]
#[stable(feature = "slice_fill_with", since = "1.51.0")]
pub fn fill_with<F>(&mut self, mut f: F)
where
    F: FnMut() -> T,
{
    // Overwrite every slot, front to back, with a freshly produced value.
    self.iter_mut().for_each(|slot| *slot = f());
}
/// Copies the elements from `src` into `self`.
///
/// The length of `src` must be the same as `self`.
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Cloning two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.clone_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `clone_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].clone_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.clone_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`copy_from_slice`]: slice::copy_from_slice
/// [`split_at_mut`]: slice::split_at_mut
#[stable(feature = "clone_from_slice", since = "1.7.0")]
#[track_caller]
pub fn clone_from_slice(&mut self, src: &[T])
where
T: Clone,
{
self.spec_clone_from(src);
}
/// Copies all elements from `src` into `self`, using a memcpy.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` does not implement `Copy`, use [`clone_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Copying two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.copy_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `copy_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].copy_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
///     let (left, right) = slice.split_at_mut(2);
///     left.copy_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`clone_from_slice`]: slice::clone_from_slice
/// [`split_at_mut`]: slice::split_at_mut
#[doc(alias = "memcpy")]
#[stable(feature = "copy_from_slice", since = "1.9.0")]
#[track_caller]
pub fn copy_from_slice(&mut self, src: &[T])
where
    T: Copy,
{
    // The panic code path was put into a cold function to not bloat the
    // call site.
    // `#[track_caller]` propagates the caller's location into the panic
    // message, so the mismatch is reported at the user's call site.
    #[inline(never)]
    #[cold]
    #[track_caller]
    fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
        panic!(
            "source slice length ({}) does not match destination slice length ({})",
            src_len, dst_len,
        );
    }
    if self.len() != src.len() {
        len_mismatch_fail(self.len(), src.len());
    }
    // SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was
    // checked to have the same length. The slices cannot overlap because
    // mutable references are exclusive.
    unsafe {
        ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
    }
}
/// Copies elements from one part of the slice to another part of itself,
/// using a memmove.
///
/// `src` is the range within `self` to copy from. `dest` is the starting
/// index of the range within `self` to copy to, which will have the same
/// length as `src`. The two ranges may overlap. The ends of the two ranges
/// must be less than or equal to `self.len()`.
///
/// # Panics
///
/// This function will panic if either range exceeds the end of the slice,
/// or if the end of `src` is before the start.
///
/// # Examples
///
/// Copying four bytes within a slice:
///
/// ```
/// let mut bytes = *b"Hello, World!";
///
/// bytes.copy_within(1..5, 8);
///
/// assert_eq!(&bytes, b"Hello, Wello!");
/// ```
#[stable(feature = "copy_within", since = "1.37.0")]
#[track_caller]
pub fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dest: usize)
where
    T: Copy,
{
    // `slice::range` validates `src` against `..self.len()`, panicking on
    // out-of-bounds or inverted ranges, so `count <= self.len()` below and
    // the subtraction in the assert cannot underflow.
    let Range { start: src_start, end: src_end } = slice::range(src, ..self.len());
    let count = src_end - src_start;
    assert!(dest <= self.len() - count, "dest is out of bounds");
    // SAFETY: the conditions for `ptr::copy` have all been checked above,
    // as have those for `ptr::add`.
    unsafe {
        // Derive both `src_ptr` and `dest_ptr` from the same loan
        let ptr = self.as_mut_ptr();
        let src_ptr = ptr.add(src_start);
        let dest_ptr = ptr.add(dest);
        // `ptr::copy` is memmove: the two ranges may overlap.
        ptr::copy(src_ptr, dest_ptr, count);
    }
}
/// Swaps all elements in `self` with those in `other`.
///
/// The length of `other` must be the same as `self`.
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Example
///
/// Swapping two elements across slices:
///
/// ```
/// let mut slice1 = [0, 0];
/// let mut slice2 = [1, 2, 3, 4];
///
/// slice1.swap_with_slice(&mut slice2[2..]);
///
/// assert_eq!(slice1, [3, 4]);
/// assert_eq!(slice2, [1, 2, 0, 0]);
/// ```
///
/// Rust enforces that there can only be one mutable reference to a
/// particular piece of data in a particular scope. Because of this,
/// attempting to use `swap_with_slice` on a single slice will result in
/// a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
/// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// mutable sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
///     let (left, right) = slice.split_at_mut(2);
///     left.swap_with_slice(&mut right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 1, 2]);
/// ```
///
/// [`split_at_mut`]: slice::split_at_mut
#[stable(feature = "swap_with_slice", since = "1.27.0")]
#[track_caller]
pub fn swap_with_slice(&mut self, other: &mut [T]) {
    assert!(self.len() == other.len(), "destination and source slices have different lengths");
    // SAFETY: `self` is valid for `self.len()` elements by definition, and `other` was
    // checked to have the same length. The slices cannot overlap because
    // mutable references are exclusive.
    unsafe {
        ptr::swap_nonoverlapping(self.as_mut_ptr(), other.as_mut_ptr(), self.len());
    }
}
/// Function to calculate lengths of the middle and trailing slice for `align_to{,_mut}`.
///
/// Returns `(us_len, ts_len)`: how many whole `U`s fit into the aligned
/// middle slice, and how many trailing `T`s are left over.
fn align_to_offsets<U>(&self) -> (usize, usize) {
    // What we gonna do about `rest` is figure out what multiple of `U`s we can put in a
    // lowest number of `T`s. And how many `T`s we need for each such "multiple".
    //
    // Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
    // for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
    // place of every 3 Ts in the `rest` slice. A bit more complicated.
    //
    // Formula to calculate this is:
    //
    // Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
    // Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
    //
    // Expanded and simplified:
    //
    // Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
    // Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
    //
    // Luckily since all this is constant-evaluated... performance here matters not!
    #[inline]
    fn gcd(a: usize, b: usize) -> usize {
        use crate::intrinsics;
        // iterative stein’s algorithm
        // We should still make this `const fn` (and revert to recursive algorithm if we do)
        // because relying on llvm to consteval all this is… well, it makes me uncomfortable.
        // SAFETY: `a` and `b` are checked to be non-zero values.
        let (ctz_a, mut ctz_b) = unsafe {
            if a == 0 {
                return b;
            }
            if b == 0 {
                return a;
            }
            (intrinsics::cttz_nonzero(a), intrinsics::cttz_nonzero(b))
        };
        // gcd(a, b) = 2^k * gcd(a >> ctz_a, b >> ctz_b) where k = min(ctz_a, ctz_b).
        let k = ctz_a.min(ctz_b);
        let mut a = a >> ctz_a;
        let mut b = b;
        loop {
            // remove all factors of 2 from b
            b >>= ctz_b;
            if a > b {
                mem::swap(&mut a, &mut b);
            }
            b = b - a;
            // SAFETY: `b` is checked to be non-zero.
            unsafe {
                if b == 0 {
                    break;
                }
                ctz_b = intrinsics::cttz_nonzero(b);
            }
        }
        a << k
    }
    let gcd: usize = gcd(mem::size_of::<T>(), mem::size_of::<U>());
    // Per lcm-sized chunk: `ts` many `T`s correspond to `us` many `U`s.
    let ts: usize = mem::size_of::<U>() / gcd;
    let us: usize = mem::size_of::<T>() / gcd;
    // Armed with this knowledge, we can find how many `U`s we can fit!
    let us_len = self.len() / ts * us;
    // And how many `T`s will be in the trailing slice!
    let ts_len = self.len() % ts;
    (us_len, ts_len)
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
///     let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
///     let (prefix, shorts, suffix) = bytes.align_to::<u16>();
///     // less_efficient_algorithm_for_bytes(prefix);
///     // more_efficient_algorithm_for_aligned_shorts(shorts);
///     // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
    // Note that most of this function will be constant-evaluated,
    if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
        // handle ZSTs specially, which is – don't handle them at all.
        return (self, &[], &[]);
    }
    // First, find at what point do we split between the first and 2nd slice. Easy with
    // ptr.align_offset.
    let ptr = self.as_ptr();
    // SAFETY: See the `align_to_mut` method for the detailed safety comment.
    let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
    if offset > self.len() {
        // Alignment cannot be reached inside this slice: everything is prefix.
        (self, &[], &[])
    } else {
        let (left, rest) = self.split_at(offset);
        let (us_len, ts_len) = rest.align_to_offsets::<U>();
        // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
        // since the caller guarantees that we can transmute `T` to `U` safely.
        unsafe {
            (
                left,
                from_raw_parts(rest.as_ptr() as *const U, us_len),
                from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
            )
        }
    }
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
///     let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
///     let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
///     // less_efficient_algorithm_for_bytes(prefix);
///     // more_efficient_algorithm_for_aligned_shorts(shorts);
///     // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
    // Note that most of this function will be constant-evaluated,
    if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
        // handle ZSTs specially, which is – don't handle them at all.
        return (self, &mut [], &mut []);
    }
    // First, find at what point do we split between the first and 2nd slice. Easy with
    // ptr.align_offset.
    let ptr = self.as_ptr();
    // SAFETY: Here we are ensuring we will use aligned pointers for U for the
    // rest of the method. This is done by passing a pointer to &[T] with an
    // alignment targeted for U.
    // `crate::ptr::align_offset` is called with a correctly aligned and
    // valid pointer `ptr` (it comes from a reference to `self`) and with
    // a size that is a power of two (since it comes from the alignment for U),
    // satisfying its safety constraints.
    let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
    if offset > self.len() {
        // Alignment cannot be reached inside this slice: everything is prefix.
        (self, &mut [], &mut [])
    } else {
        let (left, rest) = self.split_at_mut(offset);
        let (us_len, ts_len) = rest.align_to_offsets::<U>();
        let rest_len = rest.len();
        let mut_ptr = rest.as_mut_ptr();
        // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
        // SAFETY: see comments for `align_to`.
        unsafe {
            (
                left,
                from_raw_parts_mut(mut_ptr as *mut U, us_len),
                from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
            )
        }
    }
}
/// Checks if the elements of this slice are sorted.
///
/// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the
/// slice yields exactly zero or one element, `true` is returned.
///
/// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition
/// implies that this function returns `false` if any two consecutive items are not
/// comparable.
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
/// let empty: [i32; 0] = [];
///
/// assert!([1, 2, 2, 9].is_sorted());
/// assert!(![1, 3, 2, 4].is_sorted());
/// assert!([0].is_sorted());
/// assert!(empty.is_sorted());
/// assert!(![0.0, 1.0, f32::NAN].is_sorted());
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted(&self) -> bool
where
    T: PartialOrd,
{
    // Use the type's own partial ordering as the comparator.
    self.is_sorted_by(|a, b| PartialOrd::partial_cmp(a, b))
}
/// Checks if the elements of this slice are sorted using the given comparator function.
///
/// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare`
/// function to determine the ordering of two elements. Apart from that, it's equivalent to
/// [`is_sorted`]; see its documentation for more information.
///
/// [`is_sorted`]: slice::is_sorted
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
where
    F: FnMut(&T, &T) -> Option<Ordering>,
{
    // The iterator adapter hands out `&&T`; destructure one level of
    // reference so `compare` sees plain `&T` arguments.
    self.iter().is_sorted_by(|&a, &b| compare(a, b))
}
/// Checks if the elements of this slice are sorted using the given key extraction function.
///
/// Instead of comparing the slice's elements directly, this function compares the keys of the
/// elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see its
/// documentation for more information.
///
/// [`is_sorted`]: slice::is_sorted
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
///
/// assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
/// assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
where
    F: FnMut(&T) -> K,
    K: PartialOrd,
{
    // Map every element to its key and check the key sequence is sorted.
    self.iter().map(f).is_sorted()
}
/// Returns the index of the partition point according to the given predicate
/// (the index of the first element of the second partition).
///
/// The slice is assumed to be partitioned according to the given predicate.
/// This means that all elements for which the predicate returns true are at the start of the slice
/// and all elements for which the predicate returns false are at the end.
/// For example, [7, 15, 3, 5, 4, 12, 6] is a partitioned under the predicate x % 2 != 0
/// (all odd numbers are at the start, all even at the end).
///
/// If this slice is not partitioned, the returned result is unspecified and meaningless,
/// as this method performs a kind of binary search.
///
/// See also [`binary_search`], [`binary_search_by`], and [`binary_search_by_key`].
///
/// [`binary_search`]: slice::binary_search
/// [`binary_search_by`]: slice::binary_search_by
/// [`binary_search_by_key`]: slice::binary_search_by_key
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 3, 5, 6, 7];
/// let i = v.partition_point(|&x| x < 5);
///
/// assert_eq!(i, 4);
/// assert!(v[..i].iter().all(|&x| x < 5));
/// assert!(v[i..].iter().all(|&x| !(x < 5)));
/// ```
#[stable(feature = "partition_point", since = "1.52.0")]
pub fn partition_point<P>(&self, mut pred: P) -> usize
where
    P: FnMut(&T) -> bool,
{
    // The comparator never returns `Equal`, so the search always ends in
    // `Err` carrying the insertion point — which is exactly the partition
    // index. Matching both arms keeps this robust either way.
    match self.binary_search_by(|x| if pred(x) { Less } else { Greater }) {
        Ok(i) | Err(i) => i,
    }
}
}
// Private specialization trait backing `clone_from_slice`: the `T: Copy`
// impl below upgrades element-wise `clone_from` to a single memcpy.
trait CloneFromSpec<T> {
    // Clone every element of `src` into `self`; panics on length mismatch.
    fn spec_clone_from(&mut self, src: &[T]);
}
// Generic fallback: clone element by element. Overridden for `T: Copy`.
impl<T> CloneFromSpec<T> for [T]
where
    T: Clone,
{
    #[track_caller]
    default fn spec_clone_from(&mut self, src: &[T]) {
        assert!(self.len() == src.len(), "destination and source slices have different lengths");
        // NOTE: After the assert this re-slicing is a no-op, but spelling the
        // equal lengths out helps the optimizer elide per-element bounds
        // checks. Since that can't be relied on, there is also an explicit
        // specialization for T: Copy.
        let len = self.len();
        let src = &src[..len];
        for (dst, s) in self[..len].iter_mut().zip(src) {
            dst.clone_from(s);
        }
    }
}
// Specialization for `T: Copy`: `clone` cannot observe anything, so the
// whole operation lowers to one `memcpy` via `copy_from_slice`.
impl<T> CloneFromSpec<T> for [T]
where
    T: Copy,
{
    #[track_caller]
    fn spec_clone_from(&mut self, src: &[T]) {
        // `copy_from_slice` performs the same length assertion.
        self.copy_from_slice(src);
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl<T> const Default for &[T] {
    /// Creates an empty slice.
    fn default() -> Self {
        // A shared reference to an empty array; no allocation involved.
        &[]
    }
}
#[stable(feature = "mut_slice_default", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl<T> const Default for &mut [T] {
    /// Creates a mutable empty slice.
    fn default() -> Self {
        // An empty slice is never aliased, so handing out `&mut` is fine.
        &mut []
    }
}
#[unstable(feature = "slice_pattern", reason = "stopgap trait for slice patterns", issue = "56345")]
/// Patterns in slices - currently, only used by `strip_prefix` and `strip_suffix`. At a future
/// point, we hope to generalise `core::str::Pattern` (which at the time of writing is limited to
/// `str`) to slices, and then this trait will be replaced or abolished.
pub trait SlicePattern {
    /// The element type of the slice being matched on.
    type Item;
    /// Currently, the consumers of `SlicePattern` need a slice.
    fn as_slice(&self) -> &[Self::Item];
}
#[stable(feature = "slice_strip", since = "1.51.0")]
// A slice trivially patterns-matches as itself.
impl<T> SlicePattern for [T] {
    type Item = T;
    #[inline]
    fn as_slice(&self) -> &[Self::Item] {
        self
    }
}
#[stable(feature = "slice_strip", since = "1.51.0")]
// Arrays pattern-match by coercing to their underlying slice.
impl<T, const N: usize> SlicePattern for [T; N] {
    type Item = T;
    #[inline]
    fn as_slice(&self) -> &[Self::Item] {
        self
    }
}
|
// ignore-tidy-filelength
//! Slice management and manipulation.
//!
//! For more details see [`std::slice`].
//!
//! [`std::slice`]: ../../std/slice/index.html
#![stable(feature = "rust1", since = "1.0.0")]
use crate::cmp::Ordering::{self, Equal, Greater, Less};
use crate::marker::Copy;
use crate::mem;
use crate::num::NonZeroUsize;
use crate::ops::{FnMut, Range, RangeBounds};
use crate::option::Option;
use crate::option::Option::{None, Some};
use crate::ptr;
use crate::result::Result;
use crate::result::Result::{Err, Ok};
#[unstable(
feature = "slice_internals",
issue = "none",
reason = "exposed from core to be reused in std; use the memchr crate"
)]
/// Pure rust memchr implementation, taken from rust-memchr
pub mod memchr;
mod ascii;
mod cmp;
mod index;
mod iter;
mod raw;
mod rotate;
mod sort;
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Chunks, ChunksMut, Windows};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Iter, IterMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{RSplitN, RSplitNMut, Split, SplitMut, SplitN, SplitNMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use iter::{RSplit, RSplitMut};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use iter::{ChunksExact, ChunksExactMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use iter::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[unstable(feature = "array_chunks", issue = "74985")]
pub use iter::{ArrayChunks, ArrayChunksMut};
#[unstable(feature = "array_windows", issue = "75027")]
pub use iter::ArrayWindows;
#[unstable(feature = "split_inclusive", issue = "72360")]
pub use iter::{SplitInclusive, SplitInclusiveMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use raw::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "from_ref", since = "1.28.0")]
pub use raw::{from_mut, from_ref};
// This function is public only because there is no other way to unit test heapsort.
#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
pub use sort::heapsort;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use index::SliceIndex;
#[unstable(feature = "slice_check_range", issue = "76393")]
pub use index::check_range;
#[lang = "slice"]
#[cfg(not(test))]
impl<T> [T] {
/// Returns the number of elements in the slice.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert_eq!(a.len(), 3);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_len", since = "1.32.0")]
#[inline]
// SAFETY: const sound because we transmute out the length field as a usize (which it must be)
#[allow_internal_unstable(const_fn_union)]
pub const fn len(&self) -> usize {
    // SAFETY: this is safe because `&[T]` and `FatPtr<T>` have the same layout.
    // Only `std` can make this guarantee.
    // The `Repr` union reinterprets the fat pointer `&[T]` as a
    // (data pointer, length) pair and reads the length word directly.
    unsafe { crate::ptr::Repr { rust: self }.raw.len }
}
/// Returns `true` if the slice has a length of 0.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert!(!a.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_is_empty", since = "1.32.0")]
#[inline]
pub const fn is_empty(&self) -> bool {
    // Empty exactly when the length word is zero.
    matches!(self.len(), 0)
}
/// Returns the first element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&10), v.first());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.first());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn first(&self) -> Option<&T> {
    // Exhaustive slice-pattern match: either there is a head element or
    // the slice is empty.
    match self {
        [first, ..] => Some(first),
        [] => None,
    }
}
/// Returns a mutable reference to the first element of the slice, or
/// `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(first) = x.first_mut() {
///     *first = 5;
/// }
/// assert_eq!(x, &[5, 1, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn first_mut(&mut self) -> Option<&mut T> {
    // Exhaustive slice-pattern match mirroring `first`.
    match self {
        [first, ..] => Some(first),
        [] => None,
    }
}
/// Splits the slice into its first element and the remaining tail,
/// returning `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first() {
///     assert_eq!(first, &0);
///     assert_eq!(elements, &[1, 2]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_first(&self) -> Option<(&T, &[T])> {
    // Bind the head and the rest in one slice pattern; the empty slice
    // has no head, so it maps to `None`.
    match self {
        [first, tail @ ..] => Some((first, tail)),
        [] => None,
    }
}
/// Splits the slice into a mutable reference to its first element and the
/// remaining mutable tail, returning `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first_mut() {
///     *first = 3;
///     elements[0] = 4;
///     elements[1] = 5;
/// }
/// assert_eq!(x, &[3, 4, 5]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
    // Same slice-pattern split as `split_first`, through `&mut self`.
    match self {
        [first, tail @ ..] => Some((first, tail)),
        [] => None,
    }
}
/// Splits the slice into its last element and the preceding elements,
/// returning `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last() {
///     assert_eq!(last, &2);
///     assert_eq!(elements, &[0, 1]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_last(&self) -> Option<(&T, &[T])> {
    // Bind everything before the last element as `init`; note the tuple
    // order is `(last, init)`.
    match self {
        [init @ .., last] => Some((last, init)),
        [] => None,
    }
}
/// Splits the slice into a mutable reference to its last element and the
/// preceding mutable elements, returning `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last_mut() {
///     *last = 3;
///     elements[0] = 4;
///     elements[1] = 5;
/// }
/// assert_eq!(x, &[4, 5, 3]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
    // Same slice-pattern split as `split_last`, through `&mut self`.
    match self {
        [init @ .., last] => Some((last, init)),
        [] => None,
    }
}
/// Returns a reference to the last element of the slice, or `None` when
/// the slice is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&30), v.last());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.last());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn last(&self) -> Option<&T> {
    // Exhaustive slice-pattern match on the trailing element.
    match self {
        [.., last] => Some(last),
        [] => None,
    }
}
/// Returns a mutable reference to the last element of the slice, or
/// `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(last) = x.last_mut() {
///     *last = 10;
/// }
/// assert_eq!(x, &[0, 1, 10]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn last_mut(&mut self) -> Option<&mut T> {
    // Exhaustive slice-pattern match mirroring `last`.
    match self {
        [.., last] => Some(last),
        [] => None,
    }
}
/// Returns a reference to an element or subslice depending on the type of
/// index.
///
/// - If given a position, returns a reference to the element at that
/// position or `None` if out of bounds.
/// - If given a range, returns the subslice corresponding to that range,
/// or `None` if out of bounds.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&40), v.get(1));
/// assert_eq!(Some(&[10, 40][..]), v.get(0..2));
/// assert_eq!(None, v.get(3));
/// assert_eq!(None, v.get(0..4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get<I>(&self, index: I) -> Option<&I::Output>
where
I: SliceIndex<Self>,
{
index.get(self)
}
/// Returns a mutable reference to an element or subslice depending on the
/// type of index (see [`get`]) or `None` if the index is out of bounds.
///
/// [`get`]: #method.get
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(elem) = x.get_mut(1) {
/// *elem = 42;
/// }
/// assert_eq!(x, &[0, 42, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
I: SliceIndex<Self>,
{
index.get_mut(self)
}
/// Returns a reference to an element or subslice, without doing bounds
/// checking.
///
/// For a safe alternative see [`get`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
/// [`get`]: #method.get
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
///
/// unsafe {
///     assert_eq!(x.get_unchecked(1), &2);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where
    I: SliceIndex<Self>,
{
    // SAFETY: the caller must uphold the safety requirements for
    // `SliceIndex::get_unchecked`; the slice is dereferencable because
    // `self` is a safe reference.
    let ptr = unsafe { index.get_unchecked(self) };
    // SAFETY: impls of `SliceIndex` are required to return a pointer that
    // is valid to dereference here.
    unsafe { &*ptr }
}
/// Returns a mutable reference to an element or subslice, without doing
/// bounds checking.
///
/// For a safe alternative see [`get_mut`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
///
/// [`get_mut`]: #method.get_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
///
/// unsafe {
///     let elem = x.get_unchecked_mut(1);
///     *elem = 13;
/// }
/// assert_eq!(x, &[1, 13, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where
    I: SliceIndex<Self>,
{
    // SAFETY: the caller must uphold the safety requirements for
    // `SliceIndex::get_unchecked_mut`; the slice is dereferencable because
    // `self` is a safe reference.
    let ptr = unsafe { index.get_unchecked_mut(self) };
    // SAFETY: impls of `SliceIndex` are required to return a pointer that
    // is valid to dereference here.
    unsafe { &mut *ptr }
}
/// Returns a raw pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// The caller must also ensure that the memory the pointer (non-transitively) points to
/// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
/// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let x_ptr = x.as_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
///     }
/// }
/// ```
///
/// [`as_mut_ptr`]: #method.as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
#[inline]
pub const fn as_ptr(&self) -> *const T {
    // The two-step cast unsizes `&[T]` to a raw fat pointer and then drops
    // the length metadata; both casts are valid in a const fn.
    self as *const [T] as *const T
}
/// Returns an unsafe mutable pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// let x_ptr = x.as_mut_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         *x_ptr.add(i) += 2;
///     }
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_mut_ptr(&mut self) -> *mut T {
    // Mutable counterpart of `as_ptr`: the cast chain discards the fat
    // pointer's length metadata.
    self as *mut [T] as *mut T
}
/// Returns the two raw pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_ptr`] for warnings on using these pointers. The end pointer
/// requires extra caution, as it does not point to a valid element in the
/// slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// It can also be useful to check if a pointer to an element refers to an
/// element of this slice:
///
/// ```
/// let a = [1, 2, 3];
/// let x = &a[1] as *const _;
/// let y = &5 as *const _;
///
/// assert!(a.as_ptr_range().contains(&x));
/// assert!(!a.as_ptr_range().contains(&y));
/// ```
///
/// [`as_ptr`]: #method.as_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_ptr_range(&self) -> Range<*const T> {
    let start = self.as_ptr();
    // SAFETY: The `add` here is safe, because:
    //
    //   - Both pointers are part of the same object, as pointing directly
    //     past the object also counts.
    //
    //   - The size of the slice is never larger than isize::MAX bytes, as
    //     noted here:
    //       - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447
    //       - https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    //       - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety
    //     (This doesn't seem normative yet, but the very same assumption is
    //     made in many places, including the Index implementation of slices.)
    //
    //   - There is no wrapping around involved, as slices do not wrap past
    //     the end of the address space.
    //
    // See the documentation of pointer::add.
    let end = unsafe { start.add(self.len()) };
    // Half-open range: `end` is one-past-the-last element.
    start..end
}
/// Returns the two unsafe mutable pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_mut_ptr`] for warnings on using these pointers. The end
/// pointer requires extra caution, as it does not point to a valid element
/// in the slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// [`as_mut_ptr`]: #method.as_mut_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
    let start = self.as_mut_ptr();
    // SAFETY: See as_ptr_range() above for why `add` here is safe.
    let end = unsafe { start.add(self.len()) };
    // Half-open range mirroring `as_ptr_range`, but with mutable pointers.
    start..end
}
/// Swaps two elements in the slice.
///
/// # Arguments
///
/// * a - The index of the first element
/// * b - The index of the second element
///
/// # Panics
///
/// Panics if `a` or `b` are out of bounds.
///
/// # Examples
///
/// ```
/// let mut v = ["a", "b", "c", "d"];
/// v.swap(1, 3);
/// assert!(v == ["a", "d", "c", "b"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn swap(&mut self, a: usize, b: usize) {
    // Can't take two mutable loans from one vector, so instead just cast
    // them to their raw pointers to do the swap.
    // The `&mut self[..]` indexing below is what performs the bounds
    // checks (and thus the documented panics) before we go raw.
    let pa: *mut T = &mut self[a];
    let pb: *mut T = &mut self[b];
    // SAFETY: `pa` and `pb` have been created from safe mutable references and refer
    // to elements in the slice and therefore are guaranteed to be valid and aligned.
    // Note that accessing the elements behind `a` and `b` is checked and will
    // panic when out of bounds.
    unsafe {
        ptr::swap(pa, pb);
    }
}
/// Reverses the order of elements in the slice, in place.
///
/// # Examples
///
/// ```
/// let mut v = [1, 2, 3];
/// v.reverse();
/// assert!(v == [3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn reverse(&mut self) {
    let mut i: usize = 0;
    let ln = self.len();
    // For very small types, all the individual reads in the normal
    // path perform poorly. We can do better, given efficient unaligned
    // load/store, by loading a larger chunk and reversing a register.
    // Ideally LLVM would do this for us, as it knows better than we do
    // whether unaligned reads are efficient (since that changes between
    // different ARM versions, for example) and what the best chunk size
    // would be. Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls
    // the loop, so we need to do this ourselves. (Hypothesis: reverse
    // is troublesome because the sides can be aligned differently --
    // will be, when the length is odd -- so there's no way of emitting
    // pre- and postludes to use fully-aligned SIMD in the middle.)
    let fast_unaligned = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
    // Fast path 1: one-byte elements are reversed a `usize` at a time via
    // byte-swap.
    if fast_unaligned && mem::size_of::<T>() == 1 {
        // Use the llvm.bswap intrinsic to reverse u8s in a usize
        let chunk = mem::size_of::<usize>();
        while i + chunk - 1 < ln / 2 {
            // SAFETY: There are several things to check here:
            //
            // - Note that `chunk` is either 4 or 8 due to the cfg check
            //   above. So `chunk - 1` is positive.
            // - Indexing with index `i` is fine as the loop check guarantees
            //   `i + chunk - 1 < ln / 2`
            //   <=> `i < ln / 2 - (chunk - 1) < ln / 2 < ln`.
            // - Indexing with index `ln - i - chunk = ln - (i + chunk)` is fine:
            //   - `i + chunk > 0` is trivially true.
            //   - The loop check guarantees:
            //     `i + chunk - 1 < ln / 2`
            //     <=> `i + chunk ≤ ln / 2 ≤ ln`, thus subtraction does not underflow.
            // - The `read_unaligned` and `write_unaligned` calls are fine:
            //   - `pa` points to index `i` where `i < ln / 2 - (chunk - 1)`
            //     (see above) and `pb` points to index `ln - i - chunk`, so
            //     both are at least `chunk`
            //     many bytes away from the end of `self`.
            //   - Any initialized memory is valid `usize`.
            unsafe {
                let pa: *mut T = self.get_unchecked_mut(i);
                let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
                let va = ptr::read_unaligned(pa as *mut usize);
                let vb = ptr::read_unaligned(pb as *mut usize);
                ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
                ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
            }
            i += chunk;
        }
    }
    // Fast path 2: two-byte elements are reversed two at a time by rotating
    // a `u32` by 16 bits.
    if fast_unaligned && mem::size_of::<T>() == 2 {
        // Use rotate-by-16 to reverse u16s in a u32
        let chunk = mem::size_of::<u32>() / 2;
        while i + chunk - 1 < ln / 2 {
            // SAFETY: An unaligned u32 can be read from `i` if `i + 1 < ln`
            // (and obviously `i < ln`), because each element is 2 bytes and
            // we're reading 4.
            //
            // `i + chunk - 1 < ln / 2` # while condition
            // `i + 2 - 1 < ln / 2`
            // `i + 1 < ln / 2`
            //
            // Since it's less than the length divided by 2, then it must be
            // in bounds.
            //
            // This also means that the condition `0 < i + chunk <= ln` is
            // always respected, ensuring the `pb` pointer can be used
            // safely.
            unsafe {
                let pa: *mut T = self.get_unchecked_mut(i);
                let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
                let va = ptr::read_unaligned(pa as *mut u32);
                let vb = ptr::read_unaligned(pb as *mut u32);
                ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
                ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
            }
            i += chunk;
        }
    }
    // Scalar fallback (and tail of the fast paths): swap symmetric pairs
    // one element at a time until the midpoint.
    while i < ln / 2 {
        // SAFETY: `i` is inferior to half the length of the slice so
        // accessing `i` and `ln - i - 1` is safe (`i` starts at 0 and
        // will not go further than `ln / 2 - 1`).
        // The resulting pointers `pa` and `pb` are therefore valid and
        // aligned, and can be read from and written to.
        unsafe {
            // Unsafe swap to avoid the bounds check in safe swap.
            let pa: *mut T = self.get_unchecked_mut(i);
            let pb: *mut T = self.get_unchecked_mut(ln - i - 1);
            ptr::swap(pa, pb);
        }
        i += 1;
    }
}
/// Returns an iterator over the slice.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let mut iterator = x.iter();
///
/// assert_eq!(iterator.next(), Some(&1));
/// assert_eq!(iterator.next(), Some(&2));
/// assert_eq!(iterator.next(), Some(&4));
/// assert_eq!(iterator.next(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter(&self) -> Iter<'_, T> {
    // `Iter` (defined in this module's `iter` submodule) borrows the slice
    // for the lifetime of the returned iterator.
    Iter::new(self)
}
/// Returns an iterator that allows modifying each value.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// for elem in x.iter_mut() {
///     *elem += 2;
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
    // `IterMut` exclusively borrows the slice, so each yielded `&mut T`
    // is unique.
    IterMut::new(self)
}
/// Returns an iterator over all contiguous windows of length
/// `size`. The windows overlap. If the slice is shorter than
/// `size`, the iterator returns no values.
///
/// # Panics
///
/// Panics if `size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['r', 'u', 's', 't'];
/// let mut iter = slice.windows(2);
/// assert_eq!(iter.next().unwrap(), &['r', 'u']);
/// assert_eq!(iter.next().unwrap(), &['u', 's']);
/// assert_eq!(iter.next().unwrap(), &['s', 't']);
/// assert!(iter.next().is_none());
/// ```
///
/// If the slice is shorter than `size`:
///
/// ```
/// let slice = ['f', 'o', 'o'];
/// let mut iter = slice.windows(4);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn windows(&self, size: usize) -> Windows<'_, T> {
    // `Windows` stores the window size as `NonZeroUsize`; a zero size is
    // rejected here with the same message `Option::expect` would emit.
    let size = match NonZeroUsize::new(size) {
        Some(size) => size,
        None => panic!("size is zero"),
    };
    Windows::new(self, size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`rchunks`] for the same iterator but starting at the end of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert_eq!(iter.next().unwrap(), &['m']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`chunks_exact`]: #method.chunks_exact
/// [`rchunks`]: #method.rchunks
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
    // A chunk size of zero could never advance the iterator; reject it
    // eagerly rather than looping forever.
    assert_ne!(chunk_size, 0);
    Chunks::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`rchunks_mut`] for the same iterator but starting at
/// the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 3]);
/// ```
///
/// [`chunks_exact_mut`]: #method.chunks_exact_mut
/// [`rchunks_mut`]: #method.rchunks_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
    // A zero chunk size could never advance the iterator; reject it eagerly.
    assert_ne!(chunk_size, 0);
    ChunksMut::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`chunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`rchunks_exact`] for the same iterator but starting at the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks`]: #method.chunks
/// [`rchunks_exact`]: #method.rchunks_exact
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
    // A zero chunk size could never advance the iterator; reject it eagerly.
    assert_ne!(chunk_size, 0);
    ChunksExact::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`chunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`rchunks_exact_mut`] for the same iterator but starting at the end of
/// the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_exact_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_mut`]: #method.chunks_mut
/// [`rchunks_exact_mut`]: #method.rchunks_exact_mut
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
    // A zero chunk size could never advance the iterator; reject it eagerly.
    assert_ne!(chunk_size, 0);
    ChunksExactMut::new(self, chunk_size)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are array references and do not overlap. If `N` does not divide the
/// length of the slice, then the last up to `N-1` elements will be omitted and can be
/// retrieved from the `remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.array_chunks();
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks_exact`]: #method.chunks_exact
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
    // `N` is a const parameter, but zero can only be rejected at runtime
    // until const-generic bounds can express it (see the doc note above).
    assert_ne!(N, 0);
    ArrayChunks::new(self)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable array references and do not overlap. If `N` does not divide
/// the length of the slice, then the last up to `N-1` elements will be omitted and
/// can be retrieved from the `into_remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact_mut`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.array_chunks_mut() {
///     *chunk = [count; 2];
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_exact_mut`]: #method.chunks_exact_mut
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
    // Runtime rejection of `N == 0`; see the doc note about a future
    // compile-time check.
    assert_ne!(N, 0);
    ArrayChunksMut::new(self)
}
/// Returns an iterator over overlapping windows of `N` elements of a slice,
/// starting at the beginning of the slice.
///
/// This is the const generic equivalent of [`windows`].
///
/// If `N` is greater than the size of the slice, it will return no windows.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_windows)]
/// let slice = [0, 1, 2, 3];
/// let mut iter = slice.array_windows();
/// assert_eq!(iter.next().unwrap(), &[0, 1]);
/// assert_eq!(iter.next().unwrap(), &[1, 2]);
/// assert_eq!(iter.next().unwrap(), &[2, 3]);
/// assert!(iter.next().is_none());
/// ```
///
/// [`windows`]: #method.windows
#[unstable(feature = "array_windows", issue = "75027")]
#[inline]
pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> {
    // Runtime rejection of `N == 0`; see the doc note about a future
    // compile-time check.
    assert_ne!(N, 0);
    ArrayWindows::new(self)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`chunks`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert_eq!(iter.next().unwrap(), &['l']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`rchunks_exact`]: #method.rchunks_exact
/// [`chunks`]: #method.chunks
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
    // A zero chunk size could never advance the iterator; reject it
    // eagerly. Uses `assert_ne!` for consistency with the forward
    // `chunks` family.
    assert_ne!(chunk_size, 0);
    RChunks::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`chunks_mut`] for the same iterator but starting at the
/// beginning of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[3, 2, 2, 1, 1]);
/// ```
///
/// [`rchunks_exact_mut`]: #method.rchunks_exact_mut
/// [`chunks_mut`]: #method.chunks_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
    // A zero chunk size could never advance the iterator; reject it
    // eagerly. Uses `assert_ne!` for consistency with `chunks_mut`.
    assert_ne!(chunk_size, 0);
    RChunksMut::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// end of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`rchunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`chunks_exact`] for the same iterator but starting at the beginning of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['l']);
/// ```
///
/// [`chunks`]: #method.chunks
/// [`rchunks`]: #method.rchunks
/// [`chunks_exact`]: #method.chunks_exact
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
    // A zero chunk size could never advance the iterator; reject it
    // eagerly. Uses `assert_ne!` for consistency with `chunks_exact`.
    assert_ne!(chunk_size, 0);
    RChunksExact::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`rchunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`chunks_exact_mut`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_exact_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[0, 2, 2, 1, 1]);
/// ```
///
/// [`chunks_mut`]: #method.chunks_mut
/// [`rchunks_mut`]: #method.rchunks_mut
/// [`chunks_exact_mut`]: #method.chunks_exact_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
    // A zero chunk size could never advance the iterator; reject it
    // eagerly. Uses `assert_ne!` for consistency with `chunks_exact_mut`.
    assert_ne!(chunk_size, 0);
    RChunksExactMut::new(self, chunk_size)
}
/// Divides one slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 4, 5, 6];
///
/// {
///    let (left, right) = v.split_at(0);
///    assert_eq!(left, []);
///    assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = v.split_at(2);
///     assert_eq!(left, [1, 2]);
///     assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = v.split_at(6);
///     assert_eq!(left, [1, 2, 3, 4, 5, 6]);
///     assert_eq!(right, []);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
    assert!(mid <= self.len());
    // SAFETY: the assert above established `mid <= len`, so `[ptr; mid]`
    // and `[mid; len]` are inside `self`, which fulfills the requirements
    // of `from_raw_parts` (used by `split_at_unchecked`).
    unsafe { self.split_at_unchecked(mid) }
}
/// Divides one mutable slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let mut v = [1, 0, 3, 0, 5, 6];
/// // scoped to restrict the lifetime of the borrows
/// {
/// let (left, right) = v.split_at_mut(2);
/// assert_eq!(left, [1, 0]);
/// assert_eq!(right, [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// }
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    assert!(mid <= self.len());
    // SAFETY: `mid` was checked to be in bounds just above, so the
    // `[ptr; mid]` and `[mid; len]` halves are inside `self` and disjoint,
    // which fulfills the requirements of `from_raw_parts_mut` used by
    // `split_at_mut_unchecked`.
    unsafe { self.split_at_mut_unchecked(mid) }
}
/// Divides one slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at`]: #method.split_at
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```compile_fail
/// #![feature(slice_split_at_unchecked)]
///
/// let v = [1, 2, 3, 4, 5, 6];
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(0);
/// assert_eq!(left, []);
/// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(2);
/// assert_eq!(left, [1, 2]);
/// assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(6);
/// assert_eq!(left, [1, 2, 3, 4, 5, 6]);
/// assert_eq!(right, []);
/// }
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`; under that
    // precondition both `..mid` and `mid..` are valid ranges into `self`,
    // so the two `get_unchecked` calls are in bounds.
    unsafe { (self.get_unchecked(..mid), self.get_unchecked(mid..)) }
}
/// Divides one mutable slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at_mut`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at_mut`]: #method.split_at_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```compile_fail
/// #![feature(slice_split_at_unchecked)]
///
/// let mut v = [1, 0, 3, 0, 5, 6];
/// // scoped to restrict the lifetime of the borrows
/// unsafe {
/// let (left, right) = v.split_at_mut_unchecked(2);
/// assert_eq!(left, [1, 0]);
/// assert_eq!(right, [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// }
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    let len = self.len();
    let ptr = self.as_mut_ptr();
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`.
    //
    // With that precondition, `[ptr; mid]` and `[mid; len]` are both inside
    // the original slice and are not overlapping, so handing out one mutable
    // reference to each half is fine.
    unsafe { (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid)) }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the first element is matched, an empty slice will be the first item
/// returned by the iterator. Similarly, if the last element in the slice
/// is matched, an empty slice will be the last item returned by the
/// iterator:
///
/// ```
/// let slice = [10, 40, 33];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert!(iter.next().is_none());
/// ```
///
/// If two matched elements are directly adjacent, an empty slice will be
/// present between them:
///
/// ```
/// let slice = [10, 6, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split<F: FnMut(&T) -> bool>(&self, pred: F) -> Split<'_, T, F> {
    // Construction is cheap: the returned iterator scans lazily as it is advanced.
    Split::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_mut(|num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_mut<F: FnMut(&T) -> bool>(&mut self, pred: F) -> SplitMut<'_, T, F> {
    // Mutable mirror of `split`: same separator semantics, `&mut` subslices.
    SplitMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is contained in the end of the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// #![feature(split_inclusive)]
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the last element of the slice is matched,
/// that element will be considered the terminator of the preceding slice.
/// That slice will be the last item returned by the iterator.
///
/// ```
/// #![feature(split_inclusive)]
/// let slice = [3, 10, 40, 33];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[3]);
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert!(iter.next().is_none());
/// ```
#[unstable(feature = "split_inclusive", issue = "72360")]
#[inline]
pub fn split_inclusive<F: FnMut(&T) -> bool>(&self, pred: F) -> SplitInclusive<'_, T, F> {
    // Unlike `split`, each matched element stays at the end of its subslice.
    SplitInclusive::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is contained in the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// #![feature(split_inclusive)]
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
/// let terminator_idx = group.len()-1;
/// group[terminator_idx] = 1;
/// }
/// assert_eq!(v, [10, 40, 1, 20, 1, 1]);
/// ```
#[unstable(feature = "split_inclusive", issue = "72360")]
#[inline]
pub fn split_inclusive_mut<F: FnMut(&T) -> bool>(
    &mut self,
    pred: F,
) -> SplitInclusiveMut<'_, T, F> {
    // Mutable mirror of `split_inclusive`: the terminator stays in each group.
    SplitInclusiveMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, starting at the end of the slice and working backwards.
/// The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [11, 22, 33, 0, 44, 55];
/// let mut iter = slice.rsplit(|num| *num == 0);
///
/// assert_eq!(iter.next().unwrap(), &[44, 55]);
/// assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
/// assert_eq!(iter.next(), None);
/// ```
///
/// As with `split()`, if the first or last element is matched, an empty
/// slice will be the first (or last) item returned by the iterator.
///
/// ```
/// let v = &[0, 1, 1, 2, 3, 5, 8];
/// let mut it = v.rsplit(|n| *n % 2 == 0);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next().unwrap(), &[3, 5]);
/// assert_eq!(it.next().unwrap(), &[1, 1]);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next(), None);
/// ```
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit<F: FnMut(&T) -> bool>(&self, pred: F) -> RSplit<'_, T, F> {
    // Same separator semantics as `split`, but subslices are yielded back-to-front.
    RSplit::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`, starting at the end of the slice and working
/// backwards. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [100, 400, 300, 200, 600, 500];
///
/// let mut count = 0;
/// for group in v.rsplit_mut(|num| *num % 3 == 0) {
/// count += 1;
/// group[0] = count;
/// }
/// assert_eq!(v, [3, 400, 300, 2, 600, 1]);
/// ```
///
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit_mut<F: FnMut(&T) -> bool>(&mut self, pred: F) -> RSplitMut<'_, T, F> {
    // Mutable mirror of `rsplit`: reverse-order groups, `&mut` subslices.
    RSplitMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once by numbers divisible by 3 (i.e., `[10, 40]`,
/// `[20, 60, 50]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn<F: FnMut(&T) -> bool>(&self, n: usize, pred: F) -> SplitN<'_, T, F> {
    // Build the unbounded splitter first, then cap it at `n` yielded items.
    let unbounded = self.split(pred);
    SplitN::new(unbounded, n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 50]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn_mut<F: FnMut(&T) -> bool>(&mut self, n: usize, pred: F) -> SplitNMut<'_, T, F> {
    // Cap the unbounded mutable splitter at `n` yielded items.
    let unbounded = self.split_mut(pred);
    SplitNMut::new(unbounded, n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once, starting from the end, by numbers divisible
/// by 3 (i.e., `[50]`, `[10, 40, 30, 20]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.rsplitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn<F: FnMut(&T) -> bool>(&self, n: usize, pred: F) -> RSplitN<'_, T, F> {
    // Reverse splitter, capped at `n` yielded items.
    let unbounded = self.rsplit(pred);
    RSplitN::new(unbounded, n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut s = [10, 40, 30, 20, 60, 50];
///
/// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(s, [1, 40, 30, 20, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn_mut<F: FnMut(&T) -> bool>(
    &mut self,
    n: usize,
    pred: F,
) -> RSplitNMut<'_, T, F> {
    // Mutable reverse splitter, capped at `n` yielded items.
    let unbounded = self.rsplit_mut(pred);
    RSplitNMut::new(unbounded, n)
}
/// Returns `true` if the slice contains an element with the given value.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.contains(&30));
/// assert!(!v.contains(&50));
/// ```
///
/// If you do not have an `&T`, but just an `&U` such that `T: Borrow<U>`
/// (e.g. `String: Borrow<str>`), you can use `iter().any`:
///
/// ```
/// let v = [String::from("hello"), String::from("world")]; // slice of `String`
/// assert!(v.iter().any(|e| e == "hello")); // search with `&str`
/// assert!(!v.iter().any(|e| e == "hi"));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn contains(&self, x: &T) -> bool
where
    T: PartialEq,
{
    // Dispatches through the internal `cmp::SliceContains` helper trait rather
    // than a plain `iter().any(..)` loop — presumably so specific element types
    // can provide a faster search path; TODO confirm against the `cmp` module.
    cmp::SliceContains::slice_contains(x, self)
}
/// Returns `true` if `needle` is a prefix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.starts_with(&[10]));
/// assert!(v.starts_with(&[10, 40]));
/// assert!(!v.starts_with(&[50]));
/// assert!(!v.starts_with(&[10, 50]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.starts_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.starts_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn starts_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // `get(..n)` is `None` when the needle is longer than the slice — exactly
    // the "cannot be a prefix" case; an empty needle always yields `Some(&[])`.
    self.get(..needle.len()).map_or(false, |head| head == needle)
}
/// Returns `true` if `needle` is a suffix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.ends_with(&[30]));
/// assert!(v.ends_with(&[40, 30]));
/// assert!(!v.ends_with(&[50]));
/// assert!(!v.ends_with(&[50, 30]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.ends_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.ends_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ends_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // `checked_sub` is `None` when the needle is longer than the slice;
    // otherwise `start` is where a matching suffix would have to begin.
    match self.len().checked_sub(needle.len()) {
        Some(start) => &self[start..] == needle,
        None => false,
    }
}
/// Returns a subslice with the prefix removed.
///
/// If the slice starts with `prefix`, returns
/// the subslice after the prefix, wrapped in `Some`.
///
/// If the slice does not start with `prefix`, returns `None`.
///
/// (If `prefix` is empty, simply returns the original slice.)
///
/// # Examples
///
/// ```
/// #![feature(slice_strip)]
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
/// assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
/// assert_eq!(v.strip_prefix(&[50]), None);
/// assert_eq!(v.strip_prefix(&[10, 50]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[unstable(feature = "slice_strip", issue = "73413")]
pub fn strip_prefix(&self, prefix: &[T]) -> Option<&[T]>
where
    T: PartialEq,
{
    // A leading subslice of `prefix.len()` elements exists only when the
    // prefix fits; when it matches, the tail starting at that index is the
    // answer (and is guaranteed to be in bounds, so `get` returns `Some`).
    match self.get(..prefix.len()) {
        Some(head) if head == prefix => self.get(prefix.len()..),
        _ => None,
    }
}
/// Returns a subslice with the suffix removed.
///
/// If the slice ends with `suffix`, returns
/// the subslice before the suffix, wrapped in `Some`.
///
/// If the slice does not end with `suffix`, returns `None`.
///
/// (If `suffix` is empty, simply returns the original slice.)
///
/// # Examples
///
/// ```
/// #![feature(slice_strip)]
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
/// assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
/// assert_eq!(v.strip_suffix(&[50]), None);
/// assert_eq!(v.strip_suffix(&[50, 30]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[unstable(feature = "slice_strip", issue = "73413")]
pub fn strip_suffix(&self, suffix: &[T]) -> Option<&[T]>
where
    T: PartialEq,
{
    // `split` is the index where the suffix would begin; `checked_sub` being
    // `None` means the suffix is longer than the slice and cannot match.
    match self.len().checked_sub(suffix.len()) {
        Some(split) if &self[split..] == suffix => Some(&self[..split]),
        _ => None,
    }
}
/// Binary searches this sorted slice for a given element.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. If the value is not found then
/// [`Result::Err`] is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// assert_eq!(s.binary_search(&13), Ok(9));
/// assert_eq!(s.binary_search(&4), Err(7));
/// assert_eq!(s.binary_search(&100), Err(13));
/// let r = s.binary_search(&1);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
///
/// If you want to insert an item to a sorted vector, while maintaining
/// sort order:
///
/// ```
/// let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
/// let num = 42;
/// let idx = s.binary_search(&num).unwrap_or_else(|x| x);
/// s.insert(idx, num);
/// assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where
    T: Ord,
{
    // Delegate to the comparator form, ordering each probe against `x`.
    self.binary_search_by(|probe| Ord::cmp(probe, x))
}
/// Binary searches this sorted slice with a comparator function.
///
/// The comparator function should implement an order consistent
/// with the sort order of the underlying slice, returning an
/// order code that indicates whether its argument is `Less` than,
/// `Equal` to, or `Greater` than the desired target.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. If the value is not found then
/// [`Result::Err`] is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// let seek = 13;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
/// let seek = 4;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
/// let seek = 100;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
/// let seek = 1;
/// let r = s.binary_search_by(|probe| probe.cmp(&seek));
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> Ordering,
{
    let s = self;
    let mut size = s.len();
    // An empty slice has no match; the insertion point is 0.
    if size == 0 {
        return Err(0);
    }
    let mut base = 0usize;
    // Loop invariant: the match (or insertion point) lies within
    // `base ..= base + size`, and `size` is halved each iteration.
    while size > 1 {
        let half = size / 2;
        let mid = base + half;
        // SAFETY: the call is made safe by the following invariants:
        // - `mid >= 0`: by definition
        // - `mid < size`: `mid = size / 2 + size / 4 + size / 8 ...`
        let cmp = f(unsafe { s.get_unchecked(mid) });
        // Keep the half that can still contain the target: if the probe at
        // `mid` is greater, stay in the lower half; otherwise advance `base`.
        base = if cmp == Greater { base } else { mid };
        size -= half;
    }
    // SAFETY: base is always in [0, size) because base <= mid.
    let cmp = f(unsafe { s.get_unchecked(base) });
    // A `Less` probe sorts before the target, so the insertion point is one
    // past `base`; otherwise it is `base` itself.
    if cmp == Equal { Ok(base) } else { Err(base + (cmp == Less) as usize) }
}
/// Binary searches this sorted slice with a key extraction function.
///
/// Assumes that the slice is sorted by the key, for instance with
/// [`sort_by_key`] using the same key extraction function.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. If the value is not found then
/// [`Result::Err`] is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// [`sort_by_key`]: #method.sort_by_key
///
/// # Examples
///
/// Looks up a series of four elements in a slice of pairs sorted by
/// their second elements. The first is found, with a uniquely
/// determined position; the second and third are not found; the
/// fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
/// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
/// (1, 21), (2, 34), (4, 55)];
///
/// assert_eq!(s.binary_search_by_key(&13, |&(a,b)| b), Ok(9));
/// assert_eq!(s.binary_search_by_key(&4, |&(a,b)| b), Err(7));
/// assert_eq!(s.binary_search_by_key(&100, |&(a,b)| b), Err(13));
/// let r = s.binary_search_by_key(&1, |&(a,b)| b);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
#[inline]
pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> B,
    B: Ord,
{
    // Extract the key from each probe, then compare keys against `b`.
    self.binary_search_by(|elem| f(elem).cmp(b))
}
/// Sorts the slice, but may not preserve the order of equal elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort_unstable();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable(&mut self)
where
    T: Ord,
{
    // `<` on references compares the pointed-to values, so this is the same
    // strict "less than" predicate as `a.lt(b)`.
    sort::quicksort(self, |x, y| x < y);
}
/// Sorts the slice with a comparator function, but may not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
/// total order if it is (for all a, b and c):
///
/// * total and antisymmetric: exactly one of a < b, a == b or a > b is true; and
/// * transitive, a < b and b < c implies a < c. The same must hold for both == and >.
///
/// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
/// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
///
/// ```
/// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
/// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
/// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
/// ```
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_unstable_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_unstable_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by<F>(&mut self, mut compare: F)
where
F: FnMut(&T, &T) -> Ordering,
{
sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less);
}
/// Sorts the slice with a key extraction function, but may not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(m \* *n* \* log(*n*)) worst-case, where the key function is
/// *O*(*m*).
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// Due to its key calling strategy, [`sort_unstable_by_key`](#method.sort_unstable_by_key)
/// is likely to be slower than [`sort_by_cached_key`](#method.sort_by_cached_key) in
/// cases where the key function is expensive.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_unstable_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by_key<K, F>(&mut self, mut f: F)
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Note: the key function is re-run on both operands of every comparison
    // (see the doc comment's pointer to `sort_by_cached_key` for costly keys).
    sort::quicksort(self, |x, y| f(x) < f(y));
}
/// Reorder the slice such that the element at `index` is at its final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index`. Additionally, this reordering is
/// unstable (i.e. any number of equal elements may end up at position `index`), in-place
/// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also known as "kth
/// element" in other libraries. It returns a triplet of the following values: all elements less
/// than the one at the given index, the value at the given index, and all elements greater than
/// the one at the given index.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median
/// v.partition_at_index(2);
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [-3, -5, 1, 2, 4] ||
/// v == [-5, -3, 1, 2, 4] ||
/// v == [-3, -5, 1, 4, 2] ||
/// v == [-5, -3, 1, 4, 2]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    // Quickselect with the natural `Ord`-derived strict "less than" predicate.
    sort::partition_at_index(self, index, &mut |a: &T, b: &T| a.lt(b))
}
/// Reorder the slice with a comparator function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the comparator function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index,
/// and all elements greater than the one at the given index, using the provided comparator
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median as if the slice were sorted in descending order.
/// v.partition_at_index_by(2, |a, b| b.cmp(a));
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [2, 4, 1, -5, -3] ||
/// v == [2, 4, 1, -3, -5] ||
/// v == [4, 2, 1, -5, -3] ||
/// v == [4, 2, 1, -3, -5]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index_by<F>(
    &mut self,
    index: usize,
    mut compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Adapt the three-way comparator into the strict "less than" predicate
    // the quickselect core expects.
    sort::partition_at_index(self, index, &mut |a: &T, b: &T| compare(a, b) == Less)
}
/// Reorder the slice with a key extraction function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the key extraction function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index, and
/// all elements greater than the one at the given index, using the provided key extraction
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Return the median as if the array were sorted according to absolute value.
/// v.partition_at_index_by_key(2, |a| a.abs());
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [1, 2, -3, 4, -5] ||
/// v == [1, 2, -3, -5, 4] ||
/// v == [2, 1, -3, 4, -5] ||
/// v == [2, 1, -3, -5, 4]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index_by_key<K, F>(
    &mut self,
    index: usize,
    mut f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Compare extracted keys; note `f` runs on both operands of every comparison.
    sort::partition_at_index(self, index, &mut |a: &T, b: &T| f(a).lt(&f(b)))
}
/// Moves all consecutive repeated elements to the end of the slice according to the
/// [`PartialEq`] trait implementation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
///
/// let (dedup, duplicates) = slice.partition_dedup();
///
/// assert_eq!(dedup, [1, 2, 3, 2, 1]);
/// assert_eq!(duplicates, [2, 3, 1]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup(&mut self) -> (&mut [T], &mut [T])
where
T: PartialEq,
{
self.partition_dedup_by(|a, b| a == b)
}
    /// Moves all but the first of consecutive elements to the end of the slice satisfying
    /// a given equality relation.
    ///
    /// Returns two slices. The first contains no consecutive repeated elements.
    /// The second contains all the duplicates in no specified order.
    ///
    /// The `same_bucket` function is passed references to two elements from the slice and
    /// must determine if the elements compare equal. The elements are passed in opposite order
    /// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is moved
    /// at the end of the slice.
    ///
    /// If the slice is sorted, the first returned slice contains no duplicates.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(slice_partition_dedup)]
    ///
    /// let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
    ///
    /// let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
    ///
    /// assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
    /// assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
    /// ```
    #[unstable(feature = "slice_partition_dedup", issue = "54279")]
    #[inline]
    pub fn partition_dedup_by<F>(&mut self, mut same_bucket: F) -> (&mut [T], &mut [T])
    where
        F: FnMut(&mut T, &mut T) -> bool,
    {
        // Although we have a mutable reference to `self`, we cannot make
        // *arbitrary* changes. The `same_bucket` calls could panic, so we
        // must ensure that the slice is in a valid state at all times.
        //
        // The way that we handle this is by using swaps; we iterate
        // over all the elements, swapping as we go so that at the end
        // the elements we wish to keep are in the front, and those we
        // wish to reject are at the back. We can then split the slice.
        // This operation is still `O(n)`.
        //
        // Example: We start in this state, where `r` represents "next
        // read" and `w` represents "next write".
        //
        //           r
        //     +---+---+---+---+---+---+
        //     | 0 | 1 | 1 | 2 | 3 | 3 |
        //     +---+---+---+---+---+---+
        //           w
        //
        // Comparing self[r] against self[w-1], this is not a duplicate, so
        // we swap self[r] and self[w] (no effect as r==w) and then increment both
        // r and w, leaving us with:
        //
        //               r
        //     +---+---+---+---+---+---+
        //     | 0 | 1 | 1 | 2 | 3 | 3 |
        //     +---+---+---+---+---+---+
        //               w
        //
        // Comparing self[r] against self[w-1], this value is a duplicate,
        // so we increment `r` but leave everything else unchanged:
        //
        //                   r
        //     +---+---+---+---+---+---+
        //     | 0 | 1 | 1 | 2 | 3 | 3 |
        //     +---+---+---+---+---+---+
        //               w
        //
        // Comparing self[r] against self[w-1], this is not a duplicate,
        // so swap self[r] and self[w] and advance r and w:
        //
        //                       r
        //     +---+---+---+---+---+---+
        //     | 0 | 1 | 2 | 1 | 3 | 3 |
        //     +---+---+---+---+---+---+
        //                   w
        //
        // Not a duplicate, repeat:
        //
        //                           r
        //     +---+---+---+---+---+---+
        //     | 0 | 1 | 2 | 3 | 1 | 3 |
        //     +---+---+---+---+---+---+
        //                       w
        //
        // Duplicate, advance r. End of slice. Split at w.
        let len = self.len();
        // Slices of length 0 or 1 can contain no consecutive duplicates.
        if len <= 1 {
            return (self, &mut []);
        }
        let ptr = self.as_mut_ptr();
        let mut next_read: usize = 1;
        let mut next_write: usize = 1;
        // SAFETY: the `while` condition guarantees `next_read` and `next_write`
        // are less than `len`, thus are inside `self`. `prev_ptr_write` points to
        // one element before `ptr_write`, but `next_write` starts at 1, so
        // `prev_ptr_write` is never less than 0 and is inside the slice.
        // This fulfils the requirements for dereferencing `ptr_read`, `prev_ptr_write`
        // and `ptr_write`, and for using `ptr.add(next_read)`, `ptr.add(next_write - 1)`
        // and `prev_ptr_write.offset(1)`.
        //
        // `next_write` is also incremented at most once per loop iteration, meaning
        // no element is skipped when it may need to be swapped.
        //
        // `ptr_read` and `prev_ptr_write` never point to the same element. This
        // is required for `&mut *ptr_read`, `&mut *prev_ptr_write` to be safe.
        // The explanation is simply that `next_read >= next_write` is always true,
        // thus `next_read > next_write - 1` is too.
        unsafe {
            // Avoid bounds checks by using raw pointers.
            while next_read < len {
                let ptr_read = ptr.add(next_read);
                let prev_ptr_write = ptr.add(next_write - 1);
                if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
                    if next_read != next_write {
                        let ptr_write = prev_ptr_write.offset(1);
                        mem::swap(&mut *ptr_read, &mut *ptr_write);
                    }
                    next_write += 1;
                }
                next_read += 1;
            }
        }
        self.split_at_mut(next_write)
    }
/// Moves all but the first of consecutive elements to the end of the slice that resolve
/// to the same key.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
///
/// let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
///
/// assert_eq!(dedup, [10, 20, 30, 20, 11]);
/// assert_eq!(duplicates, [21, 30, 13]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by_key<K, F>(&mut self, mut key: F) -> (&mut [T], &mut [T])
where
F: FnMut(&mut T) -> K,
K: PartialEq,
{
self.partition_dedup_by(|a, b| key(a) == key(b))
}
/// Rotates the slice in-place such that the first `mid` elements of the
/// slice move to the end while the last `self.len() - mid` elements move to
/// the front. After calling `rotate_left`, the element previously at index
/// `mid` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `mid` is greater than the length of the
/// slice. Note that `mid == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_left(2);
/// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
/// ```
///
/// Rotating a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_left(1);
/// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_left(&mut self, mid: usize) {
assert!(mid <= self.len());
let k = self.len() - mid;
let p = self.as_mut_ptr();
// SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
// valid for reading and writing, as required by `ptr_rotate`.
unsafe {
rotate::ptr_rotate(mid, p.add(mid), k);
}
}
/// Rotates the slice in-place such that the first `self.len() - k`
/// elements of the slice move to the end while the last `k` elements move
/// to the front. After calling `rotate_right`, the element previously at
/// index `self.len() - k` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `k` is greater than the length of the
/// slice. Note that `k == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_right(2);
/// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
/// ```
///
/// Rotate a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_right(1);
/// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_right(&mut self, k: usize) {
assert!(k <= self.len());
let mid = self.len() - k;
let p = self.as_mut_ptr();
// SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
// valid for reading and writing, as required by `ptr_rotate`.
unsafe {
rotate::ptr_rotate(mid, p.add(mid), k);
}
}
/// Fills `self` with elements by cloning `value`.
///
/// # Examples
///
/// ```
/// #![feature(slice_fill)]
///
/// let mut buf = vec![0; 10];
/// buf.fill(1);
/// assert_eq!(buf, vec![1; 10]);
/// ```
#[unstable(feature = "slice_fill", issue = "70758")]
pub fn fill(&mut self, value: T)
where
T: Clone,
{
if let Some((last, elems)) = self.split_last_mut() {
for el in elems {
el.clone_from(&value);
}
*last = value
}
}
/// Copies the elements from `src` into `self`.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` implements `Copy`, it can be more performant to use
/// [`copy_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Cloning two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.clone_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `clone_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].clone_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.clone_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`copy_from_slice`]: #method.copy_from_slice
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "clone_from_slice", since = "1.7.0")]
pub fn clone_from_slice(&mut self, src: &[T])
where
T: Clone,
{
assert!(self.len() == src.len(), "destination and source slices have different lengths");
// NOTE: We need to explicitly slice them to the same length
// for bounds checking to be elided, and the optimizer will
// generate memcpy for simple cases (for example T = u8).
let len = self.len();
let src = &src[..len];
for i in 0..len {
self[i].clone_from(&src[i]);
}
}
/// Copies all elements from `src` into `self`, using a memcpy.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` does not implement `Copy`, use [`clone_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Copying two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.copy_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `copy_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].copy_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.copy_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`clone_from_slice`]: #method.clone_from_slice
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "copy_from_slice", since = "1.9.0")]
pub fn copy_from_slice(&mut self, src: &[T])
where
T: Copy,
{
// The panic code path was put into a cold function to not bloat the
// call site.
#[inline(never)]
#[cold]
#[track_caller]
fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
panic!(
"source slice length ({}) does not match destination slice length ({})",
src_len, dst_len,
);
}
if self.len() != src.len() {
len_mismatch_fail(self.len(), src.len());
}
// SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was
// checked to have the same length. The slices cannot overlap because
// mutable references are exclusive.
unsafe {
ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
}
}
/// Copies elements from one part of the slice to another part of itself,
/// using a memmove.
///
/// `src` is the range within `self` to copy from. `dest` is the starting
/// index of the range within `self` to copy to, which will have the same
/// length as `src`. The two ranges may overlap. The ends of the two ranges
/// must be less than or equal to `self.len()`.
///
/// # Panics
///
/// This function will panic if either range exceeds the end of the slice,
/// or if the end of `src` is before the start.
///
/// # Examples
///
/// Copying four bytes within a slice:
///
/// ```
/// let mut bytes = *b"Hello, World!";
///
/// bytes.copy_within(1..5, 8);
///
/// assert_eq!(&bytes, b"Hello, Wello!");
/// ```
#[stable(feature = "copy_within", since = "1.37.0")]
#[track_caller]
pub fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dest: usize)
where
T: Copy,
{
let Range { start: src_start, end: src_end } = check_range(self.len(), src);
let count = src_end - src_start;
assert!(dest <= self.len() - count, "dest is out of bounds");
// SAFETY: the conditions for `ptr::copy` have all been checked above,
// as have those for `ptr::add`.
unsafe {
ptr::copy(self.as_ptr().add(src_start), self.as_mut_ptr().add(dest), count);
}
}
    /// Swaps all elements in `self` with those in `other`.
    ///
    /// The length of `other` must be the same as `self`.
    ///
    /// # Panics
    ///
    /// This function will panic if the two slices have different lengths.
    ///
    /// # Example
    ///
    /// Swapping two elements across slices:
    ///
    /// ```
    /// let mut slice1 = [0, 0];
    /// let mut slice2 = [1, 2, 3, 4];
    ///
    /// slice1.swap_with_slice(&mut slice2[2..]);
    ///
    /// assert_eq!(slice1, [3, 4]);
    /// assert_eq!(slice2, [1, 2, 0, 0]);
    /// ```
    ///
    /// Rust enforces that there can only be one mutable reference to a
    /// particular piece of data in a particular scope. Because of this,
    /// attempting to use `swap_with_slice` on a single slice will result in
    /// a compile failure:
    ///
    /// ```compile_fail
    /// let mut slice = [1, 2, 3, 4, 5];
    /// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
    /// ```
    ///
    /// To work around this, we can use [`split_at_mut`] to create two distinct
    /// mutable sub-slices from a slice:
    ///
    /// ```
    /// let mut slice = [1, 2, 3, 4, 5];
    ///
    /// {
    ///     let (left, right) = slice.split_at_mut(2);
    ///     left.swap_with_slice(&mut right[1..]);
    /// }
    ///
    /// assert_eq!(slice, [4, 5, 3, 1, 2]);
    /// ```
    ///
    /// [`split_at_mut`]: #method.split_at_mut
    #[stable(feature = "swap_with_slice", since = "1.27.0")]
    pub fn swap_with_slice(&mut self, other: &mut [T]) {
        assert!(self.len() == other.len(), "destination and source slices have different lengths");
        // SAFETY: `self` is valid for `self.len()` elements by definition, and `other` was
        // checked to have the same length. The slices cannot overlap because
        // mutable references are exclusive.
        unsafe {
            ptr::swap_nonoverlapping(self.as_mut_ptr(), other.as_mut_ptr(), self.len());
        }
    }
    /// Function to calculate lengths of the middle and trailing slice for `align_to{,_mut}`.
    fn align_to_offsets<U>(&self) -> (usize, usize) {
        // What we're going to do about `rest` is figure out what multiple of `U`s we can put in a
        // lowest number of `T`s. And how many `T`s we need for each such "multiple".
        //
        // Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
        // for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
        // place of every 3 Ts in the `rest` slice. A bit more complicated.
        //
        // Formula to calculate this is:
        //
        // Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
        // Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
        //
        // Expanded and simplified:
        //
        // Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
        // Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
        //
        // Luckily since all this is constant-evaluated... performance here matters not!
        #[inline]
        fn gcd(a: usize, b: usize) -> usize {
            use crate::intrinsics;
            // iterative Stein's algorithm (binary GCD)
            // We should still make this `const fn` (and revert to recursive algorithm if we do)
            // because relying on llvm to consteval all this is... well, it makes me uncomfortable.
            //
            // SAFETY: `a` and `b` are checked to be non-zero values before the
            // `cttz_nonzero` calls below.
            let (ctz_a, mut ctz_b) = unsafe {
                if a == 0 {
                    return b;
                }
                if b == 0 {
                    return a;
                }
                (intrinsics::cttz_nonzero(a), intrinsics::cttz_nonzero(b))
            };
            let k = ctz_a.min(ctz_b);
            let mut a = a >> ctz_a;
            let mut b = b;
            loop {
                // remove all factors of 2 from b
                b >>= ctz_b;
                if a > b {
                    mem::swap(&mut a, &mut b);
                }
                b = b - a;
                // SAFETY: `b` is checked to be non-zero before `cttz_nonzero` runs.
                unsafe {
                    if b == 0 {
                        break;
                    }
                    ctz_b = intrinsics::cttz_nonzero(b);
                }
            }
            a << k
        }
        let gcd: usize = gcd(mem::size_of::<T>(), mem::size_of::<U>());
        let ts: usize = mem::size_of::<U>() / gcd;
        let us: usize = mem::size_of::<T>() / gcd;
        // Armed with this knowledge, we can find how many `U`s we can fit!
        let us_len = self.len() / ts * us;
        // And how many `T`s will be in the trailing slice!
        let ts_len = self.len() % ts;
        (us_len, ts_len)
    }
    /// Transmute the slice to a slice of another type, ensuring alignment of the types is
    /// maintained.
    ///
    /// This method splits the slice into three distinct slices: prefix, correctly aligned middle
    /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
    /// length possible for a given type and input slice, but only your algorithm's performance
    /// should depend on that, not its correctness. It is permissible for all of the input data to
    /// be returned as the prefix or suffix slice.
    ///
    /// This method has no purpose when either input element `T` or output element `U` are
    /// zero-sized and will return the original slice without splitting anything.
    ///
    /// # Safety
    ///
    /// This method is essentially a `transmute` with respect to the elements in the returned
    /// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// unsafe {
    ///     let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
    ///     let (prefix, shorts, suffix) = bytes.align_to::<u16>();
    ///     // less_efficient_algorithm_for_bytes(prefix);
    ///     // more_efficient_algorithm_for_aligned_shorts(shorts);
    ///     // less_efficient_algorithm_for_bytes(suffix);
    /// }
    /// ```
    #[stable(feature = "slice_align_to", since = "1.30.0")]
    pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
        // Note that most of this function will be constant-evaluated, since the
        // size and alignment queries below are compile-time constants.
        if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
            // handle ZSTs specially, which is - don't handle them at all.
            return (self, &[], &[]);
        }
        // First, find at what point do we split between the first and 2nd slice. Easy with
        // ptr.align_offset.
        let ptr = self.as_ptr();
        // SAFETY: See the `align_to_mut` method for the detailed safety comment.
        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
        if offset > self.len() {
            // The middle part would start past the end of the slice; everything is prefix.
            (self, &[], &[])
        } else {
            let (left, rest) = self.split_at(offset);
            let (us_len, ts_len) = rest.align_to_offsets::<U>();
            // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
            // since the caller guarantees that we can transmute `T` to `U` safely.
            unsafe {
                (
                    left,
                    from_raw_parts(rest.as_ptr() as *const U, us_len),
                    from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
                )
            }
        }
    }
    /// Transmute the slice to a slice of another type, ensuring alignment of the types is
    /// maintained.
    ///
    /// This method splits the slice into three distinct slices: prefix, correctly aligned middle
    /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
    /// length possible for a given type and input slice, but only your algorithm's performance
    /// should depend on that, not its correctness. It is permissible for all of the input data to
    /// be returned as the prefix or suffix slice.
    ///
    /// This method has no purpose when either input element `T` or output element `U` are
    /// zero-sized and will return the original slice without splitting anything.
    ///
    /// # Safety
    ///
    /// This method is essentially a `transmute` with respect to the elements in the returned
    /// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// unsafe {
    ///     let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
    ///     let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
    ///     // less_efficient_algorithm_for_bytes(prefix);
    ///     // more_efficient_algorithm_for_aligned_shorts(shorts);
    ///     // less_efficient_algorithm_for_bytes(suffix);
    /// }
    /// ```
    #[stable(feature = "slice_align_to", since = "1.30.0")]
    pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
        // Note that most of this function will be constant-evaluated, since the
        // size and alignment queries below are compile-time constants.
        if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
            // handle ZSTs specially, which is - don't handle them at all.
            return (self, &mut [], &mut []);
        }
        // First, find at what point do we split between the first and 2nd slice. Easy with
        // ptr.align_offset.
        let ptr = self.as_ptr();
        // SAFETY: Here we are ensuring we will use aligned pointers for U for the
        // rest of the method. This is done by passing a pointer to &[T] with an
        // alignment targeted for U.
        // `crate::ptr::align_offset` is called with a correctly aligned and
        // valid pointer `ptr` (it comes from a reference to `self`) and with
        // a size that is a power of two (since it comes from the alignment for U),
        // satisfying its safety constraints.
        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
        if offset > self.len() {
            // The middle part would start past the end of the slice; everything is prefix.
            (self, &mut [], &mut [])
        } else {
            let (left, rest) = self.split_at_mut(offset);
            let (us_len, ts_len) = rest.align_to_offsets::<U>();
            let rest_len = rest.len();
            let mut_ptr = rest.as_mut_ptr();
            // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
            // SAFETY: see comments for `align_to`.
            unsafe {
                (
                    left,
                    from_raw_parts_mut(mut_ptr as *mut U, us_len),
                    from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
                )
            }
        }
    }
/// Checks if the elements of this slice are sorted.
///
/// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the
/// slice yields exactly zero or one element, `true` is returned.
///
/// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition
/// implies that this function returns `false` if any two consecutive items are not
/// comparable.
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
/// let empty: [i32; 0] = [];
///
/// assert!([1, 2, 2, 9].is_sorted());
/// assert!(![1, 3, 2, 4].is_sorted());
/// assert!([0].is_sorted());
/// assert!(empty.is_sorted());
/// assert!(![0.0, 1.0, f32::NAN].is_sorted());
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted(&self) -> bool
where
T: PartialOrd,
{
self.is_sorted_by(|a, b| a.partial_cmp(b))
}
/// Checks if the elements of this slice are sorted using the given comparator function.
///
/// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare`
/// function to determine the ordering of two elements. Apart from that, it's equivalent to
/// [`is_sorted`]; see its documentation for more information.
///
/// [`is_sorted`]: #method.is_sorted
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
where
F: FnMut(&T, &T) -> Option<Ordering>,
{
self.iter().is_sorted_by(|a, b| compare(*a, *b))
}
    /// Checks if the elements of this slice are sorted using the given key extraction function.
    ///
    /// Instead of comparing the slice's elements directly, this function compares the keys of the
    /// elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see its
    /// documentation for more information.
    ///
    /// [`is_sorted`]: #method.is_sorted
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(is_sorted)]
    ///
    /// assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
    /// assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
    /// ```
    #[inline]
    #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
    pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
    where
        F: FnMut(&T) -> K,
        K: PartialOrd,
    {
        // Delegates to the iterator adaptor of the same name.
        self.iter().is_sorted_by_key(f)
    }
/// Returns the index of the partition point according to the given predicate
/// (the index of the first element of the second partition).
///
/// The slice is assumed to be partitioned according to the given predicate.
/// This means that all elements for which the predicate returns true are at the start of the slice
/// and all elements for which the predicate returns false are at the end.
/// For example, [7, 15, 3, 5, 4, 12, 6] is a partitioned under the predicate x % 2 != 0
/// (all odd numbers are at the start, all even at the end).
///
/// If this slice is not partitioned, the returned result is unspecified and meaningless,
/// as this method performs a kind of binary search.
///
/// # Examples
///
/// ```
/// #![feature(partition_point)]
///
/// let v = [1, 2, 3, 3, 5, 6, 7];
/// let i = v.partition_point(|&x| x < 5);
///
/// assert_eq!(i, 4);
/// assert!(v[..i].iter().all(|&x| x < 5));
/// assert!(v[i..].iter().all(|&x| !(x < 5)));
/// ```
#[unstable(feature = "partition_point", reason = "new API", issue = "73831")]
pub fn partition_point<P>(&self, mut pred: P) -> usize
where
P: FnMut(&T) -> bool,
{
let mut left = 0;
let mut right = self.len();
while left != right {
let mid = left + (right - left) / 2;
// SAFETY: When `left < right`, `left <= mid < right`.
// Therefore `left` always increases and `right` always decreases,
// and either of them is selected. In both cases `left <= right` is
// satisfied. Therefore if `left < right` in a step, `left <= right`
// is satisfied in the next step. Therefore as long as `left != right`,
// `0 <= left < right <= len` is satisfied and if this case
// `0 <= mid < len` is satisfied too.
let value = unsafe { self.get_unchecked(mid) };
if pred(value) {
left = mid + 1;
} else {
right = mid;
}
}
left
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for &[T] {
    /// Creates an empty slice.
    ///
    /// The returned reference points at no elements; no allocation is performed.
    fn default() -> Self {
        &[]
    }
}
#[stable(feature = "mut_slice_default", since = "1.5.0")]
impl<T> Default for &mut [T] {
    /// Creates a mutable empty slice.
    ///
    /// The returned reference points at no elements; no allocation is performed.
    fn default() -> Self {
        &mut []
    }
}
docs: Rewrap `slice::strip_prefix` and `strip_suffix` back to 100
Requested-by: da39a3ee5e6b4b0d3255bfef95601890afd80709@LukasKalbertodt
Signed-off-by: Ian Jackson <d68d4287c1430ca128ad85033356c9d259238c74@chiark.greenend.org.uk>
// ignore-tidy-filelength
//! Slice management and manipulation.
//!
//! For more details see [`std::slice`].
//!
//! [`std::slice`]: ../../std/slice/index.html
#![stable(feature = "rust1", since = "1.0.0")]
use crate::cmp::Ordering::{self, Equal, Greater, Less};
use crate::marker::Copy;
use crate::mem;
use crate::num::NonZeroUsize;
use crate::ops::{FnMut, Range, RangeBounds};
use crate::option::Option;
use crate::option::Option::{None, Some};
use crate::ptr;
use crate::result::Result;
use crate::result::Result::{Err, Ok};
#[unstable(
feature = "slice_internals",
issue = "none",
reason = "exposed from core to be reused in std; use the memchr crate"
)]
/// Pure rust memchr implementation, taken from rust-memchr
pub mod memchr;
mod ascii;
mod cmp;
mod index;
mod iter;
mod raw;
mod rotate;
mod sort;
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Chunks, ChunksMut, Windows};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{Iter, IterMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use iter::{RSplitN, RSplitNMut, Split, SplitMut, SplitN, SplitNMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use iter::{RSplit, RSplitMut};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use iter::{ChunksExact, ChunksExactMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use iter::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[unstable(feature = "array_chunks", issue = "74985")]
pub use iter::{ArrayChunks, ArrayChunksMut};
#[unstable(feature = "array_windows", issue = "75027")]
pub use iter::ArrayWindows;
#[unstable(feature = "split_inclusive", issue = "72360")]
pub use iter::{SplitInclusive, SplitInclusiveMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use raw::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "from_ref", since = "1.28.0")]
pub use raw::{from_mut, from_ref};
// This function is public only because there is no other way to unit test heapsort.
#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
pub use sort::heapsort;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use index::SliceIndex;
#[unstable(feature = "slice_check_range", issue = "76393")]
pub use index::check_range;
#[lang = "slice"]
#[cfg(not(test))]
impl<T> [T] {
    /// Returns the number of elements in the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let a = [1, 2, 3];
    /// assert_eq!(a.len(), 3);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_slice_len", since = "1.32.0")]
    #[inline]
    // SAFETY: const sound because we transmute out the length field as a usize (which it must be)
    #[allow_internal_unstable(const_fn_union)]
    pub const fn len(&self) -> usize {
        // SAFETY: this is safe because `&[T]` and `FatPtr<T>` have the same layout.
        // Only `std` can make this guarantee.
        // Reading `raw.len` extracts only the length word of the fat pointer;
        // the data pointer is never dereferenced.
        unsafe { crate::ptr::Repr { rust: self }.raw.len }
    }
    /// Returns `true` if the slice has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// let a = [1, 2, 3];
    /// assert!(!a.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_slice_is_empty", since = "1.32.0")]
    #[inline]
    pub const fn is_empty(&self) -> bool {
        // A slice is empty exactly when its length is zero.
        self.len() == 0
    }
/// Returns the first element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&10), v.first());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.first());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn first(&self) -> Option<&T> {
if let [first, ..] = self { Some(first) } else { None }
}
/// Returns a mutable pointer to the first element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(first) = x.first_mut() {
/// *first = 5;
/// }
/// assert_eq!(x, &[5, 1, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn first_mut(&mut self) -> Option<&mut T> {
if let [first, ..] = self { Some(first) } else { None }
}
/// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first() {
/// assert_eq!(first, &0);
/// assert_eq!(elements, &[1, 2]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_first(&self) -> Option<(&T, &[T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
/// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first_mut() {
/// *first = 3;
/// elements[0] = 4;
/// elements[1] = 5;
/// }
/// assert_eq!(x, &[3, 4, 5]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
/// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last() {
/// assert_eq!(last, &2);
/// assert_eq!(elements, &[0, 1]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_last(&self) -> Option<(&T, &[T])> {
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
/// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last_mut() {
/// *last = 3;
/// elements[0] = 4;
/// elements[1] = 5;
/// }
/// assert_eq!(x, &[4, 5, 3]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
/// Returns the last element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&30), v.last());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.last());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn last(&self) -> Option<&T> {
if let [.., last] = self { Some(last) } else { None }
}
/// Returns a mutable pointer to the last item in the slice.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(last) = x.last_mut() {
/// *last = 10;
/// }
/// assert_eq!(x, &[0, 1, 10]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn last_mut(&mut self) -> Option<&mut T> {
if let [.., last] = self { Some(last) } else { None }
}
/// Returns a reference to an element or subslice depending on the type of
/// index.
///
/// - If given a position, returns a reference to the element at that
/// position or `None` if out of bounds.
/// - If given a range, returns the subslice corresponding to that range,
/// or `None` if out of bounds.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&40), v.get(1));
/// assert_eq!(Some(&[10, 40][..]), v.get(0..2));
/// assert_eq!(None, v.get(3));
/// assert_eq!(None, v.get(0..4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get<I>(&self, index: I) -> Option<&I::Output>
where
I: SliceIndex<Self>,
{
index.get(self)
}
/// Returns a mutable reference to an element or subslice depending on the
/// type of index (see [`get`]) or `None` if the index is out of bounds.
///
/// [`get`]: #method.get
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(elem) = x.get_mut(1) {
/// *elem = 42;
/// }
/// assert_eq!(x, &[0, 42, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
I: SliceIndex<Self>,
{
index.get_mut(self)
}
    /// Returns a reference to an element or subslice, without doing bounds
    /// checking.
    ///
    /// For a safe alternative see [`get`].
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds index is *[undefined behavior]*
    /// even if the resulting reference is not used.
    ///
    /// [`get`]: #method.get
    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Examples
    ///
    /// ```
    /// let x = &[1, 2, 4];
    ///
    /// unsafe {
    ///     assert_eq!(x.get_unchecked(1), &2);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
    where
        I: SliceIndex<Self>,
    {
        // SAFETY: the caller must uphold the safety requirements for `get_unchecked`;
        // the slice is dereferenceable because `self` is a safe reference.
        // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
        unsafe { &*index.get_unchecked(self) }
    }
    /// Returns a mutable reference to an element or subslice, without doing
    /// bounds checking.
    ///
    /// For a safe alternative see [`get_mut`].
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds index is *[undefined behavior]*
    /// even if the resulting reference is not used.
    ///
    /// [`get_mut`]: #method.get_mut
    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    ///
    /// # Examples
    ///
    /// ```
    /// let x = &mut [1, 2, 4];
    ///
    /// unsafe {
    ///     let elem = x.get_unchecked_mut(1);
    ///     *elem = 13;
    /// }
    /// assert_eq!(x, &[1, 13, 4]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
    where
        I: SliceIndex<Self>,
    {
        // SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
        // the slice is dereferenceable because `self` is a safe reference.
        // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
        unsafe { &mut *index.get_unchecked_mut(self) }
    }
/// Returns a raw pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// The caller must also ensure that the memory the pointer (non-transitively) points to
/// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
/// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let x_ptr = x.as_ptr();
///
/// unsafe {
/// for i in 0..x.len() {
/// assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
/// }
/// }
/// ```
///
/// [`as_mut_ptr`]: #method.as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
#[inline]
pub const fn as_ptr(&self) -> *const T {
self as *const [T] as *const T
}
/// Returns an unsafe mutable pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// let x_ptr = x.as_mut_ptr();
///
/// unsafe {
/// for i in 0..x.len() {
/// *x_ptr.add(i) += 2;
/// }
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
#[inline]
pub const fn as_mut_ptr(&mut self) -> *mut T {
self as *mut [T] as *mut T
}
    /// Returns the two raw pointers spanning the slice.
    ///
    /// The returned range is half-open, which means that the end pointer
    /// points *one past* the last element of the slice. This way, an empty
    /// slice is represented by two equal pointers, and the difference between
    /// the two pointers represents the size of the slice.
    ///
    /// See [`as_ptr`] for warnings on using these pointers. The end pointer
    /// requires extra caution, as it does not point to a valid element in the
    /// slice.
    ///
    /// This function is useful for interacting with foreign interfaces which
    /// use two pointers to refer to a range of elements in memory, as is
    /// common in C++.
    ///
    /// It can also be useful to check if a pointer to an element refers to an
    /// element of this slice:
    ///
    /// ```
    /// let a = [1, 2, 3];
    /// let x = &a[1] as *const _;
    /// let y = &5 as *const _;
    ///
    /// assert!(a.as_ptr_range().contains(&x));
    /// assert!(!a.as_ptr_range().contains(&y));
    /// ```
    ///
    /// [`as_ptr`]: #method.as_ptr
    #[stable(feature = "slice_ptr_range", since = "1.48.0")]
    #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
    #[inline]
    pub const fn as_ptr_range(&self) -> Range<*const T> {
        let start = self.as_ptr();
        // SAFETY: The `add` here is safe, because:
        //
        //   - Both pointers are part of the same object, as pointing directly
        //     past the object also counts.
        //
        //   - The size of the slice is never larger than isize::MAX bytes, as
        //     noted here:
        //       - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447
        //       - https://doc.rust-lang.org/reference/behavior-considered-undefined.html
        //       - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety
        //     (This doesn't seem normative yet, but the very same assumption is
        //     made in many places, including the Index implementation of slices.)
        //
        //   - There is no wrapping around involved, as slices do not wrap past
        //     the end of the address space.
        //
        // See the documentation of pointer::add.
        let end = unsafe { start.add(self.len()) };
        // `end` is one-past-the-end and must never be dereferenced.
        start..end
    }
    /// Returns the two unsafe mutable pointers spanning the slice.
    ///
    /// The returned range is half-open, which means that the end pointer
    /// points *one past* the last element of the slice. This way, an empty
    /// slice is represented by two equal pointers, and the difference between
    /// the two pointers represents the size of the slice.
    ///
    /// See [`as_mut_ptr`] for warnings on using these pointers. The end
    /// pointer requires extra caution, as it does not point to a valid element
    /// in the slice.
    ///
    /// This function is useful for interacting with foreign interfaces which
    /// use two pointers to refer to a range of elements in memory, as is
    /// common in C++.
    ///
    /// [`as_mut_ptr`]: #method.as_mut_ptr
    #[stable(feature = "slice_ptr_range", since = "1.48.0")]
    #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
    #[inline]
    pub const fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
        let start = self.as_mut_ptr();
        // SAFETY: See as_ptr_range() above for why `add` here is safe.
        let end = unsafe { start.add(self.len()) };
        // `end` is one-past-the-end and must never be dereferenced.
        start..end
    }
    /// Swaps two elements in the slice.
    ///
    /// # Arguments
    ///
    /// * a - The index of the first element
    /// * b - The index of the second element
    ///
    /// # Panics
    ///
    /// Panics if `a` or `b` are out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = ["a", "b", "c", "d"];
    /// v.swap(1, 3);
    /// assert!(v == ["a", "d", "c", "b"]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn swap(&mut self, a: usize, b: usize) {
        // Can't take two mutable loans from one vector, so instead just cast
        // them to their raw pointers to do the swap. (Using raw pointers also
        // keeps `a == b` sound, where two `&mut` borrows would be illegal.)
        let pa: *mut T = &mut self[a];
        let pb: *mut T = &mut self[b];
        // SAFETY: `pa` and `pb` have been created from safe mutable references and refer
        // to elements in the slice and therefore are guaranteed to be valid and aligned.
        // Note that accessing the elements behind `a` and `b` is checked and will
        // panic when out of bounds.
        unsafe {
            ptr::swap(pa, pb);
        }
    }
    /// Reverses the order of elements in the slice, in place.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [1, 2, 3];
    /// v.reverse();
    /// assert!(v == [3, 2, 1]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn reverse(&mut self) {
        // Two-pointer in-place reversal; 1- and 2-byte element types take a
        // word-at-a-time fast path below before the generic element loop.
        let mut i: usize = 0;
        let ln = self.len();
        // For very small types, all the individual reads in the normal
        // path perform poorly. We can do better, given efficient unaligned
        // load/store, by loading a larger chunk and reversing a register.
        // Ideally LLVM would do this for us, as it knows better than we do
        // whether unaligned reads are efficient (since that changes between
        // different ARM versions, for example) and what the best chunk size
        // would be. Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls
        // the loop, so we need to do this ourselves. (Hypothesis: reverse
        // is troublesome because the sides can be aligned differently --
        // will be, when the length is odd -- so there's no way of emitting
        // pre- and postludes to use fully-aligned SIMD in the middle.)
        let fast_unaligned = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
        if fast_unaligned && mem::size_of::<T>() == 1 {
            // Use the llvm.bswap intrinsic to reverse u8s in a usize
            let chunk = mem::size_of::<usize>();
            while i + chunk - 1 < ln / 2 {
                // SAFETY: There are several things to check here:
                //
                // - Note that `chunk` is either 4 or 8 due to the cfg check
                //   above. So `chunk - 1` is positive.
                // - Indexing with index `i` is fine as the loop check guarantees
                //   `i + chunk - 1 < ln / 2`
                //   <=> `i < ln / 2 - (chunk - 1) < ln / 2 < ln`.
                // - Indexing with index `ln - i - chunk = ln - (i + chunk)` is fine:
                //   - `i + chunk > 0` is trivially true.
                //   - The loop check guarantees:
                //     `i + chunk - 1 < ln / 2`
                //     <=> `i + chunk ≤ ln / 2 ≤ ln`, thus subtraction does not underflow.
                // - The `read_unaligned` and `write_unaligned` calls are fine:
                //   - `pa` points to index `i` where `i < ln / 2 - (chunk - 1)`
                //     (see above) and `pb` points to index `ln - i - chunk`, so
                //     both are at least `chunk`
                //     many bytes away from the end of `self`.
                //   - Any initialized memory is valid `usize`.
                unsafe {
                    let pa: *mut T = self.get_unchecked_mut(i);
                    let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
                    let va = ptr::read_unaligned(pa as *mut usize);
                    let vb = ptr::read_unaligned(pb as *mut usize);
                    ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
                    ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
                }
                i += chunk;
            }
        }
        if fast_unaligned && mem::size_of::<T>() == 2 {
            // Use rotate-by-16 to reverse u16s in a u32
            let chunk = mem::size_of::<u32>() / 2;
            while i + chunk - 1 < ln / 2 {
                // SAFETY: An unaligned u32 can be read from `i` if `i + 1 < ln`
                // (and obviously `i < ln`), because each element is 2 bytes and
                // we're reading 4.
                //
                // `i + chunk - 1 < ln / 2` # while condition
                // `i + 2 - 1 < ln / 2`
                // `i + 1 < ln / 2`
                //
                // Since it's less than the length divided by 2, then it must be
                // in bounds.
                //
                // This also means that the condition `0 < i + chunk <= ln` is
                // always respected, ensuring the `pb` pointer can be used
                // safely.
                unsafe {
                    let pa: *mut T = self.get_unchecked_mut(i);
                    let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
                    let va = ptr::read_unaligned(pa as *mut u32);
                    let vb = ptr::read_unaligned(pb as *mut u32);
                    ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
                    ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
                }
                i += chunk;
            }
        }
        // Generic tail: swap element `i` with its mirror until the middle.
        while i < ln / 2 {
            // SAFETY: `i` is inferior to half the length of the slice so
            // accessing `i` and `ln - i - 1` is safe (`i` starts at 0 and
            // will not go further than `ln / 2 - 1`).
            // The resulting pointers `pa` and `pb` are therefore valid and
            // aligned, and can be read from and written to.
            unsafe {
                // Unsafe swap to avoid the bounds check in safe swap.
                let pa: *mut T = self.get_unchecked_mut(i);
                let pb: *mut T = self.get_unchecked_mut(ln - i - 1);
                ptr::swap(pa, pb);
            }
            i += 1;
        }
    }
    /// Returns an iterator over the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = &[1, 2, 4];
    /// let mut iterator = x.iter();
    ///
    /// assert_eq!(iterator.next(), Some(&1));
    /// assert_eq!(iterator.next(), Some(&2));
    /// assert_eq!(iterator.next(), Some(&4));
    /// assert_eq!(iterator.next(), None);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn iter(&self) -> Iter<'_, T> {
        // Thin constructor for the shared-reference iterator over this slice.
        Iter::new(self)
    }
    /// Returns an iterator that allows modifying each value.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = &mut [1, 2, 4];
    /// for elem in x.iter_mut() {
    ///     *elem += 2;
    /// }
    /// assert_eq!(x, &[3, 4, 6]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn iter_mut(&mut self) -> IterMut<'_, T> {
        // Thin constructor for the mutable-reference iterator over this slice.
        IterMut::new(self)
    }
/// Returns an iterator over all contiguous windows of length
/// `size`. The windows overlap. If the slice is shorter than
/// `size`, the iterator returns no values.
///
/// # Panics
///
/// Panics if `size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['r', 'u', 's', 't'];
/// let mut iter = slice.windows(2);
/// assert_eq!(iter.next().unwrap(), &['r', 'u']);
/// assert_eq!(iter.next().unwrap(), &['u', 's']);
/// assert_eq!(iter.next().unwrap(), &['s', 't']);
/// assert!(iter.next().is_none());
/// ```
///
/// If the slice is shorter than `size`:
///
/// ```
/// let slice = ['f', 'o', 'o'];
/// let mut iter = slice.windows(4);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn windows(&self, size: usize) -> Windows<'_, T> {
let size = NonZeroUsize::new(size).expect("size is zero");
Windows::new(self, size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`rchunks`] for the same iterator but starting at the end of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert_eq!(iter.next().unwrap(), &['m']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`chunks_exact`]: #method.chunks_exact
/// [`rchunks`]: #method.rchunks
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
assert_ne!(chunk_size, 0);
Chunks::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`rchunks_mut`] for the same iterator but starting at
/// the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 3]);
/// ```
///
/// [`chunks_exact_mut`]: #method.chunks_exact_mut
/// [`rchunks_mut`]: #method.rchunks_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
assert_ne!(chunk_size, 0);
ChunksMut::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`chunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`rchunks_exact`] for the same iterator but starting at the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks`]: #method.chunks
/// [`rchunks_exact`]: #method.rchunks_exact
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
assert_ne!(chunk_size, 0);
ChunksExact::new(self, chunk_size)
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`chunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`rchunks_exact_mut`] for the same iterator but starting at the end of
/// the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_exact_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_mut`]: #method.chunks_mut
/// [`rchunks_exact_mut`]: #method.rchunks_exact_mut
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
assert_ne!(chunk_size, 0);
ChunksExactMut::new(self, chunk_size)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are array references and do not overlap. If `N` does not divide the
/// length of the slice, then the last up to `N-1` elements will be omitted and can be
/// retrieved from the `remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.array_chunks();
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks_exact`]: #method.chunks_exact
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
assert_ne!(N, 0);
ArrayChunks::new(self)
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable array references and do not overlap. If `N` does not divide
/// the length of the slice, then the last up to `N-1` elements will be omitted and
/// can be retrieved from the `into_remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact_mut`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.array_chunks_mut() {
/// *chunk = [count; 2];
/// count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_exact_mut`]: #method.chunks_exact_mut
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
assert_ne!(N, 0);
ArrayChunksMut::new(self)
}
/// Returns an iterator over overlapping windows of `N` elements of a slice,
/// starting at the beginning of the slice.
///
/// This is the const generic equivalent of [`windows`].
///
/// If `N` is greater than the size of the slice, it will return no windows.
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_windows)]
/// let slice = [0, 1, 2, 3];
/// let mut iter = slice.array_windows();
/// assert_eq!(iter.next().unwrap(), &[0, 1]);
/// assert_eq!(iter.next().unwrap(), &[1, 2]);
/// assert_eq!(iter.next().unwrap(), &[2, 3]);
/// assert!(iter.next().is_none());
/// ```
///
/// [`windows`]: #method.windows
#[unstable(feature = "array_windows", issue = "75027")]
#[inline]
pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> {
assert_ne!(N, 0);
ArrayWindows::new(self)
}
    /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
    /// of the slice.
    ///
    /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
    /// slice, then the last chunk will not have length `chunk_size`.
    ///
    /// See [`rchunks_exact`] for a variant of this iterator that returns chunks of always exactly
    /// `chunk_size` elements, and [`chunks`] for the same iterator but starting at the beginning
    /// of the slice.
    ///
    /// # Panics
    ///
    /// Panics if `chunk_size` is 0.
    ///
    /// # Examples
    ///
    /// ```
    /// let slice = ['l', 'o', 'r', 'e', 'm'];
    /// let mut iter = slice.rchunks(2);
    /// assert_eq!(iter.next().unwrap(), &['e', 'm']);
    /// assert_eq!(iter.next().unwrap(), &['o', 'r']);
    /// assert_eq!(iter.next().unwrap(), &['l']);
    /// assert!(iter.next().is_none());
    /// ```
    ///
    /// [`rchunks_exact`]: #method.rchunks_exact
    /// [`chunks`]: #method.chunks
    #[stable(feature = "rchunks", since = "1.31.0")]
    #[inline]
    pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
        // Zero-sized chunks would never terminate iteration.
        assert!(chunk_size != 0);
        RChunks::new(self, chunk_size)
    }
    /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
    /// of the slice.
    ///
    /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
    /// length of the slice, then the last chunk will not have length `chunk_size`.
    ///
    /// See [`rchunks_exact_mut`] for a variant of this iterator that returns chunks of always
    /// exactly `chunk_size` elements, and [`chunks_mut`] for the same iterator but starting at the
    /// beginning of the slice.
    ///
    /// # Panics
    ///
    /// Panics if `chunk_size` is 0.
    ///
    /// # Examples
    ///
    /// ```
    /// let v = &mut [0, 0, 0, 0, 0];
    /// let mut count = 1;
    ///
    /// for chunk in v.rchunks_mut(2) {
    ///     for elem in chunk.iter_mut() {
    ///         *elem += count;
    ///     }
    ///     count += 1;
    /// }
    /// assert_eq!(v, &[3, 2, 2, 1, 1]);
    /// ```
    ///
    /// [`rchunks_exact_mut`]: #method.rchunks_exact_mut
    /// [`chunks_mut`]: #method.chunks_mut
    #[stable(feature = "rchunks", since = "1.31.0")]
    #[inline]
    pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
        // Zero-sized chunks would never terminate iteration.
        assert!(chunk_size != 0);
        RChunksMut::new(self, chunk_size)
    }
    /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
    /// end of the slice.
    ///
    /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
    /// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
    /// from the `remainder` function of the iterator.
    ///
    /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
    /// resulting code better than in the case of [`chunks`].
    ///
    /// See [`rchunks`] for a variant of this iterator that also returns the remainder as a smaller
    /// chunk, and [`chunks_exact`] for the same iterator but starting at the beginning of the
    /// slice.
    ///
    /// # Panics
    ///
    /// Panics if `chunk_size` is 0.
    ///
    /// # Examples
    ///
    /// ```
    /// let slice = ['l', 'o', 'r', 'e', 'm'];
    /// let mut iter = slice.rchunks_exact(2);
    /// assert_eq!(iter.next().unwrap(), &['e', 'm']);
    /// assert_eq!(iter.next().unwrap(), &['o', 'r']);
    /// assert!(iter.next().is_none());
    /// assert_eq!(iter.remainder(), &['l']);
    /// ```
    ///
    /// [`chunks`]: #method.chunks
    /// [`rchunks`]: #method.rchunks
    /// [`chunks_exact`]: #method.chunks_exact
    #[stable(feature = "rchunks", since = "1.31.0")]
    #[inline]
    pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
        // Zero-sized chunks would never terminate iteration.
        assert!(chunk_size != 0);
        RChunksExact::new(self, chunk_size)
    }
    /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
    /// of the slice.
    ///
    /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
    /// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
    /// retrieved from the `into_remainder` function of the iterator.
    ///
    /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
    /// resulting code better than in the case of [`chunks_mut`].
    ///
    /// See [`rchunks_mut`] for a variant of this iterator that also returns the remainder as a
    /// smaller chunk, and [`chunks_exact_mut`] for the same iterator but starting at the beginning
    /// of the slice.
    ///
    /// # Panics
    ///
    /// Panics if `chunk_size` is 0.
    ///
    /// # Examples
    ///
    /// ```
    /// let v = &mut [0, 0, 0, 0, 0];
    /// let mut count = 1;
    ///
    /// for chunk in v.rchunks_exact_mut(2) {
    ///     for elem in chunk.iter_mut() {
    ///         *elem += count;
    ///     }
    ///     count += 1;
    /// }
    /// assert_eq!(v, &[0, 2, 2, 1, 1]);
    /// ```
    ///
    /// [`chunks_mut`]: #method.chunks_mut
    /// [`rchunks_mut`]: #method.rchunks_mut
    /// [`chunks_exact_mut`]: #method.chunks_exact_mut
    #[stable(feature = "rchunks", since = "1.31.0")]
    #[inline]
    pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
        // Zero-sized chunks would never terminate iteration.
        assert!(chunk_size != 0);
        RChunksExactMut::new(self, chunk_size)
    }
    /// Divides one slice into two at an index.
    ///
    /// The first will contain all indices from `[0, mid)` (excluding
    /// the index `mid` itself) and the second will contain all
    /// indices from `[mid, len)` (excluding the index `len` itself).
    ///
    /// # Panics
    ///
    /// Panics if `mid > len`.
    ///
    /// # Examples
    ///
    /// ```
    /// let v = [1, 2, 3, 4, 5, 6];
    ///
    /// {
    ///    let (left, right) = v.split_at(0);
    ///    assert_eq!(left, []);
    ///    assert_eq!(right, [1, 2, 3, 4, 5, 6]);
    /// }
    ///
    /// {
    ///     let (left, right) = v.split_at(2);
    ///     assert_eq!(left, [1, 2]);
    ///     assert_eq!(right, [3, 4, 5, 6]);
    /// }
    ///
    /// {
    ///     let (left, right) = v.split_at(6);
    ///     assert_eq!(left, [1, 2, 3, 4, 5, 6]);
    ///     assert_eq!(right, []);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
        assert!(mid <= self.len());
        // SAFETY: the assert above established `0 <= mid <= self.len()`,
        // which is exactly what `split_at_unchecked` requires.
        unsafe { self.split_at_unchecked(mid) }
    }
    /// Divides one mutable slice into two at an index.
    ///
    /// The first will contain all indices from `[0, mid)` (excluding
    /// the index `mid` itself) and the second will contain all
    /// indices from `[mid, len)` (excluding the index `len` itself).
    ///
    /// # Panics
    ///
    /// Panics if `mid > len`.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [1, 0, 3, 0, 5, 6];
    /// // scoped to restrict the lifetime of the borrows
    /// {
    ///     let (left, right) = v.split_at_mut(2);
    ///     assert_eq!(left, [1, 0]);
    ///     assert_eq!(right, [3, 0, 5, 6]);
    ///     left[1] = 2;
    ///     right[1] = 4;
    /// }
    /// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
        assert!(mid <= self.len());
        // SAFETY: the assert above established `0 <= mid <= self.len()`,
        // which is exactly what `split_at_mut_unchecked` requires; the two
        // returned halves are disjoint, so the mutable borrows do not alias.
        unsafe { self.split_at_mut_unchecked(mid) }
    }
/// Divides one slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at`]: #method.split_at
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```compile_fail
/// #![feature(slice_split_at_unchecked)]
///
/// let v = [1, 2, 3, 4, 5, 6];
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(0);
/// assert_eq!(left, []);
/// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
/// }
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(2);
/// assert_eq!(left, [1, 2]);
/// assert_eq!(right, [3, 4, 5, 6]);
/// }
///
/// unsafe {
/// let (left, right) = v.split_at_unchecked(6);
/// assert_eq!(left, [1, 2, 3, 4, 5, 6]);
/// assert_eq!(right, []);
/// }
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`.
    // Under that bound the ranges `..mid` and `mid..` are both in-bounds,
    // so the two `get_unchecked` sub-slices are valid and cover the slice.
    unsafe { (self.get_unchecked(..mid), self.get_unchecked(mid..)) }
}
/// Divides one mutable slice into two at an index, without doing bounds checking.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// For a safe alternative see [`split_at_mut`].
///
/// # Safety
///
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used. The caller has to ensure that
/// `0 <= mid <= self.len()`.
///
/// [`split_at_mut`]: #method.split_at_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```compile_fail
/// #![feature(slice_split_at_unchecked)]
///
/// let mut v = [1, 0, 3, 0, 5, 6];
/// // scoped to restrict the lifetime of the borrows
/// unsafe {
/// let (left, right) = v.split_at_mut_unchecked(2);
/// assert_eq!(left, [1, 0]);
/// assert_eq!(right, [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// }
/// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    let len = self.len();
    let ptr = self.as_mut_ptr();
    // SAFETY: Caller has to check that `0 <= mid <= self.len()`.
    //
    // Under that contract `len - mid` cannot underflow, and `ptr.add(mid)`
    // stays within (or one past the end of) the allocation.
    //
    // `[ptr; mid]` and `[mid; len]` are not overlapping, so returning a mutable reference
    // is fine: the two halves partition the buffer exactly.
    unsafe { (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid)) }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the first element is matched, an empty slice will be the first item
/// returned by the iterator. Similarly, if the last element in the slice
/// is matched, an empty slice will be the last item returned by the
/// iterator:
///
/// ```
/// let slice = [10, 40, 33];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert!(iter.next().is_none());
/// ```
///
/// If two matched elements are directly adjacent, an empty slice will be
/// present between them:
///
/// ```
/// let slice = [10, 6, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split<F>(&self, pred: F) -> Split<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Hand the slice borrow and the predicate to the `Split` iterator
    // adapter; the splitting itself happens as the iterator is advanced.
    Split::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_mut(|num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `split`: delegates to the `SplitMut` adapter,
    // which holds the unique borrow for the iterator's lifetime.
    SplitMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is contained in the end of the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// #![feature(split_inclusive)]
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the last element of the slice is matched,
/// that element will be considered the terminator of the preceding slice.
/// That slice will be the last item returned by the iterator.
///
/// ```
/// #![feature(split_inclusive)]
/// let slice = [3, 10, 40, 33];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[3]);
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert!(iter.next().is_none());
/// ```
#[unstable(feature = "split_inclusive", issue = "72360")]
#[inline]
pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Same shape as `split`, but the `SplitInclusive` adapter keeps each
    // matched element at the end of the preceding subslice (see doc above).
    SplitInclusive::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is contained in the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// #![feature(split_inclusive)]
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
/// let terminator_idx = group.len()-1;
/// group[terminator_idx] = 1;
/// }
/// assert_eq!(v, [10, 40, 1, 20, 1, 1]);
/// ```
#[unstable(feature = "split_inclusive", issue = "72360")]
#[inline]
pub fn split_inclusive_mut<F>(&mut self, pred: F) -> SplitInclusiveMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `split_inclusive`; delegates to the adapter.
    SplitInclusiveMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, starting at the end of the slice and working backwards.
/// The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [11, 22, 33, 0, 44, 55];
/// let mut iter = slice.rsplit(|num| *num == 0);
///
/// assert_eq!(iter.next().unwrap(), &[44, 55]);
/// assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
/// assert_eq!(iter.next(), None);
/// ```
///
/// As with `split()`, if the first or last element is matched, an empty
/// slice will be the first (or last) item returned by the iterator.
///
/// ```
/// let v = &[0, 1, 1, 2, 3, 5, 8];
/// let mut it = v.rsplit(|n| *n % 2 == 0);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next().unwrap(), &[3, 5]);
/// assert_eq!(it.next().unwrap(), &[1, 1]);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next(), None);
/// ```
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit<F>(&self, pred: F) -> RSplit<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Reverse-direction splitting is handled entirely by the `RSplit` adapter.
    RSplit::new(self, pred)
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`, starting at the end of the slice and working
/// backwards. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [100, 400, 300, 200, 600, 500];
///
/// let mut count = 0;
/// for group in v.rsplit_mut(|num| *num % 3 == 0) {
/// count += 1;
/// group[0] = count;
/// }
/// assert_eq!(v, [3, 400, 300, 2, 600, 1]);
/// ```
///
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `rsplit`; delegates to the adapter.
    RSplitMut::new(self, pred)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once by numbers divisible by 3 (i.e., `[10, 40]`,
/// `[20, 60, 50]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Built by wrapping the unlimited `split` iterator in a counter that
    // caps the number of yielded items at `n`.
    SplitN::new(self.split(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 50]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `splitn`: `split_mut` wrapped with an `n` cap.
    SplitNMut::new(self.split_mut(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once, starting from the end, by numbers divisible
/// by 3 (i.e., `[50]`, `[10, 40, 30, 20]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.rsplitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Reverse variant of `splitn`: the unlimited `rsplit` iterator wrapped
    // with a counter that caps the number of yielded items at `n`.
    RSplitN::new(self.rsplit(pred), n)
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut s = [10, 40, 30, 20, 60, 50];
///
/// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(s, [1, 40, 30, 20, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `rsplitn`: `rsplit_mut` wrapped with an `n` cap.
    RSplitNMut::new(self.rsplit_mut(pred), n)
}
/// Returns `true` if the slice contains an element with the given value.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.contains(&30));
/// assert!(!v.contains(&50));
/// ```
///
/// If you do not have an `&T`, but just an `&U` such that `T: Borrow<U>`
/// (e.g. `String: Borrow<str>`), you can use `iter().any`:
///
/// ```
/// let v = [String::from("hello"), String::from("world")]; // slice of `String`
/// assert!(v.iter().any(|e| e == "hello")); // search with `&str`
/// assert!(!v.iter().any(|e| e == "hi"));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn contains(&self, x: &T) -> bool
where
    T: PartialEq,
{
    // Dispatch through the `cmp::SliceContains` helper trait rather than a
    // plain `iter().any(..)`, so the search can be implemented per element
    // type (presumably to allow optimized byte searches — see `cmp`).
    cmp::SliceContains::slice_contains(x, self)
}
/// Returns `true` if `needle` is a prefix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.starts_with(&[10]));
/// assert!(v.starts_with(&[10, 40]));
/// assert!(!v.starts_with(&[50]));
/// assert!(!v.starts_with(&[10, 50]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.starts_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.starts_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn starts_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // Checked sub-slicing: `get(..len)` yields `None` exactly when the
    // needle is longer than the slice, which can never be a prefix match.
    // An empty needle yields an empty head, which compares equal.
    match self.get(..needle.len()) {
        Some(head) => head == needle,
        None => false,
    }
}
/// Returns `true` if `needle` is a suffix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.ends_with(&[30]));
/// assert!(v.ends_with(&[40, 30]));
/// assert!(!v.ends_with(&[50]));
/// assert!(!v.ends_with(&[50, 30]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.ends_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.ends_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ends_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // `checked_sub` returns `None` exactly when the needle is longer than
    // the slice — no suffix match is possible then. Otherwise compare the
    // trailing sub-slice of matching length against the needle; an empty
    // needle compares equal to the empty tail.
    match self.len().checked_sub(needle.len()) {
        Some(start) => &self[start..] == needle,
        None => false,
    }
}
/// Returns a subslice with the prefix removed.
///
/// If the slice starts with `prefix`, returns the subslice after the prefix, wrapped in `Some`.
///
/// If the slice does not start with `prefix`, returns `None`.
///
/// (If `prefix` is empty, simply returns the original slice.)
///
/// # Examples
///
/// ```
/// #![feature(slice_strip)]
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
/// assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
/// assert_eq!(v.strip_prefix(&[50]), None);
/// assert_eq!(v.strip_prefix(&[10, 50]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[unstable(feature = "slice_strip", issue = "73413")]
pub fn strip_prefix(&self, prefix: &[T]) -> Option<&[T]>
where
    T: PartialEq,
{
    // Guard clause: a prefix longer than the slice can never match.
    if prefix.len() > self.len() {
        return None;
    }
    // Split off the candidate head; keep the tail only on an exact match.
    // An empty prefix splits at 0 and trivially matches, returning `self`.
    let (head, tail) = self.split_at(prefix.len());
    if head == prefix { Some(tail) } else { None }
}
/// Returns a subslice with the suffix removed.
///
/// If the slice ends with `suffix`, returns the subslice before the suffix, wrapped in `Some`.
///
/// If the slice does not end with `suffix`, returns `None`.
///
/// (If `suffix` is empty, simply returns the original slice.)
///
/// # Examples
///
/// ```
/// #![feature(slice_strip)]
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
/// assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
/// assert_eq!(v.strip_suffix(&[50]), None);
/// assert_eq!(v.strip_suffix(&[50, 30]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[unstable(feature = "slice_strip", issue = "73413")]
pub fn strip_suffix(&self, suffix: &[T]) -> Option<&[T]>
where
    T: PartialEq,
{
    let total = self.len();
    let wanted = suffix.len();
    // Guard clause: a suffix longer than the slice can never match.
    if wanted > total {
        return None;
    }
    // Split off the candidate tail; keep the head only on an exact match.
    // An empty suffix splits at `len` and trivially matches, returning `self`.
    let (head, tail) = self.split_at(total - wanted);
    if tail == suffix { Some(head) } else { None }
}
/// Binary searches this sorted slice for a given element.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. If the value is not found then
/// [`Result::Err`] is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// assert_eq!(s.binary_search(&13), Ok(9));
/// assert_eq!(s.binary_search(&4), Err(7));
/// assert_eq!(s.binary_search(&100), Err(13));
/// let r = s.binary_search(&1);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
///
/// If you want to insert an item to a sorted vector, while maintaining
/// sort order:
///
/// ```
/// let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
/// let num = 42;
/// let idx = s.binary_search(&num).unwrap_or_else(|x| x);
/// s.insert(idx, num);
/// assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where
    T: Ord,
{
    // Delegate to the comparator form, using `Ord::cmp` against the target.
    self.binary_search_by(|p| p.cmp(x))
}
/// Binary searches this sorted slice with a comparator function.
///
/// The comparator function should implement an order consistent
/// with the sort order of the underlying slice, returning an
/// order code that indicates whether its argument is `Less`,
/// `Equal` or `Greater` than the desired target.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. If the value is not found then
/// [`Result::Err`] is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// let seek = 13;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
/// let seek = 4;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
/// let seek = 100;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
/// let seek = 1;
/// let r = s.binary_search_by(|probe| probe.cmp(&seek));
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> Ordering,
{
    let s = self;
    let mut size = s.len();
    if size == 0 {
        return Err(0);
    }
    let mut base = 0usize;
    // Branchless-style bisection: `base` tracks the start of the live
    // window and `size` its width; the window halves each iteration.
    while size > 1 {
        let half = size / 2;
        let mid = base + half;
        // SAFETY: the call is made safe by the following invariants:
        // - `mid >= 0`: by definition
        // - `mid < size`: `mid = size / 2 + size / 4 + size / 8 ...`
        let cmp = f(unsafe { s.get_unchecked(mid) });
        // If the probe is greater than the target, keep the lower half;
        // otherwise move `base` up to the probe (it may still be the answer).
        base = if cmp == Greater { base } else { mid };
        size -= half;
    }
    // SAFETY: base is always in [0, size) because base <= mid.
    let cmp = f(unsafe { s.get_unchecked(base) });
    // `Less` means the target would sit just after `base`, hence the +1.
    if cmp == Equal { Ok(base) } else { Err(base + (cmp == Less) as usize) }
}
/// Binary searches this sorted slice with a key extraction function.
///
/// Assumes that the slice is sorted by the key, for instance with
/// [`sort_by_key`] using the same key extraction function.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. If the value is not found then
/// [`Result::Err`] is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// [`sort_by_key`]: #method.sort_by_key
///
/// # Examples
///
/// Looks up a series of four elements in a slice of pairs sorted by
/// their second elements. The first is found, with a uniquely
/// determined position; the second and third are not found; the
/// fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
/// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
/// (1, 21), (2, 34), (4, 55)];
///
/// assert_eq!(s.binary_search_by_key(&13, |&(a,b)| b), Ok(9));
/// assert_eq!(s.binary_search_by_key(&4, |&(a,b)| b), Err(7));
/// assert_eq!(s.binary_search_by_key(&100, |&(a,b)| b), Err(13));
/// let r = s.binary_search_by_key(&1, |&(a,b)| b);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
#[inline]
pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> B,
    B: Ord,
{
    // Delegate to the comparator form: extract each probe's key with `f`
    // and compare it against the target key `b`.
    self.binary_search_by(|k| f(k).cmp(b))
}
/// Sorts the slice, but may not preserve the order of equal elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort_unstable();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable(&mut self)
where
    T: Ord,
{
    // The internal quicksort takes an `is_less` predicate; `lt` is the
    // strict comparison supplied by `Ord`/`PartialOrd`.
    sort::quicksort(self, |a, b| a.lt(b));
}
/// Sorts the slice with a comparator function, but may not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
/// total order if it is (for all a, b and c):
///
/// * total and antisymmetric: exactly one of a < b, a == b or a > b is true; and
/// * transitive, a < b and b < c implies a < c. The same must hold for both == and >.
///
/// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
/// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
///
/// ```
/// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
/// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
/// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
/// ```
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_unstable_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_unstable_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by<F>(&mut self, mut compare: F)
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Adapt the caller's three-way comparator to the boolean `is_less`
    // form the internal quicksort expects.
    sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less);
}
/// Sorts the slice with a key extraction function, but may not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(m \* *n* \* log(*n*)) worst-case, where the key function is
/// *O*(*m*).
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// Due to its key calling strategy, [`sort_unstable_by_key`](#method.sort_unstable_by_key)
/// is likely to be slower than [`sort_by_cached_key`](#method.sort_by_cached_key) in
/// cases where the key function is expensive.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_unstable_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by_key<K, F>(&mut self, mut f: F)
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // NOTE: `f` runs twice per comparison (once for each operand), which is
    // why the doc above points expensive key functions at
    // `sort_by_cached_key` instead.
    sort::quicksort(self, |a, b| f(a).lt(&f(b)));
}
/// Reorder the slice such that the element at `index` is at its final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index`. Additionally, this reordering is
/// unstable (i.e. any number of equal elements may end up at position `index`), in-place
/// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also known as "kth
/// element" in other libraries. It returns a triplet of the following values: all elements less
/// than the one at the given index, the value at the given index, and all elements greater than
/// the one at the given index.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median
/// v.partition_at_index(2);
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [-3, -5, 1, 2, 4] ||
/// v == [-5, -3, 1, 2, 4] ||
/// v == [-3, -5, 1, 4, 2] ||
/// v == [-5, -3, 1, 4, 2]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    // Quickselect over the natural `Ord` ordering, expressed as an
    // `is_less` predicate for the internal implementation.
    let mut f = |a: &T, b: &T| a.lt(b);
    sort::partition_at_index(self, index, &mut f)
}
/// Reorder the slice with a comparator function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the comparator function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index,
/// and all elements greater than the one at the given index, using the provided comparator
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median as if the slice were sorted in descending order.
/// v.partition_at_index_by(2, |a, b| b.cmp(a));
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [2, 4, 1, -5, -3] ||
/// v == [2, 4, 1, -3, -5] ||
/// v == [4, 2, 1, -5, -3] ||
/// v == [4, 2, 1, -3, -5]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index_by<F>(
    &mut self,
    index: usize,
    mut compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Adapt the caller's three-way comparator to the boolean `is_less`
    // form the internal quickselect expects.
    let mut f = |a: &T, b: &T| compare(a, b) == Less;
    sort::partition_at_index(self, index, &mut f)
}
/// Reorder the slice with a key extraction function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the key extraction function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index, and
/// all elements greater than the one at the given index, using the provided key extraction
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Return the median as if the array were sorted according to absolute value.
/// v.partition_at_index_by_key(2, |a| a.abs());
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [1, 2, -3, 4, -5] ||
/// v == [1, 2, -3, -5, 4] ||
/// v == [2, 1, -3, 4, -5] ||
/// v == [2, 1, -3, -5, 4]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index_by_key<K, F>(
    &mut self,
    index: usize,
    mut f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // NOTE: the key function `f` runs twice per comparison (once per
    // operand) before handing off to the internal quickselect.
    let mut g = |a: &T, b: &T| f(a).lt(&f(b));
    sort::partition_at_index(self, index, &mut g)
}
/// Moves all consecutive repeated elements to the end of the slice according to the
/// [`PartialEq`] trait implementation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
///
/// let (dedup, duplicates) = slice.partition_dedup();
///
/// assert_eq!(dedup, [1, 2, 3, 2, 1]);
/// assert_eq!(duplicates, [2, 3, 1]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup(&mut self) -> (&mut [T], &mut [T])
where
    T: PartialEq,
{
    // Delegate to the general form, using `PartialEq` equality as the
    // "same bucket" relation.
    self.partition_dedup_by(|a, b| a == b)
}
/// Moves all but the first of consecutive elements to the end of the slice satisfying
/// a given equality relation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// The `same_bucket` function is passed references to two elements from the slice and
/// must determine if the elements compare equal. The elements are passed in opposite order
/// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is moved
/// at the end of the slice.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
///
/// let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
///
/// assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
/// assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by<F>(&mut self, mut same_bucket: F) -> (&mut [T], &mut [T])
where
    F: FnMut(&mut T, &mut T) -> bool,
{
    // Although we have a mutable reference to `self`, we cannot make
    // *arbitrary* changes. The `same_bucket` calls could panic, so we
    // must ensure that the slice is in a valid state at all times.
    //
    // The way that we handle this is by using swaps; we iterate
    // over all the elements, swapping as we go so that at the end
    // the elements we wish to keep are in the front, and those we
    // wish to reject are at the back. We can then split the slice.
    // This operation is still `O(n)`.
    //
    // Example: We start in this state, where `r` represents "next
    // read" and `w` represents "next_write`.
    //
    //           r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //           w
    //
    // Comparing self[r] against self[w-1], this is not a duplicate, so
    // we swap self[r] and self[w] (no effect as r==w) and then increment both
    // r and w, leaving us with:
    //
    //               r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //               w
    //
    // Comparing self[r] against self[w-1], this value is a duplicate,
    // so we increment `r` but leave everything else unchanged:
    //
    //                   r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //               w
    //
    // Comparing self[r] against self[w-1], this is not a duplicate,
    // so swap self[r] and self[w] and advance r and w:
    //
    //                       r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 2 | 1 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //                   w
    //
    // Not a duplicate, repeat:
    //
    //                           r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 2 | 3 | 1 | 3 |
    //     +---+---+---+---+---+---+
    //                       w
    //
    // Duplicate, advance r. End of slice. Split at w.
    let len = self.len();
    if len <= 1 {
        // Zero or one element: trivially dedup'd, no duplicates.
        return (self, &mut []);
    }
    let ptr = self.as_mut_ptr();
    let mut next_read: usize = 1;
    let mut next_write: usize = 1;
    // SAFETY: the `while` condition guarantees `next_read` and `next_write`
    // are less than `len`, thus are inside `self`. `prev_ptr_write` points to
    // one element before `ptr_write`, but `next_write` starts at 1, so
    // `prev_ptr_write` is never less than 0 and is inside the slice.
    // This fulfils the requirements for dereferencing `ptr_read`, `prev_ptr_write`
    // and `ptr_write`, and for using `ptr.add(next_read)`, `ptr.add(next_write - 1)`
    // and `prev_ptr_write.offset(1)`.
    //
    // `next_write` is also incremented at most once per loop at most meaning
    // no element is skipped when it may need to be swapped.
    //
    // `ptr_read` and `prev_ptr_write` never point to the same element. This
    // is required for `&mut *ptr_read`, `&mut *prev_ptr_write` to be safe.
    // The explanation is simply that `next_read >= next_write` is always true,
    // thus `next_read > next_write - 1` is too.
    unsafe {
        // Avoid bounds checks by using raw pointers.
        while next_read < len {
            let ptr_read = ptr.add(next_read);
            let prev_ptr_write = ptr.add(next_write - 1);
            if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
                if next_read != next_write {
                    let ptr_write = prev_ptr_write.offset(1);
                    mem::swap(&mut *ptr_read, &mut *ptr_write);
                }
                next_write += 1;
            }
            next_read += 1;
        }
    }
    // Everything before `next_write` was kept; the tail holds the duplicates.
    self.split_at_mut(next_write)
}
/// Moves all but the first of consecutive elements to the end of the slice that resolve
/// to the same key.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
///
/// let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
///
/// assert_eq!(dedup, [10, 20, 30, 20, 11]);
/// assert_eq!(duplicates, [21, 30, 13]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by_key<K, F>(&mut self, mut key: F) -> (&mut [T], &mut [T])
where
    F: FnMut(&mut T) -> K,
    K: PartialEq,
{
    // Two consecutive elements share a bucket when their extracted keys
    // compare equal.
    self.partition_dedup_by(|a, b| PartialEq::eq(&key(a), &key(b)))
}
/// Rotates the slice in-place such that the first `mid` elements of the
/// slice move to the end while the last `self.len() - mid` elements move to
/// the front. After calling `rotate_left`, the element previously at index
/// `mid` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `mid` is greater than the length of the
/// slice. Note that `mid == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_left(2);
/// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
/// ```
///
/// Rotating a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_left(1);
/// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_left(&mut self, mid: usize) {
    let len = self.len();
    assert!(mid <= len);
    // SAFETY: the range `[p.add(mid) - mid, p.add(mid) + (len - mid))` is
    // exactly this slice, which is valid for reading and writing, as
    // required by `ptr_rotate`.
    unsafe {
        rotate::ptr_rotate(mid, self.as_mut_ptr().add(mid), len - mid);
    }
}
/// Rotates the slice in-place such that the first `self.len() - k`
/// elements of the slice move to the end while the last `k` elements move
/// to the front. After calling `rotate_right`, the element previously at
/// index `self.len() - k` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `k` is greater than the length of the
/// slice. Note that `k == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_right(2);
/// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
/// ```
///
/// Rotate a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_right(1);
/// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_right(&mut self, k: usize) {
    let len = self.len();
    assert!(k <= len);
    // Rotating right by `k` is rotating left around the pivot `len - k`.
    let mid = len - k;
    // SAFETY: the range `[p.add(mid) - mid, p.add(mid) + k)` is exactly this
    // slice, which is valid for reading and writing, as `ptr_rotate` requires.
    unsafe {
        rotate::ptr_rotate(mid, self.as_mut_ptr().add(mid), k);
    }
}
/// Fills `self` with elements by cloning `value`.
///
/// # Examples
///
/// ```
/// #![feature(slice_fill)]
///
/// let mut buf = vec![0; 10];
/// buf.fill(1);
/// assert_eq!(buf, vec![1; 10]);
/// ```
#[unstable(feature = "slice_fill", issue = "70758")]
pub fn fill(&mut self, value: T)
where
    T: Clone,
{
    // Clone into every slot except the last one, then *move* `value` into
    // the last slot so one clone is saved. An empty slice needs no work.
    match self.split_last_mut() {
        Some((last, init)) => {
            for slot in init {
                slot.clone_from(&value);
            }
            *last = value;
        }
        None => {}
    }
}
/// Copies the elements from `src` into `self`.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` implements `Copy`, it can be more performant to use
/// [`copy_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Cloning two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.clone_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `clone_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].clone_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.clone_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`copy_from_slice`]: #method.copy_from_slice
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "clone_from_slice", since = "1.7.0")]
pub fn clone_from_slice(&mut self, src: &[T])
where
    T: Clone,
{
    assert!(self.len() == src.len(), "destination and source slices have different lengths");
    // NOTE: zipping the two equal-length slices lets the optimizer elide
    // per-element bounds checks and lower simple cases (for example T = u8)
    // to a memcpy.
    for (dst, s) in self.iter_mut().zip(src) {
        dst.clone_from(s);
    }
}
/// Copies all elements from `src` into `self`, using a memcpy.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` does not implement `Copy`, use [`clone_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Copying two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.copy_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `copy_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].copy_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.copy_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`clone_from_slice`]: #method.clone_from_slice
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "copy_from_slice", since = "1.9.0")]
pub fn copy_from_slice(&mut self, src: &[T])
where
    T: Copy,
{
    if self.len() != src.len() {
        length_mismatch(self.len(), src.len());
    }
    // SAFETY: `self` is valid for `self.len()` elements by definition, and
    // `src` was just checked to have the same length. The slices cannot
    // overlap because mutable references are exclusive.
    unsafe {
        ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
    }

    // The panic path lives in a cold, never-inlined function so the fast
    // path stays small at every call site.
    #[inline(never)]
    #[cold]
    #[track_caller]
    fn length_mismatch(dst_len: usize, src_len: usize) -> ! {
        panic!(
            "source slice length ({}) does not match destination slice length ({})",
            src_len, dst_len,
        );
    }
}
/// Copies elements from one part of the slice to another part of itself,
/// using a memmove.
///
/// `src` is the range within `self` to copy from. `dest` is the starting
/// index of the range within `self` to copy to, which will have the same
/// length as `src`. The two ranges may overlap. The ends of the two ranges
/// must be less than or equal to `self.len()`.
///
/// # Panics
///
/// This function will panic if either range exceeds the end of the slice,
/// or if the end of `src` is before the start.
///
/// # Examples
///
/// Copying four bytes within a slice:
///
/// ```
/// let mut bytes = *b"Hello, World!";
///
/// bytes.copy_within(1..5, 8);
///
/// assert_eq!(&bytes, b"Hello, Wello!");
/// ```
#[stable(feature = "copy_within", since = "1.37.0")]
#[track_caller]
pub fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dest: usize)
where
    T: Copy,
{
    // `check_range` panics if the range exceeds the slice or is inverted.
    let src_range = check_range(self.len(), src);
    let count = src_range.end - src_range.start;
    // `count <= len` holds here, so the subtraction cannot underflow.
    assert!(dest <= self.len() - count, "dest is out of bounds");
    // SAFETY: the conditions for `ptr::copy` have all been checked above,
    // as have those for `ptr::add`. `ptr::copy` handles overlapping ranges.
    unsafe {
        ptr::copy(self.as_ptr().add(src_range.start), self.as_mut_ptr().add(dest), count);
    }
}
/// Swaps all elements in `self` with those in `other`.
///
/// The length of `other` must be the same as `self`.
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Example
///
/// Swapping two elements across slices:
///
/// ```
/// let mut slice1 = [0, 0];
/// let mut slice2 = [1, 2, 3, 4];
///
/// slice1.swap_with_slice(&mut slice2[2..]);
///
/// assert_eq!(slice1, [3, 4]);
/// assert_eq!(slice2, [1, 2, 0, 0]);
/// ```
///
/// Rust enforces that there can only be one mutable reference to a
/// particular piece of data in a particular scope. Because of this,
/// attempting to use `swap_with_slice` on a single slice will result in
/// a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
/// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// mutable sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.swap_with_slice(&mut right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 1, 2]);
/// ```
///
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "swap_with_slice", since = "1.27.0")]
pub fn swap_with_slice(&mut self, other: &mut [T]) {
    assert!(self.len() == other.len(), "destination and source slices have different lengths");
    // SAFETY: both slices are valid for `self.len()` elements (their lengths
    // were just checked equal), and they cannot overlap because mutable
    // references are exclusive.
    unsafe {
        ptr::swap_nonoverlapping(self.as_mut_ptr(), other.as_mut_ptr(), self.len());
    }
}
/// Function to calculate lengths of the middle and trailing slice for `align_to{,_mut}`.
///
/// Returns `(us_len, ts_len)`: how many `U` elements fit in the aligned
/// middle, and how many `T` elements remain in the trailing slice.
fn align_to_offsets<U>(&self) -> (usize, usize) {
    // What we gonna do about `rest` is figure out what multiple of `U`s we can put in a
    // lowest number of `T`s. And how many `T`s we need for each such "multiple".
    //
    // Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
    // for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
    // place of every 3 Ts in the `rest` slice. A bit more complicated.
    //
    // Formula to calculate this is:
    //
    // Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
    // Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
    //
    // Expanded and simplified:
    //
    // Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
    // Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
    //
    // Luckily since all this is constant-evaluated... performance here matters not!
    #[inline]
    fn gcd(a: usize, b: usize) -> usize {
        use crate::intrinsics;
        // iterative stein’s algorithm (binary GCD)
        // We should still make this `const fn` (and revert to recursive algorithm if we do)
        // because relying on llvm to consteval all this is… well, it makes me uncomfortable.
        // SAFETY: `a` and `b` are checked to be non-zero values.
        let (ctz_a, mut ctz_b) = unsafe {
            if a == 0 {
                return b;
            }
            if b == 0 {
                return a;
            }
            (intrinsics::cttz_nonzero(a), intrinsics::cttz_nonzero(b))
        };
        // `k` is the shared power-of-two factor, reapplied at the end.
        let k = ctz_a.min(ctz_b);
        let mut a = a >> ctz_a;
        let mut b = b;
        loop {
            // remove all factors of 2 from b
            b >>= ctz_b;
            if a > b {
                mem::swap(&mut a, &mut b);
            }
            b = b - a;
            // SAFETY: `b` is checked to be non-zero.
            unsafe {
                if b == 0 {
                    break;
                }
                ctz_b = intrinsics::cttz_nonzero(b);
            }
        }
        a << k
    }
    let gcd: usize = gcd(mem::size_of::<T>(), mem::size_of::<U>());
    let ts: usize = mem::size_of::<U>() / gcd;
    let us: usize = mem::size_of::<T>() / gcd;
    // Armed with this knowledge, we can find how many `U`s we can fit!
    let us_len = self.len() / ts * us;
    // And how many `T`s will be in the trailing slice!
    let ts_len = self.len() % ts;
    (us_len, ts_len)
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
/// let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
/// let (prefix, shorts, suffix) = bytes.align_to::<u16>();
/// // less_efficient_algorithm_for_bytes(prefix);
/// // more_efficient_algorithm_for_aligned_shorts(shorts);
/// // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
    // Note that most of this function will be constant-evaluated,
    if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
        // handle ZSTs specially, which is – don't handle them at all.
        return (self, &[], &[]);
    }
    // First, find at what point do we split between the first and 2nd slice. Easy with
    // ptr.align_offset.
    let ptr = self.as_ptr();
    // SAFETY: See the `align_to_mut` method for the detailed safety comment.
    let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
    if offset > self.len() {
        // `align_offset` reported no usable aligned position within this
        // slice; everything stays in the prefix.
        (self, &[], &[])
    } else {
        let (left, rest) = self.split_at(offset);
        let (us_len, ts_len) = rest.align_to_offsets::<U>();
        // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
        // since the caller guarantees that we can transmute `T` to `U` safely.
        unsafe {
            (
                left,
                from_raw_parts(rest.as_ptr() as *const U, us_len),
                from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
            )
        }
    }
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
/// let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
/// let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
/// // less_efficient_algorithm_for_bytes(prefix);
/// // more_efficient_algorithm_for_aligned_shorts(shorts);
/// // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
    // Note that most of this function will be constant-evaluated,
    if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
        // handle ZSTs specially, which is – don't handle them at all.
        return (self, &mut [], &mut []);
    }
    // First, find at what point do we split between the first and 2nd slice. Easy with
    // ptr.align_offset.
    let ptr = self.as_ptr();
    // SAFETY: Here we are ensuring we will use aligned pointers for U for the
    // rest of the method. This is done by passing a pointer to &[T] with an
    // alignment targeted for U.
    // `crate::ptr::align_offset` is called with a correctly aligned and
    // valid pointer `ptr` (it comes from a reference to `self`) and with
    // a size that is a power of two (since it comes from the alignment for U),
    // satisfying its safety constraints.
    let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
    if offset > self.len() {
        // No usable aligned position within the slice; everything is prefix.
        (self, &mut [], &mut [])
    } else {
        let (left, rest) = self.split_at_mut(offset);
        let (us_len, ts_len) = rest.align_to_offsets::<U>();
        let rest_len = rest.len();
        let mut_ptr = rest.as_mut_ptr();
        // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
        // SAFETY: see comments for `align_to`.
        unsafe {
            (
                left,
                from_raw_parts_mut(mut_ptr as *mut U, us_len),
                from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
            )
        }
    }
}
/// Checks if the elements of this slice are sorted.
///
/// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the
/// slice yields exactly zero or one element, `true` is returned.
///
/// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition
/// implies that this function returns `false` if any two consecutive items are not
/// comparable.
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
/// let empty: [i32; 0] = [];
///
/// assert!([1, 2, 2, 9].is_sorted());
/// assert!(![1, 3, 2, 4].is_sorted());
/// assert!([0].is_sorted());
/// assert!(empty.is_sorted());
/// assert!(![0.0, 1.0, f32::NAN].is_sorted());
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted(&self) -> bool
where
    T: PartialOrd,
{
    // Sorted means every adjacent pair is comparable and non-decreasing;
    // the comparator-based variant does the adjacent-pair walk.
    self.is_sorted_by(|a, b| PartialOrd::partial_cmp(a, b))
}
/// Checks if the elements of this slice are sorted using the given comparator function.
///
/// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare`
/// function to determine the ordering of two elements. Apart from that, it's equivalent to
/// [`is_sorted`]; see its documentation for more information.
///
/// [`is_sorted`]: #method.is_sorted
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
where
    F: FnMut(&T, &T) -> Option<Ordering>,
{
    // `iter()` yields `&T`, so the adapter's closure sees `&&T`; strip one
    // level of reference before handing the pair to the user's comparator.
    self.iter().is_sorted_by(|lhs, rhs| compare(*lhs, *rhs))
}
/// Checks if the elements of this slice are sorted using the given key extraction function.
///
/// Instead of comparing the slice's elements directly, this function compares the keys of the
/// elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see its
/// documentation for more information.
///
/// [`is_sorted`]: #method.is_sorted
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
///
/// assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
/// assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
where
    F: FnMut(&T) -> K,
    K: PartialOrd,
{
    // Delegate to the iterator adapter, which compares the keys extracted
    // from adjacent elements.
    self.iter().is_sorted_by_key(f)
}
/// Returns the index of the partition point according to the given predicate
/// (the index of the first element of the second partition).
///
/// The slice is assumed to be partitioned according to the given predicate.
/// This means that all elements for which the predicate returns true are at the start of the slice
/// and all elements for which the predicate returns false are at the end.
/// For example, [7, 15, 3, 5, 4, 12, 6] is a partitioned under the predicate x % 2 != 0
/// (all odd numbers are at the start, all even at the end).
///
/// If this slice is not partitioned, the returned result is unspecified and meaningless,
/// as this method performs a kind of binary search.
///
/// # Examples
///
/// ```
/// #![feature(partition_point)]
///
/// let v = [1, 2, 3, 3, 5, 6, 7];
/// let i = v.partition_point(|&x| x < 5);
///
/// assert_eq!(i, 4);
/// assert!(v[..i].iter().all(|&x| x < 5));
/// assert!(v[i..].iter().all(|&x| !(x < 5)));
/// ```
#[unstable(feature = "partition_point", reason = "new API", issue = "73831")]
pub fn partition_point<P>(&self, mut pred: P) -> usize
where
    P: FnMut(&T) -> bool,
{
    // Binary search over the predicate boundary. Loop invariant: every
    // element below `lo` satisfies `pred`, every element at or above `hi`
    // does not.
    let mut lo = 0;
    let mut hi = self.len();
    while lo != hi {
        // Written as `lo + (hi - lo) / 2` so the addition cannot overflow.
        let mid = lo + (hi - lo) / 2;
        // SAFETY: while `lo < hi`, `lo <= mid < hi <= len` holds, so `mid`
        // is a valid index. Each iteration either raises `lo` to `mid + 1`
        // or lowers `hi` to `mid`, preserving `0 <= lo <= hi <= len`, so the
        // bound keeps holding on every subsequent iteration.
        let probe = unsafe { self.get_unchecked(mid) };
        if pred(probe) {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    lo
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for &[T] {
    /// Creates an empty slice.
    fn default() -> Self {
        // A zero-length slice literal; involves no allocation.
        &[]
    }
}
#[stable(feature = "mut_slice_default", since = "1.5.0")]
impl<T> Default for &mut [T] {
    /// Creates a mutable empty slice.
    fn default() -> Self {
        // A zero-length slice literal; involves no allocation.
        &mut []
    }
}
|
//! Shadowsocks Server Context
use std::{
io,
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
#[cfg(feature = "local-dns-relay")]
use std::net::IpAddr;
use bloomfilter::Bloom;
#[cfg(feature = "local-dns-relay")]
use lru_time_cache::LruCache;
use spin::Mutex;
use tokio::runtime::Handle;
#[cfg(feature = "trust-dns")]
use trust_dns_resolver::TokioAsyncResolver;
#[cfg(feature = "trust-dns")]
use crate::relay::dns_resolver::create_resolver;
#[cfg(feature = "local-flow-stat")]
use crate::relay::flow::ServerFlowStatistic;
use crate::{
config::{Config, ConfigType, ServerConfig},
relay::{dns_resolver::resolve, socks5::Address},
};
/// Entries for server's bloom filter
///
/// Borrowed from shadowsocks-libev's default value
const BF_NUM_ENTRIES_FOR_SERVER: usize = 1_000_000;
/// Entries for client's bloom filter
///
/// Borrowed from shadowsocks-libev's default value
const BF_NUM_ENTRIES_FOR_CLIENT: usize = 10_000;
/// Error rate for server's bloom filter
///
/// Borrowed from shadowsocks-libev's default value
const BF_ERROR_RATE_FOR_SERVER: f64 = 1e-6;
/// Error rate for client's bloom filter
///
/// Borrowed from shadowsocks-libev's default value
const BF_ERROR_RATE_FOR_CLIENT: f64 = 1e-15;
/// A bloom filter borrowed from shadowsocks-libev's `ppbloom`
///
/// It contains 2 bloom filters and each one holds 1/2 entries.
/// Use them as a ring buffer.
struct PingPongBloom {
    // The two filter generations, used alternately as a ring buffer
    blooms: [Bloom<[u8]>; 2],
    // Number of items inserted into each filter so far
    bloom_count: [usize; 2],
    // Capacity of a single filter; when the active one reaches it,
    // the other filter is cleared and becomes active
    item_count: usize,
    // Index (0 or 1) of the filter currently receiving inserts
    current: usize,
}
impl PingPongBloom {
    /// Build a filter pair sized according to the configuration type.
    fn new(ty: ConfigType) -> PingPongBloom {
        // Locals see far fewer nonces than servers, so they get a smaller
        // capacity with a stricter false-positive rate.
        let (total_entries, fp_p) = if ty.is_local() {
            (BF_NUM_ENTRIES_FOR_CLIENT, BF_ERROR_RATE_FOR_CLIENT)
        } else {
            (BF_NUM_ENTRIES_FOR_SERVER, BF_ERROR_RATE_FOR_SERVER)
        };
        // Each of the two filters holds half of the configured capacity.
        let item_count = total_entries / 2;
        PingPongBloom {
            blooms: [
                Bloom::new_for_fp_rate(item_count, fp_p),
                Bloom::new_for_fp_rate(item_count, fp_p),
            ],
            bloom_count: [0, 0],
            item_count,
            current: 0,
        }
    }

    /// Check if data in `buf` exist.
    ///
    /// Set into the current bloom filter if not exist.
    ///
    /// Return `true` if data exist in bloom filter.
    fn check_and_set(&mut self, buf: &[u8]) -> bool {
        // A hit in *either* generation counts as a duplicate.
        if self.blooms.iter().any(|bloom| bloom.check(buf)) {
            return true;
        }
        if self.bloom_count[self.current] >= self.item_count {
            // The active filter is full: rotate to the other slot and
            // start it over from empty.
            self.current = (self.current + 1) % 2;
            self.bloom_count[self.current] = 0;
            self.blooms[self.current].clear();
        }
        // `check` and `set` stay separate calls because every filter in
        // `blooms` must be consulted before deciding the item is new.
        self.blooms[self.current].set(buf);
        self.bloom_count[self.current] += 1;
        false
    }
}
/// Server's global running status
///
/// Shared between UDP and TCP servers
pub struct ServerState {
    // Shared DNS resolver; `None` when the resolver failed to initialize
    #[cfg(feature = "trust-dns")]
    dns_resolver: Option<TokioAsyncResolver>,
}
#[cfg(feature = "trust-dns")]
impl ServerState {
    /// Create a global shared server state
    pub async fn new_shared(config: &Config, rt: Handle) -> SharedServerState {
        // A resolver that fails to initialize degrades to `None` instead of
        // aborting startup; the error itself is discarded.
        let dns_resolver = create_resolver(config.get_dns_config(), config.timeout, config.ipv6_first, rt)
            .await
            .ok();
        Arc::new(ServerState { dns_resolver })
    }

    /// Get the global shared resolver
    pub fn dns_resolver(&self) -> Option<&TokioAsyncResolver> {
        self.dns_resolver.as_ref()
    }
}
#[cfg(not(feature = "trust-dns"))]
impl ServerState {
    /// Create a global shared server state
    ///
    /// Without the `trust-dns` feature the state carries no fields;
    /// resolution presumably goes through `relay::dns_resolver::resolve`'s
    /// fallback path — confirm against that module.
    pub async fn new_shared(_config: &Config, _rt: Handle) -> SharedServerState {
        Arc::new(ServerState {})
    }
}
/// `ServerState` wrapped in `Arc`, so it can be shared cheaply between the servers
pub type SharedServerState = Arc<ServerState>;
/// Shared basic configuration for the whole server
pub struct Context {
    // Full server configuration (servers, ACL, timeout, ...)
    config: Config,
    // Shared variables for all servers
    server_state: SharedServerState,
    // Server's running indicator
    // For killing all background jobs
    server_running: AtomicBool,
    // Check for duplicated IV/Nonce, for prevent replay attack
    // https://github.com/shadowsocks/shadowsocks-org/issues/44
    nonce_ppbloom: Mutex<PingPongBloom>,
    // For Android's flow stat report
    #[cfg(feature = "local-flow-stat")]
    local_flow_statistic: ServerFlowStatistic,
    // For DNS relay's ACL domain name reverse lookup -- whether the IP shall be forwarded
    #[cfg(feature = "local-dns-relay")]
    reverse_lookup_cache: Mutex<LruCache<IpAddr, bool>>,
}
/// Unique context for the whole server
pub type SharedContext = Arc<Context>;
impl Context {
/// Create a non-shared Context
fn new(config: Config, server_state: SharedServerState) -> Context {
    #[cfg(feature = "local-dns-relay")]
    let reverse_lookup_cache = Mutex::new(LruCache::<IpAddr, bool>::with_capacity(8192));
    // Filter sizing depends on whether this instance runs as a local or a
    // server; read it before `config` is moved into the struct.
    let nonce_ppbloom = Mutex::new(PingPongBloom::new(config.config_type));
    Context {
        config,
        server_state,
        server_running: AtomicBool::new(true),
        nonce_ppbloom,
        #[cfg(feature = "local-flow-stat")]
        local_flow_statistic: ServerFlowStatistic::new(),
        #[cfg(feature = "local-dns-relay")]
        reverse_lookup_cache,
    }
}
/// Create a shared Context, wrapped in `Arc`
pub fn new_shared(config: Config, server_state: SharedServerState) -> SharedContext {
SharedContext::new(Context::new(config, server_state))
}
/// Config for TCP server
///
/// Borrowed, read-only view of the whole configuration.
pub fn config(&self) -> &Config {
    &self.config
}
/// Clone the internal ServerState
pub fn clone_server_state(&self) -> SharedServerState {
self.server_state.clone()
}
/// Mutable Config for TCP server
///
/// NOTE: Only for launching plugins
// NOTE(review): `&mut self` cannot be obtained through `SharedContext`
// (`Arc<Context>`), so this is presumably only callable before the context
// is shared — confirm against callers.
pub fn config_mut(&mut self) -> &mut Config {
    &mut self.config
}
/// Get ServerConfig by index
///
/// Panics if `idx` is out of bounds of `config.server`.
pub fn server_config(&self, idx: usize) -> &ServerConfig {
    &self.config.server[idx]
}
/// Get mutable ServerConfig by index
///
/// Panics if `idx` is out of bounds of `config.server`.
pub fn server_config_mut(&mut self, idx: usize) -> &mut ServerConfig {
    &mut self.config.server[idx]
}
#[cfg(feature = "trust-dns")]
/// Get the global shared resolver
///
/// Returns `None` when the resolver failed to initialize at startup.
pub fn dns_resolver(&self) -> Option<&TokioAsyncResolver> {
    self.server_state.dns_resolver()
}
/// Perform a DNS resolution
///
/// Resolves `host:port` into socket addresses via `relay::dns_resolver::resolve`,
/// which consults this context's configuration.
pub async fn dns_resolve(&self, host: &str, port: u16) -> io::Result<Vec<SocketAddr>> {
    resolve(self, host, port).await
}
/// Check if the server is still in running state
pub fn server_running(&self) -> bool {
self.server_running.load(Ordering::Acquire)
}
/// Stops the server, kills all detached running tasks
pub fn set_server_stopped(&self) {
self.server_running.store(false, Ordering::Release)
}
/// Check if nonce exist or not
///
/// If not, set into the current bloom filter
pub fn check_nonce_and_set(&self, nonce: &[u8]) -> bool {
// Plain cipher doesn't have a nonce
// Always treated as non-duplicated
if nonce.is_empty() {
return false;
}
let mut ppbloom = self.nonce_ppbloom.lock();
ppbloom.check_and_set(nonce)
}
/// Check client ACL (for server)
pub fn check_client_blocked(&self, addr: &SocketAddr) -> bool {
match self.config.acl {
None => false,
Some(ref a) => a.check_client_blocked(addr),
}
}
/// Check outbound address ACL (for server)
pub fn check_outbound_blocked(&self, addr: &Address) -> bool {
match self.config.acl {
None => false,
Some(ref a) => a.check_outbound_blocked(addr),
}
}
/// Check resolved outbound address ACL (for server)
pub fn check_resolved_outbound_blocked(&self, addr: &SocketAddr) -> bool {
match self.config.acl {
None => false,
Some(ref a) => a.check_resolved_outbound_blocked(addr),
}
}
/// Add a record to the reverse lookup cache
#[cfg(feature = "local-dns-relay")]
pub fn add_to_reverse_lookup_cache(&self, addr: &IpAddr, forward: bool) {
let mut reverse_lookup_cache = self.reverse_lookup_cache.lock();
reverse_lookup_cache.insert(addr.clone(), forward);
}
/// Check if domain name is in proxy_list.
/// If so, it should be resolved from remote (for Android's DNS relay)
pub fn check_qname_in_proxy_list(&self, qname: &Address) -> Option<bool> {
match self.config.acl {
// Proxy everything by default
None => None,
Some(ref a) => a.check_qname_in_proxy_list(qname),
}
}
#[cfg(feature = "local-dns-relay")]
pub fn check_ip_in_proxy_list(&self, ip: &IpAddr) -> bool {
match self.config.acl {
// Proxy everything by default
None => true,
Some(ref a) => {
// first check the IP list
if a.check_ip_in_proxy_list(ip) {
true
} else {
// do the reverse lookup in our local cache
let mut reverse_lookup_cache = self.reverse_lookup_cache.lock();
// if a qname is found
if let Some(forward) = reverse_lookup_cache.get(ip) {
// if qname is resolved by remote,
// we should proxy it as well
*forward
} else {
// by default not proxied
false
}
}
}
}
}
/// Check target address ACL (for client)
pub async fn check_target_bypassed(&self, target: &Address) -> bool {
match self.config.acl {
// Proxy everything by default
None => false,
Some(ref a) => {
#[cfg(feature = "local-dns-relay")]
{
if let Address::SocketAddress(ref saddr) = target {
// do the reverse lookup in our local cache
let mut reverse_lookup_cache = self.reverse_lookup_cache.lock();
// if a qname is found
if let Some(forward) = reverse_lookup_cache.get(&saddr.ip()) {
return !*forward
}
}
}
a.check_target_bypassed(self, target).await
}
}
}
/// Get client flow statistics
#[cfg(feature = "local-flow-stat")]
pub fn local_flow_statistic(&self) -> &ServerFlowStatistic {
&self.local_flow_statistic
}
}
Revert the compare order in check_ip_in_proxy_list
//! Shadowsocks Server Context
use std::{
io,
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
#[cfg(feature = "local-dns-relay")]
use std::net::IpAddr;
use bloomfilter::Bloom;
#[cfg(feature = "local-dns-relay")]
use lru_time_cache::LruCache;
use spin::Mutex;
use tokio::runtime::Handle;
#[cfg(feature = "trust-dns")]
use trust_dns_resolver::TokioAsyncResolver;
#[cfg(feature = "trust-dns")]
use crate::relay::dns_resolver::create_resolver;
#[cfg(feature = "local-flow-stat")]
use crate::relay::flow::ServerFlowStatistic;
use crate::{
config::{Config, ConfigType, ServerConfig},
relay::{dns_resolver::resolve, socks5::Address},
};
// Capacity of the server's bloom filter (number of nonces tracked)
//
// Borrowed from shadowsocks-libev's default value
const BF_NUM_ENTRIES_FOR_SERVER: usize = 1_000_000;
// Capacity of the client's bloom filter (number of nonces tracked)
//
// Borrowed from shadowsocks-libev's default value
const BF_NUM_ENTRIES_FOR_CLIENT: usize = 10_000;
// False-positive rate for the server's bloom filter
//
// Borrowed from shadowsocks-libev's default value
const BF_ERROR_RATE_FOR_SERVER: f64 = 1e-6;
// False-positive rate for the client's bloom filter
//
// Borrowed from shadowsocks-libev's default value
const BF_ERROR_RATE_FOR_CLIENT: f64 = 1e-15;
// A bloom filter borrowed from shadowsocks-libev's `ppbloom`
//
// It contains 2 bloom filters and each one holds 1/2 entries.
// Use them as a ring buffer.
struct PingPongBloom {
    // The two filters; the full pair is checked, only `current` is written
    blooms: [Bloom<[u8]>; 2],
    // Number of items inserted into each filter since it was last cleared
    bloom_count: [usize; 2],
    // Capacity of a single filter (half of the configured total)
    item_count: usize,
    // Index (0 or 1) of the filter currently accepting insertions
    current: usize,
}
impl PingPongBloom {
    /// Build the filter pair, sized by whether this instance runs as a
    /// local (client) or a remote (server) endpoint.
    fn new(ty: ConfigType) -> PingPongBloom {
        let (entries, fp_rate) = if ty.is_local() {
            (BF_NUM_ENTRIES_FOR_CLIENT, BF_ERROR_RATE_FOR_CLIENT)
        } else {
            (BF_NUM_ENTRIES_FOR_SERVER, BF_ERROR_RATE_FOR_SERVER)
        };
        // Each of the two filters holds half of the configured capacity.
        let half = entries / 2;
        PingPongBloom {
            blooms: [
                Bloom::new_for_fp_rate(half, fp_rate),
                Bloom::new_for_fp_rate(half, fp_rate),
            ],
            bloom_count: [0, 0],
            item_count: half,
            current: 0,
        }
    }

    /// Return `true` if `buf` was already seen in either filter;
    /// otherwise record it in the current filter and return `false`.
    fn check_and_set(&mut self, buf: &[u8]) -> bool {
        // A hit in either filter means the datum is a duplicate.
        if self.blooms.iter().any(|bloom| bloom.check(buf)) {
            return true;
        }
        if self.bloom_count[self.current] >= self.item_count {
            // The active filter is saturated: rotate to the other slot and
            // wipe it before reuse (ring-buffer behaviour).
            self.current = (self.current + 1) % 2;
            self.bloom_count[self.current] = 0;
            self.blooms[self.current].clear();
        }
        // `set` cannot be fused with the lookup above (`check_and_set` on a
        // single filter) because *both* filters must be consulted first.
        self.blooms[self.current].set(buf);
        self.bloom_count[self.current] += 1;
        false
    }
}
/// Server's global running status
///
/// Shared between UDP and TCP servers
pub struct ServerState {
    // Shared trust-dns resolver; `None` means resolver creation failed and
    // resolution falls back to the non-trust-dns path
    #[cfg(feature = "trust-dns")]
    dns_resolver: Option<TokioAsyncResolver>,
}
#[cfg(feature = "trust-dns")]
impl ServerState {
    /// Create a global shared server state
    pub async fn new_shared(config: &Config, rt: Handle) -> SharedServerState {
        // A resolver that fails to initialize is simply dropped (`None`);
        // callers degrade gracefully through `dns_resolver() == None`.
        let dns_resolver = create_resolver(config.get_dns_config(), config.timeout, config.ipv6_first, rt)
            .await
            .ok();
        Arc::new(ServerState { dns_resolver })
    }

    /// Get the global shared resolver
    pub fn dns_resolver(&self) -> Option<&TokioAsyncResolver> {
        self.dns_resolver.as_ref()
    }
}
// Fallback when trust-dns is disabled: the state carries no resolver.
#[cfg(not(feature = "trust-dns"))]
impl ServerState {
    /// Create a global shared server state
    pub async fn new_shared(_config: &Config, _rt: Handle) -> SharedServerState {
        Arc::new(ServerState {})
    }
}
/// `ServerState` wrapped in `Arc`
pub type SharedServerState = Arc<ServerState>;

/// Shared basic configuration for the whole server
pub struct Context {
    config: Config,
    // Shared variables for all servers
    server_state: SharedServerState,
    // Server's running indicator
    // For killing all background jobs
    server_running: AtomicBool,
    // Check for duplicated IV/Nonce, to prevent replay attacks
    // https://github.com/shadowsocks/shadowsocks-org/issues/44
    nonce_ppbloom: Mutex<PingPongBloom>,
    // For Android's flow stat report
    #[cfg(feature = "local-flow-stat")]
    local_flow_statistic: ServerFlowStatistic,
    // For DNS relay's ACL domain name reverse lookup -- whether the IP shall be forwarded
    #[cfg(feature = "local-dns-relay")]
    reverse_lookup_cache: Mutex<LruCache<IpAddr, bool>>,
}
/// Unique context for the whole server, shared between all servers via `Arc`
pub type SharedContext = Arc<Context>;
impl Context {
    /// Create a non-shared Context
    fn new(config: Config, server_state: SharedServerState) -> Context {
        let nonce_ppbloom = Mutex::new(PingPongBloom::new(config.config_type));
        // Cache capacity of 8192 entries for qname reverse lookups
        #[cfg(feature = "local-dns-relay")]
        let reverse_lookup_cache = Mutex::new(LruCache::<IpAddr, bool>::with_capacity(8192));
        Context {
            config,
            server_state,
            server_running: AtomicBool::new(true),
            nonce_ppbloom,
            #[cfg(feature = "local-flow-stat")]
            local_flow_statistic: ServerFlowStatistic::new(),
            #[cfg(feature = "local-dns-relay")]
            reverse_lookup_cache,
        }
    }

    /// Create a shared Context, wrapped in `Arc`
    pub fn new_shared(config: Config, server_state: SharedServerState) -> SharedContext {
        SharedContext::new(Context::new(config, server_state))
    }

    /// Config for TCP server
    pub fn config(&self) -> &Config {
        &self.config
    }

    /// Clone the internal ServerState
    pub fn clone_server_state(&self) -> SharedServerState {
        self.server_state.clone()
    }

    /// Mutable Config for TCP server
    ///
    /// NOTE: Only for launching plugins
    pub fn config_mut(&mut self) -> &mut Config {
        &mut self.config
    }

    /// Get ServerConfig by index
    pub fn server_config(&self, idx: usize) -> &ServerConfig {
        &self.config.server[idx]
    }

    /// Get mutable ServerConfig by index
    pub fn server_config_mut(&mut self, idx: usize) -> &mut ServerConfig {
        &mut self.config.server[idx]
    }

    #[cfg(feature = "trust-dns")]
    /// Get the global shared resolver
    pub fn dns_resolver(&self) -> Option<&TokioAsyncResolver> {
        self.server_state.dns_resolver()
    }

    /// Perform a DNS resolution
    pub async fn dns_resolve(&self, host: &str, port: u16) -> io::Result<Vec<SocketAddr>> {
        resolve(self, host, port).await
    }

    /// Check if the server is still in running state
    pub fn server_running(&self) -> bool {
        self.server_running.load(Ordering::Acquire)
    }

    /// Stops the server, kills all detached running tasks
    pub fn set_server_stopped(&self) {
        self.server_running.store(false, Ordering::Release)
    }

    /// Check if nonce exist or not
    ///
    /// If not, set into the current bloom filter
    pub fn check_nonce_and_set(&self, nonce: &[u8]) -> bool {
        // Plain cipher doesn't have a nonce
        // Always treated as non-duplicated
        if nonce.is_empty() {
            return false;
        }
        let mut ppbloom = self.nonce_ppbloom.lock();
        ppbloom.check_and_set(nonce)
    }

    /// Check client ACL (for server)
    pub fn check_client_blocked(&self, addr: &SocketAddr) -> bool {
        match self.config.acl {
            // No ACL configured: nothing is blocked
            None => false,
            Some(ref a) => a.check_client_blocked(addr),
        }
    }

    /// Check outbound address ACL (for server)
    pub fn check_outbound_blocked(&self, addr: &Address) -> bool {
        match self.config.acl {
            None => false,
            Some(ref a) => a.check_outbound_blocked(addr),
        }
    }

    /// Check resolved outbound address ACL (for server)
    pub fn check_resolved_outbound_blocked(&self, addr: &SocketAddr) -> bool {
        match self.config.acl {
            None => false,
            Some(ref a) => a.check_resolved_outbound_blocked(addr),
        }
    }

    /// Add a record to the reverse lookup cache
    #[cfg(feature = "local-dns-relay")]
    pub fn add_to_reverse_lookup_cache(&self, addr: &IpAddr, forward: bool) {
        let mut reverse_lookup_cache = self.reverse_lookup_cache.lock();
        reverse_lookup_cache.insert(addr.clone(), forward);
    }

    /// Check if domain name is in proxy_list.
    /// If so, it should be resolved from remote (for Android's DNS relay)
    pub fn check_qname_in_proxy_list(&self, qname: &Address) -> Option<bool> {
        match self.config.acl {
            // Proxy everything by default
            None => None,
            Some(ref a) => a.check_qname_in_proxy_list(qname),
        }
    }

    /// Check if an IP address shall be proxied (for Android's DNS relay).
    /// The reverse-lookup cache takes precedence over the ACL's IP rules.
    #[cfg(feature = "local-dns-relay")]
    pub fn check_ip_in_proxy_list(&self, ip: &IpAddr) -> bool {
        match self.config.acl {
            // Proxy everything by default
            None => true,
            Some(ref a) => {
                // do the reverse lookup in our local cache
                let mut reverse_lookup_cache = self.reverse_lookup_cache.lock();
                // if a qname is found
                if let Some(forward) = reverse_lookup_cache.get(ip) {
                    // if qname is resolved by remote,
                    // we should proxy it as well
                    *forward
                } else {
                    // no cached qname decision: fall back to the ACL IP rules
                    a.check_ip_in_proxy_list(ip)
                }
            }
        }
    }

    /// Check target address ACL (for client)
    pub async fn check_target_bypassed(&self, target: &Address) -> bool {
        match self.config.acl {
            // Proxy everything by default
            None => false,
            Some(ref a) => {
                #[cfg(feature = "local-dns-relay")]
                {
                    if let Address::SocketAddress(ref saddr) = target {
                        // do the reverse lookup in our local cache
                        let mut reverse_lookup_cache = self.reverse_lookup_cache.lock();
                        // if a qname is found, bypass iff it was NOT resolved remotely
                        if let Some(forward) = reverse_lookup_cache.get(&saddr.ip()) {
                            return !*forward
                        }
                    }
                }
                a.check_target_bypassed(self, target).await
            }
        }
    }

    /// Get client flow statistics
    #[cfg(feature = "local-flow-stat")]
    pub fn local_flow_statistic(&self) -> &ServerFlowStatistic {
        &self.local_flow_statistic
    }
}
|
//! A `Context` is an opaque owner and manager of core global data.
#[llvm_versions(7.0..=latest)]
use crate::InlineAsmDialect;
use libc::c_void;
#[llvm_versions(4.0..7.0)]
use llvm_sys::core::LLVMConstInlineAsm;
#[llvm_versions(12.0..=latest)]
use llvm_sys::core::LLVMCreateTypeAttribute;
#[llvm_versions(7.0..=latest)]
use llvm_sys::core::LLVMGetInlineAsm;
#[llvm_versions(6.0..=latest)]
use llvm_sys::core::LLVMMetadataTypeInContext;
use llvm_sys::core::{
LLVMAppendBasicBlockInContext, LLVMConstStringInContext, LLVMConstStructInContext, LLVMContextCreate,
LLVMContextDispose, LLVMContextSetDiagnosticHandler, LLVMCreateBuilderInContext, LLVMCreateEnumAttribute,
LLVMCreateStringAttribute, LLVMDoubleTypeInContext, LLVMFP128TypeInContext, LLVMFloatTypeInContext,
LLVMGetGlobalContext, LLVMGetMDKindIDInContext, LLVMHalfTypeInContext, LLVMInsertBasicBlockInContext,
LLVMInt16TypeInContext, LLVMInt1TypeInContext, LLVMInt32TypeInContext, LLVMInt64TypeInContext,
LLVMInt8TypeInContext, LLVMIntTypeInContext, LLVMMDNodeInContext, LLVMMDStringInContext,
LLVMModuleCreateWithNameInContext, LLVMPPCFP128TypeInContext, LLVMStructCreateNamed, LLVMStructTypeInContext,
LLVMVoidTypeInContext, LLVMX86FP80TypeInContext,
};
use llvm_sys::ir_reader::LLVMParseIRInContext;
use llvm_sys::prelude::{LLVMContextRef, LLVMDiagnosticInfoRef, LLVMTypeRef, LLVMValueRef};
use llvm_sys::target::{LLVMIntPtrTypeForASInContext, LLVMIntPtrTypeInContext};
use once_cell::sync::Lazy;
use parking_lot::{Mutex, MutexGuard};
use crate::attributes::Attribute;
use crate::basic_block::BasicBlock;
use crate::builder::Builder;
use crate::memory_buffer::MemoryBuffer;
use crate::module::Module;
use crate::support::{to_c_str, LLVMString};
use crate::targets::TargetData;
#[llvm_versions(12.0..=latest)]
use crate::types::AnyTypeEnum;
#[llvm_versions(6.0..=latest)]
use crate::types::MetadataType;
use crate::types::{AsTypeRef, BasicTypeEnum, FloatType, FunctionType, IntType, StructType, VoidType};
use crate::values::{
AsValueRef, BasicMetadataValueEnum, BasicValueEnum, FunctionValue, MetadataValue, PointerValue, StructValue,
VectorValue,
};
use crate::AddressSpace;
#[cfg(feature = "internal-getters")]
use crate::LLVMReference;
use std::marker::PhantomData;
use std::mem::{forget, ManuallyDrop};
use std::ops::Deref;
use std::ptr;
use std::thread_local;
// The idea of using a Mutex<Context> here and a thread local'd MutexGuard<Context> in
// GLOBAL_CTX_LOCK is to ensure two things:
// 1) Only one thread has access to the global context at a time.
// 2) The thread has shared access across different points in the thread.
// This is still technically unsafe because another program in the same process
// could also be accessing the global context via the C API. `get_global` has been
// marked unsafe for this reason. Iff this isn't the case then this should be fully safe.
static GLOBAL_CTX: Lazy<Mutex<Context>> = Lazy::new(|| unsafe { Mutex::new(Context::new(LLVMGetGlobalContext())) });

// Per-thread, lazily-acquired guard over GLOBAL_CTX: the first use on a thread
// takes the lock and keeps it for the lifetime of that thread's lazy cell.
thread_local! {
    pub(crate) static GLOBAL_CTX_LOCK: Lazy<MutexGuard<'static, Context>> = Lazy::new(|| {
        GLOBAL_CTX.lock()
    });
}
/// This struct allows us to share method impls across Context and ContextRef types
// Invariant: the wrapped pointer is never null (asserted in `UnsafeContext::new`).
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct UnsafeContext(pub(crate) LLVMContextRef);
impl UnsafeContext {
    /// Wraps a raw LLVM context pointer.
    ///
    /// # Safety
    /// `context` must point to a live LLVM context; null pointers are rejected
    /// by the assertion below, but validity/liveness is the caller's contract.
    pub(crate) unsafe fn new(context: LLVMContextRef) -> Self {
        assert!(!context.is_null());
        UnsafeContext(context)
    }

    // Creates an IR builder bound to this context.
    fn create_builder<'ctx>(&self) -> Builder<'ctx> {
        unsafe { Builder::new(LLVMCreateBuilderInContext(self.0)) }
    }

    // Creates a named module bound to this context.
    fn create_module<'ctx>(&self, name: &str) -> Module<'ctx> {
        let c_string = to_c_str(name);
        unsafe { Module::new(LLVMModuleCreateWithNameInContext(c_string.as_ptr(), self.0)) }
    }

    // Parses IR (textual or bitcode) from `memory_buffer` into a new module.
    // On failure `err_str` is an LLVM-owned message wrapped as LLVMString.
    fn create_module_from_ir<'ctx>(&self, memory_buffer: MemoryBuffer) -> Result<Module<'ctx>, LLVMString> {
        let mut module = ptr::null_mut();
        let mut err_str = ptr::null_mut();
        let code = unsafe { LLVMParseIRInContext(self.0, memory_buffer.memory_buffer, &mut module, &mut err_str) };
        // LLVMParseIRInContext takes ownership of the buffer; `forget` prevents
        // our Drop from freeing it a second time.
        forget(memory_buffer);
        if code == 0 {
            unsafe {
                return Ok(Module::new(module));
            }
        }
        unsafe { Err(LLVMString::new(err_str)) }
    }

    // Builds an inline-asm constant of function type `ty`. Extra parameters
    // (dialect, can_throw) exist only on LLVM versions that support them,
    // which is why the signature is stitched together with cfg attributes.
    fn create_inline_asm<'ctx>(
        &self,
        ty: FunctionType<'ctx>,
        mut assembly: String,
        mut constraints: String,
        sideeffects: bool,
        alignstack: bool,
        #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))] dialect: Option<
            InlineAsmDialect,
        >,
        #[cfg(not(any(
            feature = "llvm4-0",
            feature = "llvm5-0",
            feature = "llvm6-0",
            feature = "llvm7-0",
            feature = "llvm8-0",
            feature = "llvm9-0",
            feature = "llvm10-0",
            feature = "llvm11-0",
            feature = "llvm12-0"
        )))]
        can_throw: bool,
    ) -> PointerValue<'ctx> {
        let value = unsafe {
            LLVMGetInlineAsm(
                ty.as_type_ref(),
                // LLVMGetInlineAsm takes (ptr, len) pairs, not NUL-terminated
                // strings, so the String buffers are passed directly.
                assembly.as_mut_ptr() as *mut ::libc::c_char,
                assembly.len(),
                constraints.as_mut_ptr() as *mut ::libc::c_char,
                constraints.len(),
                sideeffects as i32,
                alignstack as i32,
                #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))]
                dialect.unwrap_or(InlineAsmDialect::ATT).into(),
                #[cfg(not(any(
                    feature = "llvm4-0",
                    feature = "llvm5-0",
                    feature = "llvm6-0",
                    feature = "llvm7-0",
                    feature = "llvm8-0",
                    feature = "llvm9-0",
                    feature = "llvm10-0",
                    feature = "llvm11-0",
                    feature = "llvm12-0"
                )))]
                {
                    can_throw as i32
                },
            )
        };
        unsafe { PointerValue::new(value) }
    }

    // --- primitive type getters, one thin FFI wrapper each ---

    fn void_type<'ctx>(&self) -> VoidType<'ctx> {
        unsafe { VoidType::new(LLVMVoidTypeInContext(self.0)) }
    }

    fn bool_type<'ctx>(&self) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMInt1TypeInContext(self.0)) }
    }

    fn i8_type<'ctx>(&self) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMInt8TypeInContext(self.0)) }
    }

    fn i16_type<'ctx>(&self) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMInt16TypeInContext(self.0)) }
    }

    fn i32_type<'ctx>(&self) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMInt32TypeInContext(self.0)) }
    }

    fn i64_type<'ctx>(&self) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMInt64TypeInContext(self.0)) }
    }

    // TODO: Call LLVMInt128TypeInContext in applicable versions
    fn i128_type<'ctx>(&self) -> IntType<'ctx> {
        self.custom_width_int_type(128)
    }

    fn custom_width_int_type<'ctx>(&self, bits: u32) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMIntTypeInContext(self.0, bits)) }
    }

    #[llvm_versions(6.0..=latest)]
    fn metadata_type<'ctx>(&self) -> MetadataType<'ctx> {
        unsafe { MetadataType::new(LLVMMetadataTypeInContext(self.0)) }
    }

    // Integer type with the pointer width of `target_data`, optionally for a
    // specific address space.
    fn ptr_sized_int_type<'ctx>(&self, target_data: &TargetData, address_space: Option<AddressSpace>) -> IntType<'ctx> {
        let int_type_ptr = match address_space {
            Some(address_space) => unsafe {
                LLVMIntPtrTypeForASInContext(self.0, target_data.target_data, address_space as u32)
            },
            None => unsafe { LLVMIntPtrTypeInContext(self.0, target_data.target_data) },
        };
        unsafe { IntType::new(int_type_ptr) }
    }

    fn f16_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMHalfTypeInContext(self.0)) }
    }

    fn f32_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMFloatTypeInContext(self.0)) }
    }

    fn f64_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMDoubleTypeInContext(self.0)) }
    }

    fn x86_f80_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMX86FP80TypeInContext(self.0)) }
    }

    fn f128_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMFP128TypeInContext(self.0)) }
    }

    fn ppc_f128_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMPPCFP128TypeInContext(self.0)) }
    }

    // Anonymous (literal) struct type from the given field types.
    fn struct_type<'ctx>(&self, field_types: &[BasicTypeEnum], packed: bool) -> StructType<'ctx> {
        let mut field_types: Vec<LLVMTypeRef> = field_types.iter().map(|val| val.as_type_ref()).collect();
        unsafe {
            StructType::new(LLVMStructTypeInContext(
                self.0,
                field_types.as_mut_ptr(),
                field_types.len() as u32,
                packed as i32,
            ))
        }
    }

    // Named struct type with no body set yet.
    fn opaque_struct_type<'ctx>(&self, name: &str) -> StructType<'ctx> {
        let c_string = to_c_str(name);
        unsafe { StructType::new(LLVMStructCreateNamed(self.0, c_string.as_ptr())) }
    }

    // Constant struct value from the given constant fields.
    fn const_struct<'ctx>(&self, values: &[BasicValueEnum], packed: bool) -> StructValue<'ctx> {
        let mut args: Vec<LLVMValueRef> = values.iter().map(|val| val.as_value_ref()).collect();
        unsafe {
            StructValue::new(LLVMConstStructInContext(
                self.0,
                args.as_mut_ptr(),
                args.len() as u32,
                packed as i32,
            ))
        }
    }

    fn append_basic_block<'ctx>(&self, function: FunctionValue<'ctx>, name: &str) -> BasicBlock<'ctx> {
        let c_string = to_c_str(name);
        unsafe {
            BasicBlock::new(LLVMAppendBasicBlockInContext(
                self.0,
                function.as_value_ref(),
                c_string.as_ptr(),
            ))
            .expect("Appending basic block should never fail")
        }
    }

    // Insert after `basic_block`: prepend before its successor, or append to
    // the parent function when it is the last block.
    fn insert_basic_block_after<'ctx>(&self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
        match basic_block.get_next_basic_block() {
            Some(next_basic_block) => self.prepend_basic_block(next_basic_block, name),
            None => {
                let parent_fn = basic_block.get_parent().unwrap();
                self.append_basic_block(parent_fn, name)
            },
        }
    }

    fn prepend_basic_block<'ctx>(&self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
        let c_string = to_c_str(name);
        unsafe {
            BasicBlock::new(LLVMInsertBasicBlockInContext(
                self.0,
                basic_block.basic_block,
                c_string.as_ptr(),
            ))
            .expect("Prepending basic block should never fail")
        }
    }

    // Metadata tuple node from the given values.
    fn metadata_node<'ctx>(&self, values: &[BasicMetadataValueEnum<'ctx>]) -> MetadataValue<'ctx> {
        let mut tuple_values: Vec<LLVMValueRef> = values.iter().map(|val| val.as_value_ref()).collect();
        unsafe {
            MetadataValue::new(LLVMMDNodeInContext(
                self.0,
                tuple_values.as_mut_ptr(),
                tuple_values.len() as u32,
            ))
        }
    }

    fn metadata_string<'ctx>(&self, string: &str) -> MetadataValue<'ctx> {
        let c_string = to_c_str(string);
        unsafe { MetadataValue::new(LLVMMDStringInContext(self.0, c_string.as_ptr(), string.len() as u32)) }
    }

    // Numeric ID for a metadata kind name (e.g. "range", "nontemporal").
    fn get_kind_id(&self, key: &str) -> u32 {
        unsafe { LLVMGetMDKindIDInContext(self.0, key.as_ptr() as *const ::libc::c_char, key.len() as u32) }
    }

    fn create_enum_attribute(&self, kind_id: u32, val: u64) -> Attribute {
        unsafe { Attribute::new(LLVMCreateEnumAttribute(self.0, kind_id, val)) }
    }

    fn create_string_attribute(&self, key: &str, val: &str) -> Attribute {
        unsafe {
            Attribute::new(LLVMCreateStringAttribute(
                self.0,
                key.as_ptr() as *const _,
                key.len() as u32,
                val.as_ptr() as *const _,
                val.len() as u32,
            ))
        }
    }

    #[llvm_versions(12.0..=latest)]
    fn create_type_attribute(&self, kind_id: u32, type_ref: AnyTypeEnum) -> Attribute {
        unsafe { Attribute::new(LLVMCreateTypeAttribute(self.0, kind_id, type_ref.as_type_ref())) }
    }

    // Constant byte-string value. NOTE(review): LLVM's last parameter appears
    // to mean "don't null-terminate", hence the negation of `null_terminated`
    // — confirm against the LLVM-C docs before relying on it.
    fn const_string<'ctx>(&self, string: &[u8], null_terminated: bool) -> VectorValue<'ctx> {
        unsafe {
            VectorValue::new(LLVMConstStringInContext(
                self.0,
                string.as_ptr() as *const ::libc::c_char,
                string.len() as u32,
                !null_terminated as i32,
            ))
        }
    }

    // Registers `handler` to receive LLVM diagnostics; `void_ptr` is passed
    // through to the handler verbatim.
    fn set_diagnostic_handler(
        &self,
        handler: extern "C" fn(LLVMDiagnosticInfoRef, *mut c_void),
        void_ptr: *mut c_void,
    ) {
        unsafe { LLVMContextSetDiagnosticHandler(self.0, Some(handler), void_ptr) }
    }
}
// A ContextRef equals a Context when both wrap the same raw LLVM pointer.
impl PartialEq<Context> for ContextRef<'_> {
    fn eq(&self, other: &Context) -> bool {
        self.context == other.context
    }
}
// Symmetric counterpart of the impl above, so `ctx == ctx_ref` also compiles.
impl PartialEq<ContextRef<'_>> for Context {
    fn eq(&self, other: &ContextRef<'_>) -> bool {
        self.context == other.context
    }
}
/// A `Context` is a container for all LLVM entities including `Module`s.
///
/// A `Context` is not thread safe and cannot be shared across threads. Multiple `Context`s
/// can, however, execute on different threads simultaneously according to the LLVM docs.
#[derive(Debug, PartialEq, Eq)]
pub struct Context {
    pub(crate) context: UnsafeContext,
}

// SAFETY: presumably sound because a Context owns its LLVMContextRef
// exclusively, so *moving* it to another thread is fine even though it is not
// Sync — TODO(review): confirm against LLVM's threading guarantees.
unsafe impl Send for Context {}
impl Context {
pub(crate) unsafe fn new(context: LLVMContextRef) -> Self {
Context {
context: UnsafeContext::new(context),
}
}
/// Creates a new `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// ```
pub fn create() -> Self {
unsafe { Context::new(LLVMContextCreate()) }
}
/// Gets a `Mutex<Context>` which points to the global context singleton.
/// This function is marked unsafe because another program within the same
/// process could easily gain access to the same LLVM context pointer and bypass
/// our `Mutex`. Therefore, using `Context::create()` is the preferred context
/// creation function when you do not specifically need the global context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = unsafe {
/// Context::get_global(|_global_context| {
/// // do stuff
/// })
/// };
/// ```
pub unsafe fn get_global<F, R>(func: F) -> R
where
F: FnOnce(&Context) -> R,
{
GLOBAL_CTX_LOCK.with(|lazy| func(&*lazy))
}
/// Creates a new `Builder` for a `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// ```
#[inline]
pub fn create_builder(&self) -> Builder {
self.context.create_builder()
}
/// Creates a new `Module` for a `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// ```
#[inline]
pub fn create_module(&self, name: &str) -> Module {
self.context.create_module(name)
}
/// Creates a new `Module` for the current `Context` from a `MemoryBuffer`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// let builder = context.create_builder();
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_val = module.add_function("my_fn", fn_type, None);
/// let basic_block = context.append_basic_block(fn_val, "entry");
///
/// builder.position_at_end(basic_block);
/// builder.build_return(None);
///
/// let memory_buffer = module.write_bitcode_to_memory();
///
/// let module2 = context.create_module_from_ir(memory_buffer).unwrap();
/// ```
// REVIEW: I haven't yet been able to find docs or other wrappers that confirm, but my suspicion
// is that the method needs to take ownership of the MemoryBuffer... otherwise I see what looks like
// a double free in valgrind when the MemoryBuffer drops so we are `forget`ting MemoryBuffer here
// for now until we can confirm this is the correct thing to do
#[inline]
pub fn create_module_from_ir(&self, memory_buffer: MemoryBuffer) -> Result<Module, LLVMString> {
self.context.create_module_from_ir(memory_buffer)
}
/// Creates a inline asm function pointer.
///
/// # Example
/// ```no_run
/// use std::convert::TryFrom;
/// use inkwell::context::Context;
/// use inkwell::values::CallableValue;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// let builder = context.create_builder();
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_val = module.add_function("my_fn", fn_type, None);
/// let basic_block = context.append_basic_block(fn_val, "entry");
///
/// builder.position_at_end(basic_block);
/// let asm_fn = context.i64_type().fn_type(&[context.i64_type().into(), context.i64_type().into()], false);
/// let asm = context.create_inline_asm(
/// asm_fn,
/// "syscall".to_string(),
/// "=r,{rax},{rdi}".to_string(),
/// true,
/// false,
/// #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))] None,
/// #[cfg(not(any(
/// feature = "llvm4-0",
/// feature = "llvm5-0",
/// feature = "llvm6-0",
/// feature = "llvm7-0",
/// feature = "llvm8-0",
/// feature = "llvm9-0",
/// feature = "llvm10-0",
/// feature = "llvm11-0",
/// feature = "llvm12-0"
/// )))]
/// false,
/// );
/// let params = &[context.i64_type().const_int(60, false).into(), context.i64_type().const_int(1, false).into()];
/// let callable_value = CallableValue::try_from(asm).unwrap();
/// builder.build_call(callable_value, params, "exit");
/// builder.build_return(None);
/// ```
#[inline]
pub fn create_inline_asm<'ctx>(
&'ctx self,
ty: FunctionType<'ctx>,
assembly: String,
constraints: String,
sideeffects: bool,
alignstack: bool,
#[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))] dialect: Option<
InlineAsmDialect,
>,
#[cfg(not(any(
feature = "llvm4-0",
feature = "llvm5-0",
feature = "llvm6-0",
feature = "llvm7-0",
feature = "llvm8-0",
feature = "llvm9-0",
feature = "llvm10-0",
feature = "llvm11-0",
feature = "llvm12-0"
)))]
can_throw: bool,
) -> PointerValue<'ctx> {
self.context.create_inline_asm(
ty,
assembly,
constraints,
sideeffects,
alignstack,
#[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))]
dialect,
#[cfg(not(any(
feature = "llvm4-0",
feature = "llvm5-0",
feature = "llvm6-0",
feature = "llvm7-0",
feature = "llvm8-0",
feature = "llvm9-0",
feature = "llvm10-0",
feature = "llvm11-0",
feature = "llvm12-0"
)))]
can_throw,
)
}
/// Gets the `VoidType`. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let void_type = context.void_type();
///
/// assert_eq!(void_type.get_context(), context);
/// ```
#[inline]
pub fn void_type(&self) -> VoidType {
self.context.void_type()
}
/// Gets the `IntType` representing 1 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let bool_type = context.bool_type();
///
/// assert_eq!(bool_type.get_bit_width(), 1);
/// assert_eq!(bool_type.get_context(), context);
/// ```
#[inline]
pub fn bool_type(&self) -> IntType {
self.context.bool_type()
}
/// Gets the `IntType` representing 8 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i8_type = context.i8_type();
///
/// assert_eq!(i8_type.get_bit_width(), 8);
/// assert_eq!(i8_type.get_context(), context);
/// ```
#[inline]
pub fn i8_type(&self) -> IntType {
self.context.i8_type()
}
/// Gets the `IntType` representing 16 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i16_type = context.i16_type();
///
/// assert_eq!(i16_type.get_bit_width(), 16);
/// assert_eq!(i16_type.get_context(), context);
/// ```
#[inline]
pub fn i16_type(&self) -> IntType {
    self.context.i16_type()
}
/// Gets the `IntType` representing 32 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i32_type = context.i32_type();
///
/// assert_eq!(i32_type.get_bit_width(), 32);
/// assert_eq!(i32_type.get_context(), context);
/// ```
#[inline]
pub fn i32_type(&self) -> IntType {
    self.context.i32_type()
}
/// Gets the `IntType` representing 64 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i64_type = context.i64_type();
///
/// assert_eq!(i64_type.get_bit_width(), 64);
/// assert_eq!(i64_type.get_context(), context);
/// ```
#[inline]
pub fn i64_type(&self) -> IntType {
    self.context.i64_type()
}
/// Gets the `IntType` representing 128 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i128_type = context.i128_type();
///
/// assert_eq!(i128_type.get_bit_width(), 128);
/// assert_eq!(i128_type.get_context(), context);
/// ```
#[inline]
pub fn i128_type(&self) -> IntType {
    self.context.i128_type()
}
/// Gets the `IntType` representing a custom bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i42_type = context.custom_width_int_type(42);
///
/// assert_eq!(i42_type.get_bit_width(), 42);
/// assert_eq!(i42_type.get_context(), context);
/// ```
// NOTE(review): LLVM caps arbitrary integer widths (presumably at 2^23 - 1 bits) —
// confirm against LLVMIntTypeInContext before documenting a guaranteed range.
#[inline]
pub fn custom_width_int_type(&self, bits: u32) -> IntType {
    self.context.custom_width_int_type(bits)
}
/// Gets the `MetadataType`. It will be assigned the current context.
///
/// # Example
///
/// ```
/// use inkwell::context::Context;
/// use inkwell::values::IntValue;
///
/// let context = Context::create();
/// let md_type = context.metadata_type();
///
/// assert_eq!(md_type.get_context(), context);
/// ```
#[inline]
#[llvm_versions(6.0..=latest)]
pub fn metadata_type(&self) -> MetadataType {
    self.context.metadata_type()
}
/// Gets the `IntType` representing a bit width of a pointer. It will be assigned the referenced context.
///
/// # Example
///
/// ```no_run
/// use inkwell::OptimizationLevel;
/// use inkwell::context::Context;
/// use inkwell::targets::{InitializationConfig, Target};
///
/// Target::initialize_native(&InitializationConfig::default()).expect("Failed to initialize native target");
///
/// let context = Context::create();
/// let module = context.create_module("sum");
/// let execution_engine = module.create_jit_execution_engine(OptimizationLevel::None).unwrap();
/// let target_data = execution_engine.get_target_data();
/// let int_type = context.ptr_sized_int_type(&target_data, None);
/// ```
// NOTE(review): `None` for `address_space` presumably falls back to the default
// (generic) address space — confirm against `Context::ptr_sized_int_type`.
#[inline]
pub fn ptr_sized_int_type(&self, target_data: &TargetData, address_space: Option<AddressSpace>) -> IntType {
    self.context.ptr_sized_int_type(target_data, address_space)
}
/// Gets the `FloatType` representing a 16 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f16_type = context.f16_type();
///
/// assert_eq!(f16_type.get_context(), context);
/// ```
#[inline]
pub fn f16_type(&self) -> FloatType {
    self.context.f16_type()
}
/// Gets the `FloatType` representing a 32 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f32_type = context.f32_type();
///
/// assert_eq!(f32_type.get_context(), context);
/// ```
#[inline]
pub fn f32_type(&self) -> FloatType {
    self.context.f32_type()
}
/// Gets the `FloatType` representing a 64 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f64_type = context.f64_type();
///
/// assert_eq!(f64_type.get_context(), context);
/// ```
#[inline]
pub fn f64_type(&self) -> FloatType {
    self.context.f64_type()
}
/// Gets the `FloatType` representing an 80 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let x86_f80_type = context.x86_f80_type();
///
/// assert_eq!(x86_f80_type.get_context(), context);
/// ```
#[inline]
pub fn x86_f80_type(&self) -> FloatType {
    self.context.x86_f80_type()
}
/// Gets the `FloatType` representing a 128 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f128_type = context.f128_type();
///
/// assert_eq!(f128_type.get_context(), context);
/// ```
// IEEE 754-2008’s binary128 floats according to https://internals.rust-lang.org/t/pre-rfc-introduction-of-half-and-quadruple-precision-floats-f16-and-f128/7521
#[inline]
pub fn f128_type(&self) -> FloatType {
    self.context.f128_type()
}
/// Gets the `FloatType` representing a 128 bit width. It will be assigned the current context.
///
/// PPC is two 64 bits side by side rather than one single 128 bit float.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f128_type = context.ppc_f128_type();
///
/// assert_eq!(f128_type.get_context(), context);
/// ```
// Two 64 bits according to https://internals.rust-lang.org/t/pre-rfc-introduction-of-half-and-quadruple-precision-floats-f16-and-f128/7521
#[inline]
pub fn ppc_f128_type(&self) -> FloatType {
    self.context.ppc_f128_type()
}
/// Creates a `StructType` definition from heterogeneous types in the current `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let struct_type = context.struct_type(&[i16_type.into(), f32_type.into()], false);
///
/// assert_eq!(struct_type.get_field_types(), &[i16_type.into(), f32_type.into()]);
/// ```
// REVIEW: AnyType but VoidType? FunctionType?
#[inline]
pub fn struct_type(&self, field_types: &[BasicTypeEnum], packed: bool) -> StructType {
    self.context.struct_type(field_types, packed)
}
/// Creates an opaque `StructType` with no type definition yet defined.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let struct_type = context.opaque_struct_type("my_struct");
///
/// assert_eq!(struct_type.get_field_types(), &[]);
/// ```
#[inline]
pub fn opaque_struct_type(&self, name: &str) -> StructType {
    self.context.opaque_struct_type(name)
}
/// Creates a constant `StructValue` from constant values.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let f32_one = f32_type.const_float(1.);
/// let i16_two = i16_type.const_int(2, false);
/// let const_struct = context.const_struct(&[i16_two.into(), f32_one.into()], false);
///
/// assert_eq!(const_struct.get_type().get_field_types(), &[i16_type.into(), f32_type.into()]);
/// ```
#[inline]
pub fn const_struct(&self, values: &[BasicValueEnum], packed: bool) -> StructValue {
    self.context.const_struct(values, packed)
}
/// Append a named `BasicBlock` at the end of the referenced `FunctionValue`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let last_basic_block = context.append_basic_block(fn_value, "last");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), entry_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), last_basic_block);
/// ```
// NOTE(review): duplicate block names appear to be auto-uniquified by LLVM —
// confirm before relying on `name` being used verbatim.
#[inline]
pub fn append_basic_block<'ctx>(&'ctx self, function: FunctionValue<'ctx>, name: &str) -> BasicBlock<'ctx> {
    self.context.append_basic_block(function, name)
}
/// Append a named `BasicBlock` after the referenced `BasicBlock`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let last_basic_block = context.insert_basic_block_after(entry_basic_block, "last");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), entry_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), last_basic_block);
/// ```
// REVIEW: What happens when using these methods and the BasicBlock doesn't have a parent?
// Should they be callable at all? Needs testing to see what LLVM will do, I suppose. See below unwrap.
// Maybe need SubTypes: BasicBlock<HasParent>, BasicBlock<Orphan>?
#[inline]
pub fn insert_basic_block_after<'ctx>(&'ctx self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
    self.context.insert_basic_block_after(basic_block, name)
}
/// Prepend a named `BasicBlock` before the referenced `BasicBlock`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let first_basic_block = context.prepend_basic_block(entry_basic_block, "first");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), first_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), entry_basic_block);
/// ```
#[inline]
pub fn prepend_basic_block<'ctx>(&'ctx self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
    self.context.prepend_basic_block(basic_block, name)
}
/// Creates a `MetadataValue` tuple of heterogeneous types (a "Node") for the current context. It can be assigned to a value.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i8_type = context.i8_type();
/// let i8_two = i8_type.const_int(2, false);
/// let f32_type = context.f32_type();
/// let f32_zero = f32_type.const_float(0.);
/// let md_node = context.metadata_node(&[i8_two.into(), f32_zero.into()]);
/// let f32_one = f32_type.const_float(1.);
/// let void_type = context.void_type();
///
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let fn_type = void_type.fn_type(&[f32_type.into()], false);
/// let fn_value = module.add_function("my_func", fn_type, None);
/// let entry_block = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_block);
///
/// let ret_instr = builder.build_return(None);
///
/// assert!(md_node.is_node());
///
/// ret_instr.set_metadata(md_node, 0);
/// ```
// REVIEW: Maybe more helpful to beginners to call this metadata_tuple?
// REVIEW: Seems to be unassigned to anything
#[inline]
pub fn metadata_node<'ctx>(&'ctx self, values: &[BasicMetadataValueEnum<'ctx>]) -> MetadataValue<'ctx> {
    self.context.metadata_node(values)
}
/// Creates a `MetadataValue` string for the current context. It can be assigned to a value.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let md_string = context.metadata_string("Floats are awesome!");
/// let f32_type = context.f32_type();
/// let f32_one = f32_type.const_float(1.);
/// let void_type = context.void_type();
///
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let fn_type = void_type.fn_type(&[f32_type.into()], false);
/// let fn_value = module.add_function("my_func", fn_type, None);
/// let entry_block = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_block);
///
/// let ret_instr = builder.build_return(None);
///
/// assert!(md_string.is_string());
///
/// ret_instr.set_metadata(md_string, 0);
/// ```
// REVIEW: Seems to be unassigned to anything
#[inline]
pub fn metadata_string(&self, string: &str) -> MetadataValue {
    self.context.metadata_string(string)
}
/// Obtains the index of a metadata kind id. If the string doesn't exist, LLVM will add it at index `FIRST_CUSTOM_METADATA_KIND_ID` onward.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
/// use inkwell::values::FIRST_CUSTOM_METADATA_KIND_ID;
///
/// let context = Context::create();
///
/// assert_eq!(context.get_kind_id("dbg"), 0);
/// assert_eq!(context.get_kind_id("tbaa"), 1);
/// assert_eq!(context.get_kind_id("prof"), 2);
///
/// // Custom kind id doesn't exist in LLVM until now:
/// assert_eq!(context.get_kind_id("foo"), FIRST_CUSTOM_METADATA_KIND_ID);
/// ```
#[inline]
pub fn get_kind_id(&self, key: &str) -> u32 {
    self.context.get_kind_id(key)
}
// LLVM 3.9+
// TODO(review): commented-out wrapper for LLVMContextGetDiagnosticHandler below —
// either implement it or remove this dead comment block.
// pub fn get_diagnostic_handler(&self) -> DiagnosticHandler {
// let handler = unsafe {
// LLVMContextGetDiagnosticHandler(self.context)
// };
// // REVIEW: Can this be null?
// DiagnosticHandler::new(handler)
// }
/// Creates an enum `Attribute` in this `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let enum_attribute = context.create_enum_attribute(0, 10);
///
/// assert!(enum_attribute.is_enum());
/// ```
// `kind_id` values can be looked up by name via `Attribute::get_named_enum_kind_id`
// (see the `create_type_attribute` example below).
#[inline]
pub fn create_enum_attribute(&self, kind_id: u32, val: u64) -> Attribute {
    self.context.create_enum_attribute(kind_id, val)
}
/// Creates a string `Attribute` in this `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let string_attribute = context.create_string_attribute("my_key_123", "my_val");
///
/// assert!(string_attribute.is_string());
/// ```
#[inline]
pub fn create_string_attribute(&self, key: &str, val: &str) -> Attribute {
    self.context.create_string_attribute(key, val)
}
/// Create an enum `Attribute` with an `AnyTypeEnum` attached to it.
///
/// # Example
/// ```rust
/// use inkwell::context::Context;
/// use inkwell::attributes::Attribute;
/// use inkwell::types::AnyType;
///
/// let context = Context::create();
/// let kind_id = Attribute::get_named_enum_kind_id("sret");
/// let any_type = context.i32_type().as_any_type_enum();
/// let type_attribute = context.create_type_attribute(
///     kind_id,
///     any_type,
/// );
///
/// assert!(type_attribute.is_type());
/// assert_eq!(type_attribute.get_type_value(), any_type);
/// assert_ne!(type_attribute.get_type_value(), context.i64_type().as_any_type_enum());
/// ```
#[inline]
#[llvm_versions(12.0..=latest)]
pub fn create_type_attribute(&self, kind_id: u32, type_ref: AnyTypeEnum) -> Attribute {
    self.context.create_type_attribute(kind_id, type_ref)
}
/// Creates a const string which may be null terminated.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
/// use inkwell::values::AnyValue;
///
/// let context = Context::create();
/// let string = context.const_string(b"my_string", false);
///
/// assert_eq!(string.print_to_string().to_string(), "[9 x i8] c\"my_string\"");
/// ```
// SubTypes: Should return VectorValue<IntValue<i8>>
#[inline]
pub fn const_string(&self, string: &[u8], null_terminated: bool) -> VectorValue {
    self.context.const_string(string, null_terminated)
}
/// Registers `handler` as this context's LLVM diagnostic callback.
///
/// `void_ptr` is opaque user data; presumably LLVM passes it back as the
/// callback's second argument — confirm against `LLVMContextSetDiagnosticHandler`.
#[inline]
pub(crate) fn set_diagnostic_handler(
    &self,
    handler: extern "C" fn(LLVMDiagnosticInfoRef, *mut c_void),
    void_ptr: *mut c_void,
) {
    self.context.set_diagnostic_handler(handler, void_ptr)
}
}
impl Drop for Context {
    fn drop(&mut self) {
        // Release the underlying LLVM context when the owning `Context` is dropped.
        unsafe {
            LLVMContextDispose(self.context.0);
        }
    }
}
#[cfg(feature = "internal-getters")]
impl LLVMReference<LLVMContextRef> for Context {
    // Exposes the raw context handle; only compiled with the "internal-getters" feature.
    unsafe fn get_ref(&self) -> LLVMContextRef {
        self.context
    }
}
/// A `ContextRef` is a smart pointer allowing borrowed access to a type's `Context`.
#[derive(Debug, PartialEq, Eq)]
pub struct ContextRef<'ctx> {
    // Wrapper around the raw LLVM context pointer.
    context: UnsafeContext,
    // Ties this borrow to the lifetime of the owning `Context` without storing a reference.
    _marker: PhantomData<&'ctx Context>,
}
impl<'ctx> ContextRef<'ctx> {
// NOTE(review): callers presumably must guarantee `context` is a valid, non-null
// LLVMContextRef that outlives 'ctx — confirm and document as a # Safety section.
pub(crate) unsafe fn new(context: LLVMContextRef) -> Self {
    ContextRef {
        context: UnsafeContext::new(context),
        _marker: PhantomData,
    }
}
/// Creates a new `Builder` for a `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// ```
#[inline]
pub fn create_builder(&self) -> Builder<'ctx> {
    self.context.create_builder()
}
/// Creates a new `Module` for a `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// ```
#[inline]
pub fn create_module(&self, name: &str) -> Module<'ctx> {
    self.context.create_module(name)
}
/// Creates a new `Module` for the current `Context` from a `MemoryBuffer`.
///
/// Takes ownership of the `MemoryBuffer` (it is consumed; see the note below).
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// let builder = context.create_builder();
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_val = module.add_function("my_fn", fn_type, None);
/// let basic_block = context.append_basic_block(fn_val, "entry");
///
/// builder.position_at_end(basic_block);
/// builder.build_return(None);
///
/// let memory_buffer = module.write_bitcode_to_memory();
///
/// let module2 = context.create_module_from_ir(memory_buffer).unwrap();
/// ```
// REVIEW: I haven't yet been able to find docs or other wrappers that confirm, but my suspicion
// is that the method needs to take ownership of the MemoryBuffer... otherwise I see what looks like
// a double free in valgrind when the MemoryBuffer drops so we are `forget`ting MemoryBuffer here
// for now until we can confirm this is the correct thing to do
#[inline]
pub fn create_module_from_ir(&self, memory_buffer: MemoryBuffer) -> Result<Module<'ctx>, LLVMString> {
    self.context.create_module_from_ir(memory_buffer)
}
/// Creates an inline asm function pointer.
///
/// # Example
/// ```no_run
/// use std::convert::TryFrom;
/// use inkwell::context::Context;
/// use inkwell::values::CallableValue;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// let builder = context.create_builder();
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_val = module.add_function("my_fn", fn_type, None);
/// let basic_block = context.append_basic_block(fn_val, "entry");
///
/// builder.position_at_end(basic_block);
/// let asm_fn = context.i64_type().fn_type(&[context.i64_type().into(), context.i64_type().into()], false);
/// let asm = context.create_inline_asm(
///     asm_fn,
///     "syscall".to_string(),
///     "=r,{rax},{rdi}".to_string(),
///     true,
///     false,
///     #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))] None,
///     #[cfg(not(any(
///         feature = "llvm4-0",
///         feature = "llvm5-0",
///         feature = "llvm6-0",
///         feature = "llvm7-0",
///         feature = "llvm8-0",
///         feature = "llvm9-0",
///         feature = "llvm10-0",
///         feature = "llvm11-0",
///         feature = "llvm12-0"
///     )))]
///     false,
/// );
/// let params = &[context.i64_type().const_int(60, false).into(), context.i64_type().const_int(1, false).into()];
/// let callable_value = CallableValue::try_from(asm).unwrap();
/// builder.build_call(callable_value, params, "exit");
/// builder.build_return(None);
/// ```
#[inline]
pub fn create_inline_asm(
    &self,
    ty: FunctionType<'ctx>,
    assembly: String,
    constraints: String,
    sideeffects: bool,
    alignstack: bool,
    #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))] dialect: Option<
        InlineAsmDialect,
    >,
    #[cfg(not(any(
        feature = "llvm4-0",
        feature = "llvm5-0",
        feature = "llvm6-0",
        feature = "llvm7-0",
        feature = "llvm8-0",
        feature = "llvm9-0",
        feature = "llvm10-0",
        feature = "llvm11-0",
        feature = "llvm12-0"
    )))]
    can_throw: bool,
) -> PointerValue<'ctx> {
    self.context.create_inline_asm(
        ty,
        assembly,
        constraints,
        sideeffects,
        alignstack,
        #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))]
        dialect,
        #[cfg(not(any(
            feature = "llvm4-0",
            feature = "llvm5-0",
            feature = "llvm6-0",
            feature = "llvm7-0",
            feature = "llvm8-0",
            feature = "llvm9-0",
            feature = "llvm10-0",
            feature = "llvm11-0",
            feature = "llvm12-0"
        )))]
        can_throw,
    )
}
/// Gets the `VoidType`. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let void_type = context.void_type();
///
/// assert_eq!(void_type.get_context(), context);
/// ```
#[inline]
pub fn void_type(&self) -> VoidType<'ctx> {
    self.context.void_type()
}
/// Gets the `IntType` representing 1 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let bool_type = context.bool_type();
///
/// assert_eq!(bool_type.get_bit_width(), 1);
/// assert_eq!(bool_type.get_context(), context);
/// ```
#[inline]
pub fn bool_type(&self) -> IntType<'ctx> {
    self.context.bool_type()
}
/// Gets the `IntType` representing 8 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i8_type = context.i8_type();
///
/// assert_eq!(i8_type.get_bit_width(), 8);
/// assert_eq!(i8_type.get_context(), context);
/// ```
#[inline]
pub fn i8_type(&self) -> IntType<'ctx> {
    self.context.i8_type()
}
/// Gets the `IntType` representing 16 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i16_type = context.i16_type();
///
/// assert_eq!(i16_type.get_bit_width(), 16);
/// assert_eq!(i16_type.get_context(), context);
/// ```
#[inline]
pub fn i16_type(&self) -> IntType<'ctx> {
    self.context.i16_type()
}
/// Gets the `IntType` representing 32 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i32_type = context.i32_type();
///
/// assert_eq!(i32_type.get_bit_width(), 32);
/// assert_eq!(i32_type.get_context(), context);
/// ```
#[inline]
pub fn i32_type(&self) -> IntType<'ctx> {
    self.context.i32_type()
}
/// Gets the `IntType` representing 64 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i64_type = context.i64_type();
///
/// assert_eq!(i64_type.get_bit_width(), 64);
/// assert_eq!(i64_type.get_context(), context);
/// ```
#[inline]
pub fn i64_type(&self) -> IntType<'ctx> {
    self.context.i64_type()
}
/// Gets the `IntType` representing 128 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i128_type = context.i128_type();
///
/// assert_eq!(i128_type.get_bit_width(), 128);
/// assert_eq!(i128_type.get_context(), context);
/// ```
#[inline]
pub fn i128_type(&self) -> IntType<'ctx> {
    self.context.i128_type()
}
/// Gets the `IntType` representing a custom bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i42_type = context.custom_width_int_type(42);
///
/// assert_eq!(i42_type.get_bit_width(), 42);
/// assert_eq!(i42_type.get_context(), context);
/// ```
// NOTE(review): LLVM caps arbitrary integer widths (presumably at 2^23 - 1 bits) —
// confirm against LLVMIntTypeInContext before documenting a guaranteed range.
#[inline]
pub fn custom_width_int_type(&self, bits: u32) -> IntType<'ctx> {
    self.context.custom_width_int_type(bits)
}
/// Gets the `MetadataType`. It will be assigned the current context.
///
/// # Example
///
/// ```
/// use inkwell::context::Context;
/// use inkwell::values::IntValue;
///
/// let context = Context::create();
/// let md_type = context.metadata_type();
///
/// assert_eq!(md_type.get_context(), context);
/// ```
#[inline]
#[llvm_versions(6.0..=latest)]
pub fn metadata_type(&self) -> MetadataType<'ctx> {
    self.context.metadata_type()
}
/// Gets the `IntType` representing a bit width of a pointer. It will be assigned the referenced context.
///
/// # Example
///
/// ```no_run
/// use inkwell::OptimizationLevel;
/// use inkwell::context::Context;
/// use inkwell::targets::{InitializationConfig, Target};
///
/// Target::initialize_native(&InitializationConfig::default()).expect("Failed to initialize native target");
///
/// let context = Context::create();
/// let module = context.create_module("sum");
/// let execution_engine = module.create_jit_execution_engine(OptimizationLevel::None).unwrap();
/// let target_data = execution_engine.get_target_data();
/// let int_type = context.ptr_sized_int_type(&target_data, None);
/// ```
#[inline]
pub fn ptr_sized_int_type(&self, target_data: &TargetData, address_space: Option<AddressSpace>) -> IntType<'ctx> {
    self.context.ptr_sized_int_type(target_data, address_space)
}
/// Gets the `FloatType` representing a 16 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f16_type = context.f16_type();
///
/// assert_eq!(f16_type.get_context(), context);
/// ```
#[inline]
pub fn f16_type(&self) -> FloatType<'ctx> {
    self.context.f16_type()
}
/// Gets the `FloatType` representing a 32 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f32_type = context.f32_type();
///
/// assert_eq!(f32_type.get_context(), context);
/// ```
#[inline]
pub fn f32_type(&self) -> FloatType<'ctx> {
    self.context.f32_type()
}
/// Gets the `FloatType` representing a 64 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f64_type = context.f64_type();
///
/// assert_eq!(f64_type.get_context(), context);
/// ```
#[inline]
pub fn f64_type(&self) -> FloatType<'ctx> {
    self.context.f64_type()
}
/// Gets the `FloatType` representing an 80 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let x86_f80_type = context.x86_f80_type();
///
/// assert_eq!(x86_f80_type.get_context(), context);
/// ```
#[inline]
pub fn x86_f80_type(&self) -> FloatType<'ctx> {
    self.context.x86_f80_type()
}
/// Gets the `FloatType` representing a 128 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f128_type = context.f128_type();
///
/// assert_eq!(f128_type.get_context(), context);
/// ```
// IEEE 754-2008’s binary128 floats according to https://internals.rust-lang.org/t/pre-rfc-introduction-of-half-and-quadruple-precision-floats-f16-and-f128/7521
#[inline]
pub fn f128_type(&self) -> FloatType<'ctx> {
    self.context.f128_type()
}
/// Gets the `FloatType` representing a 128 bit width. It will be assigned the current context.
///
/// PPC is two 64 bits side by side rather than one single 128 bit float.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f128_type = context.ppc_f128_type();
///
/// assert_eq!(f128_type.get_context(), context);
/// ```
// Two 64 bits according to https://internals.rust-lang.org/t/pre-rfc-introduction-of-half-and-quadruple-precision-floats-f16-and-f128/7521
#[inline]
pub fn ppc_f128_type(&self) -> FloatType<'ctx> {
    self.context.ppc_f128_type()
}
/// Creates a `StructType` definition from heterogeneous types in the current `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let struct_type = context.struct_type(&[i16_type.into(), f32_type.into()], false);
///
/// assert_eq!(struct_type.get_field_types(), &[i16_type.into(), f32_type.into()]);
/// ```
// REVIEW: AnyType but VoidType? FunctionType?
#[inline]
pub fn struct_type(&self, field_types: &[BasicTypeEnum<'ctx>], packed: bool) -> StructType<'ctx> {
    self.context.struct_type(field_types, packed)
}
/// Creates an opaque `StructType` with no type definition yet defined.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let struct_type = context.opaque_struct_type("my_struct");
///
/// assert_eq!(struct_type.get_field_types(), &[]);
/// ```
#[inline]
pub fn opaque_struct_type(&self, name: &str) -> StructType<'ctx> {
    self.context.opaque_struct_type(name)
}
/// Creates a constant `StructValue` from constant values.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let f32_one = f32_type.const_float(1.);
/// let i16_two = i16_type.const_int(2, false);
/// let const_struct = context.const_struct(&[i16_two.into(), f32_one.into()], false);
///
/// assert_eq!(const_struct.get_type().get_field_types(), &[i16_type.into(), f32_type.into()]);
/// ```
#[inline]
pub fn const_struct(&self, values: &[BasicValueEnum<'ctx>], packed: bool) -> StructValue<'ctx> {
    self.context.const_struct(values, packed)
}
/// Append a named `BasicBlock` at the end of the referenced `FunctionValue`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let last_basic_block = context.append_basic_block(fn_value, "last");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), entry_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), last_basic_block);
/// ```
// NOTE(review): duplicate block names appear to be auto-uniquified by LLVM —
// confirm before relying on `name` being used verbatim.
#[inline]
pub fn append_basic_block(&self, function: FunctionValue<'ctx>, name: &str) -> BasicBlock<'ctx> {
    self.context.append_basic_block(function, name)
}
/// Append a named `BasicBlock` after the referenced `BasicBlock`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let last_basic_block = context.insert_basic_block_after(entry_basic_block, "last");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), entry_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), last_basic_block);
/// ```
// REVIEW: What happens when using these methods and the BasicBlock doesn't have a parent?
// Should they be callable at all? Needs testing to see what LLVM will do, I suppose. See below unwrap.
// Maybe need SubTypes: BasicBlock<HasParent>, BasicBlock<Orphan>?
#[inline]
pub fn insert_basic_block_after(&self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
    self.context.insert_basic_block_after(basic_block, name)
}
/// Prepend a named `BasicBlock` before the referenced `BasicBlock`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let first_basic_block = context.prepend_basic_block(entry_basic_block, "first");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), first_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), entry_basic_block);
/// ```
#[inline]
pub fn prepend_basic_block(&self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
    self.context.prepend_basic_block(basic_block, name)
}
/// Creates a `MetadataValue` tuple of heterogeneous types (a "Node") for the current context. It can be assigned to a value.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i8_type = context.i8_type();
/// let i8_two = i8_type.const_int(2, false);
/// let f32_type = context.f32_type();
/// let f32_zero = f32_type.const_float(0.);
/// let md_node = context.metadata_node(&[i8_two.into(), f32_zero.into()]);
/// let f32_one = f32_type.const_float(1.);
/// let void_type = context.void_type();
///
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let fn_type = void_type.fn_type(&[f32_type.into()], false);
/// let fn_value = module.add_function("my_func", fn_type, None);
/// let entry_block = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_block);
///
/// let ret_instr = builder.build_return(None);
///
/// assert!(md_node.is_node());
///
/// ret_instr.set_metadata(md_node, 0);
/// ```
// REVIEW: Maybe more helpful to beginners to call this metadata_tuple?
// REVIEW: Seems to be unassigned to anything
#[inline]
pub fn metadata_node(&self, values: &[BasicMetadataValueEnum<'ctx>]) -> MetadataValue<'ctx> {
    // Thin delegation to the shared `ContextImpl`.
    self.context.metadata_node(values)
}
/// Creates a `MetadataValue` string for the current context. It can be assigned to a value.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let md_string = context.metadata_string("Floats are awesome!");
/// let f32_type = context.f32_type();
/// let f32_one = f32_type.const_float(1.);
/// let void_type = context.void_type();
///
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let fn_type = void_type.fn_type(&[f32_type.into()], false);
/// let fn_value = module.add_function("my_func", fn_type, None);
/// let entry_block = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_block);
///
/// let ret_instr = builder.build_return(None);
///
/// assert!(md_string.is_string());
///
/// ret_instr.set_metadata(md_string, 0);
/// ```
// REVIEW: Seems to be unassigned to anything
#[inline]
pub fn metadata_string(&self, string: &str) -> MetadataValue<'ctx> {
    // Thin delegation to the shared `ContextImpl`.
    self.context.metadata_string(string)
}
/// Obtains the index of a metadata kind id. If the string doesn't exist, LLVM will add it at index `FIRST_CUSTOM_METADATA_KIND_ID` onward.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
/// use inkwell::values::FIRST_CUSTOM_METADATA_KIND_ID;
///
/// let context = Context::create();
///
/// assert_eq!(context.get_kind_id("dbg"), 0);
/// assert_eq!(context.get_kind_id("tbaa"), 1);
/// assert_eq!(context.get_kind_id("prof"), 2);
///
/// // Custom kind id doesn't exist in LLVM until now:
/// assert_eq!(context.get_kind_id("foo"), FIRST_CUSTOM_METADATA_KIND_ID);
/// ```
#[inline]
pub fn get_kind_id(&self, key: &str) -> u32 {
    // Thin delegation to the shared `ContextImpl`.
    self.context.get_kind_id(key)
}
/// Creates an enum `Attribute` in this `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let enum_attribute = context.create_enum_attribute(0, 10);
///
/// assert!(enum_attribute.is_enum());
/// ```
#[inline]
pub fn create_enum_attribute(&self, kind_id: u32, val: u64) -> Attribute {
    // Thin delegation to the shared `ContextImpl`.
    self.context.create_enum_attribute(kind_id, val)
}
/// Creates a string `Attribute` in this `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let string_attribute = context.create_string_attribute("my_key_123", "my_val");
///
/// assert!(string_attribute.is_string());
/// ```
#[inline]
pub fn create_string_attribute(&self, key: &str, val: &str) -> Attribute {
    // Thin delegation to the shared `ContextImpl`.
    self.context.create_string_attribute(key, val)
}
/// Create an enum `Attribute` with an `AnyTypeEnum` attached to it.
///
/// # Example
/// ```rust
/// use inkwell::context::Context;
/// use inkwell::attributes::Attribute;
/// use inkwell::types::AnyType;
///
/// let context = Context::create();
/// let kind_id = Attribute::get_named_enum_kind_id("sret");
/// let any_type = context.i32_type().as_any_type_enum();
/// let type_attribute = context.create_type_attribute(
///     kind_id,
///     any_type,
/// );
///
/// assert!(type_attribute.is_type());
/// assert_eq!(type_attribute.get_type_value(), any_type);
/// assert_ne!(type_attribute.get_type_value(), context.i64_type().as_any_type_enum());
/// ```
#[inline]
#[llvm_versions(12.0..=latest)]
pub fn create_type_attribute(&self, kind_id: u32, type_ref: AnyTypeEnum) -> Attribute {
    // Type attributes only exist from LLVM 12 onward, hence the version gate.
    self.context.create_type_attribute(kind_id, type_ref)
}
/// Creates a const string which may be null terminated.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
/// use inkwell::values::AnyValue;
///
/// let context = Context::create();
/// let string = context.const_string(b"my_string", false);
///
/// assert_eq!(string.print_to_string().to_string(), "[9 x i8] c\"my_string\"");
/// ```
// SubTypes: Should return VectorValue<IntValue<i8>>
#[inline]
pub fn const_string(&self, string: &[u8], null_terminated: bool) -> VectorValue<'ctx> {
    // Thin delegation to the shared `ContextImpl`.
    self.context.const_string(string, null_terminated)
}
/// Registers `handler` as this context's LLVM diagnostic callback.
/// `void_ptr` is opaque user data that LLVM passes back to the handler on
/// every diagnostic it reports.
#[inline]
pub(crate) fn set_diagnostic_handler(
    &self,
    handler: extern "C" fn(LLVMDiagnosticInfoRef, *mut c_void),
    void_ptr: *mut c_void,
) {
    self.context.set_diagnostic_handler(handler, void_ptr)
}
}
// Exposes the raw LLVMContextRef to downstream crates that opt into the
// "internal-getters" feature. Unsafe: the caller must not outlive the context.
#[cfg(feature = "internal-getters")]
impl LLVMReference<LLVMContextRef> for ContextRef<'_> {
    unsafe fn get_ref(&self) -> LLVMContextRef {
        self.context
    }
}
// Fixed create_inline_asm cfgs
//! A `Context` is an opaque owner and manager of core global data.
#[llvm_versions(7.0..=latest)]
use crate::InlineAsmDialect;
use libc::c_void;
#[llvm_versions(4.0..7.0)]
use llvm_sys::core::LLVMConstInlineAsm;
#[llvm_versions(12.0..=latest)]
use llvm_sys::core::LLVMCreateTypeAttribute;
#[llvm_versions(7.0..=latest)]
use llvm_sys::core::LLVMGetInlineAsm;
#[llvm_versions(6.0..=latest)]
use llvm_sys::core::LLVMMetadataTypeInContext;
use llvm_sys::core::{
LLVMAppendBasicBlockInContext, LLVMConstStringInContext, LLVMConstStructInContext, LLVMContextCreate,
LLVMContextDispose, LLVMContextSetDiagnosticHandler, LLVMCreateBuilderInContext, LLVMCreateEnumAttribute,
LLVMCreateStringAttribute, LLVMDoubleTypeInContext, LLVMFP128TypeInContext, LLVMFloatTypeInContext,
LLVMGetGlobalContext, LLVMGetMDKindIDInContext, LLVMHalfTypeInContext, LLVMInsertBasicBlockInContext,
LLVMInt16TypeInContext, LLVMInt1TypeInContext, LLVMInt32TypeInContext, LLVMInt64TypeInContext,
LLVMInt8TypeInContext, LLVMIntTypeInContext, LLVMMDNodeInContext, LLVMMDStringInContext,
LLVMModuleCreateWithNameInContext, LLVMPPCFP128TypeInContext, LLVMStructCreateNamed, LLVMStructTypeInContext,
LLVMVoidTypeInContext, LLVMX86FP80TypeInContext,
};
use llvm_sys::ir_reader::LLVMParseIRInContext;
use llvm_sys::prelude::{LLVMContextRef, LLVMDiagnosticInfoRef, LLVMTypeRef, LLVMValueRef};
use llvm_sys::target::{LLVMIntPtrTypeForASInContext, LLVMIntPtrTypeInContext};
use once_cell::sync::Lazy;
use parking_lot::{Mutex, MutexGuard};
use crate::attributes::Attribute;
use crate::basic_block::BasicBlock;
use crate::builder::Builder;
use crate::memory_buffer::MemoryBuffer;
use crate::module::Module;
use crate::support::{to_c_str, LLVMString};
use crate::targets::TargetData;
#[llvm_versions(12.0..=latest)]
use crate::types::AnyTypeEnum;
#[llvm_versions(6.0..=latest)]
use crate::types::MetadataType;
use crate::types::{AsTypeRef, BasicTypeEnum, FloatType, FunctionType, IntType, StructType, VoidType};
use crate::values::{
AsValueRef, BasicMetadataValueEnum, BasicValueEnum, FunctionValue, MetadataValue, PointerValue, StructValue,
VectorValue,
};
use crate::AddressSpace;
#[cfg(feature = "internal-getters")]
use crate::LLVMReference;
use std::marker::PhantomData;
use std::mem::{forget, ManuallyDrop};
use std::ops::Deref;
use std::ptr;
use std::thread_local;
// The idea of using a Mutex<Context> here and a thread local'd MutexGuard<Context> in
// GLOBAL_CTX_LOCK is to ensure two things:
// 1) Only one thread has access to the global context at a time.
// 2) The thread has shared access across different points in the thread.
// This is still technically unsafe because another program in the same process
// could also be accessing the global context via the C API. `get_global` has been
// marked unsafe for this reason. Iff this isn't the case then this should be fully safe.
static GLOBAL_CTX: Lazy<Mutex<Context>> = Lazy::new(|| unsafe { Mutex::new(Context::new(LLVMGetGlobalContext())) });
thread_local! {
    // Each thread takes the global lock lazily on first use and keeps the
    // guard alive for the remainder of the thread (see `Context::get_global`).
    pub(crate) static GLOBAL_CTX_LOCK: Lazy<MutexGuard<'static, Context>> = Lazy::new(|| {
        GLOBAL_CTX.lock()
    });
}
/// This struct allows us to share method impls across Context and ContextRef types
// Newtype over the raw LLVM context pointer; `ContextImpl::new` asserts non-null.
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct ContextImpl(pub(crate) LLVMContextRef);
// Shared private implementation backing both `Context` and `ContextRef`.
// Every method operates directly on the raw context pointer in `self.0`.
impl ContextImpl {
    // Invariant: `context` must be a valid, non-null LLVMContextRef.
    pub(crate) unsafe fn new(context: LLVMContextRef) -> Self {
        assert!(!context.is_null());
        ContextImpl(context)
    }
    fn create_builder<'ctx>(&self) -> Builder<'ctx> {
        unsafe { Builder::new(LLVMCreateBuilderInContext(self.0)) }
    }
    fn create_module<'ctx>(&self, name: &str) -> Module<'ctx> {
        let c_string = to_c_str(name);
        unsafe { Module::new(LLVMModuleCreateWithNameInContext(c_string.as_ptr(), self.0)) }
    }
    fn create_module_from_ir<'ctx>(&self, memory_buffer: MemoryBuffer) -> Result<Module<'ctx>, LLVMString> {
        let mut module = ptr::null_mut();
        let mut err_str = ptr::null_mut();
        let code = unsafe { LLVMParseIRInContext(self.0, memory_buffer.memory_buffer, &mut module, &mut err_str) };
        // The IR parser appears to take ownership of the buffer, so skip
        // MemoryBuffer's Drop to avoid a double free (see the REVIEW note on
        // `Context::create_module_from_ir`).
        forget(memory_buffer);
        if code == 0 {
            unsafe {
                return Ok(Module::new(module));
            }
        }
        // Non-zero return code: `err_str` was populated by LLVM.
        unsafe { Err(LLVMString::new(err_str)) }
    }
    // cfg summary: LLVM 4-6 only expose LLVMConstInlineAsm; LLVM 7+ use
    // LLVMGetInlineAsm (which adds `dialect`); LLVM 13+ additionally take `can_throw`.
    fn create_inline_asm<'ctx>(
        &self,
        ty: FunctionType<'ctx>,
        mut assembly: String,
        mut constraints: String,
        sideeffects: bool,
        alignstack: bool,
        #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))] dialect: Option<
            InlineAsmDialect,
        >,
        #[cfg(not(any(
            feature = "llvm4-0",
            feature = "llvm5-0",
            feature = "llvm6-0",
            feature = "llvm7-0",
            feature = "llvm8-0",
            feature = "llvm9-0",
            feature = "llvm10-0",
            feature = "llvm11-0",
            feature = "llvm12-0"
        )))]
        can_throw: bool,
    ) -> PointerValue<'ctx> {
        #[cfg(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0"))]
        let value = unsafe {
            LLVMConstInlineAsm(
                ty.as_type_ref(),
                assembly.as_mut_ptr() as *mut ::libc::c_char,
                assembly.len(),
                constraints.as_mut_ptr() as *mut ::libc::c_char,
                constraints.len(),
                sideeffects as i32,
                alignstack as i32,
            )
        };
        #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))]
        let value = unsafe {
            LLVMGetInlineAsm(
                ty.as_type_ref(),
                assembly.as_mut_ptr() as *mut ::libc::c_char,
                assembly.len(),
                constraints.as_mut_ptr() as *mut ::libc::c_char,
                constraints.len(),
                sideeffects as i32,
                alignstack as i32,
                // AT&T syntax is the default dialect when none was requested.
                dialect.unwrap_or(InlineAsmDialect::ATT).into(),
                #[cfg(not(any(
                    feature = "llvm4-0",
                    feature = "llvm5-0",
                    feature = "llvm6-0",
                    feature = "llvm7-0",
                    feature = "llvm8-0",
                    feature = "llvm9-0",
                    feature = "llvm10-0",
                    feature = "llvm11-0",
                    feature = "llvm12-0"
                )))]
                {
                    can_throw as i32
                },
            )
        };
        unsafe { PointerValue::new(value) }
    }
    fn void_type<'ctx>(&self) -> VoidType<'ctx> {
        unsafe { VoidType::new(LLVMVoidTypeInContext(self.0)) }
    }
    fn bool_type<'ctx>(&self) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMInt1TypeInContext(self.0)) }
    }
    fn i8_type<'ctx>(&self) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMInt8TypeInContext(self.0)) }
    }
    fn i16_type<'ctx>(&self) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMInt16TypeInContext(self.0)) }
    }
    fn i32_type<'ctx>(&self) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMInt32TypeInContext(self.0)) }
    }
    fn i64_type<'ctx>(&self) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMInt64TypeInContext(self.0)) }
    }
    // TODO: Call LLVMInt128TypeInContext in applicable versions
    fn i128_type<'ctx>(&self) -> IntType<'ctx> {
        self.custom_width_int_type(128)
    }
    fn custom_width_int_type<'ctx>(&self, bits: u32) -> IntType<'ctx> {
        unsafe { IntType::new(LLVMIntTypeInContext(self.0, bits)) }
    }
    #[llvm_versions(6.0..=latest)]
    fn metadata_type<'ctx>(&self) -> MetadataType<'ctx> {
        unsafe { MetadataType::new(LLVMMetadataTypeInContext(self.0)) }
    }
    fn ptr_sized_int_type<'ctx>(&self, target_data: &TargetData, address_space: Option<AddressSpace>) -> IntType<'ctx> {
        // With an explicit address space LLVM can report a different pointer width.
        let int_type_ptr = match address_space {
            Some(address_space) => unsafe {
                LLVMIntPtrTypeForASInContext(self.0, target_data.target_data, address_space as u32)
            },
            None => unsafe { LLVMIntPtrTypeInContext(self.0, target_data.target_data) },
        };
        unsafe { IntType::new(int_type_ptr) }
    }
    fn f16_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMHalfTypeInContext(self.0)) }
    }
    fn f32_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMFloatTypeInContext(self.0)) }
    }
    fn f64_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMDoubleTypeInContext(self.0)) }
    }
    fn x86_f80_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMX86FP80TypeInContext(self.0)) }
    }
    fn f128_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMFP128TypeInContext(self.0)) }
    }
    fn ppc_f128_type<'ctx>(&self) -> FloatType<'ctx> {
        unsafe { FloatType::new(LLVMPPCFP128TypeInContext(self.0)) }
    }
    fn struct_type<'ctx>(&self, field_types: &[BasicTypeEnum], packed: bool) -> StructType<'ctx> {
        let mut field_types: Vec<LLVMTypeRef> = field_types.iter().map(|val| val.as_type_ref()).collect();
        unsafe {
            StructType::new(LLVMStructTypeInContext(
                self.0,
                field_types.as_mut_ptr(),
                field_types.len() as u32,
                packed as i32,
            ))
        }
    }
    fn opaque_struct_type<'ctx>(&self, name: &str) -> StructType<'ctx> {
        let c_string = to_c_str(name);
        unsafe { StructType::new(LLVMStructCreateNamed(self.0, c_string.as_ptr())) }
    }
    fn const_struct<'ctx>(&self, values: &[BasicValueEnum], packed: bool) -> StructValue<'ctx> {
        let mut args: Vec<LLVMValueRef> = values.iter().map(|val| val.as_value_ref()).collect();
        unsafe {
            StructValue::new(LLVMConstStructInContext(
                self.0,
                args.as_mut_ptr(),
                args.len() as u32,
                packed as i32,
            ))
        }
    }
    fn append_basic_block<'ctx>(&self, function: FunctionValue<'ctx>, name: &str) -> BasicBlock<'ctx> {
        let c_string = to_c_str(name);
        unsafe {
            BasicBlock::new(LLVMAppendBasicBlockInContext(
                self.0,
                function.as_value_ref(),
                c_string.as_ptr(),
            ))
            .expect("Appending basic block should never fail")
        }
    }
    fn insert_basic_block_after<'ctx>(&self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
        // If a block follows, insert before it; otherwise append to the end of
        // the parent function (see REVIEW note on the public wrapper re: orphan blocks).
        match basic_block.get_next_basic_block() {
            Some(next_basic_block) => self.prepend_basic_block(next_basic_block, name),
            None => {
                let parent_fn = basic_block.get_parent().unwrap();
                self.append_basic_block(parent_fn, name)
            },
        }
    }
    fn prepend_basic_block<'ctx>(&self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
        let c_string = to_c_str(name);
        unsafe {
            BasicBlock::new(LLVMInsertBasicBlockInContext(
                self.0,
                basic_block.basic_block,
                c_string.as_ptr(),
            ))
            .expect("Prepending basic block should never fail")
        }
    }
    fn metadata_node<'ctx>(&self, values: &[BasicMetadataValueEnum<'ctx>]) -> MetadataValue<'ctx> {
        let mut tuple_values: Vec<LLVMValueRef> = values.iter().map(|val| val.as_value_ref()).collect();
        unsafe {
            MetadataValue::new(LLVMMDNodeInContext(
                self.0,
                tuple_values.as_mut_ptr(),
                tuple_values.len() as u32,
            ))
        }
    }
    fn metadata_string<'ctx>(&self, string: &str) -> MetadataValue<'ctx> {
        let c_string = to_c_str(string);
        unsafe { MetadataValue::new(LLVMMDStringInContext(self.0, c_string.as_ptr(), string.len() as u32)) }
    }
    fn get_kind_id(&self, key: &str) -> u32 {
        // Length is passed explicitly, so no NUL-terminated copy is needed here.
        unsafe { LLVMGetMDKindIDInContext(self.0, key.as_ptr() as *const ::libc::c_char, key.len() as u32) }
    }
    fn create_enum_attribute(&self, kind_id: u32, val: u64) -> Attribute {
        unsafe { Attribute::new(LLVMCreateEnumAttribute(self.0, kind_id, val)) }
    }
    fn create_string_attribute(&self, key: &str, val: &str) -> Attribute {
        unsafe {
            Attribute::new(LLVMCreateStringAttribute(
                self.0,
                key.as_ptr() as *const _,
                key.len() as u32,
                val.as_ptr() as *const _,
                val.len() as u32,
            ))
        }
    }
    #[llvm_versions(12.0..=latest)]
    fn create_type_attribute(&self, kind_id: u32, type_ref: AnyTypeEnum) -> Attribute {
        unsafe { Attribute::new(LLVMCreateTypeAttribute(self.0, kind_id, type_ref.as_type_ref())) }
    }
    fn const_string<'ctx>(&self, string: &[u8], null_terminated: bool) -> VectorValue<'ctx> {
        // NOTE(review): the final flag appears to mean "don't null terminate",
        // hence the negation — confirm against the LLVM-C docs.
        unsafe {
            VectorValue::new(LLVMConstStringInContext(
                self.0,
                string.as_ptr() as *const ::libc::c_char,
                string.len() as u32,
                !null_terminated as i32,
            ))
        }
    }
    fn set_diagnostic_handler(
        &self,
        handler: extern "C" fn(LLVMDiagnosticInfoRef, *mut c_void),
        void_ptr: *mut c_void,
    ) {
        unsafe { LLVMContextSetDiagnosticHandler(self.0, Some(handler), void_ptr) }
    }
}
// Cross-type equality: a Context and a ContextRef are equal when they wrap the
// same underlying context (ContextImpl derives PartialEq on the raw pointer).
impl PartialEq<Context> for ContextRef<'_> {
    fn eq(&self, other: &Context) -> bool {
        self.context == other.context
    }
}
impl PartialEq<ContextRef<'_>> for Context {
    fn eq(&self, other: &ContextRef<'_>) -> bool {
        self.context == other.context
    }
}
/// A `Context` is a container for all LLVM entities including `Module`s.
///
/// A `Context` is not thread safe and cannot be shared across threads. Multiple `Context`s
/// can, however, execute on different threads simultaneously according to the LLVM docs.
#[derive(Debug, PartialEq, Eq)]
pub struct Context {
    pub(crate) context: ContextImpl, // shared impl, also used by ContextRef
}
unsafe impl Send for Context {}
impl Context {
/// Wraps a raw context pointer in a `Context`.
///
/// # Safety
///
/// `context` must be a valid `LLVMContextRef`; non-null is asserted by
/// `ContextImpl::new`. NOTE(review): the result is treated as owning the
/// pointer — confirm against this type's Drop impl.
pub(crate) unsafe fn new(context: LLVMContextRef) -> Self {
    Context {
        context: ContextImpl::new(context),
    }
}
/// Creates a new `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// ```
pub fn create() -> Self {
    // LLVMContextCreate never returns null, so the non-null invariant holds.
    unsafe { Context::new(LLVMContextCreate()) }
}
/// Gets a `Mutex<Context>` which points to the global context singleton.
/// This function is marked unsafe because another program within the same
/// process could easily gain access to the same LLVM context pointer and bypass
/// our `Mutex`. Therefore, using `Context::create()` is the preferred context
/// creation function when you do not specifically need the global context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = unsafe {
///     Context::get_global(|_global_context| {
///         // do stuff
///     })
/// };
/// ```
pub unsafe fn get_global<F, R>(func: F) -> R
where
    F: FnOnce(&Context) -> R,
{
    // The thread-local guard serializes access to the global context (see GLOBAL_CTX).
    GLOBAL_CTX_LOCK.with(|lazy| func(&*lazy))
}
/// Creates a new `Builder` for a `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// ```
#[inline]
pub fn create_builder(&self) -> Builder {
    // Thin delegation to the shared `ContextImpl`.
    self.context.create_builder()
}
/// Creates a new `Module` for a `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// ```
#[inline]
pub fn create_module(&self, name: &str) -> Module {
    // Thin delegation to the shared `ContextImpl`.
    self.context.create_module(name)
}
/// Creates a new `Module` for the current `Context` from a `MemoryBuffer`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// let builder = context.create_builder();
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_val = module.add_function("my_fn", fn_type, None);
/// let basic_block = context.append_basic_block(fn_val, "entry");
///
/// builder.position_at_end(basic_block);
/// builder.build_return(None);
///
/// let memory_buffer = module.write_bitcode_to_memory();
///
/// let module2 = context.create_module_from_ir(memory_buffer).unwrap();
/// ```
// REVIEW: I haven't yet been able to find docs or other wrappers that confirm, but my suspicion
// is that the method needs to take ownership of the MemoryBuffer... otherwise I see what looks like
// a double free in valgrind when the MemoryBuffer drops so we are `forget`ting MemoryBuffer here
// for now until we can confirm this is the correct thing to do
#[inline]
pub fn create_module_from_ir(&self, memory_buffer: MemoryBuffer) -> Result<Module, LLVMString> {
    self.context.create_module_from_ir(memory_buffer)
}
/// Creates a inline asm function pointer.
///
/// The `dialect` parameter exists on LLVM 7+ and `can_throw` on LLVM 13+,
/// which is why both the signature and the call sites are cfg-gated by feature.
///
/// # Example
/// ```no_run
/// use std::convert::TryFrom;
/// use inkwell::context::Context;
/// use inkwell::values::CallableValue;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// let builder = context.create_builder();
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_val = module.add_function("my_fn", fn_type, None);
/// let basic_block = context.append_basic_block(fn_val, "entry");
///
/// builder.position_at_end(basic_block);
/// let asm_fn = context.i64_type().fn_type(&[context.i64_type().into(), context.i64_type().into()], false);
/// let asm = context.create_inline_asm(
///     asm_fn,
///     "syscall".to_string(),
///     "=r,{rax},{rdi}".to_string(),
///     true,
///     false,
///     #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))] None,
///     #[cfg(not(any(
///         feature = "llvm4-0",
///         feature = "llvm5-0",
///         feature = "llvm6-0",
///         feature = "llvm7-0",
///         feature = "llvm8-0",
///         feature = "llvm9-0",
///         feature = "llvm10-0",
///         feature = "llvm11-0",
///         feature = "llvm12-0"
///     )))]
///     false,
/// );
/// let params = &[context.i64_type().const_int(60, false).into(), context.i64_type().const_int(1, false).into()];
/// let callable_value = CallableValue::try_from(asm).unwrap();
/// builder.build_call(callable_value, params, "exit");
/// builder.build_return(None);
/// ```
#[inline]
pub fn create_inline_asm<'ctx>(
    &'ctx self,
    ty: FunctionType<'ctx>,
    assembly: String,
    constraints: String,
    sideeffects: bool,
    alignstack: bool,
    #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))] dialect: Option<
        InlineAsmDialect,
    >,
    #[cfg(not(any(
        feature = "llvm4-0",
        feature = "llvm5-0",
        feature = "llvm6-0",
        feature = "llvm7-0",
        feature = "llvm8-0",
        feature = "llvm9-0",
        feature = "llvm10-0",
        feature = "llvm11-0",
        feature = "llvm12-0"
    )))]
    can_throw: bool,
) -> PointerValue<'ctx> {
    // Forward only the arguments that exist for the compiled LLVM version.
    self.context.create_inline_asm(
        ty,
        assembly,
        constraints,
        sideeffects,
        alignstack,
        #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))]
        dialect,
        #[cfg(not(any(
            feature = "llvm4-0",
            feature = "llvm5-0",
            feature = "llvm6-0",
            feature = "llvm7-0",
            feature = "llvm8-0",
            feature = "llvm9-0",
            feature = "llvm10-0",
            feature = "llvm11-0",
            feature = "llvm12-0"
        )))]
        can_throw,
    )
}
// The following type getters are all thin delegations to the shared `ContextImpl`.
/// Gets the `VoidType`. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let void_type = context.void_type();
///
/// assert_eq!(void_type.get_context(), context);
/// ```
#[inline]
pub fn void_type(&self) -> VoidType {
    self.context.void_type()
}
/// Gets the `IntType` representing 1 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let bool_type = context.bool_type();
///
/// assert_eq!(bool_type.get_bit_width(), 1);
/// assert_eq!(bool_type.get_context(), context);
/// ```
#[inline]
pub fn bool_type(&self) -> IntType {
    self.context.bool_type()
}
/// Gets the `IntType` representing 8 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i8_type = context.i8_type();
///
/// assert_eq!(i8_type.get_bit_width(), 8);
/// assert_eq!(i8_type.get_context(), context);
/// ```
#[inline]
pub fn i8_type(&self) -> IntType {
    self.context.i8_type()
}
/// Gets the `IntType` representing 16 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i16_type = context.i16_type();
///
/// assert_eq!(i16_type.get_bit_width(), 16);
/// assert_eq!(i16_type.get_context(), context);
/// ```
#[inline]
pub fn i16_type(&self) -> IntType {
    self.context.i16_type()
}
/// Gets the `IntType` representing 32 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i32_type = context.i32_type();
///
/// assert_eq!(i32_type.get_bit_width(), 32);
/// assert_eq!(i32_type.get_context(), context);
/// ```
#[inline]
pub fn i32_type(&self) -> IntType {
    self.context.i32_type()
}
/// Gets the `IntType` representing 64 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i64_type = context.i64_type();
///
/// assert_eq!(i64_type.get_bit_width(), 64);
/// assert_eq!(i64_type.get_context(), context);
/// ```
#[inline]
pub fn i64_type(&self) -> IntType {
    self.context.i64_type()
}
/// Gets the `IntType` representing 128 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i128_type = context.i128_type();
///
/// assert_eq!(i128_type.get_bit_width(), 128);
/// assert_eq!(i128_type.get_context(), context);
/// ```
#[inline]
pub fn i128_type(&self) -> IntType {
    self.context.i128_type()
}
/// Gets the `IntType` representing a custom bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i42_type = context.custom_width_int_type(42);
///
/// assert_eq!(i42_type.get_bit_width(), 42);
/// assert_eq!(i42_type.get_context(), context);
/// ```
#[inline]
pub fn custom_width_int_type(&self, bits: u32) -> IntType {
    self.context.custom_width_int_type(bits)
}
/// Gets the `MetadataType`. It will be assigned the current context.
///
/// # Example
///
/// ```
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let md_type = context.metadata_type();
///
/// assert_eq!(md_type.get_context(), context);
/// ```
#[inline]
#[llvm_versions(6.0..=latest)]
pub fn metadata_type(&self) -> MetadataType {
    self.context.metadata_type()
}
/// Gets the `IntType` representing a bit width of a pointer. It will be assigned the referenced context.
///
/// # Example
///
/// ```no_run
/// use inkwell::OptimizationLevel;
/// use inkwell::context::Context;
/// use inkwell::targets::{InitializationConfig, Target};
///
/// Target::initialize_native(&InitializationConfig::default()).expect("Failed to initialize native target");
///
/// let context = Context::create();
/// let module = context.create_module("sum");
/// let execution_engine = module.create_jit_execution_engine(OptimizationLevel::None).unwrap();
/// let target_data = execution_engine.get_target_data();
/// let int_type = context.ptr_sized_int_type(&target_data, None);
/// ```
#[inline]
pub fn ptr_sized_int_type(&self, target_data: &TargetData, address_space: Option<AddressSpace>) -> IntType {
    self.context.ptr_sized_int_type(target_data, address_space)
}
/// Gets the `FloatType` representing a 16 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f16_type = context.f16_type();
///
/// assert_eq!(f16_type.get_context(), context);
/// ```
#[inline]
pub fn f16_type(&self) -> FloatType {
    self.context.f16_type()
}
/// Gets the `FloatType` representing a 32 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f32_type = context.f32_type();
///
/// assert_eq!(f32_type.get_context(), context);
/// ```
#[inline]
pub fn f32_type(&self) -> FloatType {
    self.context.f32_type()
}
/// Gets the `FloatType` representing a 64 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f64_type = context.f64_type();
///
/// assert_eq!(f64_type.get_context(), context);
/// ```
#[inline]
pub fn f64_type(&self) -> FloatType {
    self.context.f64_type()
}
/// Gets the `FloatType` representing an 80 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let x86_f80_type = context.x86_f80_type();
///
/// assert_eq!(x86_f80_type.get_context(), context);
/// ```
#[inline]
pub fn x86_f80_type(&self) -> FloatType {
    self.context.x86_f80_type()
}
/// Gets the `FloatType` representing a 128 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f128_type = context.f128_type();
///
/// assert_eq!(f128_type.get_context(), context);
/// ```
// IEEE 754-2008’s binary128 floats according to https://internals.rust-lang.org/t/pre-rfc-introduction-of-half-and-quadruple-precision-floats-f16-and-f128/7521
#[inline]
pub fn f128_type(&self) -> FloatType {
    self.context.f128_type()
}
/// Gets the `FloatType` representing a 128 bit width. It will be assigned the current context.
///
/// PPC is two 64 bits side by side rather than one single 128 bit float.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f128_type = context.ppc_f128_type();
///
/// assert_eq!(f128_type.get_context(), context);
/// ```
// Two 64 bits according to https://internals.rust-lang.org/t/pre-rfc-introduction-of-half-and-quadruple-precision-floats-f16-and-f128/7521
#[inline]
pub fn ppc_f128_type(&self) -> FloatType {
    self.context.ppc_f128_type()
}
/// Creates a `StructType` definition from heterogeneous types in the current `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let struct_type = context.struct_type(&[i16_type.into(), f32_type.into()], false);
///
/// assert_eq!(struct_type.get_field_types(), &[i16_type.into(), f32_type.into()]);
/// ```
// REVIEW: AnyType but VoidType? FunctionType?
#[inline]
pub fn struct_type(&self, field_types: &[BasicTypeEnum], packed: bool) -> StructType {
    self.context.struct_type(field_types, packed)
}
/// Creates an opaque `StructType` with no type definition yet defined.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let struct_type = context.opaque_struct_type("my_struct");
///
/// assert_eq!(struct_type.get_field_types(), &[]);
/// ```
#[inline]
pub fn opaque_struct_type(&self, name: &str) -> StructType {
    self.context.opaque_struct_type(name)
}
/// Creates a constant `StructValue` from constant values.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let f32_one = f32_type.const_float(1.);
/// let i16_two = i16_type.const_int(2, false);
/// let const_struct = context.const_struct(&[i16_two.into(), f32_one.into()], false);
///
/// assert_eq!(const_struct.get_type().get_field_types(), &[i16_type.into(), f32_type.into()]);
/// ```
#[inline]
pub fn const_struct(&self, values: &[BasicValueEnum], packed: bool) -> StructValue {
    self.context.const_struct(values, packed)
}
/// Append a named `BasicBlock` at the end of the referenced `FunctionValue`.
///
/// The new block becomes the function's last basic block.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let last_basic_block = context.append_basic_block(fn_value, "last");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), entry_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), last_basic_block);
/// ```
#[inline]
pub fn append_basic_block<'ctx>(&'ctx self, function: FunctionValue<'ctx>, name: &str) -> BasicBlock<'ctx> {
self.context.append_basic_block(function, name)
}
/// Append a named `BasicBlock` after the referenced `BasicBlock`.
///
/// The new block is inserted immediately after `basic_block`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let last_basic_block = context.insert_basic_block_after(entry_basic_block, "last");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), entry_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), last_basic_block);
/// ```
// REVIEW: What happens when using these methods and the BasicBlock doesn't have a parent?
// Should they be callable at all? Needs testing to see what LLVM will do, I suppose. See below unwrap.
// Maybe need SubTypes: BasicBlock<HasParent>, BasicBlock<Orphan>?
#[inline]
pub fn insert_basic_block_after<'ctx>(&'ctx self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
self.context.insert_basic_block_after(basic_block, name)
}
/// Prepend a named `BasicBlock` before the referenced `BasicBlock`.
///
/// The new block is inserted immediately before `basic_block`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let first_basic_block = context.prepend_basic_block(entry_basic_block, "first");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), first_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), entry_basic_block);
/// ```
#[inline]
pub fn prepend_basic_block<'ctx>(&'ctx self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
self.context.prepend_basic_block(basic_block, name)
}
/// Creates a `MetadataValue` tuple of heterogeneous types (a "Node") for the current context. It can be assigned to a value.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i8_type = context.i8_type();
/// let i8_two = i8_type.const_int(2, false);
/// let f32_type = context.f32_type();
/// let f32_zero = f32_type.const_float(0.);
/// let md_node = context.metadata_node(&[i8_two.into(), f32_zero.into()]);
/// let f32_one = f32_type.const_float(1.);
/// let void_type = context.void_type();
///
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let fn_type = void_type.fn_type(&[f32_type.into()], false);
/// let fn_value = module.add_function("my_func", fn_type, None);
/// let entry_block = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_block);
///
/// let ret_instr = builder.build_return(None);
///
/// assert!(md_node.is_node());
///
/// ret_instr.set_metadata(md_node, 0);
/// ```
// REVIEW: Maybe more helpful to beginners to call this metadata_tuple?
// REVIEW: Seems to be unassigned to anything
#[inline]
pub fn metadata_node<'ctx>(&'ctx self, values: &[BasicMetadataValueEnum<'ctx>]) -> MetadataValue<'ctx> {
self.context.metadata_node(values)
}
/// Creates a `MetadataValue` string for the current context. It can be assigned to a value.
///
/// See `metadata_node` for creating metadata tuples instead of strings.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let md_string = context.metadata_string("Floats are awesome!");
/// let f32_type = context.f32_type();
/// let f32_one = f32_type.const_float(1.);
/// let void_type = context.void_type();
///
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let fn_type = void_type.fn_type(&[f32_type.into()], false);
/// let fn_value = module.add_function("my_func", fn_type, None);
/// let entry_block = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_block);
///
/// let ret_instr = builder.build_return(None);
///
/// assert!(md_string.is_string());
///
/// ret_instr.set_metadata(md_string, 0);
/// ```
// REVIEW: Seems to be unassigned to anything
#[inline]
pub fn metadata_string(&self, string: &str) -> MetadataValue {
self.context.metadata_string(string)
}
/// Obtains the index of a metadata kind id. If the string doesn't exist, LLVM will add it at index `FIRST_CUSTOM_METADATA_KIND_ID` onward.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
/// use inkwell::values::FIRST_CUSTOM_METADATA_KIND_ID;
///
/// let context = Context::create();
///
/// assert_eq!(context.get_kind_id("dbg"), 0);
/// assert_eq!(context.get_kind_id("tbaa"), 1);
/// assert_eq!(context.get_kind_id("prof"), 2);
///
/// // Custom kind id doesn't exist in LLVM until now:
/// assert_eq!(context.get_kind_id("foo"), FIRST_CUSTOM_METADATA_KIND_ID);
/// ```
// "dbg"/"tbaa"/"prof" above are LLVM built-in metadata kinds, hence the fixed low ids.
#[inline]
pub fn get_kind_id(&self, key: &str) -> u32 {
self.context.get_kind_id(key)
}
// LLVM 3.9+
// pub fn get_diagnostic_handler(&self) -> DiagnosticHandler {
// let handler = unsafe {
// LLVMContextGetDiagnosticHandler(self.context)
// };
// // REVIEW: Can this be null?
// DiagnosticHandler::new(handler)
// }
/// Creates an enum `Attribute` in this `Context`.
///
/// A `kind_id` for a named enum attribute can be obtained via `Attribute::get_named_enum_kind_id`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let enum_attribute = context.create_enum_attribute(0, 10);
///
/// assert!(enum_attribute.is_enum());
/// ```
#[inline]
pub fn create_enum_attribute(&self, kind_id: u32, val: u64) -> Attribute {
self.context.create_enum_attribute(kind_id, val)
}
/// Creates a string `Attribute` in this `Context`.
///
/// See `create_enum_attribute` for integer-valued attributes.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let string_attribute = context.create_string_attribute("my_key_123", "my_val");
///
/// assert!(string_attribute.is_string());
/// ```
#[inline]
pub fn create_string_attribute(&self, key: &str, val: &str) -> Attribute {
self.context.create_string_attribute(key, val)
}
/// Create an enum `Attribute` with an `AnyTypeEnum` attached to it.
///
/// # Example
/// ```rust
/// use inkwell::context::Context;
/// use inkwell::attributes::Attribute;
/// use inkwell::types::AnyType;
///
/// let context = Context::create();
/// let kind_id = Attribute::get_named_enum_kind_id("sret");
/// let any_type = context.i32_type().as_any_type_enum();
/// let type_attribute = context.create_type_attribute(
/// kind_id,
/// any_type,
/// );
///
/// assert!(type_attribute.is_type());
/// assert_eq!(type_attribute.get_type_value(), any_type);
/// assert_ne!(type_attribute.get_type_value(), context.i64_type().as_any_type_enum());
/// ```
// Only available when built against LLVM 12.0 or newer.
#[inline]
#[llvm_versions(12.0..=latest)]
pub fn create_type_attribute(&self, kind_id: u32, type_ref: AnyTypeEnum) -> Attribute {
self.context.create_type_attribute(kind_id, type_ref)
}
/// Creates a const string which may be null terminated.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
/// use inkwell::values::AnyValue;
///
/// let context = Context::create();
/// let string = context.const_string(b"my_string", false);
///
/// assert_eq!(string.print_to_string().to_string(), "[9 x i8] c\"my_string\"");
/// ```
// NOTE(review): with `null_terminated == true` the array presumably gains a trailing
// NUL element -- confirm; the example above only covers `false`.
// SubTypes: Should return VectorValue<IntValue<i8>>
#[inline]
pub fn const_string(&self, string: &[u8], null_terminated: bool) -> VectorValue {
self.context.const_string(string, null_terminated)
}
// Installs `handler` as the diagnostic callback on the wrapped LLVM context.
// `void_ptr` is the opaque user-state pointer that is presumably handed back to
// `handler` on each diagnostic (LLVM's usual callback convention) -- confirm in ContextImpl.
#[inline]
pub(crate) fn set_diagnostic_handler(
&self,
handler: extern "C" fn(LLVMDiagnosticInfoRef, *mut c_void),
void_ptr: *mut c_void,
) {
self.context.set_diagnostic_handler(handler, void_ptr)
}
}
impl Drop for Context {
fn drop(&mut self) {
// Dispose the raw LLVMContext this wrapper owns. Afterwards, any LLVM object
// created from this context is dangling -- the `'ctx` lifetimes handed out by
// the API exist precisely to prevent such use.
unsafe {
LLVMContextDispose(self.context.0);
}
}
}
#[cfg(feature = "internal-getters")]
impl LLVMReference<LLVMContextRef> for Context {
// NOTE(review): `Drop` above reaches the raw pointer via `self.context.0`, while this
// returns `self.context` directly -- confirm `ContextImpl` converts/coerces to
// `LLVMContextRef` here, or whether this should also be `self.context.0`.
unsafe fn get_ref(&self) -> LLVMContextRef {
self.context
}
}
/// A `ContextRef` is a smart pointer allowing borrowed access to a type's `Context`.
#[derive(Debug, PartialEq, Eq)]
pub struct ContextRef<'ctx> {
// The wrapped raw context; every method below forwards to it.
context: ContextImpl,
// Ties this borrow to the owning `Context`'s lifetime without storing a reference.
_marker: PhantomData<&'ctx Context>,
}
impl<'ctx> ContextRef<'ctx> {
// SAFETY(internal): callers must pass a valid, live `LLVMContextRef`, and the
// constructed `ContextRef` must not outlive the context it borrows from.
pub(crate) unsafe fn new(context: LLVMContextRef) -> Self {
ContextRef {
context: ContextImpl::new(context),
_marker: PhantomData,
}
}
/// Creates a new `Builder` for a `Context`.
///
/// Examples in this file position the builder with `position_at_end` before
/// emitting instructions (see e.g. `create_module_from_ir`).
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// ```
#[inline]
pub fn create_builder(&self) -> Builder<'ctx> {
self.context.create_builder()
}
/// Creates a new `Module` for a `Context`.
///
/// See also `create_module_from_ir` for constructing a module from a bitcode buffer.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// ```
#[inline]
pub fn create_module(&self, name: &str) -> Module<'ctx> {
self.context.create_module(name)
}
/// Creates a new `Module` for the current `Context` from a `MemoryBuffer`.
///
/// Returns `Err(LLVMString)` if LLVM fails to create the module from the buffer.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// let builder = context.create_builder();
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_val = module.add_function("my_fn", fn_type, None);
/// let basic_block = context.append_basic_block(fn_val, "entry");
///
/// builder.position_at_end(basic_block);
/// builder.build_return(None);
///
/// let memory_buffer = module.write_bitcode_to_memory();
///
/// let module2 = context.create_module_from_ir(memory_buffer).unwrap();
/// ```
// REVIEW: I haven't yet been able to find docs or other wrappers that confirm, but my suspicion
// is that the method needs to take ownership of the MemoryBuffer... otherwise I see what looks like
// a double free in valgrind when the MemoryBuffer drops so we are `forget`ting MemoryBuffer here
// for now until we can confirm this is the correct thing to do
#[inline]
pub fn create_module_from_ir(&self, memory_buffer: MemoryBuffer) -> Result<Module<'ctx>, LLVMString> {
self.context.create_module_from_ir(memory_buffer)
}
/// Creates an inline asm function pointer.
///
/// `assembly` is the asm template string and `constraints` the LLVM constraint
/// string; `sideeffects`/`alignstack` map to the corresponding LLVM inline-asm
/// flags. `dialect` exists for LLVM 7.0+ builds and `can_throw` for LLVM 13.0+
/// builds (see the `cfg` gates on the parameters).
///
/// # Example
/// ```no_run
/// use std::convert::TryFrom;
/// use inkwell::context::Context;
/// use inkwell::values::CallableValue;
///
/// let context = Context::create();
/// let module = context.create_module("my_module");
/// let builder = context.create_builder();
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_val = module.add_function("my_fn", fn_type, None);
/// let basic_block = context.append_basic_block(fn_val, "entry");
///
/// builder.position_at_end(basic_block);
/// let asm_fn = context.i64_type().fn_type(&[context.i64_type().into(), context.i64_type().into()], false);
/// let asm = context.create_inline_asm(
/// asm_fn,
/// "syscall".to_string(),
/// "=r,{rax},{rdi}".to_string(),
/// true,
/// false,
/// #[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))] None,
/// #[cfg(not(any(
/// feature = "llvm4-0",
/// feature = "llvm5-0",
/// feature = "llvm6-0",
/// feature = "llvm7-0",
/// feature = "llvm8-0",
/// feature = "llvm9-0",
/// feature = "llvm10-0",
/// feature = "llvm11-0",
/// feature = "llvm12-0"
/// )))]
/// false,
/// );
/// let params = &[context.i64_type().const_int(60, false).into(), context.i64_type().const_int(1, false).into()];
/// let callable_value = CallableValue::try_from(asm).unwrap();
/// builder.build_call(callable_value, params, "exit");
/// builder.build_return(None);
/// ```
#[inline]
pub fn create_inline_asm(
&self,
ty: FunctionType<'ctx>,
assembly: String,
constraints: String,
sideeffects: bool,
alignstack: bool,
#[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))] dialect: Option<
InlineAsmDialect,
>,
#[cfg(not(any(
feature = "llvm4-0",
feature = "llvm5-0",
feature = "llvm6-0",
feature = "llvm7-0",
feature = "llvm8-0",
feature = "llvm9-0",
feature = "llvm10-0",
feature = "llvm11-0",
feature = "llvm12-0"
)))]
can_throw: bool,
) -> PointerValue<'ctx> {
self.context.create_inline_asm(
ty,
assembly,
constraints,
sideeffects,
alignstack,
#[cfg(not(any(feature = "llvm4-0", feature = "llvm5-0", feature = "llvm6-0")))]
dialect,
#[cfg(not(any(
feature = "llvm4-0",
feature = "llvm5-0",
feature = "llvm6-0",
feature = "llvm7-0",
feature = "llvm8-0",
feature = "llvm9-0",
feature = "llvm10-0",
feature = "llvm11-0",
feature = "llvm12-0"
)))]
can_throw,
)
}
/// Gets the `VoidType`. It will be assigned the current context.
///
/// Commonly used via `fn_type` for functions returning no value (see the examples
/// on `append_basic_block` and friends).
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let void_type = context.void_type();
///
/// assert_eq!(void_type.get_context(), context);
/// ```
#[inline]
pub fn void_type(&self) -> VoidType<'ctx> {
self.context.void_type()
}
/// Gets the `IntType` representing a 1 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let bool_type = context.bool_type();
///
/// assert_eq!(bool_type.get_bit_width(), 1);
/// assert_eq!(bool_type.get_context(), context);
/// ```
#[inline]
pub fn bool_type(&self) -> IntType<'ctx> {
self.context.bool_type()
}
/// Gets the `IntType` representing an 8 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i8_type = context.i8_type();
///
/// assert_eq!(i8_type.get_bit_width(), 8);
/// assert_eq!(i8_type.get_context(), context);
/// ```
#[inline]
pub fn i8_type(&self) -> IntType<'ctx> {
self.context.i8_type()
}
/// Gets the `IntType` representing a 16 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i16_type = context.i16_type();
///
/// assert_eq!(i16_type.get_bit_width(), 16);
/// assert_eq!(i16_type.get_context(), context);
/// ```
#[inline]
pub fn i16_type(&self) -> IntType<'ctx> {
self.context.i16_type()
}
/// Gets the `IntType` representing a 32 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i32_type = context.i32_type();
///
/// assert_eq!(i32_type.get_bit_width(), 32);
/// assert_eq!(i32_type.get_context(), context);
/// ```
#[inline]
pub fn i32_type(&self) -> IntType<'ctx> {
self.context.i32_type()
}
/// Gets the `IntType` representing a 64 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i64_type = context.i64_type();
///
/// assert_eq!(i64_type.get_bit_width(), 64);
/// assert_eq!(i64_type.get_context(), context);
/// ```
#[inline]
pub fn i64_type(&self) -> IntType<'ctx> {
self.context.i64_type()
}
/// Gets the `IntType` representing a 128 bit width. It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i128_type = context.i128_type();
///
/// assert_eq!(i128_type.get_bit_width(), 128);
/// assert_eq!(i128_type.get_context(), context);
/// ```
#[inline]
pub fn i128_type(&self) -> IntType<'ctx> {
self.context.i128_type()
}
/// Gets the `IntType` representing a custom bit width. It will be assigned the current context.
///
/// See the fixed-width helpers (`i8_type` through `i128_type`) for common widths.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i42_type = context.custom_width_int_type(42);
///
/// assert_eq!(i42_type.get_bit_width(), 42);
/// assert_eq!(i42_type.get_context(), context);
/// ```
#[inline]
pub fn custom_width_int_type(&self, bits: u32) -> IntType<'ctx> {
self.context.custom_width_int_type(bits)
}
/// Gets the `MetadataType`. It will be assigned the current context.
// (The previous summary claimed "representing 128 bit width" -- a copy/paste
// from the integer getters; metadata has no bit width.)
///
/// # Example
///
/// ```
/// use inkwell::context::Context;
/// use inkwell::values::IntValue;
///
/// let context = Context::create();
/// let md_type = context.metadata_type();
///
/// assert_eq!(md_type.get_context(), context);
/// ```
#[inline]
#[llvm_versions(6.0..=latest)]
pub fn metadata_type(&self) -> MetadataType<'ctx> {
self.context.metadata_type()
}
/// Gets the `IntType` representing a bit width of a pointer. It will be assigned the referenced context.
///
/// The width is taken from `target_data`; presumably per `address_space` when one
/// is given -- confirm against the `ContextImpl`/`TargetData` implementation.
///
/// # Example
///
/// ```no_run
/// use inkwell::OptimizationLevel;
/// use inkwell::context::Context;
/// use inkwell::targets::{InitializationConfig, Target};
///
/// Target::initialize_native(&InitializationConfig::default()).expect("Failed to initialize native target");
///
/// let context = Context::create();
/// let module = context.create_module("sum");
/// let execution_engine = module.create_jit_execution_engine(OptimizationLevel::None).unwrap();
/// let target_data = execution_engine.get_target_data();
/// let int_type = context.ptr_sized_int_type(&target_data, None);
/// ```
#[inline]
pub fn ptr_sized_int_type(&self, target_data: &TargetData, address_space: Option<AddressSpace>) -> IntType<'ctx> {
self.context.ptr_sized_int_type(target_data, address_space)
}
/// Gets the `FloatType` representing a 16 bit width (half precision). It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f16_type = context.f16_type();
///
/// assert_eq!(f16_type.get_context(), context);
/// ```
#[inline]
pub fn f16_type(&self) -> FloatType<'ctx> {
self.context.f16_type()
}
/// Gets the `FloatType` representing a 32 bit width (single precision). It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f32_type = context.f32_type();
///
/// assert_eq!(f32_type.get_context(), context);
/// ```
#[inline]
pub fn f32_type(&self) -> FloatType<'ctx> {
self.context.f32_type()
}
/// Gets the `FloatType` representing a 64 bit width (double precision). It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f64_type = context.f64_type();
///
/// assert_eq!(f64_type.get_context(), context);
/// ```
#[inline]
pub fn f64_type(&self) -> FloatType<'ctx> {
self.context.f64_type()
}
/// Gets the `FloatType` representing an 80 bit width (x87 extended precision). It will be assigned the current context.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let x86_f80_type = context.x86_f80_type();
///
/// assert_eq!(x86_f80_type.get_context(), context);
/// ```
#[inline]
pub fn x86_f80_type(&self) -> FloatType<'ctx> {
self.context.x86_f80_type()
}
/// Gets the `FloatType` representing a 128 bit width. It will be assigned the current context.
///
/// This is IEEE binary128 (see the reference linked below), unlike `ppc_f128_type`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f128_type = context.f128_type();
///
/// assert_eq!(f128_type.get_context(), context);
/// ```
// IEEE 754-2008’s binary128 floats according to https://internals.rust-lang.org/t/pre-rfc-introduction-of-half-and-quadruple-precision-floats-f16-and-f128/7521
#[inline]
pub fn f128_type(&self) -> FloatType<'ctx> {
self.context.f128_type()
}
/// Gets the `FloatType` representing a 128 bit width. It will be assigned the current context.
///
/// PPC is two 64 bits side by side rather than one single 128 bit float.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
///
/// let f128_type = context.ppc_f128_type();
///
/// assert_eq!(f128_type.get_context(), context);
/// ```
// Two 64 bits according to https://internals.rust-lang.org/t/pre-rfc-introduction-of-half-and-quadruple-precision-floats-f16-and-f128/7521
// (i.e. the PowerPC "double-double" format, not IEEE binary128 -- see `f128_type`.)
#[inline]
pub fn ppc_f128_type(&self) -> FloatType<'ctx> {
self.context.ppc_f128_type()
}
/// Creates a `StructType` definition from heterogeneous types in the current `Context`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let struct_type = context.struct_type(&[i16_type.into(), f32_type.into()], false);
///
/// assert_eq!(struct_type.get_field_types(), &[i16_type.into(), f32_type.into()]);
/// ```
// REVIEW: AnyType but VoidType? FunctionType?
#[inline]
pub fn struct_type(&self, field_types: &[BasicTypeEnum<'ctx>], packed: bool) -> StructType<'ctx> {
self.context.struct_type(field_types, packed)
}
/// Creates an opaque `StructType` with no type definition yet defined.
///
/// The returned struct type reports an empty field list until a body is defined.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let struct_type = context.opaque_struct_type("my_struct");
///
/// assert_eq!(struct_type.get_field_types(), &[]);
/// ```
#[inline]
pub fn opaque_struct_type(&self, name: &str) -> StructType<'ctx> {
self.context.opaque_struct_type(name)
}
/// Creates a constant `StructValue` from constant values.
///
/// The resulting struct's type is derived from the types of `values`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let f32_type = context.f32_type();
/// let i16_type = context.i16_type();
/// let f32_one = f32_type.const_float(1.);
/// let i16_two = i16_type.const_int(2, false);
/// let const_struct = context.const_struct(&[i16_two.into(), f32_one.into()], false);
///
/// assert_eq!(const_struct.get_type().get_field_types(), &[i16_type.into(), f32_type.into()]);
/// ```
#[inline]
pub fn const_struct(&self, values: &[BasicValueEnum<'ctx>], packed: bool) -> StructValue<'ctx> {
self.context.const_struct(values, packed)
}
/// Append a named `BasicBlock` at the end of the referenced `FunctionValue`.
///
/// The new block becomes the function's last basic block.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let last_basic_block = context.append_basic_block(fn_value, "last");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), entry_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), last_basic_block);
/// ```
#[inline]
pub fn append_basic_block(&self, function: FunctionValue<'ctx>, name: &str) -> BasicBlock<'ctx> {
self.context.append_basic_block(function, name)
}
/// Append a named `BasicBlock` after the referenced `BasicBlock`.
///
/// The new block is inserted immediately after `basic_block`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let last_basic_block = context.insert_basic_block_after(entry_basic_block, "last");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), entry_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), last_basic_block);
/// ```
// REVIEW: What happens when using these methods and the BasicBlock doesn't have a parent?
// Should they be callable at all? Needs testing to see what LLVM will do, I suppose. See below unwrap.
// Maybe need SubTypes: BasicBlock<HasParent>, BasicBlock<Orphan>?
#[inline]
pub fn insert_basic_block_after(&self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
self.context.insert_basic_block_after(basic_block, name)
}
/// Prepend a named `BasicBlock` before the referenced `BasicBlock`.
///
/// The new block is inserted immediately before `basic_block`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_basic_block = context.append_basic_block(fn_value, "entry");
///
/// assert_eq!(fn_value.count_basic_blocks(), 1);
///
/// let first_basic_block = context.prepend_basic_block(entry_basic_block, "first");
///
/// assert_eq!(fn_value.count_basic_blocks(), 2);
/// assert_eq!(fn_value.get_first_basic_block().unwrap(), first_basic_block);
/// assert_eq!(fn_value.get_last_basic_block().unwrap(), entry_basic_block);
/// ```
#[inline]
pub fn prepend_basic_block(&self, basic_block: BasicBlock<'ctx>, name: &str) -> BasicBlock<'ctx> {
self.context.prepend_basic_block(basic_block, name)
}
/// Creates a `MetadataValue` tuple of heterogeneous types (a "Node") for the current context. It can be assigned to a value.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let i8_type = context.i8_type();
/// let i8_two = i8_type.const_int(2, false);
/// let f32_type = context.f32_type();
/// let f32_zero = f32_type.const_float(0.);
/// let md_node = context.metadata_node(&[i8_two.into(), f32_zero.into()]);
/// let f32_one = f32_type.const_float(1.);
/// let void_type = context.void_type();
///
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let fn_type = void_type.fn_type(&[f32_type.into()], false);
/// let fn_value = module.add_function("my_func", fn_type, None);
/// let entry_block = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_block);
///
/// let ret_instr = builder.build_return(None);
///
/// assert!(md_node.is_node());
///
/// ret_instr.set_metadata(md_node, 0);
/// ```
// REVIEW: Maybe more helpful to beginners to call this metadata_tuple?
// REVIEW: Seems to be unassigned to anything
#[inline]
pub fn metadata_node(&self, values: &[BasicMetadataValueEnum<'ctx>]) -> MetadataValue<'ctx> {
self.context.metadata_node(values)
}
/// Creates a `MetadataValue` string for the current context. It can be assigned to a value.
///
/// See `metadata_node` for creating metadata tuples instead of strings.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let md_string = context.metadata_string("Floats are awesome!");
/// let f32_type = context.f32_type();
/// let f32_one = f32_type.const_float(1.);
/// let void_type = context.void_type();
///
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let fn_type = void_type.fn_type(&[f32_type.into()], false);
/// let fn_value = module.add_function("my_func", fn_type, None);
/// let entry_block = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_block);
///
/// let ret_instr = builder.build_return(None);
///
/// assert!(md_string.is_string());
///
/// ret_instr.set_metadata(md_string, 0);
/// ```
// REVIEW: Seems to be unassigned to anything
#[inline]
pub fn metadata_string(&self, string: &str) -> MetadataValue<'ctx> {
self.context.metadata_string(string)
}
/// Obtains the index of a metadata kind id. If the string doesn't exist, LLVM will add it at index `FIRST_CUSTOM_METADATA_KIND_ID` onward.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
/// use inkwell::values::FIRST_CUSTOM_METADATA_KIND_ID;
///
/// let context = Context::create();
///
/// assert_eq!(context.get_kind_id("dbg"), 0);
/// assert_eq!(context.get_kind_id("tbaa"), 1);
/// assert_eq!(context.get_kind_id("prof"), 2);
///
/// // Custom kind id doesn't exist in LLVM until now:
/// assert_eq!(context.get_kind_id("foo"), FIRST_CUSTOM_METADATA_KIND_ID);
/// ```
// "dbg"/"tbaa"/"prof" above are LLVM built-in metadata kinds, hence the fixed low ids.
#[inline]
pub fn get_kind_id(&self, key: &str) -> u32 {
self.context.get_kind_id(key)
}
/// Creates an enum `Attribute` in this `Context`.
///
/// A `kind_id` for a named enum attribute can be obtained via `Attribute::get_named_enum_kind_id`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let enum_attribute = context.create_enum_attribute(0, 10);
///
/// assert!(enum_attribute.is_enum());
/// ```
#[inline]
pub fn create_enum_attribute(&self, kind_id: u32, val: u64) -> Attribute {
self.context.create_enum_attribute(kind_id, val)
}
/// Creates a string `Attribute` in this `Context`.
///
/// See `create_enum_attribute` for integer-valued attributes.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let string_attribute = context.create_string_attribute("my_key_123", "my_val");
///
/// assert!(string_attribute.is_string());
/// ```
#[inline]
pub fn create_string_attribute(&self, key: &str, val: &str) -> Attribute {
self.context.create_string_attribute(key, val)
}
/// Create an enum `Attribute` with an `AnyTypeEnum` attached to it.
///
/// # Example
/// ```rust
/// use inkwell::context::Context;
/// use inkwell::attributes::Attribute;
/// use inkwell::types::AnyType;
///
/// let context = Context::create();
/// let kind_id = Attribute::get_named_enum_kind_id("sret");
/// let any_type = context.i32_type().as_any_type_enum();
/// let type_attribute = context.create_type_attribute(
/// kind_id,
/// any_type,
/// );
///
/// assert!(type_attribute.is_type());
/// assert_eq!(type_attribute.get_type_value(), any_type);
/// assert_ne!(type_attribute.get_type_value(), context.i64_type().as_any_type_enum());
/// ```
// Only available when built against LLVM 12.0 or newer.
#[inline]
#[llvm_versions(12.0..=latest)]
pub fn create_type_attribute(&self, kind_id: u32, type_ref: AnyTypeEnum) -> Attribute {
self.context.create_type_attribute(kind_id, type_ref)
}
/// Creates a const string which may be null terminated.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
/// use inkwell::values::AnyValue;
///
/// let context = Context::create();
/// let string = context.const_string(b"my_string", false);
///
/// assert_eq!(string.print_to_string().to_string(), "[9 x i8] c\"my_string\"");
/// ```
// NOTE(review): with `null_terminated == true` the array presumably gains a trailing
// NUL element -- confirm; the example above only covers `false`.
// SubTypes: Should return VectorValue<IntValue<i8>>
#[inline]
pub fn const_string(&self, string: &[u8], null_terminated: bool) -> VectorValue<'ctx> {
self.context.const_string(string, null_terminated)
}
// Registers `handler` as this context's LLVM diagnostic callback; `void_ptr`
// is handed back to the callback unchanged as its opaque user-data argument.
#[inline]
pub(crate) fn set_diagnostic_handler(
    &self,
    handler: extern "C" fn(LLVMDiagnosticInfoRef, *mut c_void),
    void_ptr: *mut c_void,
) {
    self.context.set_diagnostic_handler(handler, void_ptr)
}
}
#[cfg(feature = "internal-getters")]
impl LLVMReference<LLVMContextRef> for ContextRef<'_> {
    // Exposes the raw LLVM context pointer backing this reference.
    unsafe fn get_ref(&self) -> LLVMContextRef {
        self.context
    }
}
|
use ffi;
use types::{Result, PortMidiDeviceId, Error};
use io::{InputPort, OutputPort};
use device::DeviceInfo;
/// The elements are static after initializing
pub struct PortMidi {
    // Number of MIDI devices present when `new` ran (no hot plugging).
    device_cnt: i32,
    // Event-buffer capacity handed to every port created through this handle.
    buffer_size: i32,
}
impl PortMidi {
    /// Initializes the underlying PortMidi C library.
    /// It does not support *hot plugging*, this means
    /// that devices that are connected after calling `new`
    /// are not picked up by PortMidi.
    pub fn new() -> Result<Self> {
        try!(Result::from(unsafe { ffi::Pm_Initialize() }));
        Ok(PortMidi {
            device_cnt: unsafe { ffi::Pm_CountDevices() },
            buffer_size: 1024, // TODO: argument
        })
    }
    /// Return the number of devices. This number will not change during the lifetime
    /// of the program.
    pub fn device_cnt(&self) -> PortMidiDeviceId {
        self.device_cnt
    }
    /// Gets the `PortMidiDeviceId` for the default input, or
    /// `Err(Error::NoDefaultDevice)` if there isn't one.
    ///
    /// See the PortMidi documentation for details of how to set the default device
    pub fn default_input_device_id(&self) -> Result<PortMidiDeviceId> {
        match unsafe { ffi::Pm_GetDefaultInputDeviceID() } {
            ffi::PM_NO_DEVICE => Err(Error::NoDefaultDevice),
            id => Ok(id),
        }
    }
    /// Gets the `PortMidiDeviceId` for the default output, or
    /// `Err(Error::NoDefaultDevice)` if there isn't one.
    ///
    /// See the PortMidi documentation for details of how to set the default device
    pub fn default_output_device_id(&self) -> Result<PortMidiDeviceId> {
        match unsafe { ffi::Pm_GetDefaultOutputDeviceID() } {
            ffi::PM_NO_DEVICE => Err(Error::NoDefaultDevice),
            id => Ok(id),
        }
    }
    /// Returns the `DeviceInfo` for the given device id.
    pub fn device(&self, id: PortMidiDeviceId) -> Result<DeviceInfo> {
        DeviceInfo::new(id)
    }
    /// Returns the `DeviceInfo` for every known device, failing on the
    /// first device that cannot be queried.
    pub fn devices(&self) -> Result<Vec<DeviceInfo>> {
        let mut devices = Vec::with_capacity(self.device_cnt() as usize);
        for res in (0..self.device_cnt()).map(|id| self.device(id)) {
            match res {
                Ok(device) => devices.push(device),
                Err(err) => return Err(err),
            }
        }
        Ok(devices)
    }
    /// Opens an `InputPort` on the system default input device.
    pub fn default_input_port(&self) -> Result<InputPort> {
        let info = try!(self.default_input_device_id().and_then(|id| self.device(id)));
        InputPort::new(info, self.buffer_size)
    }
    /// Opens an `InputPort` for the given device, or
    /// `Err(Error::NotAnInputDevice)` if the device has no input capability.
    pub fn input_port(&self, device: DeviceInfo) -> Result<InputPort> {
        if device.is_input() {
            InputPort::new(device, self.buffer_size)
        } else {
            Err(Error::NotAnInputDevice)
        }
    }
    /// Opens an `OutputPort` on the system default output device.
    pub fn default_output_port(&self) -> Result<OutputPort> {
        let info = try!(self.default_output_device_id().and_then(|id| self.device(id)));
        OutputPort::new(info, self.buffer_size)
    }
    /// Opens an `OutputPort` for the given device, or
    /// `Err(Error::NotAnOutputDevice)` if the device has no output capability.
    // BUG FIX: this method previously declared `Result<InputPort>` and
    // constructed an `InputPort`, a copy-paste error that made it impossible
    // to open an output port for a specific device.
    pub fn output_port(&self, device: DeviceInfo) -> Result<OutputPort> {
        if device.is_output() {
            OutputPort::new(device, self.buffer_size)
        } else {
            Err(Error::NotAnOutputDevice)
        }
    }
}
impl Drop for PortMidi {
    /// Shuts the PortMidi library down when the handle goes away.
    fn drop(&mut self) {
        // `Drop` cannot propagate errors; a failed shutdown is only logged.
        // (Previously the `#[must_use]` Result of `map_err` was discarded.)
        if let Err(err) = Result::from(unsafe { ffi::Pm_Terminate() }) {
            println!("Could not terminate library: {}", err);
        }
    }
}
-library
use ffi;
use types::{Result, PortMidiDeviceId, Error};
use io::{InputPort, OutputPort};
use device::DeviceInfo;
/// The elements are static after initializing
pub struct PortMidi {
    // Number of MIDI devices present when `new` ran (no hot plugging).
    device_cnt: i32,
    // Event-buffer capacity handed to every port created through this handle.
    buffer_size: i32,
}
impl PortMidi {
    /// Initializes the underlying PortMidi C library.
    /// It does not support *hot plugging*, this means
    /// that devices that are connected after calling `new`
    /// are not picked up by PortMidi.
    pub fn new() -> Result<Self> {
        try!(Result::from(unsafe { ffi::Pm_Initialize() }));
        Ok(PortMidi {
            device_cnt: unsafe { ffi::Pm_CountDevices() },
            buffer_size: 1024, // TODO: argument
        })
    }
    /// Return the number of devices. This number will not change during the lifetime
    /// of the program.
    pub fn device_cnt(&self) -> PortMidiDeviceId {
        self.device_cnt
    }
    /// Gets the `PortMidiDeviceId` for the default input, or
    /// `Err(Error::NoDefaultDevice)` if there isn't one.
    ///
    /// See the PortMidi documentation for details of how to set the default device
    pub fn default_input_device_id(&self) -> Result<PortMidiDeviceId> {
        match unsafe { ffi::Pm_GetDefaultInputDeviceID() } {
            ffi::PM_NO_DEVICE => Err(Error::NoDefaultDevice),
            id => Ok(id),
        }
    }
    /// Gets the `PortMidiDeviceId` for the default output, or
    /// `Err(Error::NoDefaultDevice)` if there isn't one.
    ///
    /// See the PortMidi documentation for details of how to set the default device
    pub fn default_output_device_id(&self) -> Result<PortMidiDeviceId> {
        match unsafe { ffi::Pm_GetDefaultOutputDeviceID() } {
            ffi::PM_NO_DEVICE => Err(Error::NoDefaultDevice),
            id => Ok(id),
        }
    }
    /// Returns the `DeviceInfo` for the given device id.
    pub fn device(&self, id: PortMidiDeviceId) -> Result<DeviceInfo> {
        DeviceInfo::new(id)
    }
    /// Returns the `DeviceInfo` for every known device, failing on the
    /// first device that cannot be queried.
    pub fn devices(&self) -> Result<Vec<DeviceInfo>> {
        let mut devices = Vec::with_capacity(self.device_cnt() as usize);
        for res in (0..self.device_cnt()).map(|id| self.device(id)) {
            match res {
                Ok(device) => devices.push(device),
                Err(err) => return Err(err),
            }
        }
        Ok(devices)
    }
    /// Opens an `InputPort` on the system default input device.
    pub fn default_input_port(&self) -> Result<InputPort> {
        let info = try!(self.default_input_device_id().and_then(|id| self.device(id)));
        InputPort::new(info, self.buffer_size)
    }
    /// Opens an `InputPort` for the given device, or
    /// `Err(Error::NotAnInputDevice)` if the device has no input capability.
    pub fn input_port(&self, device: DeviceInfo) -> Result<InputPort> {
        if device.is_input() {
            InputPort::new(device, self.buffer_size)
        } else {
            Err(Error::NotAnInputDevice)
        }
    }
    /// Opens an `OutputPort` on the system default output device.
    pub fn default_output_port(&self) -> Result<OutputPort> {
        let info = try!(self.default_output_device_id().and_then(|id| self.device(id)));
        OutputPort::new(info, self.buffer_size)
    }
    /// Opens an `OutputPort` for the given device, or
    /// `Err(Error::NotAnOutputDevice)` if the device has no output capability.
    // BUG FIX: this method previously declared `Result<InputPort>` and
    // constructed an `InputPort`, a copy-paste error that made it impossible
    // to open an output port for a specific device.
    pub fn output_port(&self, device: DeviceInfo) -> Result<OutputPort> {
        if device.is_output() {
            OutputPort::new(device, self.buffer_size)
        } else {
            Err(Error::NotAnOutputDevice)
        }
    }
}
impl Drop for PortMidi {
    /// Shuts the PortMidi library down when the handle goes away.
    fn drop(&mut self) {
        // `Drop` cannot propagate errors; a failed shutdown is only logged.
        // (Previously the `#[must_use]` Result of `map_err` was discarded.)
        if let Err(err) = Result::from(unsafe { ffi::Pm_Terminate() }) {
            println!("Could not terminate: {}", err);
        }
    }
}
|
// $ cargo bench --features full --bench file
#![feature(rustc_private, test)]
#![recursion_limit = "1024"]
#![allow(clippy::missing_panics_doc, clippy::must_use_candidate)]
extern crate test;
#[macro_use]
#[path = "../tests/macros/mod.rs"]
mod macros;
#[path = "../tests/common/mod.rs"]
mod common;
#[path = "../tests/repo/mod.rs"]
pub mod repo;
use proc_macro2::TokenStream;
use std::fs;
use std::str::FromStr;
use test::Bencher;
const FILE: &str = "tests/rust/library/core/src/str/mod.rs";
// Checks out the rust repo if needed, then lexes the fixture file into a
// `TokenStream`.
fn get_tokens() -> TokenStream {
    repo::clone_rust();
    let source = fs::read_to_string(FILE).unwrap();
    let stream = TokenStream::from_str(&source);
    stream.unwrap()
}
#[bench]
fn baseline(b: &mut Bencher) {
    // Measures clone + drop of the token stream alone, for comparison
    // against the parsing benchmarks.
    let tokens = get_tokens();
    b.iter(|| {
        let copy = tokens.clone();
        drop(copy);
    });
}
#[bench]
fn parse_file(b: &mut Bencher) {
    // Full `syn::File` parse per iteration; the clone cost is measured too.
    let tokens = get_tokens();
    b.iter(|| {
        let input = tokens.clone();
        syn::parse2::<syn::File>(input)
    });
}
Add a benchmark of TokenBuffer::inner_new
// $ cargo bench --features full --bench file
#![feature(rustc_private, test)]
#![recursion_limit = "1024"]
#![allow(
clippy::items_after_statements,
clippy::missing_panics_doc,
clippy::must_use_candidate
)]
extern crate test;
#[macro_use]
#[path = "../tests/macros/mod.rs"]
mod macros;
#[path = "../tests/common/mod.rs"]
mod common;
#[path = "../tests/repo/mod.rs"]
pub mod repo;
use proc_macro2::{Span, TokenStream};
use std::fs;
use std::str::FromStr;
use syn::parse::{ParseStream, Parser};
use test::Bencher;
const FILE: &str = "tests/rust/library/core/src/str/mod.rs";
// Checks out the rust repo if needed, then lexes the fixture file into a
// `TokenStream`.
fn get_tokens() -> TokenStream {
    repo::clone_rust();
    let source = fs::read_to_string(FILE).unwrap();
    let stream = TokenStream::from_str(&source);
    stream.unwrap()
}
#[bench]
fn baseline(b: &mut Bencher) {
    // Measures clone + drop of the token stream alone, for comparison
    // against the parsing benchmarks.
    let tokens = get_tokens();
    b.iter(|| {
        let copy = tokens.clone();
        drop(copy);
    });
}
#[bench]
fn create_token_buffer(b: &mut Bencher) {
    // A parser that rejects immediately: everything measured is the token
    // buffer construction `parse2` performs before invoking the parser.
    fn bail(_input: ParseStream) -> syn::Result<()> {
        Err(syn::Error::new(Span::call_site(), ""))
    }
    let tokens = get_tokens();
    b.iter(|| bail.parse2(tokens.clone()));
}
#[bench]
fn parse_file(b: &mut Bencher) {
    // Full `syn::File` parse per iteration; the clone cost is measured too.
    let tokens = get_tokens();
    b.iter(|| {
        let input = tokens.clone();
        syn::parse2::<syn::File>(input)
    });
}
|
// $ cargo bench --features full --bench rust
//
// Syn only, useful for profiling:
// $ RUSTFLAGS='--cfg syn_only' cargo build --release --features full --bench rust
#![cfg_attr(not(syn_only), feature(rustc_private))]
#![recursion_limit = "1024"]
#![allow(clippy::cast_lossless, clippy::unnecessary_wraps)]
#[macro_use]
#[path = "../tests/macros/mod.rs"]
mod macros;
#[path = "../tests/common/mod.rs"]
mod common;
#[path = "../tests/repo/mod.rs"]
mod repo;
use std::fs;
use std::time::{Duration, Instant};
#[cfg(not(syn_only))]
mod tokenstream_parse {
    use proc_macro2::TokenStream;
    use std::str::FromStr;
    // Lexing only: reports whether the source text tokenizes.
    pub fn bench(content: &str) -> Result<(), ()> {
        match TokenStream::from_str(content) {
            Ok(_) => Ok(()),
            Err(_) => Err(()),
        }
    }
}
mod syn_parse {
    // Full syn file parse; the resulting AST is discarded.
    pub fn bench(content: &str) -> Result<(), ()> {
        match syn::parse_file(content) {
            Ok(_) => Ok(()),
            Err(_) => Err(()),
        }
    }
}
#[cfg(not(syn_only))]
mod librustc_parse {
    extern crate rustc_data_structures;
    extern crate rustc_error_messages;
    extern crate rustc_errors;
    extern crate rustc_parse;
    extern crate rustc_session;
    extern crate rustc_span;
    use rustc_data_structures::sync::Lrc;
    use rustc_error_messages::FluentBundle;
    use rustc_errors::{emitter::Emitter, Diagnostic, Handler};
    use rustc_session::parse::ParseSess;
    use rustc_span::source_map::{FilePathMapping, SourceMap};
    use rustc_span::{edition::Edition, FileName};
    // Parses `content` as a whole crate with rustc's own parser; Ok/Err only
    // reports whether parsing succeeded, all diagnostics are suppressed.
    pub fn bench(content: &str) -> Result<(), ()> {
        // Emitter that drops every diagnostic so benchmark output stays clean.
        struct SilentEmitter;
        impl Emitter for SilentEmitter {
            fn emit_diagnostic(&mut self, _diag: &Diagnostic) {}
            fn source_map(&self) -> Option<&Lrc<SourceMap>> {
                None
            }
            fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
                None
            }
            fn fallback_fluent_bundle(&self) -> &FluentBundle {
                // Nothing should ever be rendered through the silent emitter.
                panic!("silent emitter attempted to translate a diagnostic");
            }
        }
        rustc_span::create_session_if_not_set_then(Edition::Edition2018, |_| {
            let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
            let emitter = Box::new(SilentEmitter);
            let handler = Handler::with_emitter(false, None, emitter);
            let sess = ParseSess::with_span_handler(handler, cm);
            if let Err(diagnostic) = rustc_parse::parse_crate_from_source_str(
                FileName::Custom("bench".to_owned()),
                content.to_owned(),
                &sess,
            ) {
                // Discard the error diagnostic without emitting it.
                diagnostic.cancel();
                return Err(());
            };
            Ok(())
        })
    }
}
#[cfg(not(syn_only))]
mod read_from_disk {
    // No-op codepath: `exec` already read the file, so this isolates the
    // directory-walk + I/O overhead shared by every benchmark.
    pub fn bench(content: &str) -> Result<(), ()> {
        let _unused = content;
        Ok(())
    }
}
// Runs `codepath` over every Rust source file under tests/rust/src, asserts
// that every file succeeded, and returns the total wall-clock time.
fn exec(mut codepath: impl FnMut(&str) -> Result<(), ()>) -> Duration {
    let start = Instant::now();
    let mut ok_count = 0usize;
    let mut seen = 0usize;
    let walk = walkdir::WalkDir::new("tests/rust/src")
        .into_iter()
        .filter_entry(repo::base_dir_filter);
    for entry in walk {
        let entry = entry.unwrap();
        let path = entry.path();
        if path.is_dir() {
            continue;
        }
        let content = fs::read_to_string(path).unwrap();
        match codepath(&content) {
            Ok(()) => ok_count += 1,
            Err(()) => eprintln!("FAIL {}", path.display()),
        }
        seen += 1;
    }
    assert_eq!(ok_count, seen);
    start.elapsed()
}
fn main() {
    repo::clone_rust();
    // Expands to an array of (name, bench-fn) pairs, keeping each
    // per-testcase cfg attribute attached to its generated tuple.
    macro_rules! testcases {
        ($($(#[$cfg:meta])* $name:ident,)*) => {
            [
                $(
                    $(#[$cfg])*
                    (stringify!($name), $name::bench as fn(&str) -> Result<(), ()>),
                )*
            ]
        };
    }
    // Report corpus size up front (skipped in syn-only profiling builds).
    #[cfg(not(syn_only))]
    {
        let mut lines = 0;
        let mut files = 0;
        exec(|content| {
            lines += content.lines().count();
            files += 1;
            Ok(())
        });
        eprintln!("\n{} lines in {} files", lines, files);
    }
    // Run and time each parser over the whole corpus.
    for (name, f) in testcases!(
        #[cfg(not(syn_only))]
        read_from_disk,
        #[cfg(not(syn_only))]
        tokenstream_parse,
        syn_parse,
        #[cfg(not(syn_only))]
        librustc_parse,
    ) {
        eprint!("{:20}", format!("{}:", name));
        let elapsed = exec(f);
        eprintln!(
            "elapsed={}.{:03}s",
            elapsed.as_secs(),
            elapsed.subsec_millis(),
        );
    }
    eprintln!();
}
Update benches to nightly-2022-08-18
// $ cargo bench --features full --bench rust
//
// Syn only, useful for profiling:
// $ RUSTFLAGS='--cfg syn_only' cargo build --release --features full --bench rust
#![cfg_attr(not(syn_only), feature(rustc_private))]
#![recursion_limit = "1024"]
#![allow(clippy::cast_lossless, clippy::unnecessary_wraps)]
#[macro_use]
#[path = "../tests/macros/mod.rs"]
mod macros;
#[path = "../tests/common/mod.rs"]
mod common;
#[path = "../tests/repo/mod.rs"]
mod repo;
use std::fs;
use std::time::{Duration, Instant};
#[cfg(not(syn_only))]
mod tokenstream_parse {
    use proc_macro2::TokenStream;
    use std::str::FromStr;
    // Lexing only: reports whether the source text tokenizes.
    pub fn bench(content: &str) -> Result<(), ()> {
        match TokenStream::from_str(content) {
            Ok(_) => Ok(()),
            Err(_) => Err(()),
        }
    }
}
mod syn_parse {
    // Full syn file parse; the resulting AST is discarded.
    pub fn bench(content: &str) -> Result<(), ()> {
        match syn::parse_file(content) {
            Ok(_) => Ok(()),
            Err(_) => Err(()),
        }
    }
}
#[cfg(not(syn_only))]
mod librustc_parse {
    extern crate rustc_data_structures;
    extern crate rustc_error_messages;
    extern crate rustc_errors;
    extern crate rustc_parse;
    extern crate rustc_session;
    extern crate rustc_span;
    use rustc_data_structures::sync::Lrc;
    use rustc_error_messages::FluentBundle;
    use rustc_errors::{emitter::Emitter, translation::Translate, Diagnostic, Handler};
    use rustc_session::parse::ParseSess;
    use rustc_span::source_map::{FilePathMapping, SourceMap};
    use rustc_span::{edition::Edition, FileName};
    // Parses `content` as a whole crate with rustc's own parser; Ok/Err only
    // reports whether parsing succeeded, all diagnostics are suppressed.
    pub fn bench(content: &str) -> Result<(), ()> {
        // Emitter that drops every diagnostic so benchmark output stays clean.
        struct SilentEmitter;
        impl Emitter for SilentEmitter {
            fn emit_diagnostic(&mut self, _diag: &Diagnostic) {}
            fn source_map(&self) -> Option<&Lrc<SourceMap>> {
                None
            }
        }
        // Translate was split out of Emitter in this nightly; both are needed.
        impl Translate for SilentEmitter {
            fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
                None
            }
            fn fallback_fluent_bundle(&self) -> &FluentBundle {
                // Nothing should ever be rendered through the silent emitter.
                panic!("silent emitter attempted to translate a diagnostic");
            }
        }
        rustc_span::create_session_if_not_set_then(Edition::Edition2018, |_| {
            let cm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
            let emitter = Box::new(SilentEmitter);
            let handler = Handler::with_emitter(false, None, emitter);
            let sess = ParseSess::with_span_handler(handler, cm);
            if let Err(diagnostic) = rustc_parse::parse_crate_from_source_str(
                FileName::Custom("bench".to_owned()),
                content.to_owned(),
                &sess,
            ) {
                // Discard the error diagnostic without emitting it.
                diagnostic.cancel();
                return Err(());
            };
            Ok(())
        })
    }
}
#[cfg(not(syn_only))]
mod read_from_disk {
    // No-op codepath: `exec` already read the file, so this isolates the
    // directory-walk + I/O overhead shared by every benchmark.
    pub fn bench(content: &str) -> Result<(), ()> {
        let _unused = content;
        Ok(())
    }
}
// Runs `codepath` over every Rust source file under tests/rust/src, asserts
// that every file succeeded, and returns the total wall-clock time.
fn exec(mut codepath: impl FnMut(&str) -> Result<(), ()>) -> Duration {
    let start = Instant::now();
    let mut ok_count = 0usize;
    let mut seen = 0usize;
    let walk = walkdir::WalkDir::new("tests/rust/src")
        .into_iter()
        .filter_entry(repo::base_dir_filter);
    for entry in walk {
        let entry = entry.unwrap();
        let path = entry.path();
        if path.is_dir() {
            continue;
        }
        let content = fs::read_to_string(path).unwrap();
        match codepath(&content) {
            Ok(()) => ok_count += 1,
            Err(()) => eprintln!("FAIL {}", path.display()),
        }
        seen += 1;
    }
    assert_eq!(ok_count, seen);
    start.elapsed()
}
fn main() {
    repo::clone_rust();
    // Expands to an array of (name, bench-fn) pairs, keeping each
    // per-testcase cfg attribute attached to its generated tuple.
    macro_rules! testcases {
        ($($(#[$cfg:meta])* $name:ident,)*) => {
            [
                $(
                    $(#[$cfg])*
                    (stringify!($name), $name::bench as fn(&str) -> Result<(), ()>),
                )*
            ]
        };
    }
    // Report corpus size up front (skipped in syn-only profiling builds).
    #[cfg(not(syn_only))]
    {
        let mut lines = 0;
        let mut files = 0;
        exec(|content| {
            lines += content.lines().count();
            files += 1;
            Ok(())
        });
        eprintln!("\n{} lines in {} files", lines, files);
    }
    // Run and time each parser over the whole corpus.
    for (name, f) in testcases!(
        #[cfg(not(syn_only))]
        read_from_disk,
        #[cfg(not(syn_only))]
        tokenstream_parse,
        syn_parse,
        #[cfg(not(syn_only))]
        librustc_parse,
    ) {
        eprint!("{:20}", format!("{}:", name));
        let elapsed = exec(f);
        eprintln!(
            "elapsed={}.{:03}s",
            elapsed.as_secs(),
            elapsed.subsec_millis(),
        );
    }
    eprintln!();
}
|
use Error;
use num::{BigInt, BigUint, FromPrimitive, Integer, One, ToPrimitive, Zero};
use num::bigint::Sign::{Minus, Plus};
use num::bigint::ToBigInt;
use std::cmp::*;
use std::cmp::Ordering::Equal;
use std::fmt;
use std::iter::repeat;
use std::ops::{Add, Div, Mul, Rem, Sub};
use std::str::FromStr;
// Sign mask for the flags field. A value of zero in this bit indicates a
// positive Decimal value, and a value of one in this bit indicates a
// negative Decimal value.
#[allow(overflowing_literals)]
const SIGN_MASK: i32 = 0x80000000;
// Scale mask for the flags field. This byte in the flags field contains
// the power of 10 to divide the Decimal value by. The scale byte must
// contain a value between 0 and 28 inclusive.
const SCALE_MASK: i32 = 0x00FF0000;
// Masks for extracting a single byte / the low 32 bits of a wider value.
const U8_MASK: i32 = 0x000000FF;
const I32_MASK: i64 = 0xFFFFFFFF;
// Number of bits scale is shifted by.
const SCALE_SHIFT: i32 = 16;
// The maximum supported precision
const MAX_PRECISION: u32 = 28;
// 96 bits of magnitude = 12 bytes spread across the lo/mid/hi words.
const MAX_BYTES: usize = 12;
const MAX_BITS: usize = 96;
lazy_static! {
    // lo/mid/hi of -1 is the all-ones bit pattern, i.e. the maximum 96-bit
    // magnitude; MIN additionally sets the sign bit in flags (-2147483648).
    static ref MIN: Decimal = Decimal { flags: -2147483648, lo: -1, mid: -1, hi: -1 };
    static ref MAX: Decimal = Decimal { flags: 0, lo: -1, mid: -1, hi: -1 };
}
// Fast access for 10^n where n is 0-9
static POWERS_10: [u32; 10] = [
    1,
    10,
    100,
    1000,
    10000,
    100000,
    1000000,
    10000000,
    100000000,
    1000000000,
];
// Fast access for 10^n where n is 10-19
static BIG_POWERS_10: [u64; 10] = [
    10000000000,
    100000000000,
    1000000000000,
    10000000000000,
    100000000000000,
    1000000000000000,
    10000000000000000,
    100000000000000000,
    1000000000000000000,
    10000000000000000000,
];
/// `Decimal` represents a 128 bit representation of a fixed-precision decimal number.
/// The finite set of values of type `Decimal` are of the form m / 10^e,
/// where m is an integer such that -2^96 <= m <= 2^96, and e is an integer
/// between 0 and 28 inclusive.
#[derive(Clone, Debug, Copy)]
pub struct Decimal {
    // Bits 0-15: unused
    // Bits 16-23: Contains "e", a value between 0-28 that indicates the scale
    // Bits 24-30: unused
    // Bit 31: the sign of the Decimal value, 0 meaning positive and 1 meaning negative.
    flags: i32,
    // lo, mid, hi hold the 96-bit unsigned magnitude `m` as three 32-bit
    // words; hi is the most significant word, lo the least.
    hi: i32,
    lo: i32,
    mid: i32,
}
#[allow(dead_code)]
impl Decimal {
/// Returns a `Decimal` with a 64 bit `m` representation and corresponding `e` scale.
///
/// # Arguments
///
/// * `num` - An i64 that represents the `m` portion of the decimal number
/// * `scale` - A u32 representing the `e` portion of the decimal number.
///
/// # Example
///
/// ```
/// use rust_decimal::Decimal;
/// let pi = Decimal::new(3141i64, 3u32);
/// ```
pub fn new(num: i64, scale: u32) -> Decimal {
if scale > MAX_PRECISION {
panic!("Scale exceeds the maximum precision allowed");
}
let flags: i32 = (scale as i32) << SCALE_SHIFT;
if num < 0 {
return Decimal {
flags: flags | SIGN_MASK,
hi: 0,
lo: (num.abs() & I32_MASK) as i32,
mid: ((num.abs() >> 32) & I32_MASK) as i32,
};
}
Decimal {
flags: flags,
hi: 0,
lo: (num & I32_MASK) as i32,
mid: ((num >> 32) & I32_MASK) as i32,
}
}
/// Returns the scale of the decimal number, otherwise known as `e`.
pub fn scale(&self) -> u32 {
((self.flags & SCALE_MASK) >> SCALE_SHIFT) as u32
}
/// An optimized method for changing the sign of a decimal number.
///
/// # Arguments
///
/// * `positive`: true if the resulting decimal should be positive.
pub fn set_sign(&mut self, positive: bool) {
if positive {
if self.is_negative() {
self.flags ^= SIGN_MASK;
}
} else {
self.flags |= SIGN_MASK;
}
}
/// Returns a serialized version of the decimal number.
/// The resulting byte array will have the following representation:
///
/// * Bytes 1-4: flags
/// * Bytes 5-8: lo portion of `m`
/// * Bytes 9-12: mid portion of `m`
/// * Bytes 13-16: high portion of `m`
pub fn serialize(&self) -> [u8; 16] {
[
(self.flags & U8_MASK) as u8,
((self.flags >> 8) & U8_MASK) as u8,
((self.flags >> 16) & U8_MASK) as u8,
((self.flags >> 24) & U8_MASK) as u8,
(self.lo & U8_MASK) as u8,
((self.lo >> 8) & U8_MASK) as u8,
((self.lo >> 16) & U8_MASK) as u8,
((self.lo >> 24) & U8_MASK) as u8,
(self.mid & U8_MASK) as u8,
((self.mid >> 8) & U8_MASK) as u8,
((self.mid >> 16) & U8_MASK) as u8,
((self.mid >> 24) & U8_MASK) as u8,
(self.hi & U8_MASK) as u8,
((self.hi >> 8) & U8_MASK) as u8,
((self.hi >> 16) & U8_MASK) as u8,
((self.hi >> 24) & U8_MASK) as u8,
]
}
/// Deserializes the given bytes into a decimal number.
/// The deserialized byte representation must be 16 bytes and adhere to the followign convention:
///
/// * Bytes 1-4: flags
/// * Bytes 5-8: lo portion of `m`
/// * Bytes 9-12: mid portion of `m`
/// * Bytes 13-16: high portion of `m`
pub fn deserialize(bytes: [u8; 16]) -> Decimal {
Decimal {
flags: (bytes[0] as i32) | (bytes[1] as i32) << 8 | (bytes[2] as i32) << 16 | (bytes[3] as i32) << 24,
lo: (bytes[4] as i32) | (bytes[5] as i32) << 8 | (bytes[6] as i32) << 16 | (bytes[7] as i32) << 24,
mid: (bytes[8] as i32) | (bytes[9] as i32) << 8 | (bytes[10] as i32) << 16 | (bytes[11] as i32) << 24,
hi: (bytes[12] as i32) | (bytes[13] as i32) << 8 | (bytes[14] as i32) << 16 | (bytes[15] as i32) << 24,
}
}
/// Returns `true` if the decimal is negative.
pub fn is_negative(&self) -> bool {
self.flags < 0
}
/// Returns `true` if the decimal is positive.
pub fn is_positive(&self) -> bool {
self.flags >= 0
}
/// Returns the minimum possible number that `Decimal` can represent.
pub fn min_value() -> Decimal {
*MIN
}
/// Returns the maximum possible number that `Decimal` can represent.
pub fn max_value() -> Decimal {
*MAX
}
/// Returns a new `Decimal` number with no fractional portion (i.e. an integer).
/// Rounding currently follows "Bankers Rounding" rules. e.g. 6.5 -> 6, 7.5 -> 8
pub fn round(&self) -> Decimal {
self.round_dp(0)
}
/// Returns a new `Decimal` number with the specified number of decimal points for fractional portion.
/// Rounding currently follows "Bankers Rounding" rules. e.g. 6.5 -> 6, 7.5 -> 8
///
/// # Arguments
/// * `dp`: the number of decimal points to round to.
pub fn round_dp(&self, dp: u32) -> Decimal {
let old_scale = self.scale();
if dp < old_scale && dp < 20 {
// Technically, it's 28...
// Short circuit for zero
if self.is_zero() {
return self.rescale(dp);
}
// Check to see if we need to add or subtract one.
// Some expected results assuming dp = 2 and old_scale = 3:
// 1.235 = 1.24
// 1.2361 = 1.24
// 1.2250 = 1.22
// 1.2251 = 1.23
// If we consider this example, we have the following number in `low`:
// 1235 (scale 3)
// 12361
// 12250
// 12251
let index = dp as usize;
let power10 = if dp < 10 {
Decimal::from_u32(POWERS_10[index]).unwrap()
} else {
Decimal::from_u64(BIG_POWERS_10[index - 10]).unwrap()
};
//println!("{} * {}", self.to_string(), power10.to_string());
let mut value = self.mul(power10);
// Do some midpoint rounding checks
// We're actually doing two things here.
// 1. Figuring out midpoint rounding when we're right on the boundary. e.g. 2.50000
// 2. Figuring out whether to add one or not e.g. 2.51
// We only need to search back a certain number. e.g. 2.500, round(2) search 1.
let raw = self.to_biguint();
// Get the decimal portion
// e.g. 2.5001, round(2) decimal portion = 01
let offset = self.rescale(dp).rescale(old_scale).to_biguint();
//println!("Raw: {}, Offset: {}", raw.to_string(), offset.to_string());
let decimal_portion = raw - offset;
// Rescale to zero so it's easier to work with
value = value.rescale(0u32);
// If the decimal_portion is zero then we round based on the other data
let mut cap = BigUint::from_u32(5u32).unwrap();
for _ in 0..(old_scale - dp - 1) {
cap = cap.mul(BigUint::from_u32(10u32).unwrap());
}
//println!("Cap {} Decimal Portion {}", cap, decimal_portion);
if decimal_portion == cap {
let even_or_odd = value.rem(Decimal::from_u32(2u32).unwrap());
if !even_or_odd.is_zero() {
value = value.add(Decimal::one());
}
} else if decimal_portion > cap {
// Doesn't matter about the decimal portion
if self.is_negative() {
value = value.sub(Decimal::one());
} else {
//println!("Decimal is greater than cap {} > {}", decimal_portion, cap);
value = value.add(Decimal::one());
}
}
// Divide by the power to get back
value.div(power10)
} else {
*self
}
}
pub(crate) fn rescale(&self, exp: u32) -> Decimal {
if exp > MAX_PRECISION {
panic!("Cannot have an exponent greater than {}", MAX_PRECISION);
}
let diff = exp as i32 - self.scale() as i32;
if diff == 0 {
// Since it's a copy type we can just return the self
return *self;
}
// 1.23 is scale 2. If we're making it 1.2300 scale 4
// Raw bit manipulation is hard (going up is easy, going down is hard)
// Let's just use BigUint to help out
let unsigned = self.to_biguint();
let result: BigUint;
// Figure out whether to multiply or divide
let power = Decimal::power_10(diff.abs() as usize);
if diff > 0 {
result = unsigned * power;
} else {
result = unsigned / power;
}
// Convert it back
let bytes = result.to_bytes_le();
Decimal::from_bytes_le(bytes, exp, self.is_negative())
}
fn power_10(exponent: usize) -> BigUint {
if exponent < 10 {
BigUint::from_u32(POWERS_10[exponent]).unwrap()
} else if exponent < 20 {
BigUint::from_u64(BIG_POWERS_10[exponent - 10]).unwrap()
} else {
let u32_exponent = exponent - 19; // -20 + 1 for getting the right u32 index
BigUint::from_u64(BIG_POWERS_10[9]).unwrap() *
BigUint::from_u32(POWERS_10[u32_exponent]).unwrap()
}
}
//
// These do not address scale. If you want that, rescale to 0 first.
//
pub(crate) fn to_biguint(&self) -> BigUint {
let bytes = self.unsigned_bytes_le();
BigUint::from_bytes_le(&bytes[..])
}
fn to_bigint(&self) -> BigInt {
let bytes = self.unsigned_bytes_le();
let sign = if self.is_negative() { Minus } else { Plus };
BigInt::from_bytes_le(sign, &bytes[..])
}
pub(crate) fn from_biguint(res: BigUint, scale: u32, negative: bool) -> Result<Decimal, Error> {
let bytes = res.to_bytes_le();
if bytes.len() > MAX_BYTES {
return Err(Error::new("Decimal Overflow"));
}
if scale > MAX_PRECISION {
return Err(Error::new("Scale exceeds maximum precision"));
}
Ok(Decimal::from_bytes_le(bytes, scale, negative))
}
fn unsigned_bytes_le(&self) -> Vec<u8> {
return vec![
(self.lo & U8_MASK) as u8,
((self.lo >> 8) & U8_MASK) as u8,
((self.lo >> 16) & U8_MASK) as u8,
((self.lo >> 24) & U8_MASK) as u8,
(self.mid & U8_MASK) as u8,
((self.mid >> 8) & U8_MASK) as u8,
((self.mid >> 16) & U8_MASK) as u8,
((self.mid >> 24) & U8_MASK) as u8,
(self.hi & U8_MASK) as u8,
((self.hi >> 8) & U8_MASK) as u8,
((self.hi >> 16) & U8_MASK) as u8,
((self.hi >> 24) & U8_MASK) as u8,
];
}
fn from_bytes_le(bytes: Vec<u8>, scale: u32, negative: bool) -> Decimal {
// Finally build the flags
let mut flags = 0i32;
let mut lo = 0i32;
let mut mid = 0i32;
let mut hi = 0i32;
if scale > 0 {
flags = (scale as i32) << SCALE_SHIFT;
}
if negative {
flags |= SIGN_MASK;
}
if bytes.len() > MAX_BYTES {
panic!("Decimal Overflow");
}
let mut pos = 0;
for b in bytes {
if pos < 4 {
lo |= (b as i32) << (pos * 8);
} else if pos < 8 {
mid |= (b as i32) << ((pos - 4) * 8);
} else {
hi |= (b as i32) << ((pos - 8) * 8);
}
// Move position
pos += 1;
}
// Build up each hi/lo
Decimal {
flags: flags,
hi: hi,
lo: lo,
mid: mid,
}
}
}
// Generates an infallible `From<$T>` impl for Decimal that delegates to the
// given `FromPrimitive` constructor.
macro_rules! impl_from {
    ($T:ty, $from_ty:path) => {
        impl From<$T> for Decimal {
            #[inline]
            fn from(t: $T) -> Decimal {
                $from_ty(t).unwrap()
            }
        }
    }
}
// `From` conversions for every integer primitive; each fits in 96 bits.
impl_from!(isize, FromPrimitive::from_isize);
impl_from!(i8, FromPrimitive::from_i8);
impl_from!(i16, FromPrimitive::from_i16);
impl_from!(i32, FromPrimitive::from_i32);
impl_from!(i64, FromPrimitive::from_i64);
impl_from!(usize, FromPrimitive::from_usize);
impl_from!(u8, FromPrimitive::from_u8);
impl_from!(u16, FromPrimitive::from_u16);
impl_from!(u32, FromPrimitive::from_u32);
impl_from!(u64, FromPrimitive::from_u64);
// Implements `$imp` for value/value operands by delegating to the
// reference/reference implementation.
macro_rules! forward_val_val_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<$res> for $res {
            type Output = $res;
            #[inline]
            fn $method(self, other: $res) -> $res {
                (&self).$method(&other)
            }
        }
    }
}
// Implements `$imp` for reference/value operands by delegating to the
// reference/reference implementation.
macro_rules! forward_ref_val_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl<'a> $imp<$res> for &'a $res {
            type Output = $res;
            #[inline]
            fn $method(self, other: $res) -> $res {
                self.$method(&other)
            }
        }
    }
}
// Implements `$imp` for value/reference operands by delegating to the
// reference/reference implementation.
macro_rules! forward_val_ref_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl<'a> $imp<&'a $res> for $res {
            type Output = $res;
            #[inline]
            fn $method(self, other: &$res) -> $res {
                (&self).$method(other)
            }
        }
    }
}
// Expands to all three forwarding impls above, so only the
// reference/reference operator needs a hand-written implementation.
macro_rules! forward_all_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        forward_val_val_binop!(impl $imp for $res, $method);
        forward_ref_val_binop!(impl $imp for $res, $method);
        forward_val_ref_binop!(impl $imp for $res, $method);
    };
}
impl Zero for Decimal {
    /// The additive identity: zero magnitude, positive sign, scale 0.
    fn zero() -> Decimal {
        Decimal {
            flags: 0,
            hi: 0,
            lo: 0,
            mid: 0,
        }
    }
    /// True when all 96 magnitude bits are clear; sign and scale bits in
    /// `flags` are not consulted.
    fn is_zero(&self) -> bool {
        self.lo == 0 && self.mid == 0 && self.hi == 0
    }
}
impl One for Decimal {
    /// The multiplicative identity: magnitude 1 at scale 0, positive sign.
    fn one() -> Decimal {
        Decimal {
            flags: 0,
            lo: 1,
            mid: 0,
            hi: 0,
        }
    }
}
impl FromStr for Decimal {
    type Err = Error;
    // Parses a plain decimal literal: optional leading +/- sign, decimal
    // digits, and at most one '.'; exponent notation is not accepted.
    fn from_str(value: &str) -> Result<Decimal, Self::Err> {
        if value.is_empty() {
            return Err(Error::new("Invalid decimal: empty"));
        }
        let mut offset = 0;
        let mut len = value.len();
        let chars: Vec<char> = value.chars().collect();
        let mut negative = false; // assume positive
        // handle the sign
        if chars[offset] == '-' {
            negative = true; // leading minus means negative
            offset += 1;
            len -= 1;
        } else if chars[offset] == '+' {
            // leading + allowed
            offset += 1;
            len -= 1;
        }
        // should now be at numeric part of the significand
        let mut dot_offset: i32 = -1; // '.' offset, -1 if none
        let cfirst = offset; // record start of integer
        let mut coeff = String::new(); // integer significand array
        while len > 0 {
            let c = chars[offset];
            if c.is_digit(10) {
                coeff.push(c);
                offset += 1;
                len -= 1;
                continue;
            }
            if c == '.' {
                if dot_offset >= 0 {
                    return Err(Error::new("Invalid decimal: two decimal points"));
                }
                dot_offset = offset as i32;
                offset += 1;
                len -= 1;
                continue;
            }
            return Err(Error::new("Invalid decimal: unknown character"));
        }
        // here when no characters left
        if coeff.is_empty() {
            return Err(Error::new("Invalid decimal: no digits found"));
        }
        // println!("coeff.len() {}, dot_offset {} cfirst {} negative {}", coeff.len(), dot_offset, cfirst, negative);
        let mut scale = 0u32;
        if dot_offset >= 0 {
            // we had a decimal place so set the scale:
            // digits after the '.' = total digits - digits before the '.'
            scale = (coeff.len() as u32) - (dot_offset as u32 - cfirst as u32);
        }
        // Parse the digit string (sans '.') into a big uint magnitude.
        let res = BigUint::from_str(&coeff[..]);
        if res.is_err() {
            return Err(Error::new("Failed to parse string"));
        }
        Decimal::from_biguint(res.unwrap(), scale, negative)
    }
}
impl FromPrimitive for Decimal {
    /// Converts an `i32` to a scale-0 Decimal; always succeeds.
    fn from_i32(n: i32) -> Option<Decimal> {
        let flags: i32;
        let value_copy: i32;
        if n >= 0 {
            flags = 0;
            value_copy = n;
        } else {
            flags = SIGN_MASK;
            // BUG FIX: was `-n`, which overflows (panicking in debug builds)
            // for i32::MIN. `wrapping_neg` leaves the 0x80000000 bit pattern,
            // which is exactly the 2^31 unsigned magnitude `lo` must hold.
            value_copy = n.wrapping_neg();
        }
        Some(Decimal {
            flags: flags,
            lo: value_copy,
            mid: 0,
            hi: 0,
        })
    }
    /// Converts an `i64` to a scale-0 Decimal; always succeeds.
    fn from_i64(n: i64) -> Option<Decimal> {
        let flags: i32;
        let value_copy: i64;
        if n >= 0 {
            flags = 0;
            value_copy = n;
        } else {
            flags = SIGN_MASK;
            // BUG FIX: was `-n`, which overflows for i64::MIN; the wrapped
            // bit pattern is the correct 2^63 magnitude across lo/mid.
            value_copy = n.wrapping_neg();
        }
        Some(Decimal {
            flags: flags,
            lo: value_copy as i32,
            mid: (value_copy >> 32) as i32,
            hi: 0,
        })
    }
    /// Converts a `u32` to a scale-0 Decimal; always succeeds.
    fn from_u32(n: u32) -> Option<Decimal> {
        Some(Decimal {
            flags: 0,
            lo: n as i32,
            mid: 0,
            hi: 0,
        })
    }
    /// Converts a `u64` to a scale-0 Decimal; always succeeds.
    fn from_u64(n: u64) -> Option<Decimal> {
        Some(Decimal {
            flags: 0,
            lo: n as i32,
            mid: (n >> 32) as i32,
            hi: 0,
        })
    }
}
impl ToPrimitive for Decimal {
    // Lossy conversion to f64: scale-0 values go through BigInt directly,
    // scaled values round-trip through the Display string and `str::parse`.
    fn to_f64(&self) -> Option<f64> {
        if self.scale() == 0 {
            let bytes = self.unsigned_bytes_le();
            let sign;
            if self.is_negative() {
                sign = Minus;
            } else {
                sign = Plus;
            }
            BigInt::from_bytes_le(sign, &bytes[..]).to_f64()
        } else {
            match self.to_string().parse::<f64>() {
                Ok(s) => Some(s),
                Err(_) => None
            }
        }
    }
    // Truncates the fraction (rescale to 0) before converting; None when the
    // integral part does not fit in an i64.
    fn to_i64(&self) -> Option<i64> {
        let d = self.rescale(0);
        // Convert to biguint and use that
        let bytes = d.unsigned_bytes_le();
        let sign;
        if self.is_negative() {
            sign = Minus;
        } else {
            sign = Plus;
        }
        BigInt::from_bytes_le(sign, &bytes[..]).to_i64()
    }
    // Truncates the fraction; None for negative values or when the truncated
    // magnitude needs more than 64 bits.
    fn to_u64(&self) -> Option<u64> {
        if self.is_negative() {
            return None;
        }
        // Rescale to 0 (truncate)
        let d = self.rescale(0);
        if d.hi != 0 {
            // Overflow
            return None;
        }
        // Convert to biguint and use that
        let bytes = d.unsigned_bytes_le();
        BigUint::from_bytes_le(&bytes[..]).to_u64()
    }
}
impl fmt::Display for Decimal {
    // Formats by printing the unsigned magnitude and inserting the decimal
    // point `scale` digits from the right. A `{:.N}` precision truncates or
    // zero-pads the fraction; sign/width/fill are left to `pad_integral`.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Get the scale - where we need to put the decimal point
        let mut scale = self.scale() as usize;
        // Get the whole number without decimal points (or signs)
        let uint = self.to_biguint();
        // Convert to a string and manipulate that (neg at front, inject decimal)
        let mut rep = uint.to_string();
        let len = rep.len();
        if let Some(n_dp) = f.precision() {
            if n_dp < scale {
                // Fewer fraction digits requested than stored: truncate (no rounding).
                rep.truncate(len - scale + n_dp)
            } else {
                // More requested: pad the fraction with trailing zeros.
                let zeros = repeat("0").take(n_dp - scale).collect::<String>();
                rep.push_str(&zeros[..]);
            }
            scale = n_dp;
        }
        let len = rep.len();
        // Inject the decimal point
        if scale > 0 {
            // Must be a low fractional
            if scale > len {
                let mut new_rep = String::new();
                let zeros = repeat("0").take(scale as usize - len).collect::<String>();
                new_rep.push_str("0.");
                new_rep.push_str(&zeros[..]);
                new_rep.push_str(&rep[..]);
                rep = new_rep;
            } else if scale == len {
                rep.insert(0, '.');
                rep.insert(0, '0');
            } else {
                rep.insert(len - scale as usize, '.');
            }
        } else if rep.is_empty() {
            // corner case for when we truncated everything in a low fractional
            rep.insert(0, '0');
        }
        f.pad_integral(self.is_positive(), "", &rep)
    }
}
fn scaled_biguints(me: &Decimal, other: &Decimal) -> (BigUint, BigUint, u32) {
// Scale to the max
let s_scale = me.scale();
let o_scale = other.scale();
if s_scale > o_scale {
(
me.to_biguint(),
other.rescale(s_scale).to_biguint(),
s_scale,
)
} else if o_scale > s_scale {
(
me.rescale(o_scale).to_biguint(),
other.to_biguint(),
o_scale,
)
} else {
(me.to_biguint(), other.to_biguint(), s_scale)
}
}
fn scaled_bigints(me: &Decimal, other: &Decimal) -> (BigInt, BigInt, u32) {
// Scale to the max
let s_scale = me.scale();
let o_scale = other.scale();
if s_scale > o_scale {
(me.to_bigint(), other.rescale(s_scale).to_bigint(), s_scale)
} else if o_scale > s_scale {
(me.rescale(o_scale).to_bigint(), other.to_bigint(), o_scale)
} else {
(me.to_bigint(), other.to_bigint(), s_scale)
}
}
forward_all_binop!(impl Add for Decimal, add);
impl<'a, 'b> Add<&'b Decimal> for &'a Decimal {
    type Output = Decimal;
    /// Adds by normalising both operands to a common scale, then doing
    /// sign-aware magnitude arithmetic on `BigUint`s.
    #[inline]
    fn add(self, other: &Decimal) -> Decimal {
        // Get big uints to work with
        let (left, right, scale) = scaled_biguints(self, other);
        // Now we have the big boys - do a quick add
        let l_negative = self.is_negative();
        let r_negative = other.is_negative();
        let result;
        let is_negative;
        if l_negative && r_negative {
            // Same sign: magnitudes add, sign is preserved.
            result = left + right;
            is_negative = true;
        } else if !l_negative && !r_negative {
            result = left + right;
            is_negative = false;
        } else {
            // Mixed signs: subtract the smaller magnitude from the larger.
            //  1 + -2 (l < r, -r => r - l, -)
            //  2 + -1 (l > r, -r => l - r, +)
            //  -1 + 2 (l < r, -l => r - l, +)
            //  -2 + 1 (l > r, -l => l - r, -)
            if r_negative {
                if left < right {
                    result = right - left;
                    is_negative = true;
                } else if left > right {
                    result = left - right;
                    is_negative = false;
                } else {
                    // Exact cancellation: zero is always flagged positive.
                    result = BigUint::zero();
                    is_negative = false;
                }
            } else {
                // l_negative
                if left < right {
                    result = right - left;
                    is_negative = false;
                } else if left > right {
                    result = left - right;
                    is_negative = true;
                } else {
                    result = BigUint::zero();
                    is_negative = false;
                }
            }
        }
        // Convert it back
        let bytes = result.to_bytes_le();
        Decimal::from_bytes_le(bytes, scale, is_negative)
    }
}
forward_all_binop!(impl Sub for Decimal, sub);
impl<'a, 'b> Sub<&'b Decimal> for &'a Decimal {
    type Output = Decimal;
    /// Subtracts by normalising both operands to a common scale, then doing
    /// sign-aware magnitude arithmetic on `BigUint`s.
    #[inline]
    fn sub(self, other: &Decimal) -> Decimal {
        // Get big uints to work with
        let (left, right, scale) = scaled_biguints(self, other);
        // Now we have the big boys - do a quick subtraction
        // Both Positive:
        //  1 - 2 = -1
        //  2 - 1 = 1
        // Both negative:
        //  -1 - -2 = 1
        //  -2 - -1 = -1
        // Mismatch
        //  -1 - 2 = -3
        //  -2 - 1 = -3
        //  1 - -2 = 3
        //  2 - -1 = 3
        let l_negative = self.is_negative();
        let r_negative = other.is_negative();
        let result: BigUint;
        let is_negative: bool;
        if l_negative ^ r_negative {
            // Opposite signs: magnitudes add and the left sign wins.
            result = left + right;
            is_negative = l_negative;
        } else {
            // Same signs: subtract smaller magnitude from larger.
            if left > right {
                result = left - right;
                is_negative = l_negative && r_negative;
            } else {
                result = right - left;
                is_negative = !l_negative && !r_negative;
            }
        }
        // Convert it back
        let bytes = result.to_bytes_le();
        // The `!result.is_zero()` guard keeps zero flagged positive.
        Decimal::from_bytes_le(bytes, scale, is_negative && !result.is_zero())
    }
}
forward_all_binop!(impl Mul for Decimal, mul);
impl<'a, 'b> Mul<&'b Decimal> for &'a Decimal {
    type Output = Decimal;
    /// Multiplies the two significands and adds the scales, then truncates
    /// least-significant digits while the product does not fit in 96 bits.
    ///
    /// # Panics
    ///
    /// Panics if the product cannot be represented even after dropping every
    /// available fractional digit, or if the combined scale exceeds 28.
    #[inline]
    fn mul(self, other: &Decimal) -> Decimal {
        // Get big uints to work with
        let left = self.to_biguint();
        let right = other.to_biguint();
        // Easy!
        let mut result = left * right; // Has the potential to overflow below if > 2^96
        let mut scale = self.scale() + other.scale();
        // The result may be an overflow of what we can comfortably represent in 96 bits
        // We can only do this if we have a scale to work with
        if result.bits() > MAX_BITS {
            // Try to truncate until we're ok. Note: use the named MAX_BITS
            // constant here as well (the original mixed in a literal `96`
            // for the same limit).
            let ten = BigUint::from_i32(10).unwrap();
            while scale > 0 && result.bits() > MAX_BITS {
                result = result / &ten;
                scale -= 1;
            }
        }
        // Last check for overflow
        if result.bits() > MAX_BITS {
            panic!("Decimal overflow from multiplication");
        }
        if scale > MAX_PRECISION {
            // Then what? Truncate?
            panic!("Scale overflow; cannot represent exp {}", scale);
        }
        // Negativity is based on xor. e.g.
        //  1 * 2 = 2
        //  -1 * 2 = -2
        //  1 * -2 = -2
        //  -1 * -2 = 2
        let bytes = result.to_bytes_le();
        Decimal::from_bytes_le(bytes, scale, self.is_negative() ^ other.is_negative())
    }
}
forward_all_binop!(impl Div for Decimal, div);
impl<'a, 'b> Div<&'b Decimal> for &'a Decimal {
    type Output = Decimal;
    /// Long division: computes the integral quotient, then generates
    /// fractional digits one at a time until the remainder is exhausted or
    /// the precision budget runs out (i.e. the result truncates).
    ///
    /// # Panics
    ///
    /// Panics if `other` is zero.
    #[inline]
    fn div(self, other: &Decimal) -> Decimal {
        if other.is_zero() {
            panic!("Division by zero");
        }
        // Shortcircuit the basic cases
        if self.is_zero() {
            return Decimal::zero();
        }
        let mut rem: BigUint;
        let ten = BigUint::from_i32(10).unwrap();
        let mut fractional: Vec<u8> = Vec::new();
        // Get the values
        let (left, right, _) = scaled_biguints(self, other);
        // The algorithm for this is:
        //  (integral, rem) = div_rem(x, y)
        //  while rem > 0 {
        //      (part, rem) = div_rem(rem * 10, y)
        //      fractional_part.push(part)
        //  }
        // This could be a really big number.
        //  Consider 9,999,999,999,999/10,000,000,000,000
        //  This would be (0, 9,999,999,999,999)
        let (i, r) = left.div_rem(&right);
        let mut integral = i;
        // Count the integral digits so the fractional loop knows how much
        // precision budget remains.
        let length = if integral.is_zero() {
            0usize
        } else {
            integral.to_string().len()
        };
        rem = r;
        // This is slightly too agressive. But it is just being safe. We need to check against Decimal::MAX
        while !rem.is_zero() && fractional.len() + length < MAX_PRECISION as usize {
            let rem_carried = &ten * rem;
            let (frac, r) = rem_carried.div_rem(&right);
            // Each step yields exactly one base-10 digit (0-9), so the
            // unwrap of to_u8 cannot fail.
            fractional.push(frac.to_u8().unwrap());
            rem = r;
        }
        // Add on the fractional part
        let scale = fractional.len();
        for f in fractional {
            integral = integral * &ten + BigUint::from_u8(f).unwrap();
        }
        let bytes = integral.to_bytes_le();
        // Negative only if one or the other is negative
        Decimal::from_bytes_le(
            bytes,
            scale as u32,
            self.is_negative() ^ other.is_negative(),
        )
    }
}
forward_all_binop!(impl Rem for Decimal, rem);
impl<'a, 'b> Rem<&'b Decimal> for &'a Decimal {
    type Output = Decimal;
    /// Remainder of truncated division, computed at the operands' common scale.
    ///
    /// # Panics
    ///
    /// Panics if `other` is zero.
    #[inline]
    fn rem(self, other: &Decimal) -> Decimal {
        if other.is_zero() {
            panic!("Division by zero");
        }
        // Shortcircuit the basic case
        if self.is_zero() {
            return Decimal::zero();
        }
        // Make sure they're scaled
        let (left, right, scale) = scaled_bigints(self, other);
        // Since we're just getting the remainder, we simply need to do a standard mod
        let (_, remainder) = left.div_rem(&right);
        // The BigInt remainder carries the dividend's sign; recover it from
        // the sign returned by to_bytes_le instead of assuming positive.
        let (sign, bytes) = remainder.to_bytes_le();
        Decimal::from_bytes_le(bytes, scale, sign == Minus)
    }
}
impl PartialEq for Decimal {
    #[inline]
    fn eq(&self, other: &Decimal) -> bool {
        // Delegate to `Ord::cmp` so that values with different scales
        // (e.g. 1.2 and 1.20) compare equal.
        self.cmp(other) == Equal
    }
}
// Equality via `cmp` is reflexive, symmetric and transitive, so the total
// equality marker trait holds.
impl Eq for Decimal {}
impl PartialOrd for Decimal {
    #[inline]
    fn partial_cmp(&self, other: &Decimal) -> Option<Ordering> {
        // Decimal ordering is total, so this is always `Some`.
        Some(self.cmp(other))
    }
}
impl Ord for Decimal {
    /// Total ordering: sign first, then significands compared at a common scale.
    fn cmp(&self, other: &Decimal) -> Ordering {
        // Quick exit if major differences
        // NOTE(review): a negative zero (sign bit set, significand zero) would
        // compare Less than +0 here — confirm callers never construct -0.
        if self.is_negative() && !other.is_negative() {
            return Ordering::Less;
        } else if !self.is_negative() && other.is_negative() {
            return Ordering::Greater;
        }
        // If we have 1.23 and 1.2345 then we have
        // 123 scale 2 and 12345 scale 4
        // We need to convert the first to
        // 12300 scale 4 so we can compare equally
        let s = self.scale() as u32;
        let o = other.scale() as u32;
        let si = self.to_bigint();
        let oi = other.to_bigint();
        if s > o {
            // Lift `other` to `self`'s scale; signed BigInts make the
            // comparison direct (no separate sign handling needed).
            let power = Decimal::power_10((s - o) as usize).to_bigint().unwrap();
            let d = oi * power;
            si.cmp(&d)
        } else if s < o {
            let power = Decimal::power_10((o - s) as usize).to_bigint().unwrap();
            let d = si * power;
            d.cmp(&oi)
        } else {
            si.cmp(&oi)
        }
    }
}
#[cfg(test)]
mod test {
    // Tests on private methods.
    //
    // All public tests should go under `tests/`.
    use super::*;
    #[test]
    fn rescale_integer_up() {
        // "1" rescaled to `scale` should gain exactly `scale` trailing zeros.
        for scale in 1..25 {
            let d = "1".parse::<Decimal>().unwrap().rescale(scale);
            let mut s = String::from("1.");
            for _ in 0..scale {
                s.push('0');
            }
            assert_eq!(d.to_string(), s);
        }
    }
    #[test]
    fn rescale_integer_down() {
        // Dropping zero-valued fractional digits loses no information.
        for scale in 1..25 {
            let d = "1.000000000000000000000000"
                .parse::<Decimal>()
                .unwrap()
                .rescale(scale);
            let mut s = String::from("1.");
            for _ in 0..scale {
                s.push('0');
            }
            assert_eq!(d.to_string(), s);
        }
    }
    #[test]
    fn rescale_float_up() {
        for scale in 1..25 {
            let d = "1.1".parse::<Decimal>().unwrap().rescale(scale);
            let mut s = String::from("1.1");
            for _ in 0..(scale - 1) {
                s.push('0');
            }
            assert_eq!(d.to_string(), s);
        }
    }
    #[test]
    fn rescale_float_down() {
        // Rescaling down truncates: the trailing ...01 digit is dropped.
        for scale in 1..24 {
            let d = "1.000000000000000000000001"
                .parse::<Decimal>()
                .unwrap()
                .rescale(scale);
            let mut s = String::from("1.");
            for _ in 0..(scale) {
                s.push('0');
            }
            assert_eq!(d.to_string(), s);
        }
    }
    #[test]
    fn round_complex_number() {
        // This is 1982.2708333333333 (raw field values; flags encodes scale 24... per serialized form)
        let a = Decimal {
            flags: 1572864,
            hi: 107459117,
            lo: -2075830955,
            mid: 849254895,
        };
        let b = a.round_dp(2u32);
        assert_eq!("1982.27", b.to_string());
    }
}
Renamed variables for clarity
use Error;
use num::{BigInt, BigUint, FromPrimitive, Integer, One, ToPrimitive, Zero};
use num::bigint::Sign::{Minus, Plus};
use num::bigint::ToBigInt;
use std::cmp::*;
use std::cmp::Ordering::Equal;
use std::fmt;
use std::iter::repeat;
use std::ops::{Add, Div, Mul, Rem, Sub};
use std::str::FromStr;
// Sign mask for the flags field. A value of zero in this bit indicates a
// positive Decimal value, and a value of one in this bit indicates a
// negative Decimal value.
#[allow(overflowing_literals)]
const SIGN_MASK: i32 = 0x80000000;
// Scale mask for the flags field. This byte in the flags field contains
// the power of 10 to divide the Decimal value by. The scale byte must
// contain a value between 0 and 28 inclusive.
const SCALE_MASK: i32 = 0x00FF0000;
// Masks for extracting one byte / one full 32-bit word during (de)serialization.
const U8_MASK: i32 = 0x000000FF;
const I32_MASK: i64 = 0xFFFFFFFF;
// Number of bits scale is shifted by.
const SCALE_SHIFT: i32 = 16;
// The maximum supported precision
const MAX_PRECISION: u32 = 28;
// The significand is 96 bits = 12 bytes (lo/mid/hi, 4 bytes each).
const MAX_BYTES: usize = 12;
const MAX_BITS: usize = 96;
lazy_static! {
    // Numeric extremes: an all-ones 96-bit significand (each -1 word is
    // bit pattern 0xFFFFFFFF) with the sign bit set (MIN) or clear (MAX).
    static ref MIN: Decimal = Decimal { flags: -2147483648, lo: -1, mid: -1, hi: -1 };
    static ref MAX: Decimal = Decimal { flags: 0, lo: -1, mid: -1, hi: -1 };
}
// Fast access for 10^n where n is 0-9
static POWERS_10: [u32; 10] = [
    1,
    10,
    100,
    1000,
    10000,
    100000,
    1000000,
    10000000,
    100000000,
    1000000000,
];
// Fast access for 10^n where n is 10-19
static BIG_POWERS_10: [u64; 10] = [
    10000000000,
    100000000000,
    1000000000000,
    10000000000000,
    100000000000000,
    1000000000000000,
    10000000000000000,
    100000000000000000,
    1000000000000000000,
    10000000000000000000,
];
/// `Decimal` represents a 128 bit representation of a fixed-precision decimal number.
/// The finite set of values of type `Decimal` are of the form m / 10^e,
/// where m is an integer such that -2^96 <= m <= 2^96, and e is an integer
/// between 0 and 28 inclusive.
#[derive(Clone, Debug, Copy)]
pub struct Decimal {
    // Bits 0-15: unused
    // Bits 16-23: Contains "e", a value between 0-28 that indicates the scale
    // Bits 24-30: unused
    // Bit 31: the sign of the Decimal value, 0 meaning positive and 1 meaning negative.
    flags: i32,
    // The lo, mid, hi, and flags fields contain the representation of the
    // Decimal value as a 96-bit integer. Each i32 is treated as an unsigned
    // 32-bit word; `lo` holds the least significant word.
    hi: i32,
    lo: i32,
    mid: i32,
}
#[allow(dead_code)]
impl Decimal {
    /// Returns a `Decimal` with a 64 bit `m` representation and corresponding `e` scale.
    ///
    /// # Arguments
    ///
    /// * `num` - An i64 that represents the `m` portion of the decimal number
    /// * `scale` - A u32 representing the `e` portion of the decimal number.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    /// let pi = Decimal::new(3141i64, 3u32);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `scale` exceeds the maximum precision (28).
    pub fn new(num: i64, scale: u32) -> Decimal {
        if scale > MAX_PRECISION {
            panic!("Scale exceeds the maximum precision allowed");
        }
        let flags: i32 = (scale as i32) << SCALE_SHIFT;
        if num < 0 {
            // NOTE(review): `num.abs()` overflows for `i64::MIN` (panics in
            // debug builds) — confirm callers never pass that value.
            return Decimal {
                flags: flags | SIGN_MASK,
                hi: 0,
                lo: (num.abs() & I32_MASK) as i32,
                mid: ((num.abs() >> 32) & I32_MASK) as i32,
            };
        }
        Decimal {
            flags: flags,
            hi: 0,
            lo: (num & I32_MASK) as i32,
            mid: ((num >> 32) & I32_MASK) as i32,
        }
    }
    /// Returns the scale of the decimal number, otherwise known as `e`.
    pub fn scale(&self) -> u32 {
        ((self.flags & SCALE_MASK) >> SCALE_SHIFT) as u32
    }
    /// An optimized method for changing the sign of a decimal number.
    ///
    /// # Arguments
    ///
    /// * `positive`: true if the resulting decimal should be positive.
    pub fn set_sign(&mut self, positive: bool) {
        if positive {
            if self.is_negative() {
                // XOR only flips the sign bit when it is currently set.
                self.flags ^= SIGN_MASK;
            }
        } else {
            self.flags |= SIGN_MASK;
        }
    }
    /// Returns a serialized version of the decimal number.
    /// The resulting byte array will have the following representation:
    ///
    /// * Bytes 1-4: flags
    /// * Bytes 5-8: lo portion of `m`
    /// * Bytes 9-12: mid portion of `m`
    /// * Bytes 13-16: high portion of `m`
    pub fn serialize(&self) -> [u8; 16] {
        // Little-endian byte order within each 32-bit word.
        [
            (self.flags & U8_MASK) as u8,
            ((self.flags >> 8) & U8_MASK) as u8,
            ((self.flags >> 16) & U8_MASK) as u8,
            ((self.flags >> 24) & U8_MASK) as u8,
            (self.lo & U8_MASK) as u8,
            ((self.lo >> 8) & U8_MASK) as u8,
            ((self.lo >> 16) & U8_MASK) as u8,
            ((self.lo >> 24) & U8_MASK) as u8,
            (self.mid & U8_MASK) as u8,
            ((self.mid >> 8) & U8_MASK) as u8,
            ((self.mid >> 16) & U8_MASK) as u8,
            ((self.mid >> 24) & U8_MASK) as u8,
            (self.hi & U8_MASK) as u8,
            ((self.hi >> 8) & U8_MASK) as u8,
            ((self.hi >> 16) & U8_MASK) as u8,
            ((self.hi >> 24) & U8_MASK) as u8,
        ]
    }
    /// Deserializes the given bytes into a decimal number.
    /// The deserialized byte representation must be 16 bytes and adhere to the following convention:
    ///
    /// * Bytes 1-4: flags
    /// * Bytes 5-8: lo portion of `m`
    /// * Bytes 9-12: mid portion of `m`
    /// * Bytes 13-16: high portion of `m`
    pub fn deserialize(bytes: [u8; 16]) -> Decimal {
        Decimal {
            flags: (bytes[0] as i32) | (bytes[1] as i32) << 8 | (bytes[2] as i32) << 16 | (bytes[3] as i32) << 24,
            lo: (bytes[4] as i32) | (bytes[5] as i32) << 8 | (bytes[6] as i32) << 16 | (bytes[7] as i32) << 24,
            mid: (bytes[8] as i32) | (bytes[9] as i32) << 8 | (bytes[10] as i32) << 16 | (bytes[11] as i32) << 24,
            hi: (bytes[12] as i32) | (bytes[13] as i32) << 8 | (bytes[14] as i32) << 16 | (bytes[15] as i32) << 24,
        }
    }
    /// Returns `true` if the decimal is negative.
    pub fn is_negative(&self) -> bool {
        // The sign bit is bit 31, so a set sign bit makes `flags` < 0.
        self.flags < 0
    }
    /// Returns `true` if the decimal is positive.
    pub fn is_positive(&self) -> bool {
        self.flags >= 0
    }
    /// Returns the minimum possible number that `Decimal` can represent.
    pub fn min_value() -> Decimal {
        *MIN
    }
    /// Returns the maximum possible number that `Decimal` can represent.
    pub fn max_value() -> Decimal {
        *MAX
    }
    /// Returns a new `Decimal` number with no fractional portion (i.e. an integer).
    /// Rounding currently follows "Bankers Rounding" rules. e.g. 6.5 -> 6, 7.5 -> 8
    pub fn round(&self) -> Decimal {
        self.round_dp(0)
    }
    /// Returns a new `Decimal` number with the specified number of decimal points for fractional portion.
    /// Rounding currently follows "Bankers Rounding" rules. e.g. 6.5 -> 6, 7.5 -> 8
    ///
    /// # Arguments
    /// * `dp`: the number of decimal points to round to.
    pub fn round_dp(&self, dp: u32) -> Decimal {
        let old_scale = self.scale();
        // Only act when digits will actually be dropped; `dp < 20` also keeps
        // the power-of-ten table lookups below in range.
        if dp < old_scale && dp < 20 {
            // Technically, it's 28...
            // Short circuit for zero
            if self.is_zero() {
                return self.rescale(dp);
            }
            // Check to see if we need to add or subtract one.
            // Some expected results assuming dp = 2 and old_scale = 3:
            //  1.235 = 1.24
            //  1.2361 = 1.24
            //  1.2250 = 1.22
            //  1.2251 = 1.23
            // If we consider this example, we have the following number in `low`:
            //  1235 (scale 3)
            //  12361
            //  12250
            //  12251
            let index = dp as usize;
            let power10 = if dp < 10 {
                Decimal::from_u32(POWERS_10[index]).unwrap()
            } else {
                Decimal::from_u64(BIG_POWERS_10[index - 10]).unwrap()
            };
            // Shift the digits we intend to keep into the integral part.
            let mut value = self.mul(power10);
            // Do some midpoint rounding checks
            // We're actually doing two things here.
            //  1. Figuring out midpoint rounding when we're right on the boundary. e.g. 2.50000
            //  2. Figuring out whether to add one or not e.g. 2.51
            // We only need to search back a certain number. e.g. 2.500, round(2) search 1.
            let raw = self.to_biguint();
            // Get the decimal portion
            //  e.g. 2.5001, round(2) decimal portion = 01
            let offset = self.rescale(dp).rescale(old_scale).to_biguint();
            let decimal_portion = raw - offset;
            // Rescale to zero so it's easier to work with
            value = value.rescale(0u32);
            // If the decimal_portion is zero then we round based on the other data
            // `cap` is 5 * 10^(digits dropped - 1): the exact midpoint value.
            let mut cap = BigUint::from_u32(5u32).unwrap();
            for _ in 0..(old_scale - dp - 1) {
                cap = cap.mul(BigUint::from_u32(10u32).unwrap());
            }
            if decimal_portion == cap {
                // Exactly on the midpoint: round half to even (bankers rounding).
                let even_or_odd = value.rem(Decimal::from_u32(2u32).unwrap());
                if !even_or_odd.is_zero() {
                    value = value.add(Decimal::one());
                }
            } else if decimal_portion > cap {
                // Doesn't matter about the decimal portion
                if self.is_negative() {
                    value = value.sub(Decimal::one());
                } else {
                    value = value.add(Decimal::one());
                }
            }
            // Divide by the power to get back
            value.div(power10)
        } else {
            *self
        }
    }
    // Changes the scale to `exp`, multiplying or (truncating) dividing the
    // significand as required. Panics if `exp` exceeds the maximum precision.
    pub(crate) fn rescale(&self, exp: u32) -> Decimal {
        if exp > MAX_PRECISION {
            panic!("Cannot have an exponent greater than {}", MAX_PRECISION);
        }
        let diff = exp as i32 - self.scale() as i32;
        if diff == 0 {
            // Since it's a copy type we can just return the self
            return *self;
        }
        // 1.23 is scale 2. If we're making it 1.2300 scale 4
        // Raw bit manipulation is hard (going up is easy, going down is hard)
        // Let's just use BigUint to help out
        let unsigned = self.to_biguint();
        let result: BigUint;
        // Figure out whether to multiply or divide
        let power = Decimal::power_10(diff.abs() as usize);
        if diff > 0 {
            result = unsigned * power;
        } else {
            // Scaling down truncates; no rounding is performed here.
            result = unsigned / power;
        }
        // Convert it back
        let bytes = result.to_bytes_le();
        Decimal::from_bytes_le(bytes, exp, self.is_negative())
    }
    // Returns 10^exponent as a BigUint, using the lookup tables directly for
    // exponents below 20 and composing two table entries for 20 and above.
    fn power_10(exponent: usize) -> BigUint {
        if exponent < 10 {
            BigUint::from_u32(POWERS_10[exponent]).unwrap()
        } else if exponent < 20 {
            BigUint::from_u64(BIG_POWERS_10[exponent - 10]).unwrap()
        } else {
            let u32_exponent = exponent - 19; // -20 + 1 for getting the right u32 index
            BigUint::from_u64(BIG_POWERS_10[9]).unwrap() *
                BigUint::from_u32(POWERS_10[u32_exponent]).unwrap()
        }
    }
    //
    // These do not address scale. If you want that, rescale to 0 first.
    //
    pub(crate) fn to_biguint(&self) -> BigUint {
        let bytes = self.unsigned_bytes_le();
        BigUint::from_bytes_le(&bytes[..])
    }
    fn to_bigint(&self) -> BigInt {
        let bytes = self.unsigned_bytes_le();
        let sign = if self.is_negative() { Minus } else { Plus };
        BigInt::from_bytes_le(sign, &bytes[..])
    }
    // Builds a Decimal from an unsigned magnitude, returning an error when
    // the magnitude needs more than 96 bits or the scale is out of range.
    pub(crate) fn from_biguint(res: BigUint, scale: u32, negative: bool) -> Result<Decimal, Error> {
        let bytes = res.to_bytes_le();
        if bytes.len() > MAX_BYTES {
            return Err(Error::new("Decimal Overflow"));
        }
        if scale > MAX_PRECISION {
            return Err(Error::new("Scale exceeds maximum precision"));
        }
        Ok(Decimal::from_bytes_le(bytes, scale, negative))
    }
    // Little-endian bytes of the 96-bit significand (always 12 bytes).
    fn unsigned_bytes_le(&self) -> Vec<u8> {
        return vec![
            (self.lo & U8_MASK) as u8,
            ((self.lo >> 8) & U8_MASK) as u8,
            ((self.lo >> 16) & U8_MASK) as u8,
            ((self.lo >> 24) & U8_MASK) as u8,
            (self.mid & U8_MASK) as u8,
            ((self.mid >> 8) & U8_MASK) as u8,
            ((self.mid >> 16) & U8_MASK) as u8,
            ((self.mid >> 24) & U8_MASK) as u8,
            (self.hi & U8_MASK) as u8,
            ((self.hi >> 8) & U8_MASK) as u8,
            ((self.hi >> 16) & U8_MASK) as u8,
            ((self.hi >> 24) & U8_MASK) as u8,
        ];
    }
    // Builds a Decimal from up to 12 little-endian magnitude bytes plus an
    // explicit scale and sign. Panics when given more than 12 bytes.
    fn from_bytes_le(bytes: Vec<u8>, scale: u32, negative: bool) -> Decimal {
        // Finally build the flags
        let mut flags = 0i32;
        let mut lo = 0i32;
        let mut mid = 0i32;
        let mut hi = 0i32;
        if scale > 0 {
            flags = (scale as i32) << SCALE_SHIFT;
        }
        if negative {
            flags |= SIGN_MASK;
        }
        if bytes.len() > MAX_BYTES {
            panic!("Decimal Overflow");
        }
        let mut pos = 0;
        for b in bytes {
            // Bytes 0-3 -> lo, 4-7 -> mid, 8-11 -> hi.
            if pos < 4 {
                lo |= (b as i32) << (pos * 8);
            } else if pos < 8 {
                mid |= (b as i32) << ((pos - 4) * 8);
            } else {
                hi |= (b as i32) << ((pos - 8) * 8);
            }
            // Move position
            pos += 1;
        }
        // Build up each hi/lo
        Decimal {
            flags: flags,
            hi: hi,
            lo: lo,
            mid: mid,
        }
    }
}
macro_rules! impl_from {
    // Implements `From<$T> for Decimal` by delegating to the given
    // `FromPrimitive` constructor; the `unwrap` is safe because every
    // primitive integer used below fits in the 96-bit significand.
    ($T:ty, $from_ty:path) => {
        impl From<$T> for Decimal {
            #[inline]
            fn from(t: $T) -> Decimal {
                $from_ty(t).unwrap()
            }
        }
    }
}
// Infallible `From` conversions for every primitive integer type.
impl_from!(isize, FromPrimitive::from_isize);
impl_from!(i8, FromPrimitive::from_i8);
impl_from!(i16, FromPrimitive::from_i16);
impl_from!(i32, FromPrimitive::from_i32);
impl_from!(i64, FromPrimitive::from_i64);
impl_from!(usize, FromPrimitive::from_usize);
impl_from!(u8, FromPrimitive::from_u8);
impl_from!(u16, FromPrimitive::from_u16);
impl_from!(u32, FromPrimitive::from_u32);
impl_from!(u64, FromPrimitive::from_u64);
// The forward_* macros lift the canonical `&T op &T` operator impl (defined
// once per operator below) onto the remaining operand combinations.
macro_rules! forward_val_val_binop {
    // T op T: borrow both sides and delegate.
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<$res> for $res {
            type Output = $res;
            #[inline]
            fn $method(self, other: $res) -> $res {
                (&self).$method(&other)
            }
        }
    }
}
macro_rules! forward_ref_val_binop {
    // &T op T: borrow the right-hand side and delegate.
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl<'a> $imp<$res> for &'a $res {
            type Output = $res;
            #[inline]
            fn $method(self, other: $res) -> $res {
                self.$method(&other)
            }
        }
    }
}
macro_rules! forward_val_ref_binop {
    // T op &T: borrow the left-hand side and delegate.
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl<'a> $imp<&'a $res> for $res {
            type Output = $res;
            #[inline]
            fn $method(self, other: &$res) -> $res {
                (&self).$method(other)
            }
        }
    }
}
macro_rules! forward_all_binop {
    // Expands all three forwarding impls for one operator.
    (impl $imp:ident for $res:ty, $method:ident) => {
        forward_val_val_binop!(impl $imp for $res, $method);
        forward_ref_val_binop!(impl $imp for $res, $method);
        forward_val_ref_binop!(impl $imp for $res, $method);
    };
}
impl Zero for Decimal {
    /// A `Decimal` is zero when the entire 96-bit significand is zero; the
    /// flags word (sign and scale) is deliberately ignored, so e.g. a value
    /// with scale 2 and significand 0 ("0.00") still counts as zero.
    fn is_zero(&self) -> bool {
        (self.lo, self.mid, self.hi) == (0, 0, 0)
    }
    /// The additive identity: positive, scale 0, significand 0.
    fn zero() -> Decimal {
        Decimal { flags: 0, lo: 0, mid: 0, hi: 0 }
    }
}
impl One for Decimal {
    /// The multiplicative identity: positive, scale 0, significand 1.
    fn one() -> Decimal {
        Decimal { flags: 0, lo: 1, mid: 0, hi: 0 }
    }
}
impl FromStr for Decimal {
    type Err = Error;
    /// Parses a decimal from a string of the form `[+|-]digits[.digits]`.
    /// Scientific notation is not supported.
    fn from_str(value: &str) -> Result<Decimal, Self::Err> {
        if value.is_empty() {
            return Err(Error::new("Invalid decimal: empty"));
        }
        let mut offset = 0;
        let mut len = value.len();
        let chars: Vec<char> = value.chars().collect();
        let mut negative = false; // assume positive
        // handle the sign
        if chars[offset] == '-' {
            negative = true; // leading minus means negative
            offset += 1;
            len -= 1;
        } else if chars[offset] == '+' {
            // leading + allowed
            offset += 1;
            len -= 1;
        }
        // should now be at numeric part of the significand
        let mut dot_offset: i32 = -1; // '.' offset, -1 if none
        let cfirst = offset; // record start of integer
        let mut coeff = String::new(); // integer significand array
        while len > 0 {
            let c = chars[offset];
            if c.is_digit(10) {
                // Digits are collected without the decimal point; the scale
                // is reconstructed from the dot position afterwards.
                coeff.push(c);
                offset += 1;
                len -= 1;
                continue;
            }
            if c == '.' {
                if dot_offset >= 0 {
                    return Err(Error::new("Invalid decimal: two decimal points"));
                }
                dot_offset = offset as i32;
                offset += 1;
                len -= 1;
                continue;
            }
            return Err(Error::new("Invalid decimal: unknown character"));
        }
        // here when no characters left
        if coeff.is_empty() {
            return Err(Error::new("Invalid decimal: no digits found"));
        }
        let mut scale = 0u32;
        if dot_offset >= 0 {
            // we had a decimal place so set the scale:
            // (total digits) - (digits before the dot) = fractional digits.
            scale = (coeff.len() as u32) - (dot_offset as u32 - cfirst as u32);
        }
        // Parse this into a big uint
        let res = BigUint::from_str(&coeff[..]);
        if res.is_err() {
            return Err(Error::new("Failed to parse string"));
        }
        Decimal::from_biguint(res.unwrap(), scale, negative)
    }
}
// Infallible constructions from primitive integers: the unsigned magnitude
// is stored across lo/mid (hi is never needed for 64-bit input) and the sign
// lands in the `flags` sign bit. Every method returns `Some`.
impl FromPrimitive for Decimal {
    fn from_i32(n: i32) -> Option<Decimal> {
        let flags: i32;
        let value_copy: i32;
        if n >= 0 {
            flags = 0;
            value_copy = n;
        } else {
            flags = SIGN_MASK;
            // `wrapping_neg` avoids the debug-build overflow panic that plain
            // `-n` hits for `i32::MIN`; the wrapped bit pattern (0x80000000)
            // is exactly the unsigned magnitude 2^31 we need to store in `lo`.
            value_copy = n.wrapping_neg();
        }
        Some(Decimal {
            flags: flags,
            lo: value_copy,
            mid: 0,
            hi: 0,
        })
    }
    fn from_i64(n: i64) -> Option<Decimal> {
        let flags: i32;
        let value_copy: i64;
        if n >= 0 {
            flags = 0;
            value_copy = n;
        } else {
            flags = SIGN_MASK;
            // As above: for `i64::MIN` the wrapped value's low and high
            // 32-bit words are the correct magnitude (2^63) split lo/mid.
            value_copy = n.wrapping_neg();
        }
        Some(Decimal {
            flags: flags,
            lo: value_copy as i32,
            mid: (value_copy >> 32) as i32,
            hi: 0,
        })
    }
    fn from_u32(n: u32) -> Option<Decimal> {
        // `as i32` keeps the raw bit pattern; the word is unsigned internally.
        Some(Decimal {
            flags: 0,
            lo: n as i32,
            mid: 0,
            hi: 0,
        })
    }
    fn from_u64(n: u64) -> Option<Decimal> {
        Some(Decimal {
            flags: 0,
            lo: n as i32,
            mid: (n >> 32) as i32,
            hi: 0,
        })
    }
}
impl ToPrimitive for Decimal {
    fn to_f64(&self) -> Option<f64> {
        if self.scale() == 0 {
            // Integer case: go through BigInt for a direct conversion.
            let bytes = self.unsigned_bytes_le();
            let sign;
            if self.is_negative() {
                sign = Minus;
            } else {
                sign = Plus;
            }
            BigInt::from_bytes_le(sign, &bytes[..]).to_f64()
        } else {
            // Fractional case: round-trip through the string representation
            // and let the float parser perform the (inexact) conversion.
            match self.to_string().parse::<f64>() {
                Ok(s) => Some(s),
                Err(_) => None
            }
        }
    }
    fn to_i64(&self) -> Option<i64> {
        // Truncate any fractional part by rescaling to 0 first.
        let d = self.rescale(0);
        // Convert to biguint and use that
        let bytes = d.unsigned_bytes_le();
        let sign;
        if self.is_negative() {
            sign = Minus;
        } else {
            sign = Plus;
        }
        // BigInt::to_i64 yields None if the truncated value overflows i64.
        BigInt::from_bytes_le(sign, &bytes[..]).to_i64()
    }
    fn to_u64(&self) -> Option<u64> {
        if self.is_negative() {
            return None;
        }
        // Rescale to 0 (truncate)
        let d = self.rescale(0);
        if d.hi != 0 {
            // Overflow: more than 64 bits of magnitude remain.
            return None;
        }
        // Convert to biguint and use that
        let bytes = d.unsigned_bytes_le();
        BigUint::from_bytes_le(&bytes[..]).to_u64()
    }
}
impl fmt::Display for Decimal {
    /// Formats the decimal in plain (non-scientific) notation, honouring the
    /// formatter's `precision` (fractional digit count) and delegating
    /// width/fill/sign handling to `pad_integral`.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Get the scale - where we need to put the decimal point
        let mut scale = self.scale() as usize;
        // Get the whole number without decimal points (or signs)
        let uint = self.to_biguint();
        // Convert to a string and manipulate that (neg at front, inject decimal)
        let mut rep = uint.to_string();
        let len = rep.len();
        if let Some(n_dp) = f.precision() {
            if n_dp < scale {
                // Drop the excess fractional digits. `saturating_sub` guards
                // against the significand having fewer digits than are being
                // dropped (e.g. "0.001" printed with precision 1): the old
                // `len - scale + n_dp` computed `len - scale` first, which
                // underflowed usize and panicked in that case.
                rep.truncate(len.saturating_sub(scale - n_dp))
            } else {
                // Pad with zeros to reach the requested precision.
                let zeros = repeat("0").take(n_dp - scale).collect::<String>();
                rep.push_str(&zeros[..]);
            }
            scale = n_dp;
        }
        let len = rep.len();
        // Inject the decimal point
        if scale > 0 {
            // Must be a low fractional (fewer digits than the scale)
            if scale > len {
                let mut new_rep = String::new();
                let zeros = repeat("0").take(scale as usize - len).collect::<String>();
                new_rep.push_str("0.");
                new_rep.push_str(&zeros[..]);
                new_rep.push_str(&rep[..]);
                rep = new_rep;
            } else if scale == len {
                rep.insert(0, '.');
                rep.insert(0, '0');
            } else {
                rep.insert(len - scale as usize, '.');
            }
        } else if rep.is_empty() {
            // corner case for when we truncated everything in a low fractional
            rep.insert(0, '0');
        }
        f.pad_integral(self.is_positive(), "", &rep)
    }
}
fn scaled_biguints(me: &Decimal, other: &Decimal) -> (BigUint, BigUint, u32) {
// Scale to the max
let s_scale = me.scale();
let o_scale = other.scale();
if s_scale > o_scale {
(
me.to_biguint(),
other.rescale(s_scale).to_biguint(),
s_scale,
)
} else if o_scale > s_scale {
(
me.rescale(o_scale).to_biguint(),
other.to_biguint(),
o_scale,
)
} else {
(me.to_biguint(), other.to_biguint(), s_scale)
}
}
fn scaled_bigints(me: &Decimal, other: &Decimal) -> (BigInt, BigInt, u32) {
// Scale to the max
let s_scale = me.scale();
let o_scale = other.scale();
if s_scale > o_scale {
(me.to_bigint(), other.rescale(s_scale).to_bigint(), s_scale)
} else if o_scale > s_scale {
(me.rescale(o_scale).to_bigint(), other.to_bigint(), o_scale)
} else {
(me.to_bigint(), other.to_bigint(), s_scale)
}
}
forward_all_binop!(impl Add for Decimal, add);
impl<'a, 'b> Add<&'b Decimal> for &'a Decimal {
    type Output = Decimal;
    /// Adds by normalising both operands to a common scale, then doing
    /// sign-aware magnitude arithmetic on `BigUint`s.
    #[inline]
    fn add(self, other: &Decimal) -> Decimal {
        // Get big uints to work with
        let (left, right, scale) = scaled_biguints(self, other);
        // Now we have the big boys - do a quick add
        let l_negative = self.is_negative();
        let r_negative = other.is_negative();
        let result;
        let is_negative;
        if l_negative && r_negative {
            // Same sign: magnitudes add, sign is preserved.
            result = left + right;
            is_negative = true;
        } else if !l_negative && !r_negative {
            result = left + right;
            is_negative = false;
        } else {
            // Mixed signs: subtract the smaller magnitude from the larger.
            //  1 + -2 (l < r, -r => r - l, -)
            //  2 + -1 (l > r, -r => l - r, +)
            //  -1 + 2 (l < r, -l => r - l, +)
            //  -2 + 1 (l > r, -l => l - r, -)
            if r_negative {
                if left < right {
                    result = right - left;
                    is_negative = true;
                } else if left > right {
                    result = left - right;
                    is_negative = false;
                } else {
                    // Exact cancellation: zero is always flagged positive.
                    result = BigUint::zero();
                    is_negative = false;
                }
            } else {
                // l_negative
                if left < right {
                    result = right - left;
                    is_negative = false;
                } else if left > right {
                    result = left - right;
                    is_negative = true;
                } else {
                    result = BigUint::zero();
                    is_negative = false;
                }
            }
        }
        // Convert it back
        let bytes = result.to_bytes_le();
        Decimal::from_bytes_le(bytes, scale, is_negative)
    }
}
forward_all_binop!(impl Sub for Decimal, sub);
impl<'a, 'b> Sub<&'b Decimal> for &'a Decimal {
    type Output = Decimal;
    /// Subtracts by normalising both operands to a common scale, then doing
    /// sign-aware magnitude arithmetic on `BigUint`s.
    #[inline]
    fn sub(self, other: &Decimal) -> Decimal {
        // Get big uints to work with
        let (left, right, scale) = scaled_biguints(self, other);
        // Now we have the big boys - do a quick subtraction
        // Both Positive:
        //  1 - 2 = -1
        //  2 - 1 = 1
        // Both negative:
        //  -1 - -2 = 1
        //  -2 - -1 = -1
        // Mismatch
        //  -1 - 2 = -3
        //  -2 - 1 = -3
        //  1 - -2 = 3
        //  2 - -1 = 3
        let l_negative = self.is_negative();
        let r_negative = other.is_negative();
        let result: BigUint;
        let is_negative: bool;
        if l_negative ^ r_negative {
            // Opposite signs: magnitudes add and the left sign wins.
            result = left + right;
            is_negative = l_negative;
        } else {
            // Same signs: subtract smaller magnitude from larger.
            if left > right {
                result = left - right;
                is_negative = l_negative && r_negative;
            } else {
                result = right - left;
                is_negative = !l_negative && !r_negative;
            }
        }
        // Convert it back
        let bytes = result.to_bytes_le();
        // The `!result.is_zero()` guard keeps zero flagged positive.
        Decimal::from_bytes_le(bytes, scale, is_negative && !result.is_zero())
    }
}
forward_all_binop!(impl Mul for Decimal, mul);
impl<'a, 'b> Mul<&'b Decimal> for &'a Decimal {
    type Output = Decimal;
    /// Multiplies the two significands and adds the scales, then truncates
    /// least-significant digits while the product does not fit in 96 bits.
    ///
    /// # Panics
    ///
    /// Panics if the product cannot be represented even after dropping every
    /// available fractional digit, or if the combined scale exceeds 28.
    #[inline]
    fn mul(self, other: &Decimal) -> Decimal {
        // Get big uints to work with
        let left = self.to_biguint();
        let right = other.to_biguint();
        // Easy!
        let mut result = left * right; // Has the potential to overflow below if > 2^96
        let mut scale = self.scale() + other.scale();
        // The result may be an overflow of what we can comfortably represent in 96 bits
        // We can only do this if we have a scale to work with
        if result.bits() > MAX_BITS {
            // Try to truncate until we're ok. Note: use the named MAX_BITS
            // constant here as well (the original mixed in a literal `96`
            // for the same limit).
            let ten = BigUint::from_i32(10).unwrap();
            while scale > 0 && result.bits() > MAX_BITS {
                result = result / &ten;
                scale -= 1;
            }
        }
        // Last check for overflow
        if result.bits() > MAX_BITS {
            panic!("Decimal overflow from multiplication");
        }
        if scale > MAX_PRECISION {
            // Then what? Truncate?
            panic!("Scale overflow; cannot represent exp {}", scale);
        }
        // Negativity is based on xor. e.g.
        //  1 * 2 = 2
        //  -1 * 2 = -2
        //  1 * -2 = -2
        //  -1 * -2 = 2
        let bytes = result.to_bytes_le();
        Decimal::from_bytes_le(bytes, scale, self.is_negative() ^ other.is_negative())
    }
}
forward_all_binop!(impl Div for Decimal, div);
impl<'a, 'b> Div<&'b Decimal> for &'a Decimal {
    type Output = Decimal;
    /// Long division: computes the integral quotient, then generates
    /// fractional digits one at a time until the remainder is exhausted or
    /// the precision budget runs out (i.e. the result truncates).
    ///
    /// # Panics
    ///
    /// Panics if `other` is zero.
    #[inline]
    fn div(self, other: &Decimal) -> Decimal {
        if other.is_zero() {
            panic!("Division by zero");
        }
        // Shortcircuit the basic cases
        if self.is_zero() {
            return Decimal::zero();
        }
        let mut rem: BigUint;
        let ten = BigUint::from_i32(10).unwrap();
        let mut fractional: Vec<u8> = Vec::new();
        // Get the values
        let (left, right, _) = scaled_biguints(self, other);
        // The algorithm for this is:
        //  (integral, rem) = div_rem(x, y)
        //  while rem > 0 {
        //      (part, rem) = div_rem(rem * 10, y)
        //      fractional_part.push(part)
        //  }
        // This could be a really big number.
        //  Consider 9,999,999,999,999/10,000,000,000,000
        //  This would be (0, 9,999,999,999,999)
        let (i, r) = left.div_rem(&right);
        let mut integral = i;
        // Count the integral digits so the fractional loop knows how much
        // precision budget remains.
        let length = if integral.is_zero() {
            0usize
        } else {
            integral.to_string().len()
        };
        rem = r;
        // This is slightly too agressive. But it is just being safe. We need to check against Decimal::MAX
        while !rem.is_zero() && fractional.len() + length < MAX_PRECISION as usize {
            let rem_carried = &ten * rem;
            let (frac, r) = rem_carried.div_rem(&right);
            // Each step yields exactly one base-10 digit (0-9), so the
            // unwrap of to_u8 cannot fail.
            fractional.push(frac.to_u8().unwrap());
            rem = r;
        }
        // Add on the fractional part
        let scale = fractional.len();
        for f in fractional {
            integral = integral * &ten + BigUint::from_u8(f).unwrap();
        }
        let bytes = integral.to_bytes_le();
        // Negative only if one or the other is negative
        Decimal::from_bytes_le(
            bytes,
            scale as u32,
            self.is_negative() ^ other.is_negative(),
        )
    }
}
forward_all_binop!(impl Rem for Decimal, rem);

impl<'a, 'b> Rem<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    /// Computes the remainder of dividing `self` by `other`, performed on
    /// their signed integer representations at a common scale.
    ///
    /// # Panics
    /// Panics if `other` is zero.
    #[inline]
    fn rem(self, other: &Decimal) -> Decimal {
        if other.is_zero() {
            panic!("Division by zero");
        }

        // Shortcircuit the basic case
        if self.is_zero() {
            return Decimal::zero();
        }

        // Make sure they're scaled: signed big integers at a common scale.
        let (left, right, scale) = scaled_bigints(self, other);
        //println!("{}, {}", left, right);

        // Since we're just getting the remainder, we simply need to do a standard mod
        let (_, remainder) = left.div_rem(&right);

        // Remainder is always positive? NOTE(review): `to_bytes_le` returns
        // the sign alongside the magnitude, and the sign is passed through
        // below, so a negative remainder is handled either way.
        let (sign, bytes) = remainder.to_bytes_le();
        Decimal::from_bytes_le(bytes, scale, sign == Minus)
    }
}
impl PartialEq for Decimal {
#[inline]
fn eq(&self, other: &Decimal) -> bool {
self.cmp(other) == Equal
}
}
impl Eq for Decimal {}
impl PartialOrd for Decimal {
    /// Decimals form a total order, so this always returns `Some`.
    #[inline]
    fn partial_cmp(&self, other: &Decimal) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}
impl Ord for Decimal {
    /// Total ordering: sign first, then mantissas compared at a common scale.
    fn cmp(&self, other: &Decimal) -> Ordering {
        // A negative number always sorts below a non-negative one.
        match (self.is_negative(), other.is_negative()) {
            (true, false) => return Ordering::Less,
            (false, true) => return Ordering::Greater,
            _ => {}
        }

        // If we have 1.23 (123, scale 2) and 1.2345 (12345, scale 4), bring
        // the first up to 12300 at scale 4 so the integers compare equally.
        let lhs_scale = self.scale() as u32;
        let rhs_scale = other.scale() as u32;
        let lhs = self.to_bigint();
        let rhs = other.to_bigint();

        if lhs_scale == rhs_scale {
            lhs.cmp(&rhs)
        } else if lhs_scale > rhs_scale {
            let factor = Decimal::power_10((lhs_scale - rhs_scale) as usize).to_bigint().unwrap();
            lhs.cmp(&(rhs * factor))
        } else {
            let factor = Decimal::power_10((rhs_scale - lhs_scale) as usize).to_bigint().unwrap();
            (lhs * factor).cmp(&rhs)
        }
    }
}
#[cfg(test)]
mod test {
    // Tests on private methods.
    //
    // All public tests should go under `tests/`.

    use super::*;

    #[test]
    fn rescale_integer_up() {
        // Rescaling "1" to a larger scale should pad with trailing zeros.
        for scale in 1..25 {
            let d = "1".parse::<Decimal>().unwrap().rescale(scale);

            // Expected string is "1." followed by `scale` zeros.
            let mut s = String::from("1.");
            for _ in 0..scale {
                s.push('0');
            }

            assert_eq!(d.to_string(), s);
        }
    }

    #[test]
    fn rescale_integer_down() {
        // Rescaling down from 24 fractional zeros should truncate cleanly.
        for scale in 1..25 {
            let d = "1.000000000000000000000000"
                .parse::<Decimal>()
                .unwrap()
                .rescale(scale);

            let mut s = String::from("1.");
            for _ in 0..scale {
                s.push('0');
            }

            assert_eq!(d.to_string(), s);
        }
    }

    #[test]
    fn rescale_float_up() {
        // Rescaling "1.1" up should keep the 1 digit and pad with zeros.
        for scale in 1..25 {
            let d = "1.1".parse::<Decimal>().unwrap().rescale(scale);

            let mut s = String::from("1.1");
            for _ in 0..(scale - 1) {
                s.push('0');
            }

            assert_eq!(d.to_string(), s);
        }
    }

    #[test]
    fn rescale_float_down() {
        // Rescaling down drops the trailing 1 digit, leaving only zeros.
        for scale in 1..24 {
            let d = "1.000000000000000000000001"
                .parse::<Decimal>()
                .unwrap()
                .rescale(scale);

            let mut s = String::from("1.");
            for _ in 0..(scale) {
                s.push('0');
            }

            assert_eq!(d.to_string(), s);
        }
    }

    #[test]
    fn round_complex_number() {
        // Construct the value from raw parts; this is 1982.2708333333333.
        // NOTE(review): `flags`/`hi`/`mid`/`lo` are the raw internal encoding;
        // presumably `flags` packs the scale and sign — confirm against the
        // struct definition.
        let a = Decimal {
            flags: 1572864,
            hi: 107459117,
            lo: -2075830955,
            mid: 849254895,
        };
        let b = a.round_dp(2u32);
        assert_eq!("1982.27", b.to_string());
    }
}
|
// Copyright 2017 Adam Greig
// Licensed under the MIT license, see LICENSE for details.
//! This module provides decoding functions for turning codewords into data.
//!
//! Please refer to the `decode_ms` and `decode_bf` methods on
//! [`LDPCCode`](../codes/enum.LDPCCode.html) for more details.
use core::i8;
use core::i16;
use core::i32;
use core::f32;
use core::f64;
use core::ops::{Add,AddAssign,Neg,Sub};
use ::codes::LDPCCode;
// `f64::abs()`/`f32::abs()` are not available with `no_std` (they live in
// `std`, not `core`), so implement absolute value by clearing the IEEE-754
// sign bit. This is used right in the hottest decoder loop and is much faster
// than the obvious branching `if f < 0 { -f } else { f }`.

/// Absolute value of an `f64`, computed by masking off the sign bit.
///
/// Uses `to_bits`/`from_bits` (both in `core`), which express the same
/// bit-level reinterpretation as the previous raw pointer casts without
/// needing `unsafe`.
fn fabs(f: f64) -> f64 {
    f64::from_bits(f.to_bits() & 0x7FFF_FFFF_FFFF_FFFF)
}
/// Absolute value of an `f32`, computed by masking off the IEEE-754 sign bit.
///
/// `f32::abs()` is not available with `no_std`; `to_bits`/`from_bits` are
/// `core` functions expressing the same bit-level reinterpretation as raw
/// pointer casts, but without `unsafe`.
fn fabsf(f: f32) -> f32 {
    f32::from_bits(f.to_bits() & 0x7FFF_FFFF)
}
/// Trait for types that the min-sum decoder can operate with.
///
/// Implemented for `i8`, `i16`, `i32`, `f32`, and `f64`.
///
/// The arithmetic bounds (`Add`, `AddAssign`, `Neg`, `Sub`) are exactly the
/// operations the decoder performs on LLR values, and `PartialOrd` supports
/// the minimum searches in the min-sum update.
pub trait DecodeFrom:
    Sized + Clone + Copy + PartialEq + PartialOrd
    + Add + AddAssign + Neg<Output=Self> + Sub<Output=Self>
{
    /// 1 in T
    fn one() -> Self;
    /// 0 in T
    fn zero() -> Self;
    /// Maximum value T can represent, used to initialise minimum accumulators
    fn maxval() -> Self;
    /// Absolute value of self
    fn abs(&self) -> Self;
}
// DecodeFrom for the signed integer types: delegate `abs` to the inherent
// integer method.
impl DecodeFrom for i8 {
    #[inline]
    fn one() -> i8 { 1 }
    #[inline]
    fn zero() -> i8 { 0 }
    #[inline]
    fn maxval() -> i8 { i8::MAX }
    #[inline]
    fn abs(&self) -> i8 { (*self).abs() }
}

impl DecodeFrom for i16 {
    #[inline]
    fn one() -> i16 { 1 }
    #[inline]
    fn zero() -> i16 { 0 }
    #[inline]
    fn maxval() -> i16 { i16::MAX }
    #[inline]
    fn abs(&self) -> i16 { (*self).abs() }
}

impl DecodeFrom for i32 {
    #[inline]
    fn one() -> i32 { 1 }
    #[inline]
    fn zero() -> i32 { 0 }
    #[inline]
    fn maxval() -> i32 { i32::MAX }
    #[inline]
    fn abs(&self) -> i32 { (*self).abs() }
}

// DecodeFrom for the float types: `abs` uses the local no_std helpers.
impl DecodeFrom for f32 {
    #[inline]
    fn one() -> f32 { 1.0 }
    #[inline]
    fn zero() -> f32 { 0.0 }
    #[inline]
    fn maxval() -> f32 { f32::MAX }
    #[inline]
    fn abs(&self) -> f32 { fabsf(*self) }
}

impl DecodeFrom for f64 {
    #[inline]
    fn one() -> f64 { 1.0 }
    #[inline]
    fn zero() -> f64 { 0.0 }
    #[inline]
    fn maxval() -> f64 { f64::MAX }
    #[inline]
    fn abs(&self) -> f64 { fabs(*self) }
}
impl LDPCCode {
    /// Get the length of [u8] required for the working area of `decode_bf`.
    ///
    /// Equal to n + punctured_bits (one byte per variable, including
    /// punctured variables).
    pub fn decode_bf_working_len(&self) -> usize {
        self.n() + self.punctured_bits()
    }

    /// Get the length of [T] required for the working area of `decode_ms`.
    ///
    /// Equal to 2 * paritycheck_sum + 3n + 3p - 2k:
    /// u and v (paritycheck_sum each), va (n+p), and ui_min1/ui_min2
    /// (n+p-k each) — see the `split_at_mut` calls in `decode_ms`.
    pub fn decode_ms_working_len(&self) -> usize {
        (2 * self.paritycheck_sum() as usize + 3*self.n() + 3*self.punctured_bits() - 2*self.k())
    }

    /// Get the length of [u8] required for the working_u8 area of `decode_ms`.
    ///
    /// Equal to (n+p-k)/8: one bit per parity check, packed into bytes.
    pub fn decode_ms_working_u8_len(&self) -> usize {
        (self.n() + self.punctured_bits() - self.k()) / 8
    }

    /// Get the length of [u8] required for the output of any decoder.
    ///
    /// Equal to (n+punctured_bits)/8.
    pub fn output_len(&self) -> usize {
        (self.n() + self.punctured_bits()) / 8
    }
/// Hard erasure decoding algorithm.
///
/// Used to preprocess punctured codes before attempting bit-flipping decoding,
/// as the bit-flipping algorithm cannot handle erasures.
///
/// The basic idea is:
///
/// * For each erased bit `a`:
/// * For each check `i` that `a` is associated with:
/// * If `a` is the only erasure that `i` is associated with,
/// then compute the parity of `i`, and cast a vote for the
/// value of `a` that would give even parity
/// * Otherwise ignore `i`
/// * If there is a majority vote, set `a` to the winning value, and mark
/// it no longer erased. Otherwise, leave it erased.
///
/// This is based on the paper:
/// Novel multi-Gbps bit-flipping decoders for punctured LDPC codes,
/// by Archonta, Kanistras, and Paliouras, MOCAST 2016.
///
/// * `codeword` must be (n+p)/8 long (`self.output_len()`), with the first n/8 bytes already
/// set to the received hard information, and the punctured bits at the end will be updated.
/// * `working` must be (n+p) bytes long (`self.decode_bf_working_len()`).
///
/// Returns `(success, number of iterations run)`. Note that `success` false only indicates
/// that not every punctured bit was correctly recovered; many may have been successful.
//fn decode_erasures(&self, _codeword: &mut [u8], _working: &mut [u8], maxiters: usize) -> (bool, usize)
//{
/*
assert_eq!(codeword.len(), self.output_len());
assert_eq!(working.len(), self.decode_bf_working_len());
let n = self.n();
let p = self.punctured_bits();
// Rename working area
let erasures = working;
// Initialise erasures
for e in &mut erasures[..n] { *e = 0; }
for e in &mut erasures[n..] { *e = 1; }
// Initialise punctured part of output
for c in &mut codeword[n/8..] { *c = 0; }
// Track how many bits we've fixed
let mut bits_fixed = 0;
for iter in 0..maxiters {
// For each punctured bit
for a in n..n+p {
// Skip bits we have since fixed
if erasures[a] == 0 {
continue;
}
// Track votes for 0 (negative) or 1 (positive)
let mut votes = 0;
// For each check this bit is associated with
for i in &vi[vs[a] as usize .. vs[a+1] as usize] {
let i = *i as usize;
let mut parity = 0;
// See what the check parity is, and quit without voting if this check has
// any other erasures
let mut only_one_erasure = true;
for b in &ci[cs[i] as usize .. cs[i+1] as usize] {
let b = *b as usize;
// Skip if this is the bit we're currently considering
if a == b {
continue;
}
// If we see another erasure, stop
if erasures[b] == 1 {
only_one_erasure = false;
break;
}
// Otherwise add up parity for this check
parity += (codeword[b/8] >> (7-(b%8))) & 1;
}
// Cast a vote if we didn't see any other erasures
if only_one_erasure {
votes += if parity & 1 == 1 { 1 } else { -1 };
}
}
// If we have a majority vote one way or the other, great!
// Set ourselves to the majority vote value and clear our erasure status.
if votes != 0 {
erasures[a] = 0;
bits_fixed += 1;
if votes > 0 {
codeword[a/8] |= 1<<(7-(a%8));
} else {
codeword[a/8] &= !(1<<(7-(a%8)));
}
}
if bits_fixed == p {
return (true, iter);
}
}
}
*/
// If we got this far we have not succeeded
//(false, maxiters)
//}
    /// Bit flipping decoder.
    ///
    /// This algorithm is quick but only operates on hard information and consequently leaves a
    /// lot of error-correcting capability behind. It is around 1-2dB worse than the min-sum
    /// decoder. However, it requires much less memory and is a lot quicker.
    ///
    /// Requires:
    ///
    /// * `input` must be `n/8` long, where each bit is the received hard information
    /// * `output` must be `(n+p)/8` (=`self.output_len()`) bytes long and is written with the
    ///   decoded codeword, so the user data is present in the first `k/8` bytes.
    /// * `working` must be `n+p` (=`self.decode_bf_working_len()`) bytes long.
    ///
    /// Runs for at most `maxiters` iterations, including attempting to fix punctured erasures on
    /// applicable codes.
    /// NOTE(review): the erasure-fixing step is currently disabled (commented
    /// out above), so punctured bits are not preprocessed here yet — confirm.
    ///
    /// Returns `(decoding success, iters)`. For punctured codes, `iters` includes iterations
    /// of the erasure decoding algorithm which is run first.
    ///
    /// ## Panics
    /// * `input.len()` must be exactly `self.n()/8`
    /// * `output.len()` must be exactly `self.output_len()`.
    /// * `working.len()` must be exactly `self.decode_bf_working_len()`.
    pub fn decode_bf(&self, input: &[u8], output: &mut [u8],
                     working: &mut [u8], maxiters: usize)
        -> (bool, usize)
    {
        assert_eq!(input.len(), self.n()/8, "input.len() != n/8");
        assert_eq!(output.len(), self.output_len(), "output.len != (n+p)/8");
        assert_eq!(working.len(), self.decode_bf_working_len(), "working.len() incorrect");

        // Start from the received hard information.
        output[..self.n()/8].copy_from_slice(input);

        // Working area: the top bit of byte `i` stores the parity of check `i`,
        // and the low 7 bits of byte `a` store the violation count for
        // variable `a` (check and variable indices share the same byte array).
        for iter in 0..maxiters {
            // Zero out violation counts
            for v in &mut working[..] { *v = 0 }

            // Calculate the parity of each parity check
            for (check, var) in self.iter_paritychecks() {
                if output[var/8] >> (7-(var%8)) & 1 == 1 {
                    working[check] ^= 0x80;
                }
            }

            // Count how many parity violations each variable is associated with
            let mut max_violations = 0;
            for (check, var) in self.iter_paritychecks() {
                if working[check] & 0x80 == 0x80 {
                    // Unless we have more than 127 checks for a single variable, this
                    // can't overflow into the parity bit. And we don't have that.
                    working[var] += 1;
                    if working[var] & 0x7F > max_violations {
                        max_violations = working[var] & 0x7F;
                    }
                }
            }

            if max_violations == 0 {
                // All parity checks satisfied: valid codeword, stop early.
                return (true, iter);
            } else {
                // Flip all the bits that have the maximum number of violations
                for (var, violations) in working.iter().enumerate() {
                    if *violations & 0x7F == max_violations {
                        output[var/8] ^= 1<<(7-(var%8));
                    }
                }
            }
        }

        // Iteration limit reached without converging to a valid codeword.
        (false, maxiters)
    }
    /// Message passing based min-sum decoder.
    ///
    /// This algorithm is slower and requires more memory than the bit-flipping decode, but
    /// operates on soft information and provides very close to optimal decoding. If you don't have
    /// soft information, you can use `decode_hard_to_llrs` to go from hard information (bytes from
    /// a receiver) to soft information (LLRs).
    ///
    /// Requires:
    ///
    /// * `llrs` must be `n` long, with positive numbers more likely to be a 0 bit.
    /// * `output` must be allocated to (n+p)/8 bytes, of which the first k/8 bytes will be set
    ///   to the decoded message (and the rest to the parity bits of the complete codeword)
    /// * `working` is the main working area which must be provided and must have
    ///   `decode_ms_working_len` elements, equal to
    ///   2*paritycheck_sum + 3n + 3*punctured_bits - 2k
    /// * `working_u8` is the secondary working area which must be provided and must have
    ///   `decode_ms_working_u8_len` elements, equal to (n+p-k)/8.
    ///
    /// Will run for at most `maxiters` iterations.
    ///
    /// Returns decoding success and the number of iterations run for.
    pub fn decode_ms<T: DecodeFrom>(&self, llrs: &[T], output: &mut [u8],
                                    working: &mut [T], working_u8: &mut [u8],
                                    maxiters: usize)
        -> (bool, usize)
    {
        let n = self.n();
        let k = self.k();
        let p = self.punctured_bits();

        assert_eq!(llrs.len(), n, "llrs.len() != n");
        assert_eq!(output.len(), self.output_len(), "output.len() != (n+p)/8");
        assert_eq!(working.len(), self.decode_ms_working_len(), "working.len() incorrect");
        assert_eq!(working_u8.len(), self.decode_ms_working_u8_len(), "working_u8 != (n+p-k)/8");

        // Rename output to parities as we'll use it to keep track of the parity bits until the end
        let parities = output;

        // Rename working_u8 to ui_sgns, we'll use it to accumulate signs for each check
        let ui_sgns = working_u8;

        // Zero the working area and split it up:
        // u:        check-to-variable messages, one per parity check edge
        // v:        variable-to-check messages, one per parity check edge
        // va:       accumulated marginal for each of the n+p variables
        // ui_min1/ui_min2: the two smallest |v| seen by each of the n+p-k checks
        for w in &mut working[..] { *w = T::zero() }
        let (u, working) = working.split_at_mut(self.paritycheck_sum() as usize);
        let (v, working) = working.split_at_mut(self.paritycheck_sum() as usize);
        let (va, working) = working.split_at_mut(n + p);
        let (ui_min1, ui_min2) = working.split_at_mut(n + p - k);

        for iter in 0..maxiters {
            // Initialise the marginals to the input LLRs (and to 0 for punctured bits)
            va[..llrs.len()].copy_from_slice(llrs);
            for x in &mut va[llrs.len()..] { *x = T::zero() }

            for (idx, (check, var)) in self.iter_paritychecks().enumerate() {
                // Work out messages to this variable: the check sends the
                // minimum |v| over its *other* edges, so if this edge held the
                // check's minimum, use the second minimum instead.
                if v[idx].abs() == ui_min1[check] {
                    u[idx] = ui_min2[check];
                } else {
                    u[idx] = ui_min1[check];
                }
                // Sign is the product of the other edges' signs: the check's
                // accumulated sign, corrected by this edge's own sign.
                if ui_sgns[check/8] >> (check%8) & 1 == 1 {
                    u[idx] = -u[idx];
                }
                if v[idx] < T::zero() {
                    u[idx] = -u[idx];
                }

                // Accumulate incoming messages to each variable
                va[var] += u[idx];
            }

            // Reset the per-check accumulators before the second pass.
            for x in &mut ui_min1[..] { *x = T::maxval() }
            for x in &mut ui_min2[..] { *x = T::maxval() }
            for x in &mut ui_sgns[..] { *x = 0 }
            for x in &mut parities[..] { *x = 0 }

            for (idx, (check, var)) in self.iter_paritychecks().enumerate() {
                // Work out messages to this parity check: marginal minus this
                // check's own contribution. If the message's sign flipped
                // since the previous iteration, send zero instead.
                let new_v_ai = va[var] - u[idx];
                if v[idx] != T::zero() && (new_v_ai >= T::zero()) != (v[idx] >= T::zero()) {
                    v[idx] = T::zero();
                } else {
                    v[idx] = new_v_ai;
                }

                // Accumulate the two smallest |v| for this check
                if v[idx].abs() < ui_min1[check] {
                    ui_min2[check] = ui_min1[check];
                    ui_min1[check] = v[idx].abs();
                } else if v[idx].abs() < ui_min2[check] {
                    ui_min2[check] = v[idx].abs();
                }

                // Accumulate signs
                if v[idx] < T::zero() {
                    ui_sgns[check/8] ^= 1<<(check%8);
                }

                // Accumulate parity of the hard decisions of this check's variables
                if va[var] <= T::zero() {
                    parities[check/8] ^= 1<<(check%8);
                }
            }

            // Check parities. If none are 1 then we have a valid codeword.
            if *parities.iter().max().unwrap() == 0 {
                // Hard decode marginals into the output
                let output = parities;
                for o in &mut output[..] { *o = 0 }
                for var in 0..(n + p) {
                    if va[var] <= T::zero() {
                        output[var/8] |= 1 << (7 - (var%8));
                    }
                }
                return (true, iter);
            }
        }

        // If we failed to find a codeword, at least hard decode the marginals into the output
        let output = parities;
        for o in &mut output[..] { *o = 0 }
        for var in 0..(n + p) {
            if va[var] <= T::zero() {
                output[var/8] |= 1 << (7 - (var%8));
            }
        }
        (false, maxiters)
    }
/// Convert hard information into LLRs.
///
/// The min-sum decoding used in `decode_ms` is invariant to linear scaling
/// in LLR, so it doesn't matter which value is picked so long as the sign
/// is correct. This function just assigns -/+ 1 for 1/0 bits.
///
/// `input` must be n/8 long, `llrs` must be n long.
///
/// ## Panics
/// * `input.len()` must be exactly `self.n()/8`
/// * `llrs.len()` must be exactly `self.n()`
pub fn hard_to_llrs<T: DecodeFrom>(&self, input: &[u8], llrs: &mut [T]) {
assert_eq!(input.len(), self.n()/8, "input.len() != n/8");
assert_eq!(llrs.len(), self.n(), "llrs.len() != n");
let llr = -T::one();
for (idx, byte) in input.iter().enumerate() {
for i in 0..8 {
llrs[idx*8 + i] = if (byte >> (7-i)) & 1 == 1 { llr } else { -llr };
}
}
}
/// Convert LLRs into hard information.
///
/// `llrs` must be n long, `output` must be n/8 long.
///
/// ## Panics
/// * `input.len()` must be exactly `self.n()/8`
/// * `llrs.len()` must be exactly `self.n()`
pub fn llrs_to_hard<T: DecodeFrom>(&self, llrs: &[T], output: &mut [u8]) {
assert_eq!(llrs.len(), self.n(), "llrs.len() != n");
assert_eq!(output.len(), self.n()/8, "output.len() != n/8");
for o in &mut output[..] { *o = 0 }
for (i, llr) in llrs.iter().enumerate() {
if *llr < T::zero() {
output[i/8] |= 1 << (7 - (i%8));
}
}
}
}
#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use ::codes::{LDPCCode, CodeParams,
                  TC128_PARAMS, TC256_PARAMS, TC512_PARAMS,
                  TM1280_PARAMS, TM1536_PARAMS, TM2048_PARAMS,
                  TM5120_PARAMS, TM6144_PARAMS, TM8192_PARAMS};

    // Every supported code, paired by index with its parameter struct below.
    const CODES: [LDPCCode; 9] = [LDPCCode::TC128, LDPCCode::TC256, LDPCCode::TC512,
                                  LDPCCode::TM1280, LDPCCode::TM1536, LDPCCode::TM2048,
                                  LDPCCode::TM5120, LDPCCode::TM6144, LDPCCode::TM8192,
                                 ];

    const PARAMS: [CodeParams; 9] = [TC128_PARAMS, TC256_PARAMS, TC512_PARAMS,
                                     TM1280_PARAMS, TM1536_PARAMS, TM2048_PARAMS,
                                     TM5120_PARAMS, TM6144_PARAMS, TM8192_PARAMS,
                                    ];

    #[test]
    fn test_decode_ms_working_len() {
        // XXX Currently disabled: the min-sum working-length constants are
        // not checked against the per-code parameters.
        //for (code, param) in CODES.iter().zip(PARAMS.iter()) {
            //assert_eq!(code.decode_ms_working_len(), param.decode_ms_working_len);
            //assert_eq!(code.decode_ms_working_u8_len(), param.decode_ms_working_u8_len);
        //}
    }

    #[test]
    fn test_decode_bf_working_len() {
        // The computed bit-flipping working length must match each code's
        // published parameters.
        for (code, param) in CODES.iter().zip(PARAMS.iter()) {
            assert_eq!(code.decode_bf_working_len(), param.decode_bf_working_len);
        }
    }

    #[test]
    fn test_output_len() {
        for (code, param) in CODES.iter().zip(PARAMS.iter()) {
            assert_eq!(code.output_len(), param.output_len);
        }
    }

    #[test]
    fn test_hard_to_llrs() {
        let code = LDPCCode::TC128;

        // Received hard-decision bytes; each set bit should map to `llr`,
        // each clear bit to `-llr`.
        let hard = vec![255, 254, 253, 252, 251, 250, 249, 248,
                        203, 102, 103, 120, 107, 30, 157, 169];

        let mut llrs = vec![0f32; code.n()];
        let llr = -1.0;
        code.hard_to_llrs(&hard, &mut llrs);
        assert_eq!(llrs, vec![
            llr, llr, llr, llr, llr, llr, llr, llr,
            llr, llr, llr, llr, llr, llr, llr, -llr,
            llr, llr, llr, llr, llr, llr, -llr, llr,
            llr, llr, llr, llr, llr, llr, -llr, -llr,
            llr, llr, llr, llr, llr, -llr, llr, llr,
            llr, llr, llr, llr, llr, -llr, llr, -llr,
            llr, llr, llr, llr, llr, -llr, -llr, llr,
            llr, llr, llr, llr, llr, -llr, -llr, -llr,
            llr, llr, -llr, -llr, llr, -llr, llr, llr,
            -llr, llr, llr, -llr, -llr, llr, llr, -llr,
            -llr, llr, llr, -llr, -llr, llr, llr, llr,
            -llr, llr, llr, llr, llr, -llr, -llr, -llr,
            -llr, llr, llr, -llr, llr, -llr, llr, llr,
            -llr, -llr, -llr, llr, llr, llr, llr, -llr,
            llr, -llr, -llr, llr, llr, llr, -llr, llr,
            llr, -llr, llr, -llr, llr, -llr, -llr, llr]);
    }

    #[test]
    fn test_llrs_to_hard() {
        // Inverse of test_hard_to_llrs: the same LLR pattern must pack back
        // into the original bytes.
        let code = LDPCCode::TC128;

        let llr = -1.0;
        let llrs = vec![
            llr, llr, llr, llr, llr, llr, llr, llr,
            llr, llr, llr, llr, llr, llr, llr, -llr,
            llr, llr, llr, llr, llr, llr, -llr, llr,
            llr, llr, llr, llr, llr, llr, -llr, -llr,
            llr, llr, llr, llr, llr, -llr, llr, llr,
            llr, llr, llr, llr, llr, -llr, llr, -llr,
            llr, llr, llr, llr, llr, -llr, -llr, llr,
            llr, llr, llr, llr, llr, -llr, -llr, -llr,
            llr, llr, -llr, -llr, llr, -llr, llr, llr,
            -llr, llr, llr, -llr, -llr, llr, llr, -llr,
            -llr, llr, llr, -llr, -llr, llr, llr, llr,
            -llr, llr, llr, llr, llr, -llr, -llr, -llr,
            -llr, llr, llr, -llr, llr, -llr, llr, llr,
            -llr, -llr, -llr, llr, llr, llr, llr, -llr,
            llr, -llr, -llr, llr, llr, llr, -llr, llr,
            llr, -llr, llr, -llr, llr, -llr, -llr, llr];

        let mut hard = vec![0u8; code.n()/8];
        code.llrs_to_hard(&llrs, &mut hard);
        assert_eq!(hard, vec![255, 254, 253, 252, 251, 250, 249, 248,
                              203, 102, 103, 120, 107, 30, 157, 169]);
    }

    #[test]
    fn test_decode_bf_tc() {
        let code = LDPCCode::TC256;

        // Make up some TX data
        let txdata: Vec<u8> = (0..16).collect();
        let mut txcode = vec![0u8; code.n()/8];
        code.copy_encode(&txdata, &mut txcode);

        // Copy to rx
        let mut rxcode = txcode.clone();

        // Corrupt some bits
        rxcode[0] = 0xFF;

        // Allocate working area and output area
        let mut working = vec![0u8; code.decode_bf_working_len()];
        let mut output = vec![0u8; code.output_len()];

        // Run decoder; it must recover the original codeword.
        let (success, _) = code.decode_bf(&rxcode, &mut output, &mut working, 50);

        assert!(success);
        assert_eq!(&txcode[..], &output[..txcode.len()]);
    }

    #[test]
    fn test_decode_ms() {
        let code = LDPCCode::TM1280;

        // Make up a TX codeword
        let txdata: Vec<u8> = (0..code.k()/8).map(|i| !(i as u8)).collect();
        let mut txcode = vec![0u8; code.n()/8];
        code.copy_encode(&txdata, &mut txcode);

        // Copy it and corrupt the first bit
        let mut rxcode = txcode.clone();
        rxcode[0] ^= 1<<7;

        // Convert the hard data to LLRs (inline, using i8 hard values)
        let mut llrs = vec![0i8; code.n()];
        for (idx, byte) in rxcode.iter().enumerate() {
            for i in 0..8 {
                llrs[idx*8 + i] = if (byte >> (7-i)) & 1 == 1 { -1i8 } else { 1i8 };
            }
        }
        //code.hard_to_llrs(&rxcode, &mut llrs);

        // Allocate working area and output area
        let mut working = vec![0i8; code.decode_ms_working_len()];
        let mut working_u8 = vec![0u8; code.output_len() - code.k()/8];
        let mut decoded = vec![0u8; code.output_len()];

        // Run decoder; the single flipped bit must be corrected.
        let (success, _) = code.decode_ms(&llrs, &mut decoded, &mut working, &mut working_u8, 50);
        assert_eq!(&decoded[..8], &txcode[..8]);
        assert!(success);
    }
}
Add new version of erasure decoder
// Copyright 2017 Adam Greig
// Licensed under the MIT license, see LICENSE for details.
//! This module provides decoding functions for turning codewords into data.
//!
//! Please refer to the `decode_ms` and `decode_bf` methods on
//! [`LDPCCode`](../codes/enum.LDPCCode.html) for more details.
use core::i8;
use core::i16;
use core::i32;
use core::f32;
use core::f64;
use core::ops::{Add,AddAssign,Neg,Sub};
use ::codes::LDPCCode;
// `f64::abs()`/`f32::abs()` are not available with `no_std` (they live in
// `std`, not `core`), so implement absolute value by clearing the IEEE-754
// sign bit. This is used right in the hottest decoder loop and is much faster
// than the obvious branching `if f < 0 { -f } else { f }`.

/// Absolute value of an `f64`, computed by masking off the sign bit.
///
/// Uses `to_bits`/`from_bits` (both in `core`), which express the same
/// bit-level reinterpretation as the previous raw pointer casts without
/// needing `unsafe`.
fn fabs(f: f64) -> f64 {
    f64::from_bits(f.to_bits() & 0x7FFF_FFFF_FFFF_FFFF)
}
/// Absolute value of an `f32`, computed by masking off the IEEE-754 sign bit.
///
/// `f32::abs()` is not available with `no_std`; `to_bits`/`from_bits` are
/// `core` functions expressing the same bit-level reinterpretation as raw
/// pointer casts, but without `unsafe`.
fn fabsf(f: f32) -> f32 {
    f32::from_bits(f.to_bits() & 0x7FFF_FFFF)
}
/// Trait for types that the min-sum decoder can operate with.
///
/// Implemented for `i8`, `i16`, `i32`, `f32`, and `f64`.
///
/// The arithmetic bounds (`Add`, `AddAssign`, `Neg`, `Sub`) are exactly the
/// operations the decoder performs on LLR values, and `PartialOrd` supports
/// the minimum searches in the min-sum update.
pub trait DecodeFrom:
    Sized + Clone + Copy + PartialEq + PartialOrd
    + Add + AddAssign + Neg<Output=Self> + Sub<Output=Self>
{
    /// 1 in T
    fn one() -> Self;
    /// 0 in T
    fn zero() -> Self;
    /// Maximum value T can represent, used to initialise minimum accumulators
    fn maxval() -> Self;
    /// Absolute value of self
    fn abs(&self) -> Self;
}
// DecodeFrom for the signed integer types: delegate `abs` to the inherent
// integer method.
impl DecodeFrom for i8 {
    #[inline]
    fn one() -> i8 { 1 }
    #[inline]
    fn zero() -> i8 { 0 }
    #[inline]
    fn maxval() -> i8 { i8::MAX }
    #[inline]
    fn abs(&self) -> i8 { (*self).abs() }
}

impl DecodeFrom for i16 {
    #[inline]
    fn one() -> i16 { 1 }
    #[inline]
    fn zero() -> i16 { 0 }
    #[inline]
    fn maxval() -> i16 { i16::MAX }
    #[inline]
    fn abs(&self) -> i16 { (*self).abs() }
}

impl DecodeFrom for i32 {
    #[inline]
    fn one() -> i32 { 1 }
    #[inline]
    fn zero() -> i32 { 0 }
    #[inline]
    fn maxval() -> i32 { i32::MAX }
    #[inline]
    fn abs(&self) -> i32 { (*self).abs() }
}

// DecodeFrom for the float types: `abs` uses the local no_std helpers.
impl DecodeFrom for f32 {
    #[inline]
    fn one() -> f32 { 1.0 }
    #[inline]
    fn zero() -> f32 { 0.0 }
    #[inline]
    fn maxval() -> f32 { f32::MAX }
    #[inline]
    fn abs(&self) -> f32 { fabsf(*self) }
}

impl DecodeFrom for f64 {
    #[inline]
    fn one() -> f64 { 1.0 }
    #[inline]
    fn zero() -> f64 { 0.0 }
    #[inline]
    fn maxval() -> f64 { f64::MAX }
    #[inline]
    fn abs(&self) -> f64 { fabs(*self) }
}
impl LDPCCode {
    /// Get the length of [u8] required for the working area of `decode_bf`.
    ///
    /// Equal to n + punctured_bits (one byte per variable, including
    /// punctured variables).
    pub fn decode_bf_working_len(&self) -> usize {
        self.n() + self.punctured_bits()
    }

    /// Get the length of [T] required for the working area of `decode_ms`.
    ///
    /// Equal to 2 * paritycheck_sum + 3n + 3p - 2k:
    /// u and v (paritycheck_sum each), va (n+p), and ui_min1/ui_min2
    /// (n+p-k each) — see the `split_at_mut` calls in `decode_ms`.
    pub fn decode_ms_working_len(&self) -> usize {
        (2 * self.paritycheck_sum() as usize + 3*self.n() + 3*self.punctured_bits() - 2*self.k())
    }

    /// Get the length of [u8] required for the working_u8 area of `decode_ms`.
    ///
    /// Equal to (n+p-k)/8: one bit per parity check, packed into bytes.
    pub fn decode_ms_working_u8_len(&self) -> usize {
        (self.n() + self.punctured_bits() - self.k()) / 8
    }

    /// Get the length of [u8] required for the output of any decoder.
    ///
    /// Equal to (n+punctured_bits)/8.
    pub fn output_len(&self) -> usize {
        (self.n() + self.punctured_bits()) / 8
    }
    /// Hard erasure decoding algorithm.
    ///
    /// Used to preprocess punctured codes before attempting bit-flipping decoding,
    /// as the bit-flipping algorithm cannot handle erasures.
    ///
    /// The algorithm is:
    /// * We compute the parity of each check over all non-erased bits
    /// * We count how many erased bits are connected to each check (0, 1, or "more than 1")
    /// * Then each parity check with exactly one erased variable casts a vote for
    ///   that variable, +1 if check parity is 1, otherwise -1
    /// * Each variable that receives a majority vote (i.e. not equal 0) is set to that
    ///   vote and marked decoded
    /// * Iterate until all variables are decoded or we reach the iteration limit
    ///
    /// This is based on the paper:
    /// Novel multi-Gbps bit-flipping decoders for punctured LDPC codes,
    /// by Archonta, Kanistras, and Paliouras, MOCAST 2016.
    ///
    /// * `codeword` must be (n+p)/8 long (`self.output_len()`), with the first n/8 bytes already
    ///   set to the received hard information, and the punctured bits at the end will be updated.
    /// * `working` must be (n+p) bytes long (`self.decode_bf_working_len()`).
    ///
    /// Returns `(success, number of iterations run)`. Success only indicates that every punctured
    /// bit got a majority vote; but they might still be wrong; likewise failure means not every
    /// bit got a vote but many may still have been determined correctly.
    fn decode_erasures(&self, codeword: &mut [u8], working: &mut [u8], maxiters: usize)
        -> (bool, usize)
    {
        assert_eq!(codeword.len(), self.output_len());
        assert_eq!(working.len(), self.decode_bf_working_len());

        let n = self.n();
        let p = self.punctured_bits();

        // Working area (one byte shared between check index 'i' and variable index 'a'):
        // * The top bit 0x80 for byte 'i' is the parity bit for check 'i'.
        // * The second and third top bits 0x60 for byte 'i' indicate the number of erased
        //   variables connected to check 'i':
        //   00 for no erasures, 01 for a single erasure, 11 for more than one erasure
        // * The fourth top bit 0x10 for byte 'a' indicates whether variable 'a' is erased
        // * The lowest four bits 0x0F for byte 'a' indicate the votes received for variable 'a',
        //   starting at 8 for 0 votes and being incremented and decremented from there.
        //   NOTE(review): more than 7 net positive (or 8 net negative) votes would
        //   overflow/underflow the nibble; presumably the check degrees keep vote
        //   counts small enough — confirm.

        // Initialise working area: mark all punctured bits as erased
        for w in &mut working[..n] { *w = 0x00 }
        for w in &mut working[n..] { *w = 0x10 }

        // Also write all the punctured bits in the codeword to zero
        for c in &mut codeword[n/8..] { *c = 0x00 }

        // Keep track of how many bits we've fixed
        let mut bits_fixed = 0;

        for iter in 0..maxiters {
            // Initialise parity and erasure counts to zero, reset votes to the
            // neutral value 8, preserving only each variable's erasure bit
            for w in &mut working[..] { *w = (*w & 0x10) | 0x08 }

            // Compute check parity and erasure count
            for (check, var) in self.iter_paritychecks() {
                if working[var] & 0x10 == 0x10 {
                    // If var is erased, update check erasure count (saturating at "more than one")
                    match working[check] & 0x60 {
                        0x00 => working[check] |= 0x20,
                        0x20 => working[check] |= 0x40,
                        _ => (),
                    }
                } else if codeword[var/8] >> (7-(var%8)) & 1 == 1 {
                    // If var is not erased and this codeword bit is set, update check parity
                    working[check] ^= 0x80;
                }
            }

            // Now accumulate votes for each erased variable
            for (check, var) in self.iter_paritychecks() {
                // Only checks with exactly one erased variable may vote
                if working[var] & 0x10 == 0x10 && working[check] & 0x60 == 0x20 {
                    // Vote +1 if our parity is currently 1, -1 otherwise
                    if working[check] & 0x80 == 0x80 {
                        working[var] += 1;
                    } else {
                        working[var] -= 1;
                    }
                }
            }

            // Finally set all bits that are erased and have a majority positive vote.
            // NOTE(review): the loop variable indexes *variables* here despite being
            // named `check`; also only a positive majority clears the erasure flag —
            // a negative majority (bit should stay 0) is never marked decoded, which
            // differs from the doc comment above; confirm intended.
            for check in 0..(n+p) {
                if working[check] & 0x10 == 0x10 && working[check] & 0x0F > 0x08 {
                    codeword[check/8] |= 1<<(7-(check%8));
                    working[check] &= !0x10;
                    bits_fixed += 1;
                }
            }

            if bits_fixed == p {
                // Hurray we're done
                return (true, iter)
            }
        }

        // If we finished the iteration loop then we did not succeed.
        (false, maxiters)
    }
    /// Bit flipping decoder.
    ///
    /// This algorithm is quick but only operates on hard information and consequently leaves a
    /// lot of error-correcting capability behind. It is around 1-2dB worse than the min-sum
    /// decoder. However, it requires much less memory and is a lot quicker.
    ///
    /// Requires:
    ///
    /// * `input` must be `n/8` long, where each bit is the received hard information
    /// * `output` must be `(n+p)/8` (=`self.output_len()`) bytes long and is written with the
    ///   decoded codeword, so the user data is present in the first `k/8` bytes.
    /// * `working` must be `n+p` (=`self.decode_bf_working_len()`) bytes long.
    ///
    /// Runs for at most `maxiters` iterations, including attempting to fix punctured erasures on
    /// applicable codes.
    ///
    /// Returns `(decoding success, iters)`. For punctured codes, `iters` includes iterations
    /// of the erasure decoding algorithm which is run first.
    ///
    /// ## Panics
    /// * `input.len()` must be exactly `self.n()/8`
    /// * `output.len()` must be exactly `self.output_len()`.
    /// * `working.len()` must be exactly `self.decode_bf_working_len()`.
    pub fn decode_bf(&self, input: &[u8], output: &mut [u8],
                     working: &mut [u8], maxiters: usize)
        -> (bool, usize)
    {
        assert_eq!(input.len(), self.n()/8, "input.len() != n/8");
        assert_eq!(output.len(), self.output_len(), "output.len != (n+p)/8");
        assert_eq!(working.len(), self.decode_bf_working_len(), "working.len() incorrect");

        // Start from the received hard information.
        output[..self.n()/8].copy_from_slice(input);

        // For punctured codes we must first try and fix all the punctured bits.
        // We run them through an erasure decoding algorithm and record how many iterations
        // it took (so we can return the total).
        let erasure_iters = if self.punctured_bits() > 0 {
            let (_, iters) = self.decode_erasures(output, working, maxiters);
            iters
        } else { 0 };

        // Working area: the top bit of byte `i` stores the parity of check `i`,
        // and the low 7 bits of byte `a` store the violation count for
        // variable `a` (check and variable indices share the same byte array).
        for iter in 0..maxiters {
            // Zero out violation counts
            for v in &mut working[..] { *v = 0 }

            // Calculate the parity of each parity check
            for (check, var) in self.iter_paritychecks() {
                if output[var/8] >> (7-(var%8)) & 1 == 1 {
                    working[check] ^= 0x80;
                }
            }

            // Count how many parity violations each variable is associated with
            let mut max_violations = 0;
            for (check, var) in self.iter_paritychecks() {
                if working[check] & 0x80 == 0x80 {
                    // Unless we have more than 127 checks for a single variable, this
                    // can't overflow into the parity bit. And we don't have that.
                    working[var] += 1;
                    if working[var] & 0x7F > max_violations {
                        max_violations = working[var] & 0x7F;
                    }
                }
            }

            if max_violations == 0 {
                // All parity checks satisfied: valid codeword, stop early.
                return (true, iter + erasure_iters);
            } else {
                // Flip all the bits that have the maximum number of violations
                for (var, violations) in working.iter().enumerate() {
                    if *violations & 0x7F == max_violations {
                        output[var/8] ^= 1<<(7-(var%8));
                    }
                }
            }
        }

        // Iteration limit reached without converging to a valid codeword.
        (false, maxiters + erasure_iters)
    }
/// Message passing based min-sum decoder.
///
/// This algorithm is slower and requires more memory than the bit-flipping decode, but
/// operates on soft information and provides very close to optimal decoding. If you don't have
/// soft information, you can use `decode_hard_to_llrs` to go from hard information (bytes from
/// a receiver) to soft information (LLRs).
///
/// Requires:
///
/// * `llrs` must be `n` long, with positive numbers more likely to be a 0 bit.
/// * `output` must be allocated to (n+p)/8 bytes, of which the first k/8 bytes will be set
/// to the decoded message (and the rest to the parity bits of the complete codeword)
/// * `working` is the main working area which must be provided and must have
/// `decode_ms_working_len` elements, equal to
/// 2*paritycheck_sum + 3n + 3*punctured_bits - 2k
/// * `working_u8` is the secondary working area which must be provided and must have
/// `decode_ms_working_u8_len` elements, equal to (n+p-k)/8.
///
/// Will run for at most `maxiters` iterations.
///
/// Returns decoding success and the number of iterations run for.
pub fn decode_ms<T: DecodeFrom>(&self, llrs: &[T], output: &mut [u8],
                                working: &mut [T], working_u8: &mut [u8],
                                maxiters: usize)
    -> (bool, usize)
{
    let n = self.n();
    let k = self.k();
    let p = self.punctured_bits();

    assert_eq!(llrs.len(), n, "llrs.len() != n");
    assert_eq!(output.len(), self.output_len(), "output.len() != (n+p)/8");
    assert_eq!(working.len(), self.decode_ms_working_len(), "working.len() incorrect");
    assert_eq!(working_u8.len(), self.decode_ms_working_u8_len(), "working_u8 != (n+p-k)/8");

    // Rename output to parities as we'll use it to keep track of the parity bits until the end
    let parities = output;

    // Rename working_u8 to ui_sgns, we'll use it to accumulate signs for each check
    let ui_sgns = working_u8;

    // Zero the working area and split it up:
    // * u:  check-to-variable messages, one per parity check matrix entry
    // * v:  variable-to-check messages, one per parity check matrix entry
    // * va: accumulated marginal for each variable (including punctured bits)
    // * ui_min1 / ui_min2: smallest and second-smallest |v| seen on each check
    for w in &mut working[..] { *w = T::zero() }
    let (u, working) = working.split_at_mut(self.paritycheck_sum() as usize);
    let (v, working) = working.split_at_mut(self.paritycheck_sum() as usize);
    let (va, working) = working.split_at_mut(n + p);
    let (ui_min1, ui_min2) = working.split_at_mut(n + p - k);

    for iter in 0..maxiters {
        // Initialise the marginals to the input LLRs (and to 0 for punctured bits)
        va[..llrs.len()].copy_from_slice(llrs);
        for x in &mut va[llrs.len()..] { *x = T::zero() }

        for (idx, (check, var)) in self.iter_paritychecks().enumerate() {
            // Work out messages to this variable: the min-sum check message is the
            // minimum |v| over the check's *other* edges, i.e. min1 unless this
            // edge itself holds the minimum, in which case min2.
            // (On the first iteration `working` was zeroed, so u stays zero and
            // the marginals are just the input LLRs.)
            if v[idx].abs() == ui_min1[check] {
                u[idx] = ui_min2[check];
            } else {
                u[idx] = ui_min1[check];
            }
            // Restore the sign: accumulated sign of the whole check, with this
            // edge's own sign divided back out.
            if ui_sgns[check/8] >> (check%8) & 1 == 1 {
                u[idx] = -u[idx];
            }
            if v[idx] < T::zero() {
                u[idx] = -u[idx];
            }

            // Accumulate incoming messages to each variable
            va[var] += u[idx];
        }

        // Reset per-check accumulators before recomputing them from the new messages.
        for x in &mut ui_min1[..] { *x = T::maxval() }
        for x in &mut ui_min2[..] { *x = T::maxval() }
        for x in &mut ui_sgns[..] { *x = 0 }
        for x in &mut parities[..] { *x = 0 }

        for (idx, (check, var)) in self.iter_paritychecks().enumerate() {
            // Work out messages to this parity check
            let new_v_ai = va[var] - u[idx];
            // NOTE(review): zeroing the message when its sign flips between
            // iterations appears intended to damp oscillation — confirm intent.
            if v[idx] != T::zero() && (new_v_ai >= T::zero()) != (v[idx] >= T::zero()) {
                v[idx] = T::zero();
            } else {
                v[idx] = new_v_ai;
            }

            // Accumulate two minimums
            if v[idx].abs() < ui_min1[check] {
                ui_min2[check] = ui_min1[check];
                ui_min1[check] = v[idx].abs();
            } else if v[idx].abs() < ui_min2[check] {
                ui_min2[check] = v[idx].abs();
            }

            // Accumulate signs
            if v[idx] < T::zero() {
                ui_sgns[check/8] ^= 1<<(check%8);
            }

            // Accumulate parity
            if va[var] <= T::zero() {
                parities[check/8] ^= 1<<(check%8);
            }
        }

        // Check parities. If none are 1 then we have a valid codeword.
        if *parities.iter().max().unwrap() == 0 {
            // Hard decode marginals into the output
            let output = parities;
            for o in &mut output[..] { *o = 0 }
            for var in 0..(n + p) {
                if va[var] <= T::zero() {
                    output[var/8] |= 1 << (7 - (var%8));
                }
            }
            return (true, iter);
        }
    }

    // If we failed to find a codeword, at least hard decode the marginals into the output
    let output = parities;
    for o in &mut output[..] { *o = 0 }
    for var in 0..(n + p) {
        if va[var] <= T::zero() {
            output[var/8] |= 1 << (7 - (var%8));
        }
    }
    (false, maxiters)
}
/// Convert hard information into LLRs.
///
/// The min-sum decoding used in `decode_ms` is invariant to linear scaling
/// in LLR, so it doesn't matter which magnitude is picked so long as the sign
/// is correct. This function simply assigns -1 for a 1 bit and +1 for a 0 bit.
///
/// `input` must be n/8 long, `llrs` must be n long.
///
/// ## Panics
/// * `input.len()` must be exactly `self.n()/8`
/// * `llrs.len()` must be exactly `self.n()`
pub fn hard_to_llrs<T: DecodeFrom>(&self, input: &[u8], llrs: &mut [T]) {
    assert_eq!(input.len(), self.n()/8, "input.len() != n/8");
    assert_eq!(llrs.len(), self.n(), "llrs.len() != n");

    let one = T::one();
    for (bit_idx, llr) in llrs.iter_mut().enumerate() {
        // Bits are stored MSB-first within each byte.
        let bit = (input[bit_idx / 8] >> (7 - (bit_idx % 8))) & 1;
        *llr = if bit == 1 { -one } else { one };
    }
}
/// Convert LLRs into hard information.
///
/// A bit is decoded as 1 when its LLR is negative, and as 0 otherwise.
///
/// `llrs` must be n long, `output` must be n/8 long.
///
/// ## Panics
/// * `llrs.len()` must be exactly `self.n()`
/// * `output.len()` must be exactly `self.n()/8`
pub fn llrs_to_hard<T: DecodeFrom>(&self, llrs: &[T], output: &mut [u8]) {
    assert_eq!(llrs.len(), self.n(), "llrs.len() != n");
    assert_eq!(output.len(), self.n()/8, "output.len() != n/8");

    // Build each output byte from eight consecutive LLRs, MSB-first.
    for (byte_idx, chunk) in llrs.chunks(8).enumerate() {
        let mut byte = 0u8;
        for (bit, llr) in chunk.iter().enumerate() {
            if *llr < T::zero() {
                byte |= 1 << (7 - bit);
            }
        }
        output[byte_idx] = byte;
    }
}
}
#[cfg(test)]
mod tests {
    use std::prelude::v1::*;
    use ::codes::{LDPCCode, CodeParams,
                  TC128_PARAMS, TC256_PARAMS, TC512_PARAMS,
                  TM1280_PARAMS, TM1536_PARAMS, TM2048_PARAMS,
                  TM5120_PARAMS, TM6144_PARAMS, TM8192_PARAMS};

    // Every supported code, paired index-for-index with its parameters in PARAMS.
    const CODES: [LDPCCode; 9] = [LDPCCode::TC128, LDPCCode::TC256, LDPCCode::TC512,
                                  LDPCCode::TM1280, LDPCCode::TM1536, LDPCCode::TM2048,
                                  LDPCCode::TM5120, LDPCCode::TM6144, LDPCCode::TM8192,
    ];

    const PARAMS: [CodeParams; 9] = [TC128_PARAMS, TC256_PARAMS, TC512_PARAMS,
                                     TM1280_PARAMS, TM1536_PARAMS, TM2048_PARAMS,
                                     TM5120_PARAMS, TM6144_PARAMS, TM8192_PARAMS,
    ];

    #[test]
    fn test_decode_ms_working_len() {
        // XXX: disabled — TODO confirm why these checks were commented out and
        // re-enable once the computed and precomputed lengths agree.
        //for (code, param) in CODES.iter().zip(PARAMS.iter()) {
            //assert_eq!(code.decode_ms_working_len(), param.decode_ms_working_len);
            //assert_eq!(code.decode_ms_working_u8_len(), param.decode_ms_working_u8_len);
        //}
    }

    // The bf working length computed at runtime must match the precomputed params.
    #[test]
    fn test_decode_bf_working_len() {
        for (code, param) in CODES.iter().zip(PARAMS.iter()) {
            assert_eq!(code.decode_bf_working_len(), param.decode_bf_working_len);
        }
    }

    // The output length computed at runtime must match the precomputed params.
    #[test]
    fn test_output_len() {
        for (code, param) in CODES.iter().zip(PARAMS.iter()) {
            assert_eq!(code.output_len(), param.output_len);
        }
    }

    // Converting fixed hard bytes to LLRs must give -1 per 1-bit and +1 per 0-bit,
    // MSB-first within each byte.
    #[test]
    fn test_hard_to_llrs() {
        let code = LDPCCode::TC128;

        let hard = vec![255, 254, 253, 252, 251, 250, 249, 248,
                        203, 102, 103, 120, 107, 30, 157, 169];

        let mut llrs = vec![0f32; code.n()];
        let llr = -1.0;
        code.hard_to_llrs(&hard, &mut llrs);

        assert_eq!(llrs, vec![
            llr, llr, llr, llr, llr, llr, llr, llr,
            llr, llr, llr, llr, llr, llr, llr, -llr,
            llr, llr, llr, llr, llr, llr, -llr, llr,
            llr, llr, llr, llr, llr, llr, -llr, -llr,
            llr, llr, llr, llr, llr, -llr, llr, llr,
            llr, llr, llr, llr, llr, -llr, llr, -llr,
            llr, llr, llr, llr, llr, -llr, -llr, llr,
            llr, llr, llr, llr, llr, -llr, -llr, -llr,
            llr, llr, -llr, -llr, llr, -llr, llr, llr,
            -llr, llr, llr, -llr, -llr, llr, llr, -llr,
            -llr, llr, llr, -llr, -llr, llr, llr, llr,
            -llr, llr, llr, llr, llr, -llr, -llr, -llr,
            -llr, llr, llr, -llr, llr, -llr, llr, llr,
            -llr, -llr, -llr, llr, llr, llr, llr, -llr,
            llr, -llr, -llr, llr, llr, llr, -llr, llr,
            llr, -llr, llr, -llr, llr, -llr, -llr, llr]);
    }

    // The inverse of the above: hard-slicing the LLRs must reproduce the bytes.
    #[test]
    fn test_llrs_to_hard() {
        let code = LDPCCode::TC128;

        let llr = -1.0;
        let llrs = vec![
            llr, llr, llr, llr, llr, llr, llr, llr,
            llr, llr, llr, llr, llr, llr, llr, -llr,
            llr, llr, llr, llr, llr, llr, -llr, llr,
            llr, llr, llr, llr, llr, llr, -llr, -llr,
            llr, llr, llr, llr, llr, -llr, llr, llr,
            llr, llr, llr, llr, llr, -llr, llr, -llr,
            llr, llr, llr, llr, llr, -llr, -llr, llr,
            llr, llr, llr, llr, llr, -llr, -llr, -llr,
            llr, llr, -llr, -llr, llr, -llr, llr, llr,
            -llr, llr, llr, -llr, -llr, llr, llr, -llr,
            -llr, llr, llr, -llr, -llr, llr, llr, llr,
            -llr, llr, llr, llr, llr, -llr, -llr, -llr,
            -llr, llr, llr, -llr, llr, -llr, llr, llr,
            -llr, -llr, -llr, llr, llr, llr, llr, -llr,
            llr, -llr, -llr, llr, llr, llr, -llr, llr,
            llr, -llr, llr, -llr, llr, -llr, -llr, llr];

        let mut hard = vec![0u8; code.n()/8];
        code.llrs_to_hard(&llrs, &mut hard);

        assert_eq!(hard, vec![255, 254, 253, 252, 251, 250, 249, 248,
                              203, 102, 103, 120, 107, 30, 157, 169]);
    }

    // End-to-end bit-flipping decode: corrupt one byte of an encoded TC256
    // codeword and check the decoder recovers the original.
    #[test]
    fn test_decode_bf_tc() {
        let code = LDPCCode::TC256;

        // Make up some TX data
        let txdata: Vec<u8> = (0..16).collect();
        let mut txcode = vec![0u8; code.n()/8];
        code.copy_encode(&txdata, &mut txcode);

        // Copy to rx
        let mut rxcode = txcode.clone();

        // Corrupt some bits
        rxcode[0] = 0xFF;

        // Allocate working area and output area
        let mut working = vec![0u8; code.decode_bf_working_len()];
        let mut output = vec![0u8; code.output_len()];

        // Run decoder
        let (success, _) = code.decode_bf(&rxcode, &mut output, &mut working, 50);

        assert!(success);
        assert_eq!(&txcode[..], &output[..txcode.len()]);
    }

    // End-to-end min-sum decode: flip a single bit of an encoded TM1280
    // codeword and check the decoder recovers the original.
    #[test]
    fn test_decode_ms() {
        let code = LDPCCode::TM1280;

        // Make up a TX codeword
        let txdata: Vec<u8> = (0..code.k()/8).map(|i| !(i as u8)).collect();
        let mut txcode = vec![0u8; code.n()/8];
        code.copy_encode(&txdata, &mut txcode);

        // Copy it and corrupt the first bit
        let mut rxcode = txcode.clone();
        rxcode[0] ^= 1<<7;

        // Convert the hard data to LLRs
        let mut llrs = vec![0i8; code.n()];
        for (idx, byte) in rxcode.iter().enumerate() {
            for i in 0..8 {
                llrs[idx*8 + i] = if (byte >> (7-i)) & 1 == 1 { -1i8 } else { 1i8 };
            }
        }
        //code.hard_to_llrs(&rxcode, &mut llrs);

        // Allocate working area and output area
        let mut working = vec![0i8; code.decode_ms_working_len()];
        let mut working_u8 = vec![0u8; code.output_len() - code.k()/8];
        let mut decoded = vec![0u8; code.output_len()];

        // Run decoder
        let (success, _) = code.decode_ms(&llrs, &mut decoded, &mut working, &mut working_u8, 50);
        assert_eq!(&decoded[..8], &txcode[..8]);
        assert!(success);
    }
}
|
use byteorder::ReadBytesExt;
use error::{Error, Result, UnsupportedFeature};
use huffman::{fill_default_mjpeg_tables, HuffmanDecoder, HuffmanTable};
use marker::Marker;
use parser::{AdobeColorTransform, AppData, CodingProcess, Component, Dimensions, EntropyCoding, FrameInfo,
parse_app, parse_com, parse_dht, parse_dqt, parse_dri, parse_sof, parse_sos, ScanInfo};
use upsampler::Upsampler;
use std::cmp;
use std::io::Read;
use std::mem;
use std::ops::Range;
use std::sync::Arc;
use worker::{RowData, PlatformWorker, Worker};
/// Maximum number of color components supported in a frame/scan (JPEG allows at most 4).
pub const MAX_COMPONENTS: usize = 4;

// Maps zig-zag order to natural (row-major) order: entry `i` is the row-major
// position of the i-th coefficient in zig-zag order, used to un-zigzag
// quantization tables and decoded DCT coefficients.
static UNZIGZAG: [u8; 64] = [
     0,  1,  8, 16,  9,  2,  3, 10,
    17, 24, 32, 25, 18, 11,  4,  5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13,  6,  7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63,
];
/// An enumeration over combinations of color spaces and bit depths a pixel can have.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PixelFormat {
    /// Luminance (grayscale), 8 bits
    L8,
    /// RGB, 8 bits per channel
    RGB24,
    /// CMYK, 8 bits per channel
    CMYK32,
}
impl PixelFormat {
    /// Determine the size in bytes of each pixel in this format
    pub fn pixel_bytes(&self) -> usize {
        match *self {
            // One luminance channel.
            PixelFormat::L8 => 1,
            // Red, green and blue channels.
            PixelFormat::RGB24 => 3,
            // Cyan, magenta, yellow and key channels.
            PixelFormat::CMYK32 => 4,
        }
    }
}
/// Represents metadata of an image.
///
/// Width and height reflect the decoder's output size (which, presumably, may
/// differ from the stored size when scaling was requested via `Decoder::scale`
/// — TODO confirm).
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct ImageInfo {
    /// The width of the image, in pixels.
    pub width: u16,
    /// The height of the image, in pixels.
    pub height: u16,
    /// The pixel format of the image.
    pub pixel_format: PixelFormat,
}
/// JPEG decoder
pub struct Decoder<R> {
    // Byte source the JPEG stream is read from.
    reader: R,

    // Frame header (SOF) information; None until a SOF marker has been parsed.
    frame: Option<FrameInfo>,
    // DC/AC Huffman tables, indexed by table destination slot (0-3).
    dc_huffman_tables: Vec<Option<HuffmanTable>>,
    ac_huffman_tables: Vec<Option<HuffmanTable>>,
    // Quantization tables stored in natural (un-zigzagged) order, by slot (0-3).
    quantization_tables: [Option<Arc<[u16; 64]>>; 4],
    // Restart interval in MCUs (0 = no restart markers), set by a DRI segment.
    restart_interval: u16,
    // Color transform indicated by an Adobe APP14 segment, if any.
    color_transform: Option<AdobeColorTransform>,
    // Whether a JFIF APP0 segment was seen.
    is_jfif: bool,
    // Whether an AVI1 APP0 segment was seen (MJPEG; default Huffman tables apply).
    is_mjpeg: bool,

    // Used for progressive JPEGs.
    coefficients: Vec<Vec<i16>>,
    // Bitmask of which coefficients has been completely decoded.
    coefficients_finished: [u64; MAX_COMPONENTS],
}
impl<R: Read> Decoder<R> {
/// Creates a new `Decoder` using the reader `reader`.
pub fn new(reader: R) -> Decoder<R> {
    Decoder {
        reader,
        // No frame header has been seen yet.
        frame: None,
        // Table slots start empty; DHT/DQT segments fill them in.
        dc_huffman_tables: vec![None, None, None, None],
        ac_huffman_tables: vec![None, None, None, None],
        quantization_tables: [None, None, None, None],
        restart_interval: 0,
        color_transform: None,
        is_jfif: false,
        is_mjpeg: false,
        // Only allocated when a progressive frame is encountered.
        coefficients: Vec::new(),
        coefficients_finished: [0; MAX_COMPONENTS],
    }
}
/// Returns metadata about the image.
///
/// The returned value will be `None` until a call to either `read_info` or `decode` has
/// returned `Ok`.
pub fn info(&self) -> Option<ImageInfo> {
    self.frame.as_ref().map(|frame| {
        // The component count determines the output pixel format; any other
        // count is rejected while parsing the frame header.
        let pixel_format = match frame.components.len() {
            1 => PixelFormat::L8,
            3 => PixelFormat::RGB24,
            4 => PixelFormat::CMYK32,
            _ => panic!(),
        };

        ImageInfo {
            width: frame.output_size.width,
            height: frame.output_size.height,
            pixel_format,
        }
    })
}
/// Tries to read metadata from the image without decoding it.
///
/// If successful, the metadata can be obtained using the `info` method.
pub fn read_info(&mut self) -> Result<()> {
    // Run the decode loop in metadata-only mode and discard the (empty) pixel data.
    self.decode_internal(true)?;
    Ok(())
}
/// Configure the decoder to scale the image during decoding.
///
/// This efficiently scales the image by the smallest supported scale
/// factor that produces an image larger than or equal to the requested
/// size in at least one axis. The currently implemented scale factors
/// are 1/8, 1/4, 1/2 and 1.
///
/// To generate a thumbnail of an exact size, pass the desired size and
/// then scale to the final size using a traditional resampling algorithm.
///
/// Returns the output size that decoding will now produce.
pub fn scale(&mut self, requested_width: u16, requested_height: u16) -> Result<(u16, u16)> {
    // Make sure the frame header has been parsed so the stored size is known.
    self.read_info()?;
    let frame = self.frame.as_mut().unwrap();

    let requested = Dimensions { width: requested_width, height: requested_height };
    let idct_size = crate::idct::choose_idct_size(frame.image_size, requested);
    frame.update_idct_size(idct_size)?;

    Ok((frame.output_size.width, frame.output_size.height))
}
/// Decodes the image and returns the decoded pixels if successful.
pub fn decode(&mut self) -> Result<Vec<u8>> {
    // Full decode: process all segments through EOI, not just the metadata.
    self.decode_internal(false)
}
/// Main marker-processing loop shared by `read_info` and `decode`.
///
/// Reads marker segments from the stream and dispatches each to its parser,
/// running the scan decoder as entropy-coded scans are encountered. When
/// `stop_after_metadata` is true, returns an empty `Vec` as soon as the frame
/// header (SOF) has been parsed; otherwise decodes until EOI and returns the
/// pixel data produced by `compute_image`.
fn decode_internal(&mut self, stop_after_metadata: bool) -> Result<Vec<u8>> {
    if stop_after_metadata && self.frame.is_some() {
        // The metadata has already been read.
        return Ok(Vec::new());
    }
    else if self.frame.is_none() && (self.reader.read_u8()? != 0xFF || Marker::from_u8(self.reader.read_u8()?) != Some(Marker::SOI)) {
        return Err(Error::Format("first two bytes are not an SOI marker".to_owned()));
    }

    let mut previous_marker = Marker::SOI;
    let mut pending_marker = None;
    let mut worker = None;
    let mut scans_processed = 0;
    // One decoded plane per frame component, filled in as scans complete them.
    let mut planes = vec![Vec::new(); self.frame.as_ref().map_or(0, |frame| frame.components.len())];

    loop {
        // decode_scan may already have pulled the next marker out of the
        // entropy-coded data; use that before reading a fresh one.
        let marker = match pending_marker.take() {
            Some(m) => m,
            None => self.read_marker()?,
        };

        match marker {
            // Frame header
            Marker::SOF(..) => {
                // Section 4.10
                // "An image contains only one frame in the cases of sequential and
                //  progressive coding processes; an image contains multiple frames for the
                //  hierarchical mode."
                if self.frame.is_some() {
                    return Err(Error::Unsupported(UnsupportedFeature::Hierarchical));
                }

                let frame = parse_sof(&mut self.reader, marker)?;
                let component_count = frame.components.len();

                if frame.is_differential {
                    return Err(Error::Unsupported(UnsupportedFeature::Hierarchical));
                }
                if frame.coding_process == CodingProcess::Lossless {
                    return Err(Error::Unsupported(UnsupportedFeature::Lossless));
                }
                if frame.entropy_coding == EntropyCoding::Arithmetic {
                    return Err(Error::Unsupported(UnsupportedFeature::ArithmeticEntropyCoding));
                }
                if frame.precision != 8 {
                    return Err(Error::Unsupported(UnsupportedFeature::SamplePrecision(frame.precision)));
                }
                if component_count != 1 && component_count != 3 && component_count != 4 {
                    return Err(Error::Unsupported(UnsupportedFeature::ComponentCount(component_count as u8)));
                }

                // Make sure we support the subsampling ratios used.
                let _ = Upsampler::new(&frame.components, frame.image_size.width, frame.image_size.height)?;

                self.frame = Some(frame);

                if stop_after_metadata {
                    return Ok(Vec::new());
                }

                planes = vec![Vec::new(); component_count];
            },

            // Scan header
            Marker::SOS => {
                if self.frame.is_none() {
                    return Err(Error::Format("scan encountered before frame".to_owned()));
                }
                // Spin up the dequantization/IDCT worker lazily on the first scan.
                if worker.is_none() {
                    worker = Some(PlatformWorker::new()?);
                }

                let frame = self.frame.clone().unwrap();
                let scan = parse_sos(&mut self.reader, &frame)?;

                if frame.coding_process == CodingProcess::DctProgressive && self.coefficients.is_empty() {
                    // Progressive scans accumulate coefficients across scans, so
                    // allocate per-component coefficient storage up front.
                    self.coefficients = frame.components.iter().map(|c| {
                        let block_count = c.block_size.width as usize * c.block_size.height as usize;
                        vec![0; block_count * 64]
                    }).collect();
                }

                // This was previously buggy, so let's explain the log here a bit. When a
                // progressive frame is encoded then the coefficients (DC, AC) of each
                // component (=color plane) can be split amongst scans. In particular it can
                // happen or at least occurs in the wild that a scan contains coefficient 0 of
                // all components. If now one but not all components had all other coefficients
                // delivered in previous scans then such a scan contains all components but
                // completes only some of them! (This is technically NOT permitted for all
                // other coefficients as the standard dictates that scans with coefficients
                // other than the 0th must only contain ONE component so we would either
                // complete it or not. We may want to detect and error in case more component
                // are part of a scan than allowed.) What a weird edge case.
                //
                // But this means we track precisely which components get completed here.
                let mut finished = [false; MAX_COMPONENTS];

                if scan.successive_approximation_low == 0 {
                    for (&i, component_finished) in scan.component_indices.iter().zip(&mut finished) {
                        if self.coefficients_finished[i] == !0 {
                            continue;
                        }
                        for j in scan.spectral_selection.clone() {
                            self.coefficients_finished[i] |= 1 << j;
                        }
                        if self.coefficients_finished[i] == !0 {
                            *component_finished = true;
                        }
                    }
                }

                let (marker, data) = self.decode_scan(&frame, &scan, worker.as_mut().unwrap(), &finished)?;

                if let Some(data) = data {
                    for (i, plane) in data.into_iter().enumerate().filter(|&(_, ref plane)| !plane.is_empty()) {
                        if self.coefficients_finished[i] == !0 {
                            planes[i] = plane;
                        }
                    }
                }

                pending_marker = marker;
                scans_processed += 1;
            },

            // Table-specification and miscellaneous markers
            // Quantization table-specification
            Marker::DQT => {
                let tables = parse_dqt(&mut self.reader)?;

                for (i, &table) in tables.iter().enumerate() {
                    if let Some(table) = table {
                        // Tables arrive in zig-zag order; store them in natural
                        // (row-major) order for dequantization.
                        let mut unzigzagged_table = [0u16; 64];

                        for j in 0 .. 64 {
                            unzigzagged_table[UNZIGZAG[j] as usize] = table[j];
                        }

                        self.quantization_tables[i] = Some(Arc::new(unzigzagged_table));
                    }
                }
            },
            // Huffman table-specification
            Marker::DHT => {
                let is_baseline = self.frame.as_ref().map(|frame| frame.is_baseline);
                let (dc_tables, ac_tables) = parse_dht(&mut self.reader, is_baseline)?;

                // Newly-defined tables replace existing ones slot by slot; slots the
                // segment didn't define keep their previous table.
                let current_dc_tables = mem::replace(&mut self.dc_huffman_tables, vec![]);
                self.dc_huffman_tables = dc_tables.into_iter()
                                                  .zip(current_dc_tables.into_iter())
                                                  .map(|(a, b)| a.or(b))
                                                  .collect();

                let current_ac_tables = mem::replace(&mut self.ac_huffman_tables, vec![]);
                self.ac_huffman_tables = ac_tables.into_iter()
                                                  .zip(current_ac_tables.into_iter())
                                                  .map(|(a, b)| a.or(b))
                                                  .collect();
            },
            // Arithmetic conditioning table-specification
            Marker::DAC => return Err(Error::Unsupported(UnsupportedFeature::ArithmeticEntropyCoding)),
            // Restart interval definition
            Marker::DRI => self.restart_interval = parse_dri(&mut self.reader)?,
            // Comment
            Marker::COM => {
                let _comment = parse_com(&mut self.reader)?;
            },
            // Application data
            Marker::APP(..) => {
                if let Some(data) = parse_app(&mut self.reader, marker)? {
                    match data {
                        AppData::Adobe(color_transform) => self.color_transform = Some(color_transform),
                        AppData::Jfif => {
                            // From the JFIF spec:
                            // "The APP0 marker is used to identify a JPEG FIF file.
                            //  The JPEG FIF APP0 marker is mandatory right after the SOI marker."
                            // Some JPEGs in the wild does not follow this though, so we allow
                            // JFIF headers anywhere APP0 markers are allowed.
                            /*
                            if previous_marker != Marker::SOI {
                                return Err(Error::Format("the JFIF APP0 marker must come right after the SOI marker".to_owned()));
                            }
                            */

                            self.is_jfif = true;
                        },
                        AppData::Avi1 => self.is_mjpeg = true,
                    }
                }
            },
            // Restart
            Marker::RST(..) => {
                // Some encoders emit a final RST marker after entropy-coded data, which
                // decode_scan does not take care of. So if we encounter one, we ignore it.
                if previous_marker != Marker::SOS {
                    return Err(Error::Format("RST found outside of entropy-coded data".to_owned()));
                }
            },
            // Define number of lines
            Marker::DNL => {
                // Section B.2.1
                // "If a DNL segment (see B.2.5) is present, it shall immediately follow the first scan."
                if previous_marker != Marker::SOS || scans_processed != 1 {
                    return Err(Error::Format("DNL is only allowed immediately after the first scan".to_owned()));
                }

                return Err(Error::Unsupported(UnsupportedFeature::DNL));
            },
            // Hierarchical mode markers
            Marker::DHP | Marker::EXP => return Err(Error::Unsupported(UnsupportedFeature::Hierarchical)),
            // End of image
            Marker::EOI => break,
            _ => return Err(Error::Format(format!("{:?} marker found where not allowed", marker))),
        }

        previous_marker = marker;
    }

    // Every component must have produced a plane, otherwise the image is incomplete.
    if planes.is_empty() || planes.iter().any(|plane| plane.is_empty()) {
        return Err(Error::Format("no data found".to_owned()));
    }

    let frame = self.frame.as_ref().unwrap();
    compute_image(&frame.components, planes, frame.output_size, self.is_jfif, self.color_transform)
}
/// Reads the next marker from the stream, skipping extraneous bytes, fill
/// bytes (0xFF) and byte-stuffing sequences (0xFF 0x00).
fn read_marker(&mut self) -> Result<Marker> {
    loop {
        // This should be an error as the JPEG spec doesn't allow extraneous data between marker segments.
        // libjpeg allows this though and there are images in the wild utilising it, so we are
        // forced to support this behavior.
        // Sony Ericsson P990i is an example of a device which produce this sort of JPEGs.
        while self.reader.read_u8()? != 0xFF {}

        // Section B.1.1.2
        // All markers are assigned two-byte codes: an X’FF’ byte followed by a
        // byte which is not equal to 0 or X’FF’ (see Table B.1). Any marker may
        // optionally be preceded by any number of fill bytes, which are bytes
        // assigned code X’FF’.
        let mut byte = self.reader.read_u8()?;

        // Section B.1.1.2
        // "Any marker may optionally be preceded by any number of fill bytes, which are bytes assigned code X’FF’."
        while byte == 0xFF {
            byte = self.reader.read_u8()?;
        }

        if byte != 0x00 && byte != 0xFF {
            // NOTE(review): assumes Marker::from_u8 returns Some for every byte
            // other than 0x00/0xFF — confirm, otherwise this unwrap can panic on
            // reserved marker codes in corrupt files.
            return Ok(Marker::from_u8(byte).unwrap());
        }
        // byte == 0x00 here: that was a stuffed data byte, not a marker — keep scanning.
    }
}
/// Decodes the entropy-coded data of a single scan.
///
/// `finished` flags which of the scan's components are fully decoded once this
/// scan completes; only those are sent to the worker for dequantization/IDCT.
///
/// Returns the marker terminating the entropy-coded data (if one was read) and,
/// when any component finished, one entry per frame component of decoded plane
/// data (empty for components this scan did not finish).
fn decode_scan(&mut self,
               frame: &FrameInfo,
               scan: &ScanInfo,
               worker: &mut PlatformWorker,
               finished: &[bool; MAX_COMPONENTS])
               -> Result<(Option<Marker>, Option<Vec<Vec<u8>>>)> {
    assert!(scan.component_indices.len() <= MAX_COMPONENTS);

    let components: Vec<Component> = scan.component_indices.iter()
                                         .map(|&i| frame.components[i].clone())
                                         .collect();

    // Verify that all required quantization tables has been set.
    if components.iter().any(|component| self.quantization_tables[component.quantization_table_index].is_none()) {
        return Err(Error::Format("use of unset quantization table".to_owned()));
    }

    if self.is_mjpeg {
        // MJPEG streams may omit Huffman tables; fall back to the defaults.
        fill_default_mjpeg_tables(scan, &mut self.dc_huffman_tables, &mut self.ac_huffman_tables);
    }

    // Verify that all required huffman tables has been set.
    if scan.spectral_selection.start == 0 &&
            scan.dc_table_indices.iter().any(|&i| self.dc_huffman_tables[i].is_none()) {
        return Err(Error::Format("scan makes use of unset dc huffman table".to_owned()));
    }
    if scan.spectral_selection.end > 1 &&
            scan.ac_table_indices.iter().any(|&i| self.ac_huffman_tables[i].is_none()) {
        return Err(Error::Format("scan makes use of unset ac huffman table".to_owned()));
    }

    // Prepare the worker thread for the work to come.
    for (i, component) in components.iter().enumerate() {
        if finished[i] {
            let row_data = RowData {
                index: i,
                component: component.clone(),
                quantization_table: self.quantization_tables[component.quantization_table_index].clone().unwrap(),
            };

            worker.start(row_data)?;
        }
    }

    let blocks_per_mcu: Vec<u16> = components.iter()
                                             .map(|c| c.horizontal_sampling_factor as u16 * c.vertical_sampling_factor as u16)
                                             .collect();
    let is_progressive = frame.coding_process == CodingProcess::DctProgressive;
    let is_interleaved = components.len() > 1;
    // Sink for coefficients of components this scan does not finish.
    let mut dummy_block = [0i16; 64];
    let mut huffman = HuffmanDecoder::new();
    let mut dc_predictors = [0i16; MAX_COMPONENTS];
    let mut mcus_left_until_restart = self.restart_interval;
    let mut expected_rst_num = 0;
    let mut eob_run = 0;
    let mut mcu_row_coefficients = Vec::with_capacity(components.len());

    if !is_progressive {
        // NOTE(review): entries are only pushed for finished components but are
        // indexed below by the scan component index `i`; this relies on sequential
        // scans finishing either all or none of their components — confirm before
        // changing.
        for (_, component) in components.iter().enumerate().filter(|&(i, _)| finished[i]) {
            let coefficients_per_mcu_row = component.block_size.width as usize * component.vertical_sampling_factor as usize * 64;
            mcu_row_coefficients.push(vec![0i16; coefficients_per_mcu_row]);
        }
    }

    for mcu_y in 0 .. frame.mcu_size.height {
        for mcu_x in 0 .. frame.mcu_size.width {
            for (i, component) in components.iter().enumerate() {
                for j in 0 .. blocks_per_mcu[i] {
                    let (block_x, block_y) = if is_interleaved {
                        // Section A.2.3
                        (mcu_x * component.horizontal_sampling_factor as u16 + j % component.horizontal_sampling_factor as u16,
                         mcu_y * component.vertical_sampling_factor as u16 + j / component.horizontal_sampling_factor as u16)
                    }
                    else {
                        // Section A.2.2
                        let blocks_per_row = component.block_size.width as usize;
                        let block_num = (mcu_y as usize * frame.mcu_size.width as usize +
                                         mcu_x as usize) * blocks_per_mcu[i] as usize + j as usize;
                        let x = (block_num % blocks_per_row) as u16;
                        let y = (block_num / blocks_per_row) as u16;

                        if x * component.dct_scale as u16 >= component.size.width || y * component.dct_scale as u16 >= component.size.height {
                            continue;
                        }

                        (x, y)
                    };

                    let block_offset = (block_y as usize * component.block_size.width as usize + block_x as usize) * 64;
                    let mcu_row_offset = mcu_y as usize * component.block_size.width as usize * component.vertical_sampling_factor as usize * 64;
                    // Destination for this block's coefficients: progressive scans
                    // accumulate into the persistent buffer, finished sequential
                    // components use the per-MCU-row buffer, and everything else
                    // is discarded into dummy_block.
                    let coefficients = if is_progressive {
                        &mut self.coefficients[scan.component_indices[i]][block_offset .. block_offset + 64]
                    } else if finished[i] {
                        &mut mcu_row_coefficients[i][block_offset - mcu_row_offset .. block_offset - mcu_row_offset + 64]
                    } else {
                        &mut dummy_block[..]
                    };

                    if scan.successive_approximation_high == 0 {
                        decode_block(&mut self.reader,
                                     coefficients,
                                     &mut huffman,
                                     self.dc_huffman_tables[scan.dc_table_indices[i]].as_ref(),
                                     self.ac_huffman_tables[scan.ac_table_indices[i]].as_ref(),
                                     scan.spectral_selection.clone(),
                                     scan.successive_approximation_low,
                                     &mut eob_run,
                                     &mut dc_predictors[i])?;
                    }
                    else {
                        decode_block_successive_approximation(&mut self.reader,
                                                              coefficients,
                                                              &mut huffman,
                                                              self.ac_huffman_tables[scan.ac_table_indices[i]].as_ref(),
                                                              scan.spectral_selection.clone(),
                                                              scan.successive_approximation_low,
                                                              &mut eob_run)?;
                    }
                }
            }

            if self.restart_interval > 0 {
                let is_last_mcu = mcu_x == frame.mcu_size.width - 1 && mcu_y == frame.mcu_size.height - 1;
                mcus_left_until_restart -= 1;

                if mcus_left_until_restart == 0 && !is_last_mcu {
                    match huffman.take_marker(&mut self.reader)? {
                        Some(Marker::RST(n)) => {
                            if n != expected_rst_num {
                                return Err(Error::Format(format!("found RST{} where RST{} was expected", n, expected_rst_num)));
                            }

                            huffman.reset();
                            // Section F.2.1.3.1
                            dc_predictors = [0i16; MAX_COMPONENTS];
                            // Section G.1.2.2
                            eob_run = 0;

                            expected_rst_num = (expected_rst_num + 1) % 8;
                            mcus_left_until_restart = self.restart_interval;
                        },
                        Some(marker) => return Err(Error::Format(format!("found marker {:?} inside scan where RST{} was expected", marker, expected_rst_num))),
                        None => return Err(Error::Format(format!("no marker found where RST{} was expected", expected_rst_num))),
                    }
                }
            }
        }

        // Send the coefficients from this MCU row to the worker thread for dequantization and idct.
        for (i, component) in components.iter().enumerate() {
            if finished[i] {
                let coefficients_per_mcu_row = component.block_size.width as usize * component.vertical_sampling_factor as usize * 64;

                let row_coefficients = if is_progressive {
                    let offset = mcu_y as usize * coefficients_per_mcu_row;
                    self.coefficients[scan.component_indices[i]][offset .. offset + coefficients_per_mcu_row].to_vec()
                } else {
                    // Take this row's buffer and leave a fresh zeroed one behind
                    // for the next MCU row.
                    mem::replace(&mut mcu_row_coefficients[i], vec![0i16; coefficients_per_mcu_row])
                };

                worker.append_row((i, row_coefficients))?;
            }
        }
    }

    let mut marker = huffman.take_marker(&mut self.reader)?;
    // Skip any trailing RST markers some encoders emit after the final MCU.
    while let Some(Marker::RST(_)) = marker {
        marker = self.read_marker().ok();
    }

    if finished.iter().any(|&c| c) {
        // Retrieve all the data from the worker thread.
        let mut data = vec![Vec::new(); frame.components.len()];

        for (i, &component_index) in scan.component_indices.iter().enumerate() {
            if finished[i] {
                data[component_index] = worker.get_result(i)?;
            }
        }

        Ok((marker, Some(data)))
    }
    else {
        Ok((marker, None))
    }
}
}
/// Decodes the Huffman-coded coefficients of a single 8x8 block into
/// `coefficients` (stored in natural order via UNZIGZAG), for sequential scans
/// and for progressive passes with successive approximation high == 0.
///
/// `eob_run` carries the end-of-band run across blocks (Section G.1.2.2);
/// `dc_predictor` carries the DC prediction across blocks of the same component.
fn decode_block<R: Read>(reader: &mut R,
                         coefficients: &mut [i16],
                         huffman: &mut HuffmanDecoder,
                         dc_table: Option<&HuffmanTable>,
                         ac_table: Option<&HuffmanTable>,
                         spectral_selection: Range<u8>,
                         successive_approximation_low: u8,
                         eob_run: &mut u16,
                         dc_predictor: &mut i16) -> Result<()> {
    debug_assert_eq!(coefficients.len(), 64);

    if spectral_selection.start == 0 {
        // Section F.2.2.1
        // Figure F.12
        let value = huffman.decode(reader, dc_table.unwrap())?;
        let diff = match value {
            0 => 0,
            1..=11 => huffman.receive_extend(reader, value)?,
            _ => {
                // Section F.1.2.1.1
                // Table F.1
                return Err(Error::Format("invalid DC difference magnitude category".to_owned()));
            },
        };

        // Malicious JPEG files can cause this add to overflow, therefore we use wrapping_add.
        // One example of such a file is tests/crashtest/images/dc-predictor-overflow.jpg
        *dc_predictor = dc_predictor.wrapping_add(diff);
        coefficients[0] = *dc_predictor << successive_approximation_low;
    }

    let mut index = cmp::max(spectral_selection.start, 1);

    if index < spectral_selection.end && *eob_run > 0 {
        // Still inside an end-of-band run from a previous block: all AC
        // coefficients in this band are zero.
        *eob_run -= 1;
        return Ok(());
    }

    // Section F.1.2.2.1
    while index < spectral_selection.end {
        if let Some((value, run)) = huffman.decode_fast_ac(reader, ac_table.unwrap())? {
            index += run;

            if index >= spectral_selection.end {
                break;
            }

            coefficients[UNZIGZAG[index as usize] as usize] = value << successive_approximation_low;
            index += 1;
        }
        else {
            // Slow path: decode the (run, size) byte manually.
            let byte = huffman.decode(reader, ac_table.unwrap())?;
            let r = byte >> 4;
            let s = byte & 0x0f;

            if s == 0 {
                match r {
                    15 => index += 16, // Run length of 16 zero coefficients.
                    _ => {
                        // EOBn code: run of (1 << r) - 1 plus r extra bits of
                        // further all-zero bands (Section G.1.2.2).
                        *eob_run = (1 << r) - 1;

                        if r > 0 {
                            *eob_run += huffman.get_bits(reader, r)?;
                        }

                        break;
                    },
                }
            }
            else {
                index += r;

                if index >= spectral_selection.end {
                    break;
                }

                coefficients[UNZIGZAG[index as usize] as usize] = huffman.receive_extend(reader, s)? << successive_approximation_low;
                index += 1;
            }
        }
    }

    Ok(())
}
/// Refinement pass for progressive scans with successive approximation high != 0:
/// adds one more bit of precision to a single 8x8 block's coefficients
/// (Section G.1.2.1 for DC, Section G.1.2.3 for AC).
fn decode_block_successive_approximation<R: Read>(reader: &mut R,
                                                  coefficients: &mut [i16],
                                                  huffman: &mut HuffmanDecoder,
                                                  ac_table: Option<&HuffmanTable>,
                                                  spectral_selection: Range<u8>,
                                                  successive_approximation_low: u8,
                                                  eob_run: &mut u16) -> Result<()> {
    debug_assert_eq!(coefficients.len(), 64);

    let bit = 1 << successive_approximation_low;

    if spectral_selection.start == 0 {
        // Section G.1.2.1
        // DC refinement: one raw bit per block.
        if huffman.get_bits(reader, 1)? == 1 {
            coefficients[0] |= bit;
        }
    }
    else {
        // Section G.1.2.3

        if *eob_run > 0 {
            // Inside an end-of-band run: no new nonzero coefficients appear, but
            // existing nonzero coefficients still receive correction bits.
            *eob_run -= 1;

            refine_non_zeroes(reader, coefficients, huffman, spectral_selection, 64, bit)?;
            return Ok(());
        }

        let mut index = spectral_selection.start;

        while index < spectral_selection.end {
            let byte = huffman.decode(reader, ac_table.unwrap())?;
            let r = byte >> 4;
            let s = byte & 0x0f;

            let mut zero_run_length = r;
            let mut value = 0;

            match s {
                0 => {
                    match r {
                        15 => {
                            // Run length of 16 zero coefficients.
                            // We don't need to do anything special here, zero_run_length is 15
                            // and then value (which is zero) gets written, resulting in 16
                            // zero coefficients.
                        },
                        _ => {
                            *eob_run = (1 << r) - 1;

                            if r > 0 {
                                *eob_run += huffman.get_bits(reader, r)?;
                            }

                            // Force end of block.
                            zero_run_length = 64;
                        },
                    }
                },
                1 => {
                    // Newly significant coefficient: magnitude is one `bit`,
                    // sign taken from the next raw bit.
                    if huffman.get_bits(reader, 1)? == 1 {
                        value = bit;
                    }
                    else {
                        value = -bit;
                    }
                },
                _ => return Err(Error::Format("unexpected huffman code".to_owned())),
            }

            let range = Range {
                start: index,
                end: spectral_selection.end,
            };

            // Skip the zero run (refining any nonzero coefficients passed over)
            // and land on the position for the new coefficient, if any.
            index = refine_non_zeroes(reader, coefficients, huffman, range, zero_run_length, bit)?;

            if value != 0 {
                coefficients[UNZIGZAG[index as usize] as usize] = value;
            }

            index += 1;
        }
    }

    Ok(())
}
// Correction-bit pass over a coefficient range (Section G.1.2.3): skips `zrl`
// zero coefficients and, for every nonzero coefficient passed, reads one
// refinement bit which (when set) moves the coefficient one `bit` further from
// zero. Returns the index where the zero-run ended, or the last index in range.
fn refine_non_zeroes<R: Read>(reader: &mut R,
                              coefficients: &mut [i16],
                              huffman: &mut HuffmanDecoder,
                              range: Range<u8>,
                              zrl: u8,
                              bit: i16) -> Result<u8> {
    debug_assert_eq!(coefficients.len(), 64);
    let last = range.end - 1;
    let mut zero_run_length = zrl;
    for i in range {
        let index = UNZIGZAG[i as usize] as usize;
        if coefficients[index] == 0 {
            if zero_run_length == 0 {
                return Ok(i);
            }
            zero_run_length -= 1;
        }
        // Only apply the correction if this bit of the coefficient was not
        // already set by a previous pass.
        else if huffman.get_bits(reader, 1)? == 1 && coefficients[index] & bit == 0 {
            if coefficients[index] > 0 {
                coefficients[index] += bit;
            }
            else {
                coefficients[index] -= bit;
            }
        }
    }
    Ok(last)
}
// Assembles the decoded component planes into the final interleaved pixel
// buffer. Grayscale images are compacted in place; multi-component images are
// upsampled, interleaved and color-converted row by row.
fn compute_image(components: &[Component],
                 mut data: Vec<Vec<u8>>,
                 output_size: Dimensions,
                 is_jfif: bool,
                 color_transform: Option<AdobeColorTransform>) -> Result<Vec<u8>> {
    // Every component must have produced data. The `is_empty` check on `data`
    // itself is required too: if no scan was decoded at all, `data` is an empty
    // Vec, `any(Vec::is_empty)` is vacuously false, and `data.remove(0)` below
    // would panic for single-component images.
    if data.is_empty() || data.iter().any(Vec::is_empty) {
        return Err(Error::Format("not all components have data".to_owned()));
    }
    if components.len() == 1 {
        let component = &components[0];
        let mut decoded: Vec<u8> = data.remove(0);
        let width = component.size.width as usize;
        let height = component.size.height as usize;
        let size = width * height;
        // Rows were decoded with a stride of whole DCT blocks, which can be
        // wider than the visible image.
        let line_stride = component.block_size.width as usize * component.dct_scale;
        // if the image width is a multiple of the block size,
        // then we don't have to move bytes in the decoded data
        if usize::from(output_size.width) != line_stride {
            let mut buffer = vec![0u8; width];
            // The first line already starts at index 0, so we need to move only lines 1..height
            for y in 1..height {
                let destination_idx = y * width;
                let source_idx = y * line_stride;
                // We could use copy_within, but we need to support old rust versions
                buffer.copy_from_slice(&decoded[source_idx..][..width]);
                let destination = &mut decoded[destination_idx..][..width];
                destination.copy_from_slice(&buffer);
            }
        }
        // Trim to exactly width * height samples (drops trailing block padding).
        decoded.resize(size, 0);
        Ok(decoded)
    }
    else {
        compute_image_parallel(components, data, output_size, is_jfif, color_transform)
    }
}
#[cfg(feature="rayon")]
// Builds the interleaved output image, processing one output row per rayon
// work item: upsample + interleave the component planes, then color-convert.
fn compute_image_parallel(components: &[Component],
                          data: Vec<Vec<u8>>,
                          output_size: Dimensions,
                          is_jfif: bool,
                          color_transform: Option<AdobeColorTransform>) -> Result<Vec<u8>> {
    use rayon::prelude::*;
    let color_convert_func = choose_color_convert_func(components.len(), is_jfif, color_transform)?;
    let upsampler = Upsampler::new(components, output_size.width, output_size.height)?;
    // One interleaved output row holds width * component-count samples.
    let line_size = output_size.width as usize * components.len();
    let mut image = vec![0u8; line_size * output_size.height as usize];
    // `with_max_len(1)` keeps exactly one row per parallel work item.
    image.par_chunks_mut(line_size)
         .with_max_len(1)
         .enumerate()
         .for_each(|(row, line)| {
             upsampler.upsample_and_interleave_row(&data, row, output_size.width as usize, line);
             color_convert_func(line);
         });
    Ok(image)
}
#[cfg(not(feature="rayon"))]
// Serial fallback used when the "rayon" feature is disabled: for each output
// row, upsample + interleave the component planes, then color-convert in place.
fn compute_image_parallel(components: &[Component],
                          data: Vec<Vec<u8>>,
                          output_size: Dimensions,
                          is_jfif: bool,
                          color_transform: Option<AdobeColorTransform>) -> Result<Vec<u8>> {
    let color_convert_func = choose_color_convert_func(components.len(), is_jfif, color_transform)?;
    let upsampler = Upsampler::new(components, output_size.width, output_size.height)?;
    let width = output_size.width as usize;
    let line_size = width * components.len();
    let mut image = vec![0u8; line_size * output_size.height as usize];
    let mut row = 0;
    for line in image.chunks_mut(line_size) {
        upsampler.upsample_and_interleave_row(&data, row, width, line);
        color_convert_func(line);
        row += 1;
    }
    Ok(image)
}
// Selects the in-place color conversion routine for the given component count.
// Only 3- and 4-component images reach this point; grayscale is handled
// separately in compute_image.
fn choose_color_convert_func(component_count: usize,
                             _is_jfif: bool,
                             color_transform: Option<AdobeColorTransform>)
                             -> Result<fn(&mut [u8])> {
    match component_count {
        // http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe
        // Unknown means the data is RGB, so we don't need to perform any color conversion on it.
        3 => match color_transform {
            Some(AdobeColorTransform::Unknown) => Ok(color_convert_line_null),
            _ => Ok(color_convert_line_ycbcr),
        },
        // http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe
        4 => match color_transform {
            Some(AdobeColorTransform::Unknown) => Ok(color_convert_line_cmyk),
            Some(_) => Ok(color_convert_line_ycck),
            None => Err(Error::Format("4 components without Adobe APP14 metadata to indicate color space".to_owned())),
        },
        _ => panic!(),
    }
}
// No-op conversion: the data is already in the desired output color space.
fn color_convert_line_null(_data: &mut [u8]) {}
// Converts a row of interleaved YCbCr triplets to RGB in place. Any trailing
// bytes that do not form a complete triplet are left untouched.
fn color_convert_line_ycbcr(data: &mut [u8]) {
    for pixel in data.chunks_exact_mut(3) {
        let rgb = ycbcr_to_rgb(pixel[0], pixel[1], pixel[2]);
        pixel.copy_from_slice(&[rgb.0, rgb.1, rgb.2]);
    }
}
// Converts a row of interleaved YCCK samples to CMYK in place: the YCbCr part
// is converted to RGB and the K sample is inverted (Adobe stores it inverted).
fn color_convert_line_ycck(data: &mut [u8]) {
    for pixel in data.chunks_exact_mut(4) {
        let (r, g, b) = ycbcr_to_rgb(pixel[0], pixel[1], pixel[2]);
        let k = 255 - pixel[3];
        pixel.copy_from_slice(&[r, g, b, k]);
    }
}
// Inverts each sample of a row of CMYK pixels in place (Adobe stores CMYK
// inverted). Trailing bytes not forming a complete 4-sample pixel are left as-is.
fn color_convert_line_cmyk(data: &mut [u8]) {
    for pixel in data.chunks_exact_mut(4) {
        for sample in pixel.iter_mut() {
            *sample = 255 - *sample;
        }
    }
}
// ITU-R BT.601 YCbCr -> RGB conversion for a single pixel.
fn ycbcr_to_rgb(y: u8, cb: u8, cr: u8) -> (u8, u8, u8) {
    // Round-to-nearest and clamp to [0, 255] before narrowing to u8.
    // TODO: Rust has defined float-to-int conversion as saturating,
    // which is exactly what we need here. However, as of this writing
    // it still hasn't reached the stable channel.
    // This can be simplified to `(v + 0.5) as u8` without any clamping
    // as soon as our MSRV reaches the version that has saturating casts.
    // The version without explicit clamping is also noticeably faster.
    let to_u8 = |v: f32| {
        let rounded = (v + 0.5) as i32;
        std::cmp::min(std::cmp::max(rounded, 0), 255) as u8
    };
    let y = y as f32;
    let cb = cb as f32 - 128.0;
    let cr = cr as f32 - 128.0;
    (to_u8(y + 1.40200 * cr),
     to_u8(y - 0.34414 * cb - 0.71414 * cr),
     to_u8(y + 1.77200 * cb))
}
// Clamps `value` into the range representable by u8, i.e. [0, 255].
fn clamp_to_u8(value: i32) -> i32 {
    if value < 0 {
        0
    } else if value > 255 {
        255
    } else {
        value
    }
}
Consolidate planes check in decoder.rs
Redundant checks existed in compute_image and decode_internal to
check whether any of the planes were empty.
use byteorder::ReadBytesExt;
use error::{Error, Result, UnsupportedFeature};
use huffman::{fill_default_mjpeg_tables, HuffmanDecoder, HuffmanTable};
use marker::Marker;
use parser::{AdobeColorTransform, AppData, CodingProcess, Component, Dimensions, EntropyCoding, FrameInfo,
parse_app, parse_com, parse_dht, parse_dqt, parse_dri, parse_sof, parse_sos, ScanInfo};
use upsampler::Upsampler;
use std::cmp;
use std::io::Read;
use std::mem;
use std::ops::Range;
use std::sync::Arc;
use worker::{RowData, PlatformWorker, Worker};
pub const MAX_COMPONENTS: usize = 4;
// Maps an index in zigzag scan order (the order coefficients appear in the
// bitstream) to the corresponding index in natural, row-major 8x8 block order.
static UNZIGZAG: [u8; 64] = [
    0, 1, 8, 16, 9, 2, 3, 10,
    17, 24, 32, 25, 18, 11, 4, 5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13, 6, 7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63,
];
/// An enumeration over combinations of color spaces and bit depths a pixel can have.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PixelFormat {
    /// Luminance (grayscale), 8 bits per pixel
    L8,
    /// RGB, 8 bits per channel (3 bytes per pixel)
    RGB24,
    /// CMYK, 8 bits per channel (4 bytes per pixel)
    CMYK32,
}
impl PixelFormat {
/// Determine the size in bytes of each pixel in this format
pub fn pixel_bytes(&self) -> usize {
match self {
PixelFormat::L8 => 1,
PixelFormat::RGB24 => 3,
PixelFormat::CMYK32 => 4,
}
}
}
/// Represents metadata of an image.
///
/// The dimensions are those of the decoder's output, i.e. after any
/// downscaling requested via `Decoder::scale`.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct ImageInfo {
    /// The width of the image, in pixels.
    pub width: u16,
    /// The height of the image, in pixels.
    pub height: u16,
    /// The pixel format of the image.
    pub pixel_format: PixelFormat,
}
/// JPEG decoder
pub struct Decoder<R> {
    // Source of the JPEG byte stream.
    reader: R,
    // Frame header; `None` until an SOF marker has been parsed.
    frame: Option<FrameInfo>,
    // DC/AC huffman tables, indexed by table destination (0-3), set by DHT segments.
    dc_huffman_tables: Vec<Option<HuffmanTable>>,
    ac_huffman_tables: Vec<Option<HuffmanTable>>,
    // Quantization tables in natural (un-zigzagged) order, set by DQT segments.
    quantization_tables: [Option<Arc<[u16; 64]>>; 4],
    // Number of MCUs between restart markers; 0 means no restart interval (DRI).
    restart_interval: u16,
    // Color transform from an Adobe APP14 segment, if one was present.
    color_transform: Option<AdobeColorTransform>,
    // Whether a JFIF APP0 segment was seen.
    is_jfif: bool,
    // Whether an AVI1 APP0 segment was seen (motion JPEG uses default tables).
    is_mjpeg: bool,
    // Used for progressive JPEGs.
    coefficients: Vec<Vec<i16>>,
    // Bitmask of which coefficients has been completely decoded.
    coefficients_finished: [u64; MAX_COMPONENTS],
}
impl<R: Read> Decoder<R> {
/// Creates a new `Decoder` using the reader `reader`.
pub fn new(reader: R) -> Decoder<R> {
    // All tables start unset; they are populated later from DQT/DHT segments.
    Decoder {
        reader,
        frame: None,
        dc_huffman_tables: vec![None, None, None, None],
        ac_huffman_tables: vec![None, None, None, None],
        quantization_tables: [None, None, None, None],
        restart_interval: 0,
        color_transform: None,
        is_jfif: false,
        is_mjpeg: false,
        coefficients: Vec::new(),
        coefficients_finished: [0; MAX_COMPONENTS],
    }
}
/// Returns metadata about the image.
///
/// The returned value will be `None` until a call to either `read_info` or `decode` has
/// returned `Ok`.
pub fn info(&self) -> Option<ImageInfo> {
    self.frame.as_ref().map(|frame| {
        // The component count was validated when the SOF marker was parsed,
        // so anything other than 1/3/4 here is a bug.
        let pixel_format = match frame.components.len() {
            1 => PixelFormat::L8,
            3 => PixelFormat::RGB24,
            4 => PixelFormat::CMYK32,
            _ => panic!(),
        };
        ImageInfo {
            width: frame.output_size.width,
            height: frame.output_size.height,
            pixel_format,
        }
    })
}
/// Tries to read metadata from the image without decoding it.
///
/// If successful, the metadata can be obtained using the `info` method.
pub fn read_info(&mut self) -> Result<()> {
    // Run the marker loop in metadata-only mode and discard the (empty) pixel data.
    self.decode_internal(true)?;
    Ok(())
}
/// Configure the decoder to scale the image during decoding.
///
/// This efficiently scales the image by the smallest supported scale
/// factor that produces an image larger than or equal to the requested
/// size in at least one axis. The currently implemented scale factors
/// are 1/8, 1/4, 1/2 and 1.
///
/// To generate a thumbnail of an exact size, pass the desired size and
/// then scale to the final size using a traditional resampling algorithm.
pub fn scale(&mut self, requested_width: u16, requested_height: u16) -> Result<(u16, u16)> {
    // Make sure the frame header has been parsed so the image size is known.
    self.read_info()?;
    let frame = self.frame.as_mut().unwrap();
    // Pick the IDCT size whose output covers the requested dimensions, then
    // update the frame's output/block geometry accordingly.
    let idct_size = crate::idct::choose_idct_size(frame.image_size, Dimensions{ width: requested_width, height: requested_height });
    frame.update_idct_size(idct_size)?;
    // Return the actual output dimensions the decoder will produce.
    Ok((frame.output_size.width, frame.output_size.height))
}
/// Decodes the image and returns the decoded pixels if successful.
///
/// The layout of the returned buffer corresponds to the `PixelFormat`
/// reported by `info` after decoding.
pub fn decode(&mut self) -> Result<Vec<u8>> {
    self.decode_internal(false)
}
// Main marker-dispatch loop: reads and handles JPEG segments until EOI,
// decoding each scan as it is encountered. When `stop_after_metadata` is
// true, an empty Vec is returned as soon as the frame header (SOF) is parsed.
fn decode_internal(&mut self, stop_after_metadata: bool) -> Result<Vec<u8>> {
    if stop_after_metadata && self.frame.is_some() {
        // The metadata has already been read.
        return Ok(Vec::new());
    }
    else if self.frame.is_none() && (self.reader.read_u8()? != 0xFF || Marker::from_u8(self.reader.read_u8()?) != Some(Marker::SOI)) {
        return Err(Error::Format("first two bytes are not an SOI marker".to_owned()));
    }
    let mut previous_marker = Marker::SOI;
    let mut pending_marker = None;
    let mut worker = None;
    let mut scans_processed = 0;
    // One output plane per frame component; filled in as scans complete them.
    let mut planes = vec![Vec::new(); self.frame.as_ref().map_or(0, |frame| frame.components.len())];
    loop {
        let marker = match pending_marker.take() {
            Some(m) => m,
            None => self.read_marker()?,
        };
        match marker {
            // Frame header
            Marker::SOF(..) => {
                // Section 4.10
                // "An image contains only one frame in the cases of sequential and
                // progressive coding processes; an image contains multiple frames for the
                // hierarchical mode."
                if self.frame.is_some() {
                    return Err(Error::Unsupported(UnsupportedFeature::Hierarchical));
                }
                let frame = parse_sof(&mut self.reader, marker)?;
                let component_count = frame.components.len();
                if frame.is_differential {
                    return Err(Error::Unsupported(UnsupportedFeature::Hierarchical));
                }
                if frame.coding_process == CodingProcess::Lossless {
                    return Err(Error::Unsupported(UnsupportedFeature::Lossless));
                }
                if frame.entropy_coding == EntropyCoding::Arithmetic {
                    return Err(Error::Unsupported(UnsupportedFeature::ArithmeticEntropyCoding));
                }
                if frame.precision != 8 {
                    return Err(Error::Unsupported(UnsupportedFeature::SamplePrecision(frame.precision)));
                }
                if component_count != 1 && component_count != 3 && component_count != 4 {
                    return Err(Error::Unsupported(UnsupportedFeature::ComponentCount(component_count as u8)));
                }
                // Make sure we support the subsampling ratios used.
                let _ = Upsampler::new(&frame.components, frame.image_size.width, frame.image_size.height)?;
                self.frame = Some(frame);
                if stop_after_metadata {
                    return Ok(Vec::new());
                }
                planes = vec![Vec::new(); component_count];
            },
            // Scan header
            Marker::SOS => {
                if self.frame.is_none() {
                    return Err(Error::Format("scan encountered before frame".to_owned()));
                }
                // The worker (dequantization + IDCT) is created lazily on the first scan.
                if worker.is_none() {
                    worker = Some(PlatformWorker::new()?);
                }
                let frame = self.frame.clone().unwrap();
                let scan = parse_sos(&mut self.reader, &frame)?;
                // Progressive frames accumulate coefficients across scans; allocate
                // the per-component coefficient buffers once.
                if frame.coding_process == CodingProcess::DctProgressive && self.coefficients.is_empty() {
                    self.coefficients = frame.components.iter().map(|c| {
                        let block_count = c.block_size.width as usize * c.block_size.height as usize;
                        vec![0; block_count * 64]
                    }).collect();
                }
                // This was previously buggy, so let's explain the log here a bit. When a
                // progressive frame is encoded then the coefficients (DC, AC) of each
                // component (=color plane) can be split amongst scans. In particular it can
                // happen or at least occurs in the wild that a scan contains coefficient 0 of
                // all components. If now one but not all components had all other coefficients
                // delivered in previous scans then such a scan contains all components but
                // completes only some of them! (This is technically NOT permitted for all
                // other coefficients as the standard dictates that scans with coefficients
                // other than the 0th must only contain ONE component so we would either
                // complete it or not. We may want to detect and error in case more component
                // are part of a scan than allowed.) What a weird edge case.
                //
                // But this means we track precisely which components get completed here.
                let mut finished = [false; MAX_COMPONENTS];
                if scan.successive_approximation_low == 0 {
                    for (&i, component_finished) in scan.component_indices.iter().zip(&mut finished) {
                        if self.coefficients_finished[i] == !0 {
                            continue;
                        }
                        for j in scan.spectral_selection.clone() {
                            self.coefficients_finished[i] |= 1 << j;
                        }
                        if self.coefficients_finished[i] == !0 {
                            *component_finished = true;
                        }
                    }
                }
                let (marker, data) = self.decode_scan(&frame, &scan, worker.as_mut().unwrap(), &finished)?;
                if let Some(data) = data {
                    // Keep only planes of components whose coefficients are now complete.
                    for (i, plane) in data.into_iter().enumerate().filter(|&(_, ref plane)| !plane.is_empty()) {
                        if self.coefficients_finished[i] == !0 {
                            planes[i] = plane;
                        }
                    }
                }
                pending_marker = marker;
                scans_processed += 1;
            },
            // Table-specification and miscellaneous markers
            // Quantization table-specification
            Marker::DQT => {
                let tables = parse_dqt(&mut self.reader)?;
                for (i, &table) in tables.iter().enumerate() {
                    if let Some(table) = table {
                        // Tables arrive in zigzag order; store them in natural order.
                        let mut unzigzagged_table = [0u16; 64];
                        for j in 0 .. 64 {
                            unzigzagged_table[UNZIGZAG[j] as usize] = table[j];
                        }
                        self.quantization_tables[i] = Some(Arc::new(unzigzagged_table));
                    }
                }
            },
            // Huffman table-specification
            Marker::DHT => {
                let is_baseline = self.frame.as_ref().map(|frame| frame.is_baseline);
                let (dc_tables, ac_tables) = parse_dht(&mut self.reader, is_baseline)?;
                // A DHT segment may redefine only some slots; keep existing tables
                // for the slots it doesn't touch.
                let current_dc_tables = mem::replace(&mut self.dc_huffman_tables, vec![]);
                self.dc_huffman_tables = dc_tables.into_iter()
                                                  .zip(current_dc_tables.into_iter())
                                                  .map(|(a, b)| a.or(b))
                                                  .collect();
                let current_ac_tables = mem::replace(&mut self.ac_huffman_tables, vec![]);
                self.ac_huffman_tables = ac_tables.into_iter()
                                                  .zip(current_ac_tables.into_iter())
                                                  .map(|(a, b)| a.or(b))
                                                  .collect();
            },
            // Arithmetic conditioning table-specification
            Marker::DAC => return Err(Error::Unsupported(UnsupportedFeature::ArithmeticEntropyCoding)),
            // Restart interval definition
            Marker::DRI => self.restart_interval = parse_dri(&mut self.reader)?,
            // Comment
            Marker::COM => {
                let _comment = parse_com(&mut self.reader)?;
            },
            // Application data
            Marker::APP(..) => {
                if let Some(data) = parse_app(&mut self.reader, marker)? {
                    match data {
                        AppData::Adobe(color_transform) => self.color_transform = Some(color_transform),
                        AppData::Jfif => {
                            // From the JFIF spec:
                            // "The APP0 marker is used to identify a JPEG FIF file.
                            // The JPEG FIF APP0 marker is mandatory right after the SOI marker."
                            // Some JPEGs in the wild does not follow this though, so we allow
                            // JFIF headers anywhere APP0 markers are allowed.
                            /*
                            if previous_marker != Marker::SOI {
                                return Err(Error::Format("the JFIF APP0 marker must come right after the SOI marker".to_owned()));
                            }
                            */
                            self.is_jfif = true;
                        },
                        AppData::Avi1 => self.is_mjpeg = true,
                    }
                }
            },
            // Restart
            Marker::RST(..) => {
                // Some encoders emit a final RST marker after entropy-coded data, which
                // decode_scan does not take care of. So if we encounter one, we ignore it.
                if previous_marker != Marker::SOS {
                    return Err(Error::Format("RST found outside of entropy-coded data".to_owned()));
                }
            },
            // Define number of lines
            Marker::DNL => {
                // Section B.2.1
                // "If a DNL segment (see B.2.5) is present, it shall immediately follow the first scan."
                if previous_marker != Marker::SOS || scans_processed != 1 {
                    return Err(Error::Format("DNL is only allowed immediately after the first scan".to_owned()));
                }
                return Err(Error::Unsupported(UnsupportedFeature::DNL));
            },
            // Hierarchical mode markers
            Marker::DHP | Marker::EXP => return Err(Error::Unsupported(UnsupportedFeature::Hierarchical)),
            // End of image
            Marker::EOI => break,
            _ => return Err(Error::Format(format!("{:?} marker found where not allowed", marker))),
        }
        previous_marker = marker;
    }
    let frame = self.frame.as_ref().unwrap();
    compute_image(&frame.components, planes, frame.output_size, self.is_jfif, self.color_transform)
}
// Skips forward to the next marker in the stream and returns it. Extraneous
// bytes between segments and any number of 0xFF fill bytes are tolerated for
// compatibility with real-world encoders.
fn read_marker(&mut self) -> Result<Marker> {
    loop {
        // This should be an error as the JPEG spec doesn't allow extraneous data between marker segments.
        // libjpeg allows this though and there are images in the wild utilising it, so we are
        // forced to support this behavior.
        // Sony Ericsson P990i is an example of a device which produce this sort of JPEGs.
        while self.reader.read_u8()? != 0xFF {}
        // Section B.1.1.2
        // All markers are assigned two-byte codes: an X’FF’ byte followed by a
        // byte which is not equal to 0 or X’FF’ (see Table B.1). Any marker may
        // optionally be preceded by any number of fill bytes, which are bytes
        // assigned code X’FF’.
        let mut byte = self.reader.read_u8()?;
        // Section B.1.1.2
        // "Any marker may optionally be preceded by any number of fill bytes, which are bytes assigned code X’FF’."
        while byte == 0xFF {
            byte = self.reader.read_u8()?;
        }
        // 0xFF00 is byte stuffing (not a marker); loop again in that case.
        if byte != 0x00 && byte != 0xFF {
            return Ok(Marker::from_u8(byte).unwrap());
        }
    }
}
// Decodes the entropy-coded data of a single scan. Coefficients of components
// completed by this scan (per `finished`) are streamed MCU-row by MCU-row to
// the worker for dequantization and IDCT; the resulting planes (if any) are
// returned together with the marker that terminated the scan.
fn decode_scan(&mut self,
               frame: &FrameInfo,
               scan: &ScanInfo,
               worker: &mut PlatformWorker,
               finished: &[bool; MAX_COMPONENTS])
               -> Result<(Option<Marker>, Option<Vec<Vec<u8>>>)> {
    assert!(scan.component_indices.len() <= MAX_COMPONENTS);
    let components: Vec<Component> = scan.component_indices.iter()
                                         .map(|&i| frame.components[i].clone())
                                         .collect();
    // Verify that all required quantization tables has been set.
    if components.iter().any(|component| self.quantization_tables[component.quantization_table_index].is_none()) {
        return Err(Error::Format("use of unset quantization table".to_owned()));
    }
    // Motion JPEG frames may omit DHT segments; fall back to the default tables.
    if self.is_mjpeg {
        fill_default_mjpeg_tables(scan, &mut self.dc_huffman_tables, &mut self.ac_huffman_tables);
    }
    // Verify that all required huffman tables has been set.
    if scan.spectral_selection.start == 0 &&
        scan.dc_table_indices.iter().any(|&i| self.dc_huffman_tables[i].is_none()) {
        return Err(Error::Format("scan makes use of unset dc huffman table".to_owned()));
    }
    if scan.spectral_selection.end > 1 &&
        scan.ac_table_indices.iter().any(|&i| self.ac_huffman_tables[i].is_none()) {
        return Err(Error::Format("scan makes use of unset ac huffman table".to_owned()));
    }
    // Prepare the worker thread for the work to come.
    for (i, component) in components.iter().enumerate() {
        if finished[i] {
            let row_data = RowData {
                index: i,
                component: component.clone(),
                quantization_table: self.quantization_tables[component.quantization_table_index].clone().unwrap(),
            };
            worker.start(row_data)?;
        }
    }
    let blocks_per_mcu: Vec<u16> = components.iter()
                                             .map(|c| c.horizontal_sampling_factor as u16 * c.vertical_sampling_factor as u16)
                                             .collect();
    let is_progressive = frame.coding_process == CodingProcess::DctProgressive;
    let is_interleaved = components.len() > 1;
    // Blocks of components not completed by this scan are decoded into this
    // throwaway buffer (the bitstream still has to be consumed).
    let mut dummy_block = [0i16; 64];
    let mut huffman = HuffmanDecoder::new();
    let mut dc_predictors = [0i16; MAX_COMPONENTS];
    let mut mcus_left_until_restart = self.restart_interval;
    let mut expected_rst_num = 0;
    let mut eob_run = 0;
    // For sequential scans, per-component staging buffers holding one MCU row
    // worth of coefficients, handed to the worker after each row.
    let mut mcu_row_coefficients = Vec::with_capacity(components.len());
    if !is_progressive {
        for (_, component) in components.iter().enumerate().filter(|&(i, _)| finished[i]) {
            let coefficients_per_mcu_row = component.block_size.width as usize * component.vertical_sampling_factor as usize * 64;
            mcu_row_coefficients.push(vec![0i16; coefficients_per_mcu_row]);
        }
    }
    for mcu_y in 0 .. frame.mcu_size.height {
        for mcu_x in 0 .. frame.mcu_size.width {
            for (i, component) in components.iter().enumerate() {
                for j in 0 .. blocks_per_mcu[i] {
                    let (block_x, block_y) = if is_interleaved {
                        // Section A.2.3
                        (mcu_x * component.horizontal_sampling_factor as u16 + j % component.horizontal_sampling_factor as u16,
                         mcu_y * component.vertical_sampling_factor as u16 + j / component.horizontal_sampling_factor as u16)
                    }
                    else {
                        // Section A.2.2
                        let blocks_per_row = component.block_size.width as usize;
                        let block_num = (mcu_y as usize * frame.mcu_size.width as usize +
                                         mcu_x as usize) * blocks_per_mcu[i] as usize + j as usize;
                        let x = (block_num % blocks_per_row) as u16;
                        let y = (block_num / blocks_per_row) as u16;
                        // Skip blocks entirely outside the visible component area.
                        if x * component.dct_scale as u16 >= component.size.width || y * component.dct_scale as u16 >= component.size.height {
                            continue;
                        }
                        (x, y)
                    };
                    let block_offset = (block_y as usize * component.block_size.width as usize + block_x as usize) * 64;
                    let mcu_row_offset = mcu_y as usize * component.block_size.width as usize * component.vertical_sampling_factor as usize * 64;
                    // Choose the coefficient destination: the persistent progressive
                    // buffer, the current MCU-row staging buffer, or the dummy block.
                    let coefficients = if is_progressive {
                        &mut self.coefficients[scan.component_indices[i]][block_offset .. block_offset + 64]
                    } else if finished[i] {
                        &mut mcu_row_coefficients[i][block_offset - mcu_row_offset .. block_offset - mcu_row_offset + 64]
                    } else {
                        &mut dummy_block[..]
                    };
                    if scan.successive_approximation_high == 0 {
                        decode_block(&mut self.reader,
                                     coefficients,
                                     &mut huffman,
                                     self.dc_huffman_tables[scan.dc_table_indices[i]].as_ref(),
                                     self.ac_huffman_tables[scan.ac_table_indices[i]].as_ref(),
                                     scan.spectral_selection.clone(),
                                     scan.successive_approximation_low,
                                     &mut eob_run,
                                     &mut dc_predictors[i])?;
                    }
                    else {
                        decode_block_successive_approximation(&mut self.reader,
                                                              coefficients,
                                                              &mut huffman,
                                                              self.ac_huffman_tables[scan.ac_table_indices[i]].as_ref(),
                                                              scan.spectral_selection.clone(),
                                                              scan.successive_approximation_low,
                                                              &mut eob_run)?;
                    }
                }
            }
            if self.restart_interval > 0 {
                let is_last_mcu = mcu_x == frame.mcu_size.width - 1 && mcu_y == frame.mcu_size.height - 1;
                mcus_left_until_restart -= 1;
                if mcus_left_until_restart == 0 && !is_last_mcu {
                    match huffman.take_marker(&mut self.reader)? {
                        Some(Marker::RST(n)) => {
                            if n != expected_rst_num {
                                return Err(Error::Format(format!("found RST{} where RST{} was expected", n, expected_rst_num)));
                            }
                            huffman.reset();
                            // Section F.2.1.3.1
                            dc_predictors = [0i16; MAX_COMPONENTS];
                            // Section G.1.2.2
                            eob_run = 0;
                            expected_rst_num = (expected_rst_num + 1) % 8;
                            mcus_left_until_restart = self.restart_interval;
                        },
                        Some(marker) => return Err(Error::Format(format!("found marker {:?} inside scan where RST{} was expected", marker, expected_rst_num))),
                        None => return Err(Error::Format(format!("no marker found where RST{} was expected", expected_rst_num))),
                    }
                }
            }
        }
        // Send the coefficients from this MCU row to the worker thread for dequantization and idct.
        for (i, component) in components.iter().enumerate() {
            if finished[i] {
                let coefficients_per_mcu_row = component.block_size.width as usize * component.vertical_sampling_factor as usize * 64;
                let row_coefficients = if is_progressive {
                    let offset = mcu_y as usize * coefficients_per_mcu_row;
                    self.coefficients[scan.component_indices[i]][offset .. offset + coefficients_per_mcu_row].to_vec()
                } else {
                    mem::replace(&mut mcu_row_coefficients[i], vec![0i16; coefficients_per_mcu_row])
                };
                worker.append_row((i, row_coefficients))?;
            }
        }
    }
    let mut marker = huffman.take_marker(&mut self.reader)?;
    // Skip any trailing RST markers some encoders emit after the scan data.
    while let Some(Marker::RST(_)) = marker {
        marker = self.read_marker().ok();
    }
    if finished.iter().any(|&c| c) {
        // Retrieve all the data from the worker thread.
        let mut data = vec![Vec::new(); frame.components.len()];
        for (i, &component_index) in scan.component_indices.iter().enumerate() {
            if finished[i] {
                data[component_index] = worker.get_result(i)?;
            }
        }
        Ok((marker, Some(data)))
    }
    else {
        Ok((marker, None))
    }
}
}
// Decodes one 8x8 block of a sequential scan or a progressive first pass
// (successive_approximation_high == 0): the DC difference (Section F.2.2.1)
// followed by run-length coded AC coefficients (Section F.1.2.2.1), written
// into `coefficients` in natural order via UNZIGZAG.
fn decode_block<R: Read>(reader: &mut R,
                         coefficients: &mut [i16],
                         huffman: &mut HuffmanDecoder,
                         dc_table: Option<&HuffmanTable>,
                         ac_table: Option<&HuffmanTable>,
                         spectral_selection: Range<u8>,
                         successive_approximation_low: u8,
                         eob_run: &mut u16,
                         dc_predictor: &mut i16) -> Result<()> {
    debug_assert_eq!(coefficients.len(), 64);
    if spectral_selection.start == 0 {
        // Section F.2.2.1
        // Figure F.12
        let value = huffman.decode(reader, dc_table.unwrap())?;
        let diff = match value {
            0 => 0,
            1..=11 => huffman.receive_extend(reader, value)?,
            _ => {
                // Section F.1.2.1.1
                // Table F.1
                return Err(Error::Format("invalid DC difference magnitude category".to_owned()));
            },
        };
        // Malicious JPEG files can cause this add to overflow, therefore we use wrapping_add.
        // One example of such a file is tests/crashtest/images/dc-predictor-overflow.jpg
        *dc_predictor = dc_predictor.wrapping_add(diff);
        coefficients[0] = *dc_predictor << successive_approximation_low;
    }
    let mut index = cmp::max(spectral_selection.start, 1);
    if index < spectral_selection.end && *eob_run > 0 {
        // Still inside an end-of-band run: this block's whole AC band is zero.
        *eob_run -= 1;
        return Ok(());
    }
    // Section F.1.2.2.1
    while index < spectral_selection.end {
        // Fast path: value and zero-run decoded together; `None` falls back to
        // the generic run/size decoding below.
        if let Some((value, run)) = huffman.decode_fast_ac(reader, ac_table.unwrap())? {
            index += run;
            if index >= spectral_selection.end {
                break;
            }
            coefficients[UNZIGZAG[index as usize] as usize] = value << successive_approximation_low;
            index += 1;
        }
        else {
            let byte = huffman.decode(reader, ac_table.unwrap())?;
            let r = byte >> 4;
            let s = byte & 0x0f;
            if s == 0 {
                match r {
                    15 => index += 16, // Run length of 16 zero coefficients.
                    _ => {
                        // End-of-band run: (1 << r) - 1 plus r extra bits.
                        *eob_run = (1 << r) - 1;
                        if r > 0 {
                            *eob_run += huffman.get_bits(reader, r)?;
                        }
                        break;
                    },
                }
            }
            else {
                index += r;
                if index >= spectral_selection.end {
                    break;
                }
                coefficients[UNZIGZAG[index as usize] as usize] = huffman.receive_extend(reader, s)? << successive_approximation_low;
                index += 1;
            }
        }
    }
    Ok(())
}
// Decodes one 8x8 block of a refinement scan (successive_approximation_high != 0):
// each such pass contributes a single additional bit (`1 << successive_approximation_low`)
// of precision per coefficient. DC and AC bands are handled per Annex G of the spec.
fn decode_block_successive_approximation<R: Read>(reader: &mut R,
                                                  coefficients: &mut [i16],
                                                  huffman: &mut HuffmanDecoder,
                                                  ac_table: Option<&HuffmanTable>,
                                                  spectral_selection: Range<u8>,
                                                  successive_approximation_low: u8,
                                                  eob_run: &mut u16) -> Result<()> {
    debug_assert_eq!(coefficients.len(), 64);
    let bit = 1 << successive_approximation_low;
    if spectral_selection.start == 0 {
        // Section G.1.2.1
        // DC refinement: a single raw bit per block.
        if huffman.get_bits(reader, 1)? == 1 {
            coefficients[0] |= bit;
        }
    }
    else {
        // Section G.1.2.3
        if *eob_run > 0 {
            // Inside an end-of-band run: only correction bits for already-nonzero
            // coefficients are read for this block.
            *eob_run -= 1;
            refine_non_zeroes(reader, coefficients, huffman, spectral_selection, 64, bit)?;
            return Ok(());
        }
        let mut index = spectral_selection.start;
        while index < spectral_selection.end {
            let byte = huffman.decode(reader, ac_table.unwrap())?;
            let r = byte >> 4;
            let s = byte & 0x0f;
            let mut zero_run_length = r;
            let mut value = 0;
            match s {
                0 => {
                    match r {
                        15 => {
                            // Run length of 16 zero coefficients.
                            // We don't need to do anything special here, zero_run_length is 15
                            // and then value (which is zero) gets written, resulting in 16
                            // zero coefficients.
                        },
                        _ => {
                            *eob_run = (1 << r) - 1;
                            if r > 0 {
                                *eob_run += huffman.get_bits(reader, r)?;
                            }
                            // Force end of block.
                            zero_run_length = 64;
                        },
                    }
                },
                1 => {
                    // A coefficient that becomes nonzero in this pass; its sign is
                    // carried by the next raw bit.
                    if huffman.get_bits(reader, 1)? == 1 {
                        value = bit;
                    }
                    else {
                        value = -bit;
                    }
                },
                _ => return Err(Error::Format("unexpected huffman code".to_owned())),
            }
            let range = Range {
                start: index,
                end: spectral_selection.end,
            };
            // Skip `zero_run_length` zero coefficients, refining any nonzero ones
            // passed along the way; the new value (if any) lands where the skip ended.
            index = refine_non_zeroes(reader, coefficients, huffman, range, zero_run_length, bit)?;
            if value != 0 {
                coefficients[UNZIGZAG[index as usize] as usize] = value;
            }
            index += 1;
        }
    }
    Ok(())
}
// Correction-bit pass over a coefficient range (Section G.1.2.3): skips `zrl`
// zero coefficients and, for every nonzero coefficient passed, reads one
// refinement bit which (when set) moves the coefficient one `bit` further from
// zero. Returns the index where the zero-run ended, or the last index in range.
fn refine_non_zeroes<R: Read>(reader: &mut R,
                              coefficients: &mut [i16],
                              huffman: &mut HuffmanDecoder,
                              range: Range<u8>,
                              zrl: u8,
                              bit: i16) -> Result<u8> {
    debug_assert_eq!(coefficients.len(), 64);
    let last = range.end - 1;
    let mut zero_run_length = zrl;
    for i in range {
        let index = UNZIGZAG[i as usize] as usize;
        if coefficients[index] == 0 {
            if zero_run_length == 0 {
                return Ok(i);
            }
            zero_run_length -= 1;
        }
        // Only apply the correction if this bit of the coefficient was not
        // already set by a previous pass.
        else if huffman.get_bits(reader, 1)? == 1 && coefficients[index] & bit == 0 {
            if coefficients[index] > 0 {
                coefficients[index] += bit;
            }
            else {
                coefficients[index] -= bit;
            }
        }
    }
    Ok(last)
}
// Assembles the decoded component planes into the final interleaved pixel
// buffer. Grayscale images are compacted in place; multi-component images are
// upsampled, interleaved and color-converted row by row.
fn compute_image(components: &[Component],
                 mut data: Vec<Vec<u8>>,
                 output_size: Dimensions,
                 is_jfif: bool,
                 color_transform: Option<AdobeColorTransform>) -> Result<Vec<u8>> {
    // `data.is_empty()` covers the no-scans-decoded case, which `any(Vec::is_empty)`
    // alone would miss (vacuously false for an empty Vec).
    if data.is_empty() || data.iter().any(Vec::is_empty) {
        return Err(Error::Format("not all components have data".to_owned()));
    }
    if components.len() == 1 {
        let component = &components[0];
        let mut decoded: Vec<u8> = data.remove(0);
        let width = component.size.width as usize;
        let height = component.size.height as usize;
        let size = width * height;
        // Rows were decoded with a stride of whole DCT blocks, which can be
        // wider than the visible image.
        let line_stride = component.block_size.width as usize * component.dct_scale;
        // if the image width is a multiple of the block size,
        // then we don't have to move bytes in the decoded data
        if usize::from(output_size.width) != line_stride {
            let mut buffer = vec![0u8; width];
            // The first line already starts at index 0, so we need to move only lines 1..height
            for y in 1..height {
                let destination_idx = y * width;
                let source_idx = y * line_stride;
                // We could use copy_within, but we need to support old rust versions
                buffer.copy_from_slice(&decoded[source_idx..][..width]);
                let destination = &mut decoded[destination_idx..][..width];
                destination.copy_from_slice(&buffer);
            }
        }
        // Trim to exactly width * height samples (drops trailing block padding).
        decoded.resize(size, 0);
        Ok(decoded)
    }
    else {
        compute_image_parallel(components, data, output_size, is_jfif, color_transform)
    }
}
#[cfg(feature="rayon")]
// Builds the interleaved output image, processing one output row per rayon
// work item: upsample + interleave the component planes, then color-convert.
fn compute_image_parallel(components: &[Component],
                          data: Vec<Vec<u8>>,
                          output_size: Dimensions,
                          is_jfif: bool,
                          color_transform: Option<AdobeColorTransform>) -> Result<Vec<u8>> {
    use rayon::prelude::*;
    let color_convert_func = choose_color_convert_func(components.len(), is_jfif, color_transform)?;
    let upsampler = Upsampler::new(components, output_size.width, output_size.height)?;
    // One interleaved output row holds width * component-count samples.
    let line_size = output_size.width as usize * components.len();
    let mut image = vec![0u8; line_size * output_size.height as usize];
    // `with_max_len(1)` keeps exactly one row per parallel work item.
    image.par_chunks_mut(line_size)
         .with_max_len(1)
         .enumerate()
         .for_each(|(row, line)| {
             upsampler.upsample_and_interleave_row(&data, row, output_size.width as usize, line);
             color_convert_func(line);
         });
    Ok(image)
}
#[cfg(not(feature="rayon"))]
// Serial fallback used when the "rayon" feature is disabled: for each output
// row, upsample + interleave the component planes, then color-convert in place.
fn compute_image_parallel(components: &[Component],
                          data: Vec<Vec<u8>>,
                          output_size: Dimensions,
                          is_jfif: bool,
                          color_transform: Option<AdobeColorTransform>) -> Result<Vec<u8>> {
    let color_convert_func = choose_color_convert_func(components.len(), is_jfif, color_transform)?;
    let upsampler = Upsampler::new(components, output_size.width, output_size.height)?;
    let width = output_size.width as usize;
    let line_size = width * components.len();
    let mut image = vec![0u8; line_size * output_size.height as usize];
    let mut row = 0;
    for line in image.chunks_mut(line_size) {
        upsampler.upsample_and_interleave_row(&data, row, width, line);
        color_convert_func(line);
        row += 1;
    }
    Ok(image)
}
fn choose_color_convert_func(component_count: usize,
_is_jfif: bool,
color_transform: Option<AdobeColorTransform>)
-> Result<fn(&mut [u8])> {
match component_count {
3 => {
// http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe
// Unknown means the data is RGB, so we don't need to perform any color conversion on it.
if color_transform == Some(AdobeColorTransform::Unknown) {
Ok(color_convert_line_null)
}
else {
Ok(color_convert_line_ycbcr)
}
},
4 => {
// http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/JPEG.html#Adobe
match color_transform {
Some(AdobeColorTransform::Unknown) => Ok(color_convert_line_cmyk),
Some(_) => Ok(color_convert_line_ycck),
None => Err(Error::Format("4 components without Adobe APP14 metadata to indicate color space".to_owned())),
}
},
_ => panic!(),
}
}
/// No-op conversion: the data is already in the output color space.
fn color_convert_line_null(_data: &mut [u8]) {}
/// Convert a row of interleaved YCbCr triplets to RGB in place.
/// Any trailing bytes that do not form a full triplet are left untouched.
fn color_convert_line_ycbcr(data: &mut [u8]) {
    for pixel in data.chunks_exact_mut(3) {
        let (r, g, b) = ycbcr_to_rgb(pixel[0], pixel[1], pixel[2]);
        pixel.copy_from_slice(&[r, g, b]);
    }
}
/// Convert a row of interleaved YCCK quadruplets to CMYK-complement in place:
/// the YCbCr part becomes RGB and the K channel is inverted.
fn color_convert_line_ycck(data: &mut [u8]) {
    for pixel in data.chunks_exact_mut(4) {
        let (r, g, b) = ycbcr_to_rgb(pixel[0], pixel[1], pixel[2]);
        let inverted_k = 255 - pixel[3];
        pixel.copy_from_slice(&[r, g, b, inverted_k]);
    }
}
/// Invert every channel of each CMYK quadruplet in place (Adobe stores
/// CMYK inverted). Trailing bytes short of a full quadruplet are untouched.
fn color_convert_line_cmyk(data: &mut [u8]) {
    for pixel in data.chunks_exact_mut(4) {
        for value in pixel.iter_mut() {
            *value = 255 - *value;
        }
    }
}
// ITU-R BT.601
fn ycbcr_to_rgb(y: u8, cb: u8, cr: u8) -> (u8, u8, u8) {
let y = y as f32;
let cb = cb as f32 - 128.0;
let cr = cr as f32 - 128.0;
let r = y + 1.40200 * cr;
let g = y - 0.34414 * cb - 0.71414 * cr;
let b = y + 1.77200 * cb;
// TODO: Rust has defined float-to-int conversion as saturating,
// which is exactly what we need here. However, as of this writing
// it still hasn't reached the stable channel.
// This can be simplified to `(r + 0.5) as u8` without any clamping
// as soon as our MSRV reaches the version that has saturating casts.
// The version without explicit clamping is also noticeably faster.
(clamp_to_u8((r + 0.5) as i32) as u8,
clamp_to_u8((g + 0.5) as i32) as u8,
clamp_to_u8((b + 0.5) as i32) as u8)
}
/// Clamp an integer into the 0..=255 range (kept as i32; callers cast to u8).
fn clamp_to_u8(value: i32) -> i32 {
    value.max(0).min(255)
}
|
use av_format::error::*;
use std::io::SeekFrom;
use av_data::packet::Packet;
use av_data::timeinfo::TimeInfo;
use av_data::params::*;
use av_format::stream::Stream;
use av_format::buffer::Buffered;
use av_format::demuxer::{Demuxer, Event};
use av_format::demuxer::{Descr, Descriptor};
use av_format::common::GlobalInfo;
use std::collections::VecDeque;
use rational::Rational64;
use ebml::{ebml_header, EBMLHeader};
use elements::{segment, segment_element, Cluster, SeekHead, Info, Tracks, TrackEntry,
SegmentElement, simple_block};
use nom::{self, Err, IResult, Offset};
/// Incremental Matroska (MKV/WebM) demuxer state.
#[derive(Debug, Clone, PartialEq)]
pub struct MkvDemuxer {
    /// Parsed EBML header, once seen.
    pub header: Option<EBMLHeader>,
    /// Top-level SeekHead element, once seen.
    pub seek_head: Option<SeekHead>,
    /// Segment Info element (timecode scale, duration, ...), once seen.
    pub info: Option<Info>,
    /// Tracks element describing the contained streams, once seen.
    pub tracks: Option<Tracks>,
    /// Events already parsed but not yet handed to the caller.
    pub queue: VecDeque<Event>,
    /// Buffer for raw block bytes.
    pub blockstream: Vec<u8>,
}
impl MkvDemuxer {
    /// Create an empty demuxer with no parsed state.
    pub fn new() -> MkvDemuxer {
        MkvDemuxer {
            header: None,
            seek_head: None,
            info: None,
            tracks: None,
            queue: VecDeque::new(),
            blockstream: Vec::new(),
        }
    }
    /// Parse the EBML header and then segment elements until SeekHead, Info
    /// and Tracks have all been seen, storing them on `self`.
    ///
    /// On success returns the input remaining after the last consumed element.
    /// Duplicate SeekHead/Info/Tracks elements are reported as a custom nom
    /// error; truncated input surfaces as `Err::Incomplete`.
    pub fn parse_until_tracks<'a>(&mut self, original_input: &'a [u8]) -> IResult<&'a [u8], ()> {
        let (i1, header) = try_parse!(original_input, ebml_header);
        self.header = Some(header);
        let (mut input, _) = try_parse!(i1, segment);
        // Reset any previously parsed state before (re)scanning the segment.
        self.seek_head = None;
        self.info = None;
        self.tracks = None;
        loop {
            // Stop as soon as all three required top-level elements are known.
            if self.seek_head.is_some() && self.info.is_some() && self.tracks.is_some() {
                return Ok((input, ()));
            }
            // println!("offset: {}", original_input.offset(input));
            let (i3, element) = try_parse!(input, segment_element);
            match element {
                SegmentElement::SeekHead(s) => {
                    // println!("got seek head: {:#?}", s);
                    // A second SeekHead is rejected as a custom error.
                    if self.seek_head.is_some() {
                        return Err(Err::Error(error_position!(input, nom::ErrorKind::Custom(1))));
                    } else {
                        self.seek_head = Some(s);
                    }
                }
                SegmentElement::Info(i) => {
                    // println!("got info: {:#?}", i);
                    if self.info.is_some() {
                        return Err(Err::Error(error_position!(input, nom::ErrorKind::Custom(1))));
                    } else {
                        self.info = Some(i);
                    }
                }
                SegmentElement::Tracks(t) => {
                    // println!("got tracks: {:#?}", t);
                    if self.tracks.is_some() {
                        return Err(Err::Error(error_position!(input, nom::ErrorKind::Custom(1))));
                    } else {
                        self.tracks = Some(t);
                    }
                }
                el => {
                    // Any other element is skipped (logged for debugging).
                    println!("got element: {:#?}", el);
                }
            }
            input = i3;
        }
    }
}
use nom::Needed;
impl Demuxer for MkvDemuxer {
    /// Parse the container headers and populate `info` with the duration and
    /// one Stream per track. Returns how far the buffer was consumed.
    fn read_headers(&mut self, buf: &Box<Buffered>, info: &mut GlobalInfo) -> Result<SeekFrom> {
        match self.parse_until_tracks(buf.data()) {
            Ok((i, _)) => {
                info.duration = self.info.as_ref().and_then(|info| info.duration).map(|d| d as u64);
                if let Some(ref t) = self.tracks {
                    info.streams = t.tracks.iter().map(|tr| {
                        track_to_stream(self.info.as_ref().unwrap(), tr)
                    }).collect();
                }
                Ok(SeekFrom::Current(buf.data().offset(i) as i64))
            },
            Err(Err::Incomplete(needed)) => {
                // Ask the caller for a bigger buffer; 1024 is a fallback when
                // the parser cannot tell exactly how much it needs.
                let sz = match needed {
                    Needed::Size(size) => buf.data().len() + size,
                    _ => 1024,
                };
                Err(Error::MoreDataNeeded(sz))
            },
            e => {
                println!("error reading headers: {:?}", e);
                Err(Error::InvalidData)
            }
        }
    }
    /// Return the next queued event, or parse the next segment element and
    /// queue the packets of any Cluster encountered.
    fn read_event(&mut self, buf: &Box<Buffered>) -> Result<(SeekFrom, Event)> {
        if let Some(event) = self.queue.pop_front() {
            Ok((SeekFrom::Current(0), event))
        } else {
            // println!("no more stored packet, parsing a new one")
            match segment_element(buf.data()) {
                Ok((i, element)) => {
                    let seek = SeekFrom::Current(buf.data().offset(i) as i64);
                    match element {
                        SegmentElement::Cluster(c) => {
                            //self.clusters.push(c);
                            // println!("got cluster element at timecode: {}", c.timecode);
                            let mut packets = c.generate_packets();
                            self.queue.extend(packets.drain(..));
                            if let Some(event) = self.queue.pop_front() {
                                return Ok((seek, event));
                            }
                        }
                        _el => {
                            // println!("got element: {:#?}", el);
                        }
                    }
                    // Non-cluster element (or empty cluster): advance past it
                    // and ask the caller to call again.
                    Ok((seek, Event::MoreDataNeeded(0)))
                },
                Err(Err::Incomplete(Needed::Size(size))) => {
                    Err(Error::MoreDataNeeded(size))
                },
                e => {
                    println!("parsing issue: {:?}", e);
                    Err(Error::InvalidData)
                }
            }
        }
    }
}
/// Map a Matroska CodecID string to the rust-av codec name, if supported.
fn track_entry_codec_id(t: &TrackEntry) -> Option<String> {
    // TODO: Support V_QUICKTIME and V_MS/VFW/FOURCC
    let name = match t.codec_id.as_ref() {
        "A_OPUS" => "opus",
        "A_VORBIS" => "vorbis",
        "V_AV1" => "av1",
        "V_VP8" => "vp8",
        "V_VP9" => "vp9",
        _ => return None,
    };
    Some(name.to_owned())
}
/// Build video stream information from the track's Video element, if present.
fn track_entry_video_kind(t: &TrackEntry) -> Option<MediaKind> {
    // TODO: Validate that a track::video exists for track::type video before.
    t.video.as_ref().map(|video| {
        MediaKind::Video(VideoInfo {
            width: video.pixel_width as usize,
            height: video.pixel_height as usize,
            // TODO parse Colour and/or CodecPrivate to extract the format
            format: None,
        })
    })
}
/// Build audio stream information from the track's Audio element, if present.
fn track_entry_audio_kind(t: &TrackEntry) -> Option<MediaKind> {
    use av_data::audiosample::*;
    // TODO: Validate that a track::video exists for track::type video before.
    t.audio.as_ref().map(|audio| {
        // The output sampling frequency overrides the stored one when present.
        let rate = audio.output_sampling_frequency.unwrap_or(audio.sampling_frequency);
        // TODO: complete it
        let map = if audio.channel_positions.is_some() {
            unimplemented!("Convert matroska map to rust-av map")
        } else {
            Some(ChannelMap::default_map(audio.channels as usize))
        };
        MediaKind::Audio(AudioInfo {
            rate: rate as usize,
            map: map,
            format: None,
        })
    })
}
/// Dispatch on the Matroska track type (0x1 = video, 0x2 = audio).
fn track_entry_media_kind(t: &TrackEntry) -> Option<MediaKind> {
    // TODO: Use an enum for the track type
    if t.track_type == 0x1 {
        track_entry_video_kind(t)
    } else if t.track_type == 0x2 {
        track_entry_audio_kind(t)
    } else {
        None
    }
}
// TODO: make sure the timecode_scale isn't 0
/// Translate a Matroska track entry into a rust-av Stream description.
pub fn track_to_stream(info: &Info, t: &TrackEntry) -> Stream {
    // A per-track timecode scale multiplies the segment-wide one when present.
    let num = t.track_timecode_scale
        .map_or(info.timecode_scale as i64,
                |ts| (ts * info.timecode_scale as f64) as i64);
    Stream {
        id: t.track_uid as isize,
        index: t.track_number as usize,
        start: None,
        duration: t.default_duration,
        timebase: Rational64::new(num, 1000 * 1000 * 1000),
        // TODO: Extend CodecParams and fill it with the remaining information
        params: CodecParams {
            extradata: t.codec_private.clone(),
            bit_rate: 0,
            delay: t.codec_delay.unwrap_or(0) as usize,
            convergence_window: t.seek_pre_roll.unwrap_or(0) as usize,
            codec_id: track_entry_codec_id(t),
            kind: track_entry_media_kind(t),
        },
    }
}
impl<'a> Cluster<'a> {
    /// Parse every SimpleBlock of this cluster into a `NewPacket` event.
    /// Blocks that fail to parse are logged and skipped.
    pub fn generate_packets(&self) -> Vec<Event> {
        let mut events = Vec::new();
        for block_data in self.simple_block.iter() {
            match simple_block(block_data) {
                Ok((payload, block)) => {
                    //println!("parsing simple block: {:?}", block);
                    let packet = Packet {
                        data: payload.into(),
                        t: TimeInfo {
                            pts: Some(block.timecode as i64),
                            dts: None,
                            duration: None,
                            timebase: None,
                        },
                        pos: None,
                        stream_index: block.track_number as isize,
                        is_key: block.keyframe,
                        is_corrupted: false,
                    };
                    events.push(Event::NewPacket(packet));
                }
                Err(_) => println!("error parsing simple block"),
            }
        }
        events
    }
}
/// Wrapper tying a static demuxer description to the Descriptor trait.
struct Des {
    d: Descr,
}
impl Descriptor for Des {
    /// Instantiate a fresh Matroska demuxer.
    fn create(&self) -> Box<Demuxer> {
        Box::new(MkvDemuxer::new())
    }
    /// Static description of this demuxer (name, extensions, mime types).
    fn describe<'a>(&'a self) -> &'a Descr {
        &self.d
    }
    /// Probe score: 100 if the data starts with a valid EBML header, else 0.
    fn probe(&self, data: &[u8]) -> u8 {
        // Slice at most 100 bytes: `&data[..100]` would panic when the
        // probe buffer is shorter than that.
        let len = ::std::cmp::min(data.len(), 100);
        match ebml_header(&data[..len]) {
            Ok(_) => 100,
            _ => 0,
        }
    }
}
/// Registry descriptor for the Matroska/WebM demuxer.
pub const MKV_DESC: &Descriptor = &Des {
    d: Descr {
        name: "matroska",
        demuxer: "mkv",
        description: "Nom-based Matroska demuxer",
        extensions: &["mkv", "webm", "mka"],
        mime: &["video/x-matroska", "audio/x-matroska"],
    }
};
#[cfg(test)]
#[allow(non_upper_case_globals)]
mod tests {
    use super::*;
    use std::io::Cursor;
    use nom::Offset;
    use av_format::demuxer::Context;
    use av_format::buffer::*;
    // Small VP9 + Opus WebM fixture shared by all tests below.
    const webm: &'static [u8] = include_bytes!("../assets/bbb-vp9-opus.webm");
    // Header parsing succeeds on the complete fixture.
    #[test]
    fn parse_headers() {
        let mut demuxer = MkvDemuxer::new();
        let res = demuxer.parse_until_tracks(webm);
        println!("got parsing res: {:?}", res);
        match res {
            Ok((i, _)) => {
                println!("offset: {} bytes", webm.offset(i));
            }
            e => {
                println!("could not parse: {:?}", e);
            }
        }
        println!("demuxer: {:#?}", demuxer);
    }
    // Truncated prefixes must yield Incomplete, never a hard parse error.
    #[test]
    fn parse_headers_incremental_buffer() {
        let mut demuxer = MkvDemuxer::new();
        for n in 100..2000 {
            let res = demuxer.parse_until_tracks(&webm[0..n]);
            match res {
                Ok(_) => println!("Size {} ok", n),
                Err(Err::Incomplete(needed)) => println!("Incomplete {} needs {:?}", n, needed),
                Err(e) => {
                    panic!("Error at size {}: {:?}", n, e);
                }
            }
        }
    }
    // End-to-end: drive the demuxer through the av-format Context.
    #[test]
    fn context() {
        let mut context = Context::new(Box::new(MkvDemuxer::new()),
                                       Box::new(AccReader::new(Cursor::new(webm))));
        println!("DEMUXER CONTEXT read headers: {:?}", context.read_headers().unwrap());
        println!("DEMUXER CONTEXT streams: {:?}", context.info.streams);
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
    }
}
Update to the current API
use av_format::error::*;
use std::io::SeekFrom;
use av_data::packet::Packet;
use av_data::timeinfo::TimeInfo;
use av_data::params::*;
use av_format::stream::Stream;
use av_format::buffer::Buffered;
use av_format::demuxer::{Demuxer, Event};
use av_format::demuxer::{Descr, Descriptor};
use av_format::common::GlobalInfo;
use std::collections::VecDeque;
use rational::Rational64;
use ebml::{ebml_header, EBMLHeader};
use elements::{segment, segment_element, Cluster, SeekHead, Info, Tracks, TrackEntry,
SegmentElement, simple_block};
use nom::{self, Err, IResult, Offset};
/// Incremental Matroska (MKV/WebM) demuxer state.
#[derive(Debug, Clone)]
pub struct MkvDemuxer {
    /// Parsed EBML header, once seen.
    pub header: Option<EBMLHeader>,
    /// Top-level SeekHead element, once seen.
    pub seek_head: Option<SeekHead>,
    /// Segment Info element (timecode scale, duration, ...), once seen.
    pub info: Option<Info>,
    /// Tracks element describing the contained streams, once seen.
    pub tracks: Option<Tracks>,
    /// Events already parsed but not yet handed to the caller.
    pub queue: VecDeque<Event>,
    /// Buffer for raw block bytes.
    pub blockstream: Vec<u8>,
}
impl MkvDemuxer {
    /// Create an empty demuxer with no parsed state.
    pub fn new() -> MkvDemuxer {
        MkvDemuxer {
            header: None,
            seek_head: None,
            info: None,
            tracks: None,
            queue: VecDeque::new(),
            blockstream: Vec::new(),
        }
    }
    /// Parse the EBML header and then segment elements until SeekHead, Info
    /// and Tracks have all been seen, storing them on `self`.
    ///
    /// On success returns the input remaining after the last consumed element.
    /// Duplicate SeekHead/Info/Tracks elements are reported as a custom nom
    /// error; truncated input surfaces as `Err::Incomplete`.
    pub fn parse_until_tracks<'a>(&mut self, original_input: &'a [u8]) -> IResult<&'a [u8], ()> {
        let (i1, header) = try_parse!(original_input, ebml_header);
        self.header = Some(header);
        let (mut input, _) = try_parse!(i1, segment);
        // Reset any previously parsed state before (re)scanning the segment.
        self.seek_head = None;
        self.info = None;
        self.tracks = None;
        loop {
            // Stop as soon as all three required top-level elements are known.
            if self.seek_head.is_some() && self.info.is_some() && self.tracks.is_some() {
                return Ok((input, ()));
            }
            // println!("offset: {}", original_input.offset(input));
            let (i3, element) = try_parse!(input, segment_element);
            match element {
                SegmentElement::SeekHead(s) => {
                    // println!("got seek head: {:#?}", s);
                    // A second SeekHead is rejected as a custom error.
                    if self.seek_head.is_some() {
                        return Err(Err::Error(error_position!(input, nom::ErrorKind::Custom(1))));
                    } else {
                        self.seek_head = Some(s);
                    }
                }
                SegmentElement::Info(i) => {
                    // println!("got info: {:#?}", i);
                    if self.info.is_some() {
                        return Err(Err::Error(error_position!(input, nom::ErrorKind::Custom(1))));
                    } else {
                        self.info = Some(i);
                    }
                }
                SegmentElement::Tracks(t) => {
                    // println!("got tracks: {:#?}", t);
                    if self.tracks.is_some() {
                        return Err(Err::Error(error_position!(input, nom::ErrorKind::Custom(1))));
                    } else {
                        self.tracks = Some(t);
                    }
                }
                el => {
                    // Any other element is skipped (logged for debugging).
                    println!("got element: {:#?}", el);
                }
            }
            input = i3;
        }
    }
}
use nom::Needed;
impl Demuxer for MkvDemuxer {
    /// Parse the container headers and populate `info` with the duration and
    /// one Stream per track. Returns how far the buffer was consumed.
    fn read_headers(&mut self, buf: &Box<Buffered>, info: &mut GlobalInfo) -> Result<SeekFrom> {
        match self.parse_until_tracks(buf.data()) {
            Ok((i, _)) => {
                info.duration = self.info.as_ref().and_then(|info| info.duration).map(|d| d as u64);
                if let Some(ref t) = self.tracks {
                    info.streams = t.tracks.iter().map(|tr| {
                        track_to_stream(self.info.as_ref().unwrap(), tr)
                    }).collect();
                }
                Ok(SeekFrom::Current(buf.data().offset(i) as i64))
            },
            Err(Err::Incomplete(needed)) => {
                // Ask the caller for a bigger buffer; 1024 is a fallback when
                // the parser cannot tell exactly how much it needs.
                let sz = match needed {
                    Needed::Size(size) => buf.data().len() + size,
                    _ => 1024,
                };
                Err(Error::MoreDataNeeded(sz))
            },
            e => {
                println!("error reading headers: {:?}", e);
                Err(Error::InvalidData)
            }
        }
    }
    /// Return the next queued event, or parse the next segment element and
    /// queue the packets of any Cluster encountered.
    fn read_event(&mut self, buf: &Box<Buffered>) -> Result<(SeekFrom, Event)> {
        if let Some(event) = self.queue.pop_front() {
            Ok((SeekFrom::Current(0), event))
        } else {
            // println!("no more stored packet, parsing a new one");
            match segment_element(buf.data()) {
                Ok((i, element)) => {
                    let seek = SeekFrom::Current(buf.data().offset(i) as i64);
                    match element {
                        SegmentElement::Cluster(c) => {
                            //self.clusters.push(c);
                            // println!("got cluster element at timecode: {}", c.timecode);
                            let mut packets = c.generate_packets();
                            self.queue.extend(packets.drain(..));
                            if let Some(event) = self.queue.pop_front() {
                                return Ok((seek, event));
                            }
                        }
                        _el => {
                            // println!("got element: {:#?}", el);
                        }
                    }
                    // Non-cluster element (or empty cluster): advance past it
                    // and ask the caller to call again.
                    Ok((seek, Event::MoreDataNeeded(0)))
                },
                Err(Err::Incomplete(Needed::Size(size))) => {
                    Err(Error::MoreDataNeeded(size))
                },
                e => {
                    println!("parsing issue: {:?}", e);
                    Err(Error::InvalidData)
                }
            }
        }
    }
}
/// Map a Matroska CodecID string to the rust-av codec name, if supported.
fn track_entry_codec_id(t: &TrackEntry) -> Option<String> {
    // TODO: Support V_QUICKTIME and V_MS/VFW/FOURCC
    let name = match t.codec_id.as_ref() {
        "A_OPUS" => "opus",
        "A_VORBIS" => "vorbis",
        "V_AV1" => "av1",
        "V_VP8" => "vp8",
        "V_VP9" => "vp9",
        _ => return None,
    };
    Some(name.to_owned())
}
/// Build video stream information from the track's Video element, if present.
fn track_entry_video_kind(t: &TrackEntry) -> Option<MediaKind> {
    // TODO: Validate that a track::video exists for track::type video before.
    t.video.as_ref().map(|video| {
        MediaKind::Video(VideoInfo {
            width: video.pixel_width as usize,
            height: video.pixel_height as usize,
            // TODO parse Colour and/or CodecPrivate to extract the format
            format: None,
        })
    })
}
/// Build audio stream information from the track's Audio element, if present.
fn track_entry_audio_kind(t: &TrackEntry) -> Option<MediaKind> {
    use av_data::audiosample::*;
    // TODO: Validate that a track::video exists for track::type video before.
    t.audio.as_ref().map(|audio| {
        // The output sampling frequency overrides the stored one when present.
        let rate = audio.output_sampling_frequency.unwrap_or(audio.sampling_frequency);
        // TODO: complete it
        let map = if audio.channel_positions.is_some() {
            unimplemented!("Convert matroska map to rust-av map")
        } else {
            Some(ChannelMap::default_map(audio.channels as usize))
        };
        MediaKind::Audio(AudioInfo {
            rate: rate as usize,
            map: map,
            format: None,
        })
    })
}
/// Dispatch on the Matroska track type (0x1 = video, 0x2 = audio).
fn track_entry_media_kind(t: &TrackEntry) -> Option<MediaKind> {
    // TODO: Use an enum for the track type
    if t.track_type == 0x1 {
        track_entry_video_kind(t)
    } else if t.track_type == 0x2 {
        track_entry_audio_kind(t)
    } else {
        None
    }
}
// TODO: make sure the timecode_scale isn't 0
/// Translate a Matroska track entry into a rust-av Stream description.
pub fn track_to_stream(info: &Info, t: &TrackEntry) -> Stream {
    // A per-track timecode scale multiplies the segment-wide one when present.
    let num = t.track_timecode_scale
        .map_or(info.timecode_scale as i64,
                |ts| (ts * info.timecode_scale as f64) as i64);
    Stream {
        id: t.track_uid as isize,
        index: t.track_number as usize,
        start: None,
        duration: t.default_duration,
        timebase: Rational64::new(num, 1000 * 1000 * 1000),
        // TODO: Extend CodecParams and fill it with the remaining information
        params: CodecParams {
            extradata: t.codec_private.clone(),
            bit_rate: 0,
            delay: t.codec_delay.unwrap_or(0) as usize,
            convergence_window: t.seek_pre_roll.unwrap_or(0) as usize,
            codec_id: track_entry_codec_id(t),
            kind: track_entry_media_kind(t),
        },
        user_private: None
    }
}
impl<'a> Cluster<'a> {
    /// Parse every SimpleBlock of this cluster into a `NewPacket` event.
    /// Blocks that fail to parse are logged and skipped.
    pub fn generate_packets(&self) -> Vec<Event> {
        let mut events = Vec::new();
        for block_data in self.simple_block.iter() {
            match simple_block(block_data) {
                Ok((payload, block)) => {
                    //println!("parsing simple block: {:?}", block);
                    let packet = Packet {
                        data: payload.into(),
                        t: TimeInfo {
                            pts: Some(block.timecode as i64),
                            dts: None,
                            duration: None,
                            timebase: None,
                        },
                        pos: None,
                        stream_index: block.track_number as isize,
                        is_key: block.keyframe,
                        is_corrupted: false,
                    };
                    events.push(Event::NewPacket(packet));
                }
                Err(_) => println!("error parsing simple block"),
            }
        }
        events
    }
}
/// Wrapper tying a static demuxer description to the Descriptor trait.
struct Des {
    d: Descr,
}
impl Descriptor for Des {
    /// Instantiate a fresh Matroska demuxer.
    fn create(&self) -> Box<Demuxer> {
        Box::new(MkvDemuxer::new())
    }
    /// Static description of this demuxer (name, extensions, mime types).
    fn describe<'a>(&'a self) -> &'a Descr {
        &self.d
    }
    /// Probe score: 100 if the data starts with a valid EBML header, else 0.
    fn probe(&self, data: &[u8]) -> u8 {
        // Slice at most 100 bytes: `&data[..100]` would panic when the
        // probe buffer is shorter than that.
        let len = ::std::cmp::min(data.len(), 100);
        match ebml_header(&data[..len]) {
            Ok(_) => 100,
            _ => 0,
        }
    }
}
/// Registry descriptor for the Matroska/WebM demuxer.
pub const MKV_DESC: &Descriptor = &Des {
    d: Descr {
        name: "matroska",
        demuxer: "mkv",
        description: "Nom-based Matroska demuxer",
        extensions: &["mkv", "webm", "mka"],
        mime: &["video/x-matroska", "audio/x-matroska"],
    }
};
#[cfg(test)]
#[allow(non_upper_case_globals)]
mod tests {
    use super::*;
    use std::io::Cursor;
    use nom::Offset;
    use av_format::demuxer::Context;
    use av_format::buffer::*;
    // Small VP9 + Opus WebM fixture shared by all tests below.
    const webm: &'static [u8] = include_bytes!("../assets/bbb-vp9-opus.webm");
    // Header parsing succeeds on the complete fixture.
    #[test]
    fn parse_headers() {
        let mut demuxer = MkvDemuxer::new();
        let res = demuxer.parse_until_tracks(webm);
        println!("got parsing res: {:?}", res);
        match res {
            Ok((i, _)) => {
                println!("offset: {} bytes", webm.offset(i));
            }
            e => {
                println!("could not parse: {:?}", e);
            }
        }
        println!("demuxer: {:#?}", demuxer);
    }
    // Truncated prefixes must yield Incomplete, never a hard parse error.
    #[test]
    fn parse_headers_incremental_buffer() {
        let mut demuxer = MkvDemuxer::new();
        for n in 100..2000 {
            let res = demuxer.parse_until_tracks(&webm[0..n]);
            match res {
                Ok(_) => println!("Size {} ok", n),
                Err(Err::Incomplete(needed)) => println!("Incomplete {} needs {:?}", n, needed),
                Err(e) => {
                    panic!("Error at size {}: {:?}", n, e);
                }
            }
        }
    }
    // End-to-end: drive the demuxer through the av-format Context.
    #[test]
    fn context() {
        let mut context = Context::new(Box::new(MkvDemuxer::new()),
                                       Box::new(AccReader::new(Cursor::new(webm))));
        println!("DEMUXER CONTEXT read headers: {:?}", context.read_headers().unwrap());
        println!("DEMUXER CONTEXT streams: {:?}", context.info.streams);
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
        println!("DEMUXER CONTEXT event: {:?}", context.read_event().unwrap());
    }
}
|
// Copyright 2016 Joe Wilm, The Alacritty Project Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The display subsystem including window management, font rasterization, and
//! GPU drawing.
use std::sync::mpsc;
use parking_lot::{MutexGuard};
use Rgb;
use cli;
use config::Config;
use font::{self, Rasterize};
use meter::Meter;
use renderer::{self, GlyphCache, QuadRenderer};
use selection::Selection;
use term::{Term, SizeInfo};
use window::{self, Size, Pixels, Window, SetInnerSize};
/// Errors that can occur while constructing or driving the display.
#[derive(Debug)]
pub enum Error {
    /// Error with window management
    Window(window::Error),
    /// Error dealing with fonts
    Font(font::Error),
    /// Error in renderer
    Render(renderer::Error),
}
impl ::std::error::Error for Error {
    // Delegate cause/description to the wrapped error.
    fn cause(&self) -> Option<&::std::error::Error> {
        match *self {
            Error::Window(ref err) => Some(err),
            Error::Font(ref err) => Some(err),
            Error::Render(ref err) => Some(err),
        }
    }
    fn description(&self) -> &str {
        match *self {
            Error::Window(ref err) => err.description(),
            Error::Font(ref err) => err.description(),
            Error::Render(ref err) => err.description(),
        }
    }
}
impl ::std::fmt::Display for Error {
    // Forward formatting to the wrapped error's Display impl.
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        match *self {
            Error::Window(ref err) => err.fmt(f),
            Error::Font(ref err) => err.fmt(f),
            Error::Render(ref err) => err.fmt(f),
        }
    }
}
// Allow `?` on window operations inside display code.
impl From<window::Error> for Error {
    fn from(val: window::Error) -> Error {
        Error::Window(val)
    }
}
// Allow `?` on font operations inside display code.
impl From<font::Error> for Error {
    fn from(val: font::Error) -> Error {
        Error::Font(val)
    }
}
// Allow `?` on renderer operations inside display code.
impl From<renderer::Error> for Error {
    fn from(val: renderer::Error) -> Error {
        Error::Render(val)
    }
}
/// The display wraps a window, font rasterizer, and GPU renderer
pub struct Display {
    window: Window,
    renderer: QuadRenderer,
    glyph_cache: GlyphCache,
    // Whether to draw the frame-timing overlay.
    render_timer: bool,
    // Receiving side of the resize-event channel (drained in handle_resize).
    rx: mpsc::Receiver<(u32, u32)>,
    // Sending side, cloned out via resize_channel().
    tx: mpsc::Sender<(u32, u32)>,
    // Frame-time meter backing the render-timer overlay.
    meter: Meter,
    // Last applied font size delta; compared against the terminal's.
    font_size_modifier: i8,
    size_info: SizeInfo,
    // Used to detect background-color changes between draws.
    last_background_color: Rgb,
}
/// Can wakeup the render loop from other threads
pub struct Notifier(window::Proxy);
/// Types that are interested in when the display is resized
pub trait OnResize {
    /// Called with the new size after every display resize.
    fn on_resize(&mut self, size: &SizeInfo);
}
impl Notifier {
    /// Wake the event loop so a pending change gets processed.
    pub fn notify(&self) {
        self.0.wakeup_event_loop();
    }
}
impl Display {
    /// Create a Notifier that can wake this display's event loop from
    /// another thread.
    pub fn notifier(&self) -> Notifier {
        Notifier(self.window.create_window_proxy())
    }
    /// Refresh cached configuration values (currently only the render-timer flag).
    pub fn update_config(&mut self, config: &Config) {
        self.render_timer = config.render_timer();
    }
    /// Get size info about the display (viewport, cell metrics and padding).
    pub fn size(&self) -> &SizeInfo {
        &self.size_info
    }
    /// Build the display: create the window, GL renderer and glyph cache,
    /// apply the configured dimensions, and clear the screen once.
    pub fn new(
        config: &Config,
        options: &cli::Options,
    ) -> Result<Display, Error> {
        // Extract some properties from config
        let render_timer = config.render_timer();
        // Create the window where Alacritty will be displayed
        let mut window = Window::new(&options.title)?;
        // get window properties for initializing the other subsystems
        let mut viewport_size = window.inner_size_pixels()
            .expect("glutin returns window size");
        let dpr = window.hidpi_factor();
        info!("device_pixel_ratio: {}", dpr);
        // Create renderer
        let mut renderer = QuadRenderer::new(&config, viewport_size)?;
        let (glyph_cache, cell_width, cell_height) =
            Self::new_glyph_cache(&window, &mut renderer, config, 0)?;
        let dimensions = options.dimensions()
            .unwrap_or_else(|| config.dimensions());
        // Resize window to specified dimensions unless one or both dimensions are 0
        if dimensions.columns_u32() > 0 && dimensions.lines_u32() > 0 {
            let width = cell_width as u32 * dimensions.columns_u32();
            let height = cell_height as u32 * dimensions.lines_u32();
            let new_viewport_size = Size {
                width: Pixels(width + 2 * config.padding().x as u32),
                height: Pixels(height + 2 * config.padding().y as u32),
            };
            window.set_inner_size(&new_viewport_size);
            renderer.resize(new_viewport_size.width.0 as _, new_viewport_size.height.0 as _);
            viewport_size = new_viewport_size
        }
        info!("Cell Size: ({} x {})", cell_width, cell_height);
        let size_info = SizeInfo {
            width: viewport_size.width.0 as f32,
            height: viewport_size.height.0 as f32,
            cell_width: cell_width as f32,
            cell_height: cell_height as f32,
            padding_x: config.padding().x.floor(),
            padding_y: config.padding().y.floor(),
        };
        // Channel for resize events
        //
        // macOS has a callback for getting resize events, the channel is used
        // to queue resize events until the next draw call. Unfortunately, it
        // seems that the event loop is blocked until the window is done
        // resizing. If any drawing were to happen during a resize, it would
        // need to be in the callback.
        let (tx, rx) = mpsc::channel();
        // Clear screen
        let background_color = config.colors().primary.background;
        renderer.with_api(config, &size_info, 0. /* visual bell intensity */, |api| {
            api.clear(background_color);
        });
        Ok(Display {
            window: window,
            renderer: renderer,
            glyph_cache: glyph_cache,
            render_timer: render_timer,
            tx: tx,
            rx: rx,
            meter: Meter::new(),
            font_size_modifier: 0,
            size_info: size_info,
            last_background_color: background_color,
        })
    }
fn new_glyph_cache(window : &Window, renderer : &mut QuadRenderer,
config: &Config, font_size_delta: i8)
-> Result<(GlyphCache, f32, f32), Error>
{
let font = config.font().clone().with_size_delta(font_size_delta as f32);
let dpr = window.hidpi_factor();
let rasterizer = font::Rasterizer::new(dpr, config.use_thin_strokes())?;
// Initialize glyph cache
let glyph_cache = {
info!("Initializing glyph cache");
let init_start = ::std::time::Instant::now();
let cache = renderer.with_loader(|mut api| {
GlyphCache::new(rasterizer, &font, &mut api)
})?;
let stop = init_start.elapsed();
let stop_f = stop.as_secs() as f64 + stop.subsec_nanos() as f64 / 1_000_000_000f64;
info!("Finished initializing glyph cache in {}", stop_f);
cache
};
// Need font metrics to resize the window properly. This suggests to me the
// font metrics should be computed before creating the window in the first
// place so that a resize is not needed.
let metrics = glyph_cache.font_metrics();
let cell_width = (metrics.average_advance + font.offset().x as f64) as u32;
let cell_height = (metrics.line_height + font.offset().y as f64) as u32;
return Ok((glyph_cache, cell_width as f32, cell_height as f32));
}
    /// Rebuild the glyph cache for a new font size delta and recompute the
    /// cell metrics from the resulting font metrics.
    pub fn update_glyph_cache(&mut self, config: &Config, font_size_delta: i8) {
        let cache = &mut self.glyph_cache;
        self.renderer.with_loader(|mut api| {
            // Errors here are ignored: keep the previous cache on failure.
            let _ = cache.update_font_size(config.font(), font_size_delta, &mut api);
        });
        let metrics = cache.font_metrics();
        self.size_info.cell_width = ((metrics.average_advance + config.font().offset().x as f64) as f32).floor();
        self.size_info.cell_height = ((metrics.line_height + config.font().offset().y as f64) as f32).floor();
    }
    /// Clone the sender half of the resize-event channel; events sent on it
    /// are applied by `handle_resize`.
    #[inline]
    pub fn resize_channel(&self) -> mpsc::Sender<(u32, u32)> {
        self.tx.clone()
    }
    /// Mutable access to the underlying window.
    pub fn window(&mut self) -> &mut Window {
        &mut self.window
    }
/// Process pending resize events
pub fn handle_resize(
&mut self,
terminal: &mut MutexGuard<Term>,
config: &Config,
items: &mut [&mut OnResize]
) {
// Resize events new_size and are handled outside the poll_events
// iterator. This has the effect of coalescing multiple resize
// events into one.
let mut new_size = None;
// Take most recent resize event, if any
while let Ok(sz) = self.rx.try_recv() {
new_size = Some(sz);
}
if terminal.font_size_modifier != self.font_size_modifier {
// Font size modification detected
self.font_size_modifier = terminal.font_size_modifier;
self.update_glyph_cache(config, terminal.font_size_modifier);
if new_size == None {
// Force a resize to refresh things
new_size = Some((self.size_info.width as u32,
self.size_info.height as u32));
}
}
// Receive any resize events; only call gl::Viewport on last
// available
if let Some((w, h)) = new_size.take() {
self.size_info.width = w as f32;
self.size_info.height = h as f32;
let size = &self.size_info;
terminal.resize(size);
for item in items {
item.on_resize(size)
}
self.window.resize(w, h);
self.renderer.resize(w as i32, h as i32);
}
}
    /// Draw the screen
    ///
    /// A reference to Term whose state is being drawn must be provided.
    ///
    /// This call may block if vsync is enabled
    pub fn draw(&mut self, mut terminal: MutexGuard<Term>, config: &Config, selection: Option<&Selection>) {
        // Clear dirty flag
        terminal.dirty = !terminal.visual_bell.completed();
        // Propagate a pending title change to the window.
        if let Some(title) = terminal.get_next_title() {
            self.window.set_title(&title);
        }
        if let Some(is_urgent) = terminal.next_is_urgent.take() {
            // We don't need to set the urgent flag if we already have the
            // user's attention.
            if !is_urgent || !self.window.is_focused {
                self.window.set_urgent(is_urgent);
            }
        }
        let size_info = *terminal.size_info();
        let visual_bell_intensity = terminal.visual_bell.intensity();
        let background_color = terminal.background_color();
        let background_color_changed = background_color != self.last_background_color;
        self.last_background_color = background_color;
        {
            let glyph_cache = &mut self.glyph_cache;
            // Draw grid
            {
                let _sampler = self.meter.sampler();
                // Make a copy of size_info since the closure passed here
                // borrows terminal mutably
                //
                // TODO I wonder if the renderable cells iter could avoid the
                // mutable borrow
                self.renderer.with_api(config, &size_info, visual_bell_intensity, |mut api| {
                    // Clear screen to update whole background with new color
                    if background_color_changed {
                        api.clear(background_color);
                    }
                    // Draw the grid
                    api.render_cells(terminal.renderable_cells(config, selection), glyph_cache);
                });
            }
            // Draw render timer
            if self.render_timer {
                let timing = format!("{:.3} usec", self.meter.average());
                let color = Rgb { r: 0xd5, g: 0x4e, b: 0x53 };
                self.renderer.with_api(config, &size_info, visual_bell_intensity, |mut api| {
                    api.render_string(&timing[..], glyph_cache, color);
                });
            }
        }
        // Unlock the terminal mutex; following call to swap_buffers() may block
        drop(terminal);
        self.window
            .swap_buffers()
            .expect("swap buffers");
        // Clear after swap_buffers when terminal mutex isn't held. Mesa for
        // some reason takes a long time to call glClear(). The driver descends
        // into xcb_connect_to_fd() which ends up calling __poll_nocancel()
        // which blocks for a while.
        //
        // By keeping this outside of the critical region, the Mesa bug is
        // worked around to some extent. Since this doesn't actually address the
        // issue of glClear being slow, less time is available for input
        // handling and rendering.
        self.renderer.with_api(config, &size_info, visual_bell_intensity, |api| {
            api.clear(background_color);
        });
    }
    /// Platform window identifier, when the backend exposes one.
    pub fn get_window_id(&self) -> Option<usize> {
        self.window.get_window_id()
    }
    /// Adjust the XIM editor position according to the new location of the cursor
    pub fn update_ime_position(&mut self, terminal: &Term) {
        use index::{Point, Line, Column};
        use term::SizeInfo;
        let Point{line: Line(row), col: Column(col)} = terminal.cursor().point;
        let SizeInfo{cell_width: cw,
                     cell_height: ch,
                     padding_x: px,
                     padding_y: py, ..} = *terminal.size_info();
        // Pixel coordinates of the line below the cursor cell.
        let nspot_y = (py + (row + 1) as f32 * ch) as i16;
        let nspot_x = (px + col as f32 * cw) as i16;
        self.window().send_xim_spot(nspot_x, nspot_y);
    }
}
Fix typo and whitespace
// Copyright 2016 Joe Wilm, The Alacritty Project Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The display subsystem including window management, font rasterization, and
//! GPU drawing.
use std::sync::mpsc;
use parking_lot::{MutexGuard};
use Rgb;
use cli;
use config::Config;
use font::{self, Rasterize};
use meter::Meter;
use renderer::{self, GlyphCache, QuadRenderer};
use selection::Selection;
use term::{Term, SizeInfo};
use window::{self, Size, Pixels, Window, SetInnerSize};
#[derive(Debug)]
pub enum Error {
/// Error with window management
Window(window::Error),
/// Error dealing with fonts
Font(font::Error),
/// Error in renderer
Render(renderer::Error),
}
impl ::std::error::Error for Error {
fn cause(&self) -> Option<&::std::error::Error> {
match *self {
Error::Window(ref err) => Some(err),
Error::Font(ref err) => Some(err),
Error::Render(ref err) => Some(err),
}
}
fn description(&self) -> &str {
match *self {
Error::Window(ref err) => err.description(),
Error::Font(ref err) => err.description(),
Error::Render(ref err) => err.description(),
}
}
}
impl ::std::fmt::Display for Error {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match *self {
Error::Window(ref err) => err.fmt(f),
Error::Font(ref err) => err.fmt(f),
Error::Render(ref err) => err.fmt(f),
}
}
}
impl From<window::Error> for Error {
fn from(val: window::Error) -> Error {
Error::Window(val)
}
}
impl From<font::Error> for Error {
fn from(val: font::Error) -> Error {
Error::Font(val)
}
}
impl From<renderer::Error> for Error {
fn from(val: renderer::Error) -> Error {
Error::Render(val)
}
}
/// The display wraps a window, font rasterizer, and GPU renderer
pub struct Display {
    // Native window the terminal is rendered into
    window: Window,
    // OpenGL quad renderer used for all drawing
    renderer: QuadRenderer,
    // Cache of rasterized glyphs uploaded for rendering
    glyph_cache: GlyphCache,
    // Whether to draw the frame-timing overlay (from config `render_timer`)
    render_timer: bool,
    // Receiving end of queued (width, height) resize events; drained in
    // `handle_resize`
    rx: mpsc::Receiver<(u32, u32)>,
    // Sender handed out via `resize_channel` so other threads can queue resizes
    tx: mpsc::Sender<(u32, u32)>,
    // Frame timing meter backing the render-timer overlay
    meter: Meter,
    // Last font size modifier applied from terminal state
    font_size_modifier: i8,
    // Cached window/cell geometry shared with the terminal
    size_info: SizeInfo,
    // Background color of the previous frame; used to detect color changes
    last_background_color: Rgb,
}
/// Can wakeup the render loop from other threads
pub struct Notifier(window::Proxy);
/// Types that are interested in when the display is resized
pub trait OnResize {
fn on_resize(&mut self, size: &SizeInfo);
}
impl Notifier {
pub fn notify(&self) {
self.0.wakeup_event_loop();
}
}
impl Display {
/// Create a `Notifier` that can wake the event loop from another thread.
pub fn notifier(&self) -> Notifier {
    Notifier(self.window.create_window_proxy())
}
/// Refresh settings that are re-read from the config.
pub fn update_config(&mut self, config: &Config) {
    // Currently only the render-timer overlay toggle is re-read.
    self.render_timer = config.render_timer();
}
/// Get size info about the display
pub fn size(&self) -> &SizeInfo {
    &self.size_info
}
pub fn new(
config: &Config,
options: &cli::Options,
) -> Result<Display, Error> {
// Extract some properties from config
let render_timer = config.render_timer();
// Create the window where Alacritty will be displayed
let mut window = Window::new(&options.title)?;
// get window properties for initializing the other subsystems
let mut viewport_size = window.inner_size_pixels()
.expect("glutin returns window size");
let dpr = window.hidpi_factor();
info!("device_pixel_ratio: {}", dpr);
// Create renderer
let mut renderer = QuadRenderer::new(&config, viewport_size)?;
let (glyph_cache, cell_width, cell_height) =
Self::new_glyph_cache(&window, &mut renderer, config, 0)?;
let dimensions = options.dimensions()
.unwrap_or_else(|| config.dimensions());
// Resize window to specified dimensions unless one or both dimensions are 0
if dimensions.columns_u32() > 0 && dimensions.lines_u32() > 0 {
let width = cell_width as u32 * dimensions.columns_u32();
let height = cell_height as u32 * dimensions.lines_u32();
let new_viewport_size = Size {
width: Pixels(width + 2 * config.padding().x as u32),
height: Pixels(height + 2 * config.padding().y as u32),
};
window.set_inner_size(&new_viewport_size);
renderer.resize(new_viewport_size.width.0 as _, new_viewport_size.height.0 as _);
viewport_size = new_viewport_size
}
info!("Cell Size: ({} x {})", cell_width, cell_height);
let size_info = SizeInfo {
width: viewport_size.width.0 as f32,
height: viewport_size.height.0 as f32,
cell_width: cell_width as f32,
cell_height: cell_height as f32,
padding_x: config.padding().x.floor(),
padding_y: config.padding().y.floor(),
};
// Channel for resize events
//
// macOS has a callback for getting resize events, the channel is used
// to queue resize events until the next draw call. Unfortunately, it
// seems that the event loop is blocked until the window is done
// resizing. If any drawing were to happen during a resize, it would
// need to be in the callback.
let (tx, rx) = mpsc::channel();
// Clear screen
let background_color = config.colors().primary.background;
renderer.with_api(config, &size_info, 0. /* visual bell intensity */, |api| {
api.clear(background_color);
});
Ok(Display {
window: window,
renderer: renderer,
glyph_cache: glyph_cache,
render_timer: render_timer,
tx: tx,
rx: rx,
meter: Meter::new(),
font_size_modifier: 0,
size_info: size_info,
last_background_color: background_color,
})
}
/// Build the glyph cache for the configured font (with `font_size_delta`
/// applied) and return it together with the resulting cell width/height.
fn new_glyph_cache(window : &Window, renderer : &mut QuadRenderer,
                   config: &Config, font_size_delta: i8)
    -> Result<(GlyphCache, f32, f32), Error>
{
    let font = config.font().clone().with_size_delta(font_size_delta as f32);
    let dpr = window.hidpi_factor();
    let rasterizer = font::Rasterizer::new(dpr, config.use_thin_strokes())?;

    // Initialize glyph cache, timing how long it takes.
    let glyph_cache = {
        info!("Initializing glyph cache");
        let start = ::std::time::Instant::now();
        let cache = renderer.with_loader(|mut api| {
            GlyphCache::new(rasterizer, &font, &mut api)
        })?;
        let elapsed = start.elapsed();
        let seconds = elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000f64;
        info!("Finished initializing glyph cache in {}", seconds);
        cache
    };

    // Need font metrics to resize the window properly. This suggests to me the
    // font metrics should be computed before creating the window in the first
    // place so that a resize is not needed.
    let metrics = glyph_cache.font_metrics();
    let cell_width = (metrics.average_advance + font.offset().x as f64) as u32;
    let cell_height = (metrics.line_height + font.offset().y as f64) as u32;
    Ok((glyph_cache, cell_width as f32, cell_height as f32))
}
/// Re-rasterize the font with `font_size_delta` applied and refresh the
/// cached cell metrics.
pub fn update_glyph_cache(&mut self, config: &Config, font_size_delta: i8) {
    let cache = &mut self.glyph_cache;
    self.renderer.with_loader(|mut api| {
        // Errors while changing font size are ignored; the old size stays.
        let _ = cache.update_font_size(config.font(), font_size_delta, &mut api);
    });
    // Recompute cell geometry from the new metrics plus configured glyph
    // offsets; floor keeps cells on whole pixels.
    let metrics = cache.font_metrics();
    self.size_info.cell_width = ((metrics.average_advance + config.font().offset().x as f64) as f32).floor();
    self.size_info.cell_height = ((metrics.line_height + config.font().offset().y as f64) as f32).floor();
}
/// Sender used to queue (width, height) resize events; drained by
/// `handle_resize` on the next draw.
#[inline]
pub fn resize_channel(&self) -> mpsc::Sender<(u32, u32)> {
    self.tx.clone()
}
/// Mutable access to the underlying window.
pub fn window(&mut self) -> &mut Window {
    &mut self.window
}
/// Process pending resize events
pub fn handle_resize(
&mut self,
terminal: &mut MutexGuard<Term>,
config: &Config,
items: &mut [&mut OnResize]
) {
// Resize events new_size and are handled outside the poll_events
// iterator. This has the effect of coalescing multiple resize
// events into one.
let mut new_size = None;
// Take most recent resize event, if any
while let Ok(sz) = self.rx.try_recv() {
new_size = Some(sz);
}
if terminal.font_size_modifier != self.font_size_modifier {
// Font size modification detected
self.font_size_modifier = terminal.font_size_modifier;
self.update_glyph_cache(config, terminal.font_size_modifier);
if new_size == None {
// Force a resize to refresh things
new_size = Some((self.size_info.width as u32,
self.size_info.height as u32));
}
}
// Receive any resize events; only call gl::Viewport on last
// available
if let Some((w, h)) = new_size.take() {
self.size_info.width = w as f32;
self.size_info.height = h as f32;
let size = &self.size_info;
terminal.resize(size);
for item in items {
item.on_resize(size)
}
self.window.resize(w, h);
self.renderer.resize(w as i32, h as i32);
}
}
/// Draw the screen
///
/// A reference to Term whose state is being drawn must be provided.
///
/// This call may block if vsync is enabled
pub fn draw(&mut self, mut terminal: MutexGuard<Term>, config: &Config, selection: Option<&Selection>) {
// Clear dirty flag
terminal.dirty = !terminal.visual_bell.completed();
if let Some(title) = terminal.get_next_title() {
self.window.set_title(&title);
}
if let Some(is_urgent) = terminal.next_is_urgent.take() {
// We don't need to set the urgent flag if we already have the
// user's attention.
if !is_urgent || !self.window.is_focused {
self.window.set_urgent(is_urgent);
}
}
let size_info = *terminal.size_info();
let visual_bell_intensity = terminal.visual_bell.intensity();
let background_color = terminal.background_color();
let background_color_changed = background_color != self.last_background_color;
self.last_background_color = background_color;
{
let glyph_cache = &mut self.glyph_cache;
// Draw grid
{
let _sampler = self.meter.sampler();
// Make a copy of size_info since the closure passed here
// borrows terminal mutably
//
// TODO I wonder if the renderable cells iter could avoid the
// mutable borrow
self.renderer.with_api(config, &size_info, visual_bell_intensity, |mut api| {
// Clear screen to update whole background with new color
if background_color_changed {
api.clear(background_color);
}
// Draw the grid
api.render_cells(terminal.renderable_cells(config, selection), glyph_cache);
});
}
// Draw render timer
if self.render_timer {
let timing = format!("{:.3} usec", self.meter.average());
let color = Rgb { r: 0xd5, g: 0x4e, b: 0x53 };
self.renderer.with_api(config, &size_info, visual_bell_intensity, |mut api| {
api.render_string(&timing[..], glyph_cache, color);
});
}
}
// Unlock the terminal mutex; following call to swap_buffers() may block
drop(terminal);
self.window
.swap_buffers()
.expect("swap buffers");
// Clear after swap_buffers when terminal mutex isn't held. Mesa for
// some reason takes a long time to call glClear(). The driver descends
// into xcb_connect_to_fd() which ends up calling __poll_nocancel()
// which blocks for a while.
//
// By keeping this outside of the critical region, the Mesa bug is
// worked around to some extent. Since this doesn't actually address the
// issue of glClear being slow, less time is available for input
// handling and rendering.
self.renderer.with_api(config, &size_info, visual_bell_intensity, |api| {
api.clear(background_color);
});
}
/// Native window ID, if the windowing backend exposes one.
pub fn get_window_id(&self) -> Option<usize> {
    self.window.get_window_id()
}
/// Adjust the XIM editor position according to the new location of the cursor
pub fn update_ime_position(&mut self, terminal: &Term) {
    use index::{Point, Line, Column};
    use term::SizeInfo;

    // Cursor location in grid coordinates.
    let Point { line: Line(row), col: Column(col) } = terminal.cursor().point;
    // Cell geometry and padding of the current display.
    let SizeInfo { cell_width, cell_height, padding_x, padding_y, .. } =
        *terminal.size_info();

    // Convert the grid position to pixels; x is the left edge of the cursor
    // cell, y its bottom edge (row + 1).
    let spot_x = (padding_x + col as f32 * cell_width) as i16;
    let spot_y = (padding_y + (row + 1) as f32 * cell_height) as i16;
    self.window().send_xim_spot(spot_x, spot_y);
}
}
|
use std::fs::File;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom::Start;
use std::io;
use std::fmt;
use std::slice;
/// A 64-bit ELF symbol-table entry (`Elf64_Sym`), laid out exactly as on disk.
///
/// All fields are plain integers, so the type is trivially copyable; deriving
/// `Copy` (as the later revision of this module does) avoids needless clones.
#[repr(C)]
#[derive(Clone, Copy, PartialEq)]
pub struct Sym {
    pub st_name: u32, // Symbol name (string tbl index)
    pub st_info: u8, // Symbol type and binding
    pub st_other: u8, // Symbol visibility
    pub st_shndx: u16, // Section index
    pub st_value: u64, // Symbol value
    pub st_size: u64, // Symbol size
}
/// On-disk size of a `Sym` in bytes (4 + 1 + 1 + 2 + 8 + 8 = 24).
pub const SIZEOF_SYM: usize = 4 + 1 + 1 + 2 + 8 + 8;
/// Extract the binding (upper nibble) from an `st_info` byte.
#[inline(always)]
pub fn st_bind(info: u8) -> u8 {
    (info & 0xf0) >> 4
}
/// Extract the type (lower nibble) from an `st_info` byte.
#[inline(always)]
pub fn st_type(info: u8) -> u8 {
    info & 0x0f
}
// Heuristic: a symbol counts as an import when it has global binding but no
// value (address) assigned in this object.
// NOTE(review): weak undefined symbols (STB_WEAK) are not treated as imports
// here — confirm that is intentional.
#[inline(always)]
pub fn is_import(sym: &Sym) -> bool {
    let binding = st_bind(sym.st_info);
    binding == STB_GLOBAL && sym.st_value == 0
}
// sym bindings
pub const STB_LOCAL: u8 = 0; // Local symbol
pub const STB_GLOBAL: u8 = 1; // Global symbol
pub const STB_WEAK: u8 = 2; // Weak symbol
pub const STB_NUM: u8 = 3; // Number of defined types.
pub const STB_LOOS: u8 = 10; // Start of OS-specific
pub const STB_GNU_UNIQUE: u8 = 10; // Unique symbol.
pub const STB_HIOS: u8 = 12; // End of OS-specific
pub const STB_LOPROC: u8 = 13; // Start of processor-specific
pub const STB_HIPROC: u8 = 15; // End of processor-specific
/// Human-readable name for a symbol binding; unknown values map to
/// `"UNKNOWN_STB"`.
#[inline]
pub fn bind_to_str(typ: u8) -> &'static str {
    if typ == STB_LOCAL {
        "LOCAL"
    } else if typ == STB_GLOBAL {
        "GLOBAL"
    } else if typ == STB_WEAK {
        "WEAK"
    } else if typ == STB_NUM {
        "NUM"
    } else if typ == STB_GNU_UNIQUE {
        "GNU_UNIQUE"
    } else {
        "UNKNOWN_STB"
    }
}
// sym types
pub const STT_NOTYPE: u8 = 0; // Symbol type is unspecified
pub const STT_OBJECT: u8 = 1; // Symbol is a data object
pub const STT_FUNC: u8 = 2; // Symbol is a code object
pub const STT_SECTION: u8 = 3; // Symbol associated with a section
pub const STT_FILE: u8 = 4; // Symbol's name is file name
pub const STT_COMMON: u8 = 5; // Symbol is a common data object
pub const STT_TLS: u8 = 6; // Symbol is thread-local data object
pub const STT_NUM: u8 = 7; // Number of defined types
pub const STT_LOOS: u8 = 10; // Start of OS-specific
pub const STT_GNU_IFUNC: u8 = 10; // Symbol is indirect code object
pub const STT_HIOS: u8 = 12; // End of OS-specific
pub const STT_LOPROC: u8 = 13; // Start of processor-specific
pub const STT_HIPROC: u8 = 15; // End of processor-specific
/// Human-readable name for a symbol type; unknown values map to
/// `"UNKNOWN_STT"`.
#[inline]
pub fn type_to_str(typ: u8) -> &'static str {
    if typ == STT_NOTYPE {
        "NOTYPE"
    } else if typ == STT_OBJECT {
        "OBJECT"
    } else if typ == STT_FUNC {
        "FUNC"
    } else if typ == STT_SECTION {
        "SECTION"
    } else if typ == STT_FILE {
        "FILE"
    } else if typ == STT_COMMON {
        "COMMON"
    } else if typ == STT_TLS {
        "TLS"
    } else if typ == STT_NUM {
        "NUM"
    } else if typ == STT_GNU_IFUNC {
        "GNU_IFUNC"
    } else {
        "UNKNOWN_STT"
    }
}
impl fmt::Debug for Sym {
    // Renders the symbol with decoded binding/type names; `st_value` is
    // printed in hex since it is an address-like quantity.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let bind = st_bind(self.st_info);
        let typ = st_type(self.st_info);
        write!(f,
               "st_name: {} {} {} st_other: {} st_shndx: {} st_value: {:x} st_size: {}",
               self.st_name,
               bind_to_str(bind),
               type_to_str(typ),
               self.st_other,
               self.st_shndx,
               self.st_value,
               self.st_size)
    }
}
/// Reinterpret `count` consecutive `Sym` records starting at `symp` as a slice.
///
/// # Safety
///
/// `symp` must be non-null, properly aligned, and point to at least `count`
/// valid `Sym` values that remain live and unmutated for the caller-chosen
/// lifetime `'a`.
pub unsafe fn from_raw<'a>(symp: *const Sym, count: usize) -> &'a [Sym] {
    slice::from_raw_parts(symp, count)
}
/// Read `count` symbols from `fd` starting at byte `offset`, dropping
/// consecutive duplicate entries.
pub fn from_fd<'a>(fd: &mut File, offset: usize, count: usize) -> io::Result<Vec<Sym>> {
    let mut bytes = vec![0u8; count * SIZEOF_SYM];
    try!(fd.seek(Start(offset as u64)));
    // read_exact instead of read: a single read() may legally return fewer
    // bytes than requested, which would have left trailing symbols zeroed.
    try!(fd.read_exact(&mut bytes));
    // NOTE(review): this reinterprets the byte buffer in place; it assumes
    // the allocation is sufficiently aligned for Sym's u64 fields and that
    // the file data matches host endianness — confirm for intended targets.
    let raw = unsafe { slice::from_raw_parts(bytes.as_ptr() as *const Sym, count) };
    let mut syms = Vec::with_capacity(count);
    syms.extend_from_slice(raw);
    syms.dedup();
    Ok(syms)
}
elf.sym: add Copy derived trait
use std::fs::File;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom::Start;
use std::io;
use std::fmt;
use std::slice;
#[repr(C)]
#[derive(Clone, Copy, PartialEq)]
pub struct Sym {
pub st_name: u32, // Symbol name (string tbl index)
pub st_info: u8, // Symbol type and binding
pub st_other: u8, // Symbol visibility
pub st_shndx: u16, // Section index
pub st_value: u64, // Symbol value
pub st_size: u64, // Symbol size
}
pub const SIZEOF_SYM: usize = 4 + 1 + 1 + 2 + 8 + 8;
#[inline(always)]
pub fn st_bind(info: u8) -> u8 {
info >> 4
}
#[inline(always)]
pub fn st_type(info: u8) -> u8 {
info & 0xf
}
#[inline(always)]
pub fn is_import(sym: &Sym) -> bool {
let binding = st_bind(sym.st_info);
binding == STB_GLOBAL && sym.st_value == 0
}
// sym bindings
pub const STB_LOCAL: u8 = 0; // Local symbol
pub const STB_GLOBAL: u8 = 1; // Global symbol
pub const STB_WEAK: u8 = 2; // Weak symbol
pub const STB_NUM: u8 = 3; // Number of defined types.
pub const STB_LOOS: u8 = 10; // Start of OS-specific
pub const STB_GNU_UNIQUE: u8 = 10; // Unique symbol.
pub const STB_HIOS: u8 = 12; // End of OS-specific
pub const STB_LOPROC: u8 = 13; // Start of processor-specific
pub const STB_HIPROC: u8 = 15; // End of processor-specific
#[inline]
pub fn bind_to_str(typ: u8) -> &'static str {
match typ {
STB_LOCAL => "LOCAL",
STB_GLOBAL => "GLOBAL",
STB_WEAK => "WEAK",
STB_NUM => "NUM",
STB_GNU_UNIQUE => "GNU_UNIQUE",
_ => "UNKNOWN_STB",
}
}
// sym types
pub const STT_NOTYPE: u8 = 0; // Symbol type is unspecified
pub const STT_OBJECT: u8 = 1; // Symbol is a data object
pub const STT_FUNC: u8 = 2; // Symbol is a code object
pub const STT_SECTION: u8 = 3; // Symbol associated with a section
pub const STT_FILE: u8 = 4; // Symbol's name is file name
pub const STT_COMMON: u8 = 5; // Symbol is a common data object
pub const STT_TLS: u8 = 6; // Symbol is thread-local data object
pub const STT_NUM: u8 = 7; // Number of defined types
pub const STT_LOOS: u8 = 10; // Start of OS-specific
pub const STT_GNU_IFUNC: u8 = 10; // Symbol is indirect code object
pub const STT_HIOS: u8 = 12; // End of OS-specific
pub const STT_LOPROC: u8 = 13; // Start of processor-specific
pub const STT_HIPROC: u8 = 15; // End of processor-specific
#[inline]
pub fn type_to_str(typ: u8) -> &'static str {
match typ {
STT_NOTYPE => "NOTYPE",
STT_OBJECT => "OBJECT",
STT_FUNC => "FUNC",
STT_SECTION => "SECTION",
STT_FILE => "FILE",
STT_COMMON => "COMMON",
STT_TLS => "TLS",
STT_NUM => "NUM",
STT_GNU_IFUNC => "GNU_IFUNC",
_ => "UNKNOWN_STT",
}
}
impl fmt::Debug for Sym {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let bind = st_bind(self.st_info);
let typ = st_type(self.st_info);
write!(f,
"st_name: {} {} {} st_other: {} st_shndx: {} st_value: {:x} st_size: {}",
self.st_name,
bind_to_str(bind),
type_to_str(typ),
self.st_other,
self.st_shndx,
self.st_value,
self.st_size)
}
}
pub unsafe fn from_raw<'a>(symp: *const Sym, count: usize) -> &'a [Sym] {
slice::from_raw_parts(symp, count)
}
/// Read `count` symbols from `fd` starting at byte `offset`, dropping
/// consecutive duplicate entries.
pub fn from_fd<'a>(fd: &mut File, offset: usize, count: usize) -> io::Result<Vec<Sym>> {
    let mut bytes = vec![0u8; count * SIZEOF_SYM];
    try!(fd.seek(Start(offset as u64)));
    // read_exact instead of read: a single read() may legally return fewer
    // bytes than requested, which would have left trailing symbols zeroed.
    try!(fd.read_exact(&mut bytes));
    // NOTE(review): this reinterprets the byte buffer in place; it assumes
    // the allocation is sufficiently aligned for Sym's u64 fields and that
    // the file data matches host endianness — confirm for intended targets.
    let raw = unsafe { slice::from_raw_parts(bytes.as_ptr() as *const Sym, count) };
    let mut syms = Vec::with_capacity(count);
    syms.extend_from_slice(raw);
    syms.dedup();
    Ok(syms)
}
|
use std::collections::HashMap;
use std::fmt;
use std::io::IoError as StdIoError;
use std::iter::repeat;
use serialize;
use super::{Data, StrVal, Bool, VecVal, Map};
pub use self::Error::*;
pub struct Encoder<'a> {
pub data: Vec<Data<'a>>,
}
impl<'a> Encoder<'a> {
pub fn new() -> Encoder<'a> {
Encoder { data: Vec::new() }
}
}
#[derive(PartialEq)]
pub enum Error {
UnsupportedType,
InvalidStr,
MissingElements,
KeyIsNotString,
IoError(StdIoError),
}
impl fmt::Show for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
UnsupportedType => "unsupported type".fmt(f),
InvalidStr => "invalid str".fmt(f),
MissingElements => "no elements in value".fmt(f),
KeyIsNotString => "key is not a string".fmt(f),
IoError(ref err) => err.fmt(f),
}
}
}
pub type EncoderResult = Result<(), Error>;
impl<'a> serialize::Encoder for Encoder<'a> {
type Error = Error;
fn emit_nil(&mut self) -> EncoderResult { Err(UnsupportedType) }
fn emit_int(&mut self, v: isize) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_uint(&mut self, v: usize) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_u64(&mut self, v: u64) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_u32(&mut self, v: u32) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_u16(&mut self, v: u16) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_u8(&mut self, v: u8) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_i64(&mut self, v: i64) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_i32(&mut self, v: i32) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_i16(&mut self, v: i16) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_i8(&mut self, v: i8) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_bool(&mut self, v: bool) -> EncoderResult { self.data.push(Bool(v)); Ok(()) }
fn emit_f64(&mut self, v: f64) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_f32(&mut self, v: f32) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_char(&mut self, v: char) -> EncoderResult {
self.data.push(StrVal(repeat(v).take(1).collect::<String>()));
Ok(())
}
fn emit_str(&mut self, v: &str) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_enum< F >(&mut self, _name: &str, _f: F ) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_enum_variant< F >(&mut self,
_name: &str,
_id: usize,
_len: usize,
_f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_enum_variant_arg< F >(&mut self,
_a_idx: usize,
_f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_enum_struct_variant< F >(&mut self,
_v_name: &str,
_v_id: usize,
_len: usize,
_f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_enum_struct_variant_field< F >(&mut self,
_f_name: &str,
_f_idx: usize,
_f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_struct< F >(&mut self,
_name: &str,
_len: usize,
f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.data.push(Map(HashMap::new()));
f(self)
}
fn emit_struct_field< F >(&mut self,
name: &str,
_idx: usize,
f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
let mut m = match self.data.pop() {
Some(Map(m)) => m,
_ => { return Err(UnsupportedType); }
};
try!(f(self));
let data = match self.data.pop() {
Some(d) => d,
_ => { return Err(UnsupportedType); }
};
m.insert(name.to_string(), data);
self.data.push(Map(m));
Ok(())
}
fn emit_tuple< F >(&mut self,
len: usize,
f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.emit_seq(len, f)
}
fn emit_tuple_arg< F >(&mut self, idx: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.emit_seq_elt(idx, f)
}
fn emit_tuple_struct< F >(&mut self,
_name: &str,
len: usize,
f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.emit_seq(len, f)
}
fn emit_tuple_struct_arg< F >(&mut self, idx: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.emit_seq_elt(idx, f)
}
// Specialized types:
fn emit_option< F >(&mut self, _f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_option_none(&mut self) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_option_some< F >(&mut self, _f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_seq< F >(&mut self, _len: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.data.push(VecVal(Vec::new()));
f(self)
}
fn emit_seq_elt< F >(&mut self, _idx: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
let mut v = match self.data.pop() {
Some(VecVal(v)) => v,
_ => { return Err(UnsupportedType); }
};
try!(f(self));
let data = match self.data.pop() {
Some(d) => d,
_ => { return Err(UnsupportedType); }
};
v.push(data);
self.data.push(VecVal(v));
Ok(())
}
fn emit_map< F >(&mut self, _len: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.data.push(Map(HashMap::new()));
f(self)
}
fn emit_map_elt_key< F >(&mut self, _idx: usize, mut f: F) -> EncoderResult
where F : FnMut(&mut Encoder<'a>) -> EncoderResult {
try!(f(self));
let last = match self.data.last() {
Some(d) => d,
None => { return Err(MissingElements); }
};
match *last {
StrVal(_) => Ok(()),
_ => Err(KeyIsNotString),
}
}
fn emit_map_elt_val< F >(&mut self, _idx: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
let k = match self.data.pop() {
Some(StrVal(s)) => s,
_ => { return Err(KeyIsNotString); }
};
let mut m = match self.data.pop() {
Some(Map(m)) => m,
_ => panic!("Expected a map"),
};
try!(f(self));
let popped = match self.data.pop() {
Some(p) => p,
None => panic!("Error: Nothing to pop!"),
};
m.insert(k, popped);
self.data.push(Map(m));
Ok(())
}
}
/// Encode `data` into a single mustache `Data` value.
pub fn encode<'a, T: serialize::Encodable>(data: &T) -> Result<Data<'a>, Error> {
    let mut encoder = Encoder::new();
    try!(data.encode(&mut encoder));
    // Exactly one root value must remain on the stack after encoding.
    assert_eq!(encoder.data.len(), 1);
    Ok(encoder.data.pop().expect("Error: Nothing to pop!"))
}
Update for latest nightly.
use std::collections::HashMap;
use std::fmt;
use std::io::IoError as StdIoError;
use std::iter::repeat;
use serialize;
use super::{Data, StrVal, Bool, VecVal, Map};
pub use self::Error::*;
pub struct Encoder<'a> {
pub data: Vec<Data<'a>>,
}
impl<'a> Encoder<'a> {
pub fn new() -> Encoder<'a> {
Encoder { data: Vec::new() }
}
}
#[derive(PartialEq)]
pub enum Error {
UnsupportedType,
InvalidStr,
MissingElements,
KeyIsNotString,
IoError(StdIoError),
}
impl fmt::Show for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
UnsupportedType => "unsupported type".fmt(f),
InvalidStr => "invalid str".fmt(f),
MissingElements => "no elements in value".fmt(f),
KeyIsNotString => "key is not a string".fmt(f),
IoError(ref err) => err.fmt(f),
}
}
}
pub type EncoderResult = Result<(), Error>;
impl<'a> serialize::Encoder for Encoder<'a> {
type Error = Error;
fn emit_nil(&mut self) -> EncoderResult { Err(UnsupportedType) }
fn emit_int(&mut self, v: isize) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_uint(&mut self, v: usize) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_u64(&mut self, v: u64) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_u32(&mut self, v: u32) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_u16(&mut self, v: u16) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_u8(&mut self, v: u8) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_i64(&mut self, v: i64) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_i32(&mut self, v: i32) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_i16(&mut self, v: i16) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_i8(&mut self, v: i8) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_bool(&mut self, v: bool) -> EncoderResult { self.data.push(Bool(v)); Ok(()) }
fn emit_f64(&mut self, v: f64) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_f32(&mut self, v: f32) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_char(&mut self, v: char) -> EncoderResult {
self.data.push(StrVal(repeat(v).take(1).collect::<String>()));
Ok(())
}
fn emit_str(&mut self, v: &str) -> EncoderResult { self.data.push(StrVal(v.to_string())); Ok(()) }
fn emit_enum< F >(&mut self, _name: &str, _f: F ) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_enum_variant< F >(&mut self,
_name: &str,
_id: usize,
_len: usize,
_f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_enum_variant_arg< F >(&mut self,
_a_idx: usize,
_f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_enum_struct_variant< F >(&mut self,
_v_name: &str,
_v_id: usize,
_len: usize,
_f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_enum_struct_variant_field< F >(&mut self,
_f_name: &str,
_f_idx: usize,
_f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_struct< F >(&mut self,
_name: &str,
_len: usize,
f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.data.push(Map(HashMap::new()));
f(self)
}
fn emit_struct_field< F >(&mut self,
name: &str,
_idx: usize,
f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
let mut m = match self.data.pop() {
Some(Map(m)) => m,
_ => { return Err(UnsupportedType); }
};
try!(f(self));
let data = match self.data.pop() {
Some(d) => d,
_ => { return Err(UnsupportedType); }
};
m.insert(name.to_string(), data);
self.data.push(Map(m));
Ok(())
}
fn emit_tuple< F >(&mut self,
len: usize,
f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.emit_seq(len, f)
}
fn emit_tuple_arg< F >(&mut self, idx: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.emit_seq_elt(idx, f)
}
fn emit_tuple_struct< F >(&mut self,
_name: &str,
len: usize,
f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.emit_seq(len, f)
}
fn emit_tuple_struct_arg< F >(&mut self, idx: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.emit_seq_elt(idx, f)
}
// Specialized types:
fn emit_option< F >(&mut self, _f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_option_none(&mut self) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_option_some< F >(&mut self, _f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
Err(UnsupportedType)
}
fn emit_seq< F >(&mut self, _len: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.data.push(VecVal(Vec::new()));
f(self)
}
fn emit_seq_elt< F >(&mut self, _idx: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
let mut v = match self.data.pop() {
Some(VecVal(v)) => v,
_ => { return Err(UnsupportedType); }
};
try!(f(self));
let data = match self.data.pop() {
Some(d) => d,
_ => { return Err(UnsupportedType); }
};
v.push(data);
self.data.push(VecVal(v));
Ok(())
}
fn emit_map< F >(&mut self, _len: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
self.data.push(Map(HashMap::new()));
f(self)
}
fn emit_map_elt_key< F >(&mut self, _idx: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
try!(f(self));
let last = match self.data.last() {
Some(d) => d,
None => { return Err(MissingElements); }
};
match *last {
StrVal(_) => Ok(()),
_ => Err(KeyIsNotString),
}
}
fn emit_map_elt_val< F >(&mut self, _idx: usize, f: F) -> EncoderResult
where F : FnOnce(&mut Encoder<'a>) -> EncoderResult {
let k = match self.data.pop() {
Some(StrVal(s)) => s,
_ => { return Err(KeyIsNotString); }
};
let mut m = match self.data.pop() {
Some(Map(m)) => m,
_ => panic!("Expected a map"),
};
try!(f(self));
let popped = match self.data.pop() {
Some(p) => p,
None => panic!("Error: Nothing to pop!"),
};
m.insert(k, popped);
self.data.push(Map(m));
Ok(())
}
}
/// Encode `data` into a single mustache `Data` value.
pub fn encode<'a, T: serialize::Encodable>(data: &T) -> Result<Data<'a>, Error> {
    let mut encoder = Encoder::new();
    try!(data.encode(&mut encoder));
    // Exactly one root value must remain on the stack after encoding.
    assert_eq!(encoder.data.len(), 1);
    Ok(encoder.data.pop().expect("Error: Nothing to pop!"))
}
|
use rustc_serialize::{Encoder,Encodable};
use types::{Value,BasicValue,Struct,Signature};
/// Encodes `rustc_serialize::Encodable` values into DBus `Value`s.
pub struct DBusEncoder {
    // Stack of values produced so far; the finished result is the last entry
    val: Vec<Value>,
    // DBus type-signature string accumulated while encoding the current value
    signature: String,
    // NOTE(review): presumably true while a map key is being encoded, to
    // reject non-basic key types — confirm against the (unseen) map methods
    handling_key: bool
}
/// Errors that can occur while encoding a value to DBus.
#[derive(Debug,PartialEq)]
pub enum EncoderError {
    // A map key had a type DBus cannot use as a dictionary key
    BadKeyType,
    // The Rust value has no supported DBus representation
    Unsupported,
    // NOTE(review): presumably raised for empty arrays whose element
    // signature cannot be inferred — confirm at use sites.
    EmptyArray
}
impl DBusEncoder {
    /// Collapse all values encoded so far into a single `Value::Struct`.
    ///
    /// Moves the accumulated values plus the signature built while encoding
    /// them into the struct, resets the signature buffer, and pushes the
    /// struct back as the single pending value.
    fn handle_struct (&mut self) -> Result<(),EncoderError> {
        // The original took `&self` and called `self.val.into_iter()`,
        // which cannot compile (it moves out of a borrowed field) — drain
        // the vector through a mutable borrow instead.
        let objs: Vec<Value> = self.val.drain(..).collect();
        let s = Struct {
            objects: objs,
            signature: Signature(self.signature.clone())
        };
        self.signature.clear();
        self.val.push(Value::Struct(s));
        Ok(())
    }
    /// Create an encoder with no pending values and an empty signature.
    pub fn new() -> DBusEncoder {
        DBusEncoder {
            val: Vec::new(),
            signature: "".to_string(),
            handling_key: false
        }
    }
    /// Encode `obj` and return the resulting DBus value.
    pub fn encode<T: Encodable>(obj: &T) -> Result<Value,EncoderError> {
        let mut encoder = DBusEncoder::new();
        try!(obj.encode(&mut encoder));
        // `Vec` has no `take()`; pop the single encoded result instead.
        Ok(encoder.val.pop().expect("DBusEncoder produced no value"))
    }
}
/// `rustc_serialize::Encoder` implementation mapping Rust values onto
/// D-Bus `Value`s. Each scalar `emit_*` pushes one value onto `self.val`
/// and appends the corresponding D-Bus type code to `self.signature`
/// (see the D-Bus specification, "Type system").
impl Encoder for DBusEncoder {
    type Error = EncoderError;

    fn emit_nil(&mut self) -> Result<(), Self::Error> {
        // D-Bus has no unit/nil type.
        Err(EncoderError::Unsupported)
    }
    fn emit_usize(&mut self, v: usize) -> Result<(), Self::Error> {
        self.val.push(Value::BasicValue(BasicValue::Uint64(v as u64)));
        // Fix: the UINT64 type code is "t"; the previous "n" is INT16 and
        // contradicted the Uint64 value pushed above.
        self.signature.push_str("t");
        Ok(())
    }
    fn emit_u64(&mut self, v: u64) -> Result<(), Self::Error> {
        self.val.push(Value::BasicValue(BasicValue::Uint64(v)));
        // Fix: "t" (UINT64), not "n" (INT16).
        self.signature.push_str("t");
        Ok(())
    }
    fn emit_u32(&mut self, v: u32) -> Result<(), Self::Error> {
        self.val.push(Value::BasicValue(BasicValue::Uint32(v)));
        self.signature.push_str("u");
        Ok(())
    }
    fn emit_u16(&mut self, v: u16) -> Result<(), Self::Error> {
        self.val.push(Value::BasicValue(BasicValue::Uint16(v)));
        self.signature.push_str("q");
        Ok(())
    }
    fn emit_u8(&mut self, v: u8) -> Result<(), Self::Error> {
        self.val.push(Value::BasicValue(BasicValue::Byte(v)));
        self.signature.push_str("y");
        Ok(())
    }
    fn emit_isize(&mut self, v: isize) -> Result<(), Self::Error> {
        self.val.push(Value::BasicValue(BasicValue::Int64(v as i64)));
        self.signature.push_str("x");
        Ok(())
    }
    fn emit_i64(&mut self, v: i64) -> Result<(), Self::Error> {
        self.val.push(Value::BasicValue(BasicValue::Int64(v)));
        self.signature.push_str("x");
        Ok(())
    }
    fn emit_i32(&mut self, v: i32) -> Result<(), Self::Error> {
        self.val.push(Value::BasicValue(BasicValue::Int32(v)));
        self.signature.push_str("i");
        Ok(())
    }
    fn emit_i16(&mut self, v: i16) -> Result<(), Self::Error> {
        self.val.push(Value::BasicValue(BasicValue::Int16(v)));
        self.signature.push_str("n");
        Ok(())
    }
    fn emit_i8(&mut self, _v: i8) -> Result<(), Self::Error> {
        // D-Bus has no signed 8-bit type.
        Err(EncoderError::Unsupported)
    }
    fn emit_bool(&mut self, v: bool) -> Result<(), Self::Error> {
        self.val.push(Value::BasicValue(BasicValue::Boolean(v)));
        self.signature.push_str("b");
        Ok(())
    }
    fn emit_f64(&mut self, v: f64) -> Result<(), Self::Error> {
        self.val.push(Value::Double(v));
        self.signature.push_str("d");
        Ok(())
    }
    fn emit_f32(&mut self, v: f32) -> Result<(), Self::Error> {
        // D-Bus only has DOUBLE; widen losslessly.
        self.val.push(Value::Double(v as f64));
        self.signature.push_str("d");
        Ok(())
    }
    fn emit_char(&mut self, v: char) -> Result<(), Self::Error> {
        // NOTE(review): `as u8` silently truncates non-ASCII chars — confirm
        // whether chars above U+00FF should be rejected instead.
        self.val.push(Value::BasicValue(BasicValue::Byte(v as u8)));
        self.signature.push_str("y");
        Ok(())
    }
    fn emit_str(&mut self, v: &str) -> Result<(), Self::Error> {
        self.val.push(Value::BasicValue(BasicValue::String(v.to_string())));
        self.signature.push_str("s");
        Ok(())
    }
    // Structs, tuples and tuple structs all collapse the emitted fields into
    // a single D-Bus STRUCT.
    fn emit_struct<F>(&mut self, _name: &str, _len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        try!(f(self));
        self.handle_struct()
    }
    fn emit_struct_field<F>(&mut self, _f_name: &str, _f_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        f(self)
    }
    fn emit_tuple<F>(&mut self, _len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        try!(f(self));
        self.handle_struct()
    }
    fn emit_tuple_arg<F>(&mut self, _idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        f(self)
    }
    fn emit_tuple_struct<F>(&mut self, _name: &str, _len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        try!(f(self));
        self.handle_struct()
    }
    fn emit_tuple_struct_arg<F>(&mut self, _f_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        f(self)
    }
    fn emit_seq<F>(&mut self, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        // The element signature of an empty array cannot be inferred.
        if len == 0 {
            return Err(EncoderError::EmptyArray);
        }
        // NOTE(review): `handle_dbus_array` is not defined in the visible
        // part of this file — confirm it exists elsewhere in the module.
        self.handle_dbus_array(f)
    }
    fn emit_seq_elt<F>(&mut self, _idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        let val = f(self);
        val
    }
    fn emit_map<F>(&mut self, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        if len == 0 {
            return Err(EncoderError::EmptyArray);
        }
        self.handle_dbus_array(f)
    }
    fn emit_map_elt_key<F>(&mut self, _idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        // A dict entry's signature is "{<key><value>}".
        // NOTE(review): `add_to_sig` is not defined in the visible part of
        // this file — confirm it exists elsewhere in the module.
        self.add_to_sig("{");
        self.handling_key = true;
        let val = try!(f(self));
        self.handling_key = false;
        Ok(val)
    }
    fn emit_map_elt_val<F>(&mut self, _idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        let val = try!(f(self));
        self.add_to_sig("}");
        Ok(val)
    }
    // Options and enums have no canonical D-Bus representation here.
    fn emit_option<F>(&mut self, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(EncoderError::Unsupported)
    }
    fn emit_option_none(&mut self) -> Result<(), Self::Error> {
        Err(EncoderError::Unsupported)
    }
    fn emit_option_some<F>(&mut self, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(EncoderError::Unsupported)
    }
    fn emit_enum<F>(&mut self, _name: &str, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(EncoderError::Unsupported)
    }
    fn emit_enum_variant<F>(&mut self, _v_name: &str, _v_id: usize, _len: usize, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(EncoderError::Unsupported)
    }
    fn emit_enum_variant_arg<F>(&mut self, _a_idx: usize, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(EncoderError::Unsupported)
    }
    fn emit_enum_struct_variant<F>(&mut self, _v_name: &str, _v_id: usize, _len: usize, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(EncoderError::Unsupported)
    }
    fn emit_enum_struct_variant_field<F>(&mut self, _f_name: &str, _f_idx: usize, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(EncoderError::Unsupported)
    }
}
encoder.rs: implement map support
use std::collections::HashMap;
use rustc_serialize::{Encoder,Encodable};
use types::{Value,BasicValue,Struct,Signature,Dictionary};
/// `rustc_serialize` encoder that builds D-Bus `Value`s.
pub struct DBusEncoder {
    val: Vec<Value>,        // stack of values produced so far
    signature: String,      // accumulated D-Bus type signature for `val`
    key: Option<BasicValue> // dict key held between the key and value callbacks
}
/// Errors reported by `DBusEncoder`.
#[derive(Debug,PartialEq)]
pub enum EncoderError {
    BadKeyType,   // dictionary key did not encode to a basic D-Bus value
    Unsupported,  // the Rust type has no D-Bus mapping here
    EmptyArray    // empty containers are rejected
}
impl DBusEncoder {
    /// Collapse every value accumulated in `self.val` into one
    /// `Value::Struct` whose signature is the accumulated signature wrapped
    /// in parentheses.
    ///
    /// Fix: the old code *cloned* the stacked field values and left the
    /// originals on the stack, so after encoding a struct the stack was
    /// `[field0, field1, ..., Struct]` and `encode`'s `remove(0)` returned
    /// the first field instead of the struct. Draining moves the fields
    /// into the struct and leaves it as the single result.
    fn handle_struct(&mut self) -> Result<(), EncoderError> {
        let objs: Vec<Value> = self.val.drain(..).collect();
        let s = Struct {
            objects: objs,
            signature: Signature("(".to_string() + &self.signature + ")")
        };
        self.signature = "".to_string();
        self.val.push(Value::Struct(s));
        Ok(())
    }

    /// Collapse every value accumulated in `self.val` into one
    /// `Value::Array`.
    ///
    /// `drain(..)` moves the elements out in order and leaves the vec
    /// empty — equivalent to (and far simpler than) the previous
    /// push-dummy/`swap_remove`/`clear` dance.
    fn handle_array(&mut self) -> Result<(), EncoderError> {
        let objs: Vec<Value> = self.val.drain(..).collect();
        self.val.push(Value::Array(objs));
        Ok(())
    }

    /// Create an encoder with an empty value stack and signature.
    pub fn new() -> DBusEncoder {
        DBusEncoder {
            val: Vec::new(),
            signature: "".to_string(),
            key: None
        }
    }

    /// Encode `obj` and return the single resulting `Value`.
    pub fn encode<T: Encodable>(obj: &T) -> Result<Value, EncoderError> {
        let mut encoder = DBusEncoder::new();
        try!(obj.encode(&mut encoder));
        Ok(encoder.val.remove(0))
    }
}
impl Encoder for DBusEncoder {
type Error = EncoderError;
fn emit_nil(&mut self) -> Result<(), Self::Error> {
Err(EncoderError::Unsupported)
}
fn emit_usize(&mut self, v: usize) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::Uint64(v as u64)));
self.signature.push_str("n");
Ok(())
}
fn emit_u64(&mut self, v: u64) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::Uint64(v)));
self.signature.push_str("n");
Ok(())
}
fn emit_u32(&mut self, v: u32) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::Uint32(v)));
self.signature.push_str("u");
Ok(())
}
fn emit_u16(&mut self, v: u16) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::Uint16(v)));
self.signature.push_str("q");
Ok(())
}
fn emit_u8(&mut self, v: u8) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::Byte(v)));
self.signature.push_str("y");
Ok(())
}
fn emit_isize(&mut self, v: isize) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::Int64(v as i64)));
self.signature.push_str("x");
Ok(())
}
fn emit_i64(&mut self, v: i64) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::Int64(v)));
self.signature.push_str("x");
Ok(())
}
fn emit_i32(&mut self, v: i32) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::Int32(v)));
self.signature.push_str("i");
Ok(())
}
fn emit_i16(&mut self, v: i16) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::Int16(v)));
self.signature.push_str("n");
Ok(())
}
fn emit_i8(&mut self, _v: i8) -> Result<(), Self::Error> {
Err(EncoderError::Unsupported)
}
fn emit_bool(&mut self, v: bool) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::Boolean(v)));
self.signature.push_str("b");
Ok(())
}
fn emit_f64(&mut self, v: f64) -> Result<(), Self::Error> {
self.val.push(Value::Double(v));
self.signature.push_str("d");
Ok(())
}
fn emit_f32(&mut self, v: f32) -> Result<(), Self::Error> {
self.val.push(Value::Double(v as f64));
self.signature.push_str("d");
Ok(())
}
fn emit_char(&mut self, v: char) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::Byte(v as u8)));
self.signature.push_str("y");
Ok(())
}
fn emit_str(&mut self, v: &str) -> Result<(), Self::Error> {
self.val.push(Value::BasicValue(BasicValue::String(v.to_string())));
self.signature.push_str("s");
Ok(())
}
fn emit_struct<F>(&mut self, _name: &str, _len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
try!(f(self));
self.handle_struct()
}
fn emit_struct_field<F>(&mut self, _f_name: &str, _f_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
f(self)
}
fn emit_tuple<F>(&mut self, _len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
try!(f(self));
self.handle_struct()
}
fn emit_tuple_arg<F>(&mut self, _idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
f(self)
}
fn emit_tuple_struct<F>(&mut self, _name: &str, _len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
try!(f(self));
self.handle_struct()
}
fn emit_tuple_struct_arg<F>(&mut self, _f_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
f(self)
}
fn emit_seq<F>(&mut self, _len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
try!(f(self));
self.handle_array()
}
fn emit_seq_elt<F>(&mut self, _idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
f(self)
}
fn emit_map<F>(&mut self, _len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
let map : Dictionary = HashMap::new();
self.val.push(Value::Dictionary(map));
f(self)
}
fn emit_map_elt_key<F>(&mut self, _idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
try!(f(self));
self.key = match self.val.pop().unwrap() {
Value::BasicValue(x) => Some(x),
_ => return Err(EncoderError::BadKeyType)
};
Ok(())
}
fn emit_map_elt_val<F>(&mut self, _idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
let key : BasicValue = self.key.take().unwrap();
try!(f(self));
let val : Value = self.val.pop().unwrap();
let mut map = self.val.pop().unwrap();
match map {
Value::Dictionary(ref mut x) => x.insert(key, val),
_ => panic!("No dictionary on stack")
};
self.val.push(map);
Ok(())
}
fn emit_option<F>(&mut self, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
Err(EncoderError::Unsupported)
}
fn emit_option_none(&mut self) -> Result<(), Self::Error> {
Err(EncoderError::Unsupported)
}
fn emit_option_some<F>(&mut self, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
Err(EncoderError::Unsupported)
}
fn emit_enum<F>(&mut self, _name: &str, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
Err(EncoderError::Unsupported)
}
fn emit_enum_variant<F>(&mut self, _v_name: &str, _v_id: usize, _len: usize, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
Err(EncoderError::Unsupported)
}
fn emit_enum_variant_arg<F>(&mut self, _a_idx: usize, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
Err(EncoderError::Unsupported)
}
fn emit_enum_struct_variant<F>(&mut self, _v_name: &str, _v_id: usize, _len: usize, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
Err(EncoderError::Unsupported)
}
fn emit_enum_struct_variant_field<F>(&mut self, _f_name: &str, _f_idx: usize, _f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
Err(EncoderError::Unsupported)
}
}
#[test]
fn test_array() {
    // A Vec<u32> must encode to a D-Bus array of UINT32 basic values.
    let input: Vec<u32> = vec![1, 2, 3];
    let encoded = DBusEncoder::encode(&input).ok().unwrap();
    let expected: Vec<Value> = (1u32..4)
        .map(|n| Value::BasicValue(BasicValue::Uint32(n)))
        .collect();
    assert_eq!(encoded, Value::Array(expected));
}
|
// const _GRAMMAR: &'static str = include_str!("grammar.pest");
// pest-generated parser for the Handlebars template grammar; the `Rule`
// enum used throughout the tests below is derived from grammar.pest.
#[derive(Parser)]
#[grammar = "grammar.pest"]
pub struct HandlebarsParser;
#[cfg(test)]
use pest::Parser;
#[cfg(test)]
// Assert that `$rule` consumes ALL of `$in`: the span of the last parsed
// pair must end exactly at the input's length (no trailing input left).
macro_rules! assert_rule {
    ($rule:expr, $in:expr) => {
        assert_eq!(
            HandlebarsParser::parse($rule, $in)
                .unwrap()
                .last()
                .unwrap()
                .as_span()
                .end(),
            $in.len()
        );
    };
}
#[cfg(test)]
// Assert that `$rule` does NOT fully match `$in`: either parsing fails
// outright, or it succeeds but stops before the end of the input.
macro_rules! assert_not_rule {
    ($rule:expr, $in:expr) => {
        assert!(
            HandlebarsParser::parse($rule, $in).is_err()
                || HandlebarsParser::parse($rule, $in)
                    .unwrap()
                    .last()
                    .unwrap()
                    .as_span()
                    .end()
                    != $in.len()
        );
    };
}
#[cfg(test)]
// Assert only that `$rule` matches a prefix of `$in` (a full-input match
// is not required, unlike `assert_rule!`).
macro_rules! assert_rule_match {
    ($rule:expr, $in:expr) => {
        assert!(HandlebarsParser::parse($rule, $in).is_ok());
    };
}
#[test]
fn test_raw_text() {
let s = vec![
"<h1> helloworld </h1> ",
r"hello\{{world}}",
r"hello\{{#if world}}nice\{{/if}}",
r"hello \{{{{raw}}}}hello\{{{{/raw}}}}",
];
for i in s.iter() {
assert_rule!(Rule::raw_text, i);
}
let s_not_escape = vec![r"\\{{hello}}"];
for i in s_not_escape.iter() {
assert_not_rule!(Rule::raw_text, i);
}
}
#[test]
fn test_raw_block_text() {
let s = "<h1> {{hello}} </h1>";
assert_rule!(Rule::raw_block_text, s);
}
#[test]
fn test_reference() {
let s = vec![
"a",
"abc",
"../a",
"a.b",
"@abc",
"a.[abc]",
"aBc.[abc]",
"abc.[0].[nice]",
"some-name",
"this.[0].ok",
];
for i in s.iter() {
assert_rule!(Rule::reference, i);
}
}
#[test]
fn test_name() {
    // Both a bare identifier and a subexpression qualify as a helper name.
    for case in &["if", "(abc)"] {
        assert_rule!(Rule::name, case);
    }
}
#[test]
fn test_param() {
let s = vec!["hello", "\"json literal\""];
for i in s.iter() {
assert_rule!(Rule::param, i);
}
}
#[test]
fn test_hash() {
let s = vec![
"hello=world",
"hello=\"world\"",
"hello=(world)",
"hello=(world 0)",
];
for i in s.iter() {
assert_rule!(Rule::hash, i);
}
}
#[test]
fn test_json_literal() {
let s = vec![
"\"json string\"",
"\"quot: \\\"\"",
"'json string'",
"'quot: \\''",
"[]",
"[\"hello\"]",
"[1,2,3,4,true]",
"{\"hello\": \"world\"}",
"{}",
"{\"a\":1, \"b\":2 }",
];
for i in s.iter() {
assert_rule!(Rule::literal, i);
}
}
#[test]
fn test_comment() {
let s = vec!["{{!-- <hello {{ a-b c-d}} {{d-c}} ok --}}",
"{{!--
<li><a href=\"{{up-dir nest-count}}{{base-url}}index.html\">{{this.title}}</a></li>
--}}"];
for i in s.iter() {
assert_rule!(Rule::hbs_comment, i);
}
let s2 = vec!["{{! hello }}", "{{! test me }}"];
for i in s2.iter() {
assert_rule!(Rule::hbs_comment_compact, i);
}
}
#[test]
fn test_subexpression() {
let s = vec!["(sub)", "(sub 0)", "(sub a=1)"];
for i in s.iter() {
assert_rule!(Rule::subexpression, i);
}
}
#[test]
fn test_expression() {
let s = vec![
"{{exp}}",
"{{(exp)}}",
"{{../exp}}",
"{{exp 1}}",
"{{exp \"literal\"}}",
"{{exp ref}}",
"{{exp (sub)}}",
"{{exp (sub 123)}}",
"{{exp []}}",
"{{exp {}}}",
"{{exp key=1}}",
"{{exp key=ref}}",
"{{exp key=(sub)}}",
"{{exp key=(sub 0)}}",
"{{exp key=(sub 0 key=1)}}",
];
for i in s.iter() {
assert_rule!(Rule::expression, i);
}
}
#[test]
fn test_identifier_with_dash() {
let s = vec!["{{exp-foo}}"];
for i in s.iter() {
assert_rule!(Rule::expression, i);
}
}
#[test]
fn test_html_expression() {
let s = vec!["{{{html}}}", "{{{(html)}}}", "{{{(html)}}}"];
for i in s.iter() {
assert_rule!(Rule::html_expression, i);
}
}
#[test]
fn test_helper_start() {
let s = vec![
"{{#if hello}}",
"{{#if (hello)}}",
"{{#if hello=world}}",
"{{#if hello hello=world}}",
"{{#if []}}",
"{{#if {}}}",
"{{#if}}",
"{{~#if hello~}}",
"{{#each people as |person|}}",
"{{#each-obj obj as |val key|}}",
"{{#each assets}}",
];
for i in s.iter() {
assert_rule!(Rule::helper_block_start, i);
}
}
#[test]
fn test_helper_end() {
    // Closing tags, with and without whitespace-trim markers (~).
    let cases = ["{{/if}}", "{{~/if}}", "{{~/if ~}}", "{{/if ~}}"];
    for case in cases.iter() {
        assert_rule!(Rule::helper_block_end, case);
    }
}
#[test]
fn test_helper_block() {
let s = vec![
"{{#if hello}}hello{{/if}}",
"{{#if true}}hello{{/if}}",
"{{#if nice ok=1}}hello{{/if}}",
"{{#if}}hello{{else}}world{{/if}}",
"{{#if}}hello{{^}}world{{/if}}",
"{{#if}}{{#if}}hello{{/if}}{{/if}}",
"{{#if}}hello{{~else}}world{{/if}}",
"{{#if}}hello{{else~}}world{{/if}}",
"{{#if}}hello{{~^~}}world{{/if}}",
"{{#if}}{{/if}}",
];
for i in s.iter() {
assert_rule!(Rule::helper_block, i);
}
}
#[test]
fn test_raw_block() {
let s = vec![
"{{{{if hello}}}}good {{hello}}{{{{/if}}}}",
"{{{{if hello}}}}{{#if nice}}{{/if}}{{{{/if}}}}",
];
for i in s.iter() {
assert_rule!(Rule::raw_block, i);
}
}
#[test]
fn test_block_param() {
let s = vec!["as |person|", "as |val key|"];
for i in s.iter() {
assert_rule!(Rule::block_param, i);
}
}
#[test]
fn test_path() {
let s = vec![
"a",
"a.b.c.d",
"a.[0].[1].[2]",
"a.[abc]",
"a/v/c.d.s",
"a.[0]/b/c/../d",
"a.[bb c]/b/c/../d",
"a.[0].[#hello]",
"../a/b.[0].[1]",
"./this.[0]/[1]/this/../a",
"./this_name",
"./goo/[/bar]",
"a.[你好]",
"a.[10].[#comment]",
"a.[]", // empty key
"././[/foo]",
"[foo]",
"@root/a/b",
];
for i in s.iter() {
assert_rule_match!(Rule::path, i);
}
}
#[test]
fn test_directive_expression() {
let s = vec!["{{* ssh}}", "{{~* ssh}}"];
for i in s.iter() {
assert_rule!(Rule::directive_expression, i);
}
}
#[test]
fn test_directive_block() {
let s = vec![
"{{#* inline}}something{{/inline}}",
"{{~#* inline}}hello{{/inline}}",
"{{#* inline \"partialname\"}}something{{/inline}}",
];
for i in s.iter() {
assert_rule!(Rule::directive_block, i);
}
}
#[test]
fn test_partial_expression() {
let s = vec![
"{{> hello}}",
"{{> (hello)}}",
"{{~> hello a}}",
"{{> hello a=1}}",
];
for i in s.iter() {
assert_rule!(Rule::partial_expression, i);
}
}
#[test]
fn test_partial_block() {
let s = vec!["{{#> hello}}nice{{/hello}}"];
for i in s.iter() {
assert_rule!(Rule::partial_block, i);
}
}
(test) fix test cases for the path rule
// const _GRAMMAR: &'static str = include_str!("grammar.pest");
#[derive(Parser)]
#[grammar = "grammar.pest"]
pub struct HandlebarsParser;
#[cfg(test)]
use pest::Parser;
#[cfg(test)]
macro_rules! assert_rule {
($rule:expr, $in:expr) => {
assert_eq!(
HandlebarsParser::parse($rule, $in)
.unwrap()
.last()
.unwrap()
.as_span()
.end(),
$in.len()
);
};
}
#[cfg(test)]
macro_rules! assert_not_rule {
($rule:expr, $in:expr) => {
assert!(
HandlebarsParser::parse($rule, $in).is_err()
|| HandlebarsParser::parse($rule, $in)
.unwrap()
.last()
.unwrap()
.as_span()
.end()
!= $in.len()
);
};
}
#[cfg(test)]
macro_rules! assert_rule_match {
($rule:expr, $in:expr) => {
assert!(HandlebarsParser::parse($rule, $in).is_ok());
};
}
#[test]
fn test_raw_text() {
let s = vec![
"<h1> helloworld </h1> ",
r"hello\{{world}}",
r"hello\{{#if world}}nice\{{/if}}",
r"hello \{{{{raw}}}}hello\{{{{/raw}}}}",
];
for i in s.iter() {
assert_rule!(Rule::raw_text, i);
}
let s_not_escape = vec![r"\\{{hello}}"];
for i in s_not_escape.iter() {
assert_not_rule!(Rule::raw_text, i);
}
}
#[test]
fn test_raw_block_text() {
let s = "<h1> {{hello}} </h1>";
assert_rule!(Rule::raw_block_text, s);
}
#[test]
fn test_reference() {
let s = vec![
"a",
"abc",
"../a",
"a.b",
"@abc",
"a.[abc]",
"aBc.[abc]",
"abc.[0].[nice]",
"some-name",
"this.[0].ok",
];
for i in s.iter() {
assert_rule!(Rule::reference, i);
}
}
#[test]
fn test_name() {
let s = vec!["if", "(abc)"];
for i in s.iter() {
assert_rule!(Rule::name, i);
}
}
#[test]
fn test_param() {
let s = vec!["hello", "\"json literal\""];
for i in s.iter() {
assert_rule!(Rule::param, i);
}
}
#[test]
fn test_hash() {
let s = vec![
"hello=world",
"hello=\"world\"",
"hello=(world)",
"hello=(world 0)",
];
for i in s.iter() {
assert_rule!(Rule::hash, i);
}
}
#[test]
fn test_json_literal() {
let s = vec![
"\"json string\"",
"\"quot: \\\"\"",
"'json string'",
"'quot: \\''",
"[]",
"[\"hello\"]",
"[1,2,3,4,true]",
"{\"hello\": \"world\"}",
"{}",
"{\"a\":1, \"b\":2 }",
];
for i in s.iter() {
assert_rule!(Rule::literal, i);
}
}
#[test]
fn test_comment() {
let s = vec!["{{!-- <hello {{ a-b c-d}} {{d-c}} ok --}}",
"{{!--
<li><a href=\"{{up-dir nest-count}}{{base-url}}index.html\">{{this.title}}</a></li>
--}}"];
for i in s.iter() {
assert_rule!(Rule::hbs_comment, i);
}
let s2 = vec!["{{! hello }}", "{{! test me }}"];
for i in s2.iter() {
assert_rule!(Rule::hbs_comment_compact, i);
}
}
#[test]
fn test_subexpression() {
let s = vec!["(sub)", "(sub 0)", "(sub a=1)"];
for i in s.iter() {
assert_rule!(Rule::subexpression, i);
}
}
#[test]
fn test_expression() {
let s = vec![
"{{exp}}",
"{{(exp)}}",
"{{../exp}}",
"{{exp 1}}",
"{{exp \"literal\"}}",
"{{exp ref}}",
"{{exp (sub)}}",
"{{exp (sub 123)}}",
"{{exp []}}",
"{{exp {}}}",
"{{exp key=1}}",
"{{exp key=ref}}",
"{{exp key=(sub)}}",
"{{exp key=(sub 0)}}",
"{{exp key=(sub 0 key=1)}}",
];
for i in s.iter() {
assert_rule!(Rule::expression, i);
}
}
#[test]
fn test_identifier_with_dash() {
let s = vec!["{{exp-foo}}"];
for i in s.iter() {
assert_rule!(Rule::expression, i);
}
}
#[test]
fn test_html_expression() {
let s = vec!["{{{html}}}", "{{{(html)}}}", "{{{(html)}}}"];
for i in s.iter() {
assert_rule!(Rule::html_expression, i);
}
}
#[test]
fn test_helper_start() {
let s = vec![
"{{#if hello}}",
"{{#if (hello)}}",
"{{#if hello=world}}",
"{{#if hello hello=world}}",
"{{#if []}}",
"{{#if {}}}",
"{{#if}}",
"{{~#if hello~}}",
"{{#each people as |person|}}",
"{{#each-obj obj as |val key|}}",
"{{#each assets}}",
];
for i in s.iter() {
assert_rule!(Rule::helper_block_start, i);
}
}
#[test]
fn test_helper_end() {
let s = vec!["{{/if}}", "{{~/if}}", "{{~/if ~}}", "{{/if ~}}"];
for i in s.iter() {
assert_rule!(Rule::helper_block_end, i);
}
}
#[test]
fn test_helper_block() {
let s = vec![
"{{#if hello}}hello{{/if}}",
"{{#if true}}hello{{/if}}",
"{{#if nice ok=1}}hello{{/if}}",
"{{#if}}hello{{else}}world{{/if}}",
"{{#if}}hello{{^}}world{{/if}}",
"{{#if}}{{#if}}hello{{/if}}{{/if}}",
"{{#if}}hello{{~else}}world{{/if}}",
"{{#if}}hello{{else~}}world{{/if}}",
"{{#if}}hello{{~^~}}world{{/if}}",
"{{#if}}{{/if}}",
];
for i in s.iter() {
assert_rule!(Rule::helper_block, i);
}
}
#[test]
fn test_raw_block() {
let s = vec![
"{{{{if hello}}}}good {{hello}}{{{{/if}}}}",
"{{{{if hello}}}}{{#if nice}}{{/if}}{{{{/if}}}}",
];
for i in s.iter() {
assert_rule!(Rule::raw_block, i);
}
}
#[test]
fn test_block_param() {
let s = vec!["as |person|", "as |val key|"];
for i in s.iter() {
assert_rule!(Rule::block_param, i);
}
}
#[test]
fn test_path() {
let s = vec![
"a",
"a.b.c.d",
"a.[0].[1].[2]",
"a.[abc]",
"a/v/c.d.s",
"a.[0]/b/c/d",
"a.[bb c]/b/c/d",
"a.[0].[#hello]",
"../a/b.[0].[1]",
"./this.[0]/[1]/this/a",
"./this_name",
"./goo/[/bar]",
"a.[你好]",
"a.[10].[#comment]",
"a.[]", // empty key
"././[/foo]",
"[foo]",
"@root/a/b",
];
for i in s.iter() {
assert_rule_match!(Rule::path, i);
}
}
#[test]
fn test_directive_expression() {
let s = vec!["{{* ssh}}", "{{~* ssh}}"];
for i in s.iter() {
assert_rule!(Rule::directive_expression, i);
}
}
#[test]
fn test_directive_block() {
let s = vec![
"{{#* inline}}something{{/inline}}",
"{{~#* inline}}hello{{/inline}}",
"{{#* inline \"partialname\"}}something{{/inline}}",
];
for i in s.iter() {
assert_rule!(Rule::directive_block, i);
}
}
#[test]
fn test_partial_expression() {
let s = vec![
"{{> hello}}",
"{{> (hello)}}",
"{{~> hello a}}",
"{{> hello a=1}}",
];
for i in s.iter() {
assert_rule!(Rule::partial_expression, i);
}
}
#[test]
fn test_partial_block() {
let s = vec!["{{#> hello}}nice{{/hello}}"];
for i in s.iter() {
assert_rule!(Rule::partial_block, i);
}
}
|
use std::cell::RefCell;
use std::collections::HashMap;
use std::rc::Rc;
use bootstrap::{Action, EventHandler, EventSig, MouseButton};
use color::ColorAlpha;
use overlay::{Disc, Quad, Renderer, RenderInput, Text, Texture2D, Triangle, Vert};
use scene::Scene;
type Time = f32;
/// Rectangular viewport in pixels: origin (x, y) plus width and height.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Viewport {
    pub x: f32,
    pub y: f32,
    pub w: f32,
    pub h: f32
}
/// Top-level GUI: owns the overlay renderer, the widgets (keyed by id) and
/// the mouse-event bookkeeping needed to detect clicks and drags.
pub struct GUI<'a> {
    // common
    renderer: Renderer,
    widgets: HashMap<String, Box<Widget<'a> + 'a>>,
    // event stuff
    last_cursor: Option<[f64; 2]>,           // most recent cursor position, if any
    last_mouse_left_down: Option<[f64; 2]>,  // position of the pending left press
    last_mouse_left_up: Option<[f64; 2]>,    // position of the last left release
    focused_widgets: HashMap<Focus, String>  // widget id per focus kind
}
impl<'a> GUI<'a> {
    /// Create a GUI covering `viewport`, with its own overlay renderer.
    pub fn new(viewport: Viewport, scene: &mut Scene) -> Self {
        let width = viewport.w.ceil() as u32;
        let height = viewport.h.ceil() as u32;
        GUI {
            renderer: Renderer::new(width, height, 1024, 1024, 1024, scene),
            widgets: HashMap::new(),
            last_cursor: None,
            last_mouse_left_down: None,
            last_mouse_left_up: None,
            focused_widgets: HashMap::new(),
        }
    }

    /// Register (or replace) a widget under `id`.
    pub fn add_widget<W>(&mut self, id: &str, widget: &W) where W: 'a + Clone + Widget<'a> {
        self.widgets.insert(id.to_owned(), Box::new(widget.clone()));
    }

    /// Unregister the widget stored under `id`, if any.
    pub fn remove_widget(&mut self, id: &str) {
        self.widgets.remove(id);
    }

    /// Decompose every widget into primitives, bucket them by kind and hand
    /// the whole batch to the overlay renderer.
    pub fn render(&self) -> &Texture2D {
        let mut tris = Vec::new();
        let mut quads = Vec::new();
        let mut discs = Vec::new();
        let mut texts = Vec::new();
        for prim in self.widgets.values().flat_map(|w| w.unwidget()) {
            match prim {
                WidgetPrim::Triangle(tri) => tris.push(tri),
                WidgetPrim::Quad(quad) => quads.push(quad),
                WidgetPrim::Disc(disc) => discs.push(disc),
                WidgetPrim::Text(text) => texts.push(text)
            }
        }
        let render_input = RenderInput::new()
            .triangles(&tris)
            .quads(&quads)
            .discs(&discs)
            .texts(&texts, 1.);
        self.renderer.render(render_input)
    }
}
/// Kinds of focus a widget can hold: a pressed/released mouse button, or an
/// in-progress drag gesture.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
enum Focus {
    MouseButton(MouseButton, Action),
    Drag
}
/// Event routing: translates raw mouse events into widget focus, click and
/// drag notifications.
impl<'a> EventHandler for GUI<'a> {
    fn on_mouse_button(&mut self, button: MouseButton, action: Action) -> EventSig {
        if let MouseButton::Button1 = button {
            // Fix: a button event can arrive before any cursor movement, in
            // which case `last_cursor` is still None; the old `.unwrap()`
            // panicked here.
            let last_cursor = match self.last_cursor {
                Some(c) => c,
                None => return EventSig::Handled,
            };
            match action {
                Action::Press => {
                    self.last_mouse_left_down = Some(last_cursor);
                    self.last_mouse_left_up = None;
                    // Hand the press to widgets until one takes focus.
                    for (key, widget) in &mut self.widgets {
                        if widget.on_mouse_button(button, action) == EventSig::Focused {
                            self.focused_widgets.insert(Focus::MouseButton(button, action), key.clone());
                            break;
                        }
                    }
                },
                Action::Release => {
                    // Fix: only treat this as a click when the press was
                    // actually recorded (the old code unwrapped
                    // `last_mouse_left_down` and panicked otherwise).
                    if let Some(down) = self.last_mouse_left_down {
                        // A release within 5 px of the press is a click.
                        if px_dist(down, last_cursor) <= 5. {
                            for widget in self.widgets.values() {
                                widget.on_click(last_cursor);
                            }
                        }
                    }
                    self.last_mouse_left_down = None;
                    self.last_mouse_left_up = Some(last_cursor);
                    self.focused_widgets.remove(&Focus::MouseButton(button, Action::Press));
                    self.focused_widgets.remove(&Focus::Drag);
                },
                _ => ()
            }
        }
        EventSig::Handled
    }

    // TODO: change the implementation to take into account widget focus
    fn on_cursor_move(&mut self, cursor: [f64; 2]) -> EventSig {
        self.last_cursor = Some(cursor);
        if let Some(key) = self.focused_widgets.get(&Focus::Drag).cloned() {
            // Fix: the focused widget may have been removed via
            // `remove_widget` while still focused; don't unwrap.
            if let (Some(focused), Some(down_cursor)) =
                (self.widgets.get(&key), self.last_mouse_left_down) {
                focused.on_drag(cursor, down_cursor);
            }
        } else if let Some(key) = self.focused_widgets.get(&Focus::MouseButton(MouseButton::Button1, Action::Press)).cloned() {
            if let (Some(focused), Some(down_cursor)) =
                (self.widgets.get(&key), self.last_mouse_left_down) {
                // Moving more than 5 px while pressed promotes it to a drag.
                if px_dist(down_cursor, cursor) > 5. {
                    self.focused_widgets.insert(Focus::Drag, key);
                    focused.on_drag(cursor, down_cursor);
                }
            }
        }
        EventSig::Handled
    }
}
/// Widget primitives.
///
/// A widget primitive is used as primary a tool to build up widgets. A widget is basically just a
/// sum of widget primitives used to represent it.
pub enum WidgetPrim<'a> {
    Triangle(Triangle),
    Quad(Quad),
    Disc(Disc),
    Text(Text<'a>)
}
/// A renderable, interactive GUI element.
pub trait Widget<'a>: EventHandler {
    /// Decompose the widget into the primitives used to draw it.
    fn unwidget(&self) -> Vec<WidgetPrim<'a>>;
    /// Called with the cursor position when a click lands; no-op by default.
    fn on_click(&self, _: [f64; 2]) {}
    /// Called with (current, press) cursor positions during a drag; no-op by default.
    fn on_drag(&self, _: [f64; 2], _: [f64; 2]) {}
}
/// Horizontal progress bar rendered as two quads sharing a movable edge.
pub struct ProgressBar {
    w: f32,                 // total bar width in pixels
    progress_quad: Quad,    // filled (elapsed) part, left of the cursor
    inactive_quad: Quad,    // remaining part, right of the cursor
    recip_dur_sec: f32,     // 1 / duration — avoids a division per update
    listeners: HashMap<String, Rc<RefCell<ProgressBarListener>>>
}
/// Events broadcast to `ProgressBarListener`s.
pub enum ProgressBarEvent {
    Set(Time), // cursor moved to the given time (seconds)
    Toggle     // play/pause-style toggle request
}
/// Observer notified of progress-bar events; the bool return value's
/// meaning is not used by the visible callers — TODO confirm its contract.
pub trait ProgressBarListener {
    fn on(&mut self, e: ProgressBarEvent) -> bool;
}
impl ProgressBar {
    /// Build a `w`×`h` pixel progress bar for a clip lasting `dur_sec`
    /// seconds. The progress quad starts with zero width; the inactive quad
    /// initially covers the whole bar.
    pub fn new(w: f32, h: f32, progress_color: ColorAlpha, inactive_color: ColorAlpha, dur_sec: f32) -> Rc<RefCell<Self>> {
        let pcol = *progress_color.as_ref();
        let icol = *inactive_color.as_ref();
        let progress_quad = Quad(
            Vert::new([0., 0., 0.], pcol),
            Vert::new([0., h, 0.], pcol),
            Vert::new([0., 0., 0.], pcol),
            Vert::new([0., h, 0.], pcol)
        );
        let inactive_quad = Quad(
            Vert::new([w, 0., 0.], icol),
            Vert::new([w, h, 0.], icol),
            Vert::new([0., 0., 0.], icol),
            Vert::new([0., h, 0.], icol)
        );
        Rc::new(RefCell::new(ProgressBar {
            w: w,
            progress_quad: progress_quad,
            inactive_quad: inactive_quad,
            recip_dur_sec: 1. / dur_sec,
            listeners: HashMap::new()
        }))
    }

    /// Add a listener.
    pub fn add_listener(&mut self, key: &str, listener: &Rc<RefCell<ProgressBarListener>>) {
        self.listeners.insert(key.to_owned(), listener.clone());
    }

    /// Remove a listener.
    pub fn remove_listener(&mut self, key: &str) {
        self.listeners.remove(key);
    }

    /// Move the boundary between the progress and inactive quads to `c`
    /// pixels from the left edge. (Extracted helper: `set` and
    /// `on_cursor_change` previously duplicated these four assignments.)
    fn update_quads(&mut self, c: f32) {
        self.progress_quad.0.pos[0] = c;
        self.progress_quad.1.pos[0] = c;
        self.inactive_quad.2.pos[0] = c;
        self.inactive_quad.3.pos[0] = c;
    }

    /// Set the cursor (seconds).
    pub fn set(&mut self, cursor: f32) {
        // seconds → pixels
        let c = cursor * self.recip_dur_sec * self.w;
        self.update_quads(c);
        for l in self.listeners.values() {
            l.borrow_mut().on(ProgressBarEvent::Set(cursor));
        }
    }

    /// Broadcast a toggle event to all listeners.
    pub fn toggle(&mut self) {
        for l in self.listeners.values() {
            l.borrow_mut().on(ProgressBarEvent::Toggle);
        }
    }

    /// React to a cursor position given in pixels.
    pub fn on_cursor_change(&mut self, cursor: [f64; 2]) {
        let c = cursor[0] as f32;
        self.update_quads(c);
        for l in self.listeners.values() {
            // pixels → seconds for the listeners
            l.borrow_mut().on(ProgressBarEvent::Set(c / (self.recip_dur_sec * self.w)));
        }
    }
}
impl EventHandler for Rc<RefCell<ProgressBar>> {
    /// A progress bar grabs focus on a press and ignores everything else.
    fn on_mouse_button(&mut self, _: MouseButton, action: Action) -> EventSig {
        match action {
            Action::Press => EventSig::Focused,
            _ => EventSig::Ignored
        }
    }
}
impl<'a> Widget<'a> for Rc<RefCell<ProgressBar>> {
    fn unwidget(&self) -> Vec<WidgetPrim<'a>> {
        // A progress bar is drawn as exactly its two quads.
        let bar = self.borrow();
        vec![
            WidgetPrim::Quad(bar.progress_quad),
            WidgetPrim::Quad(bar.inactive_quad),
        ]
    }

    fn on_click(&self, cursor: [f64; 2]) {
        self.borrow_mut().on_cursor_change(cursor);
    }

    fn on_drag(&self, cursor: [f64; 2], down_cursor: [f64; 2]) {
        // Dragging only moves along the x axis; keep the press y.
        let pos = [cursor[0], down_cursor[1]];
        self.borrow_mut().on_cursor_change(pos);
    }
}
/// Pixel distance between two points.
fn px_dist(a: [f64; 2], b: [f64; 2]) -> f64 {
    // hypot computes sqrt(dx² + dy²) without intermediate
    // overflow/underflow for extreme coordinates.
    f64::hypot(b[0] - a[0], b[1] - a[1])
}
Removed useless ProgressBarEvent.
use std::cell::RefCell;
use std::collections::HashMap;
use std::rc::Rc;
use bootstrap::{Action, EventHandler, EventSig, MouseButton};
use color::ColorAlpha;
use overlay::{Disc, Quad, Renderer, RenderInput, Text, Texture2D, Triangle, Vert};
use scene::Scene;
type Time = f32;
/// An axis-aligned rectangular viewport (origin + size), in pixels.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Viewport {
    pub x: f32,
    pub y: f32,
    pub w: f32,
    pub h: f32
}
/// Top-level GUI: owns the overlay renderer, the widgets, and the mouse-event state.
pub struct GUI<'a> {
    // common
    renderer: Renderer,
    // widgets, keyed by a caller-chosen identifier
    widgets: HashMap<String, Box<Widget<'a> + 'a>>,
    // event stuff: last known cursor position and left-button press/release positions
    last_cursor: Option<[f64; 2]>,
    last_mouse_left_down: Option<[f64; 2]>,
    last_mouse_left_up: Option<[f64; 2]>,
    // which widget currently holds each kind of focus
    focused_widgets: HashMap<Focus, String>
}
impl<'a> GUI<'a> {
    /// Create a GUI rendering into a texture sized after `viewport`.
    pub fn new(viewport: Viewport, scene: &mut Scene) -> Self {
        GUI {
            // 1024s look like per-primitive capacities — TODO confirm against Renderer::new
            renderer: Renderer::new(viewport.w.ceil() as u32, viewport.h.ceil() as u32, 1024, 1024, 1024, scene),
            widgets: HashMap::new(),
            last_cursor: None,
            last_mouse_left_down: None,
            last_mouse_left_up: None,
            focused_widgets: HashMap::new(),
        }
    }
    /// Register (or replace) a widget under `id`; the widget is cloned and boxed.
    pub fn add_widget<W>(&mut self, id: &str, widget: &W) where W: 'a + Clone + Widget<'a> {
        self.widgets.insert(id.to_owned(), Box::new(widget.clone()));
    }
    /// Unregister the widget stored under `id`; unknown ids are a no-op.
    pub fn remove_widget(&mut self, id: &str) {
        self.widgets.remove(id);
    }
    /// Render all widgets: flatten them into per-kind primitive batches and
    /// hand the batches to the renderer in a single pass.
    pub fn render(&self) -> &Texture2D {
        let mut tris = Vec::new();
        let mut quads = Vec::new();
        let mut discs = Vec::new();
        let mut texts = Vec::new();
        for widget in self.widgets.values() {
            for prim in widget.unwidget() {
                match prim {
                    WidgetPrim::Triangle(ref tri) => tris.push(*tri),
                    WidgetPrim::Quad(ref quad) => quads.push(*quad),
                    WidgetPrim::Disc(ref disc) => discs.push(*disc),
                    WidgetPrim::Text(ref text) => texts.push(*text)
                }
            }
        }
        let render_input = RenderInput::new()
            .triangles(&tris)
            .quads(&quads)
            .discs(&discs)
            .texts(&texts, 1.);
        self.renderer.render(render_input)
    }
}
/// The ways a widget can hold input focus.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
enum Focus {
    // focused because a given mouse button is in a given state (e.g. left pressed)
    MouseButton(MouseButton, Action),
    // focused because an on-going drag started on the widget
    Drag
}
impl<'a> EventHandler for GUI<'a> {
    /// Handle a mouse button event.
    ///
    /// A press gives focus to the first widget that claims it; a release close
    /// to the press position (≤ 5 px) is treated as a click and broadcast to
    /// every widget.
    ///
    /// Fix: the previous implementation `unwrap()`ed `last_cursor` and
    /// `last_mouse_left_down`, panicking when a button event arrived before any
    /// cursor move, or when a release arrived without a tracked press (e.g. the
    /// press happened before the window gained focus). Those cases are now
    /// ignored gracefully.
    fn on_mouse_button(&mut self, button: MouseButton, action: Action) -> EventSig {
        if let MouseButton::Button1 = button {
            // without a known cursor position we cannot attribute the event to a widget
            let last_cursor = match self.last_cursor {
                Some(cursor) => cursor,
                None => return EventSig::Ignored
            };
            match action {
                Action::Press => {
                    self.last_mouse_left_down = Some(last_cursor);
                    self.last_mouse_left_up = None;
                    for (key, widget) in &mut self.widgets {
                        if widget.on_mouse_button(button, action) == EventSig::Focused {
                            self.focused_widgets.insert(Focus::MouseButton(button, action), key.clone());
                            break;
                        }
                    }
                },
                Action::Release => {
                    // check whether it’s a click; a release without a tracked
                    // press cannot be one
                    if let Some(down) = self.last_mouse_left_down {
                        if px_dist(down, last_cursor) <= 5. {
                            // it’s a click
                            for widget in self.widgets.values() {
                                widget.on_click(last_cursor);
                            }
                        }
                    }
                    self.last_mouse_left_down = None;
                    self.last_mouse_left_up = Some(last_cursor);
                    self.focused_widgets.remove(&Focus::MouseButton(button, Action::Press));
                    self.focused_widgets.remove(&Focus::Drag);
                },
                _ => ()
            }
        }
        EventSig::Handled
    }
    // TODO: change the implementation to take into account widget focus
    /// Track the cursor and feed drags to the focused widget.
    ///
    /// Fix: widget lookup and `last_mouse_left_down` are now checked instead of
    /// `unwrap()`ed, so a widget removed while focused, or a stale drag focus,
    /// no longer panics.
    fn on_cursor_move(&mut self, cursor: [f64; 2]) -> EventSig {
        self.last_cursor = Some(cursor);
        if let Some(key) = self.focused_widgets.get(&Focus::Drag).cloned() {
            // an on-going drag: keep feeding it, anchored at the press position
            if let (Some(focused), Some(down_cursor)) = (self.widgets.get(&key), self.last_mouse_left_down) {
                focused.on_drag(cursor, down_cursor);
            }
        } else if let Some(key) = self.focused_widgets.get(&Focus::MouseButton(MouseButton::Button1, Action::Press)).cloned() {
            // a pressed widget turns into a drag once the cursor strays more than 5 px
            if let (Some(focused), Some(down_cursor)) = (self.widgets.get(&key), self.last_mouse_left_down) {
                if px_dist(down_cursor, cursor) > 5. {
                    self.focused_widgets.insert(Focus::Drag, key);
                    focused.on_drag(cursor, down_cursor);
                }
            }
        }
        EventSig::Handled
    }
}
/// Widget primitives.
///
/// A widget primitive is the primary tool used to build up widgets: a widget
/// is basically just a sum of the widget primitives that represent it.
pub enum WidgetPrim<'a> {
    Triangle(Triangle),
    Quad(Quad),
    Disc(Disc),
    Text(Text<'a>)
}
/// A renderable, clickable GUI element.
pub trait Widget<'a>: EventHandler {
    /// Decompose the widget into render primitives.
    fn unwidget(&self) -> Vec<WidgetPrim<'a>>;
    /// Called with the cursor position when the widget is clicked; default: ignore.
    fn on_click(&self, _: [f64; 2]) {}
    /// Called with (current, press) cursor positions while dragging; default: ignore.
    fn on_drag(&self, _: [f64; 2], _: [f64; 2]) {}
}
/// A horizontal progress bar mapping a time cursor (seconds) onto a pixel width.
pub struct ProgressBar {
    // total width in pixels
    w: f32,
    // quad covering the elapsed part
    progress_quad: Quad,
    // quad covering the remaining part
    inactive_quad: Quad,
    // 1 / duration, so seconds→fraction is a single multiply
    recip_dur_sec: f32,
    // observers notified when the cursor moves, keyed by caller-chosen ids
    listeners: HashMap<String, Rc<RefCell<ProgressBarListener>>>
}
/// Observer notified when the progress bar's cursor is (re)set.
pub trait ProgressBarListener {
    /// Called with the new cursor time in seconds; the meaning of the returned
    /// bool is up to implementors (the bar itself ignores it).
    fn on_set(&mut self, t: Time) -> bool;
}
impl ProgressBar {
    /// Create a `w`×`h` bar for content lasting `dur_sec` seconds, cursor at 0.
    ///
    /// Both quads initially share their moving edge at x = 0: the progress quad
    /// is degenerate (zero width) and the inactive quad spans the full width.
    pub fn new(w: f32, h: f32, progress_color: ColorAlpha, inactive_color: ColorAlpha, dur_sec: f32) -> Rc<RefCell<Self>> {
        let pcol = *progress_color.as_ref();
        let icol = *inactive_color.as_ref();
        let progress_quad = Quad(
            Vert::new([0., 0., 0.], pcol),
            Vert::new([0., h, 0.], pcol),
            Vert::new([0., 0., 0.], pcol),
            Vert::new([0., h, 0.], pcol)
        );
        let inactive_quad = Quad(
            Vert::new([w, 0., 0.], icol),
            Vert::new([w, h, 0.], icol),
            Vert::new([0., 0., 0.], icol),
            Vert::new([0., h, 0.], icol)
        );
        Rc::new(RefCell::new(ProgressBar {
            w: w,
            progress_quad: progress_quad,
            inactive_quad: inactive_quad,
            recip_dur_sec: 1. / dur_sec,
            listeners: HashMap::new()
        }))
    }
    /// Add a listener.
    ///
    /// Registers `listener` under `key`, replacing any previous listener with that key.
    pub fn add_listener(&mut self, key: &str, listener: &Rc<RefCell<ProgressBarListener>>) {
        self.listeners.insert(key.to_owned(), listener.clone());
    }
    /// Remove a listener.
    pub fn remove_listener(&mut self, key: &str) {
        self.listeners.remove(key);
    }
    /// Set the cursor (seconds).
    ///
    /// Maps seconds to pixels (t / duration * w), moves the shared edge of the
    /// two quads there and notifies listeners with the time in seconds.
    pub fn set(&mut self, cursor: f32) {
        let c = cursor * self.recip_dur_sec * self.w;
        // update the quads: vertices 0/1 of the progress quad and 2/3 of the
        // inactive quad form the moving boundary
        self.progress_quad.0.pos[0] = c;
        self.progress_quad.1.pos[0] = c;
        self.inactive_quad.2.pos[0] = c;
        self.inactive_quad.3.pos[0] = c;
        for l in self.listeners.values() {
            l.borrow_mut().on_set(cursor);
        }
    }
    /// Move the cursor to a pixel position (inverse mapping of `set`):
    /// listeners receive x / (recip_dur_sec * w), i.e. the time in seconds.
    pub fn on_cursor_change(&mut self, cursor: [f64; 2]) {
        let c = cursor[0] as f32;
        // update the quads
        self.progress_quad.0.pos[0] = c;
        self.progress_quad.1.pos[0] = c;
        self.inactive_quad.2.pos[0] = c;
        self.inactive_quad.3.pos[0] = c;
        for l in self.listeners.values() {
            l.borrow_mut().on_set(c / (self.recip_dur_sec * self.w));
        }
    }
}
impl EventHandler for Rc<RefCell<ProgressBar>> {
    /// A progress bar grabs focus when pressed and ignores every other button event.
    fn on_mouse_button(&mut self, _: MouseButton, action: Action) -> EventSig {
        match action {
            Action::Press => EventSig::Focused,
            _ => EventSig::Ignored
        }
    }
}
impl<'a> Widget<'a> for Rc<RefCell<ProgressBar>> {
    /// Decompose the bar into its two quads for rendering.
    fn unwidget(&self) -> Vec<WidgetPrim<'a>> {
        let bar = &self.borrow();
        vec![WidgetPrim::Quad(bar.progress_quad), WidgetPrim::Quad(bar.inactive_quad)]
    }
    /// A click moves the bar's cursor to the click position.
    fn on_click(&self, cursor: [f64; 2]) {
        self.borrow_mut().on_cursor_change(cursor);
    }
    /// Dragging tracks the cursor's x only; y is pinned to the press position.
    fn on_drag(&self, cursor: [f64; 2], down_cursor: [f64; 2]) {
        self.borrow_mut().on_cursor_change([cursor[0], down_cursor[1]]);
    }
}
/// Pixel (Euclidean) distance between two points.
fn px_dist(a: [f64; 2], b: [f64; 2]) -> f64 {
    let dx = b[0] - a[0];
    let dy = b[1] - a[1];
    (dx * dx + dy * dy).sqrt()
}
|
#![crate_name = "hiredis"]
#![crate_type = "lib"]
#![feature(globs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(dead_code)]
extern crate libc;
use std::mem::transmute;
pub mod api;
pub struct Reply {
reply: *const api::Reply
}
pub enum ReplyCode {
String = 1,
Array,
Integer,
Nil,
Status,
Error,
Unknown
}
impl Reply {
    /// Wrap a raw hiredis reply pointer; the wrapper frees it on drop.
    unsafe fn new(reply: *const api::Reply) -> Reply {
        Reply {
            reply: reply
        }
    }
    /// Return the kind of this reply, mapped from hiredis' integer type code.
    ///
    /// Fix: made public — callers outside the crate have no other way to
    /// inspect a reply, so a private accessor made the whole type useless
    /// downstream.
    pub fn typename(&self) -> ReplyCode {
        unsafe {
            match (*self.reply)._type {
                1 => String,
                2 => Array,
                3 => Integer,
                4 => Nil,
                5 => Status,
                6 => Error,
                _ => Unknown
            }
        }
    }
}
impl Drop for Reply {
fn drop(&mut self) {
unsafe {
api::freeReplyObject(transmute(self.reply))
}
}
}
pub struct Redis {
context: *const api::Context
}
impl Redis {
pub fn new(ip: &str, port: i32) -> Redis {
unsafe {
Redis {
context: api::redisConnect(
ip.to_c_str().as_ptr(),
port
)
}
}
}
/* TODO: Move to IoResult using context.err */
pub fn exec(&self, command: &str) -> Option<Reply> {
command.with_c_str(|v| {
unsafe {
let result = api::redisCommand(self.context, v);
/* Fail if the command errored for some reason. */
if result == 0 as *const ::libc::c_void {
return None;
}
/* Otherwise transmute the void pointer memory into a pointer
* to a reply structure and return it. */
Some(Reply::new(transmute(result)))
}
})
}
pub fn receive(&self) -> Option<Reply> {
unsafe {
let reply: Reply = std::mem::uninitialized();
let result = api::redisGetReply(self.context, transmute(&reply));
Some(reply)
}
}
}
impl Drop for Redis {
fn drop(&mut self) {
unsafe {
api::redisFree(self.context)
}
}
}
Fix public function
#![crate_name = "hiredis"]
#![crate_type = "lib"]
#![feature(globs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(dead_code)]
extern crate libc;
use std::mem::transmute;
pub mod api;
/// An owned wrapper around a raw hiredis reply; freed on drop.
pub struct Reply {
    reply: *const api::Reply
}
/// The kind of a Redis reply; discriminants mirror hiredis' reply type codes
/// — TODO confirm against the REDIS_REPLY_* constants in use.
pub enum ReplyCode {
    String = 1,
    Array,
    Integer,
    Nil,
    Status,
    Error,
    // any type code outside 1..6
    Unknown
}
impl Reply {
    /// Wrap a raw hiredis reply pointer; the wrapper frees it on drop.
    unsafe fn new(reply: *const api::Reply) -> Reply {
        Reply {
            reply: reply
        }
    }
    /// Return the kind of this reply, mapped from hiredis' integer type code.
    pub fn typename(&self) -> ReplyCode {
        unsafe {
            match (*self.reply)._type {
                1 => String,
                2 => Array,
                3 => Integer,
                4 => Nil,
                5 => Status,
                6 => Error,
                _ => Unknown
            }
        }
    }
}
impl Drop for Reply {
    fn drop(&mut self) {
        // Hand the reply back to hiredis for deallocation.
        unsafe {
            api::freeReplyObject(transmute(self.reply))
        }
    }
}
/// A connection to a Redis server, wrapping a raw hiredis context.
pub struct Redis {
    context: *const api::Context
}
impl Redis {
pub fn new(ip: &str, port: i32) -> Redis {
unsafe {
Redis {
context: api::redisConnect(
ip.to_c_str().as_ptr(),
port
)
}
}
}
/* TODO: Move to IoResult using context.err */
pub fn exec(&self, command: &str) -> Option<Reply> {
command.with_c_str(|v| {
unsafe {
let result = api::redisCommand(self.context, v);
/* Fail if the command errored for some reason. */
if result == 0 as *const ::libc::c_void {
return None;
}
/* Otherwise transmute the void pointer memory into a pointer
* to a reply structure and return it. */
Some(Reply::new(transmute(result)))
}
})
}
pub fn receive(&self) -> Option<Reply> {
unsafe {
let reply: Reply = std::mem::uninitialized();
let result = api::redisGetReply(self.context, transmute(&reply));
Some(reply)
}
}
}
impl Drop for Redis {
    fn drop(&mut self) {
        // Close the connection and free the hiredis context.
        unsafe {
            api::redisFree(self.context)
        }
    }
}
|
use std::ffi::CStr;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::os::raw::{c_char, c_int};
use std::slice;
use c_ares_sys;
use c_types;
use itertools::Itertools;
use types::AddressFamily;
use utils::address_family;
fn hostname(hostent: &c_types::hostent) -> &CStr {
unsafe { CStr::from_ptr(hostent.h_name) }
}
fn addresses(hostent: &c_types::hostent) -> HostAddressResultsIter {
// h_addrtype is `c_short` on windows, `c_int` on unix. Tell clippy to
// allow the identity conversion in the latter case.
#[cfg_attr(feature = "cargo-clippy", allow(identity_conversion))]
let addrtype = c_int::from(hostent.h_addrtype);
HostAddressResultsIter {
family: address_family(addrtype),
next: unsafe { &*(hostent.h_addr_list as *const _) },
}
}
fn aliases(hostent: &c_types::hostent) -> HostAliasResultsIter {
HostAliasResultsIter {
next: unsafe { &*(hostent.h_aliases as *const _) },
}
}
/// Shared `Display` body: "Hostname: …, Addresses: […], Aliases: […]".
fn display(hostent: &c_types::hostent, fmt: &mut fmt::Formatter) -> fmt::Result {
    let name = hostname(hostent).to_str().unwrap_or("<not utf8>");
    write!(fmt, "Hostname: {}, ", name)?;
    let addrs = addresses(hostent).format(", ");
    write!(fmt, "Addresses: [{}]", addrs)?;
    let alias_list = aliases(hostent)
        .map(|cstr| cstr.to_str().unwrap_or("<not utf8>"))
        .format(", ");
    write!(fmt, "Aliases: [{}]", alias_list)
}
pub trait HasHostent<'a>: Sized {
fn hostent(self) -> &'a c_types::hostent;
fn hostname(self) -> &'a CStr {
let hostent = self.hostent();
hostname(hostent)
}
fn addresses(self) -> HostAddressResultsIter<'a> {
let hostent = self.hostent();
addresses(hostent)
}
fn aliases(self) -> HostAliasResultsIter<'a> {
let hostent = self.hostent();
HostAliasResultsIter {
next: unsafe { &*(hostent.h_aliases as *const _) },
}
}
}
#[derive(Debug)]
pub struct HostentOwned {
inner: *mut c_types::hostent,
phantom: PhantomData<c_types::hostent>,
}
impl HostentOwned {
pub fn new(hostent: *mut c_types::hostent) -> HostentOwned {
HostentOwned {
inner: hostent,
phantom: PhantomData,
}
}
}
impl<'a> HasHostent<'a> for &'a HostentOwned {
fn hostent(self) -> &'a c_types::hostent {
unsafe { &*self.inner }
}
}
impl fmt::Display for HostentOwned {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let hostent = self.hostent();
display(hostent, fmt)
}
}
impl Drop for HostentOwned {
fn drop(&mut self) {
unsafe {
c_ares_sys::ares_free_hostent(self.inner);
}
}
}
unsafe impl Send for HostentOwned {}
unsafe impl Sync for HostentOwned {}
#[derive(Clone, Copy)]
pub struct HostentBorrowed<'a> {
inner: &'a c_types::hostent,
}
impl<'a> HostentBorrowed<'a> {
pub fn new(hostent: &'a c_types::hostent) -> HostentBorrowed<'a> {
HostentBorrowed { inner: hostent }
}
}
impl<'a> HasHostent<'a> for HostentBorrowed<'a> {
fn hostent(self) -> &'a c_types::hostent {
self.inner
}
}
impl<'a> fmt::Display for HostentBorrowed<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let hostent = self.hostent();
display(hostent, fmt)
}
}
unsafe impl<'a> Send for HostentBorrowed<'a> {}
unsafe impl<'a> Sync for HostentBorrowed<'a> {}
// Get an IpAddr from a family and an array of bytes, as found in a `hostent`.
//
// Safety: `h_addr` must point at (at least) 4 readable bytes for INET and 16
// for INET6.
//
// Fix: the byte arrays previously came from `mem::uninitialized()`, which is
// deprecated and undefined behavior; zero-initialization is safe and the
// subsequent `copy_from_slice` overwrites every byte anyway.
unsafe fn ip_address_from_bytes(family: AddressFamily, h_addr: *const u8) -> Option<IpAddr> {
    match family {
        AddressFamily::INET => {
            let source = slice::from_raw_parts(h_addr, 4);
            let mut bytes = [0_u8; 4];
            bytes.copy_from_slice(source);
            let ipv4 = Ipv4Addr::from(bytes);
            Some(IpAddr::V4(ipv4))
        }
        AddressFamily::INET6 => {
            let source = slice::from_raw_parts(h_addr, 16);
            let mut bytes = [0_u8; 16];
            bytes.copy_from_slice(source);
            let ipv6 = Ipv6Addr::from(bytes);
            Some(IpAddr::V6(ipv6))
        }
        _ => None,
    }
}
/// Iterator of `IpAddr`s.
#[derive(Clone, Copy, Debug)]
pub struct HostAddressResultsIter<'a> {
family: Option<AddressFamily>,
next: &'a *const c_char,
}
impl<'a> Iterator for HostAddressResultsIter<'a> {
type Item = IpAddr;
fn next(&mut self) -> Option<Self::Item> {
let h_addr = *self.next;
if h_addr.is_null() {
None
} else {
unsafe {
self.next = &*(self.next as *const *const c_char).offset(1);
self.family
.and_then(|family| ip_address_from_bytes(family, h_addr as *const u8))
}
}
}
}
unsafe impl<'a> Send for HostAddressResultsIter<'a> {}
unsafe impl<'a> Sync for HostAddressResultsIter<'a> {}
/// Iterator of `&'a CStr`s.
///
/// Each item is very likely to be a valid UTF-8 string, but the underlying `c-ares` library does
/// not guarantee this - so we leave it to users to decide whether they prefer a fallible
/// conversion, a lossy conversion, or something else altogether.
#[derive(Clone, Copy, Debug)]
pub struct HostAliasResultsIter<'a> {
next: &'a *const c_char,
}
impl<'a> Iterator for HostAliasResultsIter<'a> {
type Item = &'a CStr;
fn next(&mut self) -> Option<Self::Item> {
let h_alias = *self.next;
if h_alias.is_null() {
None
} else {
unsafe {
self.next = &*(self.next as *const *const c_char).offset(1);
let c_str = CStr::from_ptr(h_alias);
Some(c_str)
}
}
}
}
unsafe impl<'a> Send for HostAliasResultsIter<'a> {}
unsafe impl<'a> Sync for HostAliasResultsIter<'a> {}
missed one
use std::ffi::CStr;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::os::raw::{c_char, c_int};
use std::slice;
use c_ares_sys;
use c_types;
use itertools::Itertools;
use types::AddressFamily;
use utils::address_family;
/// The canonical host name, borrowed from the `hostent` as a C string.
fn hostname(hostent: &c_types::hostent) -> &CStr {
    unsafe { CStr::from_ptr(hostent.h_name) }
}
/// Iterate over the addresses in `hostent.h_addr_list`, tagged with the
/// hostent's address family.
fn addresses(hostent: &c_types::hostent) -> HostAddressResultsIter {
    // h_addrtype is `c_short` on windows, `c_int` on unix. Tell clippy to
    // allow the identity conversion in the latter case.
    #[cfg_attr(feature = "cargo-clippy", allow(identity_conversion))]
    let addrtype = c_int::from(hostent.h_addrtype);
    HostAddressResultsIter {
        family: address_family(addrtype),
        // h_addr_list is a NULL-terminated array of pointers; walk it by reference
        next: unsafe { &*(hostent.h_addr_list as *const _) },
    }
}
/// Iterate over the NULL-terminated alias list of `hostent`.
fn aliases(hostent: &c_types::hostent) -> HostAliasResultsIter {
    HostAliasResultsIter {
        next: unsafe { &*(hostent.h_aliases as *const _) },
    }
}
/// Shared `Display` body: "Hostname: …, Addresses: […], Aliases: […]".
fn display(hostent: &c_types::hostent, fmt: &mut fmt::Formatter) -> fmt::Result {
    let name = hostname(hostent).to_str().unwrap_or("<not utf8>");
    write!(fmt, "Hostname: {}, ", name)?;
    let addrs = addresses(hostent).format(", ");
    write!(fmt, "Addresses: [{}]", addrs)?;
    let alias_list = aliases(hostent)
        .map(|cstr| cstr.to_str().unwrap_or("<not utf8>"))
        .format(", ");
    write!(fmt, "Aliases: [{}]", alias_list)
}
/// Read-only view over a `hostent`, shared by owned and borrowed wrappers.
pub trait HasHostent<'a>: Sized {
    /// Access the underlying raw `hostent`.
    fn hostent(self) -> &'a c_types::hostent;
    /// The canonical host name.
    fn hostname(self) -> &'a CStr {
        let hostent = self.hostent();
        hostname(hostent)
    }
    /// Iterator over the host's addresses.
    fn addresses(self) -> HostAddressResultsIter<'a> {
        let hostent = self.hostent();
        addresses(hostent)
    }
    /// Iterator over the host's aliases.
    fn aliases(self) -> HostAliasResultsIter<'a> {
        let hostent = self.hostent();
        aliases(hostent)
    }
}
/// A `hostent` owned by us; freed via `ares_free_hostent` on drop.
#[derive(Debug)]
pub struct HostentOwned {
    inner: *mut c_types::hostent,
    // declare logical ownership of the pointed-to hostent to the drop checker
    phantom: PhantomData<c_types::hostent>,
}
impl HostentOwned {
    /// Take ownership of `hostent`; it will be freed when the wrapper drops.
    pub fn new(hostent: *mut c_types::hostent) -> HostentOwned {
        HostentOwned {
            inner: hostent,
            phantom: PhantomData,
        }
    }
}
impl<'a> HasHostent<'a> for &'a HostentOwned {
    fn hostent(self) -> &'a c_types::hostent {
        // Valid for 'a: the pointer stays alive until the wrapper is dropped.
        unsafe { &*self.inner }
    }
}
impl fmt::Display for HostentOwned {
    // Delegates to the shared free `display` helper.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let hostent = self.hostent();
        display(hostent, fmt)
    }
}
impl Drop for HostentOwned {
    fn drop(&mut self) {
        // Return the hostent to c-ares for deallocation.
        unsafe {
            c_ares_sys::ares_free_hostent(self.inner);
        }
    }
}
// SAFETY(review): relies on the wrapped hostent being immutable after
// construction and uniquely owned by this wrapper — confirm against the
// c-ares allocation contract.
unsafe impl Send for HostentOwned {}
unsafe impl Sync for HostentOwned {}
/// A `hostent` borrowed from c-ares; no cleanup on drop.
#[derive(Clone, Copy)]
pub struct HostentBorrowed<'a> {
    inner: &'a c_types::hostent,
}
impl<'a> HostentBorrowed<'a> {
    /// Wrap a borrowed `hostent`.
    pub fn new(hostent: &'a c_types::hostent) -> HostentBorrowed<'a> {
        HostentBorrowed { inner: hostent }
    }
}
impl<'a> HasHostent<'a> for HostentBorrowed<'a> {
    // The wrapper derives `Copy`, so taking `self` by value is free here.
    fn hostent(self) -> &'a c_types::hostent {
        self.inner
    }
}
impl<'a> fmt::Display for HostentBorrowed<'a> {
    // Delegates to the shared free `display` helper.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let hostent = self.hostent();
        display(hostent, fmt)
    }
}
// SAFETY(review): sound only if the borrowed hostent is never mutated while
// shared — TODO confirm c-ares does not touch it concurrently.
unsafe impl<'a> Send for HostentBorrowed<'a> {}
unsafe impl<'a> Sync for HostentBorrowed<'a> {}
// Get an IpAddr from a family and an array of bytes, as found in a `hostent`.
//
// Safety: `h_addr` must point at (at least) 4 readable bytes for INET and 16
// for INET6.
//
// Fix: the byte arrays previously came from `mem::uninitialized()`, which is
// deprecated and undefined behavior; zero-initialization is safe and the
// subsequent `copy_from_slice` overwrites every byte anyway.
unsafe fn ip_address_from_bytes(family: AddressFamily, h_addr: *const u8) -> Option<IpAddr> {
    match family {
        AddressFamily::INET => {
            let source = slice::from_raw_parts(h_addr, 4);
            let mut bytes = [0_u8; 4];
            bytes.copy_from_slice(source);
            let ipv4 = Ipv4Addr::from(bytes);
            Some(IpAddr::V4(ipv4))
        }
        AddressFamily::INET6 => {
            let source = slice::from_raw_parts(h_addr, 16);
            let mut bytes = [0_u8; 16];
            bytes.copy_from_slice(source);
            let ipv6 = Ipv6Addr::from(bytes);
            Some(IpAddr::V6(ipv6))
        }
        _ => None,
    }
}
/// Iterator of `IpAddr`s.
#[derive(Clone, Copy, Debug)]
pub struct HostAddressResultsIter<'a> {
    // address family shared by all entries; `None` means entries can't be interpreted
    family: Option<AddressFamily>,
    // reference into the NULL-terminated h_addr_list pointer array
    next: &'a *const c_char,
}
impl<'a> Iterator for HostAddressResultsIter<'a> {
    type Item = IpAddr;
    fn next(&mut self) -> Option<Self::Item> {
        let h_addr = *self.next;
        if h_addr.is_null() {
            // NULL terminator: end of the address list
            None
        } else {
            unsafe {
                // advance to the next pointer slot in the array
                self.next = &*(self.next as *const *const c_char).offset(1);
                // NB: an unknown family yields None here, ending iteration early
                self.family
                    .and_then(|family| ip_address_from_bytes(family, h_addr as *const u8))
            }
        }
    }
}
// SAFETY(review): only reads from the hostent's address array — sound if that
// memory is not mutated concurrently; confirm with c-ares.
unsafe impl<'a> Send for HostAddressResultsIter<'a> {}
unsafe impl<'a> Sync for HostAddressResultsIter<'a> {}
/// Iterator of `&'a CStr`s.
///
/// Each item is very likely to be a valid UTF-8 string, but the underlying `c-ares` library does
/// not guarantee this - so we leave it to users to decide whether they prefer a fallible
/// conversion, a lossy conversion, or something else altogether.
#[derive(Clone, Copy, Debug)]
pub struct HostAliasResultsIter<'a> {
    // reference into the NULL-terminated h_aliases pointer array
    next: &'a *const c_char,
}
impl<'a> Iterator for HostAliasResultsIter<'a> {
    type Item = &'a CStr;
    fn next(&mut self) -> Option<Self::Item> {
        let h_alias = *self.next;
        if h_alias.is_null() {
            // NULL terminator: end of the alias list
            None
        } else {
            unsafe {
                // advance to the next pointer slot in the array
                self.next = &*(self.next as *const *const c_char).offset(1);
                let c_str = CStr::from_ptr(h_alias);
                Some(c_str)
            }
        }
    }
}
// SAFETY(review): only reads from the hostent's alias array — sound if that
// memory is not mutated concurrently; confirm with c-ares.
unsafe impl<'a> Send for HostAliasResultsIter<'a> {}
unsafe impl<'a> Sync for HostAliasResultsIter<'a> {}
|
use crate::{
buffer::Buffer, connection::Connection, connection_status::ConnectionState, Error,
PromiseResolver, Result,
};
use amq_protocol::frame::{gen_frame, parse_frame, AMQPFrame, GenError};
use log::{error, trace};
use mio::{event::Source, Events, Interest, Poll, Token, Waker};
use std::{
collections::VecDeque,
io::{self, Read, Write},
sync::Arc,
thread::Builder as ThreadBuilder,
time::{Duration, Instant},
};
// mio tokens: the socket itself and the cross-thread waker
pub(crate) const SOCKET: Token = Token(1);
const WAKER: Token = Token(2);
// buffers are sized to hold this many frames of `frame_size` bytes
const FRAMES_STORAGE: usize = 32;
/// Lifecycle of the io loop.
#[derive(Debug, PartialEq)]
enum Status {
    // created, connection not yet established
    Initial,
    // buffers/heartbeat configured for the connected session
    Setup,
    // the loop must terminate
    Stop,
}
/// The io thread's state: socket, buffers and bookkeeping for one AMQP connection.
pub struct IoLoop<T> {
    connection: Connection,
    socket: T,
    status: Status,
    poll: Poll,
    // wakes the poll from other threads when new frames are queued
    waker: Arc<Waker>,
    // max frame size; buffers hold FRAMES_STORAGE frames of this size
    frame_size: usize,
    receive_buffer: Buffer,
    send_buffer: Buffer,
    // socket readiness flags, cleared when an operation would block
    can_write: bool,
    can_read: bool,
    // heartbeat interval used as the poll timeout (None = no heartbeat)
    poll_timeout: Option<Duration>,
    // byte length + promise resolver of each frame serialized into send_buffer
    serialized_frames: VecDeque<(u64, Option<PromiseResolver<()>>)>,
    // instant of the last successful write; drives heartbeat timing
    last_write: Instant,
}
impl<T: Source + Read + Write + Send + 'static> IoLoop<T> {
    /// Build an io loop for `socket`, (re)registering it with the poll registry.
    ///
    /// `poll` may carry an already-created `Poll` whose registry the socket was
    /// registered with before (then we reregister); otherwise a fresh `Poll` is
    /// created and the socket registered for the first time.
    pub(crate) fn new(
        connection: Connection,
        socket: T,
        poll: Option<(Poll, Token)>,
    ) -> Result<Self> {
        let (poll, registered) = poll
            .map(|t| Ok((t.0, true)))
            .unwrap_or_else(|| Poll::new().map(|poll| (poll, false)))?;
        // 8192 is the minimum buffer granularity before frame_max is negotiated
        let frame_size = std::cmp::max(8192, connection.configuration().frame_max() as usize);
        let waker = Arc::new(Waker::new(poll.registry(), WAKER)?);
        let mut inner = Self {
            connection,
            socket,
            status: Status::Initial,
            poll,
            waker,
            frame_size,
            receive_buffer: Buffer::with_capacity(FRAMES_STORAGE * frame_size),
            send_buffer: Buffer::with_capacity(FRAMES_STORAGE * frame_size),
            can_write: false,
            can_read: false,
            poll_timeout: None,
            serialized_frames: VecDeque::default(),
            last_write: Instant::now(),
        };
        if registered {
            inner.poll.registry().reregister(
                &mut inner.socket,
                SOCKET,
                Interest::READABLE | Interest::WRITABLE,
            )?;
        } else {
            inner.poll.registry().register(
                &mut inner.socket,
                SOCKET,
                Interest::READABLE | Interest::WRITABLE,
            )?;
        }
        Ok(inner)
    }
    /// Once the connection is established, grow the buffers to the negotiated
    /// frame size and derive the heartbeat-driven poll timeout.
    fn ensure_setup(&mut self) -> Result<()> {
        if self.status != Status::Setup && self.connection.status().connected() {
            let frame_max = self.connection.configuration().frame_max() as usize;
            self.frame_size = std::cmp::max(self.frame_size, frame_max);
            self.receive_buffer.grow(FRAMES_STORAGE * self.frame_size);
            self.send_buffer.grow(FRAMES_STORAGE * self.frame_size);
            let heartbeat = self.connection.configuration().heartbeat();
            if heartbeat != 0 {
                let heartbeat = Duration::from_millis(u64::from(heartbeat) * 500); // * 1000 (ms) / 2 (half the negotiated timeout)
                self.poll_timeout = Some(heartbeat);
            }
            self.status = Status::Setup;
        }
        Ok(())
    }
    /// Is there anything waiting to be written?
    fn has_data(&self) -> bool {
        self.connection.has_pending_frames()
            || self.send_buffer.available_data() > 0
            || !self.serialized_frames.is_empty()
    }
    /// Writable socket + pending data + connection not blocked by the broker.
    fn can_write(&self) -> bool {
        self.can_write && self.has_data() && !self.connection.status().blocked()
    }
    fn can_read(&self) -> bool {
        self.can_read
    }
    /// Do we have buffered bytes that may contain a parsable frame?
    fn can_parse(&self) -> bool {
        self.receive_buffer.available_data() > 0
    }
    /// Keep looping while connecting/connected/closing, not stopped, not errored.
    fn should_continue(&self) -> bool {
        let connection_status = self.connection.status();
        (self.status == Status::Initial
            || connection_status.connected()
            || connection_status.closing())
            && self.status != Status::Stop
            && !connection_status.errored()
    }
    /// Spawn the io thread and hand its join handle + waker to the connection.
    pub fn start(mut self) -> Result<()> {
        let waker = self.waker.clone();
        self.connection.clone().set_io_loop(
            ThreadBuilder::new()
                .name("io_loop".to_owned())
                .spawn(move || {
                    let mut events = Events::with_capacity(1024);
                    while self.should_continue() {
                        if let Err(err) = self.run(&mut events) {
                            self.cancel_serilized_frames(err)?;
                        }
                    }
                    Ok(())
                })?,
            waker,
        )
    }
    /// Fail every in-flight frame's resolver with `error`, then propagate it.
    // NOTE(review): method name has a typo ("serilized"); kept as-is since the
    // name is visible to the rest of the module.
    fn cancel_serilized_frames(&mut self, error: Error) -> Result<()> {
        for (_, resolver) in std::mem::take(&mut self.serialized_frames) {
            if let Some(resolver) = resolver {
                resolver.swear(Err(error.clone()));
            }
        }
        Err(error)
    }
    /// Remaining time before the next heartbeat is due (None = no heartbeat).
    fn poll_timeout(&self) -> Option<Duration> {
        self.poll_timeout.map(|timeout| {
            timeout
                .checked_sub(self.last_write.elapsed())
                .unwrap_or_else(|| Duration::from_secs(0))
        })
    }
    /// Wait for socket/waker events and update the readable/writable flags.
    fn poll(&mut self, events: &mut Events) -> Result<()> {
        trace!("io_loop poll");
        self.poll.poll(events, self.poll_timeout())?;
        trace!("io_loop poll done");
        for event in events.iter() {
            if event.token() == SOCKET {
                trace!("Got mio event for socket: {:?}", event);
                if event.is_read_closed() || event.is_write_closed() {
                    self.critical_error(io::Error::from(io::ErrorKind::ConnectionReset).into())?;
                }
                if event.is_error() {
                    self.critical_error(io::Error::from(io::ErrorKind::ConnectionAborted).into())?;
                }
                // Due to a bug in epoll/mio, it doesn't seem like we can trust this, it's sometimes missing when it should be there
                /*
                if event.is_readable() {
                    self.can_read = true;
                }
                */
                self.can_read = true;
                if event.is_writable() {
                    self.can_write = true;
                }
            }
        }
        Ok(())
    }
    /// One iteration: finish setup if needed, poll, then run the state machine.
    fn run(&mut self, events: &mut Events) -> Result<()> {
        trace!("io_loop run");
        self.ensure_setup()?;
        self.poll(events)?;
        self.do_run()
    }
    /// Drive write → read → parse → promises → heartbeat until no further
    /// progress is possible, then possibly schedule a wake-up for leftovers.
    fn do_run(&mut self) -> Result<()> {
        trace!(
            "io_loop do_run; can_read={}, can_write={}, has_data={}",
            self.can_read,
            self.can_write,
            self.has_data()
        );
        loop {
            self.write()?;
            if self.connection.status().closed() {
                self.status = Status::Stop;
            }
            if self.should_continue() {
                self.read()?;
            }
            self.parse()?;
            self.connection.poll_internal_promises()?;
            if self.should_heartbeat() {
                self.connection.send_heartbeat()?;
                // Update last_write so that if we cannot write yet to the socket, we don't enqueue countless heartbeats
                self.last_write = Instant::now();
            }
            if self.stop_looping() {
                self.maybe_continue()?;
                break;
            }
        }
        trace!(
            "io_loop do_run done; can_read={}, can_write={}, has_data={}, status={:?}",
            self.can_read,
            self.can_write,
            self.has_data(),
            self.status
        );
        Ok(())
    }
    /// True once we've been write-idle for longer than the heartbeat interval.
    fn should_heartbeat(&self) -> bool {
        if let Some(heartbeat_timeout) = self.poll_timeout {
            self.last_write.elapsed() > heartbeat_timeout
        } else {
            false
        }
    }
    /// Stop iterating when either direction is stalled or the loop/connection ended.
    fn stop_looping(&self) -> bool {
        !self.can_read()
            || !self.can_write()
            || self.status == Status::Stop
            || self.connection.status().errored()
    }
    fn has_pending_operations(&self) -> bool {
        self.status != Status::Stop && (self.can_read() || self.can_parse() || self.can_write())
    }
    /// If work remains after we stopped looping, wake ourselves up again.
    fn maybe_continue(&mut self) -> Result<()> {
        if self.has_pending_operations() {
            trace!(
                "io_loop send continue; can_read={}, can_write={}, has_data={}",
                self.can_read,
                self.can_write,
                self.has_data()
            );
            self.send_continue()?;
        }
        Ok(())
    }
    /// Record a fatal error: fail the connect promise if still handshaking,
    /// stop the loop, mark the connection errored and propagate.
    fn critical_error(&mut self, error: Error) -> Result<()> {
        if let ConnectionState::SentProtocolHeader(resolver, ..) = self.connection.status().state()
        {
            resolver.swear(Err(error.clone()));
        }
        self.status = Status::Stop;
        self.connection.set_error(error.clone())?;
        Err(error)
    }
    /// Try to write; WouldBlock just clears the writable flag, anything else is fatal.
    fn write(&mut self) -> Result<()> {
        if self.can_write() {
            if let Err(e) = self.write_to_stream() {
                if e.wouldblock() {
                    self.can_write = false
                } else {
                    error!("error writing: {:?}", e);
                    self.critical_error(e)?;
                }
            }
        }
        Ok(())
    }
    /// Try to read; WouldBlock just clears the readable flag, anything else is fatal.
    fn read(&mut self) -> Result<()> {
        if self.can_read() {
            if let Err(e) = self.read_from_stream() {
                if e.wouldblock() {
                    self.can_read = false
                } else {
                    error!("error reading: {:?}", e);
                    self.critical_error(e)?;
                }
            }
            // compact the buffer when there isn't room for a full frame
            self.receive_buffer.shift_unless_available(self.frame_size);
        }
        Ok(())
    }
    /// Wake the io thread's poll from wherever this is called.
    fn send_continue(&mut self) -> Result<()> {
        self.waker.wake()?;
        Ok(())
    }
    /// Serialize pending frames into the send buffer, push bytes to the socket
    /// and resolve the promises of frames that were fully written.
    fn write_to_stream(&mut self) -> Result<()> {
        self.serialize()?;
        let sz = self.send_buffer.write_to(&mut self.socket)?;
        if sz > 0 {
            self.last_write = Instant::now();
            trace!("wrote {} bytes", sz);
            self.send_buffer.consume(sz, true);
            let mut written = sz as u64;
            // Walk the frame queue, resolving every frame fully covered by
            // `written` and putting back the remainder of a partial frame.
            while written > 0 {
                if let Some((to_write, resolver)) = self.serialized_frames.pop_front() {
                    if written < to_write {
                        self.serialized_frames
                            .push_front((to_write - written, resolver));
                        trace!("{} to write to complete this frame", to_write - written);
                        written = 0;
                    } else {
                        if let Some(resolver) = resolver {
                            resolver.swear(Ok(()));
                        }
                        written -= to_write;
                    }
                } else {
                    error!(
                        "We've written {} but didn't expect to write anything",
                        written
                    );
                    break;
                }
            }
            if self.send_buffer.available_data() > 0 {
                // We didn't write all the data yet
                trace!("Still {} to write", self.send_buffer.available_data());
                self.send_continue()?;
            } else {
                self.socket.flush()?;
            }
        } else {
            error!("Socket was writable but we wrote 0, marking as wouldblock");
            self.can_write = false;
        }
        Ok(())
    }
    /// Pull bytes off the socket into the receive buffer (no-op when closed).
    fn read_from_stream(&mut self) -> Result<()> {
        match self.connection.status().state() {
            ConnectionState::Closed => Ok(()),
            ConnectionState::Error => Err(Error::InvalidConnectionState(ConnectionState::Error)),
            _ => {
                self.receive_buffer
                    .read_from(&mut self.socket, false)
                    .map(|sz| {
                        trace!("read {} bytes", sz);
                        self.receive_buffer.fill(sz, false);
                    })?;
                Ok(())
            }
        }
    }
    /// Encode the next pending frame into the send buffer; a too-small buffer
    /// requeues the frame, any other generator error is fatal.
    fn serialize(&mut self) -> Result<()> {
        if let Some((next_msg, resolver)) = self.connection.next_frame() {
            // FIXME: having the body here is too verbose, impl Display on frame instead: trace!("will write to buffer: {:?}", next_msg);
            trace!("Will write to buffer");
            let checkpoint = self.send_buffer.checkpoint();
            let res = gen_frame(&next_msg)((&mut self.send_buffer).into());
            match res.map(|w| w.into_inner().1) {
                Ok(sz) => {
                    self.serialized_frames.push_back((sz, resolver));
                    Ok(())
                }
                Err(e) => {
                    // Undo the partial write before deciding how to handle the error.
                    self.send_buffer.rollback(checkpoint);
                    match e {
                        GenError::BufferTooSmall(_) => {
                            // Requeue msg
                            self.connection.requeue_frame((next_msg, resolver))?;
                            Ok(())
                        }
                        e => {
                            error!("error generating frame: {:?}", e);
                            let error = Error::SerialisationError(Arc::new(e));
                            self.connection.set_error(error.clone())?;
                            Err(error)
                        }
                    }
                }
            }
        } else {
            Ok(())
        }
    }
    /// Parse at most one frame from the receive buffer and hand it to the connection.
    fn parse(&mut self) -> Result<()> {
        if self.can_parse() {
            if let Some(frame) = self.do_parse()? {
                self.connection.handle_frame(frame)?;
            }
        }
        Ok(())
    }
    /// Try to decode one AMQP frame; incomplete input compacts the buffer and
    /// yields None, a real parse error is fatal.
    fn do_parse(&mut self) -> Result<Option<AMQPFrame>> {
        match parse_frame(self.receive_buffer.data()) {
            Ok((i, f)) => {
                let consumed = self.receive_buffer.offset(i);
                self.receive_buffer.consume(consumed, false);
                Ok(Some(f))
            }
            Err(e) => {
                if e.is_incomplete() {
                    self.receive_buffer.shift();
                    Ok(None)
                } else {
                    error!("parse error: {:?}", e);
                    let error = Error::ParsingError(e);
                    self.connection.set_error(error.clone())?;
                    Err(error)
                }
            }
        }
    }
}
flush more often
Signed-off-by: Marc-Antoine Perennou <07f76cf0511c79b361712839686f3cee8c75791c@Perennou.com>
use crate::{
buffer::Buffer, connection::Connection, connection_status::ConnectionState, Error,
PromiseResolver, Result,
};
use amq_protocol::frame::{gen_frame, parse_frame, AMQPFrame, GenError};
use log::{error, trace};
use mio::{event::Source, Events, Interest, Poll, Token, Waker};
use std::{
collections::VecDeque,
io::{self, Read, Write},
sync::Arc,
thread::Builder as ThreadBuilder,
time::{Duration, Instant},
};
// mio tokens: the socket itself and the cross-thread waker
pub(crate) const SOCKET: Token = Token(1);
const WAKER: Token = Token(2);
// buffers are sized to hold this many frames of `frame_size` bytes
const FRAMES_STORAGE: usize = 32;
/// Lifecycle of the io loop.
#[derive(Debug, PartialEq)]
enum Status {
    // created, connection not yet established
    Initial,
    // buffers/heartbeat configured for the connected session
    Setup,
    // the loop must terminate
    Stop,
}
/// The io thread's state: socket, buffers and bookkeeping for one AMQP connection.
pub struct IoLoop<T> {
    connection: Connection,
    socket: T,
    status: Status,
    poll: Poll,
    // wakes the poll from other threads when new frames are queued
    waker: Arc<Waker>,
    // max frame size; buffers hold FRAMES_STORAGE frames of this size
    frame_size: usize,
    receive_buffer: Buffer,
    send_buffer: Buffer,
    // socket readiness flags, cleared when an operation would block
    can_write: bool,
    can_read: bool,
    // heartbeat interval used as the poll timeout (None = no heartbeat)
    poll_timeout: Option<Duration>,
    // byte length + promise resolver of each frame serialized into send_buffer
    serialized_frames: VecDeque<(u64, Option<PromiseResolver<()>>)>,
    // instant of the last successful write; drives heartbeat timing
    last_write: Instant,
}
impl<T: Source + Read + Write + Send + 'static> IoLoop<T> {
pub(crate) fn new(
connection: Connection,
socket: T,
poll: Option<(Poll, Token)>,
) -> Result<Self> {
let (poll, registered) = poll
.map(|t| Ok((t.0, true)))
.unwrap_or_else(|| Poll::new().map(|poll| (poll, false)))?;
let frame_size = std::cmp::max(8192, connection.configuration().frame_max() as usize);
let waker = Arc::new(Waker::new(poll.registry(), WAKER)?);
let mut inner = Self {
connection,
socket,
status: Status::Initial,
poll,
waker,
frame_size,
receive_buffer: Buffer::with_capacity(FRAMES_STORAGE * frame_size),
send_buffer: Buffer::with_capacity(FRAMES_STORAGE * frame_size),
can_write: false,
can_read: false,
poll_timeout: None,
serialized_frames: VecDeque::default(),
last_write: Instant::now(),
};
if registered {
inner.poll.registry().reregister(
&mut inner.socket,
SOCKET,
Interest::READABLE | Interest::WRITABLE,
)?;
} else {
inner.poll.registry().register(
&mut inner.socket,
SOCKET,
Interest::READABLE | Interest::WRITABLE,
)?;
}
Ok(inner)
}
fn ensure_setup(&mut self) -> Result<()> {
if self.status != Status::Setup && self.connection.status().connected() {
let frame_max = self.connection.configuration().frame_max() as usize;
self.frame_size = std::cmp::max(self.frame_size, frame_max);
self.receive_buffer.grow(FRAMES_STORAGE * self.frame_size);
self.send_buffer.grow(FRAMES_STORAGE * self.frame_size);
let heartbeat = self.connection.configuration().heartbeat();
if heartbeat != 0 {
let heartbeat = Duration::from_millis(u64::from(heartbeat) * 500); // * 1000 (ms) / 2 (half the negociated timeout)
self.poll_timeout = Some(heartbeat);
}
self.status = Status::Setup;
}
Ok(())
}
fn has_data(&self) -> bool {
self.connection.has_pending_frames()
|| self.send_buffer.available_data() > 0
|| !self.serialized_frames.is_empty()
}
fn can_write(&self) -> bool {
self.can_write && self.has_data() && !self.connection.status().blocked()
}
fn can_read(&self) -> bool {
self.can_read
}
fn can_parse(&self) -> bool {
self.receive_buffer.available_data() > 0
}
fn should_continue(&self) -> bool {
let connection_status = self.connection.status();
(self.status == Status::Initial
|| connection_status.connected()
|| connection_status.closing())
&& self.status != Status::Stop
&& !connection_status.errored()
}
pub fn start(mut self) -> Result<()> {
let waker = self.waker.clone();
self.connection.clone().set_io_loop(
ThreadBuilder::new()
.name("io_loop".to_owned())
.spawn(move || {
let mut events = Events::with_capacity(1024);
while self.should_continue() {
if let Err(err) = self.run(&mut events) {
self.cancel_serilized_frames(err)?;
}
}
Ok(())
})?,
waker,
)
}
fn cancel_serilized_frames(&mut self, error: Error) -> Result<()> {
for (_, resolver) in std::mem::take(&mut self.serialized_frames) {
if let Some(resolver) = resolver {
resolver.swear(Err(error.clone()));
}
}
Err(error)
}
fn poll_timeout(&self) -> Option<Duration> {
self.poll_timeout.map(|timeout| {
timeout
.checked_sub(self.last_write.elapsed())
.unwrap_or_else(|| Duration::from_secs(0))
})
}
fn poll(&mut self, events: &mut Events) -> Result<()> {
trace!("io_loop poll");
self.poll.poll(events, self.poll_timeout())?;
trace!("io_loop poll done");
for event in events.iter() {
if event.token() == SOCKET {
trace!("Got mio event for socket: {:?}", event);
if event.is_read_closed() || event.is_write_closed() {
self.critical_error(io::Error::from(io::ErrorKind::ConnectionReset).into())?;
}
if event.is_error() {
self.critical_error(io::Error::from(io::ErrorKind::ConnectionAborted).into())?;
}
// Due to a bug in epoll/mio, it doesn't seem like we can trust this, it's sometimes missing when it should be there
/*
if event.is_readable() {
self.can_read = true;
}
*/
self.can_read = true;
if event.is_writable() {
self.can_write = true;
}
}
}
Ok(())
}
fn run(&mut self, events: &mut Events) -> Result<()> {
trace!("io_loop run");
self.ensure_setup()?;
self.poll(events)?;
self.do_run()
}
fn do_run(&mut self) -> Result<()> {
trace!(
"io_loop do_run; can_read={}, can_write={}, has_data={}",
self.can_read,
self.can_write,
self.has_data()
);
loop {
self.write()?;
if self.connection.status().closed() {
self.status = Status::Stop;
}
if self.should_continue() {
self.read()?;
}
self.parse()?;
self.connection.poll_internal_promises()?;
if self.should_heartbeat() {
self.connection.send_heartbeat()?;
// Update last_write so that if we cannot write yet to the socket, we don't enqueue countless heartbeats
self.last_write = Instant::now();
}
if self.stop_looping() {
self.maybe_continue()?;
break;
}
}
trace!(
"io_loop do_run done; can_read={}, can_write={}, has_data={}, status={:?}",
self.can_read,
self.can_write,
self.has_data(),
self.status
);
Ok(())
}
fn should_heartbeat(&self) -> bool {
if let Some(heartbeat_timeout) = self.poll_timeout {
self.last_write.elapsed() > heartbeat_timeout
} else {
false
}
}
fn stop_looping(&self) -> bool {
!self.can_read()
|| !self.can_write()
|| self.status == Status::Stop
|| self.connection.status().errored()
}
fn has_pending_operations(&self) -> bool {
self.status != Status::Stop && (self.can_read() || self.can_parse() || self.can_write())
}
fn maybe_continue(&mut self) -> Result<()> {
if self.has_pending_operations() {
trace!(
"io_loop send continue; can_read={}, can_write={}, has_data={}",
self.can_read,
self.can_write,
self.has_data()
);
self.send_continue()?;
}
Ok(())
}
fn critical_error(&mut self, error: Error) -> Result<()> {
if let ConnectionState::SentProtocolHeader(resolver, ..) = self.connection.status().state()
{
resolver.swear(Err(error.clone()));
}
self.status = Status::Stop;
self.connection.set_error(error.clone())?;
Err(error)
}
fn write(&mut self) -> Result<()> {
if self.can_write() {
if let Err(e) = self.write_to_stream() {
if e.wouldblock() {
self.can_write = false
} else {
error!("error writing: {:?}", e);
self.critical_error(e)?;
}
}
}
Ok(())
}
fn read(&mut self) -> Result<()> {
if self.can_read() {
if let Err(e) = self.read_from_stream() {
if e.wouldblock() {
self.can_read = false
} else {
error!("error reading: {:?}", e);
self.critical_error(e)?;
}
}
self.receive_buffer.shift_unless_available(self.frame_size);
}
Ok(())
}
fn send_continue(&mut self) -> Result<()> {
self.waker.wake()?;
Ok(())
}
fn write_to_stream(&mut self) -> Result<()> {
self.socket.flush()?;
self.serialize()?;
let sz = self.send_buffer.write_to(&mut self.socket)?;
if sz > 0 {
self.last_write = Instant::now();
trace!("wrote {} bytes", sz);
self.send_buffer.consume(sz, true);
let mut written = sz as u64;
while written > 0 {
if let Some((to_write, resolver)) = self.serialized_frames.pop_front() {
if written < to_write {
self.serialized_frames
.push_front((to_write - written, resolver));
trace!("{} to write to complete this frame", to_write - written);
written = 0;
} else {
if let Some(resolver) = resolver {
resolver.swear(Ok(()));
}
written -= to_write;
}
} else {
error!(
"We've written {} but didn't expect to write anything",
written
);
break;
}
}
if self.send_buffer.available_data() > 0 {
// We didn't write all the data yet
trace!("Still {} to write", self.send_buffer.available_data());
self.send_continue()?;
}
self.socket.flush()?;
} else {
error!("Socket was writable but we wrote 0, marking as wouldblock");
self.can_write = false;
}
Ok(())
}
fn read_from_stream(&mut self) -> Result<()> {
match self.connection.status().state() {
ConnectionState::Closed => Ok(()),
ConnectionState::Error => Err(Error::InvalidConnectionState(ConnectionState::Error)),
_ => {
self.receive_buffer
.read_from(&mut self.socket, false)
.map(|sz| {
trace!("read {} bytes", sz);
self.receive_buffer.fill(sz, false);
})?;
Ok(())
}
}
}
fn serialize(&mut self) -> Result<()> {
if let Some((next_msg, resolver)) = self.connection.next_frame() {
// FIXME: having the body here is too verbose, impl Display on frame instead: trace!("will write to buffer: {:?}", next_msg);
trace!("Will write to buffer");
let checkpoint = self.send_buffer.checkpoint();
let res = gen_frame(&next_msg)((&mut self.send_buffer).into());
match res.map(|w| w.into_inner().1) {
Ok(sz) => {
self.serialized_frames.push_back((sz, resolver));
Ok(())
}
Err(e) => {
self.send_buffer.rollback(checkpoint);
match e {
GenError::BufferTooSmall(_) => {
// Requeue msg
self.connection.requeue_frame((next_msg, resolver))?;
Ok(())
}
e => {
error!("error generating frame: {:?}", e);
let error = Error::SerialisationError(Arc::new(e));
self.connection.set_error(error.clone())?;
Err(error)
}
}
}
}
} else {
Ok(())
}
}
fn parse(&mut self) -> Result<()> {
if self.can_parse() {
if let Some(frame) = self.do_parse()? {
self.connection.handle_frame(frame)?;
}
}
Ok(())
}
fn do_parse(&mut self) -> Result<Option<AMQPFrame>> {
match parse_frame(self.receive_buffer.data()) {
Ok((i, f)) => {
let consumed = self.receive_buffer.offset(i);
self.receive_buffer.consume(consumed, false);
Ok(Some(f))
}
Err(e) => {
if e.is_incomplete() {
self.receive_buffer.shift();
Ok(None)
} else {
error!("parse error: {:?}", e);
let error = Error::ParsingError(e);
self.connection.set_error(error.clone())?;
Err(error)
}
}
}
}
}
|
//! IPC for way-cooler
use std::thread;
use std::env;
use std::path::PathBuf;
use std::fs;
use unix_socket::UnixListener;
mod channel;
mod command;
mod event;
#[cfg(test)]
mod tests;
/// Protocol version; incremented on breaking IPC changes.
pub const VERSION: u64 = 0u64; // Increment to 1 on release.
/// Root folder under which per-instance socket folders are created.
/// Very much not cross-platform!
/// Submit an issue when Wayland is ported to Windoze.
pub const TEMP_FOLDER: &'static str = "/tmp/way-cooler/";
/// Socket over which synchronous communication is made with clients.
pub const COMMAND_SOCKET: &'static str = "command";
/// Socket over which events are sent to clients.
pub const EVENT_SOCKET: &'static str = "event";
/// Environment variable advertising the folder in which sockets are created.
pub const PATH_VAR: &'static str = "WAY_COOLER_TEMPFOLDER";
/// We need random folder names to place sockets in, but they don't need
/// to be _that_ random.
///
/// Hashes the `Debug` representation of `Instant::now()` and truncates the
/// result to a `u32` small enough to make a comfortable folder name.
pub fn unique_ish_id() -> u32 {
    // `SipHasher` has been deprecated since Rust 1.13; `DefaultHasher` is the
    // supported general-purpose replacement and is just as good here.
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    use std::time::Instant;
    // If you shift a u64 hash right by this you get a "checksum",
    // a number which retains some of the entropy of the hash but
    // is small enough to fit a more comfortable file name.
    const MAGIC_SHIFT_NUMBER: u64 = 0b110000;
    // Instant doesn't implement Hash, and it's supposed to be an opaque
    // struct, but it does implement Debug...
    let now = Instant::now();
    let mut hasher = DefaultHasher::new();
    format!("{:?}", now).hash(&mut hasher);
    (hasher.finish() >> MAGIC_SHIFT_NUMBER) as u32
}
/// Initialize the Lua server.
///
/// Creates a unique folder under `TEMP_FOLDER`, exports its path through the
/// `PATH_VAR` environment variable, binds the command and event sockets in
/// it, and spawns one detached listener thread per socket.
pub fn init() {
    trace!("Initializing way-cooler IPC...");
    let id = unique_ish_id();
    info!("Starting IPC with unique ID {}", id);
    let mut path = PathBuf::from(TEMP_FOLDER);
    path.push(id.to_string());
    // NOTE(review): the env var is exported before the folder and sockets
    // exist, so on the error path below clients can observe a path that was
    // never created — consider setting it only after a successful bind.
    env::set_var(PATH_VAR, path.clone());
    if let Err(ioerr) = fs::create_dir_all(path.clone()) {
        // How can we handle not having a socket?
        // In the future, we could log and continue.
        // We could have a config option to not create/create-if-possible
        error!("Unable to create temp folder: {:?}", ioerr);
        return;
    }
    else {
        // Failing to bind either socket is fatal for IPC, hence the expects.
        let command_socket = UnixListener::bind(path.join(COMMAND_SOCKET))
            .expect("Unable to open command socket!");
        let event_socket = UnixListener::bind(path.join(EVENT_SOCKET))
            .expect("Unable to open event socket!");
        debug!("IPC initialized, now listening for clients.");
        // Handles are intentionally dropped: the listener threads are
        // detached and live for the rest of the process.
        let _server_handle = thread::Builder::new()
            .name("Command socket listener".to_string())
            .spawn(move || { command_thread(command_socket) });
        let _event_handle = thread::Builder::new()
            .name("Event socket listener".to_string())
            .spawn(move || { event_thread(event_socket) });
        trace!("IPC initialized.");
    }
}
/// Accept loop for the command socket: spawns one helper thread per client.
fn command_thread(socket: UnixListener) {
    for stream in socket.incoming() {
        // Typo fixed in the log message: "Sever" -> "Server".
        trace!("Server: new connection: {:?}", stream);
        match stream {
            Ok(mut stream) => {
                info!("Command: connected to {:?}", stream);
                // Detached helper thread per client; handle intentionally dropped.
                let _handle = thread::Builder::new()
                    .name("IPC server helper".to_string())
                    .spawn(move || command::thread(&mut stream));
            },
            Err(err) => {
                info!("Error receiving a stream: {}", err);
            }
        }
    }
}
/// Accept loop for the event socket: each client gets a helper thread that
/// streams events to it.
fn event_thread(socket: UnixListener) {
    for stream in socket.incoming() {
        trace!("Event: new connection: {:?}", stream);
        match stream {
            Ok(mut stream) => {
                info!("Event: connected to {:?}", stream);
                // Detached helper thread per client; handle intentionally dropped.
                let _handle = thread::Builder::new()
                    .name("IPC event helper".to_string()) // typo fixed: "evemt"
                    .spawn(move || event::thread(&mut stream));
            },
            Err(err) => {
                info!("Error receiving a stream: {}", err);
            }
        }
    }
}
Change socket path to /run
//! IPC for way-cooler
use std::thread;
use std::env;
use std::path::PathBuf;
use std::fs;
use unix_socket::UnixListener;
mod channel;
mod command;
mod event;
#[cfg(test)]
mod tests;
/// Protocol version; incremented on breaking IPC changes.
pub const VERSION: u64 = 0u64; // Increment to 1 on release.
/// Root folder under which per-instance socket folders are created.
/// Very much not cross-platform!
/// Submit an issue when Wayland is ported to Windoze.
pub const TEMP_FOLDER: &'static str = "/run/way-cooler/";
/// Socket over which synchronous communication is made with clients.
pub const COMMAND_SOCKET: &'static str = "command";
/// Socket over which events are sent to clients.
pub const EVENT_SOCKET: &'static str = "event";
/// Environment variable advertising the folder in which sockets are created.
pub const PATH_VAR: &'static str = "WAY_COOLER_SOCKET_FOLDER";
/// We need random folder names to place sockets in, but they don't need
/// to be _that_ random.
///
/// Hashes the `Debug` representation of `Instant::now()` and truncates the
/// result to a `u32` small enough to make a comfortable folder name.
pub fn unique_ish_id() -> u32 {
    // `SipHasher` has been deprecated since Rust 1.13; `DefaultHasher` is the
    // supported general-purpose replacement and is just as good here.
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    use std::time::Instant;
    // If you shift a u64 hash right by this you get a "checksum",
    // a number which retains some of the entropy of the hash but
    // is small enough to fit a more comfortable file name.
    const MAGIC_SHIFT_NUMBER: u64 = 0b110000;
    // Instant doesn't implement Hash, and it's supposed to be an opaque
    // struct, but it does implement Debug...
    let now = Instant::now();
    let mut hasher = DefaultHasher::new();
    format!("{:?}", now).hash(&mut hasher);
    (hasher.finish() >> MAGIC_SHIFT_NUMBER) as u32
}
/// Initialize the Lua server.
///
/// Creates a unique folder under `TEMP_FOLDER`, binds the command and event
/// sockets in it, exports the folder path through the `PATH_VAR` environment
/// variable only once both binds succeed, and spawns one detached listener
/// thread per socket.
pub fn init() {
    trace!("Initializing way-cooler IPC...");
    let id = unique_ish_id();
    info!("Starting IPC with unique ID {}", id);
    let mut path = PathBuf::from(TEMP_FOLDER);
    path.push(id.to_string());
    if let Err(ioerr) = fs::create_dir_all(path.clone()) {
        // How can we handle not having a socket?
        // In the future, we could log and continue.
        // We could have a config option to not create/create-if-possible
        error!("Unable to create temp folder: {:?}", ioerr);
        return;
    }
    else {
        // Failing to bind either socket is fatal for IPC, hence the expects.
        let command_socket = UnixListener::bind(path.join(COMMAND_SOCKET))
            .expect("Unable to open command socket!");
        let event_socket = UnixListener::bind(path.join(EVENT_SOCKET))
            .expect("Unable to open event socket!");
        // Only advertise the folder to clients after the sockets exist.
        env::set_var(PATH_VAR, path.clone());
        debug!("IPC initialized, now listening for clients.");
        // Handles are intentionally dropped: the listener threads are
        // detached and live for the rest of the process.
        let _server_handle = thread::Builder::new()
            .name("Command socket listener".to_string())
            .spawn(move || { command_thread(command_socket) });
        let _event_handle = thread::Builder::new()
            .name("Event socket listener".to_string())
            .spawn(move || { event_thread(event_socket) });
        trace!("IPC initialized.");
    }
}
/// Accept loop for the command socket: spawns one helper thread per client.
fn command_thread(socket: UnixListener) {
    for stream in socket.incoming() {
        // Typo fixed in the log message: "Sever" -> "Server".
        trace!("Server: new connection: {:?}", stream);
        match stream {
            Ok(mut stream) => {
                info!("Command: connected to {:?}", stream);
                // Detached helper thread per client; handle intentionally dropped.
                let _handle = thread::Builder::new()
                    .name("IPC server helper".to_string())
                    .spawn(move || command::thread(&mut stream));
            },
            Err(err) => {
                info!("Error receiving a stream: {}", err);
            }
        }
    }
}
/// Accept loop for the event socket: each client gets a helper thread that
/// streams events to it.
fn event_thread(socket: UnixListener) {
    for stream in socket.incoming() {
        trace!("Event: new connection: {:?}", stream);
        match stream {
            Ok(mut stream) => {
                info!("Event: connected to {:?}", stream);
                // Detached helper thread per client; handle intentionally dropped.
                let _handle = thread::Builder::new()
                    .name("IPC event helper".to_string()) // typo fixed: "evemt"
                    .spawn(move || event::thread(&mut stream));
            },
            Err(err) => {
                info!("Error receiving a stream: {}", err);
            }
        }
    }
}
|
//! Compound types (unions and structs) in our intermediate representation.
use super::analysis::Sizedness;
use super::annotations::Annotations;
use super::context::{BindgenContext, FunctionId, ItemId, TypeId, VarId};
use super::dot::DotAttributes;
use super::item::{IsOpaque, Item};
use super::layout::Layout;
use super::template::TemplateParameters;
use super::traversal::{EdgeKind, Trace, Tracer};
use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use crate::clang;
use crate::codegen::struct_layout::{align_to, bytes_from_bits_pow2};
use crate::ir::derive::CanDeriveCopy;
use crate::parse::{ClangItemParser, ParseError};
use crate::HashMap;
use peeking_take_while::PeekableExt;
use std::cmp;
use std::io;
use std::mem;
/// The kind of compound type: a `struct` or a `union`.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum CompKind {
    /// A struct.
    Struct,
    /// A union.
    Union,
}
/// The kind of C++ method.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum MethodKind {
    /// A constructor. We represent it as method for convenience, to avoid code
    /// duplication.
    Constructor,
    /// A (non-virtual) destructor.
    Destructor,
    /// A virtual destructor.
    VirtualDestructor {
        /// Whether it's pure virtual.
        pure_virtual: bool,
    },
    /// A static method.
    Static,
    /// A normal (non-virtual instance) method.
    Normal,
    /// A virtual method.
    Virtual {
        /// Whether it's pure virtual.
        pure_virtual: bool,
    },
}
impl MethodKind {
    /// Is this a destructor method (virtual or not)?
    pub fn is_destructor(&self) -> bool {
        matches!(
            *self,
            MethodKind::Destructor | MethodKind::VirtualDestructor { .. }
        )
    }

    /// Is this a pure virtual method?
    pub fn is_pure_virtual(&self) -> bool {
        if let MethodKind::Virtual { pure_virtual } |
            MethodKind::VirtualDestructor { pure_virtual } = *self
        {
            pure_virtual
        } else {
            false
        }
    }
}
/// A struct representing a C++ method, either static, normal, or virtual.
#[derive(Debug)]
pub struct Method {
    /// What flavor of method this is (static, virtual, constructor, ...).
    kind: MethodKind,
    /// The signature of the method. Take into account this is not a `Type`
    /// item, but a `Function` one.
    ///
    /// This is tricky and probably this field should be renamed.
    signature: FunctionId,
    /// Whether the method is `const`-qualified.
    is_const: bool,
}
impl Method {
    /// Construct a new `Method` with the given kind, signature, and
    /// constness.
    pub fn new(
        kind: MethodKind,
        signature: FunctionId,
        is_const: bool,
    ) -> Self {
        Self {
            kind,
            signature,
            is_const,
        }
    }

    /// What kind of method is this?
    pub fn kind(&self) -> MethodKind {
        self.kind
    }

    /// Is this a constructor?
    pub fn is_constructor(&self) -> bool {
        matches!(self.kind, MethodKind::Constructor)
    }

    /// Is this a virtual method (including virtual destructors)?
    pub fn is_virtual(&self) -> bool {
        matches!(
            self.kind,
            MethodKind::Virtual { .. } | MethodKind::VirtualDestructor { .. }
        )
    }

    /// Is this a static method?
    pub fn is_static(&self) -> bool {
        matches!(self.kind, MethodKind::Static)
    }

    /// Get the id for the `Function` signature for this method.
    pub fn signature(&self) -> FunctionId {
        self.signature
    }

    /// Is this a const qualified method?
    pub fn is_const(&self) -> bool {
        self.is_const
    }
}
/// Methods common to the various field types.
pub trait FieldMethods {
    /// Get the name of this field, if it has one.
    fn name(&self) -> Option<&str>;
    /// Get the type of this field.
    fn ty(&self) -> TypeId;
    /// Get the comment for this field, if any.
    fn comment(&self) -> Option<&str>;
    /// If this is a bitfield, how many bits does it need?
    fn bitfield_width(&self) -> Option<u32>;
    /// Is this field marked as `mutable`?
    fn is_mutable(&self) -> bool;
    /// Get the annotations for this field.
    fn annotations(&self) -> &Annotations;
    /// The offset of the field (in bits), when known.
    fn offset(&self) -> Option<usize>;
}
/// A contiguous set of logical bitfields that live within the same physical
/// allocation unit. See 9.2.4 [class.bit] in the C++ standard and [section
/// 2.4.II.1 in the Itanium C++
/// ABI](http://itanium-cxx-abi.github.io/cxx-abi/abi.html#class-types).
#[derive(Debug)]
pub struct BitfieldUnit {
    /// 1-based index of this unit within its containing struct.
    nth: usize,
    /// Size and alignment of the whole allocation unit.
    layout: Layout,
    /// The logical bitfields packed into this unit.
    bitfields: Vec<Bitfield>,
}
impl BitfieldUnit {
    /// Get the 1-based index of this bitfield unit within its containing
    /// struct. Useful for generating a Rust struct's field name for this unit
    /// of bitfields.
    pub fn nth(&self) -> usize {
        self.nth
    }

    /// Get the layout within which these bitfields reside.
    pub fn layout(&self) -> Layout {
        self.layout
    }

    /// Get the bitfields within this unit.
    pub fn bitfields(&self) -> &[Bitfield] {
        &self.bitfields
    }
}
/// A struct representing a C++ field.
#[derive(Debug)]
pub enum Field {
    /// A normal data member.
    DataMember(FieldData),
    /// A physical allocation unit containing many logical bitfields.
    Bitfields(BitfieldUnit),
}
impl Field {
    /// Get this field's layout: the unit layout for bitfield units, or the
    /// layout of the member's type for plain data members.
    pub fn layout(&self, ctx: &BindgenContext) -> Option<Layout> {
        match *self {
            Field::DataMember(ref data) => {
                ctx.resolve_type(data.ty).layout(ctx)
            }
            Field::Bitfields(BitfieldUnit { layout, .. }) => Some(layout),
        }
    }
}
impl Trace for Field {
    type Extra = ();

    // Visit the type edge(s) hanging off this field: one per data member, or
    // one per logical bitfield in a bitfield unit.
    fn trace<T>(&self, _: &BindgenContext, tracer: &mut T, _: &())
    where
        T: Tracer,
    {
        match *self {
            Field::DataMember(ref data) => {
                tracer.visit_kind(data.ty.into(), EdgeKind::Field);
            }
            Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => {
                for bf in bitfields {
                    tracer.visit_kind(bf.ty().into(), EdgeKind::Field);
                }
            }
        }
    }
}
impl DotAttributes for Field {
    // Emit graphviz HTML-table rows describing this field: plain members
    // delegate to their data, bitfield units render a nested table with the
    // unit layout followed by one row per logical bitfield.
    fn dot_attributes<W>(
        &self,
        ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        match *self {
            Field::DataMember(ref data) => data.dot_attributes(ctx, out),
            Field::Bitfields(BitfieldUnit {
                layout,
                ref bitfields,
                ..
            }) => {
                writeln!(
                    out,
                    r#"<tr>
<td>bitfield unit</td>
<td>
<table border="0">
<tr>
<td>unit.size</td><td>{}</td>
</tr>
<tr>
<td>unit.align</td><td>{}</td>
</tr>
"#,
                    layout.size, layout.align
                )?;
                for bf in bitfields {
                    bf.dot_attributes(ctx, out)?;
                }
                writeln!(out, "</table></td></tr>")
            }
        }
    }
}
impl DotAttributes for FieldData {
    // One graphviz table row: "name | type id".
    fn dot_attributes<W>(
        &self,
        _ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        writeln!(
            out,
            "<tr><td>{}</td><td>{:?}</td></tr>",
            self.name().unwrap_or("(anonymous)"),
            self.ty()
        )
    }
}
impl DotAttributes for Bitfield {
    // One graphviz table row: "name : width | type id".
    fn dot_attributes<W>(
        &self,
        _ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        writeln!(
            out,
            "<tr><td>{} : {}</td><td>{:?}</td></tr>",
            self.name().unwrap_or("(anonymous)"),
            self.width(),
            self.ty()
        )
    }
}
/// A logical bitfield within some physical bitfield allocation unit.
#[derive(Debug)]
pub struct Bitfield {
    /// Index of the bit within this bitfield's allocation unit where this
    /// bitfield's bits begin.
    offset_into_unit: usize,

    /// The field data for this bitfield.
    data: FieldData,

    /// Name of the generated Rust getter for this bitfield.
    ///
    /// Should be assigned before codegen.
    getter_name: Option<String>,

    /// Name of the generated Rust setter for this bitfield.
    ///
    /// Should be assigned before codegen.
    setter_name: Option<String>,
}
impl Bitfield {
    /// Construct a new bitfield.
    ///
    /// # Panics
    ///
    /// Panics if `raw` carries no bitfield width.
    fn new(offset_into_unit: usize, raw: RawField) -> Bitfield {
        assert!(raw.bitfield_width().is_some());
        Bitfield {
            offset_into_unit,
            data: raw.0,
            getter_name: None,
            setter_name: None,
        }
    }

    /// Get the index of the bit within this bitfield's allocation unit where
    /// this bitfield begins.
    pub fn offset_into_unit(&self) -> usize {
        self.offset_into_unit
    }

    /// Get the mask value that when &'ed with this bitfield's allocation unit
    /// produces this bitfield's value.
    pub fn mask(&self) -> u64 {
        use std::u64;
        // Special-case a full 64-bit field: `1u64 << 64` would overflow.
        let unoffseted_mask =
            if self.width() as u64 == mem::size_of::<u64>() as u64 * 8 {
                u64::MAX
            } else {
                (1u64 << self.width()) - 1u64
            };
        unoffseted_mask << self.offset_into_unit()
    }

    /// Get the bit width of this bitfield.
    pub fn width(&self) -> u32 {
        self.data.bitfield_width().unwrap()
    }

    /// Name of the generated Rust getter for this bitfield.
    ///
    /// # Panics
    ///
    /// Panics if called before assigning bitfield accessor names or if
    /// this bitfield has no name.
    pub fn getter_name(&self) -> &str {
        assert!(
            self.name().is_some(),
            "`Bitfield::getter_name` called on anonymous field"
        );
        // Message kept on one line: the previous backslash-newline
        // continuation swallowed the following whitespace and produced
        // "afterassigning" in the panic message.
        self.getter_name.as_ref().expect(
            "`Bitfield::getter_name` should only be called after assigning bitfield accessor names",
        )
    }

    /// Name of the generated Rust setter for this bitfield.
    ///
    /// # Panics
    ///
    /// Panics if called before assigning bitfield accessor names or if
    /// this bitfield has no name.
    pub fn setter_name(&self) -> &str {
        assert!(
            self.name().is_some(),
            "`Bitfield::setter_name` called on anonymous field"
        );
        self.setter_name.as_ref().expect(
            "`Bitfield::setter_name` should only be called after assigning bitfield accessor names",
        )
    }
}
// Pure delegation to the wrapped `FieldData`.
impl FieldMethods for Bitfield {
    fn name(&self) -> Option<&str> {
        self.data.name()
    }

    fn ty(&self) -> TypeId {
        self.data.ty()
    }

    fn comment(&self) -> Option<&str> {
        self.data.comment()
    }

    fn bitfield_width(&self) -> Option<u32> {
        self.data.bitfield_width()
    }

    fn is_mutable(&self) -> bool {
        self.data.is_mutable()
    }

    fn annotations(&self) -> &Annotations {
        self.data.annotations()
    }

    fn offset(&self) -> Option<usize> {
        self.data.offset()
    }
}
/// A raw field might be either of a plain data member or a bitfield within a
/// bitfield allocation unit, but we haven't processed it and determined which
/// yet (which would involve allocating it into a bitfield unit if it is a
/// bitfield).
#[derive(Debug)]
struct RawField(FieldData);
impl RawField {
    /// Construct a new `RawField` wrapping the given field data; missing
    /// annotations default to an empty set.
    fn new(
        name: Option<String>,
        ty: TypeId,
        comment: Option<String>,
        annotations: Option<Annotations>,
        bitfield_width: Option<u32>,
        mutable: bool,
        offset: Option<usize>,
    ) -> RawField {
        let annotations = annotations.unwrap_or_default();
        let data = FieldData {
            name,
            ty,
            comment,
            annotations,
            bitfield_width,
            mutable,
            offset,
        };
        RawField(data)
    }
}
// Pure delegation to the wrapped `FieldData`.
impl FieldMethods for RawField {
    fn name(&self) -> Option<&str> {
        self.0.name()
    }

    fn ty(&self) -> TypeId {
        self.0.ty()
    }

    fn comment(&self) -> Option<&str> {
        self.0.comment()
    }

    fn bitfield_width(&self) -> Option<u32> {
        self.0.bitfield_width()
    }

    fn is_mutable(&self) -> bool {
        self.0.is_mutable()
    }

    fn annotations(&self) -> &Annotations {
        self.0.annotations()
    }

    fn offset(&self) -> Option<usize> {
        self.0.offset()
    }
}
/// Convert the given ordered set of raw fields into a list of either plain data
/// members, and/or bitfield units containing multiple bitfields.
///
/// Returns the fields plus a flag saying whether any bitfield unit was
/// created.
///
/// If we do not have the layout for a bitfield's type, then we can't reliably
/// compute its allocation unit. In such cases, we return an error.
fn raw_fields_to_fields_and_bitfield_units<I>(
    ctx: &BindgenContext,
    raw_fields: I,
    packed: bool,
) -> Result<(Vec<Field>, bool), ()>
where
    I: IntoIterator<Item = RawField>,
{
    let mut raw_fields = raw_fields.into_iter().fuse().peekable();
    let mut fields = vec![];
    let mut bitfield_unit_count = 0;

    loop {
        // While we have plain old data members, just keep adding them to our
        // resulting fields. We introduce a scope here so that we can use
        // `raw_fields` again after the `by_ref` iterator adaptor is dropped.
        {
            let non_bitfields = raw_fields
                .by_ref()
                .peeking_take_while(|f| f.bitfield_width().is_none())
                .map(|f| Field::DataMember(f.0));
            fields.extend(non_bitfields);
        }

        // Now gather all the consecutive bitfields. Only consecutive bitfields
        // may potentially share a bitfield allocation unit with each other in
        // the Itanium C++ ABI.
        let mut bitfields = raw_fields
            .by_ref()
            .peeking_take_while(|f| f.bitfield_width().is_some())
            .peekable();

        if bitfields.peek().is_none() {
            break;
        }

        bitfields_to_allocation_units(
            ctx,
            &mut bitfield_unit_count,
            &mut fields,
            bitfields,
            packed,
        )?;
    }

    assert!(
        raw_fields.next().is_none(),
        "The above loop should consume all items in `raw_fields`"
    );

    Ok((fields, bitfield_unit_count != 0))
}
/// Given a set of contiguous raw bitfields, group and allocate them into
/// (potentially multiple) bitfield units.
///
/// Errs if any bitfield's type has no known layout.
fn bitfields_to_allocation_units<E, I>(
    ctx: &BindgenContext,
    bitfield_unit_count: &mut usize,
    fields: &mut E,
    raw_bitfields: I,
    packed: bool,
) -> Result<(), ()>
where
    E: Extend<Field>,
    I: IntoIterator<Item = RawField>,
{
    assert!(ctx.collected_typerefs());

    // NOTE: What follows is reverse-engineered from LLVM's
    // lib/AST/RecordLayoutBuilder.cpp
    //
    // FIXME(emilio): There are some differences between Microsoft and the
    // Itanium ABI, but we'll ignore those and stick to Itanium for now.
    //
    // Also, we need to handle packed bitfields and stuff.
    //
    // TODO(emilio): Take into account C++'s wide bitfields, and
    // packing, sigh.

    // Close out the unit currently being built and push it onto `fields`.
    fn flush_allocation_unit<E>(
        fields: &mut E,
        bitfield_unit_count: &mut usize,
        unit_size_in_bits: usize,
        unit_align_in_bits: usize,
        bitfields: Vec<Bitfield>,
        packed: bool,
    ) where
        E: Extend<Field>,
    {
        *bitfield_unit_count += 1;
        let align = if packed {
            1
        } else {
            bytes_from_bits_pow2(unit_align_in_bits)
        };
        let size = align_to(unit_size_in_bits, align * 8) / 8;
        let layout = Layout::new(size, align);
        fields.extend(Some(Field::Bitfields(BitfieldUnit {
            nth: *bitfield_unit_count,
            layout,
            bitfields,
        })));
    }

    let mut max_align = 0;
    let mut unfilled_bits_in_unit = 0;
    let mut unit_size_in_bits = 0;
    let mut unit_align = 0;
    let mut bitfields_in_unit = vec![];

    // TODO(emilio): Determine this from attributes or pragma ms_struct
    // directives. Also, perhaps we should check if the target is MSVC?
    // (Renamed to SCREAMING_SNAKE_CASE: lowercase consts trigger the
    // `non_upper_case_globals` warning.)
    const IS_MS_STRUCT: bool = false;

    for bitfield in raw_bitfields {
        let bitfield_width = bitfield.bitfield_width().unwrap() as usize;
        let bitfield_layout =
            ctx.resolve_type(bitfield.ty()).layout(ctx).ok_or(())?;
        let bitfield_size = bitfield_layout.size;
        let bitfield_align = bitfield_layout.align;

        let mut offset = unit_size_in_bits;
        if !packed {
            if IS_MS_STRUCT {
                if unit_size_in_bits != 0 &&
                    (bitfield_width == 0 ||
                        bitfield_width > unfilled_bits_in_unit)
                {
                    // We've reached the end of this allocation unit, so flush it
                    // and its bitfields.
                    unit_size_in_bits =
                        align_to(unit_size_in_bits, unit_align * 8);
                    flush_allocation_unit(
                        fields,
                        bitfield_unit_count,
                        unit_size_in_bits,
                        unit_align,
                        mem::replace(&mut bitfields_in_unit, vec![]),
                        packed,
                    );

                    // Now we're working on a fresh bitfield allocation unit, so reset
                    // the current unit size and alignment.
                    offset = 0;
                    unit_align = 0;
                }
            } else {
                // Itanium: a zero-width bitfield, or one that would straddle
                // its own type's storage, is bumped to the next aligned
                // boundary.
                if offset != 0 &&
                    (bitfield_width == 0 ||
                        (offset & (bitfield_align * 8 - 1)) +
                            bitfield_width >
                            bitfield_size * 8)
                {
                    offset = align_to(offset, bitfield_align * 8);
                }
            }
        }

        // According to the x86[-64] ABI spec: "Unnamed bit-fields’ types do not
        // affect the alignment of a structure or union". This makes sense: such
        // bit-fields are only used for padding, and we can't perform an
        // un-aligned read of something we can't read because we can't even name
        // it.
        if bitfield.name().is_some() {
            max_align = cmp::max(max_align, bitfield_align);

            // NB: The `bitfield_width` here is completely, absolutely
            // intentional. Alignment of the allocation unit is based on the
            // maximum bitfield width, not (directly) on the bitfields' types'
            // alignment.
            unit_align = cmp::max(unit_align, bitfield_width);
        }

        // Always keep all bitfields around. While unnamed bitifields are used
        // for padding (and usually not needed hereafter), large unnamed
        // bitfields over their types size cause weird allocation size behavior from clang.
        // Therefore, all bitfields needed to be kept around in order to check for this
        // and make the struct opaque in this case
        bitfields_in_unit.push(Bitfield::new(offset, bitfield));

        unit_size_in_bits = offset + bitfield_width;

        // Compute what the physical unit's final size would be given what we
        // have seen so far, and use that to compute how many bits are still
        // available in the unit.
        let data_size = align_to(unit_size_in_bits, bitfield_align * 8);
        unfilled_bits_in_unit = data_size - unit_size_in_bits;
    }

    if unit_size_in_bits != 0 {
        // Flush the last allocation unit and its bitfields.
        flush_allocation_unit(
            fields,
            bitfield_unit_count,
            unit_size_in_bits,
            unit_align,
            bitfields_in_unit,
            packed,
        );
    }

    Ok(())
}
/// A compound structure's fields are initially raw, and have bitfields that
/// have not been grouped into allocation units. During this time, the fields
/// are mutable and we build them up during parsing.
///
/// Then, once resolving typerefs is completed, we compute all structs' fields'
/// bitfield allocation units, and they remain frozen and immutable forever
/// after.
#[derive(Debug)]
enum CompFields {
    /// Still parsing: a flat list of raw fields.
    BeforeComputingBitfieldUnits(Vec<RawField>),
    /// Units computed; fields are final.
    AfterComputingBitfieldUnits {
        fields: Vec<Field>,
        has_bitfield_units: bool,
    },
    /// A bitfield's layout was unknown, so unit computation failed.
    ErrorComputingBitfieldUnits,
}
impl Default for CompFields {
    /// Freshly-parsed compound types start with an empty raw-field list.
    fn default() -> CompFields {
        CompFields::BeforeComputingBitfieldUnits(Vec::new())
    }
}
impl CompFields {
    /// Add a freshly-parsed raw field.
    ///
    /// # Panics
    ///
    /// Panics if bitfield allocation units have already been computed; fields
    /// must not be appended after that point.
    fn append_raw_field(&mut self, raw: RawField) {
        match *self {
            CompFields::BeforeComputingBitfieldUnits(ref mut raws) => {
                raws.push(raw);
            }
            _ => {
                panic!(
                    "Must not append new fields after computing bitfield allocation units"
                );
            }
        }
    }

    /// Group the raw fields into plain members and bitfield allocation
    /// units, transitioning `self` into the after-computation (or error)
    /// state.
    ///
    /// # Panics
    ///
    /// Panics if units were already computed.
    fn compute_bitfield_units(&mut self, ctx: &BindgenContext, packed: bool) {
        let raws = match *self {
            CompFields::BeforeComputingBitfieldUnits(ref mut raws) => {
                mem::replace(raws, vec![])
            }
            _ => {
                panic!("Already computed bitfield units");
            }
        };

        let result = raw_fields_to_fields_and_bitfield_units(ctx, raws, packed);

        // Plain assignment instead of a discarded `mem::replace`: the old
        // value is dropped either way, and this avoids clippy's
        // unused-return-value lint on `mem::replace`.
        match result {
            Ok((fields, has_bitfield_units)) => {
                *self = CompFields::AfterComputingBitfieldUnits {
                    fields,
                    has_bitfield_units,
                };
            }
            Err(()) => {
                *self = CompFields::ErrorComputingBitfieldUnits;
            }
        }
    }

    /// Give every anonymous field a generated name, and assign each named
    /// bitfield collision-free getter/setter names.
    fn deanonymize_fields(&mut self, ctx: &BindgenContext, methods: &[Method]) {
        let fields = match *self {
            CompFields::AfterComputingBitfieldUnits {
                ref mut fields, ..
            } => fields,
            // Nothing to do here.
            CompFields::ErrorComputingBitfieldUnits => return,
            CompFields::BeforeComputingBitfieldUnits(_) => {
                panic!("Not yet computed bitfield units.");
            }
        };

        // Does the type already have a (possibly mangled) method with `name`?
        fn has_method(
            methods: &[Method],
            ctx: &BindgenContext,
            name: &str,
        ) -> bool {
            methods.iter().any(|method| {
                let method_name = ctx.resolve_func(method.signature()).name();
                method_name == name || ctx.rust_mangle(&method_name) == name
            })
        }

        struct AccessorNamesPair {
            getter: String,
            setter: String,
        }

        // Precompute accessor names for every named bitfield, suffixing them
        // whenever they would collide with an existing method name.
        let mut accessor_names: HashMap<String, AccessorNamesPair> = fields
            .iter()
            .flat_map(|field| match *field {
                Field::Bitfields(ref bu) => &*bu.bitfields,
                Field::DataMember(_) => &[],
            })
            .filter_map(|bitfield| bitfield.name())
            .map(|bitfield_name| {
                let bitfield_name = bitfield_name.to_string();
                let getter = {
                    let mut getter =
                        ctx.rust_mangle(&bitfield_name).to_string();
                    if has_method(methods, ctx, &getter) {
                        getter.push_str("_bindgen_bitfield");
                    }
                    getter
                };
                let setter = {
                    let setter = format!("set_{}", bitfield_name);
                    let mut setter = ctx.rust_mangle(&setter).to_string();
                    if has_method(methods, ctx, &setter) {
                        setter.push_str("_bindgen_bitfield");
                    }
                    setter
                };
                (bitfield_name, AccessorNamesPair { getter, setter })
            })
            .collect();

        let mut anon_field_counter = 0;
        for field in fields.iter_mut() {
            match *field {
                Field::DataMember(FieldData { ref mut name, .. }) => {
                    // `is_some()` rather than `if let Some(_) = *name`
                    // (clippy: redundant_pattern_matching).
                    if name.is_some() {
                        continue;
                    }

                    anon_field_counter += 1;
                    let generated_name =
                        format!("__bindgen_anon_{}", anon_field_counter);
                    *name = Some(generated_name);
                }
                Field::Bitfields(ref mut bu) => {
                    for bitfield in &mut bu.bitfields {
                        if bitfield.name().is_none() {
                            continue;
                        }

                        if let Some(AccessorNamesPair { getter, setter }) =
                            accessor_names.remove(bitfield.name().unwrap())
                        {
                            bitfield.getter_name = Some(getter);
                            bitfield.setter_name = Some(setter);
                        }
                    }
                }
            }
        }
    }
}
impl Trace for CompFields {
    type Extra = ();

    fn trace<T>(&self, context: &BindgenContext, tracer: &mut T, _: &())
    where
        T: Tracer,
    {
        match *self {
            CompFields::BeforeComputingBitfieldUnits(ref raws) => {
                // Raw fields expose their type id directly.
                for raw in raws {
                    tracer.visit_kind(raw.ty().into(), EdgeKind::Field);
                }
            }
            CompFields::AfterComputingBitfieldUnits { ref fields, .. } => {
                // Grouped fields know how to trace themselves.
                for field in fields {
                    field.trace(context, tracer, &());
                }
            }
            CompFields::ErrorComputingBitfieldUnits => {}
        }
    }
}
/// Common data shared across different field types.
#[derive(Clone, Debug)]
pub struct FieldData {
    /// The name of the field; `None` if it's an unnamed field (e.g. an
    /// unnamed bitfield).
    name: Option<String>,

    /// The inner type.
    ty: TypeId,

    /// The doc comment on the field if any.
    comment: Option<String>,

    /// Annotations for this field, or the default.
    annotations: Annotations,

    /// If this field is a bitfield, and how many bits does it contain if it is.
    bitfield_width: Option<u32>,

    /// If the C++ field is marked as `mutable`
    mutable: bool,

    /// The offset of the field (in bits)
    offset: Option<usize>,
}
impl FieldMethods for FieldData {
    fn name(&self) -> Option<&str> {
        self.name.as_deref()
    }

    fn ty(&self) -> TypeId {
        self.ty
    }

    fn comment(&self) -> Option<&str> {
        self.comment.as_deref()
    }

    fn bitfield_width(&self) -> Option<u32> {
        self.bitfield_width
    }

    fn is_mutable(&self) -> bool {
        self.mutable
    }

    fn annotations(&self) -> &Annotations {
        &self.annotations
    }

    fn offset(&self) -> Option<usize> {
        self.offset
    }
}
/// The kind of inheritance a base class is using.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BaseKind {
    /// Normal (non-virtual) inheritance, like:
    ///
    /// ```cpp
    /// class A : public B {};
    /// ```
    Normal,

    /// Virtual inheritance, like:
    ///
    /// ```cpp
    /// class A: public virtual B {};
    /// ```
    Virtual,
}
/// A base class.
#[derive(Clone, Debug)]
pub struct Base {
    /// The type of this base class.
    pub ty: TypeId,

    /// The kind of inheritance we're doing (normal or virtual).
    pub kind: BaseKind,

    /// Name of the generated Rust field in which this base should be stored.
    pub field_name: String,
}
impl Base {
    /// Whether this base class is inherited virtually.
    pub fn is_virtual(&self) -> bool {
        match self.kind {
            BaseKind::Virtual => true,
            BaseKind::Normal => false,
        }
    }

    /// Whether this base class needs its own field for storage.
    pub fn requires_storage(&self, ctx: &BindgenContext) -> bool {
        // Virtual bases are already taken into account by the vtable pointer,
        // and zero-sized bases must be excluded from the base chain because
        // the dummy field we insert for zero-sized types would otherwise
        // contribute to our size. Everything else gets its own field.
        //
        // FIXME(emilio): Is this always right?
        !self.is_virtual() && !self.ty.is_zero_sized(ctx)
    }
}
/// A compound type.
///
/// Either a struct or union, a compound type is built up from the combination
/// of fields which also are associated with their own (potentially compound)
/// type.
#[derive(Debug)]
pub struct CompInfo {
    /// Whether this is a struct or a union.
    kind: CompKind,

    /// The members of this struct or union.
    fields: CompFields,

    /// The abstract template parameters of this class. Note that these are NOT
    /// concrete template arguments, and should always be a
    /// `Type(TypeKind::TypeParam(name))`. For concrete template arguments, see
    /// `TypeKind::TemplateInstantiation`.
    template_params: Vec<TypeId>,

    /// The method declarations inside this class, if in C++ mode.
    methods: Vec<Method>,

    /// The different constructors this struct or class contains.
    constructors: Vec<FunctionId>,

    /// The destructor of this type, along with its kind (whether it's
    /// virtual, pure virtual, etc.).
    destructor: Option<(MethodKind, FunctionId)>,

    /// Vector of classes this one inherits from.
    base_members: Vec<Base>,

    /// The inner types that were declared inside this class, in something like:
    ///
    /// class Foo {
    ///     typedef int FooTy;
    ///     struct Bar {
    ///         int baz;
    ///     };
    /// }
    ///
    /// static Foo::Bar const = {3};
    inner_types: Vec<TypeId>,

    /// Set of static constants declared inside this class.
    inner_vars: Vec<VarId>,

    /// Whether this type should generate a vtable (TODO: Should be able to
    /// look at the virtual methods and ditch this field).
    has_own_virtual_method: bool,

    /// Whether this type has a destructor.
    has_destructor: bool,

    /// Whether this type has a base type with more than one member.
    ///
    /// TODO: We should be able to compute this.
    has_nonempty_base: bool,

    /// If this type has a template parameter which is not a type (e.g.: a
    /// size_t)
    has_non_type_template_params: bool,

    /// Whether we saw `__attribute__((packed))` on or within this type.
    packed_attr: bool,

    /// Used to know if we've found an opaque attribute that could cause us to
    /// generate a type with invalid layout. This is explicitly used to avoid us
    /// generating bad alignments when parsing types like max_align_t.
    ///
    /// It's not clear what the behavior should be here, if generating the item
    /// and pray, or behave as an opaque type.
    found_unknown_attr: bool,

    /// Used to indicate when a struct has been forward declared. Usually used
    /// in headers so that APIs can't modify them directly.
    is_forward_declaration: bool,
}
impl CompInfo {
    /// Construct a new compound type.
    pub fn new(kind: CompKind) -> Self {
        CompInfo {
            kind,
            fields: CompFields::default(),
            template_params: vec![],
            methods: vec![],
            constructors: vec![],
            destructor: None,
            base_members: vec![],
            inner_types: vec![],
            inner_vars: vec![],
            has_own_virtual_method: false,
            has_destructor: false,
            has_nonempty_base: false,
            has_non_type_template_params: false,
            packed_attr: false,
            found_unknown_attr: false,
            is_forward_declaration: false,
        }
    }

    /// Compute the layout of this type.
    ///
    /// This is called as a fallback under some circumstances where LLVM doesn't
    /// give us the correct layout.
    ///
    /// If we're a union without known layout, we try to compute it from our
    /// members. This is not ideal, but clang fails to report the size for these
    /// kind of unions, see test/headers/template_union.hpp
    pub fn layout(&self, ctx: &BindgenContext) -> Option<Layout> {
        // We can't do better than clang here, sorry.
        if self.kind == CompKind::Struct {
            return None;
        }

        // By definition, we don't have the right layout information here if
        // we're a forward declaration.
        if self.is_forward_declaration() {
            return None;
        }

        // empty union case
        if self.fields().is_empty() {
            return None;
        }

        // A union is as large as its largest member and as aligned as its
        // most-aligned member.
        let mut max_size = 0;
        // Don't allow align(0)
        let mut max_align = 1;
        for field in self.fields() {
            let field_layout = field.layout(ctx);

            if let Some(layout) = field_layout {
                max_size = cmp::max(max_size, layout.size);
                max_align = cmp::max(max_align, layout.align);
            }
        }

        Some(Layout::new(max_size, max_align))
    }

    /// Get this type's set of fields.
    ///
    /// Panics if bitfield allocation units have not been computed yet.
    pub fn fields(&self) -> &[Field] {
        match self.fields {
            CompFields::ErrorComputingBitfieldUnits => &[],
            CompFields::AfterComputingBitfieldUnits { ref fields, .. } => {
                fields
            }
            CompFields::BeforeComputingBitfieldUnits(_) => {
                panic!("Should always have computed bitfield units first");
            }
        }
    }

    /// Whether this type contains any bitfield allocation units.
    ///
    /// Panics if bitfield allocation units have not been computed yet.
    fn has_bitfields(&self) -> bool {
        match self.fields {
            CompFields::ErrorComputingBitfieldUnits => false,
            CompFields::AfterComputingBitfieldUnits {
                has_bitfield_units,
                ..
            } => has_bitfield_units,
            CompFields::BeforeComputingBitfieldUnits(_) => {
                panic!("Should always have computed bitfield units first");
            }
        }
    }

    /// Returns whether we have a too large bitfield unit, in which case we may
    /// not be able to derive some of the things we should be able to normally
    /// derive.
    pub fn has_too_large_bitfield_unit(&self) -> bool {
        if !self.has_bitfields() {
            return false;
        }
        self.fields().iter().any(|field| match *field {
            Field::DataMember(..) => false,
            Field::Bitfields(ref unit) => {
                unit.layout.size > RUST_DERIVE_IN_ARRAY_LIMIT
            }
        })
    }

    /// Does this type have any template parameters that aren't types
    /// (e.g. int)?
    pub fn has_non_type_template_params(&self) -> bool {
        self.has_non_type_template_params
    }

    /// Do we see a virtual function during parsing?
    /// Get the has_own_virtual_method boolean.
    pub fn has_own_virtual_method(&self) -> bool {
        self.has_own_virtual_method
    }

    /// Did we see a destructor when parsing this type?
    pub fn has_own_destructor(&self) -> bool {
        self.has_destructor
    }

    /// Get this type's set of methods.
    pub fn methods(&self) -> &[Method] {
        &self.methods
    }

    /// Get this type's set of constructors.
    pub fn constructors(&self) -> &[FunctionId] {
        &self.constructors
    }

    /// Get this type's destructor.
    pub fn destructor(&self) -> Option<(MethodKind, FunctionId)> {
        self.destructor
    }

    /// What kind of compound type is this?
    pub fn kind(&self) -> CompKind {
        self.kind
    }

    /// Is this a union?
    pub fn is_union(&self) -> bool {
        self.kind() == CompKind::Union
    }

    /// The set of types that this one inherits from.
    pub fn base_members(&self) -> &[Base] {
        &self.base_members
    }

    /// Construct a new compound type from a Clang type.
    pub fn from_ty(
        potential_id: ItemId,
        ty: &clang::Type,
        location: Option<clang::Cursor>,
        ctx: &mut BindgenContext,
    ) -> Result<Self, ParseError> {
        use clang_sys::*;
        assert!(
            ty.template_args().is_none(),
            "We handle template instantiations elsewhere"
        );

        let mut cursor = ty.declaration();
        let mut kind = Self::kind_from_cursor(&cursor);
        if kind.is_err() {
            if let Some(location) = location {
                kind = Self::kind_from_cursor(&location);
                cursor = location;
            }
        }

        let kind = kind?;

        debug!("CompInfo::from_ty({:?}, {:?})", kind, cursor);

        let mut ci = CompInfo::new(kind);
        ci.is_forward_declaration =
            location.map_or(true, |cur| match cur.kind() {
                CXCursor_StructDecl | CXCursor_UnionDecl |
                CXCursor_ClassDecl => !cur.is_definition(),
                _ => false,
            });

        // Anonymous struct/union declarations may be followed by a FieldDecl
        // that uses them; we defer them here until we know whether they are a
        // named field's type or an anonymous field themselves.
        let mut maybe_anonymous_struct_field = None;
        cursor.visit(|cur| {
            if cur.kind() != CXCursor_FieldDecl {
                if let Some((ty, clang_ty, offset)) =
                    maybe_anonymous_struct_field.take()
                {
                    if cur.kind() == CXCursor_TypedefDecl &&
                        cur.typedef_type().unwrap().canonical_type() ==
                            clang_ty
                    {
                        // Typedefs of anonymous structs appear later in the ast
                        // than the struct itself, that would otherwise be an
                        // anonymous field. Detect that case here, and do
                        // nothing.
                    } else {
                        let field = RawField::new(
                            None, ty, None, None, None, false, offset,
                        );
                        ci.fields.append_raw_field(field);
                    }
                }
            }

            match cur.kind() {
                CXCursor_FieldDecl => {
                    if let Some((ty, clang_ty, offset)) =
                        maybe_anonymous_struct_field.take()
                    {
                        let mut used = false;
                        cur.visit(|child| {
                            if child.cur_type() == clang_ty {
                                used = true;
                            }
                            CXChildVisit_Continue
                        });

                        if !used {
                            let field = RawField::new(
                                None, ty, None, None, None, false, offset,
                            );
                            ci.fields.append_raw_field(field);
                        }
                    }

                    let bit_width = cur.bit_width();
                    let field_type = Item::from_ty_or_ref(
                        cur.cur_type(),
                        cur,
                        Some(potential_id),
                        ctx,
                    );

                    let comment = cur.raw_comment();
                    let annotations = Annotations::new(&cur);
                    let name = cur.spelling();
                    // NOTE(review): this queries `cursor` (the compound type's
                    // declaration), not `cur` (the field) — confirm this is
                    // intentional, since `mutable` is a per-field property.
                    let is_mutable = cursor.is_mutable_field();
                    let offset = cur.offset_of_field().ok();

                    // Name can be empty if there are bitfields, for example,
                    // see tests/headers/struct_with_bitfields.h
                    assert!(
                        !name.is_empty() || bit_width.is_some(),
                        "Empty field name?"
                    );

                    let name = if name.is_empty() { None } else { Some(name) };

                    let field = RawField::new(
                        name,
                        field_type,
                        comment,
                        annotations,
                        bit_width,
                        is_mutable,
                        offset,
                    );
                    ci.fields.append_raw_field(field);

                    // Now we look for things like attributes and stuff.
                    cur.visit(|cur| {
                        if cur.kind() == CXCursor_UnexposedAttr {
                            ci.found_unknown_attr = true;
                        }
                        CXChildVisit_Continue
                    });
                }
                CXCursor_UnexposedAttr => {
                    ci.found_unknown_attr = true;
                }
                CXCursor_EnumDecl |
                CXCursor_TypeAliasDecl |
                CXCursor_TypeAliasTemplateDecl |
                CXCursor_TypedefDecl |
                CXCursor_StructDecl |
                CXCursor_UnionDecl |
                CXCursor_ClassTemplate |
                CXCursor_ClassDecl => {
                    // We can find non-semantic children here, clang uses a
                    // StructDecl to note incomplete structs that haven't been
                    // forward-declared before, see [1].
                    //
                    // Also, clang seems to scope struct definitions inside
                    // unions, and other named struct definitions inside other
                    // structs to the whole translation unit.
                    //
                    // Let's just assume that if the cursor we've found is a
                    // definition, it's a valid inner type.
                    //
                    // [1]: https://github.com/rust-lang/rust-bindgen/issues/482
                    let is_inner_struct =
                        cur.semantic_parent() == cursor || cur.is_definition();
                    if !is_inner_struct {
                        return CXChildVisit_Continue;
                    }

                    // Even if this is a definition, we may not be the semantic
                    // parent, see #1281.
                    let inner = Item::parse(cur, Some(potential_id), ctx)
                        .expect("Inner ClassDecl");

                    let inner = inner.expect_type_id(ctx);

                    ci.inner_types.push(inner);

                    // A declaration of an union or a struct without name could
                    // also be an unnamed field, unfortunately.
                    if cur.spelling().is_empty() &&
                        cur.kind() != CXCursor_EnumDecl
                    {
                        let ty = cur.cur_type();
                        let offset = cur.offset_of_field().ok();
                        maybe_anonymous_struct_field =
                            Some((inner, ty, offset));
                    }
                }
                CXCursor_PackedAttr => {
                    ci.packed_attr = true;
                }
                CXCursor_TemplateTypeParameter => {
                    let param = Item::type_param(None, cur, ctx).expect(
                        "Item::type_param shouldn't fail when pointing \
                         at a TemplateTypeParameter",
                    );
                    ci.template_params.push(param);
                }
                CXCursor_CXXBaseSpecifier => {
                    let is_virtual_base = cur.is_virtual_base();
                    ci.has_own_virtual_method |= is_virtual_base;

                    let kind = if is_virtual_base {
                        BaseKind::Virtual
                    } else {
                        BaseKind::Normal
                    };

                    let field_name = match ci.base_members.len() {
                        0 => "_base".into(),
                        n => format!("_base_{}", n),
                    };
                    let type_id =
                        Item::from_ty_or_ref(cur.cur_type(), cur, None, ctx);
                    ci.base_members.push(Base {
                        ty: type_id,
                        kind,
                        field_name,
                    });
                }
                CXCursor_Constructor | CXCursor_Destructor |
                CXCursor_CXXMethod => {
                    let is_virtual = cur.method_is_virtual();
                    let is_static = cur.method_is_static();
                    debug_assert!(!(is_static && is_virtual), "How?");

                    ci.has_destructor |= cur.kind() == CXCursor_Destructor;
                    ci.has_own_virtual_method |= is_virtual;

                    // This used to not be here, but then I tried generating
                    // stylo bindings with this (without path filters), and
                    // cried a lot with a method in gfx/Point.h
                    // (ToUnknownPoint), that somehow was causing the same type
                    // to be inserted in the map two times.
                    //
                    // I couldn't make a reduced test case, but anyway...
                    // Methods of template functions not only used to be inlined,
                    // but also instantiated, and we wouldn't be able to call
                    // them, so just bail out.
                    if !ci.template_params.is_empty() {
                        return CXChildVisit_Continue;
                    }

                    // NB: This gets us an owned `Function`, not a
                    // `FunctionSig`.
                    let signature =
                        match Item::parse(cur, Some(potential_id), ctx) {
                            Ok(item)
                                if ctx
                                    .resolve_item(item)
                                    .kind()
                                    .is_function() =>
                            {
                                item
                            }
                            _ => return CXChildVisit_Continue,
                        };

                    let signature = signature.expect_function_id(ctx);

                    match cur.kind() {
                        CXCursor_Constructor => {
                            ci.constructors.push(signature);
                        }
                        CXCursor_Destructor => {
                            let kind = if is_virtual {
                                MethodKind::VirtualDestructor {
                                    pure_virtual: cur.method_is_pure_virtual(),
                                }
                            } else {
                                MethodKind::Destructor
                            };
                            ci.destructor = Some((kind, signature));
                        }
                        CXCursor_CXXMethod => {
                            let is_const = cur.method_is_const();
                            let method_kind = if is_static {
                                MethodKind::Static
                            } else if is_virtual {
                                MethodKind::Virtual {
                                    pure_virtual: cur.method_is_pure_virtual(),
                                }
                            } else {
                                MethodKind::Normal
                            };

                            let method =
                                Method::new(method_kind, signature, is_const);

                            ci.methods.push(method);
                        }
                        _ => unreachable!("How can we see this here?"),
                    }
                }
                CXCursor_NonTypeTemplateParameter => {
                    ci.has_non_type_template_params = true;
                }
                CXCursor_VarDecl => {
                    let linkage = cur.linkage();
                    if linkage != CXLinkage_External &&
                        linkage != CXLinkage_UniqueExternal
                    {
                        return CXChildVisit_Continue;
                    }

                    let visibility = cur.visibility();
                    if visibility != CXVisibility_Default {
                        return CXChildVisit_Continue;
                    }

                    if let Ok(item) = Item::parse(cur, Some(potential_id), ctx)
                    {
                        ci.inner_vars.push(item.as_var_id_unchecked());
                    }
                }
                // Intentionally not handled
                CXCursor_CXXAccessSpecifier |
                CXCursor_CXXFinalAttr |
                CXCursor_FunctionTemplate |
                CXCursor_ConversionFunction => {}
                _ => {
                    warn!(
                        "unhandled comp member `{}` (kind {:?}) in `{}` ({})",
                        cur.spelling(),
                        clang::kind_to_str(cur.kind()),
                        cursor.spelling(),
                        cur.location()
                    );
                }
            }
            CXChildVisit_Continue
        });

        // A trailing anonymous struct/union with no consuming FieldDecl is an
        // anonymous field itself.
        if let Some((ty, _, offset)) = maybe_anonymous_struct_field {
            let field =
                RawField::new(None, ty, None, None, None, false, offset);
            ci.fields.append_raw_field(field);
        }

        Ok(ci)
    }

    /// Determine whether `cursor` declares a struct or a union, or bail with
    /// `ParseError::Continue` for anything else.
    fn kind_from_cursor(
        cursor: &clang::Cursor,
    ) -> Result<CompKind, ParseError> {
        use clang_sys::*;
        Ok(match cursor.kind() {
            CXCursor_UnionDecl => CompKind::Union,
            CXCursor_ClassDecl | CXCursor_StructDecl => CompKind::Struct,
            CXCursor_CXXBaseSpecifier |
            CXCursor_ClassTemplatePartialSpecialization |
            CXCursor_ClassTemplate => match cursor.template_kind() {
                CXCursor_UnionDecl => CompKind::Union,
                _ => CompKind::Struct,
            },
            _ => {
                warn!("Unknown kind for comp type: {:?}", cursor);
                return Err(ParseError::Continue);
            }
        })
    }

    /// Get the set of types that were declared within this compound type
    /// (e.g. nested class definitions).
    pub fn inner_types(&self) -> &[TypeId] {
        &self.inner_types
    }

    /// Get the set of static variables declared within this compound type.
    pub fn inner_vars(&self) -> &[VarId] {
        &self.inner_vars
    }

    /// Have we found a field with an opaque type that could potentially mess up
    /// the layout of this compound type?
    pub fn found_unknown_attr(&self) -> bool {
        self.found_unknown_attr
    }

    /// Is this compound type packed?
    pub fn is_packed(
        &self,
        ctx: &BindgenContext,
        layout: &Option<Layout>,
    ) -> bool {
        if self.packed_attr {
            return true;
        }

        // Even though `libclang` doesn't expose `#pragma packed(...)`, we can
        // detect it through its effects.
        if let Some(ref parent_layout) = *layout {
            // A field more aligned than its parent means the parent's
            // alignment was artificially lowered, i.e. it was packed.
            if self.fields().iter().any(|f| match *f {
                Field::Bitfields(ref unit) => {
                    unit.layout().align > parent_layout.align
                }
                Field::DataMember(ref data) => {
                    let field_ty = ctx.resolve_type(data.ty());
                    field_ty.layout(ctx).map_or(false, |field_ty_layout| {
                        field_ty_layout.align > parent_layout.align
                    })
                }
            }) {
                info!("Found a struct that was defined within `#pragma packed(...)`");
                return true;
            } else if self.has_own_virtual_method {
                if parent_layout.align == 1 {
                    return true;
                }
            }
        }

        false
    }

    /// Returns true if compound type has been forward declared
    pub fn is_forward_declaration(&self) -> bool {
        self.is_forward_declaration
    }

    /// Compute this compound structure's bitfield allocation units.
    pub fn compute_bitfield_units(&mut self, ctx: &BindgenContext) {
        // TODO(emilio): If we could detect #pragma packed here we'd fix layout
        // tests in divide-by-zero-in-struct-layout.rs
        self.fields.compute_bitfield_units(ctx, self.packed_attr)
    }

    /// Assign for each anonymous field a generated name.
    pub fn deanonymize_fields(&mut self, ctx: &BindgenContext) {
        self.fields.deanonymize_fields(ctx, &self.methods);
    }

    /// Returns whether the current union can be represented as a Rust `union`
    ///
    /// Requirements:
    ///     1. Current RustTarget allows for `untagged_union`
    ///     2. Each field can derive `Copy`
    pub fn can_be_rust_union(&self, ctx: &BindgenContext) -> bool {
        if !ctx.options().rust_features().untagged_union {
            return false;
        }

        if self.is_forward_declaration() {
            return false;
        }

        self.fields().iter().all(|f| match *f {
            Field::DataMember(ref field_data) => {
                field_data.ty().can_derive_copy(ctx)
            }
            Field::Bitfields(_) => true,
        })
    }
}
impl DotAttributes for CompInfo {
    // Emit this type's flags and fields as rows of a graphviz HTML-label
    // table. Only flags that are set get a row; fields render as a nested
    // table. Row order here determines output order, so it must not change.
    fn dot_attributes<W>(
        &self,
        ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        writeln!(out, "<tr><td>CompKind</td><td>{:?}</td></tr>", self.kind)?;

        if self.has_own_virtual_method {
            writeln!(out, "<tr><td>has_vtable</td><td>true</td></tr>")?;
        }

        if self.has_destructor {
            writeln!(out, "<tr><td>has_destructor</td><td>true</td></tr>")?;
        }

        if self.has_nonempty_base {
            writeln!(out, "<tr><td>has_nonempty_base</td><td>true</td></tr>")?;
        }

        if self.has_non_type_template_params {
            writeln!(
                out,
                "<tr><td>has_non_type_template_params</td><td>true</td></tr>"
            )?;
        }

        if self.packed_attr {
            writeln!(out, "<tr><td>packed_attr</td><td>true</td></tr>")?;
        }

        if self.is_forward_declaration {
            writeln!(
                out,
                "<tr><td>is_forward_declaration</td><td>true</td></tr>"
            )?;
        }

        if !self.fields().is_empty() {
            writeln!(out, r#"<tr><td>fields</td><td><table border="0">"#)?;
            for field in self.fields() {
                field.dot_attributes(ctx, out)?;
            }
            writeln!(out, "</table></td></tr>")?;
        }

        Ok(())
    }
}
impl IsOpaque for CompInfo {
    type Extra = Option<Layout>;

    // A compound type is treated as opaque when we can't faithfully model its
    // layout in generated Rust. Each check below covers one such situation.
    fn is_opaque(&self, ctx: &BindgenContext, layout: &Option<Layout>) -> bool {
        if self.has_non_type_template_params {
            return true;
        }

        // When we do not have the layout for a bitfield's type (for example, it
        // is a type parameter), then we can't compute bitfield units. We are
        // left with no choice but to make the whole struct opaque, or else we
        // might generate structs with incorrect sizes and alignments.
        if let CompFields::ErrorComputingBitfieldUnits = self.fields {
            return true;
        }

        // Bitfields with a width that is larger than their unit's width have
        // some strange things going on, and the best we can do is make the
        // whole struct opaque.
        //
        // `width()` is in bits; `layout.size` is presumably in bytes, hence
        // the division by 8 — TODO(review): confirm units.
        if self.fields().iter().any(|f| match *f {
            Field::DataMember(_) => false,
            Field::Bitfields(ref unit) => unit.bitfields().iter().any(|bf| {
                let bitfield_layout = ctx
                    .resolve_type(bf.ty())
                    .layout(ctx)
                    .expect("Bitfield without layout? Gah!");
                bf.width() / 8 > bitfield_layout.size as u32
            }),
        }) {
            return true;
        }

        if !ctx.options().rust_features().repr_packed_n {
            // If we don't have `#[repr(packed(N)]`, the best we can
            // do is make this struct opaque.
            //
            // See https://github.com/rust-lang/rust-bindgen/issues/537 and
            // https://github.com/rust-lang/rust/issues/33158
            if self.is_packed(ctx, layout) &&
                layout.map_or(false, |l| l.align > 1)
            {
                warn!("Found a type that is both packed and aligned to greater than \
                       1; Rust before version 1.33 doesn't have `#[repr(packed(N))]`, so we \
                       are treating it as opaque. You may wish to set bindgen's rust target \
                       version to 1.33 or later to enable `#[repr(packed(N))]` support.");
                return true;
            }
        }

        false
    }
}
impl TemplateParameters for CompInfo {
    // Returns a fresh copy of the abstract template parameters; callers own
    // the returned Vec.
    fn self_template_params(&self, _ctx: &BindgenContext) -> Vec<TypeId> {
        self.template_params.clone()
    }
}
impl Trace for CompInfo {
    type Extra = Item;

    fn trace<T>(&self, context: &BindgenContext, tracer: &mut T, item: &Item)
    where
        T: Tracer,
    {
        // Template parameters, inner types/vars, methods, destructor and
        // constructors are traced unconditionally.
        for param in item.all_template_params(context) {
            tracer.visit_kind(
                param.into(),
                EdgeKind::TemplateParameterDefinition,
            );
        }

        for inner_ty in self.inner_types() {
            tracer.visit_kind(inner_ty.into(), EdgeKind::InnerType);
        }

        for &inner_var in self.inner_vars() {
            tracer.visit_kind(inner_var.into(), EdgeKind::InnerVar);
        }

        for method in self.methods() {
            tracer.visit_kind(method.signature.into(), EdgeKind::Method);
        }

        if let Some((_, dtor_sig)) = self.destructor() {
            tracer.visit_kind(dtor_sig.into(), EdgeKind::Destructor);
        }

        for constructor in self.constructors() {
            tracer.visit_kind(constructor.into(), EdgeKind::Constructor);
        }

        // Base members and fields are not generated for opaque types (but all
        // of the above things are) so stop here.
        if item.is_opaque(context, &()) {
            return;
        }

        for base in self.base_members() {
            tracer.visit_kind(base.ty.into(), EdgeKind::BaseMember);
        }

        self.fields.trace(context, tracer, &());
    }
}
Avoid needless `std::mem::replace`
In Rust 1.45.0, `std::mem::replace` gained the `#[must_use]` attribute,
causing a new diagnostic for some `bindgen` code:
error: unused return value of `std::mem::replace` that must be used
--> src/ir/comp.rs:751:17
|
751 | / mem::replace(
752 | | self,
753 | | CompFields::AfterComputingBitfieldUnits {
754 | | fields,
755 | | has_bitfield_units,
756 | | },
757 | | );
| |__________________^
|
= note: `-D unused-must-use` implied by `-D warnings`
= note: if you don't need the old value, you can just assign the new value directly
error: unused return value of `std::mem::replace` that must be used
--> src/ir/comp.rs:760:17
|
760 | mem::replace(self, CompFields::ErrorComputingBitfieldUnits);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: if you don't need the old value, you can just assign the new value directly
error: aborting due to 2 previous errors
//! Compound types (unions and structs) in our intermediate representation.
use super::analysis::Sizedness;
use super::annotations::Annotations;
use super::context::{BindgenContext, FunctionId, ItemId, TypeId, VarId};
use super::dot::DotAttributes;
use super::item::{IsOpaque, Item};
use super::layout::Layout;
use super::template::TemplateParameters;
use super::traversal::{EdgeKind, Trace, Tracer};
use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use crate::clang;
use crate::codegen::struct_layout::{align_to, bytes_from_bits_pow2};
use crate::ir::derive::CanDeriveCopy;
use crate::parse::{ClangItemParser, ParseError};
use crate::HashMap;
use peeking_take_while::PeekableExt;
use std::cmp;
use std::io;
use std::mem;
/// The kind of compound type: a struct or a union.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum CompKind {
    /// A struct.
    Struct,
    /// A union.
    Union,
}
/// The kind of C++ method.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum MethodKind {
    /// A constructor. We represent it as a method for convenience, to avoid
    /// code duplication.
    Constructor,
    /// A destructor.
    Destructor,
    /// A virtual destructor.
    VirtualDestructor {
        /// Whether it's pure virtual.
        pure_virtual: bool,
    },
    /// A static method.
    Static,
    /// A normal method.
    Normal,
    /// A virtual method.
    Virtual {
        /// Whether it's pure virtual.
        pure_virtual: bool,
    },
}
impl MethodKind {
    /// Is this a destructor, virtual or not?
    pub fn is_destructor(&self) -> bool {
        match *self {
            MethodKind::Destructor => true,
            MethodKind::VirtualDestructor { .. } => true,
            _ => false,
        }
    }

    /// Is this a pure virtual method or destructor?
    pub fn is_pure_virtual(&self) -> bool {
        match *self {
            MethodKind::Virtual { pure_virtual } => pure_virtual,
            MethodKind::VirtualDestructor { pure_virtual } => pure_virtual,
            _ => false,
        }
    }
}
/// A struct representing a C++ method, either static, normal, or virtual.
#[derive(Debug)]
pub struct Method {
    /// What kind of method this is (constructor, destructor, static, …).
    kind: MethodKind,
    /// The signature of the method. Take into account this is not a `Type`
    /// item, but a `Function` one.
    ///
    /// This is tricky and probably this field should be renamed.
    signature: FunctionId,
    /// Whether the method is `const`-qualified.
    is_const: bool,
}
impl Method {
    /// Construct a new `Method`.
    pub fn new(
        kind: MethodKind,
        signature: FunctionId,
        is_const: bool,
    ) -> Self {
        Method {
            kind,
            signature,
            is_const,
        }
    }

    /// What kind of method is this?
    pub fn kind(&self) -> MethodKind {
        self.kind
    }

    /// Is this a constructor?
    pub fn is_constructor(&self) -> bool {
        match self.kind {
            MethodKind::Constructor => true,
            _ => false,
        }
    }

    /// Is this a virtual method or a virtual destructor?
    pub fn is_virtual(&self) -> bool {
        match self.kind {
            MethodKind::Virtual { .. } => true,
            MethodKind::VirtualDestructor { .. } => true,
            _ => false,
        }
    }

    /// Is this a static method?
    pub fn is_static(&self) -> bool {
        match self.kind {
            MethodKind::Static => true,
            _ => false,
        }
    }

    /// Get the id for the `Function` signature for this method.
    pub fn signature(&self) -> FunctionId {
        self.signature
    }

    /// Is this a const qualified method?
    pub fn is_const(&self) -> bool {
        self.is_const
    }
}
/// Methods common to the various field types (data members and bitfields).
pub trait FieldMethods {
    /// Get the name of this field, if it has one.
    fn name(&self) -> Option<&str>;

    /// Get the type of this field.
    fn ty(&self) -> TypeId;

    /// Get the comment for this field.
    fn comment(&self) -> Option<&str>;

    /// If this is a bitfield, how many bits does it need?
    fn bitfield_width(&self) -> Option<u32>;

    /// Is this field marked as `mutable`?
    fn is_mutable(&self) -> bool;

    /// Get the annotations for this field.
    fn annotations(&self) -> &Annotations;

    /// The offset of the field (in bits)
    fn offset(&self) -> Option<usize>;
}
/// A contiguous set of logical bitfields that live within the same physical
/// allocation unit. See 9.2.4 [class.bit] in the C++ standard and [section
/// 2.4.II.1 in the Itanium C++
/// ABI](http://itanium-cxx-abi.github.io/cxx-abi/abi.html#class-types).
#[derive(Debug)]
pub struct BitfieldUnit {
    /// 1-based index of this unit within its containing struct.
    nth: usize,
    /// The layout (size and alignment) of the whole allocation unit.
    layout: Layout,
    /// The logical bitfields packed into this unit.
    bitfields: Vec<Bitfield>,
}
impl BitfieldUnit {
    /// Get the 1-based index of this bitfield unit within its containing
    /// struct. Useful for generating a Rust struct's field name for this unit
    /// of bitfields.
    pub fn nth(&self) -> usize {
        self.nth
    }

    /// Get the layout within which these bitfields reside.
    pub fn layout(&self) -> Layout {
        self.layout
    }

    /// Get the bitfields within this unit.
    pub fn bitfields(&self) -> &[Bitfield] {
        self.bitfields.as_slice()
    }
}
/// A struct representing a C++ field.
#[derive(Debug)]
pub enum Field {
    /// A normal data member.
    DataMember(FieldData),

    /// A physical allocation unit containing one or more logical bitfields.
    Bitfields(BitfieldUnit),
}
impl Field {
    /// Get this field's layout, if it is known.
    pub fn layout(&self, ctx: &BindgenContext) -> Option<Layout> {
        match *self {
            Field::DataMember(ref data) => {
                // Data members get their layout from their resolved type.
                ctx.resolve_type(data.ty).layout(ctx)
            }
            // A bitfield unit's layout was fixed when the unit was computed.
            Field::Bitfields(ref unit) => Some(unit.layout()),
        }
    }
}
impl Trace for Field {
    type Extra = ();

    fn trace<T>(&self, _: &BindgenContext, tracer: &mut T, _: &())
    where
        T: Tracer,
    {
        // Every type referenced by this field is a `Field` edge.
        match *self {
            Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => {
                for bitfield in bitfields {
                    tracer.visit_kind(bitfield.ty().into(), EdgeKind::Field);
                }
            }
            Field::DataMember(ref data) => {
                tracer.visit_kind(data.ty.into(), EdgeKind::Field);
            }
        }
    }
}
impl DotAttributes for Field {
    // Render this field as graphviz HTML-label table rows: data members
    // delegate to `FieldData`, bitfield units print their layout followed by
    // one row per contained bitfield.
    fn dot_attributes<W>(
        &self,
        ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        match *self {
            Field::DataMember(ref data) => data.dot_attributes(ctx, out),
            Field::Bitfields(BitfieldUnit {
                layout,
                ref bitfields,
                ..
            }) => {
                writeln!(
                    out,
                    r#"<tr>
                              <td>bitfield unit</td>
                              <td>
                                <table border="0">
                                  <tr>
                                    <td>unit.size</td><td>{}</td>
                                  </tr>
                                  <tr>
                                    <td>unit.align</td><td>{}</td>
                                  </tr>
                         "#,
                    layout.size, layout.align
                )?;
                for bf in bitfields {
                    bf.dot_attributes(ctx, out)?;
                }
                writeln!(out, "</table></td></tr>")
            }
        }
    }
}
impl DotAttributes for FieldData {
    // One table row: field name (or "(anonymous)") and its type id.
    fn dot_attributes<W>(
        &self,
        _ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        writeln!(
            out,
            "<tr><td>{}</td><td>{:?}</td></tr>",
            self.name().unwrap_or("(anonymous)"),
            self.ty()
        )
    }
}
impl DotAttributes for Bitfield {
    // One table row: "name : width" (C++ bitfield syntax) and the type id.
    fn dot_attributes<W>(
        &self,
        _ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        writeln!(
            out,
            "<tr><td>{} : {}</td><td>{:?}</td></tr>",
            self.name().unwrap_or("(anonymous)"),
            self.width(),
            self.ty()
        )
    }
}
/// A logical bitfield within some physical bitfield allocation unit.
#[derive(Debug)]
pub struct Bitfield {
    /// Index of the bit within this bitfield's allocation unit where this
    /// bitfield's bits begin (0-based).
    offset_into_unit: usize,

    /// The field data for this bitfield.
    data: FieldData,

    /// Name of the generated Rust getter for this bitfield.
    ///
    /// Should be assigned before codegen.
    getter_name: Option<String>,

    /// Name of the generated Rust setter for this bitfield.
    ///
    /// Should be assigned before codegen.
    setter_name: Option<String>,
}
impl Bitfield {
    /// Construct a new bitfield from a raw field that is known to carry a
    /// bitfield width.
    fn new(offset_into_unit: usize, raw: RawField) -> Bitfield {
        assert!(raw.bitfield_width().is_some());

        Bitfield {
            offset_into_unit,
            getter_name: None,
            setter_name: None,
            data: raw.0,
        }
    }

    /// Get the index of the bit within this bitfield's allocation unit where
    /// this bitfield begins.
    pub fn offset_into_unit(&self) -> usize {
        self.offset_into_unit
    }

    /// Get the mask value that when &'ed with this bitfield's allocation unit
    /// produces this bitfield's value.
    pub fn mask(&self) -> u64 {
        use std::u64;

        let width = self.width() as u64;
        // A full 64-bit-wide bitfield needs its mask spelled out directly,
        // since shifting by the full bit width would overflow.
        let unshifted_mask = if width == mem::size_of::<u64>() as u64 * 8 {
            u64::MAX
        } else {
            (1u64 << width) - 1u64
        };

        unshifted_mask << self.offset_into_unit()
    }

    /// Get the bit width of this bitfield.
    pub fn width(&self) -> u32 {
        self.data.bitfield_width().unwrap()
    }

    /// Name of the generated Rust getter for this bitfield.
    ///
    /// Panics if called before assigning bitfield accessor names or if
    /// this bitfield has no name.
    pub fn getter_name(&self) -> &str {
        assert!(
            self.name().is_some(),
            "`Bitfield::getter_name` called on anonymous field"
        );
        let name = self.getter_name.as_ref();
        name.expect(
            "`Bitfield::getter_name` should only be called after\
             assigning bitfield accessor names",
        )
    }

    /// Name of the generated Rust setter for this bitfield.
    ///
    /// Panics if called before assigning bitfield accessor names or if
    /// this bitfield has no name.
    pub fn setter_name(&self) -> &str {
        assert!(
            self.name().is_some(),
            "`Bitfield::setter_name` called on anonymous field"
        );
        let name = self.setter_name.as_ref();
        name.expect(
            "`Bitfield::setter_name` should only be called\
             after assigning bitfield accessor names",
        )
    }
}
/// `Bitfield` exposes the common field accessors by delegating each of them to
/// its inner `FieldData`.
impl FieldMethods for Bitfield {
    fn name(&self) -> Option<&str> {
        self.data.name()
    }
    fn ty(&self) -> TypeId {
        self.data.ty()
    }
    fn comment(&self) -> Option<&str> {
        self.data.comment()
    }
    fn bitfield_width(&self) -> Option<u32> {
        self.data.bitfield_width()
    }
    fn is_mutable(&self) -> bool {
        self.data.is_mutable()
    }
    fn annotations(&self) -> &Annotations {
        self.data.annotations()
    }
    fn offset(&self) -> Option<usize> {
        self.data.offset()
    }
}
/// A raw field might be either of a plain data member or a bitfield within a
/// bitfield allocation unit, but we haven't processed it and determined which
/// yet (which would involve allocating it into a bitfield unit if it is a
/// bitfield).
///
/// Newtype over `FieldData`; resolved into a `Field` by
/// `raw_fields_to_fields_and_bitfield_units`.
#[derive(Debug)]
struct RawField(FieldData);
impl RawField {
/// Construct a new `RawField`.
fn new(
name: Option<String>,
ty: TypeId,
comment: Option<String>,
annotations: Option<Annotations>,
bitfield_width: Option<u32>,
mutable: bool,
offset: Option<usize>,
) -> RawField {
RawField(FieldData {
name,
ty,
comment,
annotations: annotations.unwrap_or_default(),
bitfield_width,
mutable,
offset,
})
}
}
/// `RawField` exposes the common field accessors by delegating each of them to
/// its wrapped `FieldData`.
impl FieldMethods for RawField {
    fn name(&self) -> Option<&str> {
        self.0.name()
    }
    fn ty(&self) -> TypeId {
        self.0.ty()
    }
    fn comment(&self) -> Option<&str> {
        self.0.comment()
    }
    fn bitfield_width(&self) -> Option<u32> {
        self.0.bitfield_width()
    }
    fn is_mutable(&self) -> bool {
        self.0.is_mutable()
    }
    fn annotations(&self) -> &Annotations {
        self.0.annotations()
    }
    fn offset(&self) -> Option<usize> {
        self.0.offset()
    }
}
/// Convert the given ordered set of raw fields into a list of either plain data
/// members, and/or bitfield units containing multiple bitfields.
///
/// On success, the returned boolean is `true` iff at least one bitfield
/// allocation unit was created.
///
/// If we do not have the layout for a bitfield's type, then we can't reliably
/// compute its allocation unit. In such cases, we return an error.
fn raw_fields_to_fields_and_bitfield_units<I>(
    ctx: &BindgenContext,
    raw_fields: I,
    packed: bool,
) -> Result<(Vec<Field>, bool), ()>
where
    I: IntoIterator<Item = RawField>,
{
    // `fuse()` guarantees the iterator keeps yielding `None` once exhausted,
    // which the alternating loop below relies on when it re-polls the
    // iterator after each phase.
    let mut raw_fields = raw_fields.into_iter().fuse().peekable();
    let mut fields = vec![];
    let mut bitfield_unit_count = 0;

    loop {
        // While we have plain old data members, just keep adding them to our
        // resulting fields. We introduce a scope here so that we can use
        // `raw_fields` again after the `by_ref` iterator adaptor is dropped.
        {
            let non_bitfields = raw_fields
                .by_ref()
                .peeking_take_while(|f| f.bitfield_width().is_none())
                .map(|f| Field::DataMember(f.0));
            fields.extend(non_bitfields);
        }

        // Now gather all the consecutive bitfields. Only consecutive bitfields
        // may potentially share a bitfield allocation unit with each other in
        // the Itanium C++ ABI.
        let mut bitfields = raw_fields
            .by_ref()
            .peeking_take_while(|f| f.bitfield_width().is_some())
            .peekable();

        // No bitfields left means no data members left either (the first
        // phase drained them), so we're done.
        if bitfields.peek().is_none() {
            break;
        }

        bitfields_to_allocation_units(
            ctx,
            &mut bitfield_unit_count,
            &mut fields,
            bitfields,
            packed,
        )?;
    }

    assert!(
        raw_fields.next().is_none(),
        "The above loop should consume all items in `raw_fields`"
    );

    Ok((fields, bitfield_unit_count != 0))
}
/// Given a set of contiguous raw bitfields, group and allocate them into
/// (potentially multiple) bitfield units.
///
/// `bitfield_unit_count` is a running counter across units so each generated
/// unit gets a distinct `nth`; completed units are pushed onto `fields`.
///
/// Returns `Err(())` when a bitfield's type has no known layout, since the
/// allocation units cannot be computed reliably in that case.
fn bitfields_to_allocation_units<E, I>(
    ctx: &BindgenContext,
    bitfield_unit_count: &mut usize,
    fields: &mut E,
    raw_bitfields: I,
    packed: bool,
) -> Result<(), ()>
where
    E: Extend<Field>,
    I: IntoIterator<Item = RawField>,
{
    assert!(ctx.collected_typerefs());

    // NOTE: What follows is reverse-engineered from LLVM's
    // lib/AST/RecordLayoutBuilder.cpp
    //
    // FIXME(emilio): There are some differences between Microsoft and the
    // Itanium ABI, but we'll ignore those and stick to Itanium for now.
    //
    // Also, we need to handle packed bitfields and stuff.
    //
    // TODO(emilio): Take into account C++'s wide bitfields, and
    // packing, sigh.

    /// Close the current allocation unit: compute its final byte size and
    /// alignment and push it onto `fields` as a `Field::Bitfields`.
    fn flush_allocation_unit<E>(
        fields: &mut E,
        bitfield_unit_count: &mut usize,
        unit_size_in_bits: usize,
        unit_align_in_bits: usize,
        bitfields: Vec<Bitfield>,
        packed: bool,
    ) where
        E: Extend<Field>,
    {
        *bitfield_unit_count += 1;
        // Packed units are byte-aligned; otherwise round the alignment up to
        // a power-of-two number of bytes.
        let align = if packed {
            1
        } else {
            bytes_from_bits_pow2(unit_align_in_bits)
        };
        let size = align_to(unit_size_in_bits, align * 8) / 8;
        let layout = Layout::new(size, align);
        fields.extend(Some(Field::Bitfields(BitfieldUnit {
            nth: *bitfield_unit_count,
            layout,
            bitfields,
        })));
    }

    let mut unfilled_bits_in_unit = 0;
    let mut unit_size_in_bits = 0;
    let mut unit_align = 0;
    let mut bitfields_in_unit = vec![];

    // TODO(emilio): Determine this from attributes or pragma ms_struct
    // directives. Also, perhaps we should check if the target is MSVC?
    const IS_MS_STRUCT: bool = false;

    for bitfield in raw_bitfields {
        let bitfield_width = bitfield.bitfield_width().unwrap() as usize;
        let bitfield_layout =
            ctx.resolve_type(bitfield.ty()).layout(ctx).ok_or(())?;
        let bitfield_size = bitfield_layout.size;
        let bitfield_align = bitfield_layout.align;

        let mut offset = unit_size_in_bits;
        if !packed {
            if IS_MS_STRUCT {
                if unit_size_in_bits != 0 &&
                    (bitfield_width == 0 ||
                        bitfield_width > unfilled_bits_in_unit)
                {
                    // We've reached the end of this allocation unit, so flush
                    // it and its bitfields.
                    unit_size_in_bits =
                        align_to(unit_size_in_bits, unit_align * 8);
                    flush_allocation_unit(
                        fields,
                        bitfield_unit_count,
                        unit_size_in_bits,
                        unit_align,
                        mem::replace(&mut bitfields_in_unit, vec![]),
                        packed,
                    );

                    // Now we're working on a fresh bitfield allocation unit,
                    // so reset the current unit size and alignment.
                    offset = 0;
                    unit_align = 0;
                }
            } else {
                // Itanium: a zero-width bitfield, or one that would straddle
                // its own type's storage, starts at the next offset naturally
                // aligned for its type.
                if offset != 0 &&
                    (bitfield_width == 0 ||
                        (offset & (bitfield_align * 8 - 1)) +
                            bitfield_width >
                            bitfield_size * 8)
                {
                    offset = align_to(offset, bitfield_align * 8);
                }
            }
        }

        // According to the x86[-64] ABI spec: "Unnamed bit-fields’ types do not
        // affect the alignment of a structure or union". This makes sense: such
        // bit-fields are only used for padding, and we can't perform an
        // un-aligned read of something we can't read because we can't even name
        // it.
        if bitfield.name().is_some() {
            // NB: The `bitfield_width` here is completely, absolutely
            // intentional. Alignment of the allocation unit is based on the
            // maximum bitfield width, not (directly) on the bitfields' types'
            // alignment.
            unit_align = cmp::max(unit_align, bitfield_width);
        }

        // Always keep all bitfields around. While unnamed bitfields are used
        // for padding (and usually not needed hereafter), large unnamed
        // bitfields over their types' size cause weird allocation size
        // behavior from clang. Therefore, all bitfields need to be kept
        // around in order to check for this and make the struct opaque in
        // this case.
        bitfields_in_unit.push(Bitfield::new(offset, bitfield));
        unit_size_in_bits = offset + bitfield_width;

        // Compute what the physical unit's final size would be given what we
        // have seen so far, and use that to compute how many bits are still
        // available in the unit.
        let data_size = align_to(unit_size_in_bits, bitfield_align * 8);
        unfilled_bits_in_unit = data_size - unit_size_in_bits;
    }

    if unit_size_in_bits != 0 {
        // Flush the last allocation unit and its bitfields.
        flush_allocation_unit(
            fields,
            bitfield_unit_count,
            unit_size_in_bits,
            unit_align,
            bitfields_in_unit,
            packed,
        );
    }

    Ok(())
}
/// A compound structure's fields are initially raw, and have bitfields that
/// have not been grouped into allocation units. During this time, the fields
/// are mutable and we build them up during parsing.
///
/// Then, once resolving typerefs is completed, we compute all structs' fields'
/// bitfield allocation units, and they remain frozen and immutable forever
/// after.
#[derive(Debug)]
enum CompFields {
    /// Still parsing: a flat list of raw fields in declaration order.
    BeforeComputingBitfieldUnits(Vec<RawField>),
    /// Fully processed: plain members and/or bitfield units, plus a flag
    /// recording whether any bitfield units were created.
    AfterComputingBitfieldUnits {
        fields: Vec<Field>,
        has_bitfield_units: bool,
    },
    /// A bitfield's type had no known layout, so the allocation units (and
    /// hence this struct's layout) could not be computed.
    ErrorComputingBitfieldUnits,
}
/// Fields start life in the raw, still-parsing state with nothing collected.
impl Default for CompFields {
    fn default() -> CompFields {
        CompFields::BeforeComputingBitfieldUnits(Vec::new())
    }
}
impl CompFields {
    /// Record another raw field seen while parsing.
    ///
    /// Panics if the bitfield allocation units have already been computed,
    /// since the field list is frozen at that point.
    fn append_raw_field(&mut self, raw: RawField) {
        match *self {
            CompFields::BeforeComputingBitfieldUnits(ref mut raws) => {
                raws.push(raw);
            }
            _ => {
                panic!(
                    "Must not append new fields after computing bitfield allocation units"
                );
            }
        }
    }

    /// Transition from the raw-field state to processed fields and bitfield
    /// allocation units (or to the error state if a bitfield's type has no
    /// known layout).
    ///
    /// Panics if called more than once.
    fn compute_bitfield_units(&mut self, ctx: &BindgenContext, packed: bool) {
        // Move the raw fields out, leaving an empty vec behind so `self`
        // remains valid while the replacement state is being built.
        let raws = match *self {
            CompFields::BeforeComputingBitfieldUnits(ref mut raws) => {
                mem::replace(raws, vec![])
            }
            _ => {
                panic!("Already computed bitfield units");
            }
        };

        let result = raw_fields_to_fields_and_bitfield_units(ctx, raws, packed);

        match result {
            Ok((fields, has_bitfield_units)) => {
                *self = CompFields::AfterComputingBitfieldUnits {
                    fields,
                    has_bitfield_units,
                };
            }
            Err(()) => {
                *self = CompFields::ErrorComputingBitfieldUnits;
            }
        }
    }

    /// Give every anonymous data member a generated `__bindgen_anon_<n>` name,
    /// and assign getter/setter names to every named bitfield, appending
    /// `_bindgen_bitfield` when a name would collide with one of the type's
    /// methods.
    ///
    /// Panics if the bitfield units have not been computed yet; does nothing
    /// in the error state.
    fn deanonymize_fields(&mut self, ctx: &BindgenContext, methods: &[Method]) {
        let fields = match *self {
            CompFields::AfterComputingBitfieldUnits {
                ref mut fields, ..
            } => fields,
            // Nothing to do here.
            CompFields::ErrorComputingBitfieldUnits => return,
            CompFields::BeforeComputingBitfieldUnits(_) => {
                panic!("Not yet computed bitfield units.");
            }
        };

        // Does the type already have a method with this name (raw or
        // Rust-mangled)?
        fn has_method(
            methods: &[Method],
            ctx: &BindgenContext,
            name: &str,
        ) -> bool {
            methods.iter().any(|method| {
                let method_name = ctx.resolve_func(method.signature()).name();
                method_name == name || ctx.rust_mangle(&method_name) == name
            })
        }

        // The accessor names chosen for one bitfield, keyed below by the
        // bitfield's C name.
        struct AccessorNamesPair {
            getter: String,
            setter: String,
        }

        // Precompute collision-free getter/setter names for every *named*
        // bitfield; anonymous bitfields are filtered out and never get
        // accessors.
        let mut accessor_names: HashMap<String, AccessorNamesPair> = fields
            .iter()
            .flat_map(|field| match *field {
                Field::Bitfields(ref bu) => &*bu.bitfields,
                Field::DataMember(_) => &[],
            })
            .filter_map(|bitfield| bitfield.name())
            .map(|bitfield_name| {
                let bitfield_name = bitfield_name.to_string();
                let getter = {
                    let mut getter =
                        ctx.rust_mangle(&bitfield_name).to_string();
                    if has_method(methods, ctx, &getter) {
                        getter.push_str("_bindgen_bitfield");
                    }
                    getter
                };
                let setter = {
                    let setter = format!("set_{}", bitfield_name);
                    let mut setter = ctx.rust_mangle(&setter).to_string();
                    if has_method(methods, ctx, &setter) {
                        setter.push_str("_bindgen_bitfield");
                    }
                    setter
                };
                (bitfield_name, AccessorNamesPair { getter, setter })
            })
            .collect();

        let mut anon_field_counter = 0;
        for field in fields.iter_mut() {
            match *field {
                // Named data members are left alone; anonymous ones get a
                // generated name.
                Field::DataMember(FieldData { ref mut name, .. }) => {
                    if let Some(_) = *name {
                        continue;
                    }

                    anon_field_counter += 1;
                    let generated_name =
                        format!("__bindgen_anon_{}", anon_field_counter);
                    *name = Some(generated_name);
                }
                // Named bitfields receive the accessor names computed above.
                Field::Bitfields(ref mut bu) => {
                    for bitfield in &mut bu.bitfields {
                        if bitfield.name().is_none() {
                            continue;
                        }

                        if let Some(AccessorNamesPair { getter, setter }) =
                            accessor_names.remove(bitfield.name().unwrap())
                        {
                            bitfield.getter_name = Some(getter);
                            bitfield.setter_name = Some(setter);
                        }
                    }
                }
            }
        }
    }
}
/// Graph traversal for `CompFields`, valid in every state: raw fields emit
/// their type edge directly, processed fields delegate to `Field::trace`, and
/// the error state has nothing to visit.
impl Trace for CompFields {
    type Extra = ();

    fn trace<T>(&self, context: &BindgenContext, tracer: &mut T, _: &())
    where
        T: Tracer,
    {
        match *self {
            CompFields::BeforeComputingBitfieldUnits(ref fields) => {
                for field in fields {
                    tracer.visit_kind(field.ty().into(), EdgeKind::Field);
                }
            }
            CompFields::AfterComputingBitfieldUnits { ref fields, .. } => {
                for field in fields {
                    field.trace(context, tracer, &());
                }
            }
            CompFields::ErrorComputingBitfieldUnits => {}
        }
    }
}
/// Common data shared across different field types.
#[derive(Clone, Debug)]
pub struct FieldData {
    /// The name of the field; `None` for unnamed fields such as anonymous
    /// bitfields used as padding.
    name: Option<String>,

    /// The inner type.
    ty: TypeId,

    /// The doc comment on the field, if any.
    comment: Option<String>,

    /// Annotations for this field, or the default (empty) set.
    annotations: Annotations,

    /// If this field is a bitfield, how many bits it occupies; `None` for
    /// plain data members.
    bitfield_width: Option<u32>,

    /// Whether the C++ field is marked as `mutable`.
    mutable: bool,

    /// The offset of the field (in bits), when known.
    offset: Option<usize>,
}
/// `FieldData` is the canonical holder of the common field data, so every
/// accessor just reads the corresponding member.
impl FieldMethods for FieldData {
    fn name(&self) -> Option<&str> {
        self.name.as_ref().map(String::as_str)
    }
    fn ty(&self) -> TypeId {
        self.ty
    }
    fn comment(&self) -> Option<&str> {
        self.comment.as_ref().map(String::as_str)
    }
    fn bitfield_width(&self) -> Option<u32> {
        self.bitfield_width
    }
    fn is_mutable(&self) -> bool {
        self.mutable
    }
    fn annotations(&self) -> &Annotations {
        &self.annotations
    }
    fn offset(&self) -> Option<usize> {
        self.offset
    }
}
/// The kind of inheritance a base class is using.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BaseKind {
    /// Normal (non-virtual) inheritance, like:
    ///
    /// ```cpp
    /// class A : public B {};
    /// ```
    Normal,
    /// Virtual inheritance, like:
    ///
    /// ```cpp
    /// class A: public virtual B {};
    /// ```
    Virtual,
}
/// A base class of a compound type.
#[derive(Clone, Debug)]
pub struct Base {
    /// The type of this base class.
    pub ty: TypeId,
    /// The kind of inheritance we're doing (normal or virtual).
    pub kind: BaseKind,
    /// Name of the generated field in which this base should be stored
    /// (`_base`, `_base_1`, …).
    pub field_name: String,
}
impl Base {
    /// Whether this base class is inherited virtually.
    pub fn is_virtual(&self) -> bool {
        self.kind == BaseKind::Virtual
    }

    /// Whether this base class should have its own field for storage.
    pub fn requires_storage(&self, ctx: &BindgenContext) -> bool {
        // Virtual bases are already taken into account by the vtable pointer.
        //
        // FIXME(emilio): Is this always right?
        //
        // Zero-sized bases are also excluded from the base chain: they would
        // otherwise contribute to our size via the dummy field we insert for
        // zero-sized types.
        !self.is_virtual() && !self.ty.is_zero_sized(ctx)
    }
}
/// A compound type.
///
/// Either a struct or union, a compound type is built up from the combination
/// of fields which also are associated with their own (potentially compound)
/// type.
#[derive(Debug)]
pub struct CompInfo {
    /// Whether this is a struct or a union.
    kind: CompKind,

    /// The members of this struct or union. Starts in the raw state and is
    /// frozen once bitfield units are computed (see `CompFields`).
    fields: CompFields,

    /// The abstract template parameters of this class. Note that these are NOT
    /// concrete template arguments, and should always be a
    /// `Type(TypeKind::TypeParam(name))`. For concrete template arguments, see
    /// `TypeKind::TemplateInstantiation`.
    template_params: Vec<TypeId>,

    /// The method declarations inside this class, if in C++ mode.
    methods: Vec<Method>,

    /// The different constructors this struct or class contains.
    constructors: Vec<FunctionId>,

    /// The destructor of this type, if any. The `MethodKind` records whether
    /// this destructor is virtual.
    destructor: Option<(MethodKind, FunctionId)>,

    /// Vector of classes this one inherits from.
    base_members: Vec<Base>,

    /// The inner types that were declared inside this class, in something like:
    ///
    /// class Foo {
    ///     typedef int FooTy;
    ///     struct Bar {
    ///         int baz;
    ///     };
    /// }
    ///
    /// static Foo::Bar const = {3};
    inner_types: Vec<TypeId>,

    /// Set of static constants declared inside this class.
    inner_vars: Vec<VarId>,

    /// Whether this type should generate an vtable (TODO: Should be able to
    /// look at the virtual methods and ditch this field).
    has_own_virtual_method: bool,

    /// Whether this type has a destructor.
    has_destructor: bool,

    /// Whether this type has a base type with more than one member.
    ///
    /// TODO: We should be able to compute this.
    has_nonempty_base: bool,

    /// If this type has a template parameter which is not a type (e.g.: a
    /// size_t)
    has_non_type_template_params: bool,

    /// Whether we saw `__attribute__((packed))` on or within this type.
    packed_attr: bool,

    /// Used to know if we've found an opaque attribute that could cause us to
    /// generate a type with invalid layout. This is explicitly used to avoid us
    /// generating bad alignments when parsing types like max_align_t.
    ///
    /// It's not clear what the behavior should be here, if generating the item
    /// and pray, or behave as an opaque type.
    found_unknown_attr: bool,

    /// Used to indicate when a struct has been forward declared. Usually used
    /// in headers so that APIs can't modify them directly.
    is_forward_declaration: bool,
}
impl CompInfo {
    /// Construct a new, empty compound type of the given kind; members are
    /// filled in during parsing (see `from_ty`).
    pub fn new(kind: CompKind) -> Self {
        CompInfo {
            kind,
            fields: CompFields::default(),
            template_params: vec![],
            methods: vec![],
            constructors: vec![],
            destructor: None,
            base_members: vec![],
            inner_types: vec![],
            inner_vars: vec![],
            has_own_virtual_method: false,
            has_destructor: false,
            has_nonempty_base: false,
            has_non_type_template_params: false,
            packed_attr: false,
            found_unknown_attr: false,
            is_forward_declaration: false,
        }
    }

    /// Compute the layout of this type.
    ///
    /// This is called as a fallback under some circumstances where LLVM doesn't
    /// give us the correct layout.
    ///
    /// If we're a union without known layout, we try to compute it from our
    /// members. This is not ideal, but clang fails to report the size for these
    /// kind of unions, see test/headers/template_union.hpp
    pub fn layout(&self, ctx: &BindgenContext) -> Option<Layout> {
        // We can't do better than clang here, sorry: this fallback only
        // applies to unions.
        if self.kind == CompKind::Struct {
            return None;
        }

        // By definition, we don't have the right layout information here if
        // we're a forward declaration.
        if self.is_forward_declaration() {
            return None;
        }

        // empty union case
        if self.fields().is_empty() {
            return None;
        }

        // A union is as large as its largest member and as aligned as its
        // most-aligned one.
        let mut max_size = 0;
        // Don't allow align(0)
        let mut max_align = 1;
        for field in self.fields() {
            let field_layout = field.layout(ctx);

            if let Some(layout) = field_layout {
                max_size = cmp::max(max_size, layout.size);
                max_align = cmp::max(max_align, layout.align);
            }
        }

        Some(Layout::new(max_size, max_align))
    }

    /// Get this type's set of fields.
    ///
    /// Panics if the bitfield allocation units have not been computed yet;
    /// returns an empty slice if computing them failed.
    pub fn fields(&self) -> &[Field] {
        match self.fields {
            CompFields::ErrorComputingBitfieldUnits => &[],
            CompFields::AfterComputingBitfieldUnits { ref fields, .. } => {
                fields
            }
            CompFields::BeforeComputingBitfieldUnits(_) => {
                panic!("Should always have computed bitfield units first");
            }
        }
    }

    /// Whether any bitfield allocation units were created for this type.
    ///
    /// Panics if the units have not been computed yet.
    fn has_bitfields(&self) -> bool {
        match self.fields {
            CompFields::ErrorComputingBitfieldUnits => false,
            CompFields::AfterComputingBitfieldUnits {
                has_bitfield_units,
                ..
            } => has_bitfield_units,
            CompFields::BeforeComputingBitfieldUnits(_) => {
                panic!("Should always have computed bitfield units first");
            }
        }
    }

    /// Returns whether we have a too large bitfield unit, in which case we may
    /// not be able to derive some of the things we should be able to normally
    /// derive.
    pub fn has_too_large_bitfield_unit(&self) -> bool {
        if !self.has_bitfields() {
            return false;
        }
        // "Too large" means larger than the array size limit for which Rust
        // derives traits automatically.
        self.fields().iter().any(|field| match *field {
            Field::DataMember(..) => false,
            Field::Bitfields(ref unit) => {
                unit.layout.size > RUST_DERIVE_IN_ARRAY_LIMIT
            }
        })
    }

    /// Does this type have any template parameters that aren't types
    /// (e.g. int)?
    pub fn has_non_type_template_params(&self) -> bool {
        self.has_non_type_template_params
    }

    /// Do we see a virtual function during parsing?
    /// Get the has_own_virtual_method boolean.
    pub fn has_own_virtual_method(&self) -> bool {
        self.has_own_virtual_method
    }

    /// Did we see a destructor when parsing this type?
    pub fn has_own_destructor(&self) -> bool {
        self.has_destructor
    }

    /// Get this type's set of methods.
    pub fn methods(&self) -> &[Method] {
        &self.methods
    }

    /// Get this type's set of constructors.
    pub fn constructors(&self) -> &[FunctionId] {
        &self.constructors
    }

    /// Get this type's destructor, if any.
    pub fn destructor(&self) -> Option<(MethodKind, FunctionId)> {
        self.destructor
    }

    /// What kind of compound type is this?
    pub fn kind(&self) -> CompKind {
        self.kind
    }

    /// Is this a union?
    pub fn is_union(&self) -> bool {
        self.kind() == CompKind::Union
    }

    /// The set of types that this one inherits from.
    pub fn base_members(&self) -> &[Base] {
        &self.base_members
    }

    /// Construct a new compound type from a Clang type, walking the cursor's
    /// AST children to collect fields, bases, methods, inner types and
    /// attributes.
    pub fn from_ty(
        potential_id: ItemId,
        ty: &clang::Type,
        location: Option<clang::Cursor>,
        ctx: &mut BindgenContext,
    ) -> Result<Self, ParseError> {
        use clang_sys::*;
        assert!(
            ty.template_args().is_none(),
            "We handle template instantiations elsewhere"
        );

        // Prefer the type's own declaration cursor; fall back to `location`
        // when the declaration doesn't tell us the comp kind.
        let mut cursor = ty.declaration();
        let mut kind = Self::kind_from_cursor(&cursor);
        if kind.is_err() {
            if let Some(location) = location {
                kind = Self::kind_from_cursor(&location);
                cursor = location;
            }
        }

        let kind = kind?;

        debug!("CompInfo::from_ty({:?}, {:?})", kind, cursor);

        let mut ci = CompInfo::new(kind);
        ci.is_forward_declaration =
            location.map_or(true, |cur| match cur.kind() {
                CXCursor_StructDecl | CXCursor_UnionDecl |
                CXCursor_ClassDecl => !cur.is_definition(),
                _ => false,
            });

        // An anonymous struct/union decl seen just before this child may turn
        // out to be an unnamed field; we keep it pending here until we can
        // tell (see the inner-type arm below).
        let mut maybe_anonymous_struct_field = None;
        cursor.visit(|cur| {
            if cur.kind() != CXCursor_FieldDecl {
                if let Some((ty, clang_ty, offset)) =
                    maybe_anonymous_struct_field.take()
                {
                    if cur.kind() == CXCursor_TypedefDecl &&
                        cur.typedef_type().unwrap().canonical_type() ==
                            clang_ty
                    {
                        // Typedefs of anonymous structs appear later in the ast
                        // than the struct itself, that would otherwise be an
                        // anonymous field. Detect that case here, and do
                        // nothing.
                    } else {
                        // The pending anonymous decl really was an unnamed
                        // field; materialize it.
                        let field = RawField::new(
                            None, ty, None, None, None, false, offset,
                        );
                        ci.fields.append_raw_field(field);
                    }
                }
            }

            match cur.kind() {
                CXCursor_FieldDecl => {
                    if let Some((ty, clang_ty, offset)) =
                        maybe_anonymous_struct_field.take()
                    {
                        // If this field's own type is the pending anonymous
                        // decl, it's a named field of that type, not an
                        // unnamed field.
                        let mut used = false;
                        cur.visit(|child| {
                            if child.cur_type() == clang_ty {
                                used = true;
                            }
                            CXChildVisit_Continue
                        });

                        if !used {
                            let field = RawField::new(
                                None, ty, None, None, None, false, offset,
                            );
                            ci.fields.append_raw_field(field);
                        }
                    }

                    let bit_width = cur.bit_width();
                    let field_type = Item::from_ty_or_ref(
                        cur.cur_type(),
                        cur,
                        Some(potential_id),
                        ctx,
                    );

                    let comment = cur.raw_comment();
                    let annotations = Annotations::new(&cur);
                    let name = cur.spelling();
                    // NOTE(review): this queries `cursor` (the enclosing
                    // record declaration), not `cur` (the field being
                    // visited) — confirm whether `cur.is_mutable_field()`
                    // was intended here.
                    let is_mutable = cursor.is_mutable_field();
                    let offset = cur.offset_of_field().ok();

                    // Name can be empty if there are bitfields, for example,
                    // see tests/headers/struct_with_bitfields.h
                    assert!(
                        !name.is_empty() || bit_width.is_some(),
                        "Empty field name?"
                    );

                    let name = if name.is_empty() { None } else { Some(name) };

                    let field = RawField::new(
                        name,
                        field_type,
                        comment,
                        annotations,
                        bit_width,
                        is_mutable,
                        offset,
                    );
                    ci.fields.append_raw_field(field);

                    // Now we look for things like attributes and stuff.
                    cur.visit(|cur| {
                        if cur.kind() == CXCursor_UnexposedAttr {
                            ci.found_unknown_attr = true;
                        }
                        CXChildVisit_Continue
                    });
                }
                CXCursor_UnexposedAttr => {
                    ci.found_unknown_attr = true;
                }
                CXCursor_EnumDecl |
                CXCursor_TypeAliasDecl |
                CXCursor_TypeAliasTemplateDecl |
                CXCursor_TypedefDecl |
                CXCursor_StructDecl |
                CXCursor_UnionDecl |
                CXCursor_ClassTemplate |
                CXCursor_ClassDecl => {
                    // We can find non-semantic children here, clang uses a
                    // StructDecl to note incomplete structs that haven't been
                    // forward-declared before, see [1].
                    //
                    // Also, clang seems to scope struct definitions inside
                    // unions, and other named struct definitions inside other
                    // structs to the whole translation unit.
                    //
                    // Let's just assume that if the cursor we've found is a
                    // definition, it's a valid inner type.
                    //
                    // [1]: https://github.com/rust-lang/rust-bindgen/issues/482
                    let is_inner_struct =
                        cur.semantic_parent() == cursor || cur.is_definition();
                    if !is_inner_struct {
                        return CXChildVisit_Continue;
                    }

                    // Even if this is a definition, we may not be the semantic
                    // parent, see #1281.
                    let inner = Item::parse(cur, Some(potential_id), ctx)
                        .expect("Inner ClassDecl");

                    let inner = inner.expect_type_id(ctx);

                    ci.inner_types.push(inner);

                    // A declaration of an union or a struct without name could
                    // also be an unnamed field, unfortunately.
                    if cur.spelling().is_empty() &&
                        cur.kind() != CXCursor_EnumDecl
                    {
                        let ty = cur.cur_type();
                        let offset = cur.offset_of_field().ok();
                        maybe_anonymous_struct_field =
                            Some((inner, ty, offset));
                    }
                }
                CXCursor_PackedAttr => {
                    ci.packed_attr = true;
                }
                CXCursor_TemplateTypeParameter => {
                    let param = Item::type_param(None, cur, ctx).expect(
                        "Item::type_param should't fail when pointing \
                         at a TemplateTypeParameter",
                    );
                    ci.template_params.push(param);
                }
                CXCursor_CXXBaseSpecifier => {
                    let is_virtual_base = cur.is_virtual_base();
                    ci.has_own_virtual_method |= is_virtual_base;

                    let kind = if is_virtual_base {
                        BaseKind::Virtual
                    } else {
                        BaseKind::Normal
                    };

                    // First base is stored as `_base`, the rest as
                    // `_base_<n>`.
                    let field_name = match ci.base_members.len() {
                        0 => "_base".into(),
                        n => format!("_base_{}", n),
                    };
                    let type_id =
                        Item::from_ty_or_ref(cur.cur_type(), cur, None, ctx);
                    ci.base_members.push(Base {
                        ty: type_id,
                        kind,
                        field_name,
                    });
                }
                CXCursor_Constructor | CXCursor_Destructor |
                CXCursor_CXXMethod => {
                    let is_virtual = cur.method_is_virtual();
                    let is_static = cur.method_is_static();
                    debug_assert!(!(is_static && is_virtual), "How?");

                    ci.has_destructor |= cur.kind() == CXCursor_Destructor;
                    ci.has_own_virtual_method |= is_virtual;

                    // This used to not be here, but then I tried generating
                    // stylo bindings with this (without path filters), and
                    // cried a lot with a method in gfx/Point.h
                    // (ToUnknownPoint), that somehow was causing the same type
                    // to be inserted in the map two times.
                    //
                    // I couldn't make a reduced test case, but anyway...
                    // Methods of template functions not only used to be inlined,
                    // but also instantiated, and we wouldn't be able to call
                    // them, so just bail out.
                    if !ci.template_params.is_empty() {
                        return CXChildVisit_Continue;
                    }

                    // NB: This gets us an owned `Function`, not a
                    // `FunctionSig`.
                    let signature =
                        match Item::parse(cur, Some(potential_id), ctx) {
                            Ok(item)
                                if ctx
                                    .resolve_item(item)
                                    .kind()
                                    .is_function() =>
                            {
                                item
                            }
                            _ => return CXChildVisit_Continue,
                        };

                    let signature = signature.expect_function_id(ctx);

                    match cur.kind() {
                        CXCursor_Constructor => {
                            ci.constructors.push(signature);
                        }
                        CXCursor_Destructor => {
                            let kind = if is_virtual {
                                MethodKind::VirtualDestructor {
                                    pure_virtual: cur.method_is_pure_virtual(),
                                }
                            } else {
                                MethodKind::Destructor
                            };
                            ci.destructor = Some((kind, signature));
                        }
                        CXCursor_CXXMethod => {
                            let is_const = cur.method_is_const();
                            let method_kind = if is_static {
                                MethodKind::Static
                            } else if is_virtual {
                                MethodKind::Virtual {
                                    pure_virtual: cur.method_is_pure_virtual(),
                                }
                            } else {
                                MethodKind::Normal
                            };

                            let method =
                                Method::new(method_kind, signature, is_const);

                            ci.methods.push(method);
                        }
                        _ => unreachable!("How can we see this here?"),
                    }
                }
                CXCursor_NonTypeTemplateParameter => {
                    ci.has_non_type_template_params = true;
                }
                CXCursor_VarDecl => {
                    // Only externally-visible static members are kept.
                    let linkage = cur.linkage();
                    if linkage != CXLinkage_External &&
                        linkage != CXLinkage_UniqueExternal
                    {
                        return CXChildVisit_Continue;
                    }

                    let visibility = cur.visibility();
                    if visibility != CXVisibility_Default {
                        return CXChildVisit_Continue;
                    }

                    if let Ok(item) = Item::parse(cur, Some(potential_id), ctx)
                    {
                        ci.inner_vars.push(item.as_var_id_unchecked());
                    }
                }
                // Intentionally not handled
                CXCursor_CXXAccessSpecifier |
                CXCursor_CXXFinalAttr |
                CXCursor_FunctionTemplate |
                CXCursor_ConversionFunction => {}
                _ => {
                    warn!(
                        "unhandled comp member `{}` (kind {:?}) in `{}` ({})",
                        cur.spelling(),
                        clang::kind_to_str(cur.kind()),
                        cursor.spelling(),
                        cur.location()
                    );
                }
            }
            CXChildVisit_Continue
        });

        // An anonymous decl still pending when the visit ends is an unnamed
        // trailing field.
        if let Some((ty, _, offset)) = maybe_anonymous_struct_field {
            let field =
                RawField::new(None, ty, None, None, None, false, offset);
            ci.fields.append_raw_field(field);
        }

        Ok(ci)
    }

    /// Map a clang cursor kind to struct/union, following through template
    /// declarations; `Err(Continue)` for anything that isn't a compound type.
    fn kind_from_cursor(
        cursor: &clang::Cursor,
    ) -> Result<CompKind, ParseError> {
        use clang_sys::*;
        Ok(match cursor.kind() {
            CXCursor_UnionDecl => CompKind::Union,
            CXCursor_ClassDecl | CXCursor_StructDecl => CompKind::Struct,
            CXCursor_CXXBaseSpecifier |
            CXCursor_ClassTemplatePartialSpecialization |
            CXCursor_ClassTemplate => match cursor.template_kind() {
                CXCursor_UnionDecl => CompKind::Union,
                _ => CompKind::Struct,
            },
            _ => {
                warn!("Unknown kind for comp type: {:?}", cursor);
                return Err(ParseError::Continue);
            }
        })
    }

    /// Get the set of types that were declared within this compound type
    /// (e.g. nested class definitions).
    pub fn inner_types(&self) -> &[TypeId] {
        &self.inner_types
    }

    /// Get the set of static variables declared within this compound type.
    pub fn inner_vars(&self) -> &[VarId] {
        &self.inner_vars
    }

    /// Have we found a field with an opaque type that could potentially mess up
    /// the layout of this compound type?
    pub fn found_unknown_attr(&self) -> bool {
        self.found_unknown_attr
    }

    /// Is this compound type packed?
    pub fn is_packed(
        &self,
        ctx: &BindgenContext,
        layout: &Option<Layout>,
    ) -> bool {
        if self.packed_attr {
            return true;
        }

        // Even though `libclang` doesn't expose `#pragma packed(...)`, we can
        // detect it through its effects: a field that requires more alignment
        // than the whole struct reports is a giveaway.
        if let Some(ref parent_layout) = *layout {
            if self.fields().iter().any(|f| match *f {
                Field::Bitfields(ref unit) => {
                    unit.layout().align > parent_layout.align
                }
                Field::DataMember(ref data) => {
                    let field_ty = ctx.resolve_type(data.ty());
                    field_ty.layout(ctx).map_or(false, |field_ty_layout| {
                        field_ty_layout.align > parent_layout.align
                    })
                }
            }) {
                info!("Found a struct that was defined within `#pragma packed(...)`");
                return true;
            } else if self.has_own_virtual_method {
                // A vtable pointer forces pointer alignment unless packed.
                if parent_layout.align == 1 {
                    return true;
                }
            }
        }

        false
    }

    /// Returns true if compound type has been forward declared
    pub fn is_forward_declaration(&self) -> bool {
        self.is_forward_declaration
    }

    /// Compute this compound structure's bitfield allocation units.
    pub fn compute_bitfield_units(&mut self, ctx: &BindgenContext) {
        // TODO(emilio): If we could detect #pragma packed here we'd fix layout
        // tests in divide-by-zero-in-struct-layout.rs
        self.fields.compute_bitfield_units(ctx, self.packed_attr)
    }

    /// Assign for each anonymous field a generated name.
    pub fn deanonymize_fields(&mut self, ctx: &BindgenContext) {
        self.fields.deanonymize_fields(ctx, &self.methods);
    }

    /// Returns whether the current union can be represented as a Rust `union`
    ///
    /// Requirements:
    ///     1. Current RustTarget allows for `untagged_union`
    ///     2. Each field can derive `Copy`
    pub fn can_be_rust_union(&self, ctx: &BindgenContext) -> bool {
        if !ctx.options().rust_features().untagged_union {
            return false;
        }

        if self.is_forward_declaration() {
            return false;
        }

        // Bitfield units are plain integer storage and are always `Copy`.
        self.fields().iter().all(|f| match *f {
            Field::DataMember(ref field_data) => {
                field_data.ty().can_derive_copy(ctx)
            }
            Field::Bitfields(_) => true,
        })
    }
}
impl DotAttributes for CompInfo {
    /// Render this compound type's flags and fields as Graphviz DOT table
    /// rows. Boolean flags are only emitted when set.
    fn dot_attributes<W>(
        &self,
        ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        writeln!(out, "<tr><td>CompKind</td><td>{:?}</td></tr>", self.kind)?;

        if self.has_own_virtual_method {
            writeln!(out, "<tr><td>has_vtable</td><td>true</td></tr>")?;
        }

        if self.has_destructor {
            writeln!(out, "<tr><td>has_destructor</td><td>true</td></tr>")?;
        }

        if self.has_nonempty_base {
            writeln!(out, "<tr><td>has_nonempty_base</td><td>true</td></tr>")?;
        }

        if self.has_non_type_template_params {
            writeln!(
                out,
                "<tr><td>has_non_type_template_params</td><td>true</td></tr>"
            )?;
        }

        if self.packed_attr {
            writeln!(out, "<tr><td>packed_attr</td><td>true</td></tr>")?;
        }

        if self.is_forward_declaration {
            writeln!(
                out,
                "<tr><td>is_forward_declaration</td><td>true</td></tr>"
            )?;
        }

        if !self.fields().is_empty() {
            // Nested table: one row per field (see `Field::dot_attributes`).
            writeln!(out, r#"<tr><td>fields</td><td><table border="0">"#)?;
            for field in self.fields() {
                field.dot_attributes(ctx, out)?;
            }
            writeln!(out, "</table></td></tr>")?;
        }

        Ok(())
    }
}
impl IsOpaque for CompInfo {
    type Extra = Option<Layout>;

    /// Whether this comp must be treated as an opaque blob of bytes rather
    /// than generated field-by-field. `layout` is the comp's computed layout
    /// when known.
    fn is_opaque(&self, ctx: &BindgenContext, layout: &Option<Layout>) -> bool {
        // Non-type template parameters can't be represented in the generated
        // Rust, so the whole type goes opaque.
        if self.has_non_type_template_params {
            return true;
        }

        // When we do not have the layout for a bitfield's type (for example, it
        // is a type parameter), then we can't compute bitfield units. We are
        // left with no choice but to make the whole struct opaque, or else we
        // might generate structs with incorrect sizes and alignments.
        if let CompFields::ErrorComputingBitfieldUnits = self.fields {
            return true;
        }

        // Bitfields with a width that is larger than their unit's width have
        // some strange things going on, and the best we can do is make the
        // whole struct opaque.
        // NOTE(review): `width() / 8` uses integer division, so a width that
        // exceeds the unit size by less than a full byte (e.g. 9 bits in a
        // 1-byte unit) does NOT trigger this check — confirm that's intended.
        if self.fields().iter().any(|f| match *f {
            Field::DataMember(_) => false,
            Field::Bitfields(ref unit) => unit.bitfields().iter().any(|bf| {
                let bitfield_layout = ctx
                    .resolve_type(bf.ty())
                    .layout(ctx)
                    .expect("Bitfield without layout? Gah!");
                bf.width() / 8 > bitfield_layout.size as u32
            }),
        }) {
            return true;
        }

        if !ctx.options().rust_features().repr_packed_n {
            // If we don't have `#[repr(packed(N)]`, the best we can
            // do is make this struct opaque.
            //
            // See https://github.com/rust-lang/rust-bindgen/issues/537 and
            // https://github.com/rust-lang/rust/issues/33158
            if self.is_packed(ctx, layout) &&
                layout.map_or(false, |l| l.align > 1)
            {
                warn!("Found a type that is both packed and aligned to greater than \
                       1; Rust before version 1.33 doesn't have `#[repr(packed(N))]`, so we \
                       are treating it as opaque. You may wish to set bindgen's rust target \
                       version to 1.33 or later to enable `#[repr(packed(N))]` support.");
                return true;
            }
        }

        false
    }
}
impl TemplateParameters for CompInfo {
    /// The comp's own template parameters are stored directly on it; hand
    /// back an owned copy so callers can keep it independently of `self`.
    fn self_template_params(&self, _ctx: &BindgenContext) -> Vec<TypeId> {
        self.template_params.to_vec()
    }
}
impl Trace for CompInfo {
    type Extra = Item;

    /// Reports every IR edge out of this comp to `tracer`: template params,
    /// inner types/vars, methods, ctors/dtor, and — unless the item is
    /// opaque — base members and fields.
    fn trace<T>(&self, context: &BindgenContext, tracer: &mut T, item: &Item)
    where
        T: Tracer,
    {
        for p in item.all_template_params(context) {
            tracer.visit_kind(p.into(), EdgeKind::TemplateParameterDefinition);
        }

        for ty in self.inner_types() {
            tracer.visit_kind(ty.into(), EdgeKind::InnerType);
        }

        for &var in self.inner_vars() {
            tracer.visit_kind(var.into(), EdgeKind::InnerVar);
        }

        for method in self.methods() {
            tracer.visit_kind(method.signature.into(), EdgeKind::Method);
        }

        if let Some((_kind, signature)) = self.destructor() {
            tracer.visit_kind(signature.into(), EdgeKind::Destructor);
        }

        for ctor in self.constructors() {
            tracer.visit_kind(ctor.into(), EdgeKind::Constructor);
        }

        // Base members and fields are not generated for opaque types (but all
        // of the above things are) so stop here.
        if item.is_opaque(context, &()) {
            return;
        }

        for base in self.base_members() {
            tracer.visit_kind(base.ty.into(), EdgeKind::BaseMember);
        }

        self.fields.trace(context, tracer, &());
    }
}
|
// Copyright 2017 GFX developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use super::*;
use cocoa::foundation::NSUInteger;
use foreign_types::ForeignType;
use objc::runtime::{Object, NO, YES};
use std::ffi::CStr;
/// Raw Objective-C MTLVertexAttribute type; wrapped by `VertexAttribute`.
pub enum MTLVertexAttribute {}

foreign_obj_type! {
    type CType = MTLVertexAttribute;
    pub struct VertexAttribute;
    pub struct VertexAttributeRef;
}

impl VertexAttributeRef {
    /// The attribute's name, borrowed from the underlying NSString.
    pub fn name(&self) -> &str {
        unsafe {
            let name = msg_send![self, name];
            crate::nsstring_as_str(name)
        }
    }

    /// Index of this attribute in the function's argument table.
    pub fn attribute_index(&self) -> u64 {
        unsafe { msg_send![self, attributeIndex] }
    }

    /// The attribute's data type.
    pub fn attribute_type(&self) -> MTLDataType {
        unsafe { msg_send![self, attributeType] }
    }

    /// Whether this attribute is actually used by the function.
    pub fn is_active(&self) -> bool {
        unsafe {
            // ObjC BOOL: anything other than YES/NO is malformed.
            match msg_send![self, isActive] {
                YES => true,
                NO => false,
                _ => unreachable!(),
            }
        }
    }
}
/// Which pipeline stage a Metal function belongs to.
#[repr(u64)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub enum MTLFunctionType {
    Vertex = 1,
    Fragment = 2,
    Kernel = 3,
}

/// Raw Objective-C MTLFunction type; wrapped by `Function`.
pub enum MTLFunction {}

foreign_obj_type! {
    type CType = MTLFunction;
    pub struct Function;
    pub struct FunctionRef;
}

impl FunctionRef {
    /// The function's name, borrowed from the underlying NSString.
    pub fn name(&self) -> &str {
        unsafe {
            let name = msg_send![self, name];
            crate::nsstring_as_str(name)
        }
    }

    /// The pipeline stage this function targets.
    pub fn function_type(&self) -> MTLFunctionType {
        unsafe { msg_send![self, functionType] }
    }

    /// Vertex attributes declared by this function (vertex functions only).
    pub fn vertex_attributes(&self) -> &Array<VertexAttribute> {
        unsafe { msg_send![self, vertexAttributes] }
    }

    /// Creates an argument encoder for the argument buffer at `buffer_index`.
    /// `new*` selectors follow the ObjC ownership convention (caller owns the
    /// result), hence the owning `from_ptr` wrapper.
    pub fn new_argument_encoder(&self, buffer_index: NSUInteger) -> ArgumentEncoder {
        unsafe {
            let ptr = msg_send![self, newArgumentEncoderWithBufferIndex: buffer_index];
            ArgumentEncoder::from_ptr(ptr)
        }
    }

    /// Raw NSDictionary of the function's constants; the caller is
    /// responsible for handling the ObjC object.
    pub fn function_constants_dictionary(&self) -> *mut Object {
        unsafe { msg_send![self, functionConstantsDictionary] }
    }
}
/// Metal Shading Language version, encoded as `(major << 16) | minor`.
#[repr(u64)]
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub enum MTLLanguageVersion {
    V1_0 = 0x10000,
    V1_1 = 0x10001,
    V1_2 = 0x10002,
    V2_0 = 0x20000,
    V2_1 = 0x20001,
    V2_2 = 0x20002,
}

/// Raw Objective-C MTLFunctionConstantValues type.
pub enum MTLFunctionConstantValues {}

foreign_obj_type! {
    type CType = MTLFunctionConstantValues;
    pub struct FunctionConstantValues;
    pub struct FunctionConstantValuesRef;
}

impl FunctionConstantValues {
    /// Creates an empty set of function constant values.
    pub fn new() -> Self {
        unsafe {
            let class = class!(MTLFunctionConstantValues);
            msg_send![class, new]
        }
    }
}

impl FunctionConstantValuesRef {
    /// Sets the constant at `index` to the value behind `value`.
    ///
    /// # Safety
    /// `value` must point to a live value whose size and layout match `ty`.
    pub unsafe fn set_constant_value_at_index(
        &self,
        index: NSUInteger,
        ty: MTLDataType,
        value: *const std::os::raw::c_void,
    ) {
        msg_send![self, setConstantValue:value type:ty atIndex:index]
    }
}
/// Raw Objective-C MTLCompileOptions type; wrapped by `CompileOptions`.
pub enum MTLCompileOptions {}

foreign_obj_type! {
    type CType = MTLCompileOptions;
    pub struct CompileOptions;
    pub struct CompileOptionsRef;
}

impl CompileOptions {
    /// Creates a fresh set of shader compile options with Metal's defaults.
    pub fn new() -> Self {
        unsafe {
            let class = class!(MTLCompileOptions);
            msg_send![class, new]
        }
    }
}

impl CompileOptionsRef {
    /// Raw NSDictionary of preprocessor macros.
    ///
    /// # Safety
    /// Returns an unmanaged ObjC object pointer.
    pub unsafe fn preprocessor_defines(&self) -> *mut Object {
        msg_send![self, preprocessorMacros]
    }

    /// Sets the preprocessor macro dictionary.
    ///
    /// # Safety
    /// `defines` must be a valid NSDictionary mapping macro names to values.
    pub unsafe fn set_preprocessor_defines(&self, defines: *mut Object) {
        msg_send![self, setPreprocessorMacros: defines]
    }

    /// Whether the compiler may use fast, IEEE-relaxed math.
    pub fn is_fast_math_enabled(&self) -> bool {
        unsafe {
            // ObjC BOOL: anything other than YES/NO is malformed.
            match msg_send![self, fastMathEnabled] {
                YES => true,
                NO => false,
                _ => unreachable!(),
            }
        }
    }

    pub fn set_fast_math_enabled(&self, enabled: bool) {
        unsafe { msg_send![self, setFastMathEnabled: enabled] }
    }

    /// The Metal Shading Language version to compile against.
    pub fn language_version(&self) -> MTLLanguageVersion {
        unsafe { msg_send![self, languageVersion] }
    }

    pub fn set_language_version(&self, version: MTLLanguageVersion) {
        unsafe { msg_send![self, setLanguageVersion: version] }
    }
}
/// Error codes reported by Metal library/compile operations.
#[repr(u64)]
#[allow(non_camel_case_types)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub enum MTLLibraryError {
    Unsupported = 1,
    Internal = 2,
    CompileFailure = 3,
    CompileWarning = 4,
}

/// Raw Objective-C MTLLibrary type; wrapped by `Library`.
pub enum MTLLibrary {}

foreign_obj_type! {
    type CType = MTLLibrary;
    pub struct Library;
    pub struct LibraryRef;
}
impl LibraryRef {
pub fn label(&self) -> &str {
unsafe {
let label = msg_send![self, label];
crate::nsstring_as_str(label)
}
}
pub fn set_label(&self, label: &str) {
unsafe {
let nslabel = crate::nsstring_from_str(label);
let () = msg_send![self, setLabel: nslabel];
}
}
pub fn get_function(
&self,
name: &str,
constants: Option<FunctionConstantValues>,
) -> Result<Function, String> {
unsafe {
let nsname = crate::nsstring_from_str(name);
let function: *mut MTLFunction = match constants {
Some(c) => try_objc! { err => msg_send![self,
newFunctionWithName: nsname.as_ref()
constantValues: c.as_ref()
error: &mut err
]},
None => msg_send![self, newFunctionWithName: nsname.as_ref()],
};
if !function.is_null() {
Ok(Function::from_ptr(function))
} else {
Err(format!("Function '{}' does not exist", name))
}
}
}
pub fn function_names(&self) -> Vec<String> {
unsafe {
let names: *mut Object = msg_send![self, functionNames];
let count: NSUInteger = msg_send![names, count];
let ret = (0..count)
.map(|i| {
let name = msg_send![names, objectAtIndex: i];
nsstring_as_str(name).to_string()
})
.collect();
let () = msg_send![names, release];
ret
}
}
}
add device method to library (#142)
// Copyright 2017 GFX developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use super::*;
use cocoa::foundation::NSUInteger;
use foreign_types::ForeignType;
use objc::runtime::{Object, NO, YES};
use std::ffi::CStr;
pub enum MTLVertexAttribute {}
foreign_obj_type! {
type CType = MTLVertexAttribute;
pub struct VertexAttribute;
pub struct VertexAttributeRef;
}
impl VertexAttributeRef {
pub fn name(&self) -> &str {
unsafe {
let name = msg_send![self, name];
crate::nsstring_as_str(name)
}
}
pub fn attribute_index(&self) -> u64 {
unsafe { msg_send![self, attributeIndex] }
}
pub fn attribute_type(&self) -> MTLDataType {
unsafe { msg_send![self, attributeType] }
}
pub fn is_active(&self) -> bool {
unsafe {
match msg_send![self, isActive] {
YES => true,
NO => false,
_ => unreachable!(),
}
}
}
}
#[repr(u64)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub enum MTLFunctionType {
Vertex = 1,
Fragment = 2,
Kernel = 3,
}
pub enum MTLFunction {}
foreign_obj_type! {
type CType = MTLFunction;
pub struct Function;
pub struct FunctionRef;
}
impl FunctionRef {
pub fn name(&self) -> &str {
unsafe {
let name = msg_send![self, name];
crate::nsstring_as_str(name)
}
}
pub fn function_type(&self) -> MTLFunctionType {
unsafe { msg_send![self, functionType] }
}
pub fn vertex_attributes(&self) -> &Array<VertexAttribute> {
unsafe { msg_send![self, vertexAttributes] }
}
pub fn new_argument_encoder(&self, buffer_index: NSUInteger) -> ArgumentEncoder {
unsafe {
let ptr = msg_send![self, newArgumentEncoderWithBufferIndex: buffer_index];
ArgumentEncoder::from_ptr(ptr)
}
}
pub fn function_constants_dictionary(&self) -> *mut Object {
unsafe { msg_send![self, functionConstantsDictionary] }
}
}
#[repr(u64)]
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub enum MTLLanguageVersion {
V1_0 = 0x10000,
V1_1 = 0x10001,
V1_2 = 0x10002,
V2_0 = 0x20000,
V2_1 = 0x20001,
V2_2 = 0x20002,
}
pub enum MTLFunctionConstantValues {}
foreign_obj_type! {
type CType = MTLFunctionConstantValues;
pub struct FunctionConstantValues;
pub struct FunctionConstantValuesRef;
}
impl FunctionConstantValues {
pub fn new() -> Self {
unsafe {
let class = class!(MTLFunctionConstantValues);
msg_send![class, new]
}
}
}
impl FunctionConstantValuesRef {
pub unsafe fn set_constant_value_at_index(
&self,
index: NSUInteger,
ty: MTLDataType,
value: *const std::os::raw::c_void,
) {
msg_send![self, setConstantValue:value type:ty atIndex:index]
}
}
pub enum MTLCompileOptions {}
foreign_obj_type! {
type CType = MTLCompileOptions;
pub struct CompileOptions;
pub struct CompileOptionsRef;
}
impl CompileOptions {
pub fn new() -> Self {
unsafe {
let class = class!(MTLCompileOptions);
msg_send![class, new]
}
}
}
impl CompileOptionsRef {
pub unsafe fn preprocessor_defines(&self) -> *mut Object {
msg_send![self, preprocessorMacros]
}
pub unsafe fn set_preprocessor_defines(&self, defines: *mut Object) {
msg_send![self, setPreprocessorMacros: defines]
}
pub fn is_fast_math_enabled(&self) -> bool {
unsafe {
match msg_send![self, fastMathEnabled] {
YES => true,
NO => false,
_ => unreachable!(),
}
}
}
pub fn set_fast_math_enabled(&self, enabled: bool) {
unsafe { msg_send![self, setFastMathEnabled: enabled] }
}
pub fn language_version(&self) -> MTLLanguageVersion {
unsafe { msg_send![self, languageVersion] }
}
pub fn set_language_version(&self, version: MTLLanguageVersion) {
unsafe { msg_send![self, setLanguageVersion: version] }
}
}
#[repr(u64)]
#[allow(non_camel_case_types)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub enum MTLLibraryError {
Unsupported = 1,
Internal = 2,
CompileFailure = 3,
CompileWarning = 4,
}
pub enum MTLLibrary {}
foreign_obj_type! {
type CType = MTLLibrary;
pub struct Library;
pub struct LibraryRef;
}
impl LibraryRef {
    /// The device this library was created against.
    pub fn device(&self) -> &DeviceRef {
        unsafe { msg_send![self, device] }
    }

    /// The library's debug label, borrowed from the underlying NSString.
    pub fn label(&self) -> &str {
        unsafe {
            let label = msg_send![self, label];
            crate::nsstring_as_str(label)
        }
    }

    /// Sets the library's debug label.
    pub fn set_label(&self, label: &str) {
        unsafe {
            let nslabel = crate::nsstring_from_str(label);
            let () = msg_send![self, setLabel: nslabel];
        }
    }

    /// Looks up the function named `name`, optionally specializing it with
    /// `constants`.
    ///
    /// Returns an owned `Function`, or an error string when the lookup (or
    /// constant specialization) fails.
    pub fn get_function(
        &self,
        name: &str,
        constants: Option<FunctionConstantValues>,
    ) -> Result<Function, String> {
        unsafe {
            let nsname = crate::nsstring_from_str(name);

            let function: *mut MTLFunction = match constants {
                // With constants: failures surface via an NSError
                // out-parameter, captured by try_objc!.
                Some(c) => try_objc! { err => msg_send![self,
                    newFunctionWithName: nsname.as_ref()
                    constantValues: c.as_ref()
                    error: &mut err
                ]},
                // Without constants: a null return means "not found".
                None => msg_send![self, newFunctionWithName: nsname.as_ref()],
            };

            if !function.is_null() {
                Ok(Function::from_ptr(function))
            } else {
                Err(format!("Function '{}' does not exist", name))
            }
        }
    }

    /// Names of all public functions in the library.
    pub fn function_names(&self) -> Vec<String> {
        unsafe {
            let names: *mut Object = msg_send![self, functionNames];
            let count: NSUInteger = msg_send![names, count];
            let ret = (0..count)
                .map(|i| {
                    let name = msg_send![names, objectAtIndex: i];
                    // Copy out before releasing the NSArray below.
                    nsstring_as_str(name).to_string()
                })
                .collect();
            let () = msg_send![names, release];
            ret
        }
    }
}
|
//! Lua functionality
use hlua;
use hlua::{Lua, LuaError};
use hlua::any::AnyLuaValue;
use rustc_serialize::json::Json;
use std::thread;
use std::fs::{File};
use std::path::Path;
use std::io::Write;
use std::fmt::{Debug, Formatter};
use std::fmt::Result as FmtResult;
use std::sync::{Mutex, RwLock};
use std::sync::mpsc::{channel, Sender, Receiver};
#[macro_use]
mod funcs;
#[cfg(test)]
mod tests;
lazy_static! {
    /// Sends requests to the lua thread
    // `None` until `init()` installs the channel's transmit end.
    static ref SENDER: Mutex<Option<Sender<LuaMessage>>> = Mutex::new(None);
    /// Whether the lua thread is currently running
    pub static ref RUNNING: RwLock<bool> = RwLock::new(false);
}

/// Represents an identifier for dealing with nested tables.
///
/// To access foo.bar.baz, use vec!["foo", "bar", "baz"].
///
/// To access foo[2], use vec!["foo", 2].
pub type LuaIdentifier = Vec<AnyLuaValue>;
/// Messages sent to the lua thread
#[derive(Debug)]
pub enum LuaQuery {
    /// Pings the lua thread
    Ping,
    /// Halt the lua thread
    Terminate,
    /// Restart the lua thread
    Restart,
    /// Execute a string
    Execute(String),
    /// Execute a file
    ExecFile(String),
    /// Get a variable, expecting an AnyLuaValue
    GetValue(LuaIdentifier),
    /// Invoke a function found at the position,
    /// with the specified arguments.
    Invoke(LuaIdentifier, Vec<AnyLuaValue>),
    /// Set a value
    SetValue {
        /// The name of the thing to stuff
        name: LuaIdentifier,
        /// The value to store.
        val: Json
    },
    /// Create a new table
    NewTable(LuaIdentifier),
}
/// Messages received from lua thread
// No derived Debug: `LuaFunction` isn't Debug, so a manual impl lives below.
pub enum LuaResponse {
    /// Lua variable obtained
    Variable(Option<AnyLuaValue>),
    /// Lua error
    Error(hlua::LuaError),
    /// A function is returned
    Function(hlua::functions_read::LuaFunction<String>),
    /// Pong response from lua ping
    Pong,
}
/// Struct sent to the lua query
#[derive(Debug)]
struct LuaMessage {
    // One-shot channel on which the lua thread posts its reply.
    reply: Sender<LuaResponse>,
    query: LuaQuery
}

// NOTE(review): these Send/Sync impls are asserted, not proven. In
// particular `Sender<T>` is deliberately !Sync, and `LuaFunction` holds
// interpreter state; sharing these types across threads should be
// re-audited rather than waved through with blanket `unsafe impl`s.
unsafe impl Send for LuaQuery { }
unsafe impl Sync for LuaQuery { }
unsafe impl Send for LuaResponse { }
unsafe impl Sync for LuaResponse { }
unsafe impl Send for LuaMessage { }
unsafe impl Sync for LuaMessage { }
impl Debug for LuaResponse {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match *self {
LuaResponse::Variable(ref var) =>
write!(f, "LuaResponse::Variable({:?})", var),
LuaResponse::Error(ref err) =>
write!(f, "LuaResponse::Error({:?})", err),
LuaResponse::Function(_) =>
write!(f, "LuaResponse::Function"),
LuaResponse::Pong =>
write!(f, "LuaResponse::Pong")
}
}
}
/// Whether the lua thread is currently available.
pub fn thread_running() -> bool {
    // A poisoned lock here means the lua thread already panicked while
    // holding it; propagate via unwrap.
    let running = RUNNING.read().unwrap();
    *running
}
/// Errors which may arise from attempting
/// to sending a message to the lua thread.
#[derive(Debug)]
pub enum LuaSendError {
    /// The thread crashed, was shut down, or rebooted.
    ThreadClosed,
    /// The thread has not been initialized yet (maybe not used)
    ThreadUninitialized,
    /// The sender had an issue, most likely because the thread panicked.
    /// Following the `Sender` API, the original value sent is returned.
    Sender(LuaQuery)
}
/// Attemps to send a LuaQuery to the lua thread.
pub fn try_send(query: LuaQuery) -> Result<Receiver<LuaResponse>,LuaSendError> {
if !thread_running() {
Err(LuaSendError::ThreadClosed)
}
else if let Some(sender) = SENDER.lock().unwrap().clone() {
let (tx, rx) = channel();
let message = LuaMessage { reply: tx, query: query };
match sender.send(message) {
Ok(_) => Ok(rx),
Err(e) => Err(LuaSendError::Sender(e.0.query))
}
}
else {
Err(LuaSendError::ThreadUninitialized)
}
}
/// Initialize the lua thread.
pub fn init() {
    trace!("Initializing...");
    let (query_tx, query_rx) = channel::<LuaMessage>();
    // The mutex guard lives only for this statement, so the lock is
    // released before the worker thread is spawned.
    *SENDER.lock().unwrap() = Some(query_tx);
    thread::spawn(move || thread_init(query_rx));
    trace!("Created thread. Init finished.");
}
/// One-time setup for the lua thread: builds the interpreter, loads the
/// standard libraries and way-cooler's lua extensions, then enters the
/// main request loop (never returns normally).
fn thread_init(receiver: Receiver<LuaMessage>) {
    trace!("thread: initializing.");
    let mut lua = Lua::new();
    //unsafe {
    //    hlua_ffi::lua_atpanic(&mut lua.as_mut_lua().0, thread_on_panic);
    //}
    debug!("thread: Loading Lua libraries...");
    lua.openlibs();
    trace!("thread: Loading way-cooler lua extensions...");
    // We should have some good file handling, read files from /usr by default,
    // but for now we're reading directly from the source.
    // NOTE(review): both unwraps abort this thread if the file is missing or
    // fails to parse — confirm that is acceptable at startup.
    lua.execute_from_reader::<(), File>(
        File::open("lib/lua/init.lua").unwrap()
    ).unwrap();
    trace!("thread: loading way-cooler libraries...");
    funcs::register_libraries(&mut lua);
    // Only ready after loading libs
    *RUNNING.write().unwrap() = true;
    debug!("thread: entering main loop...");
    thread_main_loop(receiver, &mut lua);
}
/// Blocks on the request channel forever, dispatching each message.
///
/// A receive error means every sender (i.e. the main thread's SENDER) was
/// dropped, so the thread flags itself as stopped and panics.
fn thread_main_loop(receiver: Receiver<LuaMessage>, lua: &mut Lua) {
    loop {
        let request = receiver.recv();
        match request {
            Err(e) => {
                error!("Lua thread: unable to receive message: {}", e);
                error!("Lua thread: now panicking!");
                // Mark unavailable before dying so try_send fails fast.
                *RUNNING.write().unwrap() = false;

                panic!("Lua thread: lost contact with host, exiting!");
            }
            Ok(message) => {
                trace!("Handling a request");
                thread_handle_message(message, lua);
            }
        }
    }
}
/// Dispatches a single request on the lua thread.
///
/// Every branch replies through `thread_send` (or panics), so a requester
/// is never left waiting on a channel that will get no answer.
fn thread_handle_message(request: LuaMessage, lua: &mut Lua) {
    match request.query {
        LuaQuery::Terminate => {
            trace!("thread: Received terminate signal");
            *RUNNING.write().unwrap() = false;

            info!("thread: Lua thread terminating!");
            thread_send(request.reply, LuaResponse::Pong);
            return;
        },

        LuaQuery::Restart => {
            trace!("thread: Received restart signal!");
            error!("thread: Lua thread restart not supported!");
            *RUNNING.write().unwrap() = false;

            // Replies before panicking so the requester isn't left hanging.
            thread_send(request.reply, LuaResponse::Pong);
            panic!("Lua thread: Restart not supported!");
        },

        LuaQuery::Execute(code) => {
            trace!("thread: Received request to execute code");
            trace!("thread: Executing {:?}", code);

            match lua.execute::<()>(&code) {
                Err(error) => {
                    warn!("thread: Error executing code: {:?}", error);
                    let response = LuaResponse::Error(error);
                    thread_send(request.reply, response);
                }
                Ok(_) => {
                    // This is gonna be really spammy one day
                    trace!("thread: Code executed okay.");
                    thread_send(request.reply, LuaResponse::Pong);
                }
            }
        },

        LuaQuery::ExecFile(name) => {
            trace!("thread: Received request to execute file {}", name);
            info!("thread: Executing {}", name);

            let path = Path::new(&name);
            let try_file = File::open(path);

            if let Ok(file) = try_file {
                let result = lua.execute_from_reader::<(), File>(file);

                if let Err(err) = result {
                    warn!("thread: Error executing {}!", name);
                    thread_send(request.reply, LuaResponse::Error(err));
                }
                else {
                    trace!("thread: Execution of {} successful.", name);
                    thread_send(request.reply, LuaResponse::Pong);
                }
            }
            else { // Could not open file
                // Unwrap_err is used because we're in the else of let
                let read_error =
                    LuaError::ReadError(try_file.unwrap_err());
                thread_send(request.reply, LuaResponse::Error(read_error));
            }
        },

        LuaQuery::GetValue(varname) => {
            trace!("thread: Received request to get variable {:?}", varname);
            // NOTE(review): the identifier path is `{:?}`-formatted into a
            // single global name, so nested-table access doesn't actually
            // work here — verify against LuaIdentifier's documented intent.
            let var_result = lua.get(format!("{:?}", varname));

            match var_result {
                Some(var) => {
                    thread_send(request.reply, LuaResponse::Variable(Some(var)));
                }
                None => {
                    warn!("thread: Unable to get variable {:?}", varname);
                    thread_send(request.reply, LuaResponse::Variable(None));
                }
            }
        },

        LuaQuery::SetValue { name: _name, val: _val } => {
            panic!("thread: unimplemented LuaQuery::SetValue!");
        },

        LuaQuery::NewTable(_name) => {
            panic!("thread: unimplemented LuaQuery::NewTable!");
        },

        LuaQuery::Ping => {
            panic!("thread: unimplemented LuaQuery::Ping!");
        },

        // Catches LuaQuery::Invoke, which is not handled yet.
        _ => {
            panic!("Unimplemented send type for lua thread!");
        }
    }
}
fn thread_send(sender: Sender<LuaResponse>, response: LuaResponse) {
match sender.send(response) {
Err(_) => {
error!("thread: Unable to broadcast response!");
error!("thread: Shutting down in response to inability \
to continue!");
*RUNNING.write().unwrap() = false;
panic!("Lua thread unable to communicate with main thread, \
shutting down!");
}
Ok(_) => {}
}
}
Don't crash the thread if a sender ignores the response
//! Lua functionality
use hlua;
use hlua::{Lua, LuaError};
use hlua::any::AnyLuaValue;
use rustc_serialize::json::Json;
use std::thread;
use std::fs::{File};
use std::path::Path;
use std::io::Write;
use std::fmt::{Debug, Formatter};
use std::fmt::Result as FmtResult;
use std::sync::{Mutex, RwLock};
use std::sync::mpsc::{channel, Sender, Receiver};
#[macro_use]
mod funcs;
#[cfg(test)]
mod tests;
lazy_static! {
/// Sends requests to the lua thread
static ref SENDER: Mutex<Option<Sender<LuaMessage>>> = Mutex::new(None);
/// Whether the lua thread is currently running
pub static ref RUNNING: RwLock<bool> = RwLock::new(false);
}
/// Represents an identifier for dealing with nested tables.
///
/// To access foo.bar.baz, use vec!["foo", "bar", "baz"].
///
/// To access foo[2], use vec!["foo", 2].
pub type LuaIdentifier = Vec<AnyLuaValue>;
/// Messages sent to the lua thread
#[derive(Debug)]
pub enum LuaQuery {
/// Pings the lua thread
Ping,
/// Halt the lua thread
Terminate,
// Restart the lua thread
Restart,
/// Execute a string
Execute(String),
/// Execute a file
ExecFile(String),
/// Get a variable, expecting an AnyLuaValue
GetValue(LuaIdentifier),
/// Invoke a function found at the position,
/// with the specified arguments.
Invoke(LuaIdentifier, Vec<AnyLuaValue>),
/// Set a value
SetValue {
/// The name of the thing to stuff
name: LuaIdentifier,
/// The value to store.
val: Json
},
/// Create a new table
NewTable(LuaIdentifier),
}
/// Messages received from lua thread
pub enum LuaResponse {
/// Lua variable obtained
Variable(Option<AnyLuaValue>),
/// Lua error
Error(hlua::LuaError),
/// A function is returned
Function(hlua::functions_read::LuaFunction<String>),
/// Pong response from lua ping
Pong,
}
/// Struct sent to the lua query
#[derive(Debug)]
struct LuaMessage {
reply: Sender<LuaResponse>,
query: LuaQuery
}
unsafe impl Send for LuaQuery { }
unsafe impl Sync for LuaQuery { }
unsafe impl Send for LuaResponse { }
unsafe impl Sync for LuaResponse { }
unsafe impl Send for LuaMessage { }
unsafe impl Sync for LuaMessage { }
impl Debug for LuaResponse {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match *self {
LuaResponse::Variable(ref var) =>
write!(f, "LuaResponse::Variable({:?})", var),
LuaResponse::Error(ref err) =>
write!(f, "LuaResponse::Error({:?})", err),
LuaResponse::Function(_) =>
write!(f, "LuaResponse::Function"),
LuaResponse::Pong =>
write!(f, "LuaResponse::Pong")
}
}
}
/// Whether the lua thread is currently available
pub fn thread_running() -> bool {
*RUNNING.read().unwrap()
}
/// Errors which may arise from attempting
/// to sending a message to the lua thread.
#[derive(Debug)]
pub enum LuaSendError {
/// The thread crashed, was shut down, or rebooted.
ThreadClosed,
/// The thread has not been initialized yet (maybe not used)
ThreadUninitialized,
/// The sender had an issue, most likey because the thread panicked.
/// Following the `Sender` API, the original value sent is returned.
Sender(LuaQuery)
}
/// Attemps to send a LuaQuery to the lua thread.
pub fn try_send(query: LuaQuery) -> Result<Receiver<LuaResponse>,LuaSendError> {
if !thread_running() {
Err(LuaSendError::ThreadClosed)
}
else if let Some(sender) = SENDER.lock().unwrap().clone() {
let (tx, rx) = channel();
let message = LuaMessage { reply: tx, query: query };
match sender.send(message) {
Ok(_) => Ok(rx),
Err(e) => Err(LuaSendError::Sender(e.0.query))
}
}
else {
Err(LuaSendError::ThreadUninitialized)
}
}
/// Initialize the lua thread
pub fn init() {
trace!("Initializing...");
let (query_tx, query_rx) = channel::<LuaMessage>();
{
let mut sender = SENDER.lock().unwrap();
*sender = Some(query_tx);
}
thread::spawn(move || {
thread_init(query_rx);
});
trace!("Created thread. Init finished.");
}
fn thread_init(receiver: Receiver<LuaMessage>) {
trace!("thread: initializing.");
let mut lua = Lua::new();
//unsafe {
// hlua_ffi::lua_atpanic(&mut lua.as_mut_lua().0, thread_on_panic);
//}
debug!("thread: Loading Lua libraries...");
lua.openlibs();
trace!("thread: Loading way-cooler lua extensions...");
// We should have some good file handling, read files from /usr by default,
// but for now we're reading directly from the source.
lua.execute_from_reader::<(), File>(
File::open("lib/lua/init.lua").unwrap()
).unwrap();
trace!("thread: loading way-cooler libraries...");
funcs::register_libraries(&mut lua);
// Only ready after loading libs
*RUNNING.write().unwrap() = true;
debug!("thread: entering main loop...");
thread_main_loop(receiver, &mut lua);
}
fn thread_main_loop(receiver: Receiver<LuaMessage>, lua: &mut Lua) {
loop {
let request = receiver.recv();
match request {
Err(e) => {
error!("Lua thread: unable to receive message: {}", e);
error!("Lua thread: now panicking!");
*RUNNING.write().unwrap() = false;
panic!("Lua thread: lost contact with host, exiting!");
}
Ok(message) => {
trace!("Handling a request");
thread_handle_message(message, lua);
}
}
}
}
fn thread_handle_message(request: LuaMessage, lua: &mut Lua) {
match request.query {
LuaQuery::Terminate => {
trace!("thread: Received terminate signal");
*RUNNING.write().unwrap() = false;
info!("thread: Lua thread terminating!");
thread_send(request.reply, LuaResponse::Pong);
return;
},
LuaQuery::Restart => {
trace!("thread: Received restart signal!");
error!("thread: Lua thread restart not supported!");
*RUNNING.write().unwrap() = false;
thread_send(request.reply, LuaResponse::Pong);
panic!("Lua thread: Restart not supported!");
},
LuaQuery::Execute(code) => {
trace!("thread: Received request to execute code");
trace!("thread: Executing {:?}", code);
match lua.execute::<()>(&code) {
Err(error) => {
warn!("thread: Error executing code: {:?}", error);
let response = LuaResponse::Error(error);
thread_send(request.reply, response);
}
Ok(_) => {
// This is gonna be really spammy one day
trace!("thread: Code executed okay.");
thread_send(request.reply, LuaResponse::Pong);
}
}
},
LuaQuery::ExecFile(name) => {
trace!("thread: Received request to execute file {}", name);
info!("thread: Executing {}", name);
let path = Path::new(&name);
let try_file = File::open(path);
if let Ok(file) = try_file {
let result = lua.execute_from_reader::<(), File>(file);
if let Err(err) = result {
warn!("thread: Error executing {}!", name);
thread_send(request.reply, LuaResponse::Error(err));
}
else {
trace!("thread: Execution of {} successful.", name);
thread_send(request.reply, LuaResponse::Pong);
}
}
else { // Could not open file
// Unwrap_err is used because we're in the else of let
let read_error =
LuaError::ReadError(try_file.unwrap_err());
thread_send(request.reply, LuaResponse::Error(read_error));
}
},
LuaQuery::GetValue(varname) => {
trace!("thread: Received request to get variable {:?}", varname);
let var_result = lua.get(format!("{:?}", varname));
match var_result {
Some(var) => {
thread_send(request.reply, LuaResponse::Variable(Some(var)));
}
None => {
warn!("thread: Unable to get variable {:?}", varname);
thread_send(request.reply, LuaResponse::Variable(None));
}
}
},
LuaQuery::SetValue { name: _name, val: _val } => {
panic!("thread: unimplemented LuaQuery::SetValue!");
},
LuaQuery::NewTable(_name) => {
panic!("thread: unimplemented LuaQuery::NewTable!");
},
LuaQuery::Ping => {
panic!("thread: unimplemented LuaQuery::Ping!");
},
_ => {
panic!("Unimplemented send type for lua thread!");
}
}
}
/// Sends a response back to the requester.
///
/// A failed send means the requester dropped its receiver (it ignored the
/// reply); that is deliberately non-fatal — only a warning is logged.
fn thread_send(sender: Sender<LuaResponse>, response: LuaResponse) {
    match sender.send(response) {
        Err(_) => {
            warn!("A requester of the lua thread has ignored a response!");
        }
        Ok(_) => {}
    }
}
|
//! Lua functionality
use hlua;
use hlua::{Lua, LuaError};
use hlua::any::AnyLuaValue;
use rustc_serialize::json::Json;
use std::thread;
use std::fs::{File};
use std::path::Path;
use std::io::Write;
use std::fmt::{Debug, Formatter};
use std::fmt::Result as FmtResult;
use std::sync::{Mutex, RwLock};
use std::sync::mpsc::{channel, Sender, Receiver};
#[macro_use]
mod funcs;
#[cfg(test)]
mod tests;
lazy_static! {
/// Sends requests to the lua thread
static ref SENDER: Mutex<Option<Sender<LuaMessage>>> = Mutex::new(None);
/// Whether the lua thread is currently running
pub static ref RUNNING: RwLock<bool> = RwLock::new(false);
}
/// Represents an identifier for dealing with nested tables.
///
/// To access foo.bar.baz, use vec!["foo", "bar", "baz"].
///
/// To access foo[2], use vec!["foo", 2].
pub type LuaIdentifier = Vec<AnyLuaValue>;
/// Messages sent to the lua thread
#[derive(Debug)]
pub enum LuaQuery {
/// Pings the lua thread
Ping,
/// Halt the lua thread
Terminate,
// Restart the lua thread
Restart,
/// Execute a string
Execute(String),
/// Execute a file
ExecFile(String),
/// Get a variable, expecting an AnyLuaValue
GetValue(LuaIdentifier),
/// Invoke a function found at the position,
/// with the specified arguments.
Invoke(LuaIdentifier, Vec<AnyLuaValue>),
/// Set a value
SetValue {
/// The name of the thing to stuff
name: LuaIdentifier,
/// The value to store.
val: Json
},
/// Create a new table
NewTable(LuaIdentifier),
}
/// Messages received from lua thread
pub enum LuaResponse {
/// Lua variable obtained
Variable(Option<AnyLuaValue>),
/// Lua error
Error(hlua::LuaError),
/// A function is returned
Function(hlua::functions_read::LuaFunction<String>),
/// Pong response from lua ping
Pong,
}
/// Struct sent to the lua query
#[derive(Debug)]
struct LuaMessage {
reply: Sender<LuaResponse>,
query: LuaQuery
}
unsafe impl Send for LuaQuery { }
unsafe impl Sync for LuaQuery { }
unsafe impl Send for LuaResponse { }
unsafe impl Sync for LuaResponse { }
unsafe impl Send for LuaMessage { }
unsafe impl Sync for LuaMessage { }
impl Debug for LuaResponse {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match *self {
LuaResponse::Variable(ref var) =>
write!(f, "LuaResponse::Variable({:?})", var),
LuaResponse::Error(ref err) =>
write!(f, "LuaResponse::Error({:?})", err),
LuaResponse::Function(_) =>
write!(f, "LuaResponse::Function"),
LuaResponse::Pong =>
write!(f, "LuaResponse::Pong")
}
}
}
/// Whether the lua thread is currently available
pub fn thread_running() -> bool {
*RUNNING.read().unwrap()
}
/// Errors which may arise from attempting
/// to sending a message to the lua thread.
#[derive(Debug)]
pub enum LuaSendError {
/// The thread crashed, was shut down, or rebooted.
ThreadClosed,
/// The thread has not been initialized yet (maybe not used)
ThreadUninitialized,
/// The sender had an issue, most likey because the thread panicked.
/// Following the `Sender` API, the original value sent is returned.
Sender(LuaQuery)
}
/// Attempts to send a LuaQuery to the lua thread.
///
/// On success, returns the receiving end of a one-shot channel on which
/// the lua thread will post its `LuaResponse`.
pub fn try_send(query: LuaQuery) -> Result<Receiver<LuaResponse>,LuaSendError> {
    if !thread_running() {
        Err(LuaSendError::ThreadClosed)
    }
    // Borrow the sender in place; the guard lives for the whole send.
    else if let Some(ref sender) = *SENDER.lock().unwrap() {
        let (tx, rx) = channel();
        let message = LuaMessage { reply: tx, query: query };
        match sender.send(message) {
            Ok(_) => Ok(rx),
            // `send` hands the message back; return the original query.
            Err(e) => Err(LuaSendError::Sender(e.0.query))
        }
    }
    else {
        Err(LuaSendError::ThreadUninitialized)
    }
}
/// Initialize the lua thread
///
/// Creates the request channel, publishes the sending half in the global
/// `SENDER`, and spawns the thread which runs `thread_init`. Note that the
/// thread is not necessarily ready when this returns: `RUNNING` only becomes
/// true once the thread has loaded its libraries.
pub fn init() {
    trace!("Initializing...");
    let (query_tx, query_rx) = channel::<LuaMessage>();
    {
        // Scope the lock so the guard is released before spawning.
        let mut sender = SENDER.lock().unwrap();
        *sender = Some(query_tx);
    }
    thread::spawn(move || {
        thread_init(query_rx);
    });
    trace!("Created thread. Init finished.");
}
/// Entry point of the lua thread: builds the interpreter, loads the standard
/// and way-cooler libraries, marks the thread as running, then enters the
/// main request loop (which never returns).
fn thread_init(receiver: Receiver<LuaMessage>) {
    trace!("thread: initializing.");
    let mut lua = Lua::new();
    //unsafe {
    //    hlua_ffi::lua_atpanic(&mut lua.as_mut_lua().0, thread_on_panic);
    //}
    debug!("thread: Loading Lua libraries...");
    lua.openlibs();
    trace!("thread: Loading way-cooler lua extensions...");
    // We should have some good file handling, read files from /usr by default,
    // but for now we're reading directly from the source.
    // NOTE(review): both unwraps panic (killing the lua thread before RUNNING
    // is ever set) if the init file is missing or fails to execute -- confirm
    // that aborting startup is the intended behavior here.
    lua.execute_from_reader::<(), File>(
        File::open("lib/lua/init.lua").unwrap()
    ).unwrap();
    trace!("thread: loading way-cooler libraries...");
    funcs::register_libraries(&mut lua);
    // Only ready after loading libs
    *RUNNING.write().unwrap() = true;
    debug!("thread: entering main loop...");
    thread_main_loop(receiver, &mut lua);
}
/// Receives and handles requests forever. If the channel disconnects (all
/// senders dropped) no further work can ever arrive, so the running flag is
/// cleared and the thread panics to make the failure visible.
fn thread_main_loop(receiver: Receiver<LuaMessage>, lua: &mut Lua) {
    loop {
        let request = receiver.recv();
        match request {
            Err(e) => {
                error!("Lua thread: unable to receive message: {}", e);
                error!("Lua thread: now panicking!");
                *RUNNING.write().unwrap() = false;
                panic!("Lua thread: lost contact with host, exiting!");
            }
            Ok(message) => {
                trace!("Handling a request");
                thread_handle_message(message, lua);
            }
        }
    }
}
/// Dispatches a single request to the lua interpreter, replying on the
/// message's `reply` channel. `Terminate` and `Restart` clear the running
/// flag; `Restart` additionally panics since restart is unsupported.
fn thread_handle_message(request: LuaMessage, lua: &mut Lua) {
    match request.query {
        LuaQuery::Terminate => {
            trace!("thread: Received terminate signal");
            // Flip the flag before replying so no new requests are accepted.
            *RUNNING.write().unwrap() = false;
            info!("thread: Lua thread terminating!");
            thread_send(request.reply, LuaResponse::Pong);
            return;
        },
        LuaQuery::Restart => {
            trace!("thread: Received restart signal!");
            error!("thread: Lua thread restart not supported!");
            *RUNNING.write().unwrap() = false;
            thread_send(request.reply, LuaResponse::Pong);
            panic!("Lua thread: Restart not supported!");
        },
        LuaQuery::Execute(code) => {
            trace!("thread: Received request to execute code");
            trace!("thread: Executing {:?}", code);
            match lua.execute::<()>(&code) {
                Err(error) => {
                    warn!("thread: Error executing code: {:?}", error);
                    let response = LuaResponse::Error(error);
                    thread_send(request.reply, response);
                }
                Ok(_) => {
                    // This is gonna be really spammy one day
                    trace!("thread: Code executed okay.");
                    thread_send(request.reply, LuaResponse::Pong);
                }
            }
        },
        LuaQuery::ExecFile(name) => {
            trace!("thread: Received request to execute file {}", name);
            info!("thread: Executing {}", name);
            let path = Path::new(&name);
            let try_file = File::open(path);
            if let Ok(file) = try_file {
                let result = lua.execute_from_reader::<(), File>(file);
                if let Err(err) = result {
                    warn!("thread: Error executing {}!", name);
                    thread_send(request.reply, LuaResponse::Error(err));
                }
                else {
                    trace!("thread: Execution of {} successful.", name);
                    thread_send(request.reply, LuaResponse::Pong);
                }
            }
            else { // Could not open file
                // Unwrap_err is used because we're in the else of let
                let read_error =
                    LuaError::ReadError(try_file.unwrap_err());
                thread_send(request.reply, LuaResponse::Error(read_error));
            }
        },
        LuaQuery::GetValue(varname) => {
            trace!("thread: Received request to get variable {:?}", varname);
            // NOTE(review): the identifier *vector* is Debug-formatted into
            // a single lua name here -- confirm this actually resolves
            // nested tables the way the LuaIdentifier docs describe.
            let var_result = lua.get(format!("{:?}", varname));
            match var_result {
                Some(var) => {
                    thread_send(request.reply, LuaResponse::Variable(Some(var)));
                }
                None => {
                    warn!("thread: Unable to get variable {:?}", varname);
                    thread_send(request.reply, LuaResponse::Variable(None));
                }
            }
        },
        LuaQuery::SetValue { name: _name, val: _val } => {
            panic!("thread: unimplemented LuaQuery::SetValue!");
        },
        LuaQuery::NewTable(_name) => {
            panic!("thread: unimplemented LuaQuery::NewTable!");
        },
        LuaQuery::Ping => {
            panic!("thread: unimplemented LuaQuery::Ping!");
        },
        // Catch-all: currently reached by LuaQuery::Invoke, which has no
        // handler yet.
        _ => {
            panic!("Unimplemented send type for lua thread!");
        }
    }
}
/// Delivers `response` to the requester. If delivery fails, the lua thread
/// can no longer make progress, so the running flag is cleared and the
/// thread panics.
fn thread_send(sender: Sender<LuaResponse>, response: LuaResponse) {
    if sender.send(response).is_err() {
        error!("thread: Unable to broadcast response!");
        error!("thread: Shutting down in response to inability \
                to continue!");
        *RUNNING.write().unwrap() = false;
        panic!("Lua thread unable to communicate with main thread, \
                shutting down!");
    }
}
Clone the sender when sending, to hold the lock for less time
MPSC senders can be cloned. If we only lock the mutex for the
amount of time it takes to clone the sender, we spend less time
holding the mutex.
At the moment, SENDER can't be a RwLock because of Sync trait
restrictions from RwLock and lazy_static.
//! Lua functionality
use hlua;
use hlua::{Lua, LuaError};
use hlua::any::AnyLuaValue;
use rustc_serialize::json::Json;
use std::thread;
use std::fs::{File};
use std::path::Path;
use std::io::Write;
use std::fmt::{Debug, Formatter};
use std::fmt::Result as FmtResult;
use std::sync::{Mutex, RwLock};
use std::sync::mpsc::{channel, Sender, Receiver};
#[macro_use]
mod funcs;
#[cfg(test)]
mod tests;
lazy_static! {
    /// Sends requests to the lua thread.
    /// `None` until `init()` stores the sending half of the channel.
    /// A `Mutex` (rather than `RwLock`) is required here because of `Sync`
    /// trait restrictions from `RwLock` and `lazy_static`.
    static ref SENDER: Mutex<Option<Sender<LuaMessage>>> = Mutex::new(None);
    /// Whether the lua thread is currently running
    pub static ref RUNNING: RwLock<bool> = RwLock::new(false);
}
/// Represents an identifier for dealing with nested tables.
///
/// To access foo.bar.baz, use vec!["foo", "bar", "baz"].
///
/// To access foo[2], use vec!["foo", 2].
pub type LuaIdentifier = Vec<AnyLuaValue>;
/// Messages sent to the lua thread
#[derive(Debug)]
pub enum LuaQuery {
    /// Pings the lua thread
    Ping,
    /// Halt the lua thread
    Terminate,
    /// Restart the lua thread
    Restart,
    /// Execute a string
    Execute(String),
    /// Execute a file
    ExecFile(String),
    /// Get a variable, expecting an AnyLuaValue
    GetValue(LuaIdentifier),
    /// Invoke a function found at the position,
    /// with the specified arguments.
    ///
    /// NOTE(review): not yet handled by `thread_handle_message` -- it falls
    /// into the catch-all arm, which panics.
    Invoke(LuaIdentifier, Vec<AnyLuaValue>),
    /// Set a value
    SetValue {
        /// The name of the thing to stuff
        name: LuaIdentifier,
        /// The value to store.
        val: Json
    },
    /// Create a new table
    NewTable(LuaIdentifier),
}
/// Messages received from lua thread
///
/// `Debug` is implemented manually below because the `Function` variant
/// holds an opaque hlua function handle whose contents cannot be printed.
pub enum LuaResponse {
    /// Lua variable obtained
    Variable(Option<AnyLuaValue>),
    /// Lua error
    Error(hlua::LuaError),
    /// A function is returned
    Function(hlua::functions_read::LuaFunction<String>),
    /// Pong response from lua ping
    Pong,
}
/// Struct sent to the lua query
#[derive(Debug)]
struct LuaMessage {
    /// Channel on which the lua thread delivers its `LuaResponse`
    reply: Sender<LuaResponse>,
    /// The request for the lua thread to carry out
    query: LuaQuery
}
// NOTE(review): these `unsafe impl`s assert thread-safety the compiler did
// not derive (the hlua types inside are not automatically Send/Sync).
// Verify that no value is ever accessed from two threads simultaneously.
unsafe impl Send for LuaQuery { }
unsafe impl Sync for LuaQuery { }
unsafe impl Send for LuaResponse { }
unsafe impl Sync for LuaResponse { }
unsafe impl Send for LuaMessage { }
unsafe impl Sync for LuaMessage { }
impl Debug for LuaResponse {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
match *self {
LuaResponse::Variable(ref var) =>
write!(f, "LuaResponse::Variable({:?})", var),
LuaResponse::Error(ref err) =>
write!(f, "LuaResponse::Error({:?})", err),
LuaResponse::Function(_) =>
write!(f, "LuaResponse::Function"),
LuaResponse::Pong =>
write!(f, "LuaResponse::Pong")
}
}
}
/// Whether the lua thread is currently available
pub fn thread_running() -> bool {
    // Take a read lock on the flag; panics only if the lock was poisoned.
    let guard = RUNNING.read().unwrap();
    *guard
}
/// Errors which may arise from attempting
/// to send a message to the lua thread.
#[derive(Debug)]
pub enum LuaSendError {
    /// The thread crashed, was shut down, or rebooted.
    ThreadClosed,
    /// The thread has not been initialized yet (maybe not used)
    ThreadUninitialized,
    /// The sender had an issue, most likely because the thread panicked.
    /// Following the `Sender` API, the original value sent is returned.
    Sender(LuaQuery)
}
/// Attempts to send a LuaQuery to the lua thread.
///
/// On success, returns the receiving end of a one-shot channel on which the
/// lua thread will deliver its `LuaResponse`.
pub fn try_send(query: LuaQuery) -> Result<Receiver<LuaResponse>,LuaSendError> {
    if !thread_running() {
        return Err(LuaSendError::ThreadClosed);
    }
    // Clone the sender in its own statement. A temporary `MutexGuard`
    // created in an `if let` scrutinee lives until the end of the whole
    // `if`/`else` chain, so the previous code still held the lock across
    // channel creation and the send, defeating the point of cloning.
    // Binding the clone first drops the guard at the end of this statement.
    let sender = SENDER.lock().unwrap().clone();
    if let Some(sender) = sender {
        let (tx, rx) = channel();
        let message = LuaMessage { reply: tx, query: query };
        match sender.send(message) {
            Ok(_) => Ok(rx),
            // Per the `Sender` API the failed value comes back in the
            // error; surface the original query to the caller.
            Err(e) => Err(LuaSendError::Sender(e.0.query))
        }
    }
    else {
        Err(LuaSendError::ThreadUninitialized)
    }
}
/// Initialize the lua thread
///
/// Creates the request channel, publishes the sending half in the global
/// `SENDER`, and spawns the thread which runs `thread_init`. Note that the
/// thread is not necessarily ready when this returns: `RUNNING` only becomes
/// true once the thread has loaded its libraries.
pub fn init() {
    trace!("Initializing...");
    let (query_tx, query_rx) = channel::<LuaMessage>();
    {
        // Scope the lock so the guard is released before spawning.
        let mut sender = SENDER.lock().unwrap();
        *sender = Some(query_tx);
    }
    thread::spawn(move || {
        thread_init(query_rx);
    });
    trace!("Created thread. Init finished.");
}
/// Entry point of the lua thread: builds the interpreter, loads the standard
/// and way-cooler libraries, marks the thread as running, then enters the
/// main request loop (which never returns).
fn thread_init(receiver: Receiver<LuaMessage>) {
    trace!("thread: initializing.");
    let mut lua = Lua::new();
    //unsafe {
    //    hlua_ffi::lua_atpanic(&mut lua.as_mut_lua().0, thread_on_panic);
    //}
    debug!("thread: Loading Lua libraries...");
    lua.openlibs();
    trace!("thread: Loading way-cooler lua extensions...");
    // We should have some good file handling, read files from /usr by default,
    // but for now we're reading directly from the source.
    // NOTE(review): both unwraps panic (killing the lua thread before RUNNING
    // is ever set) if the init file is missing or fails to execute -- confirm
    // that aborting startup is the intended behavior here.
    lua.execute_from_reader::<(), File>(
        File::open("lib/lua/init.lua").unwrap()
    ).unwrap();
    trace!("thread: loading way-cooler libraries...");
    funcs::register_libraries(&mut lua);
    // Only ready after loading libs
    *RUNNING.write().unwrap() = true;
    debug!("thread: entering main loop...");
    thread_main_loop(receiver, &mut lua);
}
/// Receives and handles requests forever. If the channel disconnects (all
/// senders dropped) no further work can ever arrive, so the running flag is
/// cleared and the thread panics to make the failure visible.
fn thread_main_loop(receiver: Receiver<LuaMessage>, lua: &mut Lua) {
    loop {
        let request = receiver.recv();
        match request {
            Err(e) => {
                error!("Lua thread: unable to receive message: {}", e);
                error!("Lua thread: now panicking!");
                *RUNNING.write().unwrap() = false;
                panic!("Lua thread: lost contact with host, exiting!");
            }
            Ok(message) => {
                trace!("Handling a request");
                thread_handle_message(message, lua);
            }
        }
    }
}
/// Dispatches a single request to the lua interpreter, replying on the
/// message's `reply` channel. `Terminate` and `Restart` clear the running
/// flag; `Restart` additionally panics since restart is unsupported.
fn thread_handle_message(request: LuaMessage, lua: &mut Lua) {
    match request.query {
        LuaQuery::Terminate => {
            trace!("thread: Received terminate signal");
            // Flip the flag before replying so no new requests are accepted.
            *RUNNING.write().unwrap() = false;
            info!("thread: Lua thread terminating!");
            thread_send(request.reply, LuaResponse::Pong);
            return;
        },
        LuaQuery::Restart => {
            trace!("thread: Received restart signal!");
            error!("thread: Lua thread restart not supported!");
            *RUNNING.write().unwrap() = false;
            thread_send(request.reply, LuaResponse::Pong);
            panic!("Lua thread: Restart not supported!");
        },
        LuaQuery::Execute(code) => {
            trace!("thread: Received request to execute code");
            trace!("thread: Executing {:?}", code);
            match lua.execute::<()>(&code) {
                Err(error) => {
                    warn!("thread: Error executing code: {:?}", error);
                    let response = LuaResponse::Error(error);
                    thread_send(request.reply, response);
                }
                Ok(_) => {
                    // This is gonna be really spammy one day
                    trace!("thread: Code executed okay.");
                    thread_send(request.reply, LuaResponse::Pong);
                }
            }
        },
        LuaQuery::ExecFile(name) => {
            trace!("thread: Received request to execute file {}", name);
            info!("thread: Executing {}", name);
            let path = Path::new(&name);
            let try_file = File::open(path);
            if let Ok(file) = try_file {
                let result = lua.execute_from_reader::<(), File>(file);
                if let Err(err) = result {
                    warn!("thread: Error executing {}!", name);
                    thread_send(request.reply, LuaResponse::Error(err));
                }
                else {
                    trace!("thread: Execution of {} successful.", name);
                    thread_send(request.reply, LuaResponse::Pong);
                }
            }
            else { // Could not open file
                // Unwrap_err is used because we're in the else of let
                let read_error =
                    LuaError::ReadError(try_file.unwrap_err());
                thread_send(request.reply, LuaResponse::Error(read_error));
            }
        },
        LuaQuery::GetValue(varname) => {
            trace!("thread: Received request to get variable {:?}", varname);
            // NOTE(review): the identifier *vector* is Debug-formatted into
            // a single lua name here -- confirm this actually resolves
            // nested tables the way the LuaIdentifier docs describe.
            let var_result = lua.get(format!("{:?}", varname));
            match var_result {
                Some(var) => {
                    thread_send(request.reply, LuaResponse::Variable(Some(var)));
                }
                None => {
                    warn!("thread: Unable to get variable {:?}", varname);
                    thread_send(request.reply, LuaResponse::Variable(None));
                }
            }
        },
        LuaQuery::SetValue { name: _name, val: _val } => {
            panic!("thread: unimplemented LuaQuery::SetValue!");
        },
        LuaQuery::NewTable(_name) => {
            panic!("thread: unimplemented LuaQuery::NewTable!");
        },
        LuaQuery::Ping => {
            panic!("thread: unimplemented LuaQuery::Ping!");
        },
        // Catch-all: currently reached by LuaQuery::Invoke, which has no
        // handler yet.
        _ => {
            panic!("Unimplemented send type for lua thread!");
        }
    }
}
/// Delivers `response` to the requester. If delivery fails, the lua thread
/// can no longer make progress, so the running flag is cleared and the
/// thread panics.
fn thread_send(sender: Sender<LuaResponse>, response: LuaResponse) {
    if sender.send(response).is_err() {
        error!("thread: Unable to broadcast response!");
        error!("thread: Shutting down in response to inability \
                to continue!");
        *RUNNING.write().unwrap() = false;
        panic!("Lua thread unable to communicate with main thread, \
                shutting down!");
    }
}
|
// Copyright (C) 2014 The 6502-rs Developers
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. Neither the names of the copyright holders nor the names of any
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
use std;
use address::{Address, AddressDiff};
use instruction;
use instruction::{DecodedInstr};
use memory::Memory;
use registers::{ Registers, StackPointer, Status, StatusArgs };
use registers::{ PS_NEGATIVE, PS_DECIMAL_MODE, PS_OVERFLOW, PS_ZERO, PS_CARRY,
PS_DISABLE_INTERRUPTS };
/// A 6502 machine: the register file plus the full addressable memory.
///
/// NOTE(review): this file targets pre-1.0 Rust (it uses `uint` and
/// `std::fmt::Show`), so it will not build on a modern toolchain as-is.
pub struct Machine {
    pub registers: Registers,
    pub memory: Memory
}
impl Machine {
    /// Creates a machine with power-on registers and zeroed memory.
    pub fn new() -> Machine {
        Machine{
            registers: Registers::new(),
            memory: Memory::new()
        }
    }

    /// Resets the machine to its power-on state.
    pub fn reset(&mut self) {
        *self = Machine::new();
    }

    /// Fetches the byte at the program counter, decodes it via the opcode
    /// table, advances the program counter past the whole instruction, and
    /// returns the decoded instruction; `None` for an unknown opcode
    /// (in which case the program counter is NOT advanced).
    pub fn fetch_next_and_decode(&mut self) -> Option<DecodedInstr> {
        let x: u8 = self.memory.get_byte(self.registers.program_counter);
        match instruction::OPCODES[x as uint] {
            Some((instr, am)) => {
                // Operand length depends on the addressing mode.
                let extra_bytes = am.extra_bytes();
                let num_bytes = AddressDiff(1) + extra_bytes;
                let data_start = self.registers.program_counter
                    + AddressDiff(1);
                let slice = self.memory.get_slice(data_start, extra_bytes);
                let am_out = am.process(self, slice);
                // Increment program counter
                self.registers.program_counter =
                    self.registers.program_counter + num_bytes;
                Some((instr, am_out))
            }
            _ => None
        }
    }

    /// Executes one decoded instruction, updating registers, memory and the
    /// status flags. Unimplemented or invalid combinations are logged and
    /// otherwise ignored.
    pub fn execute_instruction(&mut self, decoded_instr: DecodedInstr) {
        match decoded_instr {
            (instruction::ADC, instruction::UseImmediate(val)) => {
                debug!("add with carry immediate: {}", val);
                self.add_with_carry(val as i8);
            }
            (instruction::ADC, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr) as i8;
                debug!("add with carry. address: {}. value: {}", addr, val);
                self.add_with_carry(val);
            }
            (instruction::AND, instruction::UseImmediate(val)) => {
                self.and(val as i8);
            }
            (instruction::AND, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr) as i8;
                self.and(val as i8);
            }
            (instruction::ASL, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::shift_left_with_flags(&mut val,
                                               &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::ASL, instruction::UseAddress(addr)) => {
                Machine::shift_left_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }
            (instruction::BCC, instruction::UseRelative(rel)) => {
                // Branch targets are relative to the already-advanced PC.
                let addr = self.registers.program_counter
                    + AddressDiff(rel as i32);
                self.branch_if_carry_clear(addr);
            }
            (instruction::BCS, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                    + AddressDiff(rel as i32);
                self.branch_if_carry_set(addr);
            }
            (instruction::BEQ, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                    + AddressDiff(rel as i32);
                self.branch_if_equal(addr);
            }
            (instruction::BIT, instruction::UseAddress(addr)) => {
                let a: u8 = self.registers.accumulator as u8;
                let m: u8 = self.memory.get_byte(addr);
                let res = a & m;
                // The zero flag is set based on the result of the 'and'.
                let is_zero = 0 == res;
                // The N flag is set to bit 7 of the byte from memory.
                // NOTE(review): bits 7/6 are taken from `res` (A & M) here,
                // not from the raw memory byte as the comments say --
                // confirm against the 6502 BIT specification.
                let bit7 = 0 != (0x80 & res);
                // The V flag is set to bit 6 of the byte from memory.
                let bit6 = 0 != (0x40 & res);
                self.registers.status.set_with_mask(
                    PS_ZERO | PS_NEGATIVE | PS_OVERFLOW,
                    Status::new(StatusArgs { zero: is_zero,
                                             negative: bit7,
                                             overflow: bit6,
                                             ..StatusArgs::none() } ));
            }
            (instruction::BMI, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                    + AddressDiff(rel as i32);
                debug!("branch if minus relative. address: {}", addr);
                self.branch_if_minus(addr);
            }
            (instruction::BPL, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                    + AddressDiff(rel as i32);
                self.branch_if_positive(addr);
            }
            (instruction::CLC, instruction::UseImplied) => {
                self.registers.status.and(!PS_CARRY);
            }
            (instruction::CLD, instruction::UseImplied) => {
                self.registers.status.and(!PS_DECIMAL_MODE);
            }
            (instruction::CLI, instruction::UseImplied) => {
                self.registers.status.and(!PS_DISABLE_INTERRUPTS);
            }
            (instruction::CLV, instruction::UseImplied) => {
                self.registers.status.and(!PS_OVERFLOW);
            }
            (instruction::DEC, instruction::UseAddress(addr)) => {
                self.decrement_memory(addr)
            }
            (instruction::DEX, instruction::UseImplied) => {
                self.dec_x();
            }
            (instruction::INC, instruction::UseAddress(addr)) => {
                // Relies on wrapping u8 arithmetic (pre-1.0 semantics) when
                // the byte is 0xFF.
                let m = self.memory.get_byte(addr);
                let m = m + 1;
                self.memory.set_byte(addr, m);
                let i = m as i8;
                Machine::set_flags_from_i8(&mut self.registers.status, i);
            }
            (instruction::INX, instruction::UseImplied) => {
                let x = self.registers.index_x + 1;
                self.load_x_register(x);
            }
            (instruction::INY, instruction::UseImplied) => {
                let y = self.registers.index_y + 1;
                self.load_y_register(y);
            }
            (instruction::JMP, instruction::UseAddress(addr)) => {
                self.jump(addr)
            }
            (instruction::LDA, instruction::UseImmediate(val)) => {
                debug!("load A immediate: {}", val);
                self.load_accumulator(val as i8);
            }
            (instruction::LDA, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                debug!("load A. address: {}. value: {}", addr, val);
                self.load_accumulator(val as i8);
            }
            (instruction::LDX, instruction::UseImmediate(val)) => {
                debug!("load X immediate: {}", val);
                self.load_x_register(val as i8);
            }
            (instruction::LDX, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                debug!("load X. address: {}. value: {}", addr, val);
                self.load_x_register(val as i8);
            }
            (instruction::LDY, instruction::UseImmediate(val)) => {
                debug!("load Y immediate: {}", val);
                self.load_y_register(val as i8);
            }
            (instruction::LDY, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                debug!("load Y. address: {}. value: {}", addr, val);
                self.load_y_register(val as i8);
            }
            (instruction::LSR, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::shift_right_with_flags(&mut val,
                                                &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::LSR, instruction::UseAddress(addr)) => {
                Machine::shift_right_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }
            (instruction::PHA, instruction::UseImplied) => {
                // Push accumulator
                let val = self.registers.accumulator as u8;
                self.push_on_stack(val);
            }
            (instruction::PHP, instruction::UseImplied) => {
                // Push status
                let val = self.registers.status.bits();
                self.push_on_stack(val);
            }
            (instruction::PLA, instruction::UseImplied) => {
                // Pull accumulator
                let val: u8 = self.pull_from_stack();
                self.registers.accumulator = val as i8;
            }
            (instruction::PLP, instruction::UseImplied) => {
                // Pull status
                let val: u8 = self.pull_from_stack();
                // The `truncate` here won't do anything because we have a
                // constant for the single unused flags bit. This probably
                // corresponds to the behavior of the 6502...? FIXME: verify
                self.registers.status = Status::from_bits_truncate(val);
            }
            (instruction::ROL, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::rotate_left_with_flags(&mut val,
                                                &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::ROL, instruction::UseAddress(addr)) => {
                Machine::rotate_left_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }
            (instruction::ROR, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::rotate_right_with_flags(&mut val,
                                                 &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::ROR, instruction::UseAddress(addr)) => {
                Machine::rotate_right_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }
            (instruction::SBC, instruction::UseImmediate(val)) => {
                debug!("subtract with carry immediate: {}", val);
                self.subtract_with_carry(val as i8);
            }
            (instruction::SBC, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr) as i8;
                debug!("subtract with carry. address: {}. value: {}",
                       addr, val);
                self.subtract_with_carry(val);
            }
            (instruction::SEC, instruction::UseImplied) => {
                self.registers.status.or(PS_CARRY);
            }
            (instruction::SED, instruction::UseImplied) => {
                self.registers.status.or(PS_DECIMAL_MODE);
            }
            (instruction::SEI, instruction::UseImplied) => {
                self.registers.status.or(PS_DISABLE_INTERRUPTS);
            }
            (instruction::STA, instruction::UseAddress(addr)) => {
                self.memory.set_byte(addr, self.registers.accumulator as u8);
            }
            (instruction::STX, instruction::UseAddress(addr)) => {
                self.memory.set_byte(addr, self.registers.index_x as u8);
            }
            (instruction::STY, instruction::UseAddress(addr)) => {
                self.memory.set_byte(addr, self.registers.index_y as u8);
            }
            (instruction::TAX, instruction::UseImplied) => {
                let val = self.registers.accumulator;
                self.load_x_register(val);
            }
            (instruction::TAY, instruction::UseImplied) => {
                let val = self.registers.accumulator;
                self.load_y_register(val);
            }
            (instruction::TSX, instruction::UseImplied) => {
                let StackPointer(val) = self.registers.stack_pointer;
                let val = val as i8;
                self.load_x_register(val);
            }
            (instruction::TXA, instruction::UseImplied) => {
                let val = self.registers.index_x;
                self.load_accumulator(val);
            }
            (instruction::TXS, instruction::UseImplied) => {
                // Note that this is the only 'transfer' instruction that does
                // NOT set the zero and negative flags. (Because the target
                // is the stack pointer)
                let val = self.registers.index_x;
                self.registers.stack_pointer = StackPointer(val as u8);
            }
            (instruction::TYA, instruction::UseImplied) => {
                let val = self.registers.index_y;
                self.load_accumulator(val);
            }
            (instruction::NOP, instruction::UseImplied) => {
                debug!("NOP instruction");
            }
            (_, _) => {
                debug!("attempting to execute unimplemented or invalid \
                        instruction");
            }
        };
    }

    /// Runs fetch/decode/execute until an undecodable opcode is reached.
    pub fn run(&mut self) {
        loop {
            if let Some(decoded_instr) = self.fetch_next_and_decode() {
                self.execute_instruction(decoded_instr);
            } else {
                break
            }
        }
    }

    /// Sets Z and N from a signed result (Z if zero, N if negative).
    fn set_flags_from_i8(status: &mut Status, value: i8) {
        let is_zero = value == 0;
        let is_negative = value < 0;
        status.set_with_mask(
            PS_ZERO | PS_NEGATIVE,
            Status::new(StatusArgs { zero: is_zero,
                                     negative: is_negative,
                                     ..StatusArgs::none() } ));
    }

    /// ASL: shifts left one bit in place; bit 7 goes to carry, then Z/N
    /// are set from the result.
    fn shift_left_with_flags(p_val: &mut u8, status: &mut Status) {
        let mask = 1 << 7;
        let is_bit_7_set = (*p_val & mask) == mask;
        // Bit 7 is cleared before shifting so the shift cannot overflow.
        let shifted = (*p_val & !(1 << 7)) << 1;
        *p_val = shifted;
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_7_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    /// LSR: shifts right one bit in place; bit 0 goes to carry, then Z/N
    /// are set from the result.
    fn shift_right_with_flags(p_val: &mut u8, status: &mut Status) {
        let mask = 1;
        let is_bit_0_set = (*p_val & mask) == mask;
        *p_val = *p_val >> 1;
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_0_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    /// ROL: rotates left through the carry flag; old carry becomes bit 0,
    /// old bit 7 becomes the new carry.
    fn rotate_left_with_flags(p_val: &mut u8, status: &mut Status) {
        let is_carry_set = status.contains(PS_CARRY);
        let mask = 1 << 7;
        let is_bit_7_set = (*p_val & mask) == mask;
        let shifted = (*p_val & !(1 << 7)) << 1;
        *p_val = shifted + if is_carry_set { 1 } else { 0 };
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_7_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    /// ROR: rotates right through the carry flag; old carry becomes bit 7,
    /// old bit 0 becomes the new carry.
    fn rotate_right_with_flags(p_val: &mut u8, status: &mut Status) {
        let is_carry_set = status.contains(PS_CARRY);
        let mask = 1;
        let is_bit_0_set = (*p_val & mask) == mask;
        let shifted = *p_val >> 1;
        *p_val = shifted + if is_carry_set { 1 << 7 } else { 0 };
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_0_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    /// Stores a value and updates Z/N accordingly (shared by the register
    /// load helpers below).
    fn set_i8_with_flags(mem: &mut i8, status: &mut Status, value: i8) {
        *mem = value;
        Machine::set_flags_from_i8(status, value);
    }

    fn load_x_register(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.index_x,
                                   &mut self.registers.status,
                                   value);
    }

    fn load_y_register(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.index_y,
                                   &mut self.registers.status,
                                   value);
    }

    fn load_accumulator(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.accumulator,
                                   &mut self.registers.status,
                                   value);
    }

    /// ADC: A <- A + value + carry, setting C on unsigned wraparound and V
    /// on signed overflow, then Z/N from the result. Decimal mode is not
    /// implemented. Relies on wrapping i8 addition (pre-1.0 semantics).
    fn add_with_carry(&mut self, value: i8) {
        if self.registers.status.contains(PS_DECIMAL_MODE) {
            // TODO akeeton: Implement binary-coded decimal.
            debug!("binary-coded decimal not implemented for add_with_carry");
        } else {
            let a_before: i8 = self.registers.accumulator;
            let c_before: i8 = if self.registers.status.contains(PS_CARRY)
                               { 1 } else { 0 };
            let a_after: i8 = a_before + c_before + value;
            debug_assert_eq!(a_after as u8, a_before as u8 + c_before as u8
                                            + value as u8);
            // Carry: the unsigned result wrapped past 0xFF.
            let did_carry = (a_after as u8) < (a_before as u8);
            // Overflow: both operands same sign, result opposite sign.
            let did_overflow =
                (a_before < 0 && value < 0 && a_after >= 0)
                || (a_before > 0 && value > 0 && a_after <= 0);
            let mask = PS_CARRY | PS_OVERFLOW;
            self.registers.status.set_with_mask(mask,
                Status::new(StatusArgs { carry: did_carry,
                                         overflow: did_overflow,
                                         ..StatusArgs::none() } ));
            self.load_accumulator(a_after);
            debug!("accumulator: {}", self.registers.accumulator);
        }
    }

    /// AND: A <- A & value, updating Z/N.
    fn and(&mut self, value: i8) {
        let a_after = self.registers.accumulator & value;
        self.load_accumulator(a_after);
    }

    // TODO: Implement binary-coded decimal
    /// SBC: A <- A - value - (1 - carry), setting C on unsigned borrow-free
    /// subtraction and V on signed overflow, then Z/N from the result.
    fn subtract_with_carry(&mut self, value: i8) {
        if self.registers.status.contains(PS_DECIMAL_MODE) {
            debug!("binary-coded decimal not implemented for \
                    subtract_with_carry");
        } else {
            // A - M - (1 - C)
            // nc -- 'not carry'
            let nc: i8 = if self.registers.status.contains(PS_CARRY)
                         { 0 } else { 1 };
            let a_before: i8 = self.registers.accumulator;
            let a_after = a_before - value - nc;
            // The carry flag is set on unsigned overflow.
            let did_carry = (a_after as u8) > (a_before as u8);
            // The overflow flag is set on two's-complement overflow.
            //
            // range of A              is  -128 to 127
            // range of - M - (1 - C)  is  -128 to 128
            //                             -(127 + 1) to -(-128 + 0)
            //
            let over = ((nc == 0 && value < 0) || (nc == 1 && value < -1))
                       && a_before >= 0
                       && a_after < 0;
            let under = (a_before < 0) && (-value - nc < 0)
                        && a_after >= 0;
            let did_overflow = over || under;
            let mask = PS_CARRY | PS_OVERFLOW;
            self.registers.status.set_with_mask(mask,
                Status::new(StatusArgs { carry: did_carry,
                                         overflow: did_overflow,
                                         ..StatusArgs::none() } ));
            self.load_accumulator(a_after);
        }
    }

    /// DEC: decrements the byte at `addr` in place and sets Z/N from the
    /// result. Relies on wrapping u8 subtraction when the byte is 0.
    fn decrement_memory(&mut self, addr: Address) {
        let value_new = self.memory.get_byte(addr) - 1;
        self.memory.set_byte(addr, value_new);
        let is_negative = (value_new as i8) < 0;
        let is_zero = value_new == 0;
        self.registers.status.set_with_mask(
            PS_NEGATIVE | PS_ZERO,
            Status::new(StatusArgs { negative: is_negative,
                                     zero: is_zero,
                                     ..StatusArgs::none() } ));
    }

    /// DEX: decrements the X register, updating Z/N.
    fn dec_x(&mut self) {
        let val = self.registers.index_x;
        self.load_x_register(val - 1);
    }

    /// JMP: unconditionally sets the program counter.
    fn jump(&mut self, addr: Address) {
        self.registers.program_counter = addr;
    }

    fn branch_if_carry_clear(&mut self, addr: Address) {
        if !self.registers.status.contains(PS_CARRY) {
            self.registers.program_counter = addr;
        }
    }

    fn branch_if_carry_set(&mut self, addr: Address) {
        if self.registers.status.contains(PS_CARRY) {
            self.registers.program_counter = addr;
        }
    }

    fn branch_if_equal(&mut self, addr: Address) {
        if self.registers.status.contains(PS_ZERO) {
            self.registers.program_counter = addr;
        }
    }

    fn branch_if_minus(&mut self, addr: Address) {
        if self.registers.status.contains(PS_NEGATIVE) {
            self.registers.program_counter = addr;
        }
    }

    fn branch_if_positive(&mut self, addr: Address) {
        if !self.registers.status.contains(PS_NEGATIVE) {
            self.registers.program_counter = addr;
        }
    }

    /// Writes `val` at the current stack address, then moves the stack
    /// pointer down (the 6502 stack grows downward).
    fn push_on_stack(&mut self, val: u8) {
        let addr = self.registers.stack_pointer.to_address();
        self.memory.set_byte(addr, val);
        self.registers.stack_pointer.decrement();
    }

    /// Reads the byte at the current stack address, then moves the stack
    /// pointer up.
    ///
    /// NOTE(review): a real 6502 increments the stack pointer BEFORE
    /// reading on a pull; this reads first -- confirm against push/pull
    /// round-trip tests.
    fn pull_from_stack(&mut self) -> u8 {
        let addr = self.registers.stack_pointer.to_address();
        let out = self.memory.get_byte(addr);
        self.registers.stack_pointer.increment();
        out
    }
}
// Pre-1.0 `Show` (the ancestor of today's `Display`/`Debug`): only the
// accumulator is included in the dump.
impl std::fmt::Show for Machine {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "Machine Dump:\n\nAccumulator: {}",
               self.registers.accumulator)
    }
}
#[test]
fn add_with_carry_test() {
    // 0 + 1: no flags.
    let mut machine = Machine::new();
    machine.add_with_carry(1);
    assert_eq!(machine.registers.accumulator, 1);
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), false);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    // 1 + (-1): wraps unsigned (carry) and hits zero.
    machine.add_with_carry(-1);
    assert_eq!(machine.registers.accumulator, 0);
    assert_eq!(machine.registers.status.contains(PS_CARRY), true);
    assert_eq!(machine.registers.status.contains(PS_ZERO), true);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    // 0 + 1 with carry still set from above: result is 2.
    machine.add_with_carry(1);
    assert_eq!(machine.registers.accumulator, 2);
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), false);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    // 127 + (-127) on a fresh machine: carry + zero, no signed overflow.
    let mut machine = Machine::new();
    machine.add_with_carry(127);
    assert_eq!(machine.registers.accumulator, 127);
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), false);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    machine.add_with_carry(-127);
    assert_eq!(machine.registers.accumulator, 0);
    assert_eq!(machine.registers.status.contains(PS_CARRY), true);
    assert_eq!(machine.registers.status.contains(PS_ZERO), true);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    // Clear the carry, then exercise the negative boundary: -128 + 127 = -1.
    machine.registers.status.remove(PS_CARRY);
    machine.add_with_carry(-128);
    assert_eq!(machine.registers.accumulator, -128);
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), false);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    machine.add_with_carry(127);
    assert_eq!(machine.registers.accumulator, -1);
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), false);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    // 127 + 1: signed overflow wraps to -128, V set.
    let mut machine = Machine::new();
    machine.add_with_carry(127);
    assert_eq!(machine.registers.accumulator, 127);
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), false);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), false);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), false);
    machine.add_with_carry(1);
    assert_eq!(machine.registers.accumulator, -128);
    assert_eq!(machine.registers.status.contains(PS_CARRY), false);
    assert_eq!(machine.registers.status.contains(PS_ZERO), false);
    assert_eq!(machine.registers.status.contains(PS_NEGATIVE), true);
    assert_eq!(machine.registers.status.contains(PS_OVERFLOW), true);
}
#[test]
fn and_test() {
    let mut machine = Machine::new();

    // 0 AND anything is 0: zero flag set, negative clear.
    machine.registers.accumulator = 0;
    machine.and(-1);
    assert_eq!(machine.registers.accumulator, 0);
    assert!(machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));

    // Anything AND 0 is likewise 0.
    machine.registers.accumulator = -1;
    machine.and(0);
    assert_eq!(machine.registers.accumulator, 0);
    assert!(machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));

    // 0xFF AND 0x0F keeps the low nibble: non-zero, positive.
    machine.registers.accumulator = -1;
    machine.and(0x0f);
    assert_eq!(machine.registers.accumulator, 0x0f);
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));

    // 0xFF AND 0x80 keeps bit 7: negative flag set.
    machine.registers.accumulator = -1;
    machine.and(-128);
    assert_eq!(machine.registers.accumulator, -128);
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
}
#[test]
fn subtract_with_carry_test() {
    let mut machine = Machine::new();

    // 0 - 1 with carry set: result is -1.
    machine.execute_instruction((instruction::SEC, instruction::UseImplied));
    machine.registers.accumulator = 0;
    machine.subtract_with_carry(1);
    assert_eq!(machine.registers.accumulator, -1);
    assert!(machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // -128 - 1 with carry set: signed underflow wraps to 127.
    machine.execute_instruction((instruction::SEC, instruction::UseImplied));
    machine.registers.accumulator = -128;
    machine.subtract_with_carry(1);
    assert_eq!(machine.registers.accumulator, 127);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(machine.registers.status.contains(PS_OVERFLOW));

    // 127 - (-1) with carry set: signed overflow wraps to -128.
    machine.execute_instruction((instruction::SEC, instruction::UseImplied));
    machine.registers.accumulator = 127;
    machine.subtract_with_carry(-1);
    assert_eq!(machine.registers.accumulator, -128);
    assert!(machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
    assert!(machine.registers.status.contains(PS_OVERFLOW));

    // -64 - 64 - 1 (carry clear): signed overflow to 127.
    machine.execute_instruction((instruction::CLC, instruction::UseImplied));
    machine.registers.accumulator = -64;
    machine.subtract_with_carry(64);
    assert_eq!(machine.registers.accumulator, 127);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(machine.registers.status.contains(PS_OVERFLOW));

    // 0 - (-128) with carry set: signed overflow to -128.
    machine.execute_instruction((instruction::SEC, instruction::UseImplied));
    machine.registers.accumulator = 0;
    machine.subtract_with_carry(-128);
    assert_eq!(machine.registers.accumulator, -128);
    assert!(machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
    assert!(machine.registers.status.contains(PS_OVERFLOW));

    // 0 - 127 - 1 (carry clear): wraps to -128 with no signed overflow.
    machine.execute_instruction((instruction::CLC, instruction::UseImplied));
    machine.registers.accumulator = 0;
    machine.subtract_with_carry(127);
    assert_eq!(machine.registers.accumulator, -128);
    assert!(machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));
}
#[test]
fn decrement_memory_test() {
    let mut machine = Machine::new();
    let addr = Address(0xA1B2);
    machine.memory.set_byte(addr, 5);

    // Each decrement takes the byte down by one, flags clear while positive.
    machine.decrement_memory(addr);
    assert_eq!(machine.memory.get_byte(addr), 4);
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));

    machine.decrement_memory(addr);
    assert_eq!(machine.memory.get_byte(addr), 3);
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));

    // Count the rest of the way down to zero: zero flag set.
    machine.decrement_memory(addr);
    machine.decrement_memory(addr);
    machine.decrement_memory(addr);
    assert_eq!(machine.memory.get_byte(addr), 0);
    assert!(machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));

    // One more decrement wraps 0x00 -> 0xFF (-1 as i8): negative flag set.
    machine.decrement_memory(addr);
    assert_eq!(machine.memory.get_byte(addr) as i8, -1);
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
}
#[test]
fn logical_shift_right_test() {
    // Testing UseImplied version (which targets the accumulator) only, for now
    let mut machine = Machine::new();

    // LSR of 0 stays 0 and sets the zero flag.
    machine.execute_instruction((instruction::LDA,
                                 instruction::UseImmediate(0)));
    machine.execute_instruction((instruction::LSR, instruction::UseImplied));
    assert_eq!(machine.registers.accumulator, 0);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // LSR of 1 shifts the low bit into carry, leaving 0.
    machine.execute_instruction((instruction::LDA,
                                 instruction::UseImmediate(1)));
    machine.execute_instruction((instruction::LSR, instruction::UseImplied));
    assert_eq!(machine.registers.accumulator, 0);
    assert!(machine.registers.status.contains(PS_CARRY));
    assert!(machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // LSR of 0xFF: 0x7F with the shifted-out bit in carry.
    machine.execute_instruction((instruction::LDA,
                                 instruction::UseImmediate(255)));
    machine.execute_instruction((instruction::LSR, instruction::UseImplied));
    assert_eq!(machine.registers.accumulator, 0x7F);
    assert!(machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // LSR of 0xFE: 0x7F with carry clear (low bit was 0).
    machine.execute_instruction((instruction::LDA,
                                 instruction::UseImmediate(254)));
    machine.execute_instruction((instruction::LSR, instruction::UseImplied));
    assert_eq!(machine.registers.accumulator, 0x7F);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));
}
#[test]
fn dec_x_test() {
    let mut machine = Machine::new();

    // X starts at 0; decrementing underflows to -1, negative flag set.
    machine.dec_x();
    assert_eq!(machine.registers.index_x, -1);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    machine.dec_x();
    assert_eq!(machine.registers.index_x, -2);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // Reload X with a positive value and step down.
    machine.load_x_register(5);
    machine.dec_x();
    assert_eq!(machine.registers.index_x, 4);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // Decrement down to zero: zero flag set, negative clear.
    machine.dec_x();
    machine.dec_x();
    machine.dec_x();
    machine.dec_x();
    assert_eq!(machine.registers.index_x, 0);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // One more step goes negative again.
    machine.dec_x();
    assert_eq!(machine.registers.index_x, -1);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));
}
#[test]
fn jump_test() {
    let mut machine = Machine::new();

    // An unconditional jump always lands at the target address.
    let destination = Address(0xA1B1);
    machine.jump(destination);
    assert_eq!(machine.registers.program_counter, destination);
}
#[test]
fn branch_if_carry_clear_test() {
    let mut machine = Machine::new();

    // Carry set: branch not taken, PC unchanged.
    machine.execute_instruction((instruction::SEC, instruction::UseImplied));
    machine.branch_if_carry_clear(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0));

    // Carry clear: branch taken.
    machine.execute_instruction((instruction::CLC, instruction::UseImplied));
    machine.branch_if_carry_clear(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0xABCD));
}
#[test]
fn branch_if_carry_set_test() {
    let mut machine = Machine::new();

    // Carry clear: branch not taken, PC unchanged.
    machine.execute_instruction((instruction::CLC, instruction::UseImplied));
    machine.branch_if_carry_set(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0));

    // Carry set: branch taken.
    machine.execute_instruction((instruction::SEC, instruction::UseImplied));
    machine.branch_if_carry_set(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0xABCD));
}
#[test]
fn branch_if_equal_test() {
    let mut machine = Machine::new();

    // Zero flag clear: branch not taken.
    machine.branch_if_equal(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0));

    // Zero flag set: branch taken.
    machine.registers.status.or(PS_ZERO);
    machine.branch_if_equal(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0xABCD));
}
#[test]
fn branch_if_minus_test() {
    {
        // Negative flag clear: no branch, and registers are untouched.
        let mut machine = Machine::new();
        let saved = machine.registers;
        machine.branch_if_minus(Address(0xABCD));
        assert_eq!(machine.registers, saved);
        assert_eq!(machine.registers.program_counter, Address(0));
    }

    {
        // Negative flag set: branch taken, status bits preserved.
        let mut machine = Machine::new();
        machine.registers.status.or(PS_NEGATIVE);
        let saved = machine.registers;
        machine.branch_if_minus(Address(0xABCD));
        assert_eq!(machine.registers.status, saved.status);
        assert_eq!(machine.registers.program_counter, Address(0xABCD));
    }
}
#[test]
fn branch_if_positive_test() {
    let mut machine = Machine::new();

    // Negative flag set: branch not taken.
    machine.registers.status.insert(PS_NEGATIVE);
    machine.branch_if_positive(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0));

    // Negative flag clear: branch taken.
    machine.registers.status.remove(PS_NEGATIVE);
    machine.branch_if_positive(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0xABCD));
}
#[test]
fn branch_if_overflow_clear_test() {
    let mut machine = Machine::new();

    // Overflow flag set: branch not taken.
    machine.registers.status.insert(PS_OVERFLOW);
    machine.branch_if_overflow_clear(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0));

    // Overflow flag clear: branch taken.
    machine.registers.status.remove(PS_OVERFLOW);
    machine.branch_if_overflow_clear(Address(0xABCD));
    assert_eq!(machine.registers.program_counter, Address(0xABCD));
}
// Add branch_if_overflow_clear().
// Copyright (C) 2014 The 6502-rs Developers
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. Neither the names of the copyright holders nor the names of any
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
use std;
use address::{Address, AddressDiff};
use instruction;
use instruction::{DecodedInstr};
use memory::Memory;
use registers::{ Registers, StackPointer, Status, StatusArgs };
use registers::{ PS_NEGATIVE, PS_DECIMAL_MODE, PS_OVERFLOW, PS_ZERO, PS_CARRY,
PS_DISABLE_INTERRUPTS };
/// A 6502 machine: the CPU register file plus its addressable memory.
pub struct Machine {
    // CPU state: accumulator, index registers, stack pointer, status, PC.
    pub registers: Registers,
    // The machine's full address space.
    pub memory: Memory
}
impl Machine {
    /// Creates a machine with power-on register values and fresh memory.
    pub fn new() -> Machine {
        Machine{
            registers: Registers::new(),
            memory: Memory::new()
        }
    }

    /// Resets the machine back to its power-on state.
    pub fn reset(&mut self) {
        *self = Machine::new();
    }

    /// Fetches the opcode at the program counter, decodes it into an
    /// instruction plus a resolved addressing-mode operand, and advances
    /// the program counter past the instruction and its operand bytes.
    ///
    /// Returns `None` when the byte at the PC is not a known opcode.
    pub fn fetch_next_and_decode(&mut self) -> Option<DecodedInstr> {
        let x: u8 = self.memory.get_byte(self.registers.program_counter);

        match instruction::OPCODES[x as uint] {
            Some((instr, am)) => {
                let extra_bytes = am.extra_bytes();
                let num_bytes = AddressDiff(1) + extra_bytes;
                let data_start = self.registers.program_counter
                               + AddressDiff(1);

                let slice = self.memory.get_slice(data_start, extra_bytes);
                let am_out = am.process(self, slice);

                // Increment program counter
                self.registers.program_counter =
                    self.registers.program_counter + num_bytes;

                Some((instr, am_out))
            }
            _ => None
        }
    }

    /// Executes a single decoded instruction against the registers and
    /// memory. Unimplemented or invalid instruction/addressing-mode
    /// combinations are logged and otherwise ignored.
    pub fn execute_instruction(&mut self, decoded_instr: DecodedInstr) {
        match decoded_instr {
            (instruction::ADC, instruction::UseImmediate(val)) => {
                debug!("add with carry immediate: {}", val);
                self.add_with_carry(val as i8);
            }
            (instruction::ADC, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr) as i8;
                debug!("add with carry. address: {}. value: {}", addr, val);
                self.add_with_carry(val);
            }

            (instruction::AND, instruction::UseImmediate(val)) => {
                self.and(val as i8);
            }
            (instruction::AND, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr) as i8;
                self.and(val as i8);
            }

            (instruction::ASL, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::shift_left_with_flags(&mut val,
                                               &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::ASL, instruction::UseAddress(addr)) => {
                Machine::shift_left_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }

            (instruction::BCC, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                         + AddressDiff(rel as i32);
                self.branch_if_carry_clear(addr);
            }
            (instruction::BCS, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                         + AddressDiff(rel as i32);
                self.branch_if_carry_set(addr);
            }
            (instruction::BEQ, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                         + AddressDiff(rel as i32);
                self.branch_if_equal(addr);
            }

            (instruction::BIT, instruction::UseAddress(addr)) => {
                let a: u8 = self.registers.accumulator as u8;
                let m: u8 = self.memory.get_byte(addr);
                let res = a & m;

                // The zero flag is set based on the result of the 'and'.
                let is_zero = 0 == res;

                // The N flag is set to bit 7 of the byte from memory.
                // BUGFIX: previously masked `res` (A AND M); the 6502 takes
                // N directly from the memory operand regardless of A.
                let bit7 = 0 != (0x80 & m);

                // The V flag is set to bit 6 of the byte from memory.
                // BUGFIX: same as above — taken from M, not from A AND M.
                let bit6 = 0 != (0x40 & m);

                self.registers.status.set_with_mask(
                    PS_ZERO | PS_NEGATIVE | PS_OVERFLOW,
                    Status::new(StatusArgs { zero: is_zero,
                                             negative: bit7,
                                             overflow: bit6,
                                             ..StatusArgs::none() } ));
            }

            (instruction::BMI, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                         + AddressDiff(rel as i32);
                debug!("branch if minus relative. address: {}", addr);
                self.branch_if_minus(addr);
            }

            (instruction::BPL, instruction::UseRelative(rel)) => {
                let addr = self.registers.program_counter
                         + AddressDiff(rel as i32);
                self.branch_if_positive(addr);
            }

            (instruction::CLC, instruction::UseImplied) => {
                self.registers.status.and(!PS_CARRY);
            }
            (instruction::CLD, instruction::UseImplied) => {
                self.registers.status.and(!PS_DECIMAL_MODE);
            }
            (instruction::CLI, instruction::UseImplied) => {
                self.registers.status.and(!PS_DISABLE_INTERRUPTS);
            }
            (instruction::CLV, instruction::UseImplied) => {
                self.registers.status.and(!PS_OVERFLOW);
            }

            (instruction::DEC, instruction::UseAddress(addr)) => {
                self.decrement_memory(addr)
            }

            (instruction::DEX, instruction::UseImplied) => {
                self.dec_x();
            }

            (instruction::INC, instruction::UseAddress(addr)) => {
                // NOTE(review): relies on u8 wrap-around at 0xFF -> 0x00.
                let m = self.memory.get_byte(addr);
                let m = m + 1;
                self.memory.set_byte(addr, m);
                let i = m as i8;
                Machine::set_flags_from_i8(&mut self.registers.status, i);
            }
            (instruction::INX, instruction::UseImplied) => {
                let x = self.registers.index_x + 1;
                self.load_x_register(x);
            }
            (instruction::INY, instruction::UseImplied) => {
                let y = self.registers.index_y + 1;
                self.load_y_register(y);
            }

            (instruction::JMP, instruction::UseAddress(addr)) => {
                self.jump(addr)
            }

            (instruction::LDA, instruction::UseImmediate(val)) => {
                debug!("load A immediate: {}", val);
                self.load_accumulator(val as i8);
            }
            (instruction::LDA, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                debug!("load A. address: {}. value: {}", addr, val);
                self.load_accumulator(val as i8);
            }

            (instruction::LDX, instruction::UseImmediate(val)) => {
                debug!("load X immediate: {}", val);
                self.load_x_register(val as i8);
            }
            (instruction::LDX, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                debug!("load X. address: {}. value: {}", addr, val);
                self.load_x_register(val as i8);
            }

            (instruction::LDY, instruction::UseImmediate(val)) => {
                debug!("load Y immediate: {}", val);
                self.load_y_register(val as i8);
            }
            (instruction::LDY, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr);
                debug!("load Y. address: {}. value: {}", addr, val);
                self.load_y_register(val as i8);
            }

            (instruction::LSR, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::shift_right_with_flags(&mut val,
                                                &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::LSR, instruction::UseAddress(addr)) => {
                Machine::shift_right_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }

            (instruction::PHA, instruction::UseImplied) => {
                // Push accumulator
                let val = self.registers.accumulator as u8;
                self.push_on_stack(val);
            }
            (instruction::PHP, instruction::UseImplied) => {
                // Push status
                let val = self.registers.status.bits();
                self.push_on_stack(val);
            }
            (instruction::PLA, instruction::UseImplied) => {
                // Pull accumulator
                let val: u8 = self.pull_from_stack();
                self.registers.accumulator = val as i8;
            }
            (instruction::PLP, instruction::UseImplied) => {
                // Pull status
                let val: u8 = self.pull_from_stack();
                // The `truncate` here won't do anything because we have a
                // constant for the single unused flags bit. This probably
                // corresponds to the behavior of the 6502...? FIXME: verify
                self.registers.status = Status::from_bits_truncate(val);
            }

            (instruction::ROL, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::rotate_left_with_flags(&mut val,
                                                &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::ROL, instruction::UseAddress(addr)) => {
                Machine::rotate_left_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }
            (instruction::ROR, instruction::UseImplied) => {
                // Accumulator mode
                let mut val = self.registers.accumulator as u8;
                Machine::rotate_right_with_flags(&mut val,
                                                 &mut self.registers.status);
                self.registers.accumulator = val as i8;
            }
            (instruction::ROR, instruction::UseAddress(addr)) => {
                Machine::rotate_right_with_flags(
                    self.memory.get_byte_mut_ref(addr),
                    &mut self.registers.status);
            }

            (instruction::SBC, instruction::UseImmediate(val)) => {
                debug!("subtract with carry immediate: {}", val);
                self.subtract_with_carry(val as i8);
            }
            (instruction::SBC, instruction::UseAddress(addr)) => {
                let val = self.memory.get_byte(addr) as i8;
                debug!("subtract with carry. address: {}. value: {}",
                       addr, val);
                self.subtract_with_carry(val);
            }

            (instruction::SEC, instruction::UseImplied) => {
                self.registers.status.or(PS_CARRY);
            }
            (instruction::SED, instruction::UseImplied) => {
                self.registers.status.or(PS_DECIMAL_MODE);
            }
            (instruction::SEI, instruction::UseImplied) => {
                self.registers.status.or(PS_DISABLE_INTERRUPTS);
            }

            (instruction::STA, instruction::UseAddress(addr)) => {
                self.memory.set_byte(addr, self.registers.accumulator as u8);
            }
            (instruction::STX, instruction::UseAddress(addr)) => {
                self.memory.set_byte(addr, self.registers.index_x as u8);
            }
            (instruction::STY, instruction::UseAddress(addr)) => {
                self.memory.set_byte(addr, self.registers.index_y as u8);
            }

            (instruction::TAX, instruction::UseImplied) => {
                let val = self.registers.accumulator;
                self.load_x_register(val);
            }
            (instruction::TAY, instruction::UseImplied) => {
                let val = self.registers.accumulator;
                self.load_y_register(val);
            }
            (instruction::TSX, instruction::UseImplied) => {
                let StackPointer(val) = self.registers.stack_pointer;
                let val = val as i8;
                self.load_x_register(val);
            }
            (instruction::TXA, instruction::UseImplied) => {
                let val = self.registers.index_x;
                self.load_accumulator(val);
            }
            (instruction::TXS, instruction::UseImplied) => {
                // Note that this is the only 'transfer' instruction that does
                // NOT set the zero and negative flags. (Because the target
                // is the stack pointer)
                let val = self.registers.index_x;
                self.registers.stack_pointer = StackPointer(val as u8);
            }
            (instruction::TYA, instruction::UseImplied) => {
                let val = self.registers.index_y;
                self.load_accumulator(val);
            }

            (instruction::NOP, instruction::UseImplied) => {
                debug!("NOP instruction");
            }
            (_, _) => {
                debug!("attempting to execute unimplemented or invalid \
                        instruction");
            }
        };
    }

    /// Runs the fetch/decode/execute loop until an unknown opcode is hit.
    pub fn run(&mut self) {
        loop {
            if let Some(decoded_instr) = self.fetch_next_and_decode() {
                self.execute_instruction(decoded_instr);
            } else {
                break
            }
        }
    }

    // Sets the zero and negative flags from a signed result value,
    // leaving all other status bits untouched.
    fn set_flags_from_i8(status: &mut Status, value: i8) {
        let is_zero = value == 0;
        let is_negative = value < 0;

        status.set_with_mask(
            PS_ZERO | PS_NEGATIVE,
            Status::new(StatusArgs { zero: is_zero,
                                     negative: is_negative,
                                     ..StatusArgs::none() } ));
    }

    // ASL: shifts left one bit; the old bit 7 goes into the carry flag.
    fn shift_left_with_flags(p_val: &mut u8, status: &mut Status) {
        let mask = 1 << 7;
        let is_bit_7_set = (*p_val & mask) == mask;
        let shifted = (*p_val & !(1 << 7)) << 1;
        *p_val = shifted;
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_7_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    // LSR: shifts right one bit; the old bit 0 goes into the carry flag.
    fn shift_right_with_flags(p_val: &mut u8, status: &mut Status) {
        let mask = 1;
        let is_bit_0_set = (*p_val & mask) == mask;
        *p_val = *p_val >> 1;
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_0_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    // ROL: rotates left through the carry flag (old carry enters bit 0,
    // old bit 7 becomes the new carry).
    fn rotate_left_with_flags(p_val: &mut u8, status: &mut Status) {
        let is_carry_set = status.contains(PS_CARRY);
        let mask = 1 << 7;
        let is_bit_7_set = (*p_val & mask) == mask;
        let shifted = (*p_val & !(1 << 7)) << 1;
        *p_val = shifted + if is_carry_set { 1 } else { 0 };
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_7_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    // ROR: rotates right through the carry flag (old carry enters bit 7,
    // old bit 0 becomes the new carry).
    fn rotate_right_with_flags(p_val: &mut u8, status: &mut Status) {
        let is_carry_set = status.contains(PS_CARRY);
        let mask = 1;
        let is_bit_0_set = (*p_val & mask) == mask;
        let shifted = *p_val >> 1;
        *p_val = shifted + if is_carry_set { 1 << 7 } else { 0 };
        status.set_with_mask(
            PS_CARRY,
            Status::new(StatusArgs { carry: is_bit_0_set,
                                     ..StatusArgs::none() } ));
        Machine::set_flags_from_i8(status, *p_val as i8);
    }

    // Stores `value` into `mem` and updates zero/negative flags to match.
    fn set_i8_with_flags(mem: &mut i8, status: &mut Status, value: i8) {
        *mem = value;
        Machine::set_flags_from_i8(status, value);
    }

    // Loads X and updates the zero/negative flags.
    fn load_x_register(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.index_x,
                                   &mut self.registers.status,
                                   value);
    }

    // Loads Y and updates the zero/negative flags.
    fn load_y_register(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.index_y,
                                   &mut self.registers.status,
                                   value);
    }

    // Loads A and updates the zero/negative flags.
    fn load_accumulator(&mut self, value: i8) {
        Machine::set_i8_with_flags(&mut self.registers.accumulator,
                                   &mut self.registers.status,
                                   value);
    }

    // ADC: A <- A + value + carry, setting carry on unsigned wrap and
    // overflow on two's-complement sign error.
    fn add_with_carry(&mut self, value: i8) {
        if self.registers.status.contains(PS_DECIMAL_MODE) {
            // TODO akeeton: Implement binary-coded decimal.
            debug!("binary-coded decimal not implemented for add_with_carry");
        } else {
            let a_before: i8 = self.registers.accumulator;
            let c_before: i8 = if self.registers.status.contains(PS_CARRY)
                               { 1 } else { 0 };
            let a_after: i8 = a_before + c_before + value;

            debug_assert_eq!(a_after as u8, a_before as u8 + c_before as u8
                                            + value as u8);

            let did_carry = (a_after as u8) < (a_before as u8);

            // NOTE(review): this check ignores the carry-in, so e.g.
            // A = 127, value = 0 with carry set is not flagged as signed
            // overflow — confirm against real-hardware behavior.
            let did_overflow =
                (a_before < 0 && value < 0 && a_after >= 0)
                || (a_before > 0 && value > 0 && a_after <= 0);

            let mask = PS_CARRY | PS_OVERFLOW;

            self.registers.status.set_with_mask(mask,
                Status::new(StatusArgs { carry: did_carry,
                                         overflow: did_overflow,
                                         ..StatusArgs::none() } ));

            self.load_accumulator(a_after);

            debug!("accumulator: {}", self.registers.accumulator);
        }
    }

    // AND: A <- A & value, updating zero/negative flags.
    fn and(&mut self, value: i8) {
        let a_after = self.registers.accumulator & value;
        self.load_accumulator(a_after);
    }

    // TODO: Implement binary-coded decimal
    // SBC: A <- A - value - (1 - carry), setting carry on unsigned borrow
    // and overflow on two's-complement sign error.
    fn subtract_with_carry(&mut self, value: i8) {
        if self.registers.status.contains(PS_DECIMAL_MODE) {
            debug!("binary-coded decimal not implemented for \
                    subtract_with_carry");
        } else {
            // A - M - (1 - C)

            // nc -- 'not carry'
            let nc: i8 = if self.registers.status.contains(PS_CARRY)
                         { 0 } else { 1 };

            let a_before: i8 = self.registers.accumulator;

            let a_after = a_before - value - nc;

            // The carry flag is set on unsigned overflow.
            let did_carry = (a_after as u8) > (a_before as u8);

            // The overflow flag is set on two's-complement overflow.
            //
            // range of A              is  -128 to 127
            // range of - M - (1 - C)  is  -128 to 128
            //                             -(127 + 1) to -(-128 + 0)
            //
            let over = ((nc == 0 && value < 0) || (nc == 1 && value < -1))
                       && a_before >= 0
                       && a_after < 0;

            let under = (a_before < 0) && (-value - nc < 0)
                        && a_after >= 0;

            let did_overflow = over || under;

            let mask = PS_CARRY | PS_OVERFLOW;

            self.registers.status.set_with_mask(mask,
                Status::new(StatusArgs { carry: did_carry,
                                         overflow: did_overflow,
                                         ..StatusArgs::none() } ));

            self.load_accumulator(a_after);
        }
    }

    // DEC: decrements a memory byte in place (wrapping 0x00 -> 0xFF) and
    // sets zero/negative flags from the result.
    fn decrement_memory(&mut self, addr: Address) {
        let value_new = self.memory.get_byte(addr) - 1;

        self.memory.set_byte(addr, value_new);

        let is_negative = (value_new as i8) < 0;
        let is_zero = value_new == 0;

        self.registers.status.set_with_mask(
            PS_NEGATIVE | PS_ZERO,
            Status::new(StatusArgs { negative: is_negative,
                                     zero: is_zero,
                                     ..StatusArgs::none() } ));
    }

    // DEX: decrements X, updating zero/negative flags.
    fn dec_x(&mut self) {
        let val = self.registers.index_x;
        self.load_x_register(val - 1);
    }

    // JMP: sets the program counter unconditionally.
    fn jump(&mut self, addr: Address) {
        self.registers.program_counter = addr;
    }

    // BCC: branches when the carry flag is clear.
    fn branch_if_carry_clear(&mut self, addr: Address) {
        if !self.registers.status.contains(PS_CARRY) {
            self.registers.program_counter = addr;
        }
    }

    // BCS: branches when the carry flag is set.
    fn branch_if_carry_set(&mut self, addr: Address) {
        if self.registers.status.contains(PS_CARRY) {
            self.registers.program_counter = addr;
        }
    }

    // BEQ: branches when the zero flag is set.
    fn branch_if_equal(&mut self, addr: Address) {
        if self.registers.status.contains(PS_ZERO) {
            self.registers.program_counter = addr;
        }
    }

    // BMI: branches when the negative flag is set.
    fn branch_if_minus(&mut self, addr: Address) {
        if self.registers.status.contains(PS_NEGATIVE) {
            self.registers.program_counter = addr;
        }
    }

    // BPL: branches when the negative flag is clear.
    fn branch_if_positive(&mut self, addr: Address) {
        if !self.registers.status.contains(PS_NEGATIVE) {
            self.registers.program_counter = addr;
        }
    }

    // BVC: branches when the overflow flag is clear.
    fn branch_if_overflow_clear(&mut self, addr: Address) {
        if !self.registers.status.contains(PS_OVERFLOW) {
            self.registers.program_counter = addr;
        }
    }

    // Writes a byte at the stack pointer's address, then moves the
    // pointer down (the 6502 stack grows downward).
    fn push_on_stack(&mut self, val: u8) {
        let addr = self.registers.stack_pointer.to_address();
        self.memory.set_byte(addr, val);
        self.registers.stack_pointer.decrement();
    }

    // Reads the byte at the stack pointer's address, then moves the
    // pointer up. NOTE(review): pull order relative to the increment
    // differs from hardware (which increments first) — confirm.
    fn pull_from_stack(&mut self) -> u8 {
        let addr = self.registers.stack_pointer.to_address();
        let out = self.memory.get_byte(addr);
        self.registers.stack_pointer.increment();
        out
    }
}
impl std::fmt::Show for Machine {
    /// Renders a human-readable dump of the machine state.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let acc = self.registers.accumulator;
        write!(f, "Machine Dump:\n\nAccumulator: {}", acc)
    }
}
#[test]
fn add_with_carry_test() {
    let mut machine = Machine::new();

    // 0 + 1: simple add, all flags clear.
    machine.add_with_carry(1);
    assert_eq!(machine.registers.accumulator, 1);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // 1 + (-1): result 0, unsigned wrap sets carry and zero.
    machine.add_with_carry(-1);
    assert_eq!(machine.registers.accumulator, 0);
    assert!(machine.registers.status.contains(PS_CARRY));
    assert!(machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // 0 + 1 with carry still set from the previous add: 2.
    machine.add_with_carry(1);
    assert_eq!(machine.registers.accumulator, 2);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // Fresh machine: 0 + 127 = 127, no flags.
    let mut machine = Machine::new();
    machine.add_with_carry(127);
    assert_eq!(machine.registers.accumulator, 127);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // 127 + (-127) = 0 with carry out.
    machine.add_with_carry(-127);
    assert_eq!(machine.registers.accumulator, 0);
    assert!(machine.registers.status.contains(PS_CARRY));
    assert!(machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // With carry cleared, 0 + (-128) = -128: negative, no overflow.
    machine.registers.status.remove(PS_CARRY);
    machine.add_with_carry(-128);
    assert_eq!(machine.registers.accumulator, -128);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // -128 + 127 = -1: still negative, mixed signs never overflow.
    machine.add_with_carry(127);
    assert_eq!(machine.registers.accumulator, -1);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    // Fresh machine: 127 + 1 wraps to -128 with signed overflow.
    let mut machine = Machine::new();
    machine.add_with_carry(127);
    assert_eq!(machine.registers.accumulator, 127);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(!machine.registers.status.contains(PS_NEGATIVE));
    assert!(!machine.registers.status.contains(PS_OVERFLOW));

    machine.add_with_carry(1);
    assert_eq!(machine.registers.accumulator, -128);
    assert!(!machine.registers.status.contains(PS_CARRY));
    assert!(!machine.registers.status.contains(PS_ZERO));
    assert!(machine.registers.status.contains(PS_NEGATIVE));
    assert!(machine.registers.status.contains(PS_OVERFLOW));
}
#[test]
fn and_test() {
    let mut cpu = Machine::new();

    // 0 AND anything is 0 -> zero flag set, negative clear.
    cpu.registers.accumulator = 0;
    cpu.and(-1);
    assert_eq!(cpu.registers.accumulator, 0);
    assert!(cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));

    // Anything AND 0 is 0.
    cpu.registers.accumulator = -1;
    cpu.and(0);
    assert_eq!(cpu.registers.accumulator, 0);
    assert!(cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));

    // Masking keeps only the low nibble; result is positive and non-zero.
    cpu.registers.accumulator = -1;
    cpu.and(0x0f);
    assert_eq!(cpu.registers.accumulator, 0x0f);
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));

    // High bit survives the mask -> negative flag set.
    cpu.registers.accumulator = -1;
    cpu.and(-128);
    assert_eq!(cpu.registers.accumulator, -128);
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(cpu.registers.status.contains(PS_NEGATIVE));
}
#[test]
fn subtract_with_carry_test() {
    let mut cpu = Machine::new();

    // 0 - 1 with carry set.
    cpu.execute_instruction((instruction::SEC, instruction::UseImplied));
    cpu.registers.accumulator = 0;
    cpu.subtract_with_carry(1);
    assert_eq!(cpu.registers.accumulator, -1);
    assert!(cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(cpu.registers.status.contains(PS_NEGATIVE));
    assert!(!cpu.registers.status.contains(PS_OVERFLOW));

    // -128 - 1 with carry set: signed underflow -> overflow flag.
    cpu.execute_instruction((instruction::SEC, instruction::UseImplied));
    cpu.registers.accumulator = -128;
    cpu.subtract_with_carry(1);
    assert_eq!(cpu.registers.accumulator, 127);
    assert!(!cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));
    assert!(cpu.registers.status.contains(PS_OVERFLOW));

    // 127 - (-1) with carry set: signed overflow in the other direction.
    cpu.execute_instruction((instruction::SEC, instruction::UseImplied));
    cpu.registers.accumulator = 127;
    cpu.subtract_with_carry(-1);
    assert_eq!(cpu.registers.accumulator, -128);
    assert!(cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(cpu.registers.status.contains(PS_NEGATIVE));
    assert!(cpu.registers.status.contains(PS_OVERFLOW));

    // -64 - 64 with carry clear (extra borrow).
    cpu.execute_instruction((instruction::CLC, instruction::UseImplied));
    cpu.registers.accumulator = -64;
    cpu.subtract_with_carry(64);
    assert_eq!(cpu.registers.accumulator, 127);
    assert!(!cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));
    assert!(cpu.registers.status.contains(PS_OVERFLOW));

    // 0 - (-128) with carry set.
    cpu.execute_instruction((instruction::SEC, instruction::UseImplied));
    cpu.registers.accumulator = 0;
    cpu.subtract_with_carry(-128);
    assert_eq!(cpu.registers.accumulator, -128);
    assert!(cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(cpu.registers.status.contains(PS_NEGATIVE));
    assert!(cpu.registers.status.contains(PS_OVERFLOW));

    // 0 - 127 with carry clear.
    cpu.execute_instruction((instruction::CLC, instruction::UseImplied));
    cpu.registers.accumulator = 0;
    cpu.subtract_with_carry(127);
    assert_eq!(cpu.registers.accumulator, -128);
    assert!(cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(cpu.registers.status.contains(PS_NEGATIVE));
    assert!(!cpu.registers.status.contains(PS_OVERFLOW));
}
#[test]
fn decrement_memory_test() {
    let mut cpu = Machine::new();
    let addr = Address(0xA1B2);
    cpu.memory.set_byte(addr, 5);

    // 5 -> 4: no flags.
    cpu.decrement_memory(addr);
    assert_eq!(cpu.memory.get_byte(addr), 4);
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));

    // 4 -> 3: still no flags.
    cpu.decrement_memory(addr);
    assert_eq!(cpu.memory.get_byte(addr), 3);
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));

    // 3 -> 0: zero flag set.
    cpu.decrement_memory(addr);
    cpu.decrement_memory(addr);
    cpu.decrement_memory(addr);
    assert_eq!(cpu.memory.get_byte(addr), 0);
    assert!(cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));

    // 0 -> -1: wraps below zero, negative flag set.
    cpu.decrement_memory(addr);
    assert_eq!(cpu.memory.get_byte(addr) as i8, -1);
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(cpu.registers.status.contains(PS_NEGATIVE));
}
#[test]
fn logical_shift_right_test() {
    // Testing UseImplied version (which targets the accumulator) only, for now
    let mut cpu = Machine::new();

    // LSR of 0 stays 0 with the zero flag set.
    cpu.execute_instruction((instruction::LDA, instruction::UseImmediate(0)));
    cpu.execute_instruction((instruction::LSR, instruction::UseImplied));
    assert_eq!(cpu.registers.accumulator, 0);
    assert!(!cpu.registers.status.contains(PS_CARRY));
    assert!(cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));
    assert!(!cpu.registers.status.contains(PS_OVERFLOW));

    // LSR of 1 shifts the low bit into carry and leaves zero.
    cpu.execute_instruction((instruction::LDA, instruction::UseImmediate(1)));
    cpu.execute_instruction((instruction::LSR, instruction::UseImplied));
    assert_eq!(cpu.registers.accumulator, 0);
    assert!(cpu.registers.status.contains(PS_CARRY));
    assert!(cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));
    assert!(!cpu.registers.status.contains(PS_OVERFLOW));

    // LSR of 255: low bit into carry, result 0x7F.
    cpu.execute_instruction((instruction::LDA, instruction::UseImmediate(255)));
    cpu.execute_instruction((instruction::LSR, instruction::UseImplied));
    assert_eq!(cpu.registers.accumulator, 0x7F);
    assert!(cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));
    assert!(!cpu.registers.status.contains(PS_OVERFLOW));

    // LSR of 254: low bit is clear, so carry stays clear.
    cpu.execute_instruction((instruction::LDA, instruction::UseImmediate(254)));
    cpu.execute_instruction((instruction::LSR, instruction::UseImplied));
    assert_eq!(cpu.registers.accumulator, 0x7F);
    assert!(!cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));
    assert!(!cpu.registers.status.contains(PS_OVERFLOW));
}
#[test]
fn dec_x_test() {
    let mut cpu = Machine::new();

    // Decrement from the initial 0 wraps to -1, setting negative.
    cpu.dec_x();
    assert_eq!(cpu.registers.index_x, -1);
    assert!(!cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(cpu.registers.status.contains(PS_NEGATIVE));
    assert!(!cpu.registers.status.contains(PS_OVERFLOW));

    cpu.dec_x();
    assert_eq!(cpu.registers.index_x, -2);
    assert!(!cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(cpu.registers.status.contains(PS_NEGATIVE));
    assert!(!cpu.registers.status.contains(PS_OVERFLOW));

    // Reload X and decrement through positive values.
    cpu.load_x_register(5);
    cpu.dec_x();
    assert_eq!(cpu.registers.index_x, 4);
    assert!(!cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));
    assert!(!cpu.registers.status.contains(PS_OVERFLOW));

    // Hitting exactly zero sets the zero flag.
    cpu.dec_x();
    cpu.dec_x();
    cpu.dec_x();
    cpu.dec_x();
    assert_eq!(cpu.registers.index_x, 0);
    assert!(!cpu.registers.status.contains(PS_CARRY));
    assert!(cpu.registers.status.contains(PS_ZERO));
    assert!(!cpu.registers.status.contains(PS_NEGATIVE));
    assert!(!cpu.registers.status.contains(PS_OVERFLOW));

    // One more step crosses zero into negative territory again.
    cpu.dec_x();
    assert_eq!(cpu.registers.index_x, -1);
    assert!(!cpu.registers.status.contains(PS_CARRY));
    assert!(!cpu.registers.status.contains(PS_ZERO));
    assert!(cpu.registers.status.contains(PS_NEGATIVE));
    assert!(!cpu.registers.status.contains(PS_OVERFLOW));
}
#[test]
fn jump_test() {
    // A jump simply loads the target into the program counter.
    let mut cpu = Machine::new();
    let target = Address(0xA1B1);
    cpu.jump(target);
    assert_eq!(cpu.registers.program_counter, target);
}
#[test]
fn branch_if_carry_clear_test() {
    let mut cpu = Machine::new();

    // Carry set: branch not taken.
    cpu.execute_instruction((instruction::SEC, instruction::UseImplied));
    cpu.branch_if_carry_clear(Address(0xABCD));
    assert_eq!(cpu.registers.program_counter, Address(0));

    // Carry clear: branch taken.
    cpu.execute_instruction((instruction::CLC, instruction::UseImplied));
    cpu.branch_if_carry_clear(Address(0xABCD));
    assert_eq!(cpu.registers.program_counter, Address(0xABCD));
}
#[test]
fn branch_if_carry_set_test() {
    let mut cpu = Machine::new();

    // Carry clear: branch not taken.
    cpu.execute_instruction((instruction::CLC, instruction::UseImplied));
    cpu.branch_if_carry_set(Address(0xABCD));
    assert_eq!(cpu.registers.program_counter, Address(0));

    // Carry set: branch taken.
    cpu.execute_instruction((instruction::SEC, instruction::UseImplied));
    cpu.branch_if_carry_set(Address(0xABCD));
    assert_eq!(cpu.registers.program_counter, Address(0xABCD));
}
#[test]
fn branch_if_equal_test() {
    let mut cpu = Machine::new();

    // Zero flag clear: branch not taken.
    cpu.branch_if_equal(Address(0xABCD));
    assert_eq!(cpu.registers.program_counter, Address(0));

    // Zero flag set: branch taken.
    cpu.registers.status.or(PS_ZERO);
    cpu.branch_if_equal(Address(0xABCD));
    assert_eq!(cpu.registers.program_counter, Address(0xABCD));
}
#[test]
fn branch_if_minus_test() {
    // Negative flag clear: no branch and registers untouched.
    {
        let mut cpu = Machine::new();
        let snapshot = cpu.registers;
        cpu.branch_if_minus(Address(0xABCD));
        assert_eq!(cpu.registers, snapshot);
        assert_eq!(cpu.registers.program_counter, Address(0));
    }
    // Negative flag set: branch taken, status bits preserved.
    {
        let mut cpu = Machine::new();
        cpu.registers.status.or(PS_NEGATIVE);
        let snapshot = cpu.registers;
        cpu.branch_if_minus(Address(0xABCD));
        assert_eq!(cpu.registers.status, snapshot.status);
        assert_eq!(cpu.registers.program_counter, Address(0xABCD));
    }
}
#[test]
fn branch_if_positive_test() {
    let mut cpu = Machine::new();

    // Negative flag set: branch not taken.
    cpu.registers.status.insert(PS_NEGATIVE);
    cpu.branch_if_positive(Address(0xABCD));
    assert_eq!(cpu.registers.program_counter, Address(0));

    // Negative flag clear: branch taken.
    cpu.registers.status.remove(PS_NEGATIVE);
    cpu.branch_if_positive(Address(0xABCD));
    assert_eq!(cpu.registers.program_counter, Address(0xABCD));
}
#[test]
fn branch_if_overflow_clear_test() {
    let mut cpu = Machine::new();

    // Overflow flag set: branch not taken.
    cpu.registers.status.insert(PS_OVERFLOW);
    cpu.branch_if_overflow_clear(Address(0xABCD));
    assert_eq!(cpu.registers.program_counter, Address(0));

    // Overflow flag clear: branch taken.
    cpu.registers.status.remove(PS_OVERFLOW);
    cpu.branch_if_overflow_clear(Address(0xABCD));
    assert_eq!(cpu.registers.program_counter, Address(0xABCD));
}
|
//
// Copyright (c) 2016, Boris Popov <popov@whitekefir.ru>
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//
extern crate regex;
use logger;
use env;
use std::process::exit;
use self::regex::Regex;
use blkid;
//////////////////////////////////////////////////////////////////
// Mounts a block device only when its partition label matches a
// configured regex pattern (see `Mounter::mount`).
pub struct Mounter {
    // Partition label read from the device; `None` until probed.
    label: Option<String>
}
impl Mounter {
pub fn new() -> Mounter {
Mounter{label: None}
}
pub fn mount(&mut self, l: &logger::Logger, e: &env::Env) {
self.load_label(e.getDevice(), l);
if !self.verify_label(e.getLabel()) {
let i = format!("Label does not match {}.", e.getLabel());
l.info(&i);
exit(0);
}
//
//TODO
//
//self.load_fs_type();
//self.process();
}
fn verify_label(&self, templ: &str) -> bool {
let lbl :String;
match self.label {
None => { return false; },
Some(ref l) => { lbl = l.to_string(); }
}
let res = Regex::new(templ);
match res {
Ok(re) => re.is_match(&lbl),
_ => false
}
}
fn load_label(&mut self, dev: &str, l: &logger::Logger) {
let prober_r = blkid::Prober::new(dev);
//
//TODO
//
}
}
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
#[test]
fn test_verify_label_01() {
    // "BNC" does not contain the required "ANC" substring.
    let m = Mounter { label: Some("BNC-124".to_string()) };
    assert!(!m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_02() {
    // "ANC" embedded inside a longer label matches.
    let m = Mounter { label: Some("XANC-124".to_string()) };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_04() {
    // Exact "ANC" matches the wildcard pattern.
    let m = Mounter { label: Some("ANC".to_string()) };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_05() {
    // "ANC" preceded by other characters still matches.
    let m = Mounter { label: Some("01234_ ANC".to_string()) };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_06() {
    // "ANC" surrounded by other text still matches.
    let m = Mounter { label: Some("01234_ ANC 782883 ".to_string()) };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_07() {
    // Leading whitespace before "ANC" still matches.
    let m = Mounter { label: Some(" ANC 782883 ".to_string()) };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_08() {
    // Punctuation after "ANC" still matches.
    let m = Mounter { label: Some("ANC---!782883 ".to_string()) };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_09() {
    // "NC" alone is not enough: the full "ANC" is required.
    let m = Mounter { label: Some("01234_ NC 782883 ".to_string()) };
    assert!(!m.verify_label(".*ANC.*"));
}
//////////////////////////////////////////////////////////////////
add Mounter::load_part_info
//
// Copyright (c) 2016, Boris Popov <popov@whitekefir.ru>
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//
extern crate regex;
use logger;
use env;
use std::process::exit;
use self::regex::Regex;
use blkid;
//////////////////////////////////////////////////////////////////
// Mounts a block device only when its partition label matches a
// configured regex pattern (see `Mounter::mount`).
pub struct Mounter {
    // Partition label probed via blkid; empty until loaded.
    part_label: String,
    // Filesystem type probed via blkid; empty until loaded.
    part_type: String
}
impl Mounter {
    /// Creates a `Mounter` with no partition info loaded yet.
    pub fn new() -> Mounter {
        Mounter {
            part_label: "".to_string(),
            part_type: "".to_string()
        }
    }

    /// Probes the device and proceeds only if its label matches the
    /// configured pattern; otherwise logs and exits with status 0.
    pub fn mount(&mut self, l: &logger::Logger, e: &env::Env) {
        self.load_part_info(e.getDevice(), l);
        if !self.verify_label(e.getLabel()) {
            let i = format!("Label does not match {}.", e.getLabel());
            l.info(&i);
            exit(0);
        }
        //
        //TODO
        //
        //self.process();
    }

    /// True when a non-empty label has been probed and matches the
    /// regex `templ`; an invalid pattern yields `false`.
    fn verify_label(&self, templ: &str) -> bool {
        if self.part_label.is_empty() {
            return false;
        }
        let res = Regex::new(templ);
        match res {
            Ok(re) => re.is_match(&self.part_label),
            _ => false
        }
    }

    /// Probes `dev` via blkid and caches the partition label and
    /// filesystem type. A failed probe is logged and exits with status 1.
    fn load_part_info(&mut self, dev: &str, l: &logger::Logger) {
        let prober = blkid::Prober::new(dev);
        let res = prober.probe();
        if let Err(e) = res {
            l.error(&e);
            exit(1);
        }
        self.load_label(&prober, l);
        self.load_type(&prober, l);
    }

    /// Reads the partition label from the prober; an error is logged
    /// and leaves the cached label unchanged.
    fn load_label(&mut self, prober: &blkid::Prober,
                  l: &logger::Logger) {
        let lr = prober.getLabel();
        match lr {
            Ok(o) => {
                self.part_label = o;
                let mess = "LABEL=".to_string() + &self.part_label;
                l.info(&mess);
            },
            Err(e) => {
                l.error(&e);
            }
        }
    }

    /// Reads the filesystem type from the prober; an error is logged
    /// and leaves the cached type unchanged.
    fn load_type(&mut self, prober: &blkid::Prober,
                 l: &logger::Logger) {
        let lr = prober.getType();
        match lr {
            Ok(o) => {
                self.part_type = o;
                // Bug fix: log the type that was just read, not the label.
                let mess = "TYPE=".to_string() + &self.part_type;
                l.info(&mess);
            },
            Err(e) => {
                l.error(&e);
            }
        }
    }
}
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
#[test]
fn test_verify_label_01() {
    // "BNC" does not contain the required "ANC" substring.
    let m = Mounter {
        part_label: "BNC-124".to_string(),
        part_type: String::new()
    };
    assert!(!m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_02() {
    // "ANC" embedded inside a longer label matches.
    let m = Mounter {
        part_label: "XANC-124".to_string(),
        part_type: String::new()
    };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_04() {
    // Exact "ANC" matches the wildcard pattern.
    let m = Mounter {
        part_label: "ANC".to_string(),
        part_type: String::new()
    };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_05() {
    // "ANC" preceded by other characters still matches.
    let m = Mounter {
        part_label: "01234_ ANC".to_string(),
        part_type: String::new()
    };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_06() {
    // "ANC" surrounded by other text still matches.
    let m = Mounter {
        part_label: "01234_ ANC 782883 ".to_string(),
        part_type: String::new()
    };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_07() {
    // Leading whitespace before "ANC" still matches.
    let m = Mounter {
        part_label: " ANC 782883 ".to_string(),
        part_type: String::new()
    };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_08() {
    // Punctuation after "ANC" still matches.
    let m = Mounter {
        part_label: "ANC---!782883 ".to_string(),
        part_type: String::new()
    };
    assert!(m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_09() {
    // "NC" alone is not enough: the full "ANC" is required.
    let m = Mounter {
        part_label: "01234_ NC 782883 ".to_string(),
        part_type: String::new()
    };
    assert!(!m.verify_label(".*ANC.*"));
}
#[test]
fn test_verify_label_10() {
    // An empty label never matches, regardless of the pattern.
    let m = Mounter {
        part_label: String::new(),
        part_type: String::new()
    };
    assert!(!m.verify_label(".*ANC.*"));
}
//////////////////////////////////////////////////////////////////
|
use std::fmt;
use std::io::{self, Read, Write};
use std::mem;
use std::net::{self, SocketAddr, Shutdown};
use std::time::Duration;
use bytes::{Buf, BufMut};
use futures::stream::Stream;
use futures::{Future, Poll, Async};
use iovec::IoVec;
use mio;
use tokio_io::{AsyncRead, AsyncWrite};
use reactor::{Handle, PollEvented};
/// An I/O object representing a TCP socket listening for incoming connections.
///
/// This object can be converted into a stream of incoming connections for
/// various forms of processing.
pub struct TcpListener {
    // Readiness-tracked wrapper around the non-blocking mio listener.
    io: PollEvented<mio::net::TcpListener>,
}
/// Stream returned by the `TcpListener::incoming` function representing the
/// stream of sockets received from a listener.
#[must_use = "streams do nothing unless polled"]
#[derive(Debug)]
pub struct Incoming {
    // Listener whose `accept` calls back each item of the stream.
    inner: TcpListener,
}
impl TcpListener {
/// Create a new TCP listener associated with this event loop.
///
/// The TCP listener will bind to the provided `addr` address, if available.
/// If the result is `Ok`, the socket has successfully bound.
pub fn bind(addr: &SocketAddr) -> io::Result<TcpListener> {
let l = try!(mio::net::TcpListener::bind(addr));
TcpListener::new(l, &Handle::default())
}
/// Attempt to accept a connection and create a new connected `TcpStream` if
/// successful.
///
/// This function will attempt an accept operation, but will not block
/// waiting for it to complete. If the operation would block then a "would
/// block" error is returned. Additionally, if this method would block, it
/// registers the current task to receive a notification when it would
/// otherwise not block.
///
/// Note that typically for simple usage it's easier to treat incoming
/// connections as a `Stream` of `TcpStream`s with the `incoming` method
/// below.
///
/// # Panics
///
/// This function will panic if it is called outside the context of a
/// future's task. It's recommended to only call this from the
/// implementation of a `Future::poll`, if necessary.
pub fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> {
let (stream, addr) = self.accept_std()?;
let stream = TcpStream::from_std(stream, self.io.handle())?;
Ok((stream, addr))
}
/// Attempt to accept a connection and create a new connected `TcpStream` if
/// successful.
///
/// This function is the asme as `accept` above except that it returns a
/// `std::net::TcpStream` instead of a `tokio::net::TcpStream`. This in turn
/// can then allow for the TCP stream to be assoiated with a different
/// reactor than the one this `TcpListener` is associated with.
///
/// # Panics
///
/// This function will panic for the same reasons as `accept`, notably if
/// called outside the context of a future.
pub fn accept_std(&mut self) -> io::Result<(net::TcpStream, SocketAddr)> {
if let Async::NotReady = self.io.poll_read() {
return Err(io::ErrorKind::WouldBlock.into())
}
match self.io.get_ref().accept_std() {
Ok(pair) => Ok(pair),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.io.need_read()?;
}
Err(e)
}
}
}
/// Create a new TCP listener from the standard library's TCP listener.
///
/// This method can be used when the `Handle::tcp_listen` method isn't
/// sufficient because perhaps some more configuration is needed in terms of
/// before the calls to `bind` and `listen`.
///
/// This API is typically paired with the `net2` crate and the `TcpBuilder`
/// type to build up and customize a listener before it's shipped off to the
/// backing event loop. This allows configuration of options like
/// `SO_REUSEPORT`, binding to multiple addresses, etc.
///
/// The `addr` argument here is one of the addresses that `listener` is
/// bound to and the listener will only be guaranteed to accept connections
/// of the same address type currently.
///
/// Finally, the `handle` argument is the event loop that this listener will
/// be bound to.
///
/// The platform specific behavior of this function looks like:
///
/// * On Unix, the socket is placed into nonblocking mode and connections
/// can be accepted as normal
///
/// * On Windows, the address is stored internally and all future accepts
/// will only be for the same IP version as `addr` specified. That is, if
/// `addr` is an IPv4 address then all sockets accepted will be IPv4 as
/// well (same for IPv6).
pub fn from_std(listener: net::TcpListener,
handle: &Handle) -> io::Result<TcpListener> {
let l = mio::net::TcpListener::from_std(listener)?;
TcpListener::new(l, handle)
}
fn new(listener: mio::net::TcpListener, handle: &Handle)
-> io::Result<TcpListener> {
let io = try!(PollEvented::new(listener, handle));
Ok(TcpListener { io: io })
}
/// Test whether this socket is ready to be read or not.
///
/// # Panics
///
/// This function will panic if called outside the context of a future's
/// task.
pub fn poll_read(&self) -> Async<()> {
self.io.poll_read()
}
/// Returns the local address that this listener is bound to.
///
/// This can be useful, for example, when binding to port 0 to figure out
/// which port was actually bound.
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.io.get_ref().local_addr()
}
/// Consumes this listener, returning a stream of the sockets this listener
/// accepts.
///
/// This method returns an implementation of the `Stream` trait which
/// resolves to the sockets the are accepted on this listener.
pub fn incoming(self) -> Incoming {
Incoming { inner: self }
}
/// Gets the value of the `IP_TTL` option for this socket.
///
/// For more information about this option, see [`set_ttl`].
///
/// [`set_ttl`]: #method.set_ttl
pub fn ttl(&self) -> io::Result<u32> {
self.io.get_ref().ttl()
}
/// Sets the value for the `IP_TTL` option on this socket.
///
/// This value sets the time-to-live field that is used in every packet sent
/// from this socket.
pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
self.io.get_ref().set_ttl(ttl)
}
/// Gets the value of the `IPV6_V6ONLY` option for this socket.
///
/// For more information about this option, see [`set_only_v6`].
///
/// [`set_only_v6`]: #method.set_only_v6
pub fn only_v6(&self) -> io::Result<bool> {
self.io.get_ref().only_v6()
}
/// Sets the value for the `IPV6_V6ONLY` option on this socket.
///
/// If this is set to `true` then the socket is restricted to sending and
/// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications
/// can bind the same port at the same time.
///
/// If this is set to `false` then the socket can be used to send and
/// receive packets from an IPv4-mapped IPv6 address.
pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
self.io.get_ref().set_only_v6(only_v6)
}
}
impl fmt::Debug for TcpListener {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Delegate to the inner mio listener's Debug representation.
        fmt::Debug::fmt(self.io.get_ref(), f)
    }
}
impl Stream for Incoming {
    type Item = (TcpStream, SocketAddr);
    type Error = io::Error;

    // Each poll attempts one accept. `try_nb!` maps a WouldBlock error
    // to `Async::NotReady` and propagates other errors, so this stream
    // yields `Some` on success and never terminates with `None` on its own.
    fn poll(&mut self) -> Poll<Option<Self::Item>, io::Error> {
        Ok(Async::Ready(Some(try_nb!(self.inner.accept()))))
    }
}
/// An I/O object representing a TCP stream connected to a remote endpoint.
///
/// A TCP stream can either be created by connecting to an endpoint, via the
/// [`connect`] method, or by [accepting] a connection from a [listener].
///
/// [`connect`]: struct.TcpStream.html#method.connect
/// [accepting]: struct.TcpListener.html#method.accept
/// [listener]: struct.TcpListener.html
pub struct TcpStream {
    // Readiness-tracked wrapper around the non-blocking mio stream.
    io: PollEvented<mio::net::TcpStream>,
}
/// Future returned by `TcpStream::connect` which will resolve to a `TcpStream`
/// when the stream is connected.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct TcpStreamNew {
    // State machine driving the in-progress connect; kept private so
    // the public future type can evolve without breaking callers.
    inner: TcpStreamNewState,
}
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
enum TcpStreamNewState {
    // Connect was issued; holds the stream while the connection completes.
    Waiting(TcpStream),
    // The connect call failed eagerly; the error is surfaced when polled.
    Error(io::Error),
    // NOTE(review): presumably the state after the future resolves and
    // its payload has been moved out — confirm against the poll impl.
    Empty,
}
impl TcpStream {
/// Create a new TCP stream connected to the specified address.
///
/// This function will create a new TCP socket and attempt to connect it to
/// the `addr` provided. The returned future will be resolved once the
/// stream has successfully connected, or it wil return an error if one
/// occurs.
pub fn connect(addr: &SocketAddr) -> TcpStreamNew {
let inner = match mio::net::TcpStream::connect(addr) {
Ok(tcp) => TcpStream::new(tcp, &Handle::default()),
Err(e) => TcpStreamNewState::Error(e),
};
TcpStreamNew { inner: inner }
}
fn new(connected_stream: mio::net::TcpStream, handle: &Handle)
-> TcpStreamNewState {
match PollEvented::new(connected_stream, handle) {
Ok(io) => TcpStreamNewState::Waiting(TcpStream { io: io }),
Err(e) => TcpStreamNewState::Error(e),
}
}
/// Create a new `TcpStream` from a `net::TcpStream`.
///
/// This function will convert a TCP stream created by the standard library
/// to a TCP stream ready to be used with the provided event loop handle.
/// The stream returned is associated with the event loop and ready to
/// perform I/O.
pub fn from_std(stream: net::TcpStream, handle: &Handle)
-> io::Result<TcpStream>
{
let inner = mio::net::TcpStream::from_stream(stream)?;
Ok(TcpStream {
io: try!(PollEvented::new(inner, handle)),
})
}
/// Creates a new `TcpStream` from the pending socket inside the given
/// `std::net::TcpStream`, connecting it to the address specified.
///
/// This constructor allows configuring the socket before it's actually
/// connected, and this function will transfer ownership to the returned
/// `TcpStream` if successful. An unconnected `TcpStream` can be created
/// with the `net2::TcpBuilder` type (and also configured via that route).
///
/// The platform specific behavior of this function looks like:
///
/// * On Unix, the socket is placed into nonblocking mode and then a
/// `connect` call is issued.
///
/// * On Windows, the address is stored internally and the connect operation
/// is issued when the returned `TcpStream` is registered with an event
/// loop. Note that on Windows you must `bind` a socket before it can be
/// connected, so if a custom `TcpBuilder` is used it should be bound
/// (perhaps to `INADDR_ANY`) before this method is called.
pub fn connect_std(stream: net::TcpStream,
addr: &SocketAddr,
handle: &Handle)
-> TcpStreamNew
{
let inner = match mio::net::TcpStream::connect_stream(stream, addr) {
Ok(tcp) => TcpStream::new(tcp, handle),
Err(e) => TcpStreamNewState::Error(e),
};
TcpStreamNew { inner: inner }
}
/// Test whether this stream is ready to be read or not.
///
/// If the stream is *not* readable then the current task is scheduled to
/// get a notification when the stream does become readable.
///
/// # Panics
///
/// This function will panic if called outside the context of a future's
/// task.
pub fn poll_read(&self) -> Async<()> {
self.io.poll_read()
}
/// Test whether this stream is ready to be written or not.
///
/// If the stream is *not* writable then the current task is scheduled to
/// get a notification when the stream does become writable.
///
/// # Panics
///
/// This function will panic if called outside the context of a future's
/// task.
pub fn poll_write(&self) -> Async<()> {
self.io.poll_write()
}
/// Returns the local address that this stream is bound to.
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.io.get_ref().local_addr()
}
/// Returns the remote address that this stream is connected to.
pub fn peer_addr(&self) -> io::Result<SocketAddr> {
self.io.get_ref().peer_addr()
}
/// Receives data on the socket from the remote address to which it is
/// connected, without removing that data from the queue. On success,
/// returns the number of bytes peeked.
///
/// Successive calls return the same data. This is accomplished by passing
/// `MSG_PEEK` as a flag to the underlying recv system call.
pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
if let Async::NotReady = self.poll_read() {
return Err(io::ErrorKind::WouldBlock.into())
}
match self.io.get_ref().peek(buf) {
Ok(v) => Ok(v),
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
self.io.need_read()?;
Err(io::ErrorKind::WouldBlock.into())
}
Err(e) => Err(e),
}
}
/// Shuts down the read, write, or both halves of this connection.
///
/// This function will cause all pending and future I/O on the specified
/// portions to return immediately with an appropriate value (see the
/// documentation of `Shutdown`).
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
self.io.get_ref().shutdown(how)
}
/// Gets the value of the `TCP_NODELAY` option on this socket.
///
/// For more information about this option, see [`set_nodelay`].
///
/// [`set_nodelay`]: #method.set_nodelay
pub fn nodelay(&self) -> io::Result<bool> {
self.io.get_ref().nodelay()
}
/// Sets the value of the `TCP_NODELAY` option on this socket.
///
/// If set, this option disables the Nagle algorithm. This means that
/// segments are always sent as soon as possible, even if there is only a
/// small amount of data. When not set, data is buffered until there is a
/// sufficient amount to send out, thereby avoiding the frequent sending of
/// small packets.
pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
self.io.get_ref().set_nodelay(nodelay)
}
/// Gets the value of the `SO_RCVBUF` option on this socket.
///
/// For more information about this option, see [`set_recv_buffer_size`].
///
/// [`set_recv_buffer_size`]: #method.set_recv_buffer_size
pub fn recv_buffer_size(&self) -> io::Result<usize> {
self.io.get_ref().recv_buffer_size()
}
/// Sets the value of the `SO_RCVBUF` option on this socket.
///
/// Changes the size of the operating system's receive buffer associated
/// with the socket.
pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
self.io.get_ref().set_recv_buffer_size(size)
}
/// Gets the value of the `SO_SNDBUF` option on this socket.
///
/// For more information about this option, see [`set_send_buffer_size`].
///
/// [`set_send_buffer_size`]: #method.set_send_buffer_size
pub fn send_buffer_size(&self) -> io::Result<usize> {
self.io.get_ref().send_buffer_size()
}
/// Sets the value of the `SO_SNDBUF` option on this socket.
///
/// Changes the size of the operating system's send buffer associated with
/// the socket.
pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
self.io.get_ref().set_send_buffer_size(size)
}
/// Returns whether keepalive messages are enabled on this socket, and if so
/// the duration of time between them.
///
/// For more information about this option, see [`set_keepalive`].
///
/// [`set_keepalive`]: #method.set_keepalive
pub fn keepalive(&self) -> io::Result<Option<Duration>> {
self.io.get_ref().keepalive()
}
/// Sets whether keepalive messages are enabled to be sent on this socket.
///
/// On Unix, this option will set the `SO_KEEPALIVE` as well as the
/// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform).
/// On Windows, this will set the `SIO_KEEPALIVE_VALS` option.
///
/// If `None` is specified then keepalive messages are disabled, otherwise
/// the duration specified will be the time to remain idle before sending a
/// TCP keepalive probe.
///
/// Some platforms specify this value in seconds, so sub-second
/// specifications may be omitted.
pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
self.io.get_ref().set_keepalive(keepalive)
}
/// Gets the value of the `IP_TTL` option for this socket.
///
/// For more information about this option, see [`set_ttl`].
///
/// [`set_ttl`]: #method.set_ttl
pub fn ttl(&self) -> io::Result<u32> {
self.io.get_ref().ttl()
}
/// Sets the value for the `IP_TTL` option on this socket.
///
/// This value sets the time-to-live field that is used in every packet sent
/// from this socket.
pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
self.io.get_ref().set_ttl(ttl)
}
/// Gets the value of the `IPV6_V6ONLY` option for this socket.
///
/// For more information about this option, see [`set_only_v6`].
///
/// [`set_only_v6`]: #method.set_only_v6
pub fn only_v6(&self) -> io::Result<bool> {
self.io.get_ref().only_v6()
}
/// Sets the value for the `IPV6_V6ONLY` option on this socket.
///
/// If this is set to `true` then the socket is restricted to sending and
/// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications
/// can bind the same port at the same time.
///
/// If this is set to `false` then the socket can be used to send and
/// receive packets from an IPv4-mapped IPv6 address.
pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
self.io.get_ref().set_only_v6(only_v6)
}
/// Reads the linger duration for this socket by getting the `SO_LINGER`
/// option.
///
/// For more information about this option, see [`set_linger`].
///
/// [`set_linger`]: #method.set_linger
pub fn linger(&self) -> io::Result<Option<Duration>> {
self.io.get_ref().linger()
}
/// Sets the linger duration of this socket by setting the `SO_LINGER`
/// option.
///
/// This option controls the action taken when a stream has unsent messages
/// and the stream is closed. If `SO_LINGER` is set, the system
/// shall block the process until it can transmit the data or until the
/// time expires.
///
/// If `SO_LINGER` is not specified, and the stream is closed, the system
/// handles the call in a way that allows the process to continue as quickly
/// as possible.
pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
self.io.get_ref().set_linger(dur)
}
}
impl Read for TcpStream {
// Delegates to the poll-evented wrapper, which returns `WouldBlock` (and
// registers the current task) when the socket is not readable yet.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.io.read(buf)
}
}
impl Write for TcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.io.write(buf)
}
// A TCP stream buffers nothing in userspace, so flushing is a no-op.
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl AsyncRead for TcpStream {
// Returning `false` signals that the buffer must be initialized before
// being handed to `read` — NOTE(review): mirrors the `&TcpStream` impl
// below; confirm against the tokio-io `AsyncRead` contract.
unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool {
false
}
// Forward to the by-reference implementation, which does the real work.
fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
<&TcpStream>::read_buf(&mut &*self, buf)
}
}
impl AsyncWrite for TcpStream {
// Forward to the by-reference implementation.
fn shutdown(&mut self) -> Poll<(), io::Error> {
<&TcpStream>::shutdown(&mut &*self)
}
fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
<&TcpStream>::write_buf(&mut &*self, buf)
}
}
impl<'a> Read for &'a TcpStream {
// Reading through a shared reference is possible because the underlying
// mio stream implements `Read for &_`.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
(&self.io).read(buf)
}
}
impl<'a> Write for &'a TcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
(&self.io).write(buf)
}
fn flush(&mut self) -> io::Result<()> {
(&self.io).flush()
}
}
impl<'a> AsyncRead for &'a TcpStream {
// Returning `false` signals that the buffer must be initialized before
// being handed to `read` — NOTE(review): confirm against the tokio-io
// `AsyncRead::prepare_uninitialized_buffer` contract.
unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool {
false
}
// Vectored read: gathers up to 16 scatter buffers from `buf` and issues a
// single `read_bufs` call on the underlying socket.
fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
// Bail out (task already registered by `poll_read`) if not readable yet.
if let Async::NotReady = <TcpStream>::poll_read(self) {
return Ok(Async::NotReady)
}
let r = unsafe {
// The `IoVec` type can't have a 0-length size, so we create a bunch
// of dummy versions on the stack with 1 length which we'll quickly
// overwrite.
let b1: &mut [u8] = &mut [0];
let b2: &mut [u8] = &mut [0];
let b3: &mut [u8] = &mut [0];
let b4: &mut [u8] = &mut [0];
let b5: &mut [u8] = &mut [0];
let b6: &mut [u8] = &mut [0];
let b7: &mut [u8] = &mut [0];
let b8: &mut [u8] = &mut [0];
let b9: &mut [u8] = &mut [0];
let b10: &mut [u8] = &mut [0];
let b11: &mut [u8] = &mut [0];
let b12: &mut [u8] = &mut [0];
let b13: &mut [u8] = &mut [0];
let b14: &mut [u8] = &mut [0];
let b15: &mut [u8] = &mut [0];
let b16: &mut [u8] = &mut [0];
let mut bufs: [&mut IoVec; 16] = [
b1.into(), b2.into(), b3.into(), b4.into(),
b5.into(), b6.into(), b7.into(), b8.into(),
b9.into(), b10.into(), b11.into(), b12.into(),
b13.into(), b14.into(), b15.into(), b16.into(),
];
// `bytes_vec_mut` replaces the dummies with `buf`'s real windows and
// reports how many entries it filled in.
let n = buf.bytes_vec_mut(&mut bufs);
self.io.get_ref().read_bufs(&mut bufs[..n])
};
match r {
Ok(n) => {
// Mark the freshly-read bytes as initialized in the caller's buffer.
unsafe { buf.advance_mut(n); }
Ok(Async::Ready(n))
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
// Spurious readiness: re-register interest and report not-ready.
self.io.need_read()?;
Ok(Async::NotReady)
}
Err(e) => Err(e),
}
}
}
impl<'a> AsyncWrite for &'a TcpStream {
// No userspace state to tear down for a write-side shutdown here, so
// report immediate success.
fn shutdown(&mut self) -> Poll<(), io::Error> {
Ok(().into())
}
// Vectored write: gathers up to 64 buffers from `buf` and issues a single
// `write_bufs` call on the underlying socket.
fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
if let Async::NotReady = <TcpStream>::poll_write(self) {
return Ok(Async::NotReady)
}
let r = {
// The `IoVec` type can't have a zero-length size, so create a dummy
// version from a 1-length slice which we'll overwrite with the
// `bytes_vec` method.
static DUMMY: &[u8] = &[0];
let iovec = <&IoVec>::from(DUMMY);
let mut bufs = [iovec; 64];
let n = buf.bytes_vec(&mut bufs);
self.io.get_ref().write_bufs(&bufs[..n])
};
match r {
Ok(n) => {
// Consume the bytes that made it onto the wire.
buf.advance(n);
Ok(Async::Ready(n))
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
// Spurious readiness: re-register interest and report not-ready.
self.io.need_write()?;
Ok(Async::NotReady)
}
Err(e) => Err(e),
}
}
}
impl fmt::Debug for TcpStream {
// Debug output delegates to the underlying mio stream.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.io.get_ref().fmt(f)
}
}
impl Future for TcpStreamNew {
type Item = TcpStream;
type Error = io::Error;
// Thin wrapper: the actual state machine lives in `TcpStreamNewState`.
fn poll(&mut self) -> Poll<TcpStream, io::Error> {
self.inner.poll()
}
}
impl Future for TcpStreamNewState {
type Item = TcpStream;
type Error = io::Error;
// Drives the three-state connect machine: Waiting -> (Ready | NotReady),
// Error -> Err on first poll, Empty -> panic (polled after completion).
fn poll(&mut self) -> Poll<TcpStream, io::Error> {
{
let stream = match *self {
TcpStreamNewState::Waiting(ref s) => s,
TcpStreamNewState::Error(_) => {
// Move the error out, leaving the terminal `Empty` state behind.
let e = match mem::replace(self, TcpStreamNewState::Empty) {
TcpStreamNewState::Error(e) => e,
_ => panic!(),
};
return Err(e)
}
TcpStreamNewState::Empty => panic!("can't poll TCP stream twice"),
};
// Once we've connected, wait for the stream to be writable as
// that's when the actual connection has been initiated. Once we're
// writable we check for `take_socket_error` to see if the connect
// actually hit an error or not.
//
// If all that succeeded then we ship everything on up.
if let Async::NotReady = stream.io.poll_write() {
return Ok(Async::NotReady)
}
if let Some(e) = try!(stream.io.get_ref().take_error()) {
return Err(e)
}
}
// Success: hand the connected stream out, leaving `Empty` behind.
match mem::replace(self, TcpStreamNewState::Empty) {
TcpStreamNewState::Waiting(stream) => Ok(Async::Ready(stream)),
_ => panic!(),
}
}
}
// Platform-specific extension traits: raw-descriptor access on Unix; the
// Windows equivalents are pending upstream mio support (see below).
#[cfg(all(unix, not(target_os = "fuchsia")))]
mod sys {
use std::os::unix::prelude::*;
use super::{TcpStream, TcpListener};
impl AsRawFd for TcpStream {
fn as_raw_fd(&self) -> RawFd {
self.io.get_ref().as_raw_fd()
}
}
impl AsRawFd for TcpListener {
fn as_raw_fd(&self) -> RawFd {
self.io.get_ref().as_raw_fd()
}
}
}
#[cfg(windows)]
mod sys {
// TODO: let's land these upstream with mio and then we can add them here.
//
// use std::os::windows::prelude::*;
// use super::{TcpStream, TcpListener};
//
// impl AsRawHandle for TcpStream {
//     fn as_raw_handle(&self) -> RawHandle {
//         self.io.get_ref().as_raw_handle()
//     }
// }
//
// impl AsRawHandle for TcpListener {
//     fn as_raw_handle(&self) -> RawHandle {
//         self.listener.io().as_raw_handle()
//     }
// }
}
Remove only_v6 from TCP types (#90)
use std::fmt;
use std::io::{self, Read, Write};
use std::mem;
use std::net::{self, SocketAddr, Shutdown};
use std::time::Duration;
use bytes::{Buf, BufMut};
use futures::stream::Stream;
use futures::{Future, Poll, Async};
use iovec::IoVec;
use mio;
use tokio_io::{AsyncRead, AsyncWrite};
use reactor::{Handle, PollEvented};
/// An I/O object representing a TCP socket listening for incoming connections.
///
/// This object can be converted into a stream of incoming connections for
/// various forms of processing.
pub struct TcpListener {
// The mio listener registered with the reactor for readiness events.
io: PollEvented<mio::net::TcpListener>,
}
/// Stream returned by the `TcpListener::incoming` function representing the
/// stream of sockets received from a listener.
#[must_use = "streams do nothing unless polled"]
#[derive(Debug)]
pub struct Incoming {
// The listener whose accepted connections this stream yields.
inner: TcpListener,
}
impl TcpListener {
/// Create a new TCP listener associated with this event loop.
///
/// The TCP listener will bind to the provided `addr` address, if available.
/// If the result is `Ok`, the socket has successfully bound.
pub fn bind(addr: &SocketAddr) -> io::Result<TcpListener> {
let l = try!(mio::net::TcpListener::bind(addr));
TcpListener::new(l, &Handle::default())
}
/// Attempt to accept a connection and create a new connected `TcpStream` if
/// successful.
///
/// This function will attempt an accept operation, but will not block
/// waiting for it to complete. If the operation would block then a "would
/// block" error is returned. Additionally, if this method would block, it
/// registers the current task to receive a notification when it would
/// otherwise not block.
///
/// Note that typically for simple usage it's easier to treat incoming
/// connections as a `Stream` of `TcpStream`s with the `incoming` method
/// below.
///
/// # Panics
///
/// This function will panic if it is called outside the context of a
/// future's task. It's recommended to only call this from the
/// implementation of a `Future::poll`, if necessary.
pub fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> {
let (stream, addr) = self.accept_std()?;
let stream = TcpStream::from_std(stream, self.io.handle())?;
Ok((stream, addr))
}
/// Attempt to accept a connection and create a new connected `TcpStream` if
/// successful.
///
/// This function is the asme as `accept` above except that it returns a
/// `std::net::TcpStream` instead of a `tokio::net::TcpStream`. This in turn
/// can then allow for the TCP stream to be assoiated with a different
/// reactor than the one this `TcpListener` is associated with.
///
/// # Panics
///
/// This function will panic for the same reasons as `accept`, notably if
/// called outside the context of a future.
pub fn accept_std(&mut self) -> io::Result<(net::TcpStream, SocketAddr)> {
if let Async::NotReady = self.io.poll_read() {
return Err(io::ErrorKind::WouldBlock.into())
}
match self.io.get_ref().accept_std() {
Ok(pair) => Ok(pair),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.io.need_read()?;
}
Err(e)
}
}
}
/// Create a new TCP listener from the standard library's TCP listener.
///
/// This method can be used when the `Handle::tcp_listen` method isn't
/// sufficient because perhaps some more configuration is needed in terms of
/// before the calls to `bind` and `listen`.
///
/// This API is typically paired with the `net2` crate and the `TcpBuilder`
/// type to build up and customize a listener before it's shipped off to the
/// backing event loop. This allows configuration of options like
/// `SO_REUSEPORT`, binding to multiple addresses, etc.
///
/// The `addr` argument here is one of the addresses that `listener` is
/// bound to and the listener will only be guaranteed to accept connections
/// of the same address type currently.
///
/// Finally, the `handle` argument is the event loop that this listener will
/// be bound to.
///
/// The platform specific behavior of this function looks like:
///
/// * On Unix, the socket is placed into nonblocking mode and connections
/// can be accepted as normal
///
/// * On Windows, the address is stored internally and all future accepts
/// will only be for the same IP version as `addr` specified. That is, if
/// `addr` is an IPv4 address then all sockets accepted will be IPv4 as
/// well (same for IPv6).
pub fn from_std(listener: net::TcpListener,
handle: &Handle) -> io::Result<TcpListener> {
let l = mio::net::TcpListener::from_std(listener)?;
TcpListener::new(l, handle)
}
fn new(listener: mio::net::TcpListener, handle: &Handle)
-> io::Result<TcpListener> {
let io = try!(PollEvented::new(listener, handle));
Ok(TcpListener { io: io })
}
/// Test whether this socket is ready to be read or not.
///
/// # Panics
///
/// This function will panic if called outside the context of a future's
/// task.
pub fn poll_read(&self) -> Async<()> {
self.io.poll_read()
}
/// Returns the local address that this listener is bound to.
///
/// This can be useful, for example, when binding to port 0 to figure out
/// which port was actually bound.
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.io.get_ref().local_addr()
}
/// Consumes this listener, returning a stream of the sockets this listener
/// accepts.
///
/// This method returns an implementation of the `Stream` trait which
/// resolves to the sockets the are accepted on this listener.
pub fn incoming(self) -> Incoming {
Incoming { inner: self }
}
/// Gets the value of the `IP_TTL` option for this socket.
///
/// For more information about this option, see [`set_ttl`].
///
/// [`set_ttl`]: #method.set_ttl
pub fn ttl(&self) -> io::Result<u32> {
self.io.get_ref().ttl()
}
/// Sets the value for the `IP_TTL` option on this socket.
///
/// This value sets the time-to-live field that is used in every packet sent
/// from this socket.
pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
self.io.get_ref().set_ttl(ttl)
}
}
impl fmt::Debug for TcpListener {
// Debug output delegates to the underlying mio listener.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.io.get_ref().fmt(f)
}
}
impl Stream for Incoming {
type Item = (TcpStream, SocketAddr);
type Error = io::Error;
// Never yields `None`: `try_nb!` maps `WouldBlock` to `NotReady`, and a
// successful accept always produces `Some(pair)`.
fn poll(&mut self) -> Poll<Option<Self::Item>, io::Error> {
Ok(Async::Ready(Some(try_nb!(self.inner.accept()))))
}
}
/// An I/O object representing a TCP stream connected to a remote endpoint.
///
/// A TCP stream can either be created by connecting to an endpoint, via the
/// [`connect`] method, or by [accepting] a connection from a [listener].
///
/// [`connect`]: struct.TcpStream.html#method.connect
/// [accepting]: struct.TcpListener.html#method.accept
/// [listener]: struct.TcpListener.html
pub struct TcpStream {
// The mio stream registered with the reactor for readiness events.
io: PollEvented<mio::net::TcpStream>,
}
/// Future returned by `TcpStream::connect` which will resolve to a `TcpStream`
/// when the stream is connected.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct TcpStreamNew {
// The internal state machine driving the connect.
inner: TcpStreamNewState,
}
// State machine behind `TcpStreamNew`; driven by its `Future` impl.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
enum TcpStreamNewState {
// Connect issued; waiting for the socket to become writable.
Waiting(TcpStream),
// The connect call failed eagerly; the error is returned on first poll.
Error(io::Error),
// Terminal state after resolving; polling again panics.
Empty,
}
impl TcpStream {
/// Create a new TCP stream connected to the specified address.
///
/// This function will create a new TCP socket and attempt to connect it to
/// the `addr` provided. The returned future will be resolved once the
/// stream has successfully connected, or it wil return an error if one
/// occurs.
pub fn connect(addr: &SocketAddr) -> TcpStreamNew {
let inner = match mio::net::TcpStream::connect(addr) {
Ok(tcp) => TcpStream::new(tcp, &Handle::default()),
Err(e) => TcpStreamNewState::Error(e),
};
TcpStreamNew { inner: inner }
}
fn new(connected_stream: mio::net::TcpStream, handle: &Handle)
-> TcpStreamNewState {
match PollEvented::new(connected_stream, handle) {
Ok(io) => TcpStreamNewState::Waiting(TcpStream { io: io }),
Err(e) => TcpStreamNewState::Error(e),
}
}
/// Create a new `TcpStream` from a `net::TcpStream`.
///
/// This function will convert a TCP stream created by the standard library
/// to a TCP stream ready to be used with the provided event loop handle.
/// The stream returned is associated with the event loop and ready to
/// perform I/O.
pub fn from_std(stream: net::TcpStream, handle: &Handle)
-> io::Result<TcpStream>
{
let inner = mio::net::TcpStream::from_stream(stream)?;
Ok(TcpStream {
io: try!(PollEvented::new(inner, handle)),
})
}
/// Creates a new `TcpStream` from the pending socket inside the given
/// `std::net::TcpStream`, connecting it to the address specified.
///
/// This constructor allows configuring the socket before it's actually
/// connected, and this function will transfer ownership to the returned
/// `TcpStream` if successful. An unconnected `TcpStream` can be created
/// with the `net2::TcpBuilder` type (and also configured via that route).
///
/// The platform specific behavior of this function looks like:
///
/// * On Unix, the socket is placed into nonblocking mode and then a
/// `connect` call is issued.
///
/// * On Windows, the address is stored internally and the connect operation
/// is issued when the returned `TcpStream` is registered with an event
/// loop. Note that on Windows you must `bind` a socket before it can be
/// connected, so if a custom `TcpBuilder` is used it should be bound
/// (perhaps to `INADDR_ANY`) before this method is called.
pub fn connect_std(stream: net::TcpStream,
addr: &SocketAddr,
handle: &Handle)
-> TcpStreamNew
{
let inner = match mio::net::TcpStream::connect_stream(stream, addr) {
Ok(tcp) => TcpStream::new(tcp, handle),
Err(e) => TcpStreamNewState::Error(e),
};
TcpStreamNew { inner: inner }
}
/// Test whether this stream is ready to be read or not.
///
/// If the stream is *not* readable then the current task is scheduled to
/// get a notification when the stream does become readable.
///
/// # Panics
///
/// This function will panic if called outside the context of a future's
/// task.
pub fn poll_read(&self) -> Async<()> {
self.io.poll_read()
}
/// Test whether this stream is ready to be written or not.
///
/// If the stream is *not* writable then the current task is scheduled to
/// get a notification when the stream does become writable.
///
/// # Panics
///
/// This function will panic if called outside the context of a future's
/// task.
pub fn poll_write(&self) -> Async<()> {
self.io.poll_write()
}
/// Returns the local address that this stream is bound to.
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.io.get_ref().local_addr()
}
/// Returns the remote address that this stream is connected to.
pub fn peer_addr(&self) -> io::Result<SocketAddr> {
self.io.get_ref().peer_addr()
}
/// Receives data on the socket from the remote address to which it is
/// connected, without removing that data from the queue. On success,
/// returns the number of bytes peeked.
///
/// Successive calls return the same data. This is accomplished by passing
/// `MSG_PEEK` as a flag to the underlying recv system call.
pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
if let Async::NotReady = self.poll_read() {
return Err(io::ErrorKind::WouldBlock.into())
}
match self.io.get_ref().peek(buf) {
Ok(v) => Ok(v),
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
self.io.need_read()?;
Err(io::ErrorKind::WouldBlock.into())
}
Err(e) => Err(e),
}
}
/// Shuts down the read, write, or both halves of this connection.
///
/// This function will cause all pending and future I/O on the specified
/// portions to return immediately with an appropriate value (see the
/// documentation of `Shutdown`).
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
self.io.get_ref().shutdown(how)
}
/// Gets the value of the `TCP_NODELAY` option on this socket.
///
/// For more information about this option, see [`set_nodelay`].
///
/// [`set_nodelay`]: #method.set_nodelay
pub fn nodelay(&self) -> io::Result<bool> {
self.io.get_ref().nodelay()
}
/// Sets the value of the `TCP_NODELAY` option on this socket.
///
/// If set, this option disables the Nagle algorithm. This means that
/// segments are always sent as soon as possible, even if there is only a
/// small amount of data. When not set, data is buffered until there is a
/// sufficient amount to send out, thereby avoiding the frequent sending of
/// small packets.
pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
self.io.get_ref().set_nodelay(nodelay)
}
/// Gets the value of the `SO_RCVBUF` option on this socket.
///
/// For more information about this option, see [`set_recv_buffer_size`].
///
/// [`set_recv_buffer_size`]: #tymethod.set_recv_buffer_size
pub fn recv_buffer_size(&self) -> io::Result<usize> {
self.io.get_ref().recv_buffer_size()
}
/// Sets the value of the `SO_RCVBUF` option on this socket.
///
/// Changes the size of the operating system's receive buffer associated
/// with the socket.
pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
self.io.get_ref().set_recv_buffer_size(size)
}
/// Gets the value of the `SO_SNDBUF` option on this socket.
///
/// For more information about this option, see [`set_send_buffer`].
///
/// [`set_send_buffer`]: #tymethod.set_send_buffer
pub fn send_buffer_size(&self) -> io::Result<usize> {
self.io.get_ref().send_buffer_size()
}
/// Sets the value of the `SO_SNDBUF` option on this socket.
///
/// Changes the size of the operating system's send buffer associated with
/// the socket.
pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
self.io.get_ref().set_send_buffer_size(size)
}
/// Returns whether keepalive messages are enabled on this socket, and if so
/// the duration of time between them.
///
/// For more information about this option, see [`set_keepalive`].
///
/// [`set_keepalive`]: #tymethod.set_keepalive
pub fn keepalive(&self) -> io::Result<Option<Duration>> {
self.io.get_ref().keepalive()
}
/// Sets whether keepalive messages are enabled to be sent on this socket.
///
/// On Unix, this option will set the `SO_KEEPALIVE` as well as the
/// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform).
/// On Windows, this will set the `SIO_KEEPALIVE_VALS` option.
///
/// If `None` is specified then keepalive messages are disabled, otherwise
/// the duration specified will be the time to remain idle before sending a
/// TCP keepalive probe.
///
/// Some platforms specify this value in seconds, so sub-second
/// specifications may be omitted.
pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
self.io.get_ref().set_keepalive(keepalive)
}
/// Gets the value of the `IP_TTL` option for this socket.
///
/// For more information about this option, see [`set_ttl`].
///
/// [`set_ttl`]: #tymethod.set_ttl
pub fn ttl(&self) -> io::Result<u32> {
self.io.get_ref().ttl()
}
/// Sets the value for the `IP_TTL` option on this socket.
///
/// This value sets the time-to-live field that is used in every packet sent
/// from this socket.
pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
self.io.get_ref().set_ttl(ttl)
}
/// Reads the linger duration for this socket by getting the `SO_LINGER`
/// option.
///
/// For more information about this option, see [`set_linger`].
///
/// [`set_linger`]: #tymethod.set_linger
pub fn linger(&self) -> io::Result<Option<Duration>> {
self.io.get_ref().linger()
}
/// Sets the linger duration of this socket by setting the `SO_LINGER`
/// option.
///
/// This option controls the action taken when a stream has unsent messages
/// and the stream is closed. If `SO_LINGER` is set, the system
/// shall block the process until it can transmit the data or until the
/// time expires.
///
/// If `SO_LINGER` is not specified, and the stream is closed, the system
/// handles the call in a way that allows the process to continue as quickly
/// as possible.
pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
self.io.get_ref().set_linger(dur)
}
}
impl Read for TcpStream {
// Delegates to the poll-evented wrapper, which returns `WouldBlock` (and
// registers the current task) when the socket is not readable yet.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.io.read(buf)
}
}
impl Write for TcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.io.write(buf)
}
// A TCP stream buffers nothing in userspace, so flushing is a no-op.
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl AsyncRead for TcpStream {
// Returning `false` signals that the buffer must be initialized before
// being handed to `read` — NOTE(review): mirrors the `&TcpStream` impl
// below; confirm against the tokio-io `AsyncRead` contract.
unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool {
false
}
// Forward to the by-reference implementation, which does the real work.
fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
<&TcpStream>::read_buf(&mut &*self, buf)
}
}
impl AsyncWrite for TcpStream {
// Forward to the by-reference implementation.
fn shutdown(&mut self) -> Poll<(), io::Error> {
<&TcpStream>::shutdown(&mut &*self)
}
fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
<&TcpStream>::write_buf(&mut &*self, buf)
}
}
impl<'a> Read for &'a TcpStream {
// Reading through a shared reference is possible because the underlying
// mio stream implements `Read for &_`.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
(&self.io).read(buf)
}
}
impl<'a> Write for &'a TcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
(&self.io).write(buf)
}
fn flush(&mut self) -> io::Result<()> {
(&self.io).flush()
}
}
impl<'a> AsyncRead for &'a TcpStream {
// Returning `false` signals that the buffer must be initialized before
// being handed to `read` — NOTE(review): confirm against the tokio-io
// `AsyncRead::prepare_uninitialized_buffer` contract.
unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool {
false
}
// Vectored read: gathers up to 16 scatter buffers from `buf` and issues a
// single `read_bufs` call on the underlying socket.
fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
// Bail out (task already registered by `poll_read`) if not readable yet.
if let Async::NotReady = <TcpStream>::poll_read(self) {
return Ok(Async::NotReady)
}
let r = unsafe {
// The `IoVec` type can't have a 0-length size, so we create a bunch
// of dummy versions on the stack with 1 length which we'll quickly
// overwrite.
let b1: &mut [u8] = &mut [0];
let b2: &mut [u8] = &mut [0];
let b3: &mut [u8] = &mut [0];
let b4: &mut [u8] = &mut [0];
let b5: &mut [u8] = &mut [0];
let b6: &mut [u8] = &mut [0];
let b7: &mut [u8] = &mut [0];
let b8: &mut [u8] = &mut [0];
let b9: &mut [u8] = &mut [0];
let b10: &mut [u8] = &mut [0];
let b11: &mut [u8] = &mut [0];
let b12: &mut [u8] = &mut [0];
let b13: &mut [u8] = &mut [0];
let b14: &mut [u8] = &mut [0];
let b15: &mut [u8] = &mut [0];
let b16: &mut [u8] = &mut [0];
let mut bufs: [&mut IoVec; 16] = [
b1.into(), b2.into(), b3.into(), b4.into(),
b5.into(), b6.into(), b7.into(), b8.into(),
b9.into(), b10.into(), b11.into(), b12.into(),
b13.into(), b14.into(), b15.into(), b16.into(),
];
// `bytes_vec_mut` replaces the dummies with `buf`'s real windows and
// reports how many entries it filled in.
let n = buf.bytes_vec_mut(&mut bufs);
self.io.get_ref().read_bufs(&mut bufs[..n])
};
match r {
Ok(n) => {
// Mark the freshly-read bytes as initialized in the caller's buffer.
unsafe { buf.advance_mut(n); }
Ok(Async::Ready(n))
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
// Spurious readiness: re-register interest and report not-ready.
self.io.need_read()?;
Ok(Async::NotReady)
}
Err(e) => Err(e),
}
}
}
impl<'a> AsyncWrite for &'a TcpStream {
// No userspace state to tear down for a write-side shutdown here, so
// report immediate success.
fn shutdown(&mut self) -> Poll<(), io::Error> {
Ok(().into())
}
// Vectored write: gathers up to 64 buffers from `buf` and issues a single
// `write_bufs` call on the underlying socket.
fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
if let Async::NotReady = <TcpStream>::poll_write(self) {
return Ok(Async::NotReady)
}
let r = {
// The `IoVec` type can't have a zero-length size, so create a dummy
// version from a 1-length slice which we'll overwrite with the
// `bytes_vec` method.
static DUMMY: &[u8] = &[0];
let iovec = <&IoVec>::from(DUMMY);
let mut bufs = [iovec; 64];
let n = buf.bytes_vec(&mut bufs);
self.io.get_ref().write_bufs(&bufs[..n])
};
match r {
Ok(n) => {
// Consume the bytes that made it onto the wire.
buf.advance(n);
Ok(Async::Ready(n))
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
// Spurious readiness: re-register interest and report not-ready.
self.io.need_write()?;
Ok(Async::NotReady)
}
Err(e) => Err(e),
}
}
}
impl fmt::Debug for TcpStream {
// Debug output delegates to the underlying mio stream.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.io.get_ref().fmt(f)
}
}
impl Future for TcpStreamNew {
type Item = TcpStream;
type Error = io::Error;
// Thin wrapper: the actual state machine lives in `TcpStreamNewState`.
fn poll(&mut self) -> Poll<TcpStream, io::Error> {
self.inner.poll()
}
}
impl Future for TcpStreamNewState {
type Item = TcpStream;
type Error = io::Error;
// Drives the three-state connect machine: Waiting -> (Ready | NotReady),
// Error -> Err on first poll, Empty -> panic (polled after completion).
fn poll(&mut self) -> Poll<TcpStream, io::Error> {
{
let stream = match *self {
TcpStreamNewState::Waiting(ref s) => s,
TcpStreamNewState::Error(_) => {
// Move the error out, leaving the terminal `Empty` state behind.
let e = match mem::replace(self, TcpStreamNewState::Empty) {
TcpStreamNewState::Error(e) => e,
_ => panic!(),
};
return Err(e)
}
TcpStreamNewState::Empty => panic!("can't poll TCP stream twice"),
};
// Once we've connected, wait for the stream to be writable as
// that's when the actual connection has been initiated. Once we're
// writable we check for `take_socket_error` to see if the connect
// actually hit an error or not.
//
// If all that succeeded then we ship everything on up.
if let Async::NotReady = stream.io.poll_write() {
return Ok(Async::NotReady)
}
if let Some(e) = try!(stream.io.get_ref().take_error()) {
return Err(e)
}
}
// Success: hand the connected stream out, leaving `Empty` behind.
match mem::replace(self, TcpStreamNewState::Empty) {
TcpStreamNewState::Waiting(stream) => Ok(Async::Ready(stream)),
_ => panic!(),
}
}
}
// Platform-specific extension traits: raw-descriptor access on Unix; the
// Windows equivalents are pending upstream mio support (see below).
#[cfg(all(unix, not(target_os = "fuchsia")))]
mod sys {
use std::os::unix::prelude::*;
use super::{TcpStream, TcpListener};
impl AsRawFd for TcpStream {
fn as_raw_fd(&self) -> RawFd {
self.io.get_ref().as_raw_fd()
}
}
impl AsRawFd for TcpListener {
fn as_raw_fd(&self) -> RawFd {
self.io.get_ref().as_raw_fd()
}
}
}
#[cfg(windows)]
mod sys {
// TODO: let's land these upstream with mio and then we can add them here.
//
// use std::os::windows::prelude::*;
// use super::{TcpStream, TcpListener};
//
// impl AsRawHandle for TcpStream {
//     fn as_raw_handle(&self) -> RawHandle {
//         self.io.get_ref().as_raw_handle()
//     }
// }
//
// impl AsRawHandle for TcpListener {
//     fn as_raw_handle(&self) -> RawHandle {
//         self.listener.io().as_raw_handle()
//     }
// }
}
|
use colours::Colours;
use dir::Dir;
use file::File;
use column::Column;
use column::Column::*;
use feature::Attribute;
use output::{Grid, Details, Lines};
use term::dimensions;
use std::cmp::Ordering;
use std::fmt;
use std::num::ParseIntError;
use std::os::unix::fs::MetadataExt;
use getopts;
use natord;
use self::Misfire::*;
/// The *Options* struct represents a parsed version of the user's
/// command-line options.
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct Options {
/// What to do with directories named on the command line.
pub dir_action: DirAction,
/// Sorting, ordering, and visibility rules applied before listing.
pub filter: FileFilter,
/// Which output layout to render files with.
pub view: View,
}
/// Filtering and sorting rules applied to files before they are listed.
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct FileFilter {
/// Whether directories should be listed before other files.
list_dirs_first: bool,
/// Whether the sort order should be reversed.
reverse: bool,
/// Whether dotfiles should be kept in the listing.
show_invisibles: bool,
/// The field files are sorted by.
sort_field: SortField,
}
/// The output layout used when rendering the list of files.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum View {
/// A table of files with columns of metadata.
Details(Details),
/// One file entry per line.
Lines(Lines),
/// A multi-column grid of file names.
Grid(Grid),
}
impl Options {
/// Call getopts on the given slice of command-line strings.
///
/// Returns the parsed `Options` together with the list of path arguments
/// to list, or a `Misfire` describing why parsing stopped (which includes
/// the non-error `--help` and `--version` cases).
pub fn getopts(args: &[String]) -> Result<(Options, Vec<String>), Misfire> {
let mut opts = getopts::Options::new();
opts.optflag("1", "oneline", "display one entry per line");
opts.optflag("a", "all", "show dot-files");
opts.optflag("b", "binary", "use binary prefixes in file sizes");
opts.optflag("B", "bytes", "list file sizes in bytes, without prefixes");
opts.optflag("d", "list-dirs", "list directories as regular files");
opts.optflag("g", "group", "show group as well as user");
opts.optflag("", "group-directories-first", "list directories before other files");
opts.optflag("h", "header", "show a header row at the top");
opts.optflag("H", "links", "show number of hard links");
opts.optflag("i", "inode", "show each file's inode number");
opts.optflag("l", "long", "display extended details and attributes");
opts.optopt ("L", "level", "maximum depth of recursion", "DEPTH");
opts.optflag("m", "modified", "display timestamp of most recent modification");
opts.optflag("r", "reverse", "reverse order of files");
opts.optflag("R", "recurse", "recurse into directories");
opts.optopt ("s", "sort", "field to sort by", "WORD");
opts.optflag("S", "blocks", "show number of file system blocks");
opts.optopt ("t", "time", "which timestamp to show for a file", "WORD");
opts.optflag("T", "tree", "recurse into subdirectories in a tree view");
opts.optflag("u", "accessed", "display timestamp of last access for a file");
opts.optflag("U", "created", "display timestamp of creation for a file");
opts.optflag("x", "across", "sort multi-column view entries across");
opts.optflag("", "version", "display version of exa");
opts.optflag("?", "help", "show list of command-line options");
// Feature-gated flags are only registered when compiled in.
if cfg!(feature="git") {
opts.optflag("", "git", "show git status");
}
if Attribute::feature_implemented() {
opts.optflag("@", "extended", "display extended attribute keys and sizes in long (-l) output");
}
let matches = match opts.parse(args) {
Ok(m) => m,
Err(e) => return Err(Misfire::InvalidOptions(e)),
};
// `--help` and `--version` short-circuit parsing; they are surfaced as
// `Misfire`s even though they aren't errors.
if matches.opt_present("help") {
return Err(Misfire::Help(opts.usage("Usage:\n  exa [options] [files...]")));
}
else if matches.opt_present("version") {
return Err(Misfire::Version);
}
let sort_field = match matches.opt_str("sort") {
Some(word) => try!(SortField::from_word(word)),
None => SortField::default(),
};
let filter = FileFilter {
list_dirs_first: matches.opt_present("group-directories-first"),
reverse: matches.opt_present("reverse"),
show_invisibles: matches.opt_present("all"),
sort_field: sort_field,
};
// With no paths given, default to listing the current directory.
let path_strs = if matches.free.is_empty() {
vec![ ".".to_string() ]
}
else {
matches.free.clone()
};
let dir_action = try!(DirAction::deduce(&matches));
let view = try!(View::deduce(&matches, filter, dir_action));
Ok((Options {
dir_action: dir_action,
view: view,
filter: filter,
}, path_strs))
}
/// Delegate the sort/filter transformation to this set of options' filter.
pub fn transform_files(&self, files: &mut Vec<File>) {
self.filter.transform_files(files)
}
}
impl FileFilter {
    /// Transform the files (sorting, reversing, filtering) before listing them.
    pub fn transform_files(&self, files: &mut Vec<File>) {
        // Hide dotfiles unless --all was given.
        if !self.show_invisibles {
            files.retain(|f| !f.is_dotfile());
        }
        match self.sort_field {
            SortField::Unsorted => {},
            // Natural ordering, so e.g. "file10" sorts after "file9".
            SortField::Name => files.sort_by(|a, b| natord::compare(&*a.name, &*b.name)),
            SortField::Size => files.sort_by(|a, b| a.metadata.len().cmp(&b.metadata.len())),
            SortField::FileInode => files.sort_by(|a, b| a.metadata.as_raw().ino().cmp(&b.metadata.as_raw().ino())),
            SortField::ModifiedDate => files.sort_by(|a, b| a.metadata.as_raw().mtime().cmp(&b.metadata.as_raw().mtime())),
            SortField::AccessedDate => files.sort_by(|a, b| a.metadata.as_raw().atime().cmp(&b.metadata.as_raw().atime())),
            SortField::CreatedDate => files.sort_by(|a, b| a.metadata.as_raw().ctime().cmp(&b.metadata.as_raw().ctime())),
            // Ties on extension fall back to natural name order.
            SortField::Extension => files.sort_by(|a, b| match a.ext.cmp(&b.ext) {
                Ordering::Equal => natord::compare(&*a.name, &*b.name),
                order => order,
            }),
        }
        if self.reverse {
            files.reverse();
        }
        if self.list_dirs_first {
            // This relies on the fact that sort_by is stable.
            files.sort_by(|a, b| b.is_directory().cmp(&a.is_directory()));
        }
    }
}
/// User-supplied field to sort by.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum SortField {
    // `Unsorted` performs no sorting at all.
    Unsorted, Name, Extension, Size, FileInode,
    ModifiedDate, AccessedDate, CreatedDate,
}
impl Default for SortField {
    /// Sort by name when no --sort option is supplied.
    fn default() -> SortField {
        SortField::Name
    }
}
impl SortField {
    /// Find which field to use based on a user-supplied word.
    fn from_word(word: String) -> Result<SortField, Misfire> {
        match &word[..] {
            "name" | "filename" => Ok(SortField::Name),
            "size" | "filesize" => Ok(SortField::Size),
            "ext" | "extension" => Ok(SortField::Extension),
            "mod" | "modified" => Ok(SortField::ModifiedDate),
            "acc" | "accessed" => Ok(SortField::AccessedDate),
            "cr" | "created" => Ok(SortField::CreatedDate),
            "none" => Ok(SortField::Unsorted),
            "inode" => Ok(SortField::FileInode),
            // Anything else is reported back to the user as an error.
            field => Err(SortField::none(field))
        }
    }

    /// How to display an error when the word didn't match with anything.
    fn none(field: &str) -> Misfire {
        Misfire::InvalidOptions(getopts::Fail::UnrecognizedOption(format!("--sort {}", field)))
    }
}
/// One of these things could happen instead of listing files.
#[derive(PartialEq, Debug)]
pub enum Misfire {
    /// The getopts crate didn't like these arguments.
    InvalidOptions(getopts::Fail),
    /// The user asked for help. This isn't strictly an error, which is why
    /// this enum isn't named Error!
    Help(String),
    /// The user wanted the version number.
    Version,
    /// Two options were given that conflict with one another.
    Conflict(&'static str, &'static str),
    /// An option was given that does nothing when another one either is or
    /// isn't present. The bool is true when the other option *is* present.
    Useless(&'static str, bool, &'static str),
    /// An option was given that does nothing when either of two other options
    /// are not present.
    Useless2(&'static str, &'static str, &'static str),
    /// A numeric option was given that failed to be parsed as a number.
    FailedParse(ParseIntError),
}
impl Misfire {
    /// The OS return code this misfire should signify: 2 when the user
    /// asked for help, 3 for every other kind of misfire.
    pub fn error_code(&self) -> i32 {
        match *self {
            Help(_) => 2,
            _       => 3,
        }
    }
}
impl fmt::Display for Misfire {
    /// Render each misfire as a human-readable message for the user.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            InvalidOptions(ref e) => write!(f, "{}", e),
            Help(ref text) => write!(f, "{}", text),
            Version => write!(f, "exa {}", env!("CARGO_PKG_VERSION")),
            // FIX: the second option name was printed without the `--`
            // prefix, unlike every other message in this match.
            Conflict(a, b) => write!(f, "Option --{} conflicts with option --{}.", a, b),
            Useless(a, false, b) => write!(f, "Option --{} is useless without option --{}.", a, b),
            Useless(a, true, b) => write!(f, "Option --{} is useless given option --{}.", a, b),
            Useless2(a, b1, b2) => write!(f, "Option --{} is useless without options --{} or --{}.", a, b1, b2),
            FailedParse(ref e) => write!(f, "Failed to parse number: {}", e),
        }
    }
}
impl View {
    /// Decide which view (Details, Lines, or Grid) to use, rejecting any
    /// option that only makes sense in a different view.
    ///
    /// NOTE: the order of these checks determines which misfire is
    /// reported when several useless options are given at once.
    pub fn deduce(matches: &getopts::Matches, filter: FileFilter, dir_action: DirAction) -> Result<View, Misfire> {
        if matches.opt_present("long") {
            if matches.opt_present("across") {
                Err(Misfire::Useless("across", true, "long"))
            }
            else if matches.opt_present("oneline") {
                Err(Misfire::Useless("oneline", true, "long"))
            }
            else {
                let details = Details {
                    columns: try!(Columns::deduce(matches)),
                    header: matches.opt_present("header"),
                    recurse: dir_action.recurse_options().map(|o| (o, filter)),
                    xattr: Attribute::feature_implemented() && matches.opt_present("extended"),
                    // Only colourise when a terminal width can be detected.
                    colours: if dimensions().is_some() { Colours::colourful() } else { Colours::plain() },
                };
                Ok(View::Details(details))
            }
        }
        // Each of the following options does nothing without --long.
        else if matches.opt_present("binary") {
            Err(Misfire::Useless("binary", false, "long"))
        }
        else if matches.opt_present("bytes") {
            Err(Misfire::Useless("bytes", false, "long"))
        }
        else if matches.opt_present("inode") {
            Err(Misfire::Useless("inode", false, "long"))
        }
        else if matches.opt_present("links") {
            Err(Misfire::Useless("links", false, "long"))
        }
        else if matches.opt_present("header") {
            Err(Misfire::Useless("header", false, "long"))
        }
        else if matches.opt_present("blocks") {
            Err(Misfire::Useless("blocks", false, "long"))
        }
        else if cfg!(feature="git") && matches.opt_present("git") {
            Err(Misfire::Useless("git", false, "long"))
        }
        else if matches.opt_present("time") {
            Err(Misfire::Useless("time", false, "long"))
        }
        else if matches.opt_present("tree") {
            Err(Misfire::Useless("tree", false, "long"))
        }
        else if matches.opt_present("group") {
            Err(Misfire::Useless("group", false, "long"))
        }
        // --level needs one of the recursive modes to do anything.
        else if matches.opt_present("level") && !matches.opt_present("recurse") {
            Err(Misfire::Useless2("level", "recurse", "tree"))
        }
        else if Attribute::feature_implemented() && matches.opt_present("extended") {
            Err(Misfire::Useless("extended", false, "long"))
        }
        else if let Some((width, _)) = dimensions() {
            if matches.opt_present("oneline") {
                if matches.opt_present("across") {
                    Err(Misfire::Useless("across", true, "oneline"))
                }
                else {
                    let lines = Lines {
                        colours: Colours::colourful(),
                    };
                    Ok(View::Lines(lines))
                }
            }
            else {
                let grid = Grid {
                    across: matches.opt_present("across"),
                    console_width: width,
                    colours: Colours::colourful(),
                };
                Ok(View::Grid(grid))
            }
        }
        else {
            // If the terminal width couldn't be matched for some reason, such
            // as the program's stdout being connected to a file, then
            // fallback to the lines view.
            let lines = Lines {
                colours: Colours::plain(),
            };
            Ok(View::Lines(lines))
        }
    }
}
/// How file sizes should be rendered.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum SizeFormat {
    DecimalBytes,  // the default
    BinaryBytes,   // binary prefixes (--binary)
    JustBytes,     // plain byte counts with no prefix (--bytes)
}
impl Default for SizeFormat {
    /// Decimal prefixes are used when neither size flag is given.
    fn default() -> SizeFormat {
        SizeFormat::DecimalBytes
    }
}
impl SizeFormat {
    /// Determine the size format from the mutually-exclusive --binary
    /// and --bytes flags; giving both is a conflict.
    pub fn deduce(matches: &getopts::Matches) -> Result<SizeFormat, Misfire> {
        match (matches.opt_present("binary"), matches.opt_present("bytes")) {
            (true,  true)  => Err(Misfire::Conflict("binary", "bytes")),
            (true,  false) => Ok(SizeFormat::BinaryBytes),
            (false, true)  => Ok(SizeFormat::JustBytes),
            (false, false) => Ok(SizeFormat::DecimalBytes),
        }
    }
}
/// Which of a file's timestamps a column refers to.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum TimeType {
    FileAccessed,
    FileModified,
    FileCreated,
}
impl TimeType {
    /// The header text to display above this timestamp's column.
    pub fn header(&self) -> &'static str {
        match *self {
            TimeType::FileAccessed => "Date Accessed",
            TimeType::FileModified => "Date Modified",
            TimeType::FileCreated => "Date Created",
        }
    }
}
/// Which of a file's timestamps to display; several may be selected.
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct TimeTypes {
    accessed: bool,
    modified: bool,
    created: bool,
}
impl Default for TimeTypes {
    /// Only the modified time is shown by default.
    fn default() -> TimeTypes {
        TimeTypes { accessed: false, modified: true, created: false }
    }
}
impl TimeTypes {
    /// Find which fields to use based on the --time word and the
    /// individual --modified/--created/--accessed flags.
    fn deduce(matches: &getopts::Matches) -> Result<TimeTypes, Misfire> {
        let possible_word = matches.opt_str("time");
        let modified = matches.opt_present("modified");
        let created = matches.opt_present("created");
        let accessed = matches.opt_present("accessed");
        if let Some(word) = possible_word {
            // --time conflicts with each of the individual flags.
            if modified {
                return Err(Misfire::Useless("modified", true, "time"));
            }
            else if created {
                return Err(Misfire::Useless("created", true, "time"));
            }
            else if accessed {
                return Err(Misfire::Useless("accessed", true, "time"));
            }
            match &word[..] {
                "mod" | "modified" => Ok(TimeTypes { accessed: false, modified: true, created: false }),
                "acc" | "accessed" => Ok(TimeTypes { accessed: true, modified: false, created: false }),
                "cr" | "created" => Ok(TimeTypes { accessed: false, modified: false, created: true }),
                field => Err(TimeTypes::none(field)),
            }
        }
        else {
            // The individual flags can be combined freely.
            if modified || created || accessed {
                Ok(TimeTypes { accessed: accessed, modified: modified, created: created })
            }
            else {
                Ok(TimeTypes::default())
            }
        }
    }

    /// How to display an error when the word didn't match with anything.
    fn none(field: &str) -> Misfire {
        Misfire::InvalidOptions(getopts::Fail::UnrecognizedOption(format!("--time {}", field)))
    }
}
/// What to do when encountering a directory?
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum DirAction {
    AsFile,                   // list it as though it were a regular file (--list-dirs)
    List,                     // list its contents (the default)
    Recurse(RecurseOptions),  // descend into subdirectories (--recurse / --tree)
}
impl DirAction {
    /// Determine the directory action from the --recurse, --list-dirs,
    /// and --tree flags. --list-dirs conflicts with both recursive modes.
    pub fn deduce(matches: &getopts::Matches) -> Result<DirAction, Misfire> {
        let recurse = matches.opt_present("recurse");
        let list = matches.opt_present("list-dirs");
        let tree = matches.opt_present("tree");
        match (recurse, list, tree) {
            (true, true, _ ) => Err(Misfire::Conflict("recurse", "list-dirs")),
            (_, true, true ) => Err(Misfire::Conflict("tree", "list-dirs")),
            (true, false, false) => Ok(DirAction::Recurse(try!(RecurseOptions::deduce(matches, false)))),
            (_ , _, true ) => Ok(DirAction::Recurse(try!(RecurseOptions::deduce(matches, true)))),
            (false, true, _ ) => Ok(DirAction::AsFile),
            (false, false, _ ) => Ok(DirAction::List),
        }
    }

    /// The recursion options, if this action recurses at all.
    pub fn recurse_options(&self) -> Option<RecurseOptions> {
        match *self {
            DirAction::Recurse(opts) => Some(opts),
            _ => None,
        }
    }

    /// Whether directories should be listed as plain files.
    pub fn is_as_file(&self) -> bool {
        match *self {
            DirAction::AsFile => true,
            _ => false,
        }
    }

    /// Whether this action is a tree-view recursion.
    pub fn is_tree(&self) -> bool {
        match *self {
            // CONSISTENCY FIX: use `..` to ignore the remaining fields
            // (matching the later revision of this file) instead of the
            // brittle `max_depth: _`, so adding a field to
            // RecurseOptions won't break this pattern.
            DirAction::Recurse(RecurseOptions { tree, .. }) => tree,
            _ => false,
        }
    }
}
/// Options used when recursing into directories.
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct RecurseOptions {
    pub tree: bool,                // use the tree view (--tree)
    pub max_depth: Option<usize>,  // maximum recursion depth (--level)
}
impl RecurseOptions {
    /// Build the recursion options from the --level flag, if present,
    /// and whether the tree view was requested.
    pub fn deduce(matches: &getopts::Matches, tree: bool) -> Result<RecurseOptions, Misfire> {
        let max_depth = match matches.opt_str("level") {
            Some(level) => match level.parse() {
                Ok(l)  => Some(l),
                Err(e) => return Err(Misfire::FailedParse(e)),
            },
            None => None,
        };
        Ok(RecurseOptions {
            tree: tree,
            max_depth: max_depth,
        })
    }

    /// Whether a directory at the given depth exceeds the configured
    /// maximum; with no maximum, nothing is ever too deep.
    pub fn is_too_deep(&self, depth: usize) -> bool {
        match self.max_depth {
            Some(d) => d <= depth,
            None    => false,
        }
    }
}
/// Which columns to include in the --long details view.
#[derive(PartialEq, Copy, Clone, Debug, Default)]
pub struct Columns {
    size_format: SizeFormat,
    time_types: TimeTypes,
    inode: bool,
    links: bool,
    blocks: bool,
    group: bool,
    git: bool
}
impl Columns {
    /// Determine the set of columns from the command-line flags.
    pub fn deduce(matches: &getopts::Matches) -> Result<Columns, Misfire> {
        Ok(Columns {
            size_format: try!(SizeFormat::deduce(matches)),
            time_types: try!(TimeTypes::deduce(matches)),
            inode: matches.opt_present("inode"),
            links: matches.opt_present("links"),
            blocks: matches.opt_present("blocks"),
            group: matches.opt_present("group"),
            git: cfg!(feature="git") && matches.opt_present("git"),
        })
    }

    /// Build the list of columns for the given directory.
    /// NOTE: the push order here is the left-to-right display order.
    pub fn for_dir(&self, dir: Option<&Dir>) -> Vec<Column> {
        let mut columns = vec![];
        if self.inode {
            columns.push(Inode);
        }
        columns.push(Permissions);
        if self.links {
            columns.push(HardLinks);
        }
        columns.push(FileSize(self.size_format));
        if self.blocks {
            columns.push(Blocks);
        }
        columns.push(User);
        if self.group {
            columns.push(Group);
        }
        if self.time_types.modified {
            columns.push(Timestamp(TimeType::FileModified));
        }
        if self.time_types.created {
            columns.push(Timestamp(TimeType::FileCreated));
        }
        if self.time_types.accessed {
            columns.push(Timestamp(TimeType::FileAccessed));
        }
        // The Git column only appears when the feature is compiled in,
        // --git was passed, and the directory is in a Git repository.
        if cfg!(feature="git") {
            if let Some(d) = dir {
                if self.git && d.has_git_repo() {
                    columns.push(GitStatus);
                }
            }
        }
        columns
    }
}
#[cfg(test)]
mod test {
    // Tests for command-line parsing: free-argument handling and the
    // misfires produced by conflicting or useless options.
    use super::Options;
    use super::Misfire;
    use super::Misfire::*;
    use feature::Attribute;

    /// Whether the result is a Help misfire, which is how --help is
    /// reported rather than being a true error.
    fn is_helpful<T>(misfire: Result<T, Misfire>) -> bool {
        match misfire {
            Err(Help(_)) => true,
            _ => false,
        }
    }

    #[test]
    fn help() {
        let opts = Options::getopts(&[ "--help".to_string() ]);
        assert!(is_helpful(opts))
    }

    // --help wins even when file arguments are also given.
    #[test]
    fn help_with_file() {
        let opts = Options::getopts(&[ "--help".to_string(), "me".to_string() ]);
        assert!(is_helpful(opts))
    }

    // Free arguments come back as the list of paths to list.
    #[test]
    fn files() {
        let args = Options::getopts(&[ "this file".to_string(), "that file".to_string() ]).unwrap().1;
        assert_eq!(args, vec![ "this file".to_string(), "that file".to_string() ])
    }

    // No arguments means "list the current directory".
    #[test]
    fn no_args() {
        let args = Options::getopts(&[]).unwrap().1;
        assert_eq!(args, vec![ ".".to_string() ])
    }

    // --binary and --bytes conflict with each other.
    #[test]
    fn file_sizes() {
        let opts = Options::getopts(&[ "--long".to_string(), "--binary".to_string(), "--bytes".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Conflict("binary", "bytes"))
    }

    // The remaining tests check that long-view-only options are
    // rejected as useless without --long.
    #[test]
    fn just_binary() {
        let opts = Options::getopts(&[ "--binary".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("binary", false, "long"))
    }

    #[test]
    fn just_bytes() {
        let opts = Options::getopts(&[ "--bytes".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("bytes", false, "long"))
    }

    #[test]
    fn long_across() {
        let opts = Options::getopts(&[ "--long".to_string(), "--across".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("across", true, "long"))
    }

    #[test]
    fn oneline_across() {
        let opts = Options::getopts(&[ "--oneline".to_string(), "--across".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("across", true, "oneline"))
    }

    #[test]
    fn just_header() {
        let opts = Options::getopts(&[ "--header".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("header", false, "long"))
    }

    #[test]
    fn just_group() {
        let opts = Options::getopts(&[ "--group".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("group", false, "long"))
    }

    #[test]
    fn just_inode() {
        let opts = Options::getopts(&[ "--inode".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("inode", false, "long"))
    }

    #[test]
    fn just_links() {
        let opts = Options::getopts(&[ "--links".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("links", false, "long"))
    }

    #[test]
    fn just_blocks() {
        let opts = Options::getopts(&[ "--blocks".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("blocks", false, "long"))
    }

    // --git only exists when the feature is compiled in.
    #[test]
    #[cfg(feature="git")]
    fn just_git() {
        let opts = Options::getopts(&[ "--git".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("git", false, "long"))
    }

    // --extended only exists on platforms where xattrs are implemented.
    #[test]
    fn extended_without_long() {
        if Attribute::feature_implemented() {
            let opts = Options::getopts(&[ "--extended".to_string() ]);
            assert_eq!(opts.unwrap_err(), Misfire::Useless("extended", false, "long"))
        }
    }

    #[test]
    fn level_without_recurse_or_tree() {
        let opts = Options::getopts(&[ "--level".to_string(), "69105".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless2("level", "recurse", "tree"))
    }
}
Minuscule code cleanup
use colours::Colours;
use dir::Dir;
use file::File;
use column::Column;
use column::Column::*;
use feature::Attribute;
use output::{Grid, Details, Lines};
use term::dimensions;
use std::cmp::Ordering;
use std::fmt;
use std::num::ParseIntError;
use std::os::unix::fs::MetadataExt;
use getopts;
use natord;
use self::Misfire::*;
/// The *Options* struct represents a parsed version of the user's
/// command-line options.
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct Options {
    pub dir_action: DirAction,  // what to do with directories
    pub filter: FileFilter,     // how to hide, sort, and reverse files
    pub view: View,             // which output view to render
}
/// The filtering and sorting applied to a file list before display.
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct FileFilter {
    list_dirs_first: bool,  // --group-directories-first
    reverse: bool,          // --reverse
    show_invisibles: bool,  // --all: include dotfiles
    sort_field: SortField,  // --sort
}
/// Which view to use when rendering the list of files.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum View {
    Details(Details),  // the --long view
    Lines(Lines),      // one entry per line
    Grid(Grid),        // the multi-column view
}
impl Options {
    /// Call getopts on the given slice of command-line strings.
    ///
    /// Returns the parsed `Options` together with the list of paths to
    /// list (defaulting to `"."` when none are given), or a `Misfire`
    /// for `--help`, `--version`, or an argument error.
    ///
    /// NOTE: the registration order below is the order the flags appear
    /// in the `--help` usage text, so don't reorder it casually.
    pub fn getopts(args: &[String]) -> Result<(Options, Vec<String>), Misfire> {
        let mut opts = getopts::Options::new();
        opts.optflag("1", "oneline", "display one entry per line");
        opts.optflag("a", "all", "show dot-files");
        opts.optflag("b", "binary", "use binary prefixes in file sizes");
        opts.optflag("B", "bytes", "list file sizes in bytes, without prefixes");
        opts.optflag("d", "list-dirs", "list directories as regular files");
        opts.optflag("g", "group", "show group as well as user");
        opts.optflag("", "group-directories-first", "list directories before other files");
        opts.optflag("h", "header", "show a header row at the top");
        opts.optflag("H", "links", "show number of hard links");
        opts.optflag("i", "inode", "show each file's inode number");
        opts.optflag("l", "long", "display extended details and attributes");
        opts.optopt ("L", "level", "maximum depth of recursion", "DEPTH");
        opts.optflag("m", "modified", "display timestamp of most recent modification");
        opts.optflag("r", "reverse", "reverse order of files");
        opts.optflag("R", "recurse", "recurse into directories");
        opts.optopt ("s", "sort", "field to sort by", "WORD");
        opts.optflag("S", "blocks", "show number of file system blocks");
        opts.optopt ("t", "time", "which timestamp to show for a file", "WORD");
        opts.optflag("T", "tree", "recurse into subdirectories in a tree view");
        opts.optflag("u", "accessed", "display timestamp of last access for a file");
        opts.optflag("U", "created", "display timestamp of creation for a file");
        opts.optflag("x", "across", "sort multi-column view entries across");
        opts.optflag("", "version", "display version of exa");
        opts.optflag("?", "help", "show list of command-line options");
        // These flags only exist when their features are compiled in.
        if cfg!(feature="git") {
            opts.optflag("", "git", "show git status");
        }
        if Attribute::feature_implemented() {
            opts.optflag("@", "extended", "display extended attribute keys and sizes in long (-l) output");
        }
        let matches = match opts.parse(args) {
            Ok(m) => m,
            Err(e) => return Err(Misfire::InvalidOptions(e)),
        };
        // --help and --version short-circuit before any further validation.
        if matches.opt_present("help") {
            return Err(Misfire::Help(opts.usage("Usage:\n exa [options] [files...]")));
        }
        else if matches.opt_present("version") {
            return Err(Misfire::Version);
        }
        let sort_field = match matches.opt_str("sort") {
            Some(word) => try!(SortField::from_word(word)),
            None => SortField::default(),
        };
        let filter = FileFilter {
            list_dirs_first: matches.opt_present("group-directories-first"),
            reverse: matches.opt_present("reverse"),
            show_invisibles: matches.opt_present("all"),
            sort_field: sort_field,
        };
        // With no free arguments, list the current directory.
        let path_strs = if matches.free.is_empty() {
            vec![ ".".to_string() ]
        }
        else {
            matches.free.clone()
        };
        let dir_action = try!(DirAction::deduce(&matches));
        let view = try!(View::deduce(&matches, filter, dir_action));
        Ok((Options {
            dir_action: dir_action,
            view: view,
            filter: filter,
        }, path_strs))
    }

    /// Apply this set of options' filter (hiding, sorting, reversing)
    /// to the given list of files.
    pub fn transform_files(&self, files: &mut Vec<File>) {
        self.filter.transform_files(files)
    }
}
impl FileFilter {
    /// Transform the files (sorting, reversing, filtering) before listing them.
    pub fn transform_files(&self, files: &mut Vec<File>) {
        // Hide dotfiles unless --all was given.
        if !self.show_invisibles {
            files.retain(|f| !f.is_dotfile());
        }
        match self.sort_field {
            SortField::Unsorted => {},
            // Natural ordering, so e.g. "file10" sorts after "file9".
            SortField::Name => files.sort_by(|a, b| natord::compare(&*a.name, &*b.name)),
            SortField::Size => files.sort_by(|a, b| a.metadata.len().cmp(&b.metadata.len())),
            SortField::FileInode => files.sort_by(|a, b| a.metadata.as_raw().ino().cmp(&b.metadata.as_raw().ino())),
            SortField::ModifiedDate => files.sort_by(|a, b| a.metadata.as_raw().mtime().cmp(&b.metadata.as_raw().mtime())),
            SortField::AccessedDate => files.sort_by(|a, b| a.metadata.as_raw().atime().cmp(&b.metadata.as_raw().atime())),
            SortField::CreatedDate => files.sort_by(|a, b| a.metadata.as_raw().ctime().cmp(&b.metadata.as_raw().ctime())),
            // Ties on extension fall back to natural name order.
            SortField::Extension => files.sort_by(|a, b| match a.ext.cmp(&b.ext) {
                Ordering::Equal => natord::compare(&*a.name, &*b.name),
                order => order,
            }),
        }
        if self.reverse {
            files.reverse();
        }
        if self.list_dirs_first {
            // This relies on the fact that sort_by is stable.
            files.sort_by(|a, b| b.is_directory().cmp(&a.is_directory()));
        }
    }
}
/// User-supplied field to sort by.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum SortField {
    // `Unsorted` performs no sorting at all.
    Unsorted, Name, Extension, Size, FileInode,
    ModifiedDate, AccessedDate, CreatedDate,
}
impl Default for SortField {
    /// Sort by name when no --sort option is supplied.
    fn default() -> SortField {
        SortField::Name
    }
}
impl SortField {
    /// Find which field to use based on a user-supplied word.
    fn from_word(word: String) -> Result<SortField, Misfire> {
        match &word[..] {
            "name" | "filename" => Ok(SortField::Name),
            "size" | "filesize" => Ok(SortField::Size),
            "ext" | "extension" => Ok(SortField::Extension),
            "mod" | "modified" => Ok(SortField::ModifiedDate),
            "acc" | "accessed" => Ok(SortField::AccessedDate),
            "cr" | "created" => Ok(SortField::CreatedDate),
            "none" => Ok(SortField::Unsorted),
            "inode" => Ok(SortField::FileInode),
            // Anything else is reported back to the user as an error.
            field => Err(SortField::none(field))
        }
    }

    /// How to display an error when the word didn't match with anything.
    fn none(field: &str) -> Misfire {
        Misfire::InvalidOptions(getopts::Fail::UnrecognizedOption(format!("--sort {}", field)))
    }
}
/// One of these things could happen instead of listing files.
#[derive(PartialEq, Debug)]
pub enum Misfire {
    /// The getopts crate didn't like these arguments.
    InvalidOptions(getopts::Fail),
    /// The user asked for help. This isn't strictly an error, which is why
    /// this enum isn't named Error!
    Help(String),
    /// The user wanted the version number.
    Version,
    /// Two options were given that conflict with one another.
    Conflict(&'static str, &'static str),
    /// An option was given that does nothing when another one either is or
    /// isn't present. The bool is true when the other option *is* present.
    Useless(&'static str, bool, &'static str),
    /// An option was given that does nothing when either of two other options
    /// are not present.
    Useless2(&'static str, &'static str, &'static str),
    /// A numeric option was given that failed to be parsed as a number.
    FailedParse(ParseIntError),
}
impl Misfire {
    /// The OS return code this misfire should signify: 2 when the user
    /// asked for help, 3 for every other kind of misfire.
    pub fn error_code(&self) -> i32 {
        match *self {
            Help(_) => 2,
            _       => 3,
        }
    }
}
impl fmt::Display for Misfire {
    /// Render each misfire as a human-readable message for the user.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            InvalidOptions(ref e) => write!(f, "{}", e),
            Help(ref text) => write!(f, "{}", text),
            Version => write!(f, "exa {}", env!("CARGO_PKG_VERSION")),
            // FIX: the second option name was printed without the `--`
            // prefix, unlike every other message in this match.
            Conflict(a, b) => write!(f, "Option --{} conflicts with option --{}.", a, b),
            Useless(a, false, b) => write!(f, "Option --{} is useless without option --{}.", a, b),
            Useless(a, true, b) => write!(f, "Option --{} is useless given option --{}.", a, b),
            Useless2(a, b1, b2) => write!(f, "Option --{} is useless without options --{} or --{}.", a, b1, b2),
            FailedParse(ref e) => write!(f, "Failed to parse number: {}", e),
        }
    }
}
impl View {
    /// Decide which view (Details, Lines, or Grid) to use, rejecting any
    /// option that only makes sense in a different view.
    ///
    /// NOTE: the order of these checks determines which misfire is
    /// reported when several useless options are given at once.
    pub fn deduce(matches: &getopts::Matches, filter: FileFilter, dir_action: DirAction) -> Result<View, Misfire> {
        if matches.opt_present("long") {
            if matches.opt_present("across") {
                Err(Misfire::Useless("across", true, "long"))
            }
            else if matches.opt_present("oneline") {
                Err(Misfire::Useless("oneline", true, "long"))
            }
            else {
                let details = Details {
                    columns: try!(Columns::deduce(matches)),
                    header: matches.opt_present("header"),
                    recurse: dir_action.recurse_options().map(|o| (o, filter)),
                    xattr: Attribute::feature_implemented() && matches.opt_present("extended"),
                    // Only colourise when a terminal width can be detected.
                    colours: if dimensions().is_some() { Colours::colourful() } else { Colours::plain() },
                };
                Ok(View::Details(details))
            }
        }
        // Each of the following options does nothing without --long.
        else if matches.opt_present("binary") {
            Err(Misfire::Useless("binary", false, "long"))
        }
        else if matches.opt_present("bytes") {
            Err(Misfire::Useless("bytes", false, "long"))
        }
        else if matches.opt_present("inode") {
            Err(Misfire::Useless("inode", false, "long"))
        }
        else if matches.opt_present("links") {
            Err(Misfire::Useless("links", false, "long"))
        }
        else if matches.opt_present("header") {
            Err(Misfire::Useless("header", false, "long"))
        }
        else if matches.opt_present("blocks") {
            Err(Misfire::Useless("blocks", false, "long"))
        }
        else if cfg!(feature="git") && matches.opt_present("git") {
            Err(Misfire::Useless("git", false, "long"))
        }
        else if matches.opt_present("time") {
            Err(Misfire::Useless("time", false, "long"))
        }
        else if matches.opt_present("tree") {
            Err(Misfire::Useless("tree", false, "long"))
        }
        else if matches.opt_present("group") {
            Err(Misfire::Useless("group", false, "long"))
        }
        // --level needs one of the recursive modes to do anything.
        else if matches.opt_present("level") && !matches.opt_present("recurse") {
            Err(Misfire::Useless2("level", "recurse", "tree"))
        }
        else if Attribute::feature_implemented() && matches.opt_present("extended") {
            Err(Misfire::Useless("extended", false, "long"))
        }
        else if let Some((width, _)) = dimensions() {
            if matches.opt_present("oneline") {
                if matches.opt_present("across") {
                    Err(Misfire::Useless("across", true, "oneline"))
                }
                else {
                    let lines = Lines {
                        colours: Colours::colourful(),
                    };
                    Ok(View::Lines(lines))
                }
            }
            else {
                let grid = Grid {
                    across: matches.opt_present("across"),
                    console_width: width,
                    colours: Colours::colourful(),
                };
                Ok(View::Grid(grid))
            }
        }
        else {
            // If the terminal width couldn't be matched for some reason, such
            // as the program's stdout being connected to a file, then
            // fallback to the lines view.
            let lines = Lines {
                colours: Colours::plain(),
            };
            Ok(View::Lines(lines))
        }
    }
}
/// How file sizes should be rendered.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum SizeFormat {
    DecimalBytes,  // the default
    BinaryBytes,   // binary prefixes (--binary)
    JustBytes,     // plain byte counts with no prefix (--bytes)
}
impl Default for SizeFormat {
    /// Decimal prefixes are used when neither size flag is given.
    fn default() -> SizeFormat {
        SizeFormat::DecimalBytes
    }
}
impl SizeFormat {
    /// Determine the size format from the mutually-exclusive --binary
    /// and --bytes flags; giving both is a conflict.
    pub fn deduce(matches: &getopts::Matches) -> Result<SizeFormat, Misfire> {
        match (matches.opt_present("binary"), matches.opt_present("bytes")) {
            (true,  true)  => Err(Misfire::Conflict("binary", "bytes")),
            (true,  false) => Ok(SizeFormat::BinaryBytes),
            (false, true)  => Ok(SizeFormat::JustBytes),
            (false, false) => Ok(SizeFormat::DecimalBytes),
        }
    }
}
/// Which of a file's timestamps a column refers to.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum TimeType {
    FileAccessed,
    FileModified,
    FileCreated,
}
impl TimeType {
    /// The header text to display above this timestamp's column.
    pub fn header(&self) -> &'static str {
        match *self {
            TimeType::FileAccessed => "Date Accessed",
            TimeType::FileModified => "Date Modified",
            TimeType::FileCreated => "Date Created",
        }
    }
}
/// Which of a file's timestamps to display; several may be selected.
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct TimeTypes {
    accessed: bool,
    modified: bool,
    created: bool,
}
impl Default for TimeTypes {
    /// Only the modified time is shown by default.
    fn default() -> TimeTypes {
        TimeTypes { accessed: false, modified: true, created: false }
    }
}
impl TimeTypes {
    /// Find which fields to use based on the --time word and the
    /// individual --modified/--created/--accessed flags.
    fn deduce(matches: &getopts::Matches) -> Result<TimeTypes, Misfire> {
        let possible_word = matches.opt_str("time");
        let modified = matches.opt_present("modified");
        let created = matches.opt_present("created");
        let accessed = matches.opt_present("accessed");
        if let Some(word) = possible_word {
            // --time conflicts with each of the individual flags.
            if modified {
                return Err(Misfire::Useless("modified", true, "time"));
            }
            else if created {
                return Err(Misfire::Useless("created", true, "time"));
            }
            else if accessed {
                return Err(Misfire::Useless("accessed", true, "time"));
            }
            match &word[..] {
                "mod" | "modified" => Ok(TimeTypes { accessed: false, modified: true, created: false }),
                "acc" | "accessed" => Ok(TimeTypes { accessed: true, modified: false, created: false }),
                "cr" | "created" => Ok(TimeTypes { accessed: false, modified: false, created: true }),
                field => Err(TimeTypes::none(field)),
            }
        }
        else {
            // The individual flags can be combined freely.
            if modified || created || accessed {
                Ok(TimeTypes { accessed: accessed, modified: modified, created: created })
            }
            else {
                Ok(TimeTypes::default())
            }
        }
    }

    /// How to display an error when the word didn't match with anything.
    fn none(field: &str) -> Misfire {
        Misfire::InvalidOptions(getopts::Fail::UnrecognizedOption(format!("--time {}", field)))
    }
}
/// What to do when encountering a directory?
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum DirAction {
    AsFile,                   // list it as though it were a regular file (--list-dirs)
    List,                     // list its contents (the default)
    Recurse(RecurseOptions),  // descend into subdirectories (--recurse / --tree)
}
impl DirAction {
    /// Determine the directory action from the --recurse, --list-dirs,
    /// and --tree flags. --list-dirs conflicts with both recursive modes.
    pub fn deduce(matches: &getopts::Matches) -> Result<DirAction, Misfire> {
        let recurse = matches.opt_present("recurse");
        let list = matches.opt_present("list-dirs");
        let tree = matches.opt_present("tree");
        match (recurse, list, tree) {
            (true, true, _ ) => Err(Misfire::Conflict("recurse", "list-dirs")),
            (_, true, true ) => Err(Misfire::Conflict("tree", "list-dirs")),
            (true, false, false) => Ok(DirAction::Recurse(try!(RecurseOptions::deduce(matches, false)))),
            (_ , _, true ) => Ok(DirAction::Recurse(try!(RecurseOptions::deduce(matches, true)))),
            (false, true, _ ) => Ok(DirAction::AsFile),
            (false, false, _ ) => Ok(DirAction::List),
        }
    }

    /// The recursion options, if this action recurses at all.
    pub fn recurse_options(&self) -> Option<RecurseOptions> {
        match *self {
            DirAction::Recurse(opts) => Some(opts),
            _ => None,
        }
    }

    /// Whether directories should be listed as plain files.
    pub fn is_as_file(&self) -> bool {
        match *self {
            DirAction::AsFile => true,
            _ => false,
        }
    }

    /// Whether this action is a tree-view recursion.
    pub fn is_tree(&self) -> bool {
        match *self {
            DirAction::Recurse(RecurseOptions { tree, .. }) => tree,
            _ => false,
        }
    }
}
/// Options used when recursing into directories.
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct RecurseOptions {
    pub tree: bool,                // use the tree view (--tree)
    pub max_depth: Option<usize>,  // maximum recursion depth (--level)
}
impl RecurseOptions {
    /// Build the recursion options from the --level flag, if present,
    /// and whether the tree view was requested.
    pub fn deduce(matches: &getopts::Matches, tree: bool) -> Result<RecurseOptions, Misfire> {
        let max_depth = match matches.opt_str("level") {
            Some(level) => match level.parse() {
                Ok(l)  => Some(l),
                Err(e) => return Err(Misfire::FailedParse(e)),
            },
            None => None,
        };
        Ok(RecurseOptions {
            tree: tree,
            max_depth: max_depth,
        })
    }

    /// Whether a directory at the given depth exceeds the configured
    /// maximum; with no maximum, nothing is ever too deep.
    pub fn is_too_deep(&self, depth: usize) -> bool {
        match self.max_depth {
            Some(d) => d <= depth,
            None    => false,
        }
    }
}
/// Which columns to include in the --long details view.
#[derive(PartialEq, Copy, Clone, Debug, Default)]
pub struct Columns {
    size_format: SizeFormat,
    time_types: TimeTypes,
    inode: bool,
    links: bool,
    blocks: bool,
    group: bool,
    git: bool
}
impl Columns {
    /// Determine the set of columns from the command-line flags.
    pub fn deduce(matches: &getopts::Matches) -> Result<Columns, Misfire> {
        Ok(Columns {
            size_format: try!(SizeFormat::deduce(matches)),
            time_types: try!(TimeTypes::deduce(matches)),
            inode: matches.opt_present("inode"),
            links: matches.opt_present("links"),
            blocks: matches.opt_present("blocks"),
            group: matches.opt_present("group"),
            git: cfg!(feature="git") && matches.opt_present("git"),
        })
    }

    /// Build the list of columns for the given directory.
    /// NOTE: the push order here is the left-to-right display order.
    pub fn for_dir(&self, dir: Option<&Dir>) -> Vec<Column> {
        let mut columns = vec![];
        if self.inode {
            columns.push(Inode);
        }
        columns.push(Permissions);
        if self.links {
            columns.push(HardLinks);
        }
        columns.push(FileSize(self.size_format));
        if self.blocks {
            columns.push(Blocks);
        }
        columns.push(User);
        if self.group {
            columns.push(Group);
        }
        if self.time_types.modified {
            columns.push(Timestamp(TimeType::FileModified));
        }
        if self.time_types.created {
            columns.push(Timestamp(TimeType::FileCreated));
        }
        if self.time_types.accessed {
            columns.push(Timestamp(TimeType::FileAccessed));
        }
        // The Git column only appears when the feature is compiled in,
        // --git was passed, and the directory is in a Git repository.
        if cfg!(feature="git") {
            if let Some(d) = dir {
                if self.git && d.has_git_repo() {
                    columns.push(GitStatus);
                }
            }
        }
        columns
    }
}
/// Unit tests for command-line option parsing and misfire detection.
#[cfg(test)]
mod test {
    use super::Options;
    use super::Misfire;
    use super::Misfire::*;
    use feature::Attribute;
    /// True when parsing produced the `Help` misfire (e.g. `--help` was given).
    fn is_helpful<T>(misfire: Result<T, Misfire>) -> bool {
        match misfire {
            Err(Help(_)) => true,
            _ => false,
        }
    }
    #[test]
    fn help() {
        let opts = Options::getopts(&[ "--help".to_string() ]);
        assert!(is_helpful(opts))
    }
    #[test]
    fn help_with_file() {
        // --help takes precedence even when a file argument is also present.
        let opts = Options::getopts(&[ "--help".to_string(), "me".to_string() ]);
        assert!(is_helpful(opts))
    }
    #[test]
    fn files() {
        let args = Options::getopts(&[ "this file".to_string(), "that file".to_string() ]).unwrap().1;
        assert_eq!(args, vec![ "this file".to_string(), "that file".to_string() ])
    }
    #[test]
    fn no_args() {
        // With no arguments, the current directory is listed by default.
        let args = Options::getopts(&[]).unwrap().1;
        assert_eq!(args, vec![ ".".to_string() ])
    }
    #[test]
    fn file_sizes() {
        // --binary and --bytes are mutually exclusive size formats.
        let opts = Options::getopts(&[ "--long".to_string(), "--binary".to_string(), "--bytes".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Conflict("binary", "bytes"))
    }
    // The options below are only meaningful alongside --long (or, for
    // --across, incompatible with certain view modes), so using them
    // alone must produce a Useless misfire.
    #[test]
    fn just_binary() {
        let opts = Options::getopts(&[ "--binary".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("binary", false, "long"))
    }
    #[test]
    fn just_bytes() {
        let opts = Options::getopts(&[ "--bytes".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("bytes", false, "long"))
    }
    #[test]
    fn long_across() {
        let opts = Options::getopts(&[ "--long".to_string(), "--across".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("across", true, "long"))
    }
    #[test]
    fn oneline_across() {
        let opts = Options::getopts(&[ "--oneline".to_string(), "--across".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("across", true, "oneline"))
    }
    #[test]
    fn just_header() {
        let opts = Options::getopts(&[ "--header".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("header", false, "long"))
    }
    #[test]
    fn just_group() {
        let opts = Options::getopts(&[ "--group".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("group", false, "long"))
    }
    #[test]
    fn just_inode() {
        let opts = Options::getopts(&[ "--inode".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("inode", false, "long"))
    }
    #[test]
    fn just_links() {
        let opts = Options::getopts(&[ "--links".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("links", false, "long"))
    }
    #[test]
    fn just_blocks() {
        let opts = Options::getopts(&[ "--blocks".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("blocks", false, "long"))
    }
    #[test]
    #[cfg(feature="git")]
    fn just_git() {
        let opts = Options::getopts(&[ "--git".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless("git", false, "long"))
    }
    #[test]
    fn extended_without_long() {
        // Only meaningful on platforms where extended attributes are implemented.
        if Attribute::feature_implemented() {
            let opts = Options::getopts(&[ "--extended".to_string() ]);
            assert_eq!(opts.unwrap_err(), Misfire::Useless("extended", false, "long"))
        }
    }
    #[test]
    fn level_without_recurse_or_tree() {
        // --level needs either --recurse or --tree to make sense.
        let opts = Options::getopts(&[ "--level".to_string(), "69105".to_string() ]);
        assert_eq!(opts.unwrap_err(), Misfire::Useless2("level", "recurse", "tree"))
    }
}
|
use std;
/// Number of data-carrying bits in a received word; the 64th transmitted bit
/// comes from the final row of `GEN`.
const WORD_SIZE: usize = 63;
/// Design distance of the code: 2t+1 = 23, so up to t = 11 bit errors are
/// correctable (see `ERRORS` below).
const DISTANCE: usize = 23;
/// Generator matrix, one row per output bit (64 rows -> a `u64` codeword).
/// `encode` dots each row with the 16-bit input word modulo 2. The first 16
/// rows are the identity, so the codeword is systematic (data bits first).
const GEN: &'static [u16] = &[
    0b1000000000000000,
    0b0100000000000000,
    0b0010000000000000,
    0b0001000000000000,
    0b0000100000000000,
    0b0000010000000000,
    0b0000001000000000,
    0b0000000100000000,
    0b0000000010000000,
    0b0000000001000000,
    0b0000000000100000,
    0b0000000000010000,
    0b0000000000001000,
    0b0000000000000100,
    0b0000000000000010,
    0b0000000000000001,
    0b1110110001000111,
    0b1001101001100100,
    0b0100110100110010,
    0b0010011010011001,
    0b1111111100001011,
    0b1001001111000010,
    0b0100100111100001,
    0b1100100010110111,
    0b1000100000011100,
    0b0100010000001110,
    0b0010001000000111,
    0b1111110101000100,
    0b0111111010100010,
    0b0011111101010001,
    0b1111001111101111,
    0b1001010110110000,
    0b0100101011011000,
    0b0010010101101100,
    0b0001001010110110,
    0b0000100101011011,
    0b1110100011101010,
    0b0111010001110101,
    0b1101011001111101,
    0b1000011101111001,
    0b1010111111111011,
    0b1011101110111010,
    0b0101110111011101,
    0b1100001010101001,
    0b1000110100010011,
    0b1010101011001110,
    0b0101010101100111,
    0b1100011011110100,
    0b0110001101111010,
    0b0011000110111101,
    0b1111010010011001,
    0b1001011000001011,
    0b1010011101000010,
    0b0101001110100001,
    0b1100010110010111,
    0b1000111010001100,
    0b0100011101000110,
    0b0010001110100011,
    0b1111110110010110,
    0b0111111011001011,
    0b1101001100100010,
    0b0110100110010001,
    0b1101100010001111,
    0b0000000000000011,
];
/// Encodes a 16-bit word into a 64-bit codeword, one output bit per row of
/// `GEN`: each bit is the mod-2 dot product (parity) of `word` with the row.
pub fn encode(word: u16) -> u64 {
    let mut out = 0u64;
    for row in GEN.iter() {
        let bit = ((word & row).count_ones() & 1) as u64;
        out = (out << 1) | bit;
    }
    out
}
/// Attempts to correct bit errors in the received 63-bit word.
///
/// Returns the corrected word and the number of bits that were flipped, or
/// `None` when the error pattern is uncorrectable.
pub fn decode(word: u64) -> Option<(u64, usize)> {
    // Run Berlekamp-Massey over the syndromes to get the error locator polynomial.
    let poly = BCHDecoder::new(syndromes(word)).decode();
    // The degree of the locator polynomial is the number of errors; a zero
    // polynomial (no degree) means no locator could be derived.
    let errors = match poly.degree() {
        Some(deg) => deg,
        None => return None,
    };
    // Chien search: each yielded location is a bit position to flip.
    let locs = ErrorLocations::new(poly.coefs().iter().cloned());
    let (word, count) = locs.take(errors).fold((word, 0), |(word, s), loc| {
        (word ^ 1 << loc, s + 1)
    });
    // "If the Chien Search fails to find v roots of a error locator polynomial of degree
    // v, then the error pattern is an uncorrectable error pattern" -- Lecture 17:
    // Berlekamp-Massey Algorithm for Binary BCH Codes
    if count == errors {
        Some((word, errors))
    } else {
        None
    }
}
// word has r_{n-1} as MSB and r_0 as LSB
/// Computes the syndromes S_1 .. S_{DISTANCE-1} of the received word: each is
/// the sum over the set bits b of α^(b*t) in GF(2^6).
fn syndromes(word: u64) -> Vec<Codeword> {
    (1..DISTANCE).map(|t| {
        (0..WORD_SIZE)
            .filter(|&b| word >> b & 1 == 1)
            .fold(Codeword::default(), |s, b| s + Codeword::for_power(b * t))
    }).collect()
}
/// An element of GF(2^6), stored in its 6-bit polynomial representation.
/// The zero element is represented by 0; nonzero elements are powers of the
/// primitive element α (see the CODEWORDS/POWERS tables below).
#[derive(Copy, Clone)]
struct Codeword(u8);
impl std::fmt::Debug for Codeword {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Debug-print the discrete log (power of α) rather than the raw bits.
        write!(fmt, "Codeword({:?})", self.power())
    }
}
impl Codeword {
    pub fn new(codeword: u8) -> Codeword {
        Codeword(codeword)
    }
    /// True for the zero element, which has no discrete log.
    pub fn zero(&self) -> bool { self.0 == 0 }
    /// Discrete log: the i such that this element equals α^i, or `None` for zero.
    pub fn power(&self) -> Option<usize> {
        if self.zero() {
            None
        } else {
            // POWERS is indexed by representation minus one (zero excluded).
            Some(POWERS[self.0 as usize - 1])
        }
    }
    /// Constructs α^power; powers wrap modulo 63 (the multiplicative order).
    pub fn for_power(power: usize) -> Codeword {
        Codeword::new(CODEWORDS[power % POWERS.len()])
    }
    /// Multiplicative inverse: α^-p = α^(63-p). Panics on the zero element.
    pub fn invert(self) -> Codeword {
        match self.power() {
            Some(p) => Codeword::for_power(POWERS.len() - p),
            None => panic!("divide by zero"),
        }
    }
}
impl Default for Codeword {
    /// The additive identity (zero element).
    fn default() -> Self {
        Codeword::new(0)
    }
}
impl std::ops::Mul for Codeword {
    type Output = Codeword;
    fn mul(self, rhs: Codeword) -> Self::Output {
        // Multiplication adds discrete logs; anything times zero is zero.
        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Codeword::for_power(p + q),
            _ => Codeword::default(),
        }
    }
}
impl std::ops::Div for Codeword {
    type Output = Codeword;
    fn div(self, rhs: Codeword) -> Self::Output {
        match (self.power(), rhs.power()) {
            // min(power) = -62 => 63+min(power) > 0
            (Some(p), Some(q)) => Codeword::for_power(p + POWERS.len() - q),
            // Zero divided by anything nonzero is zero.
            (None, Some(_)) => Codeword::default(),
            (_, None) => panic!("divide by zero"),
        }
    }
}
impl std::ops::Add for Codeword {
    type Output = Codeword;
    fn add(self, rhs: Codeword) -> Self::Output {
        // Addition in GF(2^6) is bitwise XOR of the representations.
        Codeword::new(self.0 ^ rhs.0)
    }
}
impl std::ops::Sub for Codeword {
    type Output = Codeword;
    fn sub(self, rhs: Codeword) -> Self::Output {
        // In characteristic 2, subtraction and addition coincide.
        self + rhs
    }
}
impl std::cmp::PartialEq for Codeword {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}
impl std::cmp::Eq for Codeword {}
impl std::cmp::PartialOrd for Codeword {
    /// Orders elements by discrete log, with zero smaller than everything else.
    fn partial_cmp(&self, rhs: &Self) -> Option<std::cmp::Ordering> {
        use std::cmp::Ordering::*;
        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Some(p.cmp(&q)),
            (Some(_), None) => Some(Greater),
            (None, Some(_)) => Some(Less),
            (None, None) => Some(Equal),
        }
    }
}
impl std::cmp::Ord for Codeword {
    fn cmp(&self, rhs: &Self) -> std::cmp::Ordering {
        // partial_cmp never returns None, so this unwrap cannot panic.
        self.partial_cmp(rhs).unwrap()
    }
}
// 2t+1 = 23 => t = 11
const ERRORS: usize = 11;
/// Number of syndromes consumed by Berlekamp-Massey: 2t.
const SYNDROMES: usize = 2 * ERRORS;
/// A polynomial over GF(2^6) with bounded degree. "Division by x" is done
/// cheaply by advancing `start` instead of moving coefficients.
#[derive(Copy, Clone)]
struct Polynomial {
    /// Coefficients of the polynomial.
    coefs: [Codeword; SYNDROMES + 2],
    /// Index into `coefs` of the degree-0 coefficient.
    start: usize,
}
impl Polynomial {
    /// Builds a polynomial from coefficients in ascending-degree order;
    /// positions not supplied stay zero.
    pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> Polynomial {
        let mut c = [Codeword::default(); SYNDROMES + 2];
        for (i, coef) in coefs.enumerate() {
            c[i] = c[i] + coef;
        }
        Polynomial {
            coefs: c,
            start: 0,
        }
    }
    /// The degree-0 (constant) coefficient.
    pub fn constant(&self) -> Codeword {
        self.coefs[self.start]
    }
    /// All coefficients from degree 0 upward.
    pub fn coefs(&self) -> &[Codeword] {
        &self.coefs[self.start..]
    }
    /// Highest degree with a nonzero coefficient, or `None` for the zero polynomial.
    pub fn degree(&self) -> Option<usize> {
        for (deg, coef) in self.coefs.iter().enumerate().rev() {
            if !coef.zero() {
                // Degrees are relative to `start`; slots below `start` are
                // always zeroed by `shift`, so this subtraction cannot underflow.
                return Some(deg - self.start);
            }
        }
        None
    }
    /// Divides by x: drops the constant term and shifts every degree down by one.
    pub fn shift(mut self) -> Polynomial {
        self.coefs[self.start] = Codeword::default();
        self.start += 1;
        self
    }
    /// Raw indexed access; out-of-range slots read as zero.
    fn get(&self, idx: usize) -> Codeword {
        match self.coefs.get(idx) {
            Some(&c) => c,
            None => Codeword::default(),
        }
    }
    /// Coefficient of x^deg (zero if beyond the stored range).
    pub fn coef(&self, deg: usize) -> Codeword {
        self.get(self.start + deg)
    }
}
impl std::fmt::Display for Polynomial {
    /// Formats the polynomial as a sum of `a^p*x^i` terms followed by "0".
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
        for (i, coef) in self.coefs().iter().enumerate() {
            // Zero coefficients have no power representation and are skipped;
            // `if let` replaces the match with an empty None arm.
            if let Some(p) = coef.power() {
                try!(write!(fmt, "a^{}*x^{} + ", p, i));
            }
        }
        try!(write!(fmt, "0"));
        Ok(())
    }
}
impl std::ops::Add for Polynomial {
    type Output = Polynomial;
    fn add(mut self, rhs: Polynomial) -> Self::Output {
        // `coef(i)` reads relative to each side's `start`, so besides summing
        // this also renormalizes the result back to start = 0. Reads are
        // always at index start+i >= i, ahead of the write at i, so no
        // already-overwritten slot is read.
        for i in 0..self.coefs.len() {
            self.coefs[i] = self.coef(i) + rhs.coef(i);
        }
        self.start = 0;
        self
    }
}
impl std::ops::Mul<Codeword> for Polynomial {
    type Output = Polynomial;
    /// Scales every coefficient by the given scalar.
    fn mul(mut self, rhs: Codeword) -> Self::Output {
        for coef in self.coefs.iter_mut() {
            *coef = *coef * rhs;
        }
        self
    }
}
/// Berlekamp-Massey decoder state. The `q` polynomials track the syndrome
/// sequence / discrepancies and the `p` polynomials build up the error
/// locator; `deg_*` are bookkeeping degrees used to decide when to snapshot.
struct BCHDecoder {
    p_cur: Polynomial,
    p_saved: Polynomial,
    q_cur: Polynomial,
    q_saved: Polynomial,
    deg_cur: usize,
    deg_saved: usize,
}
impl BCHDecoder {
    /// Initializes with Q_saved(x) = 1 + S(x), Q_cur(x) = S(x), and the `p`
    /// polynomials seeded with a single high-degree unit coefficient.
    pub fn new(syndromes: Vec<Codeword>) -> BCHDecoder {
        BCHDecoder {
            q_saved: Polynomial::new(
                std::iter::once(Codeword::for_power(0))
                    .chain(syndromes.iter().cloned())),
            q_cur: Polynomial::new(syndromes.iter().cloned()),
            p_saved: Polynomial::new(
                (0..SYNDROMES+1).map(|_| Codeword::default())
                    .chain(std::iter::once(Codeword::for_power(0)))),
            p_cur: Polynomial::new(
                (0..SYNDROMES).map(|_| Codeword::default())
                    .chain(std::iter::once(Codeword::for_power(0)))),
            deg_cur: 1,
            deg_saved: 0,
        }
    }
    /// Runs 2t iterations and returns the error locator polynomial.
    pub fn decode(mut self) -> Polynomial {
        for _ in 0..SYNDROMES {
            self.step();
        }
        self.p_cur
    }
    /// One iteration: a plain shift when the discrepancy (constant term of Q)
    /// is zero, otherwise an elimination step; optionally snapshots the
    /// current state for later elimination steps.
    fn step(&mut self) {
        let (save, q, p, d) = if self.q_cur.constant().zero() {
            self.reduce()
        } else {
            self.transform()
        };
        if save {
            self.q_saved = self.q_cur;
            self.p_saved = self.p_cur;
            self.deg_saved = self.deg_cur;
        }
        self.q_cur = q;
        self.p_cur = p;
        self.deg_cur = d;
    }
    /// Zero discrepancy: divide both current polynomials by x.
    fn reduce(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        (
            false,
            self.q_cur.shift(),
            self.p_cur.shift(),
            2 + self.deg_cur,
        )
    }
    /// Nonzero discrepancy: cancel Q's constant term against the saved
    /// polynomials; the state is saved when its degree is not smaller.
    fn transform(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        let mult = self.q_cur.constant() / self.q_saved.constant();
        (
            self.deg_cur >= self.deg_saved,
            (self.q_cur + self.q_saved * mult).shift(),
            (self.p_cur + self.p_saved * mult).shift(),
            2 + std::cmp::min(self.deg_cur, self.deg_saved),
        )
    }
}
/// Chien search over the error locator polynomial: tries each candidate root
/// in turn and yields the bit positions of located errors.
struct ErrorLocations {
    terms: Vec<Codeword>,
    pow: usize,
}
impl ErrorLocations {
    // Λ(x) = coefs[0] + coefs[1]*x + coefs[2]*x^2 + ...
    pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> ErrorLocations {
        ErrorLocations {
            // Pre-divide term p by α^p so each candidate is evaluated
            // incrementally: one multiply per term per step in update_terms.
            terms: coefs.enumerate().map(|(p, c)| {
                c / Codeword::for_power(p)
            }).collect(),
            pow: 0,
        }
    }
    /// Advances every term to the next candidate evaluation point.
    fn update_terms(&mut self) {
        for (j, term) in self.terms.iter_mut().enumerate() {
            *term = *term * Codeword::for_power(j);
        }
    }
    /// Evaluates Λ at the current candidate by summing the terms.
    fn sum_terms(&self) -> Codeword {
        self.terms.iter().fold(Codeword::default(), |s, &x| {
            s + x
        })
    }
}
impl Iterator for ErrorLocations {
    type Item = usize;
    /// Yields the next error location (bit position), if any remain.
    fn next(&mut self) -> Option<Self::Item> {
        while self.pow < POWERS.len() {
            let pow = self.pow;
            self.pow += 1;
            self.update_terms();
            if self.sum_terms().zero() {
                // A root at α^pow corresponds to an error at the bit position
                // given by the log of its inverse.
                return Some(Codeword::for_power(pow).invert().power().unwrap());
            }
        }
        None
    }
}
// Maps α^i to codewords.
// Antilog table: CODEWORDS[i] is the 6-bit representation of α^i, i = 0..62.
const CODEWORDS: &'static [u8] = &[
    0b100000,
    0b010000,
    0b001000,
    0b000100,
    0b000010,
    0b000001,
    0b110000,
    0b011000,
    0b001100,
    0b000110,
    0b000011,
    0b110001,
    0b101000,
    0b010100,
    0b001010,
    0b000101,
    0b110010,
    0b011001,
    0b111100,
    0b011110,
    0b001111,
    0b110111,
    0b101011,
    0b100101,
    0b100010,
    0b010001,
    0b111000,
    0b011100,
    0b001110,
    0b000111,
    0b110011,
    0b101001,
    0b100100,
    0b010010,
    0b001001,
    0b110100,
    0b011010,
    0b001101,
    0b110110,
    0b011011,
    0b111101,
    0b101110,
    0b010111,
    0b111011,
    0b101101,
    0b100110,
    0b010011,
    0b111001,
    0b101100,
    0b010110,
    0b001011,
    0b110101,
    0b101010,
    0b010101,
    0b111010,
    0b011101,
    0b111110,
    0b011111,
    0b111111,
    0b101111,
    0b100111,
    0b100011,
    0b100001
];
// Maps codewords to α^i.
// Log table: POWERS[c - 1] is the power i with α^i represented by c, for
// nonzero representations c = 1..63 (the zero element has no log).
const POWERS: &'static [usize] = &[
    5,
    4,
    10,
    3,
    15,
    9,
    29,
    2,
    34,
    14,
    50,
    8,
    37,
    28,
    20,
    1,
    25,
    33,
    46,
    13,
    53,
    49,
    42,
    7,
    17,
    36,
    39,
    27,
    55,
    19,
    57,
    0,
    62,
    24,
    61,
    32,
    23,
    45,
    60,
    12,
    31,
    52,
    22,
    48,
    44,
    41,
    59,
    6,
    11,
    16,
    30,
    35,
    51,
    38,
    21,
    26,
    47,
    54,
    43,
    18,
    40,
    56,
    58,
];
/// Unit tests for GF(2^6) arithmetic and BCH encode/decode.
#[cfg(test)]
mod test {
    use super::{encode, syndromes, Codeword, Polynomial, decode};
    #[test]
    fn test_for_power() {
        assert_eq!(Codeword::for_power(0).0, 0b100000);
        assert_eq!(Codeword::for_power(62).0, 0b100001);
        // Powers wrap modulo 63.
        assert_eq!(Codeword::for_power(63).0, 0b100000);
    }
    #[test]
    fn test_add_sub() {
        // Addition and subtraction are both XOR in characteristic 2.
        assert_eq!((Codeword::new(0b100000) + Codeword::new(0b010000)).0, 0b110000);
        assert_eq!((Codeword::new(0b100000) - Codeword::new(0b010000)).0, 0b110000);
        assert_eq!((Codeword::new(0b100001) + Codeword::new(0b100001)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) - Codeword::new(0b100001)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) + Codeword::new(0b110100)).0, 0b010101);
        assert_eq!((Codeword::new(0b100001) - Codeword::new(0b110100)).0, 0b010101);
    }
    #[test]
    fn test_mul() {
        assert_eq!((Codeword::new(0b011000) * Codeword::new(0b101000)).0, 0b011110);
        // Multiplying by zero yields zero.
        assert_eq!((Codeword::new(0b000000) * Codeword::new(0b101000)).0, 0b000000);
        assert_eq!((Codeword::new(0b011000) * Codeword::new(0b000000)).0, 0b000000);
        assert_eq!((Codeword::new(0b000000) * Codeword::new(0b000000)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) * Codeword::new(0b100000)).0, 0b100001);
        assert_eq!((Codeword::new(0b100001) * Codeword::new(0b010000)).0, 0b100000);
        assert_eq!((Codeword::new(0b110011) * Codeword::new(0b110011)).0, 0b100111);
        assert_eq!((Codeword::new(0b111101) * Codeword::new(0b111101)).0, 0b011001);
    }
    #[test]
    fn test_div() {
        assert_eq!((Codeword::new(0b000100) / Codeword::new(0b101000)).0, 0b111010);
        assert_eq!((Codeword::new(0b000000) / Codeword::new(0b101000)).0, 0b000000);
        // Dividing by α^0 (the identity) leaves the element unchanged.
        assert_eq!((Codeword::new(0b011110) / Codeword::new(0b100000)).0, 0b011110);
        assert_eq!((Codeword::new(0b011110) / Codeword::new(0b011110)).0, 0b100000);
    }
    #[test]
    fn test_cmp() {
        // Ordering is by discrete log, zero smallest.
        assert!(Codeword::new(0b100000) > Codeword::new(0b000000));
        assert!(Codeword::new(0b000000) == Codeword::new(0b000000));
        assert!(Codeword::new(0b010000) > Codeword::new(0b100000));
        assert!(Codeword::new(0b100001) > Codeword::new(0b100000));
    }
    #[test]
    fn test_encode() {
        assert_eq!(encode(0b1111111100000000), 0b1111111100000000100100110001000011000010001100000110100001101000);
        // The lowest output bit comes from the final row of GEN.
        assert_eq!(encode(0b0011)&1, 0);
        assert_eq!(encode(0b0101)&1, 1);
        assert_eq!(encode(0b1010)&1, 1);
        assert_eq!(encode(0b1100)&1, 0);
        assert_eq!(encode(0b1111)&1, 0);
    }
    #[test]
    fn test_syndromes() {
        // A clean codeword (parity bit stripped) has all-zero syndromes;
        // flipping any bit makes at least one syndrome nonzero.
        let w = encode(0b1111111100000000)>>1;
        assert!(syndromes(w).iter().all(|s| s.zero()));
        assert!(!syndromes(w ^ 1<<60).iter().all(|s| s.zero()));
    }
    #[test]
    fn test_polynomial() {
        let p = Polynomial::new((0..23).map(|i| {
            Codeword::for_power(i)
        }));
        assert!(p.degree().unwrap() == 22);
        assert!(p.constant() == Codeword::for_power(0));
        // shift divides by x: degree drops, constant becomes the old x term.
        let p = p.shift();
        assert!(p.degree().unwrap() == 21);
        assert!(p.constant() == Codeword::for_power(1));
        let q = p.clone() * Codeword::for_power(0);
        assert!(q.degree().unwrap() == 21);
        assert!(q.constant() == Codeword::for_power(1));
        let q = p.clone() * Codeword::for_power(2);
        assert!(q.degree().unwrap() == 21);
        assert!(q.constant() == Codeword::for_power(3));
        // Adding a polynomial to itself cancels every coefficient.
        let q = p.clone() + p.clone();
        assert!(q.constant().zero());
        for coef in q.coefs() {
            assert!(coef.zero());
        }
        let p = Polynomial::new((4..27).map(|i| {
            Codeword::for_power(i)
        }));
        let q = Polynomial::new((3..26).map(|i| {
            Codeword::for_power(i)
        }));
        let r = p + q.shift();
        assert!(r.coefs[0].zero());
        assert!(r.coefs[1].zero());
        assert!(r.coefs[2].zero());
        assert!(r.coefs[3].zero());
        assert!(r.coefs[4].zero());
        assert!(!r.coefs[22].zero());
        let p = Polynomial::new((0..2).map(|_| {
            Codeword::for_power(0)
        }));
        let q = Polynomial::new((0..4).map(|_| {
            Codeword::for_power(1)
        }));
        let r = p + q;
        assert!(r.coef(0) == Codeword::for_power(6));
    }
    #[test]
    fn test_decode() {
        // Corrupt 5 bits of a valid codeword and verify they are corrected.
        let w = encode(0b1111111100000000)>>1 ^ 0b11010011<<30;
        let d = decode(w);
        println!("{:?}", d);
        match d {
            Some((9187424089929167924, 5)) => {},
            _ => panic!(),
        }
    }
}
Further optimize BCH by using an iterator for syndromes
use std;
/// Number of data-carrying bits in a received word; the 64th transmitted bit
/// comes from the final row of `GEN`.
const WORD_SIZE: usize = 63;
/// Design distance of the code: 2t+1 = 23, so up to t = 11 bit errors are
/// correctable (see `ERRORS` below).
const DISTANCE: usize = 23;
/// Generator matrix, one row per output bit (64 rows -> a `u64` codeword).
/// `encode` dots each row with the 16-bit input word modulo 2. The first 16
/// rows are the identity, so the codeword is systematic (data bits first).
const GEN: &'static [u16] = &[
    0b1000000000000000,
    0b0100000000000000,
    0b0010000000000000,
    0b0001000000000000,
    0b0000100000000000,
    0b0000010000000000,
    0b0000001000000000,
    0b0000000100000000,
    0b0000000010000000,
    0b0000000001000000,
    0b0000000000100000,
    0b0000000000010000,
    0b0000000000001000,
    0b0000000000000100,
    0b0000000000000010,
    0b0000000000000001,
    0b1110110001000111,
    0b1001101001100100,
    0b0100110100110010,
    0b0010011010011001,
    0b1111111100001011,
    0b1001001111000010,
    0b0100100111100001,
    0b1100100010110111,
    0b1000100000011100,
    0b0100010000001110,
    0b0010001000000111,
    0b1111110101000100,
    0b0111111010100010,
    0b0011111101010001,
    0b1111001111101111,
    0b1001010110110000,
    0b0100101011011000,
    0b0010010101101100,
    0b0001001010110110,
    0b0000100101011011,
    0b1110100011101010,
    0b0111010001110101,
    0b1101011001111101,
    0b1000011101111001,
    0b1010111111111011,
    0b1011101110111010,
    0b0101110111011101,
    0b1100001010101001,
    0b1000110100010011,
    0b1010101011001110,
    0b0101010101100111,
    0b1100011011110100,
    0b0110001101111010,
    0b0011000110111101,
    0b1111010010011001,
    0b1001011000001011,
    0b1010011101000010,
    0b0101001110100001,
    0b1100010110010111,
    0b1000111010001100,
    0b0100011101000110,
    0b0010001110100011,
    0b1111110110010110,
    0b0111111011001011,
    0b1101001100100010,
    0b0110100110010001,
    0b1101100010001111,
    0b0000000000000011,
];
/// Encodes a 16-bit word into a 64-bit codeword, one output bit per row of
/// `GEN`: each bit is the mod-2 dot product (parity) of `word` with the row.
pub fn encode(word: u16) -> u64 {
    let mut out = 0u64;
    for row in GEN.iter() {
        let bit = ((word & row).count_ones() & 1) as u64;
        out = (out << 1) | bit;
    }
    out
}
/// Attempts to correct bit errors in the received 63-bit word.
///
/// Returns the corrected word and the number of bits that were flipped, or
/// `None` when the error pattern is uncorrectable.
pub fn decode(word: u64) -> Option<(u64, usize)> {
    // Run Berlekamp-Massey over the (lazily computed) syndromes to get the
    // error locator polynomial.
    let poly = BCHDecoder::new(Syndromes::new(word)).decode();
    // The degree of the locator polynomial is the number of errors; a zero
    // polynomial (no degree) means no locator could be derived.
    let errors = match poly.degree() {
        Some(deg) => deg,
        None => return None,
    };
    // Chien search: each yielded location is a bit position to flip.
    let locs = ErrorLocations::new(poly.coefs().iter().cloned());
    let (word, count) = locs.take(errors).fold((word, 0), |(word, s), loc| {
        (word ^ 1 << loc, s + 1)
    });
    // "If the Chien Search fails to find v roots of a error locator polynomial of degree
    // v, then the error pattern is an uncorrectable error pattern" -- Lecture 17:
    // Berlekamp-Massey Algorithm for Binary BCH Codes
    if count == errors {
        Some((word, errors))
    } else {
        None
    }
}
/// Lazy iterator over the syndromes S_1 .. S_{DISTANCE-1} of a received word,
/// replacing the previous eagerly-collected `Vec<Codeword>`.
struct Syndromes {
    /// Remaining syndrome indexes t to produce.
    pow: std::ops::Range<usize>,
    /// The received word, with r_0 as LSB.
    word: u64,
}
impl Syndromes {
    pub fn new(word: u64) -> Syndromes {
        Syndromes {
            pow: 1..DISTANCE,
            word: word,
        }
    }
}
impl Iterator for Syndromes {
    type Item = Codeword;
    /// Yields the next syndrome S_t, or `None` once all DISTANCE-1 have been
    /// produced. Each syndrome is the sum over the set bits b of α^(b*t).
    fn next(&mut self) -> Option<Self::Item> {
        // Copy the word out first so the closure doesn't borrow `self`.
        let word = self.word;
        // `Option::map` replaces the manual `Some => Some(...) / None => None`
        // match.
        self.pow.next().map(|pow| {
            (0..WORD_SIZE).fold(Codeword::default(), |s, b| {
                if word >> b & 1 == 0 {
                    s
                } else {
                    s + Codeword::for_power(b * pow)
                }
            })
        })
    }
}
/// An element of GF(2^6), stored in its 6-bit polynomial representation.
/// The zero element is represented by 0; nonzero elements are powers of the
/// primitive element α (see the CODEWORDS/POWERS tables below).
#[derive(Copy, Clone)]
struct Codeword(u8);
impl std::fmt::Debug for Codeword {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Debug-print the discrete log (power of α) rather than the raw bits.
        write!(fmt, "Codeword({:?})", self.power())
    }
}
impl Codeword {
    pub fn new(codeword: u8) -> Codeword {
        Codeword(codeword)
    }
    /// True for the zero element, which has no discrete log.
    pub fn zero(&self) -> bool { self.0 == 0 }
    /// Discrete log: the i such that this element equals α^i, or `None` for zero.
    pub fn power(&self) -> Option<usize> {
        if self.zero() {
            None
        } else {
            // POWERS is indexed by representation minus one (zero excluded).
            Some(POWERS[self.0 as usize - 1])
        }
    }
    /// Constructs α^power; powers wrap modulo 63 (the multiplicative order).
    pub fn for_power(power: usize) -> Codeword {
        Codeword::new(CODEWORDS[power % POWERS.len()])
    }
    /// Multiplicative inverse: α^-p = α^(63-p). Panics on the zero element.
    pub fn invert(self) -> Codeword {
        match self.power() {
            Some(p) => Codeword::for_power(POWERS.len() - p),
            None => panic!("divide by zero"),
        }
    }
}
impl Default for Codeword {
    /// The additive identity (zero element).
    fn default() -> Self {
        Codeword::new(0)
    }
}
impl std::ops::Mul for Codeword {
    type Output = Codeword;
    fn mul(self, rhs: Codeword) -> Self::Output {
        // Multiplication adds discrete logs; anything times zero is zero.
        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Codeword::for_power(p + q),
            _ => Codeword::default(),
        }
    }
}
impl std::ops::Div for Codeword {
    type Output = Codeword;
    fn div(self, rhs: Codeword) -> Self::Output {
        match (self.power(), rhs.power()) {
            // min(power) = -62 => 63+min(power) > 0
            (Some(p), Some(q)) => Codeword::for_power(p + POWERS.len() - q),
            // Zero divided by anything nonzero is zero.
            (None, Some(_)) => Codeword::default(),
            (_, None) => panic!("divide by zero"),
        }
    }
}
impl std::ops::Add for Codeword {
    type Output = Codeword;
    fn add(self, rhs: Codeword) -> Self::Output {
        // Addition in GF(2^6) is bitwise XOR of the representations.
        Codeword::new(self.0 ^ rhs.0)
    }
}
impl std::ops::Sub for Codeword {
    type Output = Codeword;
    fn sub(self, rhs: Codeword) -> Self::Output {
        // In characteristic 2, subtraction and addition coincide.
        self + rhs
    }
}
impl std::cmp::PartialEq for Codeword {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}
impl std::cmp::Eq for Codeword {}
impl std::cmp::PartialOrd for Codeword {
    /// Orders elements by discrete log, with zero smaller than everything else.
    fn partial_cmp(&self, rhs: &Self) -> Option<std::cmp::Ordering> {
        use std::cmp::Ordering::*;
        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Some(p.cmp(&q)),
            (Some(_), None) => Some(Greater),
            (None, Some(_)) => Some(Less),
            (None, None) => Some(Equal),
        }
    }
}
impl std::cmp::Ord for Codeword {
    fn cmp(&self, rhs: &Self) -> std::cmp::Ordering {
        // partial_cmp never returns None, so this unwrap cannot panic.
        self.partial_cmp(rhs).unwrap()
    }
}
// 2t+1 = 23 => t = 11
const ERRORS: usize = 11;
/// Number of syndromes consumed by Berlekamp-Massey: 2t.
const SYNDROMES: usize = 2 * ERRORS;
/// A polynomial over GF(2^6) with bounded degree. "Division by x" is done
/// cheaply by advancing `start` instead of moving coefficients.
#[derive(Copy, Clone)]
struct Polynomial {
    /// Coefficients of the polynomial.
    coefs: [Codeword; SYNDROMES + 2],
    /// Index into `coefs` of the degree-0 coefficient.
    start: usize,
}
impl Polynomial {
    /// Builds a polynomial from coefficients in ascending-degree order;
    /// positions not supplied stay zero.
    pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> Polynomial {
        let mut c = [Codeword::default(); SYNDROMES + 2];
        for (i, coef) in coefs.enumerate() {
            c[i] = c[i] + coef;
        }
        Polynomial {
            coefs: c,
            start: 0,
        }
    }
    /// The degree-0 (constant) coefficient.
    pub fn constant(&self) -> Codeword {
        self.coefs[self.start]
    }
    /// All coefficients from degree 0 upward.
    pub fn coefs(&self) -> &[Codeword] {
        &self.coefs[self.start..]
    }
    /// Highest degree with a nonzero coefficient, or `None` for the zero polynomial.
    pub fn degree(&self) -> Option<usize> {
        for (deg, coef) in self.coefs.iter().enumerate().rev() {
            if !coef.zero() {
                // Degrees are relative to `start`; slots below `start` are
                // always zeroed by `shift`, so this subtraction cannot underflow.
                return Some(deg - self.start);
            }
        }
        None
    }
    /// Divides by x: drops the constant term and shifts every degree down by one.
    pub fn shift(mut self) -> Polynomial {
        self.coefs[self.start] = Codeword::default();
        self.start += 1;
        self
    }
    /// Raw indexed access; out-of-range slots read as zero.
    fn get(&self, idx: usize) -> Codeword {
        match self.coefs.get(idx) {
            Some(&c) => c,
            None => Codeword::default(),
        }
    }
    /// Coefficient of x^deg (zero if beyond the stored range).
    pub fn coef(&self, deg: usize) -> Codeword {
        self.get(self.start + deg)
    }
}
impl std::fmt::Display for Polynomial {
    /// Formats the polynomial as a sum of `a^p*x^i` terms followed by "0".
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
        for (i, coef) in self.coefs().iter().enumerate() {
            // Zero coefficients have no power representation and are skipped;
            // `if let` replaces the match with an empty None arm.
            if let Some(p) = coef.power() {
                try!(write!(fmt, "a^{}*x^{} + ", p, i));
            }
        }
        try!(write!(fmt, "0"));
        Ok(())
    }
}
impl std::ops::Add for Polynomial {
    type Output = Polynomial;
    fn add(mut self, rhs: Polynomial) -> Self::Output {
        // `coef(i)` reads relative to each side's `start`, so besides summing
        // this also renormalizes the result back to start = 0. Reads are
        // always at index start+i >= i, ahead of the write at i, so no
        // already-overwritten slot is read.
        for i in 0..self.coefs.len() {
            self.coefs[i] = self.coef(i) + rhs.coef(i);
        }
        self.start = 0;
        self
    }
}
impl std::ops::Mul<Codeword> for Polynomial {
    type Output = Polynomial;
    /// Scales every coefficient by the given scalar.
    fn mul(mut self, rhs: Codeword) -> Self::Output {
        for coef in self.coefs.iter_mut() {
            *coef = *coef * rhs;
        }
        self
    }
}
/// Berlekamp-Massey decoder state. The `q` polynomials track the syndrome
/// sequence / discrepancies and the `p` polynomials build up the error
/// locator; `deg_*` are bookkeeping degrees used to decide when to snapshot.
struct BCHDecoder {
    p_cur: Polynomial,
    p_saved: Polynomial,
    q_cur: Polynomial,
    q_saved: Polynomial,
    deg_saved: usize,
    deg_cur: usize,
}
impl BCHDecoder {
    /// Initializes from any syndrome iterator: Q_saved(x) = 1 + S(x) and the
    /// "current" polynomials start as the saved ones divided by x.
    pub fn new<T: Iterator<Item = Codeword>>(syndromes: T) -> BCHDecoder {
        let q = Polynomial::new(std::iter::once(Codeword::for_power(0))
            .chain(syndromes.into_iter()));
        let p = Polynomial::new((0..SYNDROMES+1).map(|_| Codeword::default())
            .chain(std::iter::once(Codeword::for_power(0))));
        BCHDecoder {
            q_saved: q,
            q_cur: q.shift(),
            p_saved: p,
            p_cur: p.shift(),
            deg_saved: 0,
            deg_cur: 1,
        }
    }
    /// Runs 2t iterations and returns the error locator polynomial.
    pub fn decode(mut self) -> Polynomial {
        for _ in 0..SYNDROMES {
            self.step();
        }
        self.p_cur
    }
    /// One iteration: a plain shift when the discrepancy (constant term of Q)
    /// is zero, otherwise an elimination step; optionally snapshots the
    /// current state for later elimination steps.
    fn step(&mut self) {
        let (save, q, p, d) = if self.q_cur.constant().zero() {
            self.reduce()
        } else {
            self.transform()
        };
        if save {
            self.q_saved = self.q_cur;
            self.p_saved = self.p_cur;
            self.deg_saved = self.deg_cur;
        }
        self.q_cur = q;
        self.p_cur = p;
        self.deg_cur = d;
    }
    /// Zero discrepancy: divide both current polynomials by x.
    fn reduce(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        (
            false,
            self.q_cur.shift(),
            self.p_cur.shift(),
            2 + self.deg_cur,
        )
    }
    /// Nonzero discrepancy: cancel Q's constant term against the saved
    /// polynomials; the state is saved when its degree is not smaller.
    fn transform(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        let mult = self.q_cur.constant() / self.q_saved.constant();
        (
            self.deg_cur >= self.deg_saved,
            (self.q_cur + self.q_saved * mult).shift(),
            (self.p_cur + self.p_saved * mult).shift(),
            2 + std::cmp::min(self.deg_cur, self.deg_saved),
        )
    }
}
/// Chien search over the error locator polynomial: tries each candidate root
/// in turn and yields the bit positions of located errors.
struct ErrorLocations {
    terms: Vec<Codeword>,
    pow: usize,
}
impl ErrorLocations {
    // Λ(x) = coefs[0] + coefs[1]*x + coefs[2]*x^2 + ...
    pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> ErrorLocations {
        ErrorLocations {
            // Pre-divide term p by α^p so each candidate is evaluated
            // incrementally: one multiply per term per step in update_terms.
            terms: coefs.enumerate().map(|(p, c)| {
                c / Codeword::for_power(p)
            }).collect(),
            pow: 0,
        }
    }
    /// Advances every term to the next candidate evaluation point.
    fn update_terms(&mut self) {
        for (j, term) in self.terms.iter_mut().enumerate() {
            *term = *term * Codeword::for_power(j);
        }
    }
    /// Evaluates Λ at the current candidate by summing the terms.
    fn sum_terms(&self) -> Codeword {
        self.terms.iter().fold(Codeword::default(), |s, &x| {
            s + x
        })
    }
}
impl Iterator for ErrorLocations {
    type Item = usize;
    /// Yields the next error location (bit position), if any remain.
    fn next(&mut self) -> Option<Self::Item> {
        while self.pow < POWERS.len() {
            let pow = self.pow;
            self.pow += 1;
            self.update_terms();
            if self.sum_terms().zero() {
                // A root at α^pow corresponds to an error at the bit position
                // given by the log of its inverse.
                return Some(Codeword::for_power(pow).invert().power().unwrap());
            }
        }
        None
    }
}
// Maps α^i to codewords.
// Antilog table: CODEWORDS[i] is the 6-bit representation of α^i, i = 0..62.
const CODEWORDS: &'static [u8] = &[
    0b100000,
    0b010000,
    0b001000,
    0b000100,
    0b000010,
    0b000001,
    0b110000,
    0b011000,
    0b001100,
    0b000110,
    0b000011,
    0b110001,
    0b101000,
    0b010100,
    0b001010,
    0b000101,
    0b110010,
    0b011001,
    0b111100,
    0b011110,
    0b001111,
    0b110111,
    0b101011,
    0b100101,
    0b100010,
    0b010001,
    0b111000,
    0b011100,
    0b001110,
    0b000111,
    0b110011,
    0b101001,
    0b100100,
    0b010010,
    0b001001,
    0b110100,
    0b011010,
    0b001101,
    0b110110,
    0b011011,
    0b111101,
    0b101110,
    0b010111,
    0b111011,
    0b101101,
    0b100110,
    0b010011,
    0b111001,
    0b101100,
    0b010110,
    0b001011,
    0b110101,
    0b101010,
    0b010101,
    0b111010,
    0b011101,
    0b111110,
    0b011111,
    0b111111,
    0b101111,
    0b100111,
    0b100011,
    0b100001
];
// Maps codewords to α^i.
// Log table: POWERS[c - 1] is the power i with α^i represented by c, for
// nonzero representations c = 1..63 (the zero element has no log).
const POWERS: &'static [usize] = &[
    5,
    4,
    10,
    3,
    15,
    9,
    29,
    2,
    34,
    14,
    50,
    8,
    37,
    28,
    20,
    1,
    25,
    33,
    46,
    13,
    53,
    49,
    42,
    7,
    17,
    36,
    39,
    27,
    55,
    19,
    57,
    0,
    62,
    24,
    61,
    32,
    23,
    45,
    60,
    12,
    31,
    52,
    22,
    48,
    44,
    41,
    59,
    6,
    11,
    16,
    30,
    35,
    51,
    38,
    21,
    26,
    47,
    54,
    43,
    18,
    40,
    56,
    58,
];
/// Unit tests for GF(2^6) arithmetic and BCH encode/decode.
#[cfg(test)]
mod test {
    use super::{encode, Syndromes, Codeword, Polynomial, decode};
    #[test]
    fn test_for_power() {
        assert_eq!(Codeword::for_power(0).0, 0b100000);
        assert_eq!(Codeword::for_power(62).0, 0b100001);
        // Powers wrap modulo 63.
        assert_eq!(Codeword::for_power(63).0, 0b100000);
    }
    #[test]
    fn test_add_sub() {
        // Addition and subtraction are both XOR in characteristic 2.
        assert_eq!((Codeword::new(0b100000) + Codeword::new(0b010000)).0, 0b110000);
        assert_eq!((Codeword::new(0b100000) - Codeword::new(0b010000)).0, 0b110000);
        assert_eq!((Codeword::new(0b100001) + Codeword::new(0b100001)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) - Codeword::new(0b100001)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) + Codeword::new(0b110100)).0, 0b010101);
        assert_eq!((Codeword::new(0b100001) - Codeword::new(0b110100)).0, 0b010101);
    }
    #[test]
    fn test_mul() {
        assert_eq!((Codeword::new(0b011000) * Codeword::new(0b101000)).0, 0b011110);
        // Multiplying by zero yields zero.
        assert_eq!((Codeword::new(0b000000) * Codeword::new(0b101000)).0, 0b000000);
        assert_eq!((Codeword::new(0b011000) * Codeword::new(0b000000)).0, 0b000000);
        assert_eq!((Codeword::new(0b000000) * Codeword::new(0b000000)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) * Codeword::new(0b100000)).0, 0b100001);
        assert_eq!((Codeword::new(0b100001) * Codeword::new(0b010000)).0, 0b100000);
        assert_eq!((Codeword::new(0b110011) * Codeword::new(0b110011)).0, 0b100111);
        assert_eq!((Codeword::new(0b111101) * Codeword::new(0b111101)).0, 0b011001);
    }
    #[test]
    fn test_div() {
        assert_eq!((Codeword::new(0b000100) / Codeword::new(0b101000)).0, 0b111010);
        assert_eq!((Codeword::new(0b000000) / Codeword::new(0b101000)).0, 0b000000);
        // Dividing by α^0 (the identity) leaves the element unchanged.
        assert_eq!((Codeword::new(0b011110) / Codeword::new(0b100000)).0, 0b011110);
        assert_eq!((Codeword::new(0b011110) / Codeword::new(0b011110)).0, 0b100000);
    }
    #[test]
    fn test_cmp() {
        // Ordering is by discrete log, zero smallest.
        assert!(Codeword::new(0b100000) > Codeword::new(0b000000));
        assert!(Codeword::new(0b000000) == Codeword::new(0b000000));
        assert!(Codeword::new(0b010000) > Codeword::new(0b100000));
        assert!(Codeword::new(0b100001) > Codeword::new(0b100000));
    }
    #[test]
    fn test_encode() {
        assert_eq!(encode(0b1111111100000000), 0b1111111100000000100100110001000011000010001100000110100001101000);
        // The lowest output bit comes from the final row of GEN.
        assert_eq!(encode(0b0011)&1, 0);
        assert_eq!(encode(0b0101)&1, 1);
        assert_eq!(encode(0b1010)&1, 1);
        assert_eq!(encode(0b1100)&1, 0);
        assert_eq!(encode(0b1111)&1, 0);
    }
    #[test]
    fn test_syndromes() {
        // A clean codeword (parity bit stripped) has all-zero syndromes;
        // flipping any bit makes at least one syndrome nonzero.
        let w = encode(0b1111111100000000)>>1;
        assert!(Syndromes::new(w).all(|s| s.zero()));
        assert!(!Syndromes::new(w ^ 1<<60).all(|s| s.zero()));
    }
    #[test]
    fn test_polynomial() {
        let p = Polynomial::new((0..23).map(|i| {
            Codeword::for_power(i)
        }));
        assert!(p.degree().unwrap() == 22);
        assert!(p.constant() == Codeword::for_power(0));
        // shift divides by x: degree drops, constant becomes the old x term.
        let p = p.shift();
        assert!(p.degree().unwrap() == 21);
        assert!(p.constant() == Codeword::for_power(1));
        let q = p.clone() * Codeword::for_power(0);
        assert!(q.degree().unwrap() == 21);
        assert!(q.constant() == Codeword::for_power(1));
        let q = p.clone() * Codeword::for_power(2);
        assert!(q.degree().unwrap() == 21);
        assert!(q.constant() == Codeword::for_power(3));
        // Adding a polynomial to itself cancels every coefficient.
        let q = p.clone() + p.clone();
        assert!(q.constant().zero());
        for coef in q.coefs() {
            assert!(coef.zero());
        }
        let p = Polynomial::new((4..27).map(|i| {
            Codeword::for_power(i)
        }));
        let q = Polynomial::new((3..26).map(|i| {
            Codeword::for_power(i)
        }));
        let r = p + q.shift();
        assert!(r.coefs[0].zero());
        assert!(r.coefs[1].zero());
        assert!(r.coefs[2].zero());
        assert!(r.coefs[3].zero());
        assert!(r.coefs[4].zero());
        assert!(!r.coefs[22].zero());
        let p = Polynomial::new((0..2).map(|_| {
            Codeword::for_power(0)
        }));
        let q = Polynomial::new((0..4).map(|_| {
            Codeword::for_power(1)
        }));
        let r = p + q;
        assert!(r.coef(0) == Codeword::for_power(6));
    }
    #[test]
    fn test_decode() {
        // Corrupt 5 bits of a valid codeword and verify they are corrected.
        let w = encode(0b1111111100000000)>>1 ^ 0b11010011<<30;
        let d = decode(w);
        println!("{:?}", d);
        match d {
            Some((9187424089929167924, 5)) => {},
            _ => panic!(),
        }
    }
}
|
//! Basic parsers.
use {Input, SimpleResult};
use err;
use internal::InputModify;
/// Matches any item, returning it if present.
///
/// If the buffer length is 0 this parser is considered incomplete.
///
/// ```
/// use chomp::{Input, any};
///
/// let p = Input::new(b"abc");
///
/// assert_eq!(any(p).unwrap(), b'a');
/// ```
#[inline]
pub fn any<'a, I: 'a + Copy>(i: Input<'a, I>) -> SimpleResult<'a, I, I> {
    match i.buffer().first() {
        // Empty buffer: request at least one more item.
        None => i.incomplete(1),
        // Consume one item from the buffer and yield it.
        Some(&c) => i.modify(|b| &b[1..]).data(c),
    }
}
/// Matches an item using ``f``, the item is returned if ``f`` yields true, otherwise this parser
/// fails.
///
/// If the buffer length is 0 this parser is considered incomplete.
///
/// ```
/// use chomp::{Input, satisfy};
///
/// let p = Input::new(b"abc");
///
/// assert_eq!(satisfy(p, |c| c == b'a').unwrap(), b'a');
/// ```
#[inline]
pub fn satisfy<'a, I: 'a + Copy, F>(i: Input<'a, I>, f: F) -> SimpleResult<'a, I, I>
  where F: FnOnce(I) -> bool {
    match i.buffer().first() {
        // Empty buffer: request at least one more item.
        None => i.incomplete(1),
        // The predicate accepted the item: consume and yield it.
        Some(&c) if f(c) => i.modify(|b| &b[1..]).data(c),
        // The predicate rejected the item: fail without consuming.
        Some(_) => i.error(err::unexpected()),
    }
}
/// Matches a single token, returning the match on success.
///
/// If the buffer length is 0 this parser is considered incomplete.
///
/// ```
/// use chomp::{Input, token};
///
/// let p = Input::new(b"abc");
///
/// assert_eq!(token(p, b'a').unwrap(), b'a');
/// ```
#[inline]
pub fn token<'a, I: 'a + Copy + PartialEq>(i: Input<'a, I>, t: I) -> SimpleResult<'a, I, I> {
    match i.buffer().first() {
        // Empty buffer: request at least one more item.
        None => i.incomplete(1),
        // Exact match: consume and yield the token.
        Some(&c) if t == c => i.modify(|b| &b[1..]).data(c),
        // Wrong token: fail without consuming, reporting what was expected.
        Some(_) => i.error(err::expected(t)),
    }
}
/// Matches a single token as long as it is not equal to `t`, returning the match on success.
///
/// If the buffer length is 0 this parser is considered incomplete.
///
/// ```
/// use chomp::{Input, not_token};
///
/// let p1 = Input::new(b"abc");
///
/// assert_eq!(not_token(p1, b'b').unwrap(), b'a');
///
/// let p2 = Input::new(b"abc");
///
/// assert_eq!(not_token(p2, b'c').unwrap(), b'a');
/// ```
#[inline]
pub fn not_token<'a, I: 'a + Copy + PartialEq>(i: Input<'a, I>, t: I) -> SimpleResult<'a, I, I> {
    match i.buffer().first() {
        // Empty buffer: request at least one more item.
        None => i.incomplete(1),
        // The item differs from `t`: consume and yield it.
        Some(&c) if t != c => i.modify(|b| &b[1..]).data(c),
        // The item equals `t`: fail without consuming.
        Some(_) => i.error(err::unexpected()),
    }
}
/// Matches any item but does not consume it, on success it gives ``Some`` but if no input remains
/// ``None`` is produced.
///
/// This parser is never considered incomplete.
///
/// ```
/// use chomp::{Input, peek};
///
/// let p1 = Input::new(b"abc");
///
/// assert_eq!(peek(p1).unwrap(), Some(b'a'));
///
/// let p2 = Input::new(b"");
///
/// assert_eq!(peek(p2).unwrap(), None);
/// ```
#[inline]
pub fn peek<'a, I: 'a + Copy>(i: Input<'a, I>) -> SimpleResult<'a, I, Option<I>> {
    // `cloned` copies the item out of the buffer (I: Copy implies Clone),
    // replacing the manual `map(|&c| c)`; the buffer itself is untouched.
    let d = i.buffer().first().cloned();
    i.data(d)
}
/// Matches ``num`` items no matter what they are, returning a slice of the matched items.
///
/// If the buffer length is less than ``num`` this parser is considered incomplete.
///
/// ```
/// use chomp::{Input, take};
///
/// let p = Input::new(b"abcd");
///
/// assert_eq!(take(p, 3).unwrap(), b"abc");
/// ```
#[inline]
pub fn take<'a, I: 'a + Copy>(i: Input<'a, I>, num: usize) -> SimpleResult<'a, I, &'a [I]> {
    let b = i.buffer();

    if b.len() < num {
        // Not enough data buffered yet.
        i.incomplete(num)
    } else {
        // Split off the first ``num`` items; the rest becomes the new buffer.
        let (matched, rest) = b.split_at(num);

        i.replace(rest).data(matched)
    }
}
/// Matches all items while ``f`` returns true, returns a slice of all the matched items.
///
/// If no failure can be found the parser will be considered to be incomplete as there might be
/// more input which needs to be matched.
///
/// ```
/// use chomp::{Input, take_while};
///
/// let p = Input::new(b"abcdcba");
///
/// assert_eq!(take_while(p, |c| c == b'a' || c == b'b').unwrap(), b"ab");
/// ```
///
/// Without managing to match anything:
///
/// ```
/// use chomp::{Input, take_while};
///
/// let p = Input::new(b"abcdcba");
///
/// assert_eq!(take_while(p, |c| c == b'z').unwrap(), b"");
/// ```
#[inline]
pub fn take_while<'a, I: 'a + Copy, F>(i: Input<'a, I>, f: F) -> SimpleResult<'a, I, &'a [I]>
  where F: Fn(I) -> bool {
    let b = i.buffer();

    // Index of the first item rejected by ``f``; everything before it is the match.
    // (The doc previously claimed the opposite predicate sense, which was wrong.)
    match b.iter().position(|&c| !f(c)) {
        Some(n) => i.replace(&b[n..]).data(&b[..n]),
        // TODO: Should this following 1 be something else, seeing as take_while1 is potentially
        // infinite?
        None => i.incomplete(1),
    }
}
/// Matches all items while ``f`` returns true, if at least one item matched this parser succeeds
/// and returns a slice of all the matched items.
///
/// If no failure can be found the parser will be considered to be incomplete as there might be
/// more input which needs to be matched. If zero items were matched an error will be returned.
///
/// ```
/// use chomp::{Input, take_while1};
///
/// let p = Input::new(b"abcdcba");
///
/// assert_eq!(take_while1(p, |c| c == b'a' || c == b'b').unwrap(), b"ab");
/// ```
#[inline]
pub fn take_while1<'a, I: 'a + Copy, F>(i: Input<'a, I>, f: F) -> SimpleResult<'a, I, &'a [I]>
  where F: Fn(I) -> bool {
    let b = i.buffer();

    // Index of the first item rejected by ``f``. Zero matched items is an error.
    match b.iter().position(|&c| !f(c)) {
        Some(0) => i.error(err::unexpected()),
        Some(n) => i.replace(&b[n..]).data(&b[..n]),
        // TODO: Should this following 1 be something else, seeing as take_while1 is potentially
        // infinite?
        None    => i.incomplete(1),
    }
}
/// Matches all items until ``f`` returns true, all items to that point will be returned as a slice
/// upon success.
///
/// If no failure can be found the parser will be considered to be incomplete as there might be
/// more input which needs to be matched.
///
/// ```
/// use chomp::{Input, take_till};
///
/// let p = Input::new(b"abcdef");
///
/// assert_eq!(take_till(p, |c| c == b'd').unwrap(), b"abc");
/// ```
#[inline]
pub fn take_till<'a, I: 'a + Copy, F>(i: Input<'a, I>, f: F) -> SimpleResult<'a, I, &'a [I]>
  where F: Fn(I) -> bool {
    let b = i.buffer();

    // Scan for the first item accepted by ``f``; it terminates (but is not part of)
    // the returned slice, and is left in the buffer.
    match b.iter().position(|&c| f(c)) {
        Some(n) => i.replace(&b[n..]).data(&b[..n]),
        // No terminator seen yet: more input might still contain one.
        None    => i.incomplete(1),
    }
}
/// Matches the remainder of the buffer and returns it, always succeeds.
///
/// ```
/// use chomp::{Input, take_remainder};
///
/// let p = Input::new(b"abcd");
///
/// assert_eq!(take_remainder(p).unwrap(), b"abcd");
/// ```
#[inline]
pub fn take_remainder<'a, I: Copy>(i: Input<'a, I>) -> SimpleResult<'a, I, &'a [I]> {
    let b = i.buffer();

    // Swap in a zero-sized slice for the buffer and yield everything that was left.
    // The empty slice is taken from ``b`` itself as a cheap way to avoid a branch
    // or overflow; where it points does not matter.
    i.replace(&b[..0]).data(b)
}
/// Matches the given slice against the parser, returning the matched slice upon success.
///
/// If the length of the contained data is shorter than the given slice this parser is considered
/// incomplete.
///
/// ```
/// use chomp::{Input, string};
///
/// let p = Input::new(b"abcdef");
///
/// assert_eq!(string(p, b"abc").unwrap(), b"abc");
/// ```
#[inline]
pub fn string<'a, 'b, I: Copy + PartialEq>(i: Input<'a, I>, s: &'b [I])
    -> SimpleResult<'a, I, &'a [I]> {
    let b = i.buffer();

    if s.len() > b.len() {
        return i.incomplete(s.len() - b.len());
    }

    let d = &b[..s.len()];

    // Report the position of the first mismatch, if there is one.
    if let Some(j) = s.iter().zip(d.iter()).position(|(x, y)| x != y) {
        return err::string(i, j, s);
    }

    i.replace(&b[s.len()..]).data(d)
}
#[cfg(test)]
mod test {
    use super::{take_while1, token, take_remainder};
    use {Input, SimpleResult};

    // End-to-end check: parse "123.4567" into an (integer, fraction) pair and
    // verify the unconsumed remainder (the trailing space) is reported back.
    #[test]
    fn parse_decimal() {
        fn is_digit(c: u8) -> bool {
            c >= b'0' && c <= b'9'
        }

        // Folds a non-empty run of ASCII digits into a usize.
        fn decimal(i: Input<u8>) -> SimpleResult<u8, usize> {
            take_while1(i, is_digit).bind(|i, bytes|
                i.ret(bytes.iter().fold(0, |a, b| a * 10 + (b - b'0') as usize)))
        }

        let i = Input::new(b"123.4567 ");
        // integer '.' integer, threaded through bind.
        let p = decimal(i).bind(|i, real|
            token(i, b'.').bind(|i, _|
                decimal(i).bind(|i, frac|
                    i.ret((real, frac)))));

        let d: SimpleResult<_, _> = p.bind(|i, num| take_remainder(i)
                                           .bind(|i, r| i.ret((r, num))));

        let (buf, state) = d.unwrap();

        assert_eq!(buf, &[b' ']);
        assert_eq!(state, (123, 4567));
    }

    // take_remainder must succeed (with an empty slice) even on empty input.
    #[test]
    fn parse_remainder_empty() {
        let i = Input::new(b"");

        let r = take_remainder(i);

        assert_eq!(r.unwrap(), b"" as &[u8]);
    }
}
Parsers: take_while and take_while1 now respect END_OF_INPUT
//! Basic parsers.
use {Input, SimpleResult};
use err;
use internal::InputModify;
/// Matches any item, returning it if present.
///
/// If the buffer length is 0 this parser is considered incomplete.
///
/// ```
/// use chomp::{Input, any};
///
/// let p = Input::new(b"abc");
///
/// assert_eq!(any(p).unwrap(), b'a');
/// ```
#[inline]
pub fn any<'a, I: 'a + Copy>(i: Input<'a, I>) -> SimpleResult<'a, I, I> {
    // Consume and yield the first buffered item; empty input is incomplete.
    match i.buffer().first() {
        Some(&c) => i.modify(|b| &b[1..]).data(c),
        None     => i.incomplete(1),
    }
}
/// Matches an item using ``f``, the item is returned if ``f`` yields true, otherwise this parser
/// fails.
///
/// If the buffer length is 0 this parser is considered incomplete.
///
/// ```
/// use chomp::{Input, satisfy};
///
/// let p = Input::new(b"abc");
///
/// assert_eq!(satisfy(p, |c| c == b'a').unwrap(), b'a');
/// ```
#[inline]
pub fn satisfy<'a, I: 'a + Copy, F>(i: Input<'a, I>, f: F) -> SimpleResult<'a, I, I>
  where F: FnOnce(I) -> bool {
    // Peek at the head of the buffer and only consume it when the predicate holds.
    match i.buffer().first() {
        Some(&c) if f(c) => i.modify(|b| &b[1..]).data(c),
        Some(_)          => i.error(err::unexpected()),
        None             => i.incomplete(1),
    }
}
/// Matches a single token, returning the match on success.
///
/// If the buffer length is 0 this parser is considered incomplete.
///
/// ```
/// use chomp::{Input, token};
///
/// let p = Input::new(b"abc");
///
/// assert_eq!(token(p, b'a').unwrap(), b'a');
/// ```
#[inline]
pub fn token<'a, I: 'a + Copy + PartialEq>(i: Input<'a, I>, t: I) -> SimpleResult<'a, I, I> {
    // Consume exactly one item iff it equals the expected token ``t``.
    match i.buffer().first() {
        Some(&c) if c == t => i.modify(|b| &b[1..]).data(c),
        Some(_)            => i.error(err::expected(t)),
        None               => i.incomplete(1),
    }
}
/// Matches a single token as long as it is not equal to `t`, returning the match on success.
///
/// If the buffer length is 0 this parser is considered incomplete.
///
/// ```
/// use chomp::{Input, not_token};
///
/// let p1 = Input::new(b"abc");
///
/// assert_eq!(not_token(p1, b'b').unwrap(), b'a');
///
/// let p2 = Input::new(b"abc");
///
/// assert_eq!(not_token(p2, b'b').unwrap(), b'a');
/// ```
#[inline]
pub fn not_token<'a, I: 'a + Copy + PartialEq>(i: Input<'a, I>, t: I) -> SimpleResult<'a, I, I> {
    // Consume one item as long as it differs from the forbidden token ``t``.
    match i.buffer().first() {
        Some(&c) if c != t => i.modify(|b| &b[1..]).data(c),
        Some(_)            => i.error(err::unexpected()),
        None               => i.incomplete(1),
    }
}
/// Matches any item but does not consume it, on success it gives ``Some`` but if no input remains
/// ``None`` is produced.
///
/// This parser is never considered incomplete.
///
/// ```
/// use chomp::{Input, peek};
///
/// let p1 = Input::new(b"abc");
///
/// assert_eq!(peek(p1).unwrap(), Some(b'a'));
///
/// let p2 = Input::new(b"");
///
/// assert_eq!(peek(p2).unwrap(), None);
/// ```
#[inline]
pub fn peek<'a, I: 'a + Copy>(i: Input<'a, I>) -> SimpleResult<'a, I, Option<I>> {
    // Copy out the first item (if any) without advancing the buffer.
    let head = match i.buffer().first() {
        Some(&c) => Some(c),
        None     => None,
    };

    i.data(head)
}
/// Matches ``num`` items no matter what they are, returning a slice of the matched items.
///
/// If the buffer length is less than ``num`` this parser is considered incomplete.
///
/// ```
/// use chomp::{Input, take};
///
/// let p = Input::new(b"abcd");
///
/// assert_eq!(take(p, 3).unwrap(), b"abc");
/// ```
#[inline]
pub fn take<'a, I: 'a + Copy>(i: Input<'a, I>, num: usize) -> SimpleResult<'a, I, &'a [I]> {
    let b = i.buffer();

    if b.len() < num {
        // Not enough data buffered yet.
        i.incomplete(num)
    } else {
        // Split off the first ``num`` items; the rest becomes the new buffer.
        let (matched, rest) = b.split_at(num);

        i.replace(rest).data(matched)
    }
}
/// Matches all items while ``f`` returns true, returns a slice of all the matched items.
///
/// If the end of the current slice is reached without ``f`` rejecting an item, the parser is
/// considered incomplete when more input may follow; on the last slice of input the whole
/// remaining buffer is returned instead.
///
/// ```
/// use chomp::{Input, take_while};
///
/// let p = Input::new(b"abcdcba");
///
/// assert_eq!(take_while(p, |c| c == b'a' || c == b'b').unwrap(), b"ab");
/// ```
///
/// Without managing to match anything:
///
/// ```
/// use chomp::{Input, take_while};
///
/// let p = Input::new(b"abcdcba");
///
/// assert_eq!(take_while(p, |c| c == b'z').unwrap(), b"");
/// ```
#[inline]
pub fn take_while<'a, I: 'a + Copy, F>(i: Input<'a, I>, f: F) -> SimpleResult<'a, I, &'a [I]>
  where F: Fn(I) -> bool {
    let b = i.buffer();

    // Index of the first item rejected by ``f``; everything before it is the match.
    // (The doc previously claimed the opposite predicate sense, which was wrong.)
    match b.iter().position(|&c| !f(c)) {
        Some(n) => i.replace(&b[n..]).data(&b[..n]),
        // Every buffered item matched. At end of input the whole buffer is the
        // complete match: leave a zero-sized slice behind (taken from ``b`` to
        // avoid a branch/overflow; its location does not matter).
        None if i.is_last_slice() => i.replace(&b[..0]).data(b),
        // Otherwise more input could still extend the match.
        None                      => i.incomplete(1),
    }
}
/// Matches all items while ``f`` returns true, if at least one item matched this parser succeeds
/// and returns a slice of all the matched items.
///
/// If no failure can be found the parser will be considered to be incomplete as there might be
/// more input which needs to be matched. If zero items were matched an error will be returned.
///
/// ```
/// use chomp::{Input, take_while1};
///
/// let p = Input::new(b"abcdcba");
///
/// assert_eq!(take_while1(p, |c| c == b'a' || c == b'b').unwrap(), b"ab");
/// ```
#[inline]
pub fn take_while1<'a, I: 'a + Copy, F>(i: Input<'a, I>, f: F) -> SimpleResult<'a, I, &'a [I]>
  where F: Fn(I) -> bool {
    let b = i.buffer();

    // Index of the first item rejected by ``f``. Zero matched items is an error.
    match b.iter().position(|&c| !f(c)) {
        Some(0) => i.error(err::unexpected()),
        Some(n) => i.replace(&b[n..]).data(&b[..n]),
        // Every buffered item matched. At end of input the whole buffer is the
        // complete match: leave a zero-sized slice behind (taken from ``b`` to
        // avoid a branch/overflow; its location does not matter).
        None if i.is_last_slice() => i.replace(&b[..0]).data(b),
        // Otherwise more input could still extend the match.
        None                      => i.incomplete(1),
    }
}
/// Matches all items until ``f`` returns true, all items to that point will be returned as a slice
/// upon success.
///
/// If no failure can be found the parser will be considered to be incomplete as there might be
/// more input which needs to be matched.
///
/// ```
/// use chomp::{Input, take_till};
///
/// let p = Input::new(b"abcdef");
///
/// assert_eq!(take_till(p, |c| c == b'd').unwrap(), b"abc");
/// ```
#[inline]
pub fn take_till<'a, I: 'a + Copy, F>(i: Input<'a, I>, f: F) -> SimpleResult<'a, I, &'a [I]>
  where F: Fn(I) -> bool {
    let b = i.buffer();

    // Scan for the first item accepted by ``f``; it terminates (but is not part of)
    // the returned slice, and is left in the buffer.
    match b.iter().position(|&c| f(c)) {
        Some(n) => i.replace(&b[n..]).data(&b[..n]),
        // No terminator seen yet: more input might still contain one.
        None    => i.incomplete(1),
    }
}
/// Matches the remainder of the buffer and returns it, always succeeds.
///
/// ```
/// use chomp::{Input, take_remainder};
///
/// let p = Input::new(b"abcd");
///
/// assert_eq!(take_remainder(p).unwrap(), b"abcd");
/// ```
#[inline]
pub fn take_remainder<'a, I: Copy>(i: Input<'a, I>) -> SimpleResult<'a, I, &'a [I]> {
    let b = i.buffer();

    // Swap in a zero-sized slice for the buffer and yield everything that was left.
    // The empty slice is taken from ``b`` itself as a cheap way to avoid a branch
    // or overflow; where it points does not matter.
    i.replace(&b[..0]).data(b)
}
/// Matches the given slice against the parser, returning the matched slice upon success.
///
/// If the length of the contained data is shorter than the given slice this parser is considered
/// incomplete.
///
/// ```
/// use chomp::{Input, string};
///
/// let p = Input::new(b"abcdef");
///
/// assert_eq!(string(p, b"abc").unwrap(), b"abc");
/// ```
#[inline]
pub fn string<'a, 'b, I: Copy + PartialEq>(i: Input<'a, I>, s: &'b [I])
    -> SimpleResult<'a, I, &'a [I]> {
    let b = i.buffer();

    if s.len() > b.len() {
        return i.incomplete(s.len() - b.len());
    }

    let d = &b[..s.len()];

    // Report the position of the first mismatch, if there is one.
    if let Some(j) = s.iter().zip(d.iter()).position(|(x, y)| x != y) {
        return err::string(i, j, s);
    }

    i.replace(&b[s.len()..]).data(d)
}
#[cfg(test)]
mod test {
    use super::{take_while1, token, take_remainder};
    use {Input, SimpleResult};

    // End-to-end check: parse "123.4567" into an (integer, fraction) pair and
    // verify the unconsumed remainder (the trailing space) is reported back.
    #[test]
    fn parse_decimal() {
        fn is_digit(c: u8) -> bool {
            c >= b'0' && c <= b'9'
        }

        // Folds a non-empty run of ASCII digits into a usize.
        fn decimal(i: Input<u8>) -> SimpleResult<u8, usize> {
            take_while1(i, is_digit).bind(|i, bytes|
                i.ret(bytes.iter().fold(0, |a, b| a * 10 + (b - b'0') as usize)))
        }

        let i = Input::new(b"123.4567 ");
        // integer '.' integer, threaded through bind.
        let p = decimal(i).bind(|i, real|
            token(i, b'.').bind(|i, _|
                decimal(i).bind(|i, frac|
                    i.ret((real, frac)))));

        let d: SimpleResult<_, _> = p.bind(|i, num| take_remainder(i)
                                           .bind(|i, r| i.ret((r, num))));

        let (buf, state) = d.unwrap();

        assert_eq!(buf, &[b' ']);
        assert_eq!(state, (123, 4567));
    }

    // take_remainder must succeed (with an empty slice) even on empty input.
    #[test]
    fn parse_remainder_empty() {
        let i = Input::new(b"");

        let r = take_remainder(i);

        assert_eq!(r.unwrap(), b"" as &[u8]);
    }
}
|
//! Compiler plugin for Rust-PHF
//!
//! See the documentation for the `phf` crate for more details.
#[crate_id="github.com/sfackler/rust-phf/phf_mac"];
#[crate_type="dylib"];
#[doc(html_root_url="http://www.rust-ci.org/sfackler/rust-phf/doc")];
#[feature(managed_boxes, macro_registrar, quote)];
extern crate collections;
extern crate extra;
extern crate rand;
extern crate syntax;
extern crate time;
extern crate phf;
use collections::HashMap;
use std::os;
use std::vec_ng::Vec;
use syntax::ast;
use syntax::ast::{Name, TokenTree, LitStr, MutImmutable, Expr, ExprVec, ExprLit};
use syntax::codemap::Span;
use syntax::ext::base::{SyntaxExtension,
ExtCtxt,
MacResult,
MRExpr,
NormalTT,
BasicMacroExpander};
use syntax::parse;
use syntax::parse::token;
use syntax::parse::token::{InternedString, COMMA, EOF, FAT_ARROW};
// Target average number of keys per first-level hash bucket (buckets_len is
// computed as ceil(entries / DEFAULT_LAMBDA) in generate_hash).
static DEFAULT_LAMBDA: uint = 5;
// Entry point invoked by the compiler when this syntax-extension dylib is
// loaded: registers `phf_map!` as a normal token-tree macro.
#[macro_registrar]
#[doc(hidden)]
pub fn macro_registrar(register: |Name, SyntaxExtension|) {
    register(token::intern("phf_map"),
             NormalTT(~BasicMacroExpander {
                 expander: expand_mphf_map,
                 span: None
             },
             None));
}
/// A single `key => value` pair parsed from a `phf_map!` invocation.
struct Entry {
    /// Interned contents of the string-literal key; used for sorting,
    /// duplicate detection and hashing.
    key_str: InternedString,
    /// The key expression as written in the macro invocation.
    key: @Expr,
    /// The value expression as written in the macro invocation.
    value: @Expr
}
// Expander for `phf_map!`: parses the entries, rejects duplicate keys,
// searches (retrying with fresh random keys) for a perfect-hash state, and
// emits the final `PhfMap` literal expression.
fn expand_mphf_map(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> MacResult {
    let mut entries = match parse_entries(cx, tts) {
        Some(entries) => entries,
        None => return MacResult::dummy_expr(sp)
    };
    // Sort by key so duplicate detection only needs to compare neighbours.
    entries.sort_by(|a, b| a.key_str.cmp(&b.key_str));
    if has_duplicates(cx, sp, entries.as_slice()) {
        return MacResult::dummy_expr(sp);
    }
    let start = time::precise_time_s();
    let state;
    // Hash generation is randomized and may fail for a given pair of keys;
    // keep retrying until a displacement assignment is found.
    loop {
        match generate_hash(entries.as_slice()) {
            Some(s) => {
                state = s;
                break;
            }
            None => {}
        }
    }
    let time = time::precise_time_s() - start;
    // Opt-in timing output for diagnosing slow map generation.
    if os::getenv("PHF_STATS").is_some() {
        cx.span_note(sp, format!("PHF generation took {} seconds", time));
    }
    create_map(cx, sp, entries, state)
}
// Parses the `"key" => value,` list from the macro's token trees.
//
// Returns `None` (after reporting via `cx`) on malformed syntax, a non-string
// key, or a map larger than `phf::MAX_SIZE`.
fn parse_entries(cx: &mut ExtCtxt, tts: &[TokenTree]) -> Option<Vec<Entry>> {
    let mut parser = parse::new_parser_from_tts(cx.parse_sess(), cx.cfg(),
                                                tts.iter().map(|x| x.clone())
                                                   .collect());
    let mut entries = Vec::new();
    // Set when a key is not a string literal; parsing continues so that every
    // such error is reported in a single pass.
    let mut bad = false;
    while parser.token != EOF {
        // Expand the key expression first so that macro invocations which
        // produce string literals are accepted as keys.
        let key = cx.expand_expr(parser.parse_expr());
        let key_str = match key.node {
            ExprLit(lit) => {
                match lit.node {
                    LitStr(ref s, _) => s.clone(),
                    _ => {
                        cx.span_err(key.span, "expected string literal");
                        bad = true;
                        InternedString::new("")
                    }
                }
            }
            _ => {
                cx.span_err(key.span, "expected string literal");
                bad = true;
                InternedString::new("")
            }
        };
        if !parser.eat(&FAT_ARROW) {
            cx.span_err(parser.span, "expected `=>`");
            return None;
        }
        let value = parser.parse_expr();
        entries.push(Entry {
            key_str: key_str,
            key: key,
            value: value
        });
        // A trailing comma is optional, but entries must otherwise be
        // comma-separated.
        if !parser.eat(&COMMA) && parser.token != EOF {
            cx.span_err(parser.span, "expected `,`");
            return None;
        }
    }
    if entries.len() > phf::MAX_SIZE {
        cx.span_err(parser.span,
                    format!("maps with more than {} entries are not supported",
                            phf::MAX_SIZE));
        return None;
    }
    if bad {
        return None;
    }
    Some(entries)
}
// Reports an error for every run of equal keys in the (pre-sorted) entry
// list and returns true if any duplicates were found.
//
// `in_dup` tracks whether we are already inside a run of equal keys so the
// "duplicate key" header is emitted once per run, with one note per occurrence.
fn has_duplicates(cx: &mut ExtCtxt, sp: Span, entries: &[Entry]) -> bool {
    let mut dups = false;
    let mut in_dup = false;
    for window in entries.windows(2) {
        let ref a = window[0];
        let ref b = window[1];
        if a.key_str == b.key_str {
            dups = true;
            if !in_dup {
                cx.span_err(sp, format!("duplicate key \"{}\"", a.key_str));
                cx.span_note(a.key.span, "one occurrence here");
                in_dup = true;
            }
            cx.span_note(b.key.span, "one occurrence here");
        } else {
            in_dup = false;
        }
    }
    dups
}
/// The result of a successful perfect-hash search.
struct HashState {
    /// First random 64-bit key fed to `phf::hash`.
    k1: u64,
    /// Second random 64-bit key fed to `phf::hash`.
    k2: u64,
    /// Per-bucket displacement pairs chosen by `generate_hash`.
    disps: Vec<(uint, uint)>,
    /// Final table: slot index -> index into the entry list.
    map: Vec<Option<uint>>,
}
// Attempts to find a displacement assignment that hashes every entry to a
// distinct table slot. Returns `None` when some bucket cannot be placed, in
// which case the caller retries with fresh random keys.
fn generate_hash(entries: &[Entry]) -> Option<HashState> {
    // A group of entries sharing the same first-level hash (`g % buckets_len`).
    struct Bucket {
        idx: uint,
        keys: Vec<uint>,
    }
    // The three hash values `phf::hash` produces per key.
    struct Hashes {
        g: uint,
        f1: uint,
        f2: uint,
    }
    let k1 = rand::random();
    let k2 = rand::random();
    let hashes: Vec<Hashes> = entries.iter().map(|entry| {
        let (g, f1, f2) = phf::hash(entry.key_str.get(), k1, k2);
        Hashes {
            g: g,
            f1: f1,
            f2: f2
        }
    }).collect();
    // Aim for DEFAULT_LAMBDA keys per bucket on average.
    let buckets_len = (entries.len() + DEFAULT_LAMBDA - 1) / DEFAULT_LAMBDA;
    let mut buckets = Vec::from_fn(buckets_len,
                                   |i| Bucket { idx: i, keys: Vec::new() });
    for (i, hash) in hashes.iter().enumerate() {
        buckets.get_mut(hash.g % buckets_len).keys.push(i);
    }
    // Sort descending
    buckets.sort_by(|a, b| b.keys.len().cmp(&a.keys.len()));
    let table_len = entries.len();
    let mut map = Vec::from_elem(table_len, None);
    let mut disps = Vec::from_elem(buckets_len, None);
    // Scratch map of tentative slot assignments for the bucket being placed.
    let mut try_map = HashMap::new();
    'buckets: for bucket in buckets.iter() {
        // Try displacement pairs (d1, d2) until all keys of this bucket land
        // in distinct, unoccupied slots.
        for d1 in range(0, table_len) {
            'disps_l: for d2 in range(0, table_len) {
                try_map.clear();
                for &key in bucket.keys.iter() {
                    let idx = phf::displace(hashes.get(key).f1,
                                            hashes.get(key).f2,
                                            d1,
                                            d2) % table_len;
                    // Collision with this bucket's tentative picks or with a
                    // previously placed bucket: try the next displacement.
                    if try_map.find(&idx).is_some() || map.get(idx).is_some() {
                        continue 'disps_l;
                    }
                    try_map.insert(idx, key);
                }
                // We've picked a good set of disps
                *disps.get_mut(bucket.idx) = Some((d1, d2));
                for (&idx, &key) in try_map.iter() {
                    *map.get_mut(idx) = Some(key);
                }
                continue 'buckets;
            }
        }
        // Unable to find displacements for a bucket
        return None;
    }
    let disps = disps.move_iter().map(|i| i.expect("should have a bucket"))
                     .collect();
    Some(HashState {
        k1: k1,
        k2: k2,
        disps: disps,
        map: map,
    })
}
// Builds the `PhfMap { .. }` literal expression from the generated hash
// state: static displacement pairs plus the slot-ordered (key, value) pairs.
fn create_map(cx: &mut ExtCtxt, sp: Span, entries: Vec<Entry>, state: HashState)
              -> MacResult {
    let len = entries.len();
    let k1 = state.k1;
    let k2 = state.k2;
    let disps = state.disps.iter().map(|&(d1, d2)| {
        quote_expr!(&*cx, ($d1, $d2))
    }).collect();
    let disps = @Expr {
        id: ast::DUMMY_NODE_ID,
        node: ExprVec(disps, MutImmutable),
        span: sp,
    };
    // Reorder the entries into the slot order dictated by `state.map`; slots
    // without an entry become `None`.
    let entries = state.map.iter().map(|&idx| {
        match idx {
            Some(idx) => {
                let &Entry { key, value, .. } = entries.get(idx);
                quote_expr!(&*cx, Some(($key, $value)))
            }
            None => quote_expr!(&*cx, None),
        }
    }).collect();
    let entries = @Expr {
        id: ast::DUMMY_NODE_ID,
        node: ExprVec(entries, MutImmutable),
        span: sp,
    };
    MRExpr(quote_expr!(cx, PhfMap {
        len: $len,
        k1: $k1,
        k2: $k2,
        disps: &'static $disps,
        entries: &'static $entries,
    }))
}
Remove unneeded libextra dependency
//! Compiler plugin for Rust-PHF
//!
//! See the documentation for the `phf` crate for more details.
#[crate_id="github.com/sfackler/rust-phf/phf_mac"];
#[crate_type="dylib"];
#[doc(html_root_url="http://www.rust-ci.org/sfackler/rust-phf/doc")];
#[feature(managed_boxes, macro_registrar, quote)];
extern crate collections;
extern crate rand;
extern crate syntax;
extern crate time;
extern crate phf;
use collections::HashMap;
use std::os;
use std::vec_ng::Vec;
use syntax::ast;
use syntax::ast::{Name, TokenTree, LitStr, MutImmutable, Expr, ExprVec, ExprLit};
use syntax::codemap::Span;
use syntax::ext::base::{SyntaxExtension,
ExtCtxt,
MacResult,
MRExpr,
NormalTT,
BasicMacroExpander};
use syntax::parse;
use syntax::parse::token;
use syntax::parse::token::{InternedString, COMMA, EOF, FAT_ARROW};
// Target average number of keys per first-level hash bucket (buckets_len is
// computed as ceil(entries / DEFAULT_LAMBDA) in generate_hash).
static DEFAULT_LAMBDA: uint = 5;
// Entry point invoked by the compiler when this syntax-extension dylib is
// loaded: registers `phf_map!` as a normal token-tree macro.
#[macro_registrar]
#[doc(hidden)]
pub fn macro_registrar(register: |Name, SyntaxExtension|) {
    register(token::intern("phf_map"),
             NormalTT(~BasicMacroExpander {
                 expander: expand_mphf_map,
                 span: None
             },
             None));
}
/// A single `key => value` pair parsed from a `phf_map!` invocation.
struct Entry {
    /// Interned contents of the string-literal key; used for sorting,
    /// duplicate detection and hashing.
    key_str: InternedString,
    /// The key expression as written in the macro invocation.
    key: @Expr,
    /// The value expression as written in the macro invocation.
    value: @Expr
}
// Expander for `phf_map!`: parses the entries, rejects duplicate keys,
// searches (retrying with fresh random keys) for a perfect-hash state, and
// emits the final `PhfMap` literal expression.
fn expand_mphf_map(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> MacResult {
    let mut entries = match parse_entries(cx, tts) {
        Some(entries) => entries,
        None => return MacResult::dummy_expr(sp)
    };
    // Sort by key so duplicate detection only needs to compare neighbours.
    entries.sort_by(|a, b| a.key_str.cmp(&b.key_str));
    if has_duplicates(cx, sp, entries.as_slice()) {
        return MacResult::dummy_expr(sp);
    }
    let start = time::precise_time_s();
    let state;
    // Hash generation is randomized and may fail for a given pair of keys;
    // keep retrying until a displacement assignment is found.
    loop {
        match generate_hash(entries.as_slice()) {
            Some(s) => {
                state = s;
                break;
            }
            None => {}
        }
    }
    let time = time::precise_time_s() - start;
    // Opt-in timing output for diagnosing slow map generation.
    if os::getenv("PHF_STATS").is_some() {
        cx.span_note(sp, format!("PHF generation took {} seconds", time));
    }
    create_map(cx, sp, entries, state)
}
// Parses the `"key" => value,` list from the macro's token trees.
//
// Returns `None` (after reporting via `cx`) on malformed syntax, a non-string
// key, or a map larger than `phf::MAX_SIZE`.
fn parse_entries(cx: &mut ExtCtxt, tts: &[TokenTree]) -> Option<Vec<Entry>> {
    let mut parser = parse::new_parser_from_tts(cx.parse_sess(), cx.cfg(),
                                                tts.iter().map(|x| x.clone())
                                                   .collect());
    let mut entries = Vec::new();
    // Set when a key is not a string literal; parsing continues so that every
    // such error is reported in a single pass.
    let mut bad = false;
    while parser.token != EOF {
        // Expand the key expression first so that macro invocations which
        // produce string literals are accepted as keys.
        let key = cx.expand_expr(parser.parse_expr());
        let key_str = match key.node {
            ExprLit(lit) => {
                match lit.node {
                    LitStr(ref s, _) => s.clone(),
                    _ => {
                        cx.span_err(key.span, "expected string literal");
                        bad = true;
                        InternedString::new("")
                    }
                }
            }
            _ => {
                cx.span_err(key.span, "expected string literal");
                bad = true;
                InternedString::new("")
            }
        };
        if !parser.eat(&FAT_ARROW) {
            cx.span_err(parser.span, "expected `=>`");
            return None;
        }
        let value = parser.parse_expr();
        entries.push(Entry {
            key_str: key_str,
            key: key,
            value: value
        });
        // A trailing comma is optional, but entries must otherwise be
        // comma-separated.
        if !parser.eat(&COMMA) && parser.token != EOF {
            cx.span_err(parser.span, "expected `,`");
            return None;
        }
    }
    if entries.len() > phf::MAX_SIZE {
        cx.span_err(parser.span,
                    format!("maps with more than {} entries are not supported",
                            phf::MAX_SIZE));
        return None;
    }
    if bad {
        return None;
    }
    Some(entries)
}
// Reports an error for every run of equal keys in the (pre-sorted) entry
// list and returns true if any duplicates were found.
//
// `in_dup` tracks whether we are already inside a run of equal keys so the
// "duplicate key" header is emitted once per run, with one note per occurrence.
fn has_duplicates(cx: &mut ExtCtxt, sp: Span, entries: &[Entry]) -> bool {
    let mut dups = false;
    let mut in_dup = false;
    for window in entries.windows(2) {
        let ref a = window[0];
        let ref b = window[1];
        if a.key_str == b.key_str {
            dups = true;
            if !in_dup {
                cx.span_err(sp, format!("duplicate key \"{}\"", a.key_str));
                cx.span_note(a.key.span, "one occurrence here");
                in_dup = true;
            }
            cx.span_note(b.key.span, "one occurrence here");
        } else {
            in_dup = false;
        }
    }
    dups
}
/// The result of a successful perfect-hash search.
struct HashState {
    /// First random 64-bit key fed to `phf::hash`.
    k1: u64,
    /// Second random 64-bit key fed to `phf::hash`.
    k2: u64,
    /// Per-bucket displacement pairs chosen by `generate_hash`.
    disps: Vec<(uint, uint)>,
    /// Final table: slot index -> index into the entry list.
    map: Vec<Option<uint>>,
}
// Attempts to find a displacement assignment that hashes every entry to a
// distinct table slot. Returns `None` when some bucket cannot be placed, in
// which case the caller retries with fresh random keys.
fn generate_hash(entries: &[Entry]) -> Option<HashState> {
    // A group of entries sharing the same first-level hash (`g % buckets_len`).
    struct Bucket {
        idx: uint,
        keys: Vec<uint>,
    }
    // The three hash values `phf::hash` produces per key.
    struct Hashes {
        g: uint,
        f1: uint,
        f2: uint,
    }
    let k1 = rand::random();
    let k2 = rand::random();
    let hashes: Vec<Hashes> = entries.iter().map(|entry| {
        let (g, f1, f2) = phf::hash(entry.key_str.get(), k1, k2);
        Hashes {
            g: g,
            f1: f1,
            f2: f2
        }
    }).collect();
    // Aim for DEFAULT_LAMBDA keys per bucket on average.
    let buckets_len = (entries.len() + DEFAULT_LAMBDA - 1) / DEFAULT_LAMBDA;
    let mut buckets = Vec::from_fn(buckets_len,
                                   |i| Bucket { idx: i, keys: Vec::new() });
    for (i, hash) in hashes.iter().enumerate() {
        buckets.get_mut(hash.g % buckets_len).keys.push(i);
    }
    // Sort descending
    buckets.sort_by(|a, b| b.keys.len().cmp(&a.keys.len()));
    let table_len = entries.len();
    let mut map = Vec::from_elem(table_len, None);
    let mut disps = Vec::from_elem(buckets_len, None);
    // Scratch map of tentative slot assignments for the bucket being placed.
    let mut try_map = HashMap::new();
    'buckets: for bucket in buckets.iter() {
        // Try displacement pairs (d1, d2) until all keys of this bucket land
        // in distinct, unoccupied slots.
        for d1 in range(0, table_len) {
            'disps_l: for d2 in range(0, table_len) {
                try_map.clear();
                for &key in bucket.keys.iter() {
                    let idx = phf::displace(hashes.get(key).f1,
                                            hashes.get(key).f2,
                                            d1,
                                            d2) % table_len;
                    // Collision with this bucket's tentative picks or with a
                    // previously placed bucket: try the next displacement.
                    if try_map.find(&idx).is_some() || map.get(idx).is_some() {
                        continue 'disps_l;
                    }
                    try_map.insert(idx, key);
                }
                // We've picked a good set of disps
                *disps.get_mut(bucket.idx) = Some((d1, d2));
                for (&idx, &key) in try_map.iter() {
                    *map.get_mut(idx) = Some(key);
                }
                continue 'buckets;
            }
        }
        // Unable to find displacements for a bucket
        return None;
    }
    let disps = disps.move_iter().map(|i| i.expect("should have a bucket"))
                     .collect();
    Some(HashState {
        k1: k1,
        k2: k2,
        disps: disps,
        map: map,
    })
}
// Builds the `PhfMap { .. }` literal expression from the generated hash
// state: static displacement pairs plus the slot-ordered (key, value) pairs.
fn create_map(cx: &mut ExtCtxt, sp: Span, entries: Vec<Entry>, state: HashState)
              -> MacResult {
    let len = entries.len();
    let k1 = state.k1;
    let k2 = state.k2;
    let disps = state.disps.iter().map(|&(d1, d2)| {
        quote_expr!(&*cx, ($d1, $d2))
    }).collect();
    let disps = @Expr {
        id: ast::DUMMY_NODE_ID,
        node: ExprVec(disps, MutImmutable),
        span: sp,
    };
    // Reorder the entries into the slot order dictated by `state.map`; slots
    // without an entry become `None`.
    let entries = state.map.iter().map(|&idx| {
        match idx {
            Some(idx) => {
                let &Entry { key, value, .. } = entries.get(idx);
                quote_expr!(&*cx, Some(($key, $value)))
            }
            None => quote_expr!(&*cx, None),
        }
    }).collect();
    let entries = @Expr {
        id: ast::DUMMY_NODE_ID,
        node: ExprVec(entries, MutImmutable),
        span: sp,
    };
    MRExpr(quote_expr!(cx, PhfMap {
        len: $len,
        k1: $k1,
        k2: $k2,
        disps: &'static $disps,
        entries: &'static $entries,
    }))
}
|
use {Value, Response};
use parser::parse_response;
use error::RequestError;
use utils::escape_xml;
use hyper::client::{Client, Body};
use hyper::header::{ContentType, UserAgent};
use std::io::{self, Write};
/// A request to call a procedure.
pub struct Request<'a> {
    /// Name of the remote method to invoke.
    name: &'a str,
    /// Ordered (name, value) argument pairs; only the values are serialized
    /// (see `write_as_xml`).
    args: Vec<(&'a str, Value)>,
}
impl<'a> Request<'a> {
    /// Creates a new request to call a function named `name`.
    ///
    /// By default, no arguments are passed. Use the `arg` method to append arguments.
    pub fn new(name: &'a str) -> Self {
        Request {
            name: name,
            args: Vec::new(),
        }
    }

    /// Appends an argument to be passed to the current list of arguments.
    pub fn arg<T: Into<Value>>(mut self, name: &'a str, value: T) -> Self {
        // Push onto the existing list and hand the builder back; rebuilding
        // the struct from its own fields (as before) was redundant.
        self.args.push((name, value.into()));
        self
    }

    /// Calls the method using the given `Client`.
    ///
    /// This will send the request to the `/` URL.
    ///
    /// Returns a `RequestResult` indicating whether the request was sent and processed successfully
    /// (according to the rules of XML-RPC).
    pub fn call(self, client: Client) -> RequestResult {
        use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};

        // First, build the body XML
        let mut body = Vec::new();
        try!(self.write_as_xml(&mut body));

        // Send XML-RPC request as `text/xml; charset=utf-8`
        let mut response = try!(client.post("/")
            .header(UserAgent("Rust xmlrpc".to_string()))
            .header(ContentType(Mime(TopLevel::Text, SubLevel::Xml, vec![(Attr::Charset, Value::Utf8)])))
            .body(Body::BufBody(&body, body.len()))
            .send());

        // FIXME Check that the response headers are correct

        // Read the response and parse it
        // FIXME `BufRead`?
        Ok(try!(parse_response(&mut response)))
    }

    /// Formats this `Request` as XML.
    ///
    /// The method name is XML-escaped; values serialize themselves via `Value::format`.
    pub fn write_as_xml<W: Write>(&self, fmt: &mut W) -> io::Result<()> {
        // NOTE(review): the declaration says XML 1.1; XML-RPC examples use
        // version="1.0" — confirm "1.1" is intentional before changing it.
        try!(write!(fmt, r#"<?xml version="1.1" encoding="utf-8"?>"#));
        try!(write!(fmt, r#"<methodCall>"#));
        try!(write!(fmt, r#" <methodName>{}</methodName>"#, escape_xml(&self.name)));
        try!(write!(fmt, r#" <params>"#));
        for &(_, ref value) in &self.args {
            try!(write!(fmt, r#" <param>"#));
            try!(value.format(fmt));
            try!(write!(fmt, r#" </param>"#));
        }
        try!(write!(fmt, r#" </params>"#));
        try!(write!(fmt, r#"</methodCall>"#));
        Ok(())
    }
}
/// The result of executing a request.
///
/// When the request was executed without major errors (like an HTTP error or a malformed response),
/// this is `Ok`. The `Response` can still denote a `Fault` if the server returned a `<fault>`
/// response. Transport and parse failures are reported via `RequestError`.
pub type RequestResult = Result<Response, RequestError>;
#[cfg(test)]
mod tests {
    use super::*;
    use std::str;

    // `<` and `&` in the method name must pass through `escape_xml` when the
    // call is serialized.
    #[test]
    fn escapes_method_names() {
        let mut output: Vec<u8> = Vec::new();
        let req = Request::new("x<&x");
        req.write_as_xml(&mut output).unwrap();
        // NOTE(review): the expected substring below contains the *unescaped*
        // name, which contradicts the test's purpose — it looks like the
        // entity references were mangled; verify against escape_xml's output.
        assert!(
            str::from_utf8(&output)
                .unwrap()
                .contains("<methodName>x<&x</methodName>"));
    }
}
Add url parameter for XMLRPC calls
use {Value, Response};
use parser::parse_response;
use error::RequestError;
use utils::escape_xml;
use hyper::client::{Client, Body};
use hyper::header::{ContentType, UserAgent};
use std::io::{self, Write};
/// A request to call a procedure.
pub struct Request<'a> {
    /// Name of the remote method to invoke.
    name: &'a str,
    /// Ordered (name, value) argument pairs; only the values are serialized
    /// (see `write_as_xml`).
    args: Vec<(&'a str, Value)>,
}
impl<'a> Request<'a> {
    /// Creates a new request to call a function named `name`.
    ///
    /// By default, no arguments are passed. Use the `arg` method to append arguments.
    pub fn new(name: &'a str) -> Self {
        Request {
            name: name,
            args: Vec::new(),
        }
    }

    /// Appends an argument to be passed to the current list of arguments.
    pub fn arg<T: Into<Value>>(mut self, name: &'a str, value: T) -> Self {
        // Push onto the existing list and hand the builder back; rebuilding
        // the struct from its own fields (as before) was redundant.
        self.args.push((name, value.into()));
        self
    }

    /// Calls the method using the given `Client`, posting to `url`.
    ///
    /// Returns a `RequestResult` indicating whether the request was sent and processed successfully
    /// (according to the rules of XML-RPC).
    pub fn call(self, client: Client, url: &str) -> RequestResult {
        use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};

        // First, build the body XML
        let mut body = Vec::new();
        try!(self.write_as_xml(&mut body));

        // Send XML-RPC request as `text/xml; charset=utf-8`
        let mut response = try!(client.post(url)
            .header(UserAgent("Rust xmlrpc".to_string()))
            .header(ContentType(Mime(TopLevel::Text, SubLevel::Xml, vec![(Attr::Charset, Value::Utf8)])))
            .body(Body::BufBody(&body, body.len()))
            .send());

        // FIXME Check that the response headers are correct

        // Read the response and parse it
        // FIXME `BufRead`?
        Ok(try!(parse_response(&mut response)))
    }

    /// Formats this `Request` as XML.
    ///
    /// The method name is XML-escaped; values serialize themselves via `Value::format`.
    pub fn write_as_xml<W: Write>(&self, fmt: &mut W) -> io::Result<()> {
        // NOTE(review): the declaration says XML 1.1; XML-RPC examples use
        // version="1.0" — confirm "1.1" is intentional before changing it.
        try!(write!(fmt, r#"<?xml version="1.1" encoding="utf-8"?>"#));
        try!(write!(fmt, r#"<methodCall>"#));
        try!(write!(fmt, r#" <methodName>{}</methodName>"#, escape_xml(&self.name)));
        try!(write!(fmt, r#" <params>"#));
        for &(_, ref value) in &self.args {
            try!(write!(fmt, r#" <param>"#));
            try!(value.format(fmt));
            try!(write!(fmt, r#" </param>"#));
        }
        try!(write!(fmt, r#" </params>"#));
        try!(write!(fmt, r#"</methodCall>"#));
        Ok(())
    }
}
/// The result of executing a request.
///
/// When the request was executed without major errors (like an HTTP error or a malformed response),
/// this is `Ok`. The `Response` can still denote a `Fault` if the server returned a `<fault>`
/// response. Transport and parse failures are reported via `RequestError`.
pub type RequestResult = Result<Response, RequestError>;
#[cfg(test)]
mod tests {
    use super::*;
    use std::str;

    // `<` and `&` in the method name must pass through `escape_xml` when the
    // call is serialized.
    #[test]
    fn escapes_method_names() {
        let mut output: Vec<u8> = Vec::new();
        let req = Request::new("x<&x");
        req.write_as_xml(&mut output).unwrap();
        // NOTE(review): the expected substring below contains the *unescaped*
        // name, which contradicts the test's purpose — it looks like the
        // entity references were mangled; verify against escape_xml's output.
        assert!(
            str::from_utf8(&output)
                .unwrap()
                .contains("<methodName>x<&x</methodName>"));
    }
}
|
// Copyright (c) 2016
// Jeff Nettleton
//
// Licensed under the MIT license (http://opensource.org/licenses/MIT). This
// file may not be copied, modified, or distributed except according to those
// terms
use std;
use std::collections::HashMap;
use serde_json;
use serde::de::DeserializeOwned;
/// This enum represents the various types of HTTP requests.
#[derive(PartialEq, Eq, Hash, Debug, Copy, Clone)]
pub enum Method {
    /// HTTP GET.
    Get,
    /// HTTP PUT.
    Put,
    /// HTTP POST.
    Post,
    /// HTTP DELETE.
    Delete,
    /// HTTP OPTIONS.
    Options,
    /// Any method this server does not implement.
    NoImpl,
}
/// This enum represents the errors that might be encountered.
#[derive(Debug)]
pub enum RequestError {
JsonStrError(serde_json::Error),
StrCopyError(std::string::FromUtf8Error),
}
impl From<serde_json::Error> for RequestError {
fn from(err: serde_json::Error) -> RequestError {
RequestError::JsonStrError(err)
}
}
impl From<std::string::FromUtf8Error> for RequestError {
fn from(err: std::string::FromUtf8Error) -> RequestError {
RequestError::StrCopyError(err)
}
}
impl std::fmt::Display for RequestError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
RequestError::JsonStrError(err) => write!(f, "JSON error: {}", err),
RequestError::StrCopyError(err) => write!(f, "UTF-8 error: {}", err),
}
}
}
impl std::error::Error for RequestError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
RequestError::JsonStrError(err) => Some(err),
RequestError::StrCopyError(err) => Some(err),
}
}
}
/// A trait that allows for extracting variables from URIs.
pub trait FromUri {
/// A function to parse a string into the correct type.
fn from_uri(data: &str) -> Self;
}
impl FromUri for String {
fn from_uri(data: &str) -> String {
String::from(data)
}
}
impl FromUri for i32 {
fn from_uri(data: &str) -> i32 {
data.parse::<i32>().expect("matched integer can't be parsed")
}
}
impl FromUri for u32 {
fn from_uri(data: &str) -> u32 {
data.parse::<u32>().expect("matched integer can't be parsed")
}
}
impl FromUri for f32 {
fn from_uri(data: &str) -> f32 {
data.parse::<f32>().expect("matched float can't be parsed")
}
}
/// This struct represents a request from an HTTP client.
#[derive(Debug)]
pub struct Request {
    pub method: Method,
    // Request path, e.g. "/hello/world"; filled in by `parse`.
    pub path: String,
    // Raw body bytes following the blank line of the request.
    pub payload: Vec<u8>,
    // Variables captured from the matched route (read via `get`).
    pub params: HashMap<String, String>,
    // Header name (lowercased by `parse`) -> value; read via `get_header`.
    headers: HashMap<String, String>,
}
impl Request {
/// Create a new, empty Request.
pub fn new() -> Request {
    Request {
        // NoImpl until `parse` sees a recognized method token.
        method: Method::NoImpl,
        path: String::new(),
        headers: HashMap::new(),
        params: HashMap::new(),
        // Pre-size the body buffer to avoid repeated growth on small payloads.
        payload: Vec::with_capacity(2048),
    }
}
/// Get an HTTP header contained in the Request.
///
/// Header names are matched case-insensitively; returns a clone of the
/// stored value, or `None` when the header is absent.
///
/// # Examples
///
/// ```rust
/// use canteen::{Request, Response};
/// use canteen::utils;
///
/// // Given the route "/hello"
/// fn handler(req: &Request) -> Response {
///     let browser = req.get_header("User-Agent");
///
///     match browser {
///         Some(ua) => utils::make_response(format!("You're using {}!", ua), "text/plain", 200),
///         None => utils::make_response("Bad browser, no user agent!", "text/plain", 200),
///     }
/// }
/// ```
pub fn get_header(&self, name: &str) -> Option<String> {
    // Headers are stored lowercased by `parse`, so normalize the lookup key.
    // `to_lowercase` already yields an owned String (the old `String::from`
    // wrapper was redundant) and `Option::cloned` replaces the manual
    // Some/None match.
    self.headers.get(&name.to_lowercase()).cloned()
}
/// Get a variable from the URI.
///
/// # Panics
///
/// Panics when `name` was not captured by the matched route, or when the
/// captured text cannot be parsed as `T` (see the `FromUri` impls).
///
/// # Examples
///
/// ```rust
/// use canteen::{Request, Response};
/// use canteen::utils;
///
/// // Given the route "/hello/<str:name>"
/// fn handler(req: &Request) -> Response {
///     let name: String = req.get("name");
///     utils::make_response(format!("<b>Hello, {}!</b>", name), "text/html", 200)
/// }
/// ```
pub fn get<T: FromUri>(&self, name: &str) -> T {
    // Single map lookup instead of `contains_key` followed by indexing
    // (which hashed the key twice). The panic message is unchanged.
    match self.params.get(name) {
        Some(val) => FromUri::from_uri(val),
        None => panic!("invalid route parameter {:?}", name),
    }
}
/// Get a raw JSON payload from the request.
///
/// Returns `RequestError` when the payload is not valid UTF-8 or not
/// valid JSON.
///
/// # Examples
///
/// ```rust
/// use canteen::{Request, Response};
/// use canteen::utils;
///
/// // Given the POST route "/hello"
/// fn handler(req: &Request) -> Response {
///     let data = req.get_json();
///
///     match data {
///         Ok(val) => utils::make_response(format!("We got: {}", val), "text/plain", 200),
///         Err(_) => utils::make_response("We got nothing :(", "text/plain", 200),
///     }
/// }
/// ```
pub fn get_json(&self) -> Result<serde_json::Value, RequestError> {
    // Clone the raw bytes so the request itself stays untouched; the UTF-8
    // check maps into RequestError::StrCopyError via `?`.
    let payload = String::from_utf8(self.payload.clone())?;
    let data = serde_json::from_str(&payload)?;
    Ok(data)
}
/// Get a composed JSON payload from the request.
///
/// Deserializes the payload into any `T: DeserializeOwned`; returns
/// `RequestError` when the payload is not UTF-8 or does not match `T`.
///
/// # Examples
///
/// ```rust,ignore
/// use canteen::{Request, Response};
///
/// #[derive(Deserialize)]
/// struct Foo {
///     item: i32,
/// }
///
/// // Given the POST route "/hello"
/// fn handler(req: &Request) -> Response {
///     let data: Foo = req.get_json_obj();
///
///     match data {
///         Ok(foo) => utils::make_response(format!("We got: {}!", data.item), "text/plain", 200),
///         Err(_) => utils::make_response("We got nothing :(", "text/plain", 200),
///     }
/// }
/// ```
pub fn get_json_obj<T>(&self) -> Result<T, RequestError>
    where T: DeserializeOwned {
    let payload = String::from_utf8(self.payload.clone())?;
    let data = serde_json::from_str(&payload)?;
    Ok(data)
}
/// Parse a raw HTTP request string into this `Request`.
///
/// Extracts the method, path, headers (keys lowercased) and any payload
/// after the blank separator line.
/// NOTE(review): assumes a well-formed request — a request line with no URI
/// token panics on `ask[1]`, and a header section not terminated by a blank
/// CRLF line panics on `buf[1]`; confirm callers only feed complete requests.
fn parse(&mut self, rqstr: &str) {
    // buf[0] is the current line, buf[1] the unparsed remainder.
    let mut buf: Vec<&str> = rqstr.splitn(2, "\r\n").collect();
    let ask: Vec<&str> = buf[0].splitn(3, ' ').collect();
    self.method = match ask[0] {
        "GET" => Method::Get,
        "PUT" | "PATCH" => Method::Put,
        "POST" => Method::Post,
        "DELETE" => Method::Delete,
        "OPTIONS" => Method::Options,
        _ => Method::NoImpl,
    };
    self.path = String::from(ask[1]);
    loop {
        // Advance one CRLF-terminated line per iteration.
        buf = buf[1].splitn(2, "\r\n").collect();
        if buf[0] == "" {
            if buf.len() == 1 || buf[1] == "" {
                // no payload
                break;
            }
            // Everything after the blank separator line is the body.
            self.payload.extend(buf[1].as_bytes());
            break;
        }
        let hdr: Vec<&str> = buf[0].splitn(2, ": ").collect();
        if hdr.len() == 2 {
            self.headers.insert(String::from(hdr[0].to_lowercase()), String::from(hdr[1]));
        }
    }
}
}
// A default `Request` is simply an empty one.
impl Default for Request {
    fn default() -> Self {
        Request::new()
    }
}
impl std::str::FromStr for Request {
    type Err = RequestError;
    /// Create a Request from an HTTP request string.
    ///
    /// Currently infallible: `parse` panics on malformed input rather than
    /// returning `Err`.
    fn from_str(rqstr: &str) -> Result<Self, Self::Err> {
        let mut parsed = Request::new();
        parsed.parse(rqstr);
        Ok(parsed)
    }
}
// Unit tests for the FromUri conversions and the Request accessors.
#[cfg(test)]
mod tests {
    use super::*;
    #[derive(Deserialize)]
    struct Foo {
        item: i32,
    }
    #[test]
    fn test_fromuri_trait_i32() {
        let pos = String::from("1234");
        assert_eq!(1234, <i32 as FromUri>::from_uri(&pos));
        let neg = String::from("-4321");
        assert_eq!(-4321, <i32 as FromUri>::from_uri(&neg));
    }
    #[test]
    fn test_fromuri_trait_u32() {
        let orig = String::from("1234");
        assert_eq!(1234, <u32 as FromUri>::from_uri(&orig));
    }
    #[test]
    fn test_fromuri_trait_string() {
        let orig = String::from("foobar");
        assert_eq!("foobar", <String as FromUri>::from_uri(&orig));
    }
    #[test]
    fn test_fromuri_trait_float() {
        let pos = String::from("123.45");
        assert_eq!(123.45f32, <f32 as FromUri>::from_uri(&pos));
        let neg = String::from("-54.321");
        assert_eq!(-54.321f32, <f32 as FromUri>::from_uri(&neg));
    }
    #[test]
    fn test_get_fromuri_i32() {
        // Route params are injected directly; `get` parses them on read.
        let mut req = Request::new();
        req.params.insert(String::from("test"), String::from("1234"));
        let val: i32 = req.get("test");
        assert_eq!(1234, val);
    }
    #[test]
    fn test_get_json() {
        let mut req = Request::new();
        req.payload.extend_from_slice("{ \"item\": 123 }".as_bytes());
        let data = req.get_json().unwrap();
        assert_eq!(true, data.is_object());
        let obj = data.as_object().unwrap();
        let val = obj.get("item").unwrap();
        assert_eq!(true, val.is_u64());
        assert_eq!(123u64, val.as_u64().unwrap());
    }
    #[test]
    fn test_get_json_obj() {
        let mut req = Request::new();
        req.payload.extend_from_slice("{ \"item\": 123 }".as_bytes());
        let data: Foo = req.get_json_obj().unwrap();
        assert_eq!(123, data.item);
    }
}
Add URI query argument handling
// Copyright (c) 2016
// Jeff Nettleton
//
// Licensed under the MIT license (http://opensource.org/licenses/MIT). This
// file may not be copied, modified, or distributed except according to those
// terms
use std;
use std::collections::HashMap;
use serde_json;
use serde::de::DeserializeOwned;
use crate::utils::replace_escape;
/// This enum represents the various types of HTTP requests.
#[derive(PartialEq, Eq, Hash, Debug, Copy, Clone)]
pub enum Method {
    Get,
    /// Also used for PATCH requests (see `Request::parse`).
    Put,
    Post,
    Delete,
    Options,
    /// Any HTTP method this server does not implement.
    NoImpl,
}
/// Storage for URI query parameters -- either single or multiple.
///
/// A key that appears once in the query string maps to `Single`; a repeated
/// key collects all of its values into `Multiple` (see `Request::parse`).
#[derive(PartialEq, Eq, Hash, Debug, Clone)]
pub enum QueryArg {
    Single(String),
    Multiple(Vec<String>),
}
/// This enum represents the errors that might be encountered.
#[derive(Debug)]
pub enum RequestError {
    /// The request payload was not valid JSON.
    JsonStrError(serde_json::Error),
    /// The request payload was not valid UTF-8.
    StrCopyError(std::string::FromUtf8Error),
}
// The From impls below let `?` lift serde_json / UTF-8 failures into
// RequestError (used by get_json / get_json_obj).
impl From<serde_json::Error> for RequestError {
    fn from(err: serde_json::Error) -> RequestError {
        RequestError::JsonStrError(err)
    }
}
impl From<std::string::FromUtf8Error> for RequestError {
    fn from(err: std::string::FromUtf8Error) -> RequestError {
        RequestError::StrCopyError(err)
    }
}
impl std::fmt::Display for RequestError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            RequestError::JsonStrError(err) => write!(f, "JSON error: {}", err),
            RequestError::StrCopyError(err) => write!(f, "UTF-8 error: {}", err),
        }
    }
}
impl std::error::Error for RequestError {
    // Expose the wrapped error as the source for error-chain reporting.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            RequestError::JsonStrError(err) => Some(err),
            RequestError::StrCopyError(err) => Some(err),
        }
    }
}
/// A trait that allows for extracting variables from URIs.
pub trait FromUri {
    /// A function to parse a string into the correct type.
    fn from_uri(data: &str) -> Self;
}
impl FromUri for String {
    fn from_uri(data: &str) -> String {
        String::from(data)
    }
}
impl FromUri for i32 {
    fn from_uri(data: &str) -> i32 {
        // NOTE: panics if the matched segment is not a valid i32.
        data.parse::<i32>().expect("matched integer can't be parsed")
    }
}
impl FromUri for u32 {
    fn from_uri(data: &str) -> u32 {
        // NOTE: panics if the matched segment is not a valid u32.
        data.parse::<u32>().expect("matched integer can't be parsed")
    }
}
impl FromUri for f32 {
    fn from_uri(data: &str) -> f32 {
        // NOTE: panics if the matched segment is not a valid f32.
        data.parse::<f32>().expect("matched float can't be parsed")
    }
}
/// This struct represents a request from an HTTP client.
#[derive(Debug)]
pub struct Request {
    pub method: Method,
    // Full request target as received, e.g. "/item?foo=bar".
    pub uri: String,
    // Path portion of the URI, before any '?'.
    pub path: String,
    // Raw query string after the '?', or empty when absent.
    pub query: String,
    // Raw body bytes following the blank line of the request.
    pub payload: Vec<u8>,
    // Variables captured from the matched route (read via `get`).
    pub params: HashMap<String, String>,
    // Decoded query arguments; see `QueryArg` for single vs. repeated keys.
    pub args: HashMap<String, QueryArg>,
    // Header name (lowercased by `parse`) -> value; read via `get_header`.
    headers: HashMap<String, String>,
}
impl Request {
/// Create a new, empty Request.
pub fn new() -> Request {
    Request {
        // NoImpl until `parse` sees a recognized method token.
        method: Method::NoImpl,
        uri: String::new(),
        path: String::new(),
        query: String::new(),
        headers: HashMap::new(),
        params: HashMap::new(),
        args: HashMap::new(),
        // Pre-size the body buffer to avoid repeated growth on small payloads.
        payload: Vec::with_capacity(2048),
    }
}
/// Get an HTTP header contained in the Request.
///
/// Header names are matched case-insensitively; returns a clone of the
/// stored value, or `None` when the header is absent.
///
/// # Examples
///
/// ```rust
/// use canteen::{Request, Response};
/// use canteen::utils;
///
/// // Given the route "/hello"
/// fn handler(req: &Request) -> Response {
///     let browser = req.get_header("User-Agent");
///
///     match browser {
///         Some(ua) => utils::make_response(format!("You're using {}!", ua), "text/plain", 200),
///         None => utils::make_response("Bad browser, no user agent!", "text/plain", 200),
///     }
/// }
/// ```
pub fn get_header(&self, name: &str) -> Option<String> {
    // Headers are stored lowercased by `parse`, so normalize the lookup key.
    // `to_lowercase` already yields an owned String (the old `String::from`
    // wrapper was redundant) and `Option::cloned` replaces the manual
    // Some/None match.
    self.headers.get(&name.to_lowercase()).cloned()
}
/// Get a variable from the URI.
///
/// # Panics
///
/// Panics when `name` was not captured by the matched route, or when the
/// captured text cannot be parsed as `T` (see the `FromUri` impls).
///
/// # Examples
///
/// ```rust
/// use canteen::{Request, Response};
/// use canteen::utils;
///
/// // Given the route "/hello/<str:name>"
/// fn handler(req: &Request) -> Response {
///     let name: String = req.get("name");
///     utils::make_response(format!("<b>Hello, {}!</b>", name), "text/html", 200)
/// }
/// ```
pub fn get<T: FromUri>(&self, name: &str) -> T {
    // Single map lookup instead of `contains_key` followed by indexing
    // (which hashed the key twice). The panic message is unchanged.
    match self.params.get(name) {
        Some(val) => FromUri::from_uri(val),
        None => panic!("invalid route parameter {:?}", name),
    }
}
/// Get a raw JSON payload from the request.
///
/// Returns `RequestError` when the payload is not valid UTF-8 or not
/// valid JSON.
///
/// # Examples
///
/// ```rust
/// use canteen::{Request, Response};
/// use canteen::utils;
///
/// // Given the POST route "/hello"
/// fn handler(req: &Request) -> Response {
///     let data = req.get_json();
///
///     match data {
///         Ok(val) => utils::make_response(format!("We got: {}", val), "text/plain", 200),
///         Err(_) => utils::make_response("We got nothing :(", "text/plain", 200),
///     }
/// }
/// ```
pub fn get_json(&self) -> Result<serde_json::Value, RequestError> {
    // Clone the raw bytes so the request itself stays untouched; the UTF-8
    // check maps into RequestError::StrCopyError via `?`.
    let payload = String::from_utf8(self.payload.clone())?;
    let data = serde_json::from_str(&payload)?;
    Ok(data)
}
/// Get a composed JSON payload from the request.
///
/// Deserializes the payload into any `T: DeserializeOwned`; returns
/// `RequestError` when the payload is not UTF-8 or does not match `T`.
///
/// # Examples
///
/// ```rust,ignore
/// use canteen::{Request, Response};
///
/// #[derive(Deserialize)]
/// struct Foo {
///     item: i32,
/// }
///
/// // Given the POST route "/hello"
/// fn handler(req: &Request) -> Response {
///     let data: Foo = req.get_json_obj();
///
///     match data {
///         Ok(foo) => utils::make_response(format!("We got: {}!", data.item), "text/plain", 200),
///         Err(_) => utils::make_response("We got nothing :(", "text/plain", 200),
///     }
/// }
/// ```
pub fn get_json_obj<T>(&self) -> Result<T, RequestError>
    where T: DeserializeOwned {
    let payload = String::from_utf8(self.payload.clone())?;
    let data = serde_json::from_str(&payload)?;
    Ok(data)
}
/// Parse a raw HTTP request string into this `Request`.
///
/// Extracts the method, URI, path, percent-decoded query arguments, headers
/// (keys lowercased) and any payload after the blank separator line.
/// NOTE(review): assumes a well-formed request — a request line with no URI
/// token panics on `ask[1]`, and a header section not terminated by a blank
/// CRLF line panics on `buf[1]`; confirm callers only feed complete requests.
fn parse(&mut self, rqstr: &str) {
    // buf[0] is the current line, buf[1] the unparsed remainder.
    let mut buf: Vec<&str> = rqstr.splitn(2, "\r\n").collect();
    let ask: Vec<&str> = buf[0].splitn(3, ' ').collect();
    self.method = match ask[0] {
        "GET" => Method::Get,
        "PUT" | "PATCH" => Method::Put,
        "POST" => Method::Post,
        "DELETE" => Method::Delete,
        "OPTIONS" => Method::Options,
        _ => Method::NoImpl,
    };
    self.uri = String::from(ask[1]);
    // Split "path?query" once; a missing '?' leaves the query empty.
    let mut split_uri = ask[1].splitn(2, '?');
    self.path = String::from(split_uri.next().unwrap());
    self.query = String::from(split_uri.next().unwrap_or(""));
    // Collect ?foo=bar&foo=baz pairs; repeated keys accumulate in order.
    // Iterating `self.query` directly — the old `.clone()` here was a
    // redundant allocation (nothing in the loop mutates `self`).
    let mut tmp_query_args: HashMap<String, Vec<String>> = HashMap::new();
    for pair in self.query.split('&') {
        let mut split_pair = pair.splitn(2, '=');
        let key = String::from(replace_escape(split_pair.next().unwrap()));
        let val = String::from(replace_escape(split_pair.next().unwrap_or("")));
        // Valueless keys (and the empty pair of an empty query) are dropped.
        if !val.is_empty() {
            // `or_insert_with` avoids allocating a Vec when the key exists.
            tmp_query_args.entry(key).or_insert_with(Vec::new).push(val);
        }
    }
    // Collapse singletons into QueryArg::Single, the rest into Multiple.
    for (key, mut vals) in tmp_query_args.into_iter() {
        match vals.len() {
            0 => continue,
            1 => self.args.insert(key, QueryArg::Single(vals.pop().unwrap())),
            _ => self.args.insert(key, QueryArg::Multiple(vals)),
        };
    }
    loop {
        // Advance one CRLF-terminated line per iteration.
        buf = buf[1].splitn(2, "\r\n").collect();
        if buf[0] == "" {
            if buf.len() == 1 || buf[1] == "" {
                // no payload
                break;
            }
            // Everything after the blank separator line is the body.
            self.payload.extend(buf[1].as_bytes());
            break;
        }
        let hdr: Vec<&str> = buf[0].splitn(2, ": ").collect();
        if hdr.len() == 2 {
            self.headers.insert(String::from(hdr[0].to_lowercase()), String::from(hdr[1]));
        }
    }
}
}
// A default `Request` is simply an empty one.
impl Default for Request {
    fn default() -> Self {
        Request::new()
    }
}
impl std::str::FromStr for Request {
    type Err = RequestError;
    /// Create a Request from an HTTP request string.
    ///
    /// Currently infallible: `parse` panics on malformed input rather than
    /// returning `Err`.
    fn from_str(rqstr: &str) -> Result<Self, Self::Err> {
        let mut parsed = Request::new();
        parsed.parse(rqstr);
        Ok(parsed)
    }
}
// Unit tests for FromUri conversions, Request accessors, and query parsing.
#[cfg(test)]
mod tests {
    use std::str::FromStr;
    use super::*;
    #[derive(Deserialize)]
    struct Foo {
        item: i32,
    }
    #[test]
    fn test_fromuri_trait_i32() {
        let pos = String::from("1234");
        assert_eq!(1234, <i32 as FromUri>::from_uri(&pos));
        let neg = String::from("-4321");
        assert_eq!(-4321, <i32 as FromUri>::from_uri(&neg));
    }
    #[test]
    fn test_fromuri_trait_u32() {
        let orig = String::from("1234");
        assert_eq!(1234, <u32 as FromUri>::from_uri(&orig));
    }
    #[test]
    fn test_fromuri_trait_string() {
        let orig = String::from("foobar");
        assert_eq!("foobar", <String as FromUri>::from_uri(&orig));
    }
    #[test]
    fn test_fromuri_trait_float() {
        let pos = String::from("123.45");
        assert_eq!(123.45f32, <f32 as FromUri>::from_uri(&pos));
        let neg = String::from("-54.321");
        assert_eq!(-54.321f32, <f32 as FromUri>::from_uri(&neg));
    }
    #[test]
    fn test_get_fromuri_i32() {
        // Route params are injected directly; `get` parses them on read.
        let mut req = Request::new();
        req.params.insert(String::from("test"), String::from("1234"));
        let val: i32 = req.get("test");
        assert_eq!(1234, val);
    }
    #[test]
    fn test_get_json() {
        let mut req = Request::new();
        req.payload.extend_from_slice("{ \"item\": 123 }".as_bytes());
        let data = req.get_json().unwrap();
        assert_eq!(true, data.is_object());
        let obj = data.as_object().unwrap();
        let val = obj.get("item").unwrap();
        assert_eq!(true, val.is_u64());
        assert_eq!(123u64, val.as_u64().unwrap());
    }
    #[test]
    fn test_get_json_obj() {
        let mut req = Request::new();
        req.payload.extend_from_slice("{ \"item\": 123 }".as_bytes());
        let data: Foo = req.get_json_obj().unwrap();
        assert_eq!(123, data.item);
    }
    #[test]
    fn test_parse() {
        // Exercises query splitting and %XX unescaping via replace_escape.
        let req = Request::from_str("GET /item?foo=bar&baz=%6C%6F%6C HTTP/1.1\r\n\r\n").unwrap();
        assert_eq!(req.args.get("foo").unwrap(), &QueryArg::Single("bar".into()));
        assert_eq!(req.args.get("baz").unwrap(), &QueryArg::Single("lol".into()));
    }
}
|
// Copyright 2014 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use std::collections::BTreeMap;
use std::ffi::CString;
use std::fs;
use std::ops::Deref;
use std::path::Path;
use std::slice;
use std::str::from_utf8;
use libc::{self, c_int, c_void, size_t};
use rocksdb_ffi::{self, DBCFHandle, error_message};
use rocksdb_options::{Options, WriteOptions};
// Name of the column family rocksdb always provides.
const DEFAULT_COLUMN_FAMILY: &'static str = "default";
/// Handle to an open RocksDB instance plus the column-family handles it owns.
pub struct DB {
    inner: rocksdb_ffi::DBInstance,
    // Column-family name -> FFI handle, populated by open_cf / create_cf.
    cfs: BTreeMap<String, DBCFHandle>,
}
// NOTE(review): Send/Sync are asserted on the assumption that the underlying
// rocksdb handle is internally synchronized — confirm against the C API's
// thread-safety guarantees.
unsafe impl Send for DB {}
unsafe impl Sync for DB {}
/// An atomic batch of write operations, applied via `DB::write*`.
pub struct WriteBatch {
    inner: rocksdb_ffi::DBWriteBatch,
}
/// Owned wrapper around rocksdb read options.
pub struct ReadOptions {
    inner: rocksdb_ffi::DBReadOptions,
}
/// A consistent point-in-time view of the database; released on drop.
pub struct Snapshot<'a> {
    db: &'a DB,
    inner: rocksdb_ffi::DBSnapshot,
}
// We need to find a better way to add a lifetime in here.
#[allow(dead_code)]
pub struct DBIterator<'a> {
    db: &'a DB,
    inner: rocksdb_ffi::DBIterator,
}
/// Where to position an iterator: the first key, the last key, or a given key.
pub enum SeekKey<'a> {
    Start,
    End,
    Key(&'a [u8]),
}
impl<'a> From<&'a [u8]> for SeekKey<'a> {
    fn from(bs: &'a [u8]) -> SeekKey {
        SeekKey::Key(bs)
    }
}
impl<'a> DBIterator<'a> {
    /// Create an iterator over the whole DB with the given read options.
    fn new(db: &'a DB, readopts: &ReadOptions) -> DBIterator<'a> {
        unsafe {
            let iterator = rocksdb_ffi::rocksdb_create_iterator(db.inner,
                                                                readopts.inner);
            DBIterator {
                db: db,
                inner: iterator,
            }
        }
    }
    /// Position the iterator; returns whether it now points at an entry.
    pub fn seek(&mut self, key: SeekKey) -> bool {
        unsafe {
            match key {
                SeekKey::Start => {
                    rocksdb_ffi::rocksdb_iter_seek_to_first(self.inner)
                }
                SeekKey::End => {
                    rocksdb_ffi::rocksdb_iter_seek_to_last(self.inner)
                }
                SeekKey::Key(key) => {
                    rocksdb_ffi::rocksdb_iter_seek(self.inner,
                                                   key.as_ptr(),
                                                   key.len() as size_t)
                }
            }
        }
        self.valid()
    }
    /// Step backward; returns whether the iterator is still valid.
    pub fn prev(&mut self) -> bool {
        unsafe {
            rocksdb_ffi::rocksdb_iter_prev(self.inner);
        }
        self.valid()
    }
    /// Step forward; returns whether the iterator is still valid.
    pub fn next(&mut self) -> bool {
        unsafe {
            rocksdb_ffi::rocksdb_iter_next(self.inner);
        }
        self.valid()
    }
    /// Borrow the current key.
    ///
    /// # Panics
    ///
    /// Panics if the iterator is not positioned at a valid entry.
    pub fn key(&self) -> &[u8] {
        assert!(self.valid());
        let mut key_len: size_t = 0;
        let key_len_ptr: *mut size_t = &mut key_len;
        unsafe {
            // SAFETY: validity was asserted above, so rocksdb returns a live
            // pointer/length pair for the current entry.
            let key_ptr = rocksdb_ffi::rocksdb_iter_key(self.inner,
                                                        key_len_ptr);
            slice::from_raw_parts(key_ptr, key_len as usize)
        }
    }
    /// Borrow the current value.
    ///
    /// # Panics
    ///
    /// Panics if the iterator is not positioned at a valid entry.
    pub fn value(&self) -> &[u8] {
        assert!(self.valid());
        let mut val_len: size_t = 0;
        let val_len_ptr: *mut size_t = &mut val_len;
        unsafe {
            // SAFETY: validity was asserted above (see `key`).
            let val_ptr = rocksdb_ffi::rocksdb_iter_value(self.inner,
                                                          val_len_ptr);
            slice::from_raw_parts(val_ptr, val_len as usize)
        }
    }
    /// Owned copy of the current entry, or `None` when the iterator is invalid.
    pub fn kv(&self) -> Option<(Vec<u8>, Vec<u8>)> {
        if self.valid() {
            Some((self.key().to_vec(), self.value().to_vec()))
        } else {
            None
        }
    }
    /// Whether the iterator currently points at an entry.
    pub fn valid(&self) -> bool {
        unsafe { rocksdb_ffi::rocksdb_iter_valid(self.inner) }
    }
    /// Create an iterator over one column family, pre-seeked to `key`.
    fn new_cf(db: &'a DB,
              cf_handle: DBCFHandle,
              readopts: &ReadOptions,
              key: SeekKey)
              -> Result<DBIterator<'a>, String> {
        unsafe {
            let iterator =
                rocksdb_ffi::rocksdb_create_iterator_cf(db.inner,
                                                        readopts.inner,
                                                        cf_handle);
            let mut rv = DBIterator {
                db: db,
                inner: iterator,
            };
            rv.seek(key);
            Ok(rv)
        }
    }
}
/// An owned key/value pair yielded by iteration.
pub type Kv = (Vec<u8>, Vec<u8>);
impl<'b, 'a> Iterator for &'b mut DBIterator<'a> {
    type Item = Kv;
    fn next(&mut self) -> Option<Kv> {
        // Yield the current entry (if any), then advance the underlying
        // iterator so the next call observes the following entry.
        let kv = self.kv();
        if kv.is_some() {
            DBIterator::next(self);
        }
        kv
    }
}
// Release the underlying C iterator when the Rust wrapper goes away.
impl<'a> Drop for DBIterator<'a> {
    fn drop(&mut self) {
        unsafe {
            rocksdb_ffi::rocksdb_iter_destroy(self.inner);
        }
    }
}
impl<'a> Snapshot<'a> {
    /// Take a snapshot of `db`; it is released when the Snapshot is dropped.
    pub fn new(db: &DB) -> Snapshot {
        let snapshot =
            unsafe { rocksdb_ffi::rocksdb_create_snapshot(db.inner) };
        Snapshot {
            db: db,
            inner: snapshot,
        }
    }
    /// Iterate the database as of this snapshot.
    pub fn iter(&self) -> DBIterator {
        let mut readopts = ReadOptions::new();
        readopts.set_snapshot(self);
        DBIterator::new(self.db, &readopts)
    }
    /// Read `key` as of this snapshot.
    pub fn get(&self, key: &[u8]) -> Result<Option<DBVector>, String> {
        let mut readopts = ReadOptions::new();
        readopts.set_snapshot(self);
        self.db.get_opt(key, &readopts)
    }
    /// Read `key` from the given column family as of this snapshot.
    pub fn get_cf(&self,
                  cf: DBCFHandle,
                  key: &[u8])
                  -> Result<Option<DBVector>, String> {
        let mut readopts = ReadOptions::new();
        readopts.set_snapshot(self);
        self.db.get_cf_opt(cf, key, &readopts)
    }
}
// Return the snapshot to rocksdb; the 'a lifetime guarantees the DB outlives it.
impl<'a> Drop for Snapshot<'a> {
    fn drop(&mut self) {
        unsafe {
            rocksdb_ffi::rocksdb_release_snapshot(self.db.inner, self.inner);
        }
    }
}
// This is for the DB and write batches to share the same API
pub trait Writable {
    /// Store `value` under `key` in the default column family.
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
    /// Store `value` under `key` in the given column family.
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String>;
    /// Merge `value` into `key` in the default column family.
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
    /// Merge `value` into `key` in the given column family.
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String>;
    /// Remove `key` from the default column family.
    fn delete(&self, key: &[u8]) -> Result<(), String>;
    /// Remove `key` from the given column family.
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String>;
}
/// A range of keys, `start_key` is included, but not `end_key`.
///
/// You should make sure `end_key` is not less than `start_key`.
pub struct Range<'a> {
    start_key: &'a [u8],
    end_key: &'a [u8],
}
impl<'a> Range<'a> {
    /// Build a range; asserts `start_key <= end_key` (byte-wise ordering).
    pub fn new(start_key: &'a [u8], end_key: &'a [u8]) -> Range<'a> {
        assert!(start_key <= end_key);
        Range {
            start_key: start_key,
            end_key: end_key,
        }
    }
}
impl DB {
/// Open (creating if missing) the database at `path` with default options.
pub fn open_default(path: &str) -> Result<DB, String> {
    let mut opts = Options::new();
    opts.create_if_missing(true);
    DB::open(&opts, path)
}
/// Open the database at `path` with only the default column family.
pub fn open(opts: &Options, path: &str) -> Result<DB, String> {
    DB::open_cf(opts, path, &[])
}
/// Open a database with the given column families.
///
/// The "default" column family is always opened in addition to `cfs`.
/// Returns an error when the path is not a valid CString, the directory
/// cannot be created, or rocksdb reports a failure.
pub fn open_cf(opts: &Options,
               path: &str,
               cfs: &[&str])
               -> Result<DB, String> {
    let cpath = match CString::new(path.as_bytes()) {
        Ok(c) => c,
        Err(_) => {
            return Err("Failed to convert path to CString when opening \
                        rocksdb"
                .to_owned())
        }
    };
    let cpath_ptr = cpath.as_ptr();
    let ospath = Path::new(path);
    if let Err(e) = fs::create_dir_all(&ospath) {
        return Err(format!("Failed to create rocksdb directory: \
                            src/rocksdb.rs: \
                            {:?}",
                           e));
    }
    // rocksdb reports failures by writing an error string through err_ptr.
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    let db: rocksdb_ffi::DBInstance;
    let mut cf_map = BTreeMap::new();
    if cfs.len() == 0 {
        unsafe {
            db = rocksdb_ffi::rocksdb_open(opts.inner,
                                           cpath_ptr as *const _,
                                           err_ptr);
        }
    } else {
        let mut cfs_v = cfs.to_vec();
        // Always open the default column family
        if !cfs_v.contains(&DEFAULT_COLUMN_FAMILY) {
            cfs_v.push(DEFAULT_COLUMN_FAMILY);
        }
        // We need to store our CStrings in an intermediate vector
        // so that their pointers remain valid.
        let c_cfs: Vec<CString> = cfs_v.iter()
            .map(|cf| CString::new(cf.as_bytes()).unwrap())
            .collect();
        let cfnames: Vec<*const _> = c_cfs.iter()
            .map(|cf| cf.as_ptr())
            .collect();
        // These handles will be populated by DB.
        let cfhandles: Vec<rocksdb_ffi::DBCFHandle> = cfs_v.iter()
            .map(|_| rocksdb_ffi::DBCFHandle(0 as *mut c_void))
            .collect();
        // TODO(tyler) allow options to be passed in.
        let cfopts: Vec<rocksdb_ffi::DBOptions> = cfs_v.iter()
            .map(|_| unsafe { rocksdb_ffi::rocksdb_options_create() })
            .collect();
        // Prepare to ship to C.
        let cfopts_ptr: *const rocksdb_ffi::DBOptions = cfopts.as_ptr();
        let handles: *const rocksdb_ffi::DBCFHandle = cfhandles.as_ptr();
        let nfam = cfs_v.len();
        unsafe {
            db = rocksdb_ffi::rocksdb_open_column_families(opts.inner, cpath_ptr as *const _,
                                                           nfam as c_int,
                                                           cfnames.as_ptr() as *const _,
                                                           cfopts_ptr, handles, err_ptr);
        }
        // A null handle means rocksdb failed to open that family.
        for handle in &cfhandles {
            if handle.0.is_null() {
                return Err("Received null column family handle from DB."
                    .to_owned());
            }
        }
        for (n, h) in cfs_v.iter().zip(cfhandles) {
            cf_map.insert((*n).to_owned(), h);
        }
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    if db.0.is_null() {
        return Err("Could not initialize database.".to_owned());
    }
    Ok(DB {
        inner: db,
        cfs: cf_map,
    })
}
/// Destroy the database files at `path`.
pub fn destroy(opts: &Options, path: &str) -> Result<(), String> {
    let cpath = CString::new(path.as_bytes()).unwrap();
    let cpath_ptr = cpath.as_ptr();
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        rocksdb_ffi::rocksdb_destroy_db(opts.inner,
                                        cpath_ptr as *const _,
                                        err_ptr);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(())
}
/// Attempt to repair a damaged database at `path`.
/// NOTE(review): takes `opts` by value, unlike `destroy`'s `&Options` —
/// kept as-is since changing it would break callers.
pub fn repair(opts: Options, path: &str) -> Result<(), String> {
    let cpath = CString::new(path.as_bytes()).unwrap();
    let cpath_ptr = cpath.as_ptr();
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        rocksdb_ffi::rocksdb_repair_db(opts.inner,
                                       cpath_ptr as *const _,
                                       err_ptr);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(())
}
/// Apply a write batch atomically with explicit write options.
pub fn write_opt(&self,
                 batch: WriteBatch,
                 writeopts: &WriteOptions)
                 -> Result<(), String> {
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        rocksdb_ffi::rocksdb_write(self.inner,
                                   writeopts.inner,
                                   batch.inner,
                                   err_ptr);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(())
}
/// Apply a write batch with default write options.
pub fn write(&self, batch: WriteBatch) -> Result<(), String> {
    self.write_opt(batch, &WriteOptions::new())
}
/// Apply a write batch with the write-ahead log disabled.
pub fn write_without_wal(&self, batch: WriteBatch) -> Result<(), String> {
    let mut wo = WriteOptions::new();
    wo.disable_wal(true);
    self.write_opt(batch, &wo)
}
/// Read `key` with explicit read options; `Ok(None)` means the key is absent.
pub fn get_opt(&self,
               key: &[u8],
               readopts: &ReadOptions)
               -> Result<Option<DBVector>, String> {
    if readopts.inner.0.is_null() {
        return Err("Unable to create rocksdb read options. This is a \
                    fairly trivial call, and its failure may be \
                    indicative of a mis-compiled or mis-loaded rocksdb \
                    library."
            .to_owned());
    }
    unsafe {
        // NOTE(review): rocksdb writes the value length back through
        // val_len_ptr even though it was cast from a non-mut local to
        // *const — confirm this matches the FFI declaration's mutability.
        let val_len: size_t = 0;
        let val_len_ptr = &val_len as *const size_t;
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        let val =
            rocksdb_ffi::rocksdb_get(self.inner,
                                     readopts.inner,
                                     key.as_ptr(),
                                     key.len() as size_t,
                                     val_len_ptr,
                                     err_ptr) as *mut u8;
        if !err.is_null() {
            return Err(error_message(err));
        }
        if val.is_null() {
            // Missing key is not an error.
            Ok(None)
        } else {
            // DBVector::from_c presumably takes ownership of the C buffer
            // — verify against its implementation.
            Ok(Some(DBVector::from_c(val, val_len)))
        }
    }
}
/// Read `key` with default read options.
pub fn get(&self, key: &[u8]) -> Result<Option<DBVector>, String> {
    self.get_opt(key, &ReadOptions::new())
}
/// Read `key` from a column family with explicit read options.
pub fn get_cf_opt(&self,
                  cf: DBCFHandle,
                  key: &[u8],
                  readopts: &ReadOptions)
                  -> Result<Option<DBVector>, String> {
    if readopts.inner.0.is_null() {
        return Err("Unable to create rocksdb read options. This is a \
                    fairly trivial call, and its failure may be \
                    indicative of a mis-compiled or mis-loaded rocksdb \
                    library."
            .to_owned());
    }
    unsafe {
        // See get_opt: length is written back through a *const cast.
        let val_len: size_t = 0;
        let val_len_ptr = &val_len as *const size_t;
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        let val =
            rocksdb_ffi::rocksdb_get_cf(self.inner,
                                        readopts.inner,
                                        cf,
                                        key.as_ptr(),
                                        key.len() as size_t,
                                        val_len_ptr,
                                        err_ptr) as *mut u8;
        if !err.is_null() {
            return Err(error_message(err));
        }
        if val.is_null() {
            // Missing key is not an error.
            Ok(None)
        } else {
            Ok(Some(DBVector::from_c(val, val_len)))
        }
    }
}
/// Read `key` from a column family with default read options.
pub fn get_cf(&self,
              cf: DBCFHandle,
              key: &[u8])
              -> Result<Option<DBVector>, String> {
    self.get_cf_opt(cf, key, &ReadOptions::new())
}
/// Create a new column family and register its handle in `self.cfs`.
pub fn create_cf(&mut self,
                 name: &str,
                 opts: &Options)
                 -> Result<DBCFHandle, String> {
    let cname = match CString::new(name.as_bytes()) {
        Ok(c) => c,
        Err(_) => {
            return Err("Failed to convert path to CString when opening \
                        rocksdb"
                .to_owned())
        }
    };
    let cname_ptr = cname.as_ptr();
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    let cf_handler = unsafe {
        let cf_handler =
            rocksdb_ffi::rocksdb_create_column_family(self.inner,
                                                      opts.inner,
                                                      cname_ptr as *const _,
                                                      err_ptr);
        // NOTE: the handle is inserted before the error check, so a failed
        // create may still register a handle — preserved as-is.
        self.cfs.insert(name.to_owned(), cf_handler);
        cf_handler
    };
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(cf_handler)
}
/// Drop (destroy) a column family by name.
///
/// Returns an error when `name` is unknown or rocksdb reports a failure.
/// NOTE(review): to preserve existing behavior the handle is intentionally
/// left in `self.cfs` after the drop; callers must not reuse it.
pub fn drop_cf(&mut self, name: &str) -> Result<(), String> {
    // Single lookup via match instead of `is_none()` + `unwrap()`; the old
    // `.clone()` on `format!(...)` was a redundant allocation — `format!`
    // already produces an owned String.
    let cf = match self.cfs.get(name) {
        Some(cf) => *cf,
        None => return Err(format!("Invalid column family: {}", name)),
    };
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        // SAFETY: `cf` came from this DB instance and `err_ptr` points to a
        // live local that rocksdb fills in on failure.
        rocksdb_ffi::rocksdb_drop_column_family(self.inner,
                                                cf,
                                                err_ptr);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(())
}
/// Look up the handle of a previously opened/created column family.
pub fn cf_handle(&self, name: &str) -> Option<&DBCFHandle> {
    self.cfs.get(name)
}
/// Iterate the whole database with default read options.
pub fn iter(&self) -> DBIterator {
    let opts = ReadOptions::new();
    DBIterator::new(&self, &opts)
}
/// Iterate one column family, starting from `key`.
pub fn iter_cf(&self,
               cf_handle: DBCFHandle,
               key: SeekKey)
               -> Result<DBIterator, String> {
    let opts = ReadOptions::new();
    DBIterator::new_cf(&self, cf_handle, &opts, key)
}
/// Take a point-in-time snapshot of this database.
pub fn snapshot(&self) -> Snapshot {
    Snapshot::new(self)
}
/// Store `value` under `key` with explicit write options.
pub fn put_opt(&self,
               key: &[u8],
               value: &[u8],
               writeopts: &WriteOptions)
               -> Result<(), String> {
    unsafe {
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_put(self.inner,
                                 writeopts.inner,
                                 key.as_ptr(),
                                 key.len() as size_t,
                                 value.as_ptr(),
                                 value.len() as size_t,
                                 err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
/// Store `value` under `key` in a column family with explicit write options.
pub fn put_cf_opt(&self,
                  cf: DBCFHandle,
                  key: &[u8],
                  value: &[u8],
                  writeopts: &WriteOptions)
                  -> Result<(), String> {
    unsafe {
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_put_cf(self.inner,
                                    writeopts.inner,
                                    cf,
                                    key.as_ptr(),
                                    key.len() as size_t,
                                    value.as_ptr(),
                                    value.len() as size_t,
                                    err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
/// Merge `value` into `key` with explicit write options.
pub fn merge_opt(&self,
                 key: &[u8],
                 value: &[u8],
                 writeopts: &WriteOptions)
                 -> Result<(), String> {
    unsafe {
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_merge(self.inner,
                                   writeopts.inner,
                                   key.as_ptr(),
                                   key.len() as size_t,
                                   value.as_ptr(),
                                   value.len() as size_t,
                                   err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
// Private: exposed to callers through the `Writable` impl below.
fn merge_cf_opt(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8],
                writeopts: &WriteOptions)
                -> Result<(), String> {
    unsafe {
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_merge_cf(self.inner,
                                      writeopts.inner,
                                      cf,
                                      key.as_ptr(),
                                      key.len() as size_t,
                                      value.as_ptr(),
                                      value.len() as size_t,
                                      err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
// Private: exposed to callers through the `Writable` impl below.
fn delete_opt(&self,
              key: &[u8],
              writeopts: &WriteOptions)
              -> Result<(), String> {
    unsafe {
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_delete(self.inner,
                                    writeopts.inner,
                                    key.as_ptr(),
                                    key.len() as size_t,
                                    err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
// Private: exposed to callers through the `Writable` impl below.
fn delete_cf_opt(&self,
                 cf: DBCFHandle,
                 key: &[u8],
                 writeopts: &WriteOptions)
                 -> Result<(), String> {
    unsafe {
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_delete_cf(self.inner,
                                       writeopts.inner,
                                       cf,
                                       key.as_ptr(),
                                       key.len() as size_t,
                                       err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
/// Flush all memtable data.
///
/// Due to lack of abi, only default cf is supported.
///
/// If sync, the flush will wait until the flush is done.
pub fn flush(&self, sync: bool) -> Result<(), String> {
    unsafe {
        let opts = rocksdb_ffi::rocksdb_flushoptions_create();
        rocksdb_ffi::rocksdb_flushoptions_set_wait(opts, sync);
        let mut err = 0 as *const i8;
        rocksdb_ffi::rocksdb_flush(self.inner, opts, &mut err);
        // Destroy the options before inspecting err so they are not leaked
        // on the error path.
        rocksdb_ffi::rocksdb_flushoptions_destroy(opts);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
/// Return the approximate file system space used by keys in each ranges.
///
/// Note that the returned sizes measure file system space usage, so
/// if the user data compresses by a factor of ten, the returned
/// sizes will be one-tenth the size of the corresponding user data size.
///
/// Due to lack of abi, only data flushed to disk is taken into account.
pub fn get_approximate_sizes(&self, ranges: &[Range]) -> Vec<u64> {
    self.get_approximate_sizes_cfopt(None, ranges)
}
/// Like `get_approximate_sizes`, restricted to one column family.
pub fn get_approximate_sizes_cf(&self,
                                cf: DBCFHandle,
                                ranges: &[Range])
                                -> Vec<u64> {
    self.get_approximate_sizes_cfopt(Some(cf), ranges)
}
// Shared implementation: marshals the ranges into parallel pointer/length
// arrays for the C API and lets it fill `sizes` in place.
fn get_approximate_sizes_cfopt(&self,
                               cf: Option<DBCFHandle>,
                               ranges: &[Range])
                               -> Vec<u64> {
    let start_keys: Vec<*const u8> = ranges.iter()
        .map(|x| x.start_key.as_ptr())
        .collect();
    let start_key_lens: Vec<u64> = ranges.iter()
        .map(|x| x.start_key.len() as u64)
        .collect();
    let end_keys: Vec<*const u8> = ranges.iter()
        .map(|x| x.end_key.as_ptr())
        .collect();
    let end_key_lens: Vec<u64> = ranges.iter()
        .map(|x| x.end_key.len() as u64)
        .collect();
    // One output slot per input range, written by the FFI call below.
    let mut sizes: Vec<u64> = vec![0; ranges.len()];
    let (n,
         start_key_ptr,
         start_key_len_ptr,
         end_key_ptr,
         end_key_len_ptr,
         size_ptr) = (ranges.len() as i32,
                      start_keys.as_ptr(),
                      start_key_lens.as_ptr(),
                      end_keys.as_ptr(),
                      end_key_lens.as_ptr(),
                      sizes.as_mut_ptr());
    match cf {
        None => unsafe {
            rocksdb_ffi::rocksdb_approximate_sizes(self.inner,
                                                   n,
                                                   start_key_ptr,
                                                   start_key_len_ptr,
                                                   end_key_ptr,
                                                   end_key_len_ptr,
                                                   size_ptr)
        },
        Some(cf) => unsafe {
            rocksdb_ffi::rocksdb_approximate_sizes_cf(self.inner,
                                                      cf,
                                                      n,
                                                      start_key_ptr,
                                                      start_key_len_ptr,
                                                      end_key_ptr,
                                                      end_key_len_ptr,
                                                      size_ptr)
        },
    }
    sizes
}
}
// `Writable` for `DB`: every operation delegates to its `_opt` variant
// with a freshly created default `WriteOptions`.
impl Writable for DB {
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        self.put_opt(key, value, &WriteOptions::new())
    }
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String> {
        self.put_cf_opt(cf, key, value, &WriteOptions::new())
    }
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        self.merge_opt(key, value, &WriteOptions::new())
    }
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String> {
        self.merge_cf_opt(cf, key, value, &WriteOptions::new())
    }
    fn delete(&self, key: &[u8]) -> Result<(), String> {
        self.delete_opt(key, &WriteOptions::new())
    }
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String> {
        self.delete_cf_opt(cf, key, &WriteOptions::new())
    }
}
impl Default for WriteBatch {
    fn default() -> WriteBatch {
        WriteBatch {
            // Allocates the underlying C write batch; released in `Drop`.
            inner: unsafe { rocksdb_ffi::rocksdb_writebatch_create() },
        }
    }
}
impl WriteBatch {
    /// Creates an empty write batch (alias for `Default`).
    pub fn new() -> WriteBatch {
        WriteBatch::default()
    }
}
impl Drop for WriteBatch {
    fn drop(&mut self) {
        unsafe { rocksdb_ffi::rocksdb_writebatch_destroy(self.inner) }
    }
}
impl Drop for DB {
    fn drop(&mut self) {
        unsafe {
            // Column family handles must be destroyed before closing the DB.
            for cf in self.cfs.values() {
                rocksdb_ffi::rocksdb_column_family_handle_destroy(*cf);
            }
            rocksdb_ffi::rocksdb_close(self.inner);
        }
    }
}
// `Writable` for `WriteBatch`: operations only buffer edits inside the
// batch (the C calls cannot fail here), so every method returns `Ok(())`.
// Edits take effect when the batch is passed to `DB::write`.
impl Writable for WriteBatch {
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_put(self.inner,
                                                key.as_ptr(),
                                                key.len() as size_t,
                                                value.as_ptr(),
                                                value.len() as size_t);
            Ok(())
        }
    }
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_put_cf(self.inner,
                                                   cf,
                                                   key.as_ptr(),
                                                   key.len() as size_t,
                                                   value.as_ptr(),
                                                   value.len() as size_t);
            Ok(())
        }
    }
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_merge(self.inner,
                                                  key.as_ptr(),
                                                  key.len() as size_t,
                                                  value.as_ptr(),
                                                  value.len() as size_t);
            Ok(())
        }
    }
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_merge_cf(self.inner,
                                                     cf,
                                                     key.as_ptr(),
                                                     key.len() as size_t,
                                                     value.as_ptr(),
                                                     value.len() as size_t);
            Ok(())
        }
    }
    fn delete(&self, key: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_delete(self.inner,
                                                   key.as_ptr(),
                                                   key.len() as size_t);
            Ok(())
        }
    }
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_delete_cf(self.inner,
                                                      cf,
                                                      key.as_ptr(),
                                                      key.len() as size_t);
            Ok(())
        }
    }
}
impl Drop for ReadOptions {
    fn drop(&mut self) {
        unsafe { rocksdb_ffi::rocksdb_readoptions_destroy(self.inner) }
    }
}
impl Default for ReadOptions {
    fn default() -> ReadOptions {
        unsafe {
            ReadOptions { inner: rocksdb_ffi::rocksdb_readoptions_create() }
        }
    }
}
impl ReadOptions {
    fn new() -> ReadOptions {
        ReadOptions::default()
    }
    // TODO add snapshot setting here
    // TODO add snapshot wrapper structs with proper destructors;
    // that struct needs an "iterator" impl too.
    #[allow(dead_code)]
    fn fill_cache(&mut self, v: bool) {
        unsafe {
            rocksdb_ffi::rocksdb_readoptions_set_fill_cache(self.inner, v);
        }
    }
    // Pin reads to `snapshot`; the snapshot must outlive these options.
    fn set_snapshot(&mut self, snapshot: &Snapshot) {
        unsafe {
            rocksdb_ffi::rocksdb_readoptions_set_snapshot(self.inner,
                                                          snapshot.inner);
        }
    }
}
/// A value returned by `get`: wraps the buffer handed back by the RocksDB
/// C API and frees it with `libc::free` on drop.
pub struct DBVector {
    base: *mut u8, // C-allocated value bytes; owned by this struct
    len: usize,    // value length in bytes
}
impl Deref for DBVector {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.base, self.len) }
    }
}
impl Drop for DBVector {
    fn drop(&mut self) {
        unsafe {
            libc::free(self.base as *mut c_void);
        }
    }
}
impl DBVector {
    /// Takes ownership of a buffer allocated by the RocksDB C API.
    pub fn from_c(val: *mut u8, val_len: size_t) -> DBVector {
        DBVector {
            base: val,
            len: val_len as usize,
        }
    }
    /// Returns the value as `&str` if it is valid UTF-8, else `None`.
    pub fn to_utf8(&self) -> Option<&str> {
        from_utf8(self.deref()).ok()
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use rocksdb_options::*;
    use std::str;
    use tempdir::TempDir;
    // Round-trips a put/get/delete through a temporary database.
    #[test]
    fn external() {
        let path = TempDir::new("_rust_rocksdb_externaltest").expect("");
        let db = DB::open_default(path.path().to_str().unwrap()).unwrap();
        let p = db.put(b"k1", b"v1111");
        assert!(p.is_ok());
        let r: Result<Option<DBVector>, String> = db.get(b"k1");
        assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        assert!(db.delete(b"k1").is_ok());
        assert!(db.get(b"k1").unwrap().is_none());
    }
    #[allow(unused_variables)]
    #[test]
    fn errors_do_stuff() {
        let path = TempDir::new("_rust_rocksdb_error").expect("");
        let path_str = path.path().to_str().unwrap();
        let db = DB::open_default(path_str).unwrap();
        let opts = Options::new();
        // The DB will still be open when we try to destroy and the lock should fail
        match DB::destroy(&opts, path_str) {
            // NOTE(review): asserts an exact OS-dependent error string;
            // may be brittle across platforms.
            Err(ref s) => assert!(s.contains("LOCK: No locks available")),
            Ok(_) => panic!("should fail"),
        }
    }
    #[test]
    fn writebatch_works() {
        let path = TempDir::new("_rust_rocksdb_writebacktest").expect("");
        let db = DB::open_default(path.path().to_str().unwrap()).unwrap();
        // test put
        let batch = WriteBatch::new();
        assert!(db.get(b"k1").unwrap().is_none());
        let _ = batch.put(b"k1", b"v1111");
        // Batched edits are invisible until the batch is written.
        assert!(db.get(b"k1").unwrap().is_none());
        let p = db.write(batch);
        assert!(p.is_ok());
        let r: Result<Option<DBVector>, String> = db.get(b"k1");
        assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        // test delete
        let batch = WriteBatch::new();
        let _ = batch.delete(b"k1");
        let p = db.write(batch);
        assert!(p.is_ok());
        assert!(db.get(b"k1").unwrap().is_none());
    }
    #[test]
    fn iterator_test() {
        let path = TempDir::new("_rust_rocksdb_iteratortest").expect("");
        let db = DB::open_default(path.path().to_str().unwrap()).unwrap();
        db.put(b"k1", b"v1111").expect("");
        db.put(b"k2", b"v2222").expect("");
        db.put(b"k3", b"v3333").expect("");
        let mut iter = db.iter();
        iter.seek(SeekKey::Start);
        for (k, v) in &mut iter {
            println!("Hello {}: {}",
                     str::from_utf8(&*k).unwrap(),
                     str::from_utf8(&*v).unwrap());
        }
    }
    #[test]
    fn approximate_size_test() {
        let path = TempDir::new("_rust_rocksdb_iteratortest").expect("");
        let db = DB::open_default(path.path().to_str().unwrap()).unwrap();
        for i in 1..8000 {
            db.put(format!("{:04}", i).as_bytes(),
                   format!("{:04}", i).as_bytes())
              .expect("");
        }
        db.flush(true).expect("");
        assert!(db.get(b"0001").expect("").is_some());
        db.flush(true).expect("");
        let sizes = db.get_approximate_sizes(&[Range::new(b"0000", b"2000"),
                                               Range::new(b"2000", b"4000"),
                                               Range::new(b"4000", b"6000"),
                                               Range::new(b"6000", b"8000"),
                                               Range::new(b"8000", b"9999")]);
        assert_eq!(sizes.len(), 5);
        // Only the last range lies entirely past the inserted keys.
        for s in &sizes[0..4] {
            assert!(*s > 0);
        }
        assert_eq!(sizes[4], 0);
    }
}
// NOTE(review): this test sits outside `mod test` and uses a fixed on-disk
// path rather than a TempDir; if an assertion fails before the final
// destroy, the directory is left behind and reruns may fail.
#[test]
fn snapshot_test() {
    let path = "_rust_rocksdb_snapshottest";
    {
        let db = DB::open_default(path).unwrap();
        let p = db.put(b"k1", b"v1111");
        assert!(p.is_ok());
        let snap = db.snapshot();
        let mut r: Result<Option<DBVector>, String> = snap.get(b"k1");
        assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        r = db.get(b"k1");
        assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        let p = db.put(b"k2", b"v2222");
        assert!(p.is_ok());
        // Writes after the snapshot are visible to the DB, not the snapshot.
        assert!(db.get(b"k2").unwrap().is_some());
        assert!(snap.get(b"k2").unwrap().is_none());
    }
    let opts = Options::new();
    assert!(DB::destroy(&opts, path).is_ok());
}
// commit: save path for later use
// Copyright 2014 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use std::collections::BTreeMap;
use std::ffi::CString;
use std::fs;
use std::ops::Deref;
use std::path::Path;
use std::slice;
use std::str::from_utf8;
use libc::{self, c_int, c_void, size_t};
use rocksdb_ffi::{self, DBCFHandle, error_message};
use rocksdb_options::{Options, WriteOptions};
// Name of the column family RocksDB always creates.
const DEFAULT_COLUMN_FAMILY: &'static str = "default";
/// Owning handle to an open RocksDB instance.
pub struct DB {
    inner: rocksdb_ffi::DBInstance,
    // column family name -> C handle; populated by open_cf/create_cf
    cfs: BTreeMap<String, DBCFHandle>,
    // filesystem path the DB was opened at
    path: String,
}
// SAFETY: relies on the RocksDB C API being safe for concurrent use of a
// single DB instance across threads — TODO confirm this holds for every
// method exposed by this wrapper.
unsafe impl Send for DB {}
unsafe impl Sync for DB {}
/// A buffered set of edits applied atomically via `DB::write`.
pub struct WriteBatch {
    inner: rocksdb_ffi::DBWriteBatch,
}
/// Owned wrapper around the C read-options object.
pub struct ReadOptions {
    inner: rocksdb_ffi::DBReadOptions,
}
/// A read-only view of the DB, consistent as of creation time.
pub struct Snapshot<'a> {
    db: &'a DB,
    inner: rocksdb_ffi::DBSnapshot,
}
// We need to find a better way to add a lifetime in here.
#[allow(dead_code)]
pub struct DBIterator<'a> {
    db: &'a DB,
    inner: rocksdb_ffi::DBIterator,
}
/// Target of a seek: the first entry, the last entry, or a specific key.
pub enum SeekKey<'a> {
    Start,
    End,
    Key(&'a [u8]),
}
impl<'a> From<&'a [u8]> for SeekKey<'a> {
    fn from(bs: &'a [u8]) -> SeekKey {
        SeekKey::Key(bs)
    }
}
impl<'a> DBIterator<'a> {
    // Creates an iterator over the default column family.
    fn new(db: &'a DB, readopts: &ReadOptions) -> DBIterator<'a> {
        unsafe {
            let iterator = rocksdb_ffi::rocksdb_create_iterator(db.inner,
                                                                readopts.inner);
            DBIterator {
                db: db,
                inner: iterator,
            }
        }
    }
    /// Positions the cursor and reports whether it landed on a valid entry.
    pub fn seek(&mut self, key: SeekKey) -> bool {
        unsafe {
            match key {
                SeekKey::Start => {
                    rocksdb_ffi::rocksdb_iter_seek_to_first(self.inner)
                }
                SeekKey::End => {
                    rocksdb_ffi::rocksdb_iter_seek_to_last(self.inner)
                }
                SeekKey::Key(key) => {
                    rocksdb_ffi::rocksdb_iter_seek(self.inner,
                                                   key.as_ptr(),
                                                   key.len() as size_t)
                }
            }
        }
        self.valid()
    }
    /// Steps backward; returns whether the new position is valid.
    pub fn prev(&mut self) -> bool {
        unsafe {
            rocksdb_ffi::rocksdb_iter_prev(self.inner);
        }
        self.valid()
    }
    /// Steps forward; returns whether the new position is valid.
    pub fn next(&mut self) -> bool {
        unsafe {
            rocksdb_ffi::rocksdb_iter_next(self.inner);
        }
        self.valid()
    }
    /// Borrows the current key. Panics if the iterator is not valid.
    pub fn key(&self) -> &[u8] {
        assert!(self.valid());
        let mut key_len: size_t = 0;
        let key_len_ptr: *mut size_t = &mut key_len;
        unsafe {
            let key_ptr = rocksdb_ffi::rocksdb_iter_key(self.inner,
                                                        key_len_ptr);
            slice::from_raw_parts(key_ptr, key_len as usize)
        }
    }
    /// Borrows the current value. Panics if the iterator is not valid.
    pub fn value(&self) -> &[u8] {
        assert!(self.valid());
        let mut val_len: size_t = 0;
        let val_len_ptr: *mut size_t = &mut val_len;
        unsafe {
            let val_ptr = rocksdb_ffi::rocksdb_iter_value(self.inner,
                                                          val_len_ptr);
            slice::from_raw_parts(val_ptr, val_len as usize)
        }
    }
    /// Copies the current key/value pair, or `None` when out of range.
    pub fn kv(&self) -> Option<(Vec<u8>, Vec<u8>)> {
        if self.valid() {
            Some((self.key().to_vec(), self.value().to_vec()))
        } else {
            None
        }
    }
    /// True while the cursor points at an entry.
    pub fn valid(&self) -> bool {
        unsafe { rocksdb_ffi::rocksdb_iter_valid(self.inner) }
    }
    // Creates an iterator over one column family, pre-positioned at `key`.
    // NOTE(review): currently always returns Ok; the Result wrapper is
    // reserved for future failure modes.
    fn new_cf(db: &'a DB,
              cf_handle: DBCFHandle,
              readopts: &ReadOptions,
              key: SeekKey)
              -> Result<DBIterator<'a>, String> {
        unsafe {
            let iterator =
                rocksdb_ffi::rocksdb_create_iterator_cf(db.inner,
                                                        readopts.inner,
                                                        cf_handle);
            let mut rv = DBIterator {
                db: db,
                inner: iterator,
            };
            rv.seek(key);
            Ok(rv)
        }
    }
}
/// An owned key/value pair.
pub type Kv = (Vec<u8>, Vec<u8>);
impl<'b, 'a> Iterator for &'b mut DBIterator<'a> {
    type Item = Kv;
    // Yields the current entry, then advances the underlying cursor.
    fn next(&mut self) -> Option<Kv> {
        let kv = self.kv();
        if kv.is_some() {
            DBIterator::next(self);
        }
        kv
    }
}
impl<'a> Drop for DBIterator<'a> {
    fn drop(&mut self) {
        unsafe {
            rocksdb_ffi::rocksdb_iter_destroy(self.inner);
        }
    }
}
impl<'a> Snapshot<'a> {
    /// Captures a point-in-time view of `db`; released on drop.
    pub fn new(db: &DB) -> Snapshot {
        let snapshot =
            unsafe { rocksdb_ffi::rocksdb_create_snapshot(db.inner) };
        Snapshot {
            db: db,
            inner: snapshot,
        }
    }
    /// Iterates the DB as it looked when the snapshot was taken.
    pub fn iter(&self) -> DBIterator {
        let mut readopts = ReadOptions::new();
        readopts.set_snapshot(self);
        DBIterator::new(self.db, &readopts)
    }
    /// Reads `key` from the snapshot view (default column family).
    pub fn get(&self, key: &[u8]) -> Result<Option<DBVector>, String> {
        let mut readopts = ReadOptions::new();
        readopts.set_snapshot(self);
        self.db.get_opt(key, &readopts)
    }
    /// Reads `key` from the snapshot view of one column family.
    pub fn get_cf(&self,
                  cf: DBCFHandle,
                  key: &[u8])
                  -> Result<Option<DBVector>, String> {
        let mut readopts = ReadOptions::new();
        readopts.set_snapshot(self);
        self.db.get_cf_opt(cf, key, &readopts)
    }
}
impl<'a> Drop for Snapshot<'a> {
    fn drop(&mut self) {
        unsafe {
            rocksdb_ffi::rocksdb_release_snapshot(self.db.inner, self.inner);
        }
    }
}
// This is for the DB and write batches to share the same API
pub trait Writable {
    /// Stores `value` under `key`.
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
    /// Stores `value` under `key` in column family `cf`.
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String>;
    /// Combines `value` with the existing entry via the DB's merge operator.
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
    /// `merge` scoped to column family `cf`.
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String>;
    /// Removes `key` if present.
    fn delete(&self, key: &[u8]) -> Result<(), String>;
    /// `delete` scoped to column family `cf`.
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String>;
}
/// A range of keys, `start_key` is included, but not `end_key`.
///
/// You should make sure `end_key` is not less than `start_key`.
pub struct Range<'a> {
    start_key: &'a [u8],
    end_key: &'a [u8],
}
impl<'a> Range<'a> {
    /// Builds the range; panics if `start_key > end_key`.
    pub fn new(start_key: &'a [u8], end_key: &'a [u8]) -> Range<'a> {
        assert!(start_key <= end_key);
        Range {
            start_key: start_key,
            end_key: end_key,
        }
    }
}
impl DB {
/// Opens (creating it if necessary) a DB at `path` with default options.
pub fn open_default(path: &str) -> Result<DB, String> {
    let mut default_opts = Options::new();
    default_opts.create_if_missing(true);
    DB::open(&default_opts, path)
}
/// Opens a DB at `path` with only the default column family.
pub fn open(opts: &Options, path: &str) -> Result<DB, String> {
    DB::open_cf(opts, path, &[])
}
/// Opens a DB together with the named column families. The default
/// column family is always opened, whether listed or not.
pub fn open_cf(opts: &Options,
               path: &str,
               cfs: &[&str])
               -> Result<DB, String> {
    let cpath = match CString::new(path.as_bytes()) {
        Ok(c) => c,
        Err(_) => {
            return Err("Failed to convert path to CString when opening \
                        rocksdb"
                           .to_owned())
        }
    };
    // `cpath` lives to the end of the function, so this raw pointer
    // stays valid across the FFI calls below.
    let cpath_ptr = cpath.as_ptr();
    let ospath = Path::new(path);
    if let Err(e) = fs::create_dir_all(&ospath) {
        return Err(format!("Failed to create rocksdb directory: \
                            src/rocksdb.rs: \
                            {:?}",
                           e));
    }
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    let db: rocksdb_ffi::DBInstance;
    let mut cf_map = BTreeMap::new();
    if cfs.len() == 0 {
        unsafe {
            db = rocksdb_ffi::rocksdb_open(opts.inner,
                                           cpath_ptr as *const _,
                                           err_ptr);
        }
    } else {
        let mut cfs_v = cfs.to_vec();
        // Always open the default column family
        if !cfs_v.contains(&DEFAULT_COLUMN_FAMILY) {
            cfs_v.push(DEFAULT_COLUMN_FAMILY);
        }
        // We need to store our CStrings in an intermediate vector
        // so that their pointers remain valid.
        let c_cfs: Vec<CString> = cfs_v.iter()
            .map(|cf| CString::new(cf.as_bytes()).unwrap())
            .collect();
        let cfnames: Vec<*const _> = c_cfs.iter()
            .map(|cf| cf.as_ptr())
            .collect();
        // These handles will be populated by DB.
        // NOTE(review): the C side writes through `handles` even though
        // `cfhandles` is not declared `mut`; works in practice but is
        // worth tightening.
        let cfhandles: Vec<rocksdb_ffi::DBCFHandle> = cfs_v.iter()
            .map(|_| rocksdb_ffi::DBCFHandle(0 as *mut c_void))
            .collect();
        // TODO(tyler) allow options to be passed in.
        let cfopts: Vec<rocksdb_ffi::DBOptions> = cfs_v.iter()
            .map(|_| unsafe { rocksdb_ffi::rocksdb_options_create() })
            .collect();
        // Prepare to ship to C.
        let cfopts_ptr: *const rocksdb_ffi::DBOptions = cfopts.as_ptr();
        let handles: *const rocksdb_ffi::DBCFHandle = cfhandles.as_ptr();
        let nfam = cfs_v.len();
        unsafe {
            db = rocksdb_ffi::rocksdb_open_column_families(opts.inner, cpath_ptr as *const _,
                                                           nfam as c_int,
                                                           cfnames.as_ptr() as *const _,
                                                           cfopts_ptr, handles, err_ptr);
        }
        for handle in &cfhandles {
            if handle.0.is_null() {
                return Err("Received null column family handle from DB."
                               .to_owned());
            }
        }
        for (n, h) in cfs_v.iter().zip(cfhandles) {
            cf_map.insert((*n).to_owned(), h);
        }
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    if db.0.is_null() {
        return Err("Could not initialize database.".to_owned());
    }
    Ok(DB {
        inner: db,
        cfs: cf_map,
        path: path.to_owned(),
    })
}
/// Deletes the DB files at `path`. Fails while the DB is open (lock held).
pub fn destroy(opts: &Options, path: &str) -> Result<(), String> {
    let cpath = CString::new(path.as_bytes()).unwrap();
    let cpath_ptr = cpath.as_ptr();
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        rocksdb_ffi::rocksdb_destroy_db(opts.inner,
                                        cpath_ptr as *const _,
                                        err_ptr);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(())
}
/// Attempts to repair a damaged DB at `path`.
// NOTE(review): takes `opts` by value, unlike `destroy`'s `&Options`;
// kept as-is because changing it would break callers.
pub fn repair(opts: Options, path: &str) -> Result<(), String> {
    let cpath = CString::new(path.as_bytes()).unwrap();
    let cpath_ptr = cpath.as_ptr();
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        rocksdb_ffi::rocksdb_repair_db(opts.inner,
                                       cpath_ptr as *const _,
                                       err_ptr);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(())
}
/// Returns the filesystem path this DB was opened with.
pub fn path(&self) -> &str {
    &self.path
}
/// Applies `batch` atomically using explicit write options.
pub fn write_opt(&self,
                 batch: WriteBatch,
                 writeopts: &WriteOptions)
                 -> Result<(), String> {
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        rocksdb_ffi::rocksdb_write(self.inner,
                                   writeopts.inner,
                                   batch.inner,
                                   err_ptr);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    Ok(())
}
/// Applies `batch` atomically with default write options.
pub fn write(&self, batch: WriteBatch) -> Result<(), String> {
    self.write_opt(batch, &WriteOptions::new())
}
/// Applies `batch` atomically with the write-ahead log disabled.
pub fn write_without_wal(&self, batch: WriteBatch) -> Result<(), String> {
    let mut opts = WriteOptions::new();
    opts.disable_wal(true);
    self.write_opt(batch, &opts)
}
/// Reads the value for `key` from the default column family with the
/// supplied read options.
///
/// Returns `Ok(None)` when the key is absent, `Err` with the RocksDB
/// error string on failure.
pub fn get_opt(&self,
               key: &[u8],
               readopts: &ReadOptions)
               -> Result<Option<DBVector>, String> {
    if readopts.inner.0.is_null() {
        return Err("Unable to create rocksdb read options. This is a \
                    fairly trivial call, and its failure may be \
                    indicative of a mis-compiled or mis-loaded rocksdb \
                    library."
                       .to_owned());
    }
    unsafe {
        // `rocksdb_get` writes the value length through this pointer.
        // The original derived it from an immutable binding
        // (`&val_len as *const size_t`), which is undefined behavior
        // when C writes through it; derive it from a `mut` binding.
        let mut val_len: size_t = 0;
        let val_len_ptr = &mut val_len as *mut size_t as *const size_t;
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        let val =
            rocksdb_ffi::rocksdb_get(self.inner,
                                     readopts.inner,
                                     key.as_ptr(),
                                     key.len() as size_t,
                                     val_len_ptr,
                                     err_ptr) as *mut u8;
        if !err.is_null() {
            return Err(error_message(err));
        }
        if val.is_null() {
            Ok(None)
        } else {
            // DBVector takes ownership and frees with libc::free.
            Ok(Some(DBVector::from_c(val, val_len)))
        }
    }
}
/// Reads `key` from the default column family with default read options.
pub fn get(&self, key: &[u8]) -> Result<Option<DBVector>, String> {
    self.get_opt(key, &ReadOptions::new())
}
/// Reads the value for `key` from column family `cf` with the supplied
/// read options.
///
/// Returns `Ok(None)` when the key is absent, `Err` with the RocksDB
/// error string on failure.
pub fn get_cf_opt(&self,
                  cf: DBCFHandle,
                  key: &[u8],
                  readopts: &ReadOptions)
                  -> Result<Option<DBVector>, String> {
    if readopts.inner.0.is_null() {
        return Err("Unable to create rocksdb read options. This is a \
                    fairly trivial call, and its failure may be \
                    indicative of a mis-compiled or mis-loaded rocksdb \
                    library."
                       .to_owned());
    }
    unsafe {
        // Same fix as `get_opt`: the length out-parameter must come
        // from a `mut` binding, since the C side writes through it;
        // casting `&val_len` (immutable) to a writable pointer is UB.
        let mut val_len: size_t = 0;
        let val_len_ptr = &mut val_len as *mut size_t as *const size_t;
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        let val =
            rocksdb_ffi::rocksdb_get_cf(self.inner,
                                        readopts.inner,
                                        cf,
                                        key.as_ptr(),
                                        key.len() as size_t,
                                        val_len_ptr,
                                        err_ptr) as *mut u8;
        if !err.is_null() {
            return Err(error_message(err));
        }
        if val.is_null() {
            Ok(None)
        } else {
            // DBVector takes ownership and frees with libc::free.
            Ok(Some(DBVector::from_c(val, val_len)))
        }
    }
}
/// Reads `key` from column family `cf` with default read options.
pub fn get_cf(&self,
              cf: DBCFHandle,
              key: &[u8])
              -> Result<Option<DBVector>, String> {
    self.get_cf_opt(cf, key, &ReadOptions::new())
}
/// Creates a new column family named `name` and registers its handle.
///
/// Returns the new handle, or the RocksDB error string on failure.
pub fn create_cf(&mut self,
                 name: &str,
                 opts: &Options)
                 -> Result<DBCFHandle, String> {
    let cname = match CString::new(name.as_bytes()) {
        Ok(c) => c,
        Err(_) => {
            return Err("Failed to convert path to CString when opening \
                        rocksdb"
                           .to_owned())
        }
    };
    let cname_ptr = cname.as_ptr();
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    let cf_handler = unsafe {
        rocksdb_ffi::rocksdb_create_column_family(self.inner,
                                                  opts.inner,
                                                  cname_ptr as *const _,
                                                  err_ptr)
    };
    if !err.is_null() {
        return Err(error_message(err));
    }
    // Record the handle only after the error check; the original
    // inserted it into `cfs` before checking `err`, leaving a bogus
    // handle in the map when creation failed.
    self.cfs.insert(name.to_owned(), cf_handler);
    Ok(cf_handler)
}
/// Drops the column family named `name` from the database.
///
/// On success the handle is removed from the internal map and destroyed,
/// so `cf_handle(name)` no longer returns a handle to a dropped column
/// family and `Drop for DB` does not touch it again. The original left
/// the stale handle in `self.cfs`.
pub fn drop_cf(&mut self, name: &str) -> Result<(), String> {
    // Copy the handle out so the map is untouched if the FFI call fails.
    let cf = match self.cfs.get(name) {
        Some(cf) => *cf,
        None => return Err(format!("Invalid column family: {}", name)),
    };
    let mut err: *const i8 = 0 as *const i8;
    let err_ptr: *mut *const i8 = &mut err;
    unsafe {
        rocksdb_ffi::rocksdb_drop_column_family(self.inner,
                                                cf,
                                                err_ptr);
    }
    if !err.is_null() {
        return Err(error_message(err));
    }
    // Dropping the CF does not free the handle; destroy it now instead
    // of leaving that to `Drop for DB`.
    self.cfs.remove(name);
    unsafe {
        rocksdb_ffi::rocksdb_column_family_handle_destroy(cf);
    }
    Ok(())
}
/// Looks up the handle for a named column family, if this DB opened it.
pub fn cf_handle(&self, name: &str) -> Option<&DBCFHandle> {
    self.cfs.get(name)
}
/// Iterates the default column family with default read options.
pub fn iter(&self) -> DBIterator {
    let opts = ReadOptions::new();
    DBIterator::new(&self, &opts)
}
/// Iterates one column family, pre-positioned at `key`.
pub fn iter_cf(&self,
               cf_handle: DBCFHandle,
               key: SeekKey)
               -> Result<DBIterator, String> {
    let opts = ReadOptions::new();
    DBIterator::new_cf(&self, cf_handle, &opts, key)
}
/// Captures a consistent point-in-time view of the DB.
pub fn snapshot(&self) -> Snapshot {
    Snapshot::new(self)
}
/// Stores `value` under `key` with explicit write options.
pub fn put_opt(&self,
               key: &[u8],
               value: &[u8],
               writeopts: &WriteOptions)
               -> Result<(), String> {
    unsafe {
        // C-style error out-parameter; stays null on success.
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_put(self.inner,
                                 writeopts.inner,
                                 key.as_ptr(),
                                 key.len() as size_t,
                                 value.as_ptr(),
                                 value.len() as size_t,
                                 err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
/// `put_opt` scoped to column family `cf`.
pub fn put_cf_opt(&self,
                  cf: DBCFHandle,
                  key: &[u8],
                  value: &[u8],
                  writeopts: &WriteOptions)
                  -> Result<(), String> {
    unsafe {
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_put_cf(self.inner,
                                    writeopts.inner,
                                    cf,
                                    key.as_ptr(),
                                    key.len() as size_t,
                                    value.as_ptr(),
                                    value.len() as size_t,
                                    err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
/// Merges `value` into `key` via the DB's merge operator.
pub fn merge_opt(&self,
                 key: &[u8],
                 value: &[u8],
                 writeopts: &WriteOptions)
                 -> Result<(), String> {
    unsafe {
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_merge(self.inner,
                                   writeopts.inner,
                                   key.as_ptr(),
                                   key.len() as size_t,
                                   value.as_ptr(),
                                   value.len() as size_t,
                                   err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
/// `merge_opt` scoped to column family `cf`.
fn merge_cf_opt(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8],
                writeopts: &WriteOptions)
                -> Result<(), String> {
    unsafe {
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_merge_cf(self.inner,
                                      writeopts.inner,
                                      cf,
                                      key.as_ptr(),
                                      key.len() as size_t,
                                      value.as_ptr(),
                                      value.len() as size_t,
                                      err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
/// Deletes `key` from the default column family with explicit write options.
fn delete_opt(&self,
              key: &[u8],
              writeopts: &WriteOptions)
              -> Result<(), String> {
    unsafe {
        // C-style error out-parameter; stays null on success.
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_delete(self.inner,
                                    writeopts.inner,
                                    key.as_ptr(),
                                    key.len() as size_t,
                                    err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
/// Deletes `key` from the given column family with explicit write options.
fn delete_cf_opt(&self,
                 cf: DBCFHandle,
                 key: &[u8],
                 writeopts: &WriteOptions)
                 -> Result<(), String> {
    unsafe {
        let mut err: *const i8 = 0 as *const i8;
        let err_ptr: *mut *const i8 = &mut err;
        rocksdb_ffi::rocksdb_delete_cf(self.inner,
                                       writeopts.inner,
                                       cf,
                                       key.as_ptr(),
                                       key.len() as size_t,
                                       err_ptr);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
/// Flush all memtable data.
///
/// Due to lack of abi, only default cf is supported.
///
/// If sync, the flush will wait until the flush is done.
pub fn flush(&self, sync: bool) -> Result<(), String> {
    unsafe {
        // Temporary C flush-options object; destroyed before the error check.
        let opts = rocksdb_ffi::rocksdb_flushoptions_create();
        rocksdb_ffi::rocksdb_flushoptions_set_wait(opts, sync);
        let mut err = 0 as *const i8;
        rocksdb_ffi::rocksdb_flush(self.inner, opts, &mut err);
        rocksdb_ffi::rocksdb_flushoptions_destroy(opts);
        if !err.is_null() {
            return Err(error_message(err));
        }
        Ok(())
    }
}
/// Return the approximate file system space used by keys in each ranges.
///
/// Note that the returned sizes measure file system space usage, so
/// if the user data compresses by a factor of ten, the returned
/// sizes will be one-tenth the size of the corresponding user data size.
///
/// Due to lack of abi, only data flushed to disk is taken into account.
pub fn get_approximate_sizes(&self, ranges: &[Range]) -> Vec<u64> {
    self.get_approximate_sizes_cfopt(None, ranges)
}
/// Same as `get_approximate_sizes`, scoped to one column family.
pub fn get_approximate_sizes_cf(&self,
                                cf: DBCFHandle,
                                ranges: &[Range])
                                -> Vec<u64> {
    self.get_approximate_sizes_cfopt(Some(cf), ranges)
}
// Shared implementation; `cf` selects the column family (None = default).
fn get_approximate_sizes_cfopt(&self,
                               cf: Option<DBCFHandle>,
                               ranges: &[Range])
                               -> Vec<u64> {
    // These vectors must stay alive until the FFI call returns; the C
    // side reads through the raw pointers taken from them below.
    let start_keys: Vec<*const u8> = ranges.iter()
        .map(|x| x.start_key.as_ptr())
        .collect();
    let start_key_lens: Vec<u64> = ranges.iter()
        .map(|x| x.start_key.len() as u64)
        .collect();
    let end_keys: Vec<*const u8> = ranges.iter()
        .map(|x| x.end_key.as_ptr())
        .collect();
    let end_key_lens: Vec<u64> = ranges.iter()
        .map(|x| x.end_key.len() as u64)
        .collect();
    // One output slot per range, written by RocksDB.
    let mut sizes: Vec<u64> = vec![0; ranges.len()];
    let (n,
         start_key_ptr,
         start_key_len_ptr,
         end_key_ptr,
         end_key_len_ptr,
         size_ptr) = (ranges.len() as i32,
                      start_keys.as_ptr(),
                      start_key_lens.as_ptr(),
                      end_keys.as_ptr(),
                      end_key_lens.as_ptr(),
                      sizes.as_mut_ptr());
    match cf {
        None => unsafe {
            rocksdb_ffi::rocksdb_approximate_sizes(self.inner,
                                                   n,
                                                   start_key_ptr,
                                                   start_key_len_ptr,
                                                   end_key_ptr,
                                                   end_key_len_ptr,
                                                   size_ptr)
        },
        Some(cf) => unsafe {
            rocksdb_ffi::rocksdb_approximate_sizes_cf(self.inner,
                                                      cf,
                                                      n,
                                                      start_key_ptr,
                                                      start_key_len_ptr,
                                                      end_key_ptr,
                                                      end_key_len_ptr,
                                                      size_ptr)
        },
    }
    sizes
}
}
// `Writable` for `DB`: every operation delegates to its `_opt` variant
// with a freshly created default `WriteOptions`.
impl Writable for DB {
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        self.put_opt(key, value, &WriteOptions::new())
    }
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String> {
        self.put_cf_opt(cf, key, value, &WriteOptions::new())
    }
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        self.merge_opt(key, value, &WriteOptions::new())
    }
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String> {
        self.merge_cf_opt(cf, key, value, &WriteOptions::new())
    }
    fn delete(&self, key: &[u8]) -> Result<(), String> {
        self.delete_opt(key, &WriteOptions::new())
    }
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String> {
        self.delete_cf_opt(cf, key, &WriteOptions::new())
    }
}
impl Default for WriteBatch {
    fn default() -> WriteBatch {
        WriteBatch {
            // Allocates the underlying C write batch; released in `Drop`.
            inner: unsafe { rocksdb_ffi::rocksdb_writebatch_create() },
        }
    }
}
impl WriteBatch {
    /// Creates an empty write batch (alias for `Default`).
    pub fn new() -> WriteBatch {
        WriteBatch::default()
    }
}
impl Drop for WriteBatch {
    fn drop(&mut self) {
        unsafe { rocksdb_ffi::rocksdb_writebatch_destroy(self.inner) }
    }
}
impl Drop for DB {
    fn drop(&mut self) {
        unsafe {
            // Column family handles must be destroyed before closing the DB.
            for cf in self.cfs.values() {
                rocksdb_ffi::rocksdb_column_family_handle_destroy(*cf);
            }
            rocksdb_ffi::rocksdb_close(self.inner);
        }
    }
}
// `Writable` for `WriteBatch`: operations only buffer edits inside the
// batch (the C calls cannot fail here), so every method returns `Ok(())`.
// Edits take effect when the batch is passed to `DB::write`.
impl Writable for WriteBatch {
    fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_put(self.inner,
                                                key.as_ptr(),
                                                key.len() as size_t,
                                                value.as_ptr(),
                                                value.len() as size_t);
            Ok(())
        }
    }
    fn put_cf(&self,
              cf: DBCFHandle,
              key: &[u8],
              value: &[u8])
              -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_put_cf(self.inner,
                                                   cf,
                                                   key.as_ptr(),
                                                   key.len() as size_t,
                                                   value.as_ptr(),
                                                   value.len() as size_t);
            Ok(())
        }
    }
    fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_merge(self.inner,
                                                  key.as_ptr(),
                                                  key.len() as size_t,
                                                  value.as_ptr(),
                                                  value.len() as size_t);
            Ok(())
        }
    }
    fn merge_cf(&self,
                cf: DBCFHandle,
                key: &[u8],
                value: &[u8])
                -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_merge_cf(self.inner,
                                                     cf,
                                                     key.as_ptr(),
                                                     key.len() as size_t,
                                                     value.as_ptr(),
                                                     value.len() as size_t);
            Ok(())
        }
    }
    fn delete(&self, key: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_delete(self.inner,
                                                   key.as_ptr(),
                                                   key.len() as size_t);
            Ok(())
        }
    }
    fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String> {
        unsafe {
            rocksdb_ffi::rocksdb_writebatch_delete_cf(self.inner,
                                                      cf,
                                                      key.as_ptr(),
                                                      key.len() as size_t);
            Ok(())
        }
    }
}
impl Drop for ReadOptions {
    fn drop(&mut self) {
        unsafe { rocksdb_ffi::rocksdb_readoptions_destroy(self.inner) }
    }
}
impl Default for ReadOptions {
    fn default() -> ReadOptions {
        unsafe {
            ReadOptions { inner: rocksdb_ffi::rocksdb_readoptions_create() }
        }
    }
}
impl ReadOptions {
    fn new() -> ReadOptions {
        ReadOptions::default()
    }
    // TODO add snapshot setting here
    // TODO add snapshot wrapper structs with proper destructors;
    // that struct needs an "iterator" impl too.
    #[allow(dead_code)]
    fn fill_cache(&mut self, v: bool) {
        unsafe {
            rocksdb_ffi::rocksdb_readoptions_set_fill_cache(self.inner, v);
        }
    }
    // Pin reads to `snapshot`; the snapshot must outlive these options.
    fn set_snapshot(&mut self, snapshot: &Snapshot) {
        unsafe {
            rocksdb_ffi::rocksdb_readoptions_set_snapshot(self.inner,
                                                          snapshot.inner);
        }
    }
}
/// A value returned by `get`: wraps the buffer handed back by the RocksDB
/// C API and frees it with `libc::free` on drop.
pub struct DBVector {
    base: *mut u8, // C-allocated value bytes; owned by this struct
    len: usize,    // value length in bytes
}
impl Deref for DBVector {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.base, self.len) }
    }
}
impl Drop for DBVector {
    fn drop(&mut self) {
        unsafe {
            libc::free(self.base as *mut c_void);
        }
    }
}
impl DBVector {
    /// Takes ownership of a buffer allocated by the RocksDB C API.
    pub fn from_c(val: *mut u8, val_len: size_t) -> DBVector {
        DBVector {
            base: val,
            len: val_len as usize,
        }
    }
    /// Returns the value as `&str` if it is valid UTF-8, else `None`.
    pub fn to_utf8(&self) -> Option<&str> {
        from_utf8(self.deref()).ok()
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use rocksdb_options::*;
    use std::str;
    use tempdir::TempDir;
    // Round-trips a put/get/delete through a temporary database.
    #[test]
    fn external() {
        let path = TempDir::new("_rust_rocksdb_externaltest").expect("");
        let db = DB::open_default(path.path().to_str().unwrap()).unwrap();
        let p = db.put(b"k1", b"v1111");
        assert!(p.is_ok());
        let r: Result<Option<DBVector>, String> = db.get(b"k1");
        assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        assert!(db.delete(b"k1").is_ok());
        assert!(db.get(b"k1").unwrap().is_none());
    }
    #[allow(unused_variables)]
    #[test]
    fn errors_do_stuff() {
        let path = TempDir::new("_rust_rocksdb_error").expect("");
        let path_str = path.path().to_str().unwrap();
        let db = DB::open_default(path_str).unwrap();
        let opts = Options::new();
        // The DB will still be open when we try to destroy and the lock should fail
        match DB::destroy(&opts, path_str) {
            // NOTE(review): asserts an exact OS-dependent error string;
            // may be brittle across platforms.
            Err(ref s) => assert!(s.contains("LOCK: No locks available")),
            Ok(_) => panic!("should fail"),
        }
    }
    #[test]
    fn writebatch_works() {
        let path = TempDir::new("_rust_rocksdb_writebacktest").expect("");
        let db = DB::open_default(path.path().to_str().unwrap()).unwrap();
        // test put
        let batch = WriteBatch::new();
        assert!(db.get(b"k1").unwrap().is_none());
        let _ = batch.put(b"k1", b"v1111");
        // Batched edits are invisible until the batch is written.
        assert!(db.get(b"k1").unwrap().is_none());
        let p = db.write(batch);
        assert!(p.is_ok());
        let r: Result<Option<DBVector>, String> = db.get(b"k1");
        assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        // test delete
        let batch = WriteBatch::new();
        let _ = batch.delete(b"k1");
        let p = db.write(batch);
        assert!(p.is_ok());
        assert!(db.get(b"k1").unwrap().is_none());
    }
    #[test]
    fn iterator_test() {
        let path = TempDir::new("_rust_rocksdb_iteratortest").expect("");
        let db = DB::open_default(path.path().to_str().unwrap()).unwrap();
        db.put(b"k1", b"v1111").expect("");
        db.put(b"k2", b"v2222").expect("");
        db.put(b"k3", b"v3333").expect("");
        let mut iter = db.iter();
        iter.seek(SeekKey::Start);
        for (k, v) in &mut iter {
            println!("Hello {}: {}",
                     str::from_utf8(&*k).unwrap(),
                     str::from_utf8(&*v).unwrap());
        }
    }
    #[test]
    fn approximate_size_test() {
        let path = TempDir::new("_rust_rocksdb_iteratortest").expect("");
        let db = DB::open_default(path.path().to_str().unwrap()).unwrap();
        for i in 1..8000 {
            db.put(format!("{:04}", i).as_bytes(),
                   format!("{:04}", i).as_bytes())
              .expect("");
        }
        db.flush(true).expect("");
        assert!(db.get(b"0001").expect("").is_some());
        db.flush(true).expect("");
        let sizes = db.get_approximate_sizes(&[Range::new(b"0000", b"2000"),
                                               Range::new(b"2000", b"4000"),
                                               Range::new(b"4000", b"6000"),
                                               Range::new(b"6000", b"8000"),
                                               Range::new(b"8000", b"9999")]);
        assert_eq!(sizes.len(), 5);
        // Only the last range lies entirely past the inserted keys.
        for s in &sizes[0..4] {
            assert!(*s > 0);
        }
        assert_eq!(sizes[4], 0);
    }
}
// NOTE(review): this test sits outside `mod test` and uses a fixed on-disk
// path rather than a TempDir; if an assertion fails before the final
// destroy, the directory is left behind and reruns may fail.
#[test]
fn snapshot_test() {
    let path = "_rust_rocksdb_snapshottest";
    {
        let db = DB::open_default(path).unwrap();
        let p = db.put(b"k1", b"v1111");
        assert!(p.is_ok());
        let snap = db.snapshot();
        let mut r: Result<Option<DBVector>, String> = snap.get(b"k1");
        assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        r = db.get(b"k1");
        assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
        let p = db.put(b"k2", b"v2222");
        assert!(p.is_ok());
        // Writes after the snapshot are visible to the DB, not the snapshot.
        assert!(db.get(b"k2").unwrap().is_some());
        assert!(snap.get(b"k2").unwrap().is_none());
    }
    let opts = Options::new();
    assert!(DB::destroy(&opts, path).is_ok());
}
|
#![feature(optin_builtin_traits)]
#![allow(unstable)]
extern crate libc;
extern crate "termbox-sys" as termbox;
#[macro_use] extern crate bitflags;
pub use self::running::running;
pub use self::style::{Style, RB_BOLD, RB_UNDERLINE, RB_REVERSE, RB_NORMAL};
use std::error::Error;
use std::fmt;
use std::time::duration::Duration;
use std::num::FromPrimitive;
use termbox::RawEvent;
use libc::{c_int, c_uint};
// Decoded termbox event, unpacked from the raw C struct in `unpack_event`.
#[derive(Copy)]
pub enum Event {
// (modifier, key code, unicode character) as read from the RawEvent.
KeyEvent(u8, u16, u32),
// (new width, new height) after a terminal resize.
ResizeEvent(i32, i32),
NoEvent
}
// Input mode forwarded verbatim to termbox's tb_select_input_mode.
#[derive(Copy, Show)]
pub enum InputMode {
// 0x00 -- presumably queries the current mode without changing it; confirm
// against the termbox tb_select_input_mode docs.
Current = 0x00,
/// When ESC sequence is in the buffer and it doesn't match any known
/// ESC sequence => ESC means TB_KEY_ESC
Esc = 0x01,
/// When ESC sequence is in the buffer and it doesn't match any known
/// sequence => ESC enables TB_MOD_ALT modifier for the next keyboard event.
Alt = 0x02,
}
// Terminal colors. The u16 values are masked into the low bits of a Style
// (see TB_NORMAL_COLOR below); they presumably mirror termbox's TB_* color
// constants.
#[derive(Copy, PartialEq)]
#[repr(C,u16)]
pub enum Color {
Default = 0x00,
Black = 0x01,
Red = 0x02,
Green = 0x03,
Yellow = 0x04,
Blue = 0x05,
Magenta = 0x06,
Cyan = 0x07,
White = 0x08,
}
// Cell style bitflags: the low nibble carries the color, the RB_* bits are
// the bold/underline/reverse attributes OR'd on top.
mod style {
bitflags! {
#[repr(C)]
flags Style: u16 {
const TB_NORMAL_COLOR = 0x000F,
const RB_BOLD = 0x0100,
const RB_UNDERLINE = 0x0200,
const RB_REVERSE = 0x0400,
const RB_NORMAL = 0x0000,
const TB_ATTRIB = RB_BOLD.bits | RB_UNDERLINE.bits | RB_REVERSE.bits,
}
}
impl Style {
// Builds a Style carrying only the color bits (attributes cleared).
pub fn from_color(color: super::Color) -> Style {
Style { bits: color as u16 & TB_NORMAL_COLOR.bits }
}
}
}
// Zeroed scratch event handed to the termbox FFI calls to fill in.
const NIL_RAW_EVENT: RawEvent = RawEvent { etype: 0, emod: 0, key: 0, ch: 0, w: 0, h: 0 };
// FIXME: Rust doesn't support this enum representation.
// #[derive(Copy,FromPrimitive,Show)]
// #[repr(C,int)]
// pub enum EventErrorKind {
// Error = -1,
// }
// pub type EventError = Option<EventErrorKind>;
// Workaround for the unsupported enum above: a module holding a unit struct
// stands in for a single-variant error enum.
#[allow(non_snake_case)]
pub mod EventErrorKind {
#[derive(Copy,Show)]
pub struct Error;
}
// `None` encodes "unexpected return code" (see the Error impl below).
pub type EventError = Option<EventErrorKind::Error>;
pub type EventResult<T> = Result<T, EventError>;
impl Error for EventError {
// Human-readable text for the two event-error cases.
fn description(&self) -> &str {
match *self {
// TODO: Check errno here
Some(EventErrorKind::Error) => "Unknown error.",
None => "Unexpected return code."
}
}
}
// Translates a termbox return code plus the filled-in raw event into our
// Event type: 0 = no event, 1 = key, 2 = resize, -1 = error, anything else
// is reported as the "unexpected return code" error (None).
fn unpack_event(ev_type: c_int, ev: &RawEvent) -> EventResult<Event> {
match ev_type {
0 => Ok(Event::NoEvent),
1 => Ok(Event::KeyEvent(ev.emod, ev.key, ev.ch)),
2 => Ok(Event::ResizeEvent(ev.w, ev.h)),
// FIXME: Rust doesn't support this error representation
// res => FromPrimitive::from_int(res as isize),
-1 => Err(Some(EventErrorKind::Error)),
_ => Err(None)
}
}
// Failure codes returned by tb_init; converted from the raw integer via
// FromPrimitive in RustBox::init.
#[derive(Copy,FromPrimitive,Show)]
#[repr(C,isize)]
pub enum InitErrorKind {
UnsupportedTerminal = -1,
FailedToOpenTty = -2,
PipeTrapError = -3,
}
// Everything that can go wrong in RustBox::init.
pub enum InitError {
// Applying an init option failed; the boxed error is the underlying
// cause, when one is available.
Opt(InitOption, Option<Box<Error>>),
AlreadyOpen,
// tb_init failed; None means an unrecognized return code.
TermBox(Option<InitErrorKind>),
}
impl fmt::Show for InitError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", self.description())
}
}
impl Error for InitError {
fn description(&self) -> &str {
match *self {
InitError::Opt(InitOption::BufferStderr, _) => "Could not redirect stderr.",
InitError::Opt(InitOption::InputMode(_), _) => "Could not set input mode.",
InitError::AlreadyOpen => "RustBox is already open.",
InitError::TermBox(e) => e.map_or("Unexpected TermBox return code.", |e| match e {
InitErrorKind::UnsupportedTerminal => "Unsupported terminal.",
InitErrorKind::FailedToOpenTty => "Failed to open TTY.",
InitErrorKind::PipeTrapError => "Pipe trap error.",
}),
}
}
fn cause(&self) -> Option<&Error> {
match *self {
InitError::Opt(_, Some(ref e)) => Some(&**e),
_ => None
}
}
}
// Process-global open/closed flag guarding the singleton RustBox instance.
mod running {
use std::sync::atomic::{self, AtomicBool};
// The state of the RustBox is protected by the lock. Yay, global state!
static RUSTBOX_RUNNING: AtomicBool = atomic::ATOMIC_BOOL_INIT;
/// true iff RustBox is currently running. Beware of races here--don't rely on this for anything
/// critical unless you happen to know that RustBox cannot change state when it is called (a good
/// usecase would be checking to see if it's worth risking double printing backtraces to avoid
/// having them swallowed up by RustBox).
pub fn running() -> bool {
RUSTBOX_RUNNING.load(atomic::Ordering::SeqCst)
}
// Internal RAII guard used to ensure we release the running lock whenever we acquire it.
#[allow(missing_copy_implementations)]
pub struct RunningGuard(());
// Claims the lock; returns None when another RustBox already holds it.
pub fn run() -> Option<RunningGuard> {
// Ensure that we are not already running and simultaneously set RUSTBOX_RUNNING using an
// atomic swap. This ensures that contending threads don't trample each other.
if RUSTBOX_RUNNING.swap(true, atomic::Ordering::SeqCst) {
// The Rustbox was already running.
None
} else {
// The RustBox was not already running, and now we have the lock.
Some(RunningGuard(()))
}
}
impl Drop for RunningGuard {
fn drop(&mut self) {
// Indicate that we're free now. We could probably get away with lower atomicity here,
// but there's no reason to take that chance.
RUSTBOX_RUNNING.store(false, atomic::Ordering::SeqCst);
}
}
}
// RAII guard for input redirection
#[cfg(unix)]
mod redirect {
use std::error::Error;
use libc;
use std::io::{util, IoError, PipeStream};
use std::io::pipe::PipePair;
use std::os::unix::AsRawFd;
use super::{InitError, InitOption};
use super::running::RunningGuard;
// `pair` is the OS pipe buffering the redirected output; `fd` is a handle
// onto the original target descriptor (stderr in practice -- see
// redirect_stderr) so it can be restored and the buffer flushed on drop.
pub struct Redirect {
pair: PipePair,
fd: PipeStream,
}
impl Drop for Redirect {
fn drop(&mut self) {
// We make sure that we never actually create the Redirect without also taking a
// RunningGuard. This means that we know that this will always be dropped immediately
// before the RunningGuard is destroyed, and *after* a RustBox containing one is
// destroyed.
//
// We rely on destructor order here: destructors are always executed top-down, so as
// long as this is included above the RunningGuard in the RustBox struct, we can be
// confident that it is destroyed while we're still holding onto the lock.
unsafe {
let old_fd = self.pair.writer.as_raw_fd();
let new_fd = self.fd.as_raw_fd();
// Reopen new_fd as writer.
// (Note that if we fail here, we can't really do anything about it, so just ignore any
// errors).
if libc::dup2(old_fd, new_fd) != new_fd { return }
}
// Copy from reader to writer.
// This drains whatever was buffered in the pipe back out to the restored
// descriptor; the Result is deliberately discarded (best-effort flush).
drop(util::copy(&mut self.pair.reader, &mut self.pair.writer));
}
}
// The reason we take the RunningGuard is to make sure we don't try to redirect before the
// TermBox is set up. Otherwise it is possible to race with other threads trying to set up the
// RustBox.
// NOTE(review): PipeStream::open(fd) presumably takes ownership of the raw
// descriptor -- confirm against the old std::io docs before touching this.
fn redirect(new: PipeStream, _: &RunningGuard) -> Result<Redirect, Option<Box<Error>>> {
// Create a pipe pair.
let mut pair = try!(PipeStream::pair().map_err( |e| Some(Box::new(e) as Box<Error>)));
unsafe {
let new_fd = new.as_raw_fd();
// Copy new_fd to dup_fd.
let dup_fd = match libc::dup(new_fd) {
-1 => return Err(Some(Box::new(IoError::last_error()) as Box<Error>)),
fd => try!(PipeStream::open(fd).map_err( |e| Some(Box::new(e) as Box<Error>))),
};
// Make the writer nonblocking. This means that even if the stderr pipe fills up,
// exceptions from stack traces will not block the program. Unfortunately, if this
// does happen stderr outputwill be lost until RustBox exits.
let old_fd = pair.writer.as_raw_fd();
let res = libc::fcntl(old_fd, libc::F_SETFL, libc::O_NONBLOCK);
if res != 0 {
return Err(if res == -1 {
Some(Box::new(IoError::last_error()) as Box<Error>)
} else { None }) // This should really never happen, but no reason to unwind here.
}
// Reopen new_fd as writer.
let fd = libc::dup2(old_fd, new_fd);
if fd == new_fd {
// On success, the new file descriptor should be returned. Replace the old one
// with dup_fd, since we no longer need an explicit reference to the writer.
// Note that it is *possible* that some other thread tried to take over stderr
// between when we did and now, causing a race here. RustBox won't do it, though.
// And it's honestly not clear how to guarantee correct behavior there anyway,
// since if the change had come a fraction of a second later we still probably
// wouldn't want to overwite it. In general this is a good argument for why the
// redirect behavior is optional.
pair.writer = dup_fd;
Ok(Redirect {
pair: pair,
fd: new,
})
} else {
Err(if fd == -1 { Some(Box::new(IoError::last_error()) as Box<Error>) } else { None })
}
}
}
// Redirects stderr into a freshly created Redirect, storing it in `*stderr`.
// Fails when a redirect is already installed or any step of the fd dance
// above fails; errors are wrapped as InitError::Opt(BufferStderr, ...).
pub fn redirect_stderr(stderr: &mut Option<Redirect>,
rg: &RunningGuard) -> Result<(), InitError> {
match *stderr {
Some(_) => {
// Can only redirect once.
Err(InitError::Opt(InitOption::BufferStderr, None))
},
None => {
*stderr = Some(try!(redirect(
try!(PipeStream::open(libc::STDERR_FILENO)
.map_err( |e| InitError::Opt(InitOption::BufferStderr,
Some(Box::new(e) as Box<Error>)) )),
rg)
.map_err( |e| InitError::Opt(InitOption::BufferStderr, e))));
Ok(())
}
}
}
}
#[cfg(not(unix))]
// Not sure how we'll do this on Windows, unimplemented for now.
mod redirect {
pub enum Redirect { }
// Stub: stderr buffering is unsupported off Unix, so asking for it is an
// init error.
pub fn redirect_stderr(_: &mut Option<Redirect>,
_: &super::RunningGuard) -> Result<(), super::InitError> {
Err(super::InitError::Opt(super::InitOption::BufferStderr, None))
}
}
// Handle to the initialized terminal. Dropping it shuts termbox down,
// flushes any buffered stderr, and releases the global running lock --
// in that field order, which is load-bearing (see the comments below).
#[allow(missing_copy_implementations)]
pub struct RustBox {
// We only bother to redirect stderr for the moment, since it's used for panic!
_stderr: Option<redirect::Redirect>,
// RAII lock.
//
// Note that running *MUST* be the last field in the destructor, since destructors run in
// top-down order. Otherwise it will not properly protect the above fields.
_running: running::RunningGuard,
}
// Termbox is not thread safe
impl !Send for RustBox {}
// Options accepted by RustBox::init.
#[derive(Copy,Show)]
pub enum InitOption {
/// Use this option to automatically buffer stderr while RustBox is running. It will be
/// written when RustBox exits.
///
/// This option uses a nonblocking OS pipe to buffer stderr output. This means that if the
/// pipe fills up, subsequent writes will fail until RustBox exits. If this is a concern for
/// your program, don't use RustBox's default pipe-based redirection; instead, redirect stderr
/// to a log file or another process that is capable of handling it better.
BufferStderr,
/// Use this option to initialize with a specific input mode
///
/// See InputMode enum for details on the variants.
InputMode(InputMode),
}
impl RustBox {
/// Initializes termbox and returns the RAII handle. Fails when another
/// RustBox is already open, when applying an option fails, or when
/// tb_init reports an error.
pub fn init(opts: &[Option<InitOption>]) -> Result<RustBox, InitError> {
// Acquire RAII lock. This might seem like overkill, but it is easy to forget to release
// it in the maze of error conditions below.
let running = match running::run() {
Some(r) => r,
None => return Err(InitError::AlreadyOpen)
};
// Time to check our options.
let mut stderr = None;
for opt in opts.iter().filter_map(|&opt| opt) {
match opt {
InitOption::BufferStderr => try!(redirect::redirect_stderr(&mut stderr, &running)),
InitOption::InputMode(mode) => unsafe { termbox::tb_select_input_mode(mode as c_int); },
}
}
// Create the RustBox.
Ok(unsafe {
match termbox::tb_init() {
0 => RustBox {
_stderr: stderr,
_running: running,
},
res => {
// Nonzero codes map onto InitErrorKind via FromPrimitive;
// unrecognized codes become TermBox(None).
return Err(InitError::TermBox(FromPrimitive::from_int(res as isize)))
}
}
})
}
/// Terminal width in cells.
pub fn width(&self) -> usize {
unsafe { termbox::tb_width() as usize }
}
/// Terminal height in cells.
pub fn height(&self) -> usize {
unsafe { termbox::tb_height() as usize }
}
/// Clears the back buffer.
pub fn clear(&self) {
unsafe { termbox::tb_clear() }
}
/// Flushes the back buffer to the terminal.
pub fn present(&self) {
unsafe { termbox::tb_present() }
}
/// Moves the terminal cursor to cell (x, y).
pub fn set_cursor(&self, x: isize, y: isize) {
unsafe { termbox::tb_set_cursor(x as c_int, y as c_int) }
}
// Unsafe because u8 is not guaranteed to be a UTF-8 character
pub unsafe fn change_cell(&self, x: usize, y: usize, ch: u32, fg: u16, bg: u16) {
termbox::tb_change_cell(x as c_uint, y as c_uint, ch, fg, bg)
}
/// Writes `s` starting at cell (x, y), one cell per char, combining the
/// foreground color with the attribute bits of `sty`.
pub fn print(&self, x: usize, y: usize, sty: Style, fg: Color, bg: Color, s: &str) {
let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB);
let bg = Style::from_color(bg);
for (i, ch) in s.chars().enumerate() {
unsafe {
self.change_cell(x+i, y, ch as u32, fg.bits(), bg.bits());
}
}
}
/// Single-character variant of `print`.
pub fn print_char(&self, x: usize, y: usize, sty: Style, fg: Color, bg: Color, ch: char) {
let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB);
let bg = Style::from_color(bg);
unsafe {
self.change_cell(x, y, ch as u32, fg.bits(), bg.bits());
}
}
/// Blocks until an event arrives, then decodes it via unpack_event.
/// NOTE(review): `ev` is passed as *const yet the C side fills it in --
/// mutation through a non-mut binding. Confirm soundness against the
/// termbox-sys declaration before reusing this pattern.
pub fn poll_event(&self) -> EventResult<Event> {
let ev = NIL_RAW_EVENT;
let rc = unsafe {
termbox::tb_poll_event(&ev as *const RawEvent)
};
unpack_event(rc, &ev)
}
/// Waits up to `timeout` for an event; same *const caveat as poll_event.
pub fn peek_event(&self, timeout: Duration) -> EventResult<Event> {
let ev = NIL_RAW_EVENT;
let rc = unsafe {
termbox::tb_peek_event(&ev as *const RawEvent, timeout.num_milliseconds() as c_uint)
};
unpack_event(rc, &ev)
}
/// Switches the input mode after initialization.
pub fn set_input_mode(&self, mode: InputMode) {
unsafe {
termbox::tb_select_input_mode(mode as c_int);
}
}
}
impl Drop for RustBox {
fn drop(&mut self) {
// Since only one instance of the RustBox is ever accessible, we should not
// need to do this atomically.
// Note: we should definitely have RUSTBOX_RUNNING = true here.
unsafe {
termbox::tb_shutdown();
}
}
}
Resolved build problems with Rust nightly (added explicit `'static` bounds on boxed errors).
#![feature(optin_builtin_traits)]
#![allow(unstable)]
extern crate libc;
extern crate "termbox-sys" as termbox;
#[macro_use] extern crate bitflags;
pub use self::running::running;
pub use self::style::{Style, RB_BOLD, RB_UNDERLINE, RB_REVERSE, RB_NORMAL};
use std::error::Error;
use std::fmt;
use std::time::duration::Duration;
use std::num::FromPrimitive;
use termbox::RawEvent;
use libc::{c_int, c_uint};
// NOTE(review): from here down is a second, near-identical copy of the
// rustbox library above; it differs only in explicit `'static` bounds on
// the boxed errors (InitError::Opt and redirect()).
// Decoded termbox event: (modifier, key code, unicode char) for keys.
#[derive(Copy)]
pub enum Event {
KeyEvent(u8, u16, u32),
// (new width, new height) after a terminal resize.
ResizeEvent(i32, i32),
NoEvent
}
// Input mode forwarded verbatim to termbox's tb_select_input_mode.
#[derive(Copy, Show)]
pub enum InputMode {
Current = 0x00,
/// When ESC sequence is in the buffer and it doesn't match any known
/// ESC sequence => ESC means TB_KEY_ESC
Esc = 0x01,
/// When ESC sequence is in the buffer and it doesn't match any known
/// sequence => ESC enables TB_MOD_ALT modifier for the next keyboard event.
Alt = 0x02,
}
// Terminal colors; the values are masked into the low bits of a Style.
#[derive(Copy, PartialEq)]
#[repr(C,u16)]
pub enum Color {
Default = 0x00,
Black = 0x01,
Red = 0x02,
Green = 0x03,
Yellow = 0x04,
Blue = 0x05,
Magenta = 0x06,
Cyan = 0x07,
White = 0x08,
}
// Cell style bitflags: low nibble = color, RB_* bits = attributes.
mod style {
bitflags! {
#[repr(C)]
flags Style: u16 {
const TB_NORMAL_COLOR = 0x000F,
const RB_BOLD = 0x0100,
const RB_UNDERLINE = 0x0200,
const RB_REVERSE = 0x0400,
const RB_NORMAL = 0x0000,
const TB_ATTRIB = RB_BOLD.bits | RB_UNDERLINE.bits | RB_REVERSE.bits,
}
}
impl Style {
// Style carrying only the color bits (attributes cleared).
pub fn from_color(color: super::Color) -> Style {
Style { bits: color as u16 & TB_NORMAL_COLOR.bits }
}
}
}
// Zeroed scratch event handed to the termbox FFI calls to fill in.
const NIL_RAW_EVENT: RawEvent = RawEvent { etype: 0, emod: 0, key: 0, ch: 0, w: 0, h: 0 };
// FIXME: Rust doesn't support this enum representation.
// #[derive(Copy,FromPrimitive,Show)]
// #[repr(C,int)]
// pub enum EventErrorKind {
// Error = -1,
// }
// pub type EventError = Option<EventErrorKind>;
// Workaround: a module with a unit struct stands in for a one-variant enum.
#[allow(non_snake_case)]
pub mod EventErrorKind {
#[derive(Copy,Show)]
pub struct Error;
}
// `None` encodes "unexpected return code".
pub type EventError = Option<EventErrorKind::Error>;
pub type EventResult<T> = Result<T, EventError>;
impl Error for EventError {
// Human-readable text for the two event-error cases.
fn description(&self) -> &str {
match *self {
// TODO: Check errno here
Some(EventErrorKind::Error) => "Unknown error.",
None => "Unexpected return code."
}
}
}
// Maps a termbox return code + filled-in raw event to our Event type:
// 0 = no event, 1 = key, 2 = resize, -1 = error, other = Err(None).
fn unpack_event(ev_type: c_int, ev: &RawEvent) -> EventResult<Event> {
match ev_type {
0 => Ok(Event::NoEvent),
1 => Ok(Event::KeyEvent(ev.emod, ev.key, ev.ch)),
2 => Ok(Event::ResizeEvent(ev.w, ev.h)),
// FIXME: Rust doesn't support this error representation
// res => FromPrimitive::from_int(res as isize),
-1 => Err(Some(EventErrorKind::Error)),
_ => Err(None)
}
}
// Failure codes returned by tb_init, recovered via FromPrimitive.
#[derive(Copy,FromPrimitive,Show)]
#[repr(C,isize)]
pub enum InitErrorKind {
UnsupportedTerminal = -1,
FailedToOpenTty = -2,
PipeTrapError = -3,
}
// Everything that can go wrong in RustBox::init.
// This copy carries the explicit `'static` bound on the boxed cause.
pub enum InitError {
Opt(InitOption, Option<Box<Error + 'static>>),
AlreadyOpen,
TermBox(Option<InitErrorKind>),
}
impl fmt::Show for InitError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", self.description())
}
}
impl Error for InitError {
fn description(&self) -> &str {
match *self {
InitError::Opt(InitOption::BufferStderr, _) => "Could not redirect stderr.",
InitError::Opt(InitOption::InputMode(_), _) => "Could not set input mode.",
InitError::AlreadyOpen => "RustBox is already open.",
InitError::TermBox(e) => e.map_or("Unexpected TermBox return code.", |e| match e {
InitErrorKind::UnsupportedTerminal => "Unsupported terminal.",
InitErrorKind::FailedToOpenTty => "Failed to open TTY.",
InitErrorKind::PipeTrapError => "Pipe trap error.",
}),
}
}
fn cause(&self) -> Option<&Error> {
match *self {
InitError::Opt(_, Some(ref e)) => Some(&**e),
_ => None
}
}
}
// Process-global open/closed flag guarding the singleton RustBox instance.
mod running {
use std::sync::atomic::{self, AtomicBool};
// The state of the RustBox is protected by the lock. Yay, global state!
static RUSTBOX_RUNNING: AtomicBool = atomic::ATOMIC_BOOL_INIT;
/// true iff RustBox is currently running. Beware of races here--don't rely on this for anything
/// critical unless you happen to know that RustBox cannot change state when it is called (a good
/// usecase would be checking to see if it's worth risking double printing backtraces to avoid
/// having them swallowed up by RustBox).
pub fn running() -> bool {
RUSTBOX_RUNNING.load(atomic::Ordering::SeqCst)
}
// Internal RAII guard used to ensure we release the running lock whenever we acquire it.
#[allow(missing_copy_implementations)]
pub struct RunningGuard(());
// Claims the lock; returns None when another RustBox already holds it.
pub fn run() -> Option<RunningGuard> {
// Ensure that we are not already running and simultaneously set RUSTBOX_RUNNING using an
// atomic swap. This ensures that contending threads don't trample each other.
if RUSTBOX_RUNNING.swap(true, atomic::Ordering::SeqCst) {
// The Rustbox was already running.
None
} else {
// The RustBox was not already running, and now we have the lock.
Some(RunningGuard(()))
}
}
impl Drop for RunningGuard {
fn drop(&mut self) {
// Indicate that we're free now. We could probably get away with lower atomicity here,
// but there's no reason to take that chance.
RUSTBOX_RUNNING.store(false, atomic::Ordering::SeqCst);
}
}
}
// RAII guard for input redirection
#[cfg(unix)]
mod redirect {
use std::error::Error;
use libc;
use std::io::{util, IoError, PipeStream};
use std::io::pipe::PipePair;
use std::os::unix::AsRawFd;
use super::{InitError, InitOption};
use super::running::RunningGuard;
// `pair` buffers the redirected output; `fd` keeps the original target
// descriptor (stderr in practice) so it can be restored/flushed on drop.
pub struct Redirect {
pair: PipePair,
fd: PipeStream,
}
impl Drop for Redirect {
fn drop(&mut self) {
// We make sure that we never actually create the Redirect without also taking a
// RunningGuard. This means that we know that this will always be dropped immediately
// before the RunningGuard is destroyed, and *after* a RustBox containing one is
// destroyed.
//
// We rely on destructor order here: destructors are always executed top-down, so as
// long as this is included above the RunningGuard in the RustBox struct, we can be
// confident that it is destroyed while we're still holding onto the lock.
unsafe {
let old_fd = self.pair.writer.as_raw_fd();
let new_fd = self.fd.as_raw_fd();
// Reopen new_fd as writer.
// (Note that if we fail here, we can't really do anything about it, so just ignore any
// errors).
if libc::dup2(old_fd, new_fd) != new_fd { return }
}
// Copy from reader to writer.
// Best-effort flush of the buffered output; the Result is discarded.
drop(util::copy(&mut self.pair.reader, &mut self.pair.writer));
}
}
// The reason we take the RunningGuard is to make sure we don't try to redirect before the
// TermBox is set up. Otherwise it is possible to race with other threads trying to set up the
// RustBox.
// This copy carries the explicit `'static` bound on the boxed error.
fn redirect(new: PipeStream, _: &RunningGuard) -> Result<Redirect, Option<Box<Error + 'static>>> {
// Create a pipe pair.
let mut pair = try!(PipeStream::pair().map_err( |e| Some(Box::new(e) as Box<Error>)));
unsafe {
let new_fd = new.as_raw_fd();
// Copy new_fd to dup_fd.
let dup_fd = match libc::dup(new_fd) {
-1 => return Err(Some(Box::new(IoError::last_error()) as Box<Error>)),
fd => try!(PipeStream::open(fd).map_err( |e| Some(Box::new(e) as Box<Error>))),
};
// Make the writer nonblocking. This means that even if the stderr pipe fills up,
// exceptions from stack traces will not block the program. Unfortunately, if this
// does happen stderr outputwill be lost until RustBox exits.
let old_fd = pair.writer.as_raw_fd();
let res = libc::fcntl(old_fd, libc::F_SETFL, libc::O_NONBLOCK);
if res != 0 {
return Err(if res == -1 {
Some(Box::new(IoError::last_error()) as Box<Error>)
} else { None }) // This should really never happen, but no reason to unwind here.
}
// Reopen new_fd as writer.
let fd = libc::dup2(old_fd, new_fd);
if fd == new_fd {
// On success, the new file descriptor should be returned. Replace the old one
// with dup_fd, since we no longer need an explicit reference to the writer.
// Note that it is *possible* that some other thread tried to take over stderr
// between when we did and now, causing a race here. RustBox won't do it, though.
// And it's honestly not clear how to guarantee correct behavior there anyway,
// since if the change had come a fraction of a second later we still probably
// wouldn't want to overwite it. In general this is a good argument for why the
// redirect behavior is optional.
pair.writer = dup_fd;
Ok(Redirect {
pair: pair,
fd: new,
})
} else {
Err(if fd == -1 { Some(Box::new(IoError::last_error()) as Box<Error>) } else { None })
}
}
}
// Installs the stderr redirect into `*stderr`; errors become
// InitError::Opt(BufferStderr, ...). Redirecting twice is an error.
pub fn redirect_stderr(stderr: &mut Option<Redirect>,
rg: &RunningGuard) -> Result<(), InitError> {
match *stderr {
Some(_) => {
// Can only redirect once.
Err(InitError::Opt(InitOption::BufferStderr, None))
},
None => {
*stderr = Some(try!(redirect(
try!(PipeStream::open(libc::STDERR_FILENO)
.map_err( |e| InitError::Opt(InitOption::BufferStderr,
Some(Box::new(e) as Box<Error>)) )),
rg)
.map_err( |e| InitError::Opt(InitOption::BufferStderr, e))));
Ok(())
}
}
}
}
#[cfg(not(unix))]
// Not sure how we'll do this on Windows, unimplemented for now.
mod redirect {
pub enum Redirect { }
// Stub: stderr buffering is unsupported off Unix.
pub fn redirect_stderr(_: &mut Option<Redirect>,
_: &super::RunningGuard) -> Result<(), super::InitError> {
Err(super::InitError::Opt(super::InitOption::BufferStderr, None))
}
}
// Handle to the initialized terminal; field order is load-bearing for the
// destructors (see comments below).
#[allow(missing_copy_implementations)]
pub struct RustBox {
// We only bother to redirect stderr for the moment, since it's used for panic!
_stderr: Option<redirect::Redirect>,
// RAII lock.
//
// Note that running *MUST* be the last field in the destructor, since destructors run in
// top-down order. Otherwise it will not properly protect the above fields.
_running: running::RunningGuard,
}
// Termbox is not thread safe
impl !Send for RustBox {}
// Options accepted by RustBox::init.
#[derive(Copy,Show)]
pub enum InitOption {
/// Use this option to automatically buffer stderr while RustBox is running. It will be
/// written when RustBox exits.
///
/// This option uses a nonblocking OS pipe to buffer stderr output. This means that if the
/// pipe fills up, subsequent writes will fail until RustBox exits. If this is a concern for
/// your program, don't use RustBox's default pipe-based redirection; instead, redirect stderr
/// to a log file or another process that is capable of handling it better.
BufferStderr,
/// Use this option to initialize with a specific input mode
///
/// See InputMode enum for details on the variants.
InputMode(InputMode),
}
impl RustBox {
/// Initializes termbox and returns the RAII handle; fails when already
/// open, when an option cannot be applied, or when tb_init errors.
pub fn init(opts: &[Option<InitOption>]) -> Result<RustBox, InitError> {
// Acquire RAII lock. This might seem like overkill, but it is easy to forget to release
// it in the maze of error conditions below.
let running = match running::run() {
Some(r) => r,
None => return Err(InitError::AlreadyOpen)
};
// Time to check our options.
let mut stderr = None;
for opt in opts.iter().filter_map(|&opt| opt) {
match opt {
InitOption::BufferStderr => try!(redirect::redirect_stderr(&mut stderr, &running)),
InitOption::InputMode(mode) => unsafe { termbox::tb_select_input_mode(mode as c_int); },
}
}
// Create the RustBox.
Ok(unsafe {
match termbox::tb_init() {
0 => RustBox {
_stderr: stderr,
_running: running,
},
res => {
// Nonzero codes map onto InitErrorKind via FromPrimitive.
return Err(InitError::TermBox(FromPrimitive::from_int(res as isize)))
}
}
})
}
/// Terminal width in cells.
pub fn width(&self) -> usize {
unsafe { termbox::tb_width() as usize }
}
/// Terminal height in cells.
pub fn height(&self) -> usize {
unsafe { termbox::tb_height() as usize }
}
/// Clears the back buffer.
pub fn clear(&self) {
unsafe { termbox::tb_clear() }
}
/// Flushes the back buffer to the terminal.
pub fn present(&self) {
unsafe { termbox::tb_present() }
}
/// Moves the terminal cursor to cell (x, y).
pub fn set_cursor(&self, x: isize, y: isize) {
unsafe { termbox::tb_set_cursor(x as c_int, y as c_int) }
}
// Unsafe because u8 is not guaranteed to be a UTF-8 character
pub unsafe fn change_cell(&self, x: usize, y: usize, ch: u32, fg: u16, bg: u16) {
termbox::tb_change_cell(x as c_uint, y as c_uint, ch, fg, bg)
}
/// Writes `s` one cell per char, merging color with `sty`'s attributes.
pub fn print(&self, x: usize, y: usize, sty: Style, fg: Color, bg: Color, s: &str) {
let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB);
let bg = Style::from_color(bg);
for (i, ch) in s.chars().enumerate() {
unsafe {
self.change_cell(x+i, y, ch as u32, fg.bits(), bg.bits());
}
}
}
/// Single-character variant of `print`.
pub fn print_char(&self, x: usize, y: usize, sty: Style, fg: Color, bg: Color, ch: char) {
let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB);
let bg = Style::from_color(bg);
unsafe {
self.change_cell(x, y, ch as u32, fg.bits(), bg.bits());
}
}
/// Blocks until an event arrives.
/// NOTE(review): the C call writes through a *const to a non-mut binding;
/// confirm soundness against the termbox-sys declaration.
pub fn poll_event(&self) -> EventResult<Event> {
let ev = NIL_RAW_EVENT;
let rc = unsafe {
termbox::tb_poll_event(&ev as *const RawEvent)
};
unpack_event(rc, &ev)
}
/// Waits up to `timeout` for an event; same *const caveat as poll_event.
pub fn peek_event(&self, timeout: Duration) -> EventResult<Event> {
let ev = NIL_RAW_EVENT;
let rc = unsafe {
termbox::tb_peek_event(&ev as *const RawEvent, timeout.num_milliseconds() as c_uint)
};
unpack_event(rc, &ev)
}
/// Switches the input mode after initialization.
pub fn set_input_mode(&self, mode: InputMode) {
unsafe {
termbox::tb_select_input_mode(mode as c_int);
}
}
}
impl Drop for RustBox {
fn drop(&mut self) {
// Since only one instance of the RustBox is ever accessible, we should not
// need to do this atomically.
// Note: we should definitely have RUSTBOX_RUNNING = true here.
unsafe {
termbox::tb_shutdown();
}
}
}
|
use std::mem;
use std::iter;
use std::fmt;
use std::ops;
use super::util;
// A growable byte sequence stored as a list of chunks (rope-style), so
// middle insertions only shuffle bytes within one bounded chunk.
pub struct Segment {
// The chunks; their concatenation is the logical content.
vecs: Vec<Vec<u8>>,
// Cached total length, kept in sync via calc_len().
length: usize,
}
// Position of a byte as (chunk index, offset within that chunk).
#[derive(Copy, Clone)]
struct Index {
outer: usize,
inner: usize,
}
// Iterator over successive Index positions of a Segment.
struct Indexes<'a> {
seg: &'a Segment,
index: Index,
}
// Borrowing byte iterator created by iter_range; num_elem holds the
// remaining number of bytes of the requested range.
// NOTE(review): the visible Iterator impl never decrements num_elem --
// confirm range iteration actually stops at `to`.
pub struct Items<'a> {
seg: &'a Segment,
index: Index,
num_elem: Option<usize>,
}
// Mutable variant of Items, created by mut_iter_range.
pub struct MutItems<'a> {
seg: &'a mut Segment,
index: Index,
num_elem: Option<usize>,
}
// Iterates the underlying chunks as &[u8] slices.
pub struct Slices<'a> {
seg: &'a Segment,
outer: usize,
}
// Chunk sizing bounds: chunks larger than max_block_size are split on
// min_block_size page boundaries (see prepare_insert).
// NOTE(review): statics should be SCREAMING_SNAKE_CASE per convention.
static min_block_size: usize = 1024 * 1024;
static max_block_size: usize = 4 * 1024 * 1024;
impl Segment {
    /// Chunk lengths, exposed for tests/debugging only.
    pub fn _internal_debug(&self) -> Vec<usize> {
        self.vecs.iter().map(|v| v.len()).collect::<Vec<usize>>()
    }
    /// Creates an empty segment.
    pub fn new() -> Segment {
        Segment {
            vecs: Vec::new(),
            length: 0,
        }
    }
    /// Builds a segment that takes ownership of `values` as its only chunk.
    pub fn from_vec(values: Vec<u8>) -> Segment {
        let len = values.len();
        Segment {
            vecs: vec!(values),
            length: len,
        }
    }
    /// Builds a segment by copying `values` into a single chunk.
    pub fn from_slice(values: &[u8]) -> Segment {
        Segment {
            vecs: vec!(values.into()),
            length: values.len(),
        }
    }
    /// Total number of bytes stored (cached).
    pub fn len(&self) -> usize {
        self.length
    }
    /// Recomputes the cached total length from the chunk lengths.
    fn calc_len(&mut self) {
        self.length = self.vecs.iter().fold(0, |acc, v| acc + v.len());
    }
    /// Maps a linear byte position to a (chunk, offset) pair.
    ///
    /// With `for_insert`, a position exactly at the end of a chunk is legal
    /// (append); otherwise the position must fall strictly inside a chunk.
    /// Panics when `pos` is out of bounds.
    fn pos_to_index(&self, pos: usize, for_insert: bool) -> Index {
        if pos == 0 {
            return Index { outer: 0, inner: 0 };
        }
        let mut cur_pos = pos;
        for (i, vec) in self.vecs.iter().enumerate() {
            if cur_pos < vec.len() || (for_insert && cur_pos == vec.len()) {
                return Index {
                    outer: i,
                    inner: cur_pos,
                };
            }
            cur_pos -= vec.len();
        }
        panic!("Position {} is out of bounds", pos);
    }
    /// Borrowing iterator over the bytes in `[from, to)`.
    /// Panics when `to < from`.
    pub fn iter_range<'a>(&'a self, from: usize, to: usize) -> Items<'a> {
        if to < from {
            panic!("to ({}) is smaller than from ({})!", to, from);
        }
        let idx = self.pos_to_index(from, false);
        Items {
            seg: self,
            index: idx,
            num_elem: Some(to - from),
        }
    }
    /// Mutable iterator over the bytes in `[from, to)`.
    /// Panics when `to < from`.
    pub fn mut_iter_range<'a>(&'a mut self, from: usize, to: usize) -> MutItems<'a> {
        if to < from {
            panic!("to ({}) is smaller than from ({})!", to, from);
        }
        let idx = self.pos_to_index(from, false);
        MutItems {
            seg: self,
            index: idx,
            num_elem: Some(to - from),
        }
    }
    /// Iterator over (chunk, offset) positions starting at byte `from`.
    fn iter_index<'a>(&'a self, from: usize) -> Indexes<'a> {
        let index = self.pos_to_index(from, false);
        Indexes {
            seg: self,
            index: index,
        }
    }
    /// Iterator over the underlying chunks as slices.
    pub fn iter_slices<'a>(&'a self) -> Slices<'a> {
        Slices {
            seg: self,
            outer: 0,
        }
    }
    /// Ensures the chunk at `index` may grow, splitting oversized chunks on
    /// `min_block_size` page boundaries, and returns the (possibly
    /// relocated) insertion index.
    ///
    /// NOTE(review): the original text of this method was whitespace-mangled
    /// (`Vec < _ >=` tokenizes as `>=`, not a type ascription); the split
    /// branch has been reformatted into the clearly intended statements.
    fn prepare_insert(&mut self, index: Index) -> Index {
        // TODO: Get self.vecs.get(index.outer) into a local variable without ruining lifetimes?
        if index.outer >= self.vecs.len() {
            self.vecs.push(Vec::new());
        }
        if self.vecs[index.outer].len() < max_block_size {
            return index;
        }
        let page_start_idx = (index.inner / min_block_size) * min_block_size;
        if page_start_idx == 0 {
            // Insertion lands in the first page: split everything past the
            // first page into a fresh chunk so this one stays bounded.
            if self.vecs[index.outer].len() > max_block_size {
                let insert_vec: Vec<_> = self.vecs[index.outer][min_block_size..].into();
                self.vecs.insert(index.outer + 1, insert_vec);
                self.vecs[index.outer].truncate(min_block_size);
            }
            return index;
        } else {
            // Split at the page containing the insertion point and retry in
            // the newly created chunk.
            let insert_vec: Vec<_> = self.vecs[index.outer][page_start_idx..].into();
            self.vecs.insert(index.outer + 1, insert_vec);
            self.vecs[index.outer].truncate(page_start_idx);
            return self.prepare_insert(Index {
                outer: index.outer + 1,
                inner: index.inner - page_start_idx,
            });
        }
    }
    /// Inserts `values` at byte position `offset`.
    pub fn insert(&mut self, offset: usize, values: &[u8]) {
        let mut index = self.pos_to_index(offset, true);
        index = self.prepare_insert(index);
        // Scope the mutable borrow of the target chunk.
        {
            let vec = &mut self.vecs[index.outer];
            // TODO: There has to be a better way for this range
            // Insert back-to-front so each byte lands at the same inner
            // offset while preserving the slice order.
            for val in values.into_iter().rev() {
                vec.insert(index.inner, *val);
            }
        }
        self.calc_len();
    }
    /// Removes the bytes in `[start_offset, end_offset)` and returns them.
    pub fn move_out_slice(&mut self, start_offset: usize, end_offset: usize) -> Vec<u8> {
        assert!(start_offset <= end_offset);
        let mut res = Vec::new();
        let mut index = self.pos_to_index(start_offset, false);
        let num_elem = end_offset - start_offset;
        for _ in 0..num_elem {
            let c = self.vecs[index.outer].remove(index.inner);
            res.push(c);
            if index.inner >= self.vecs[index.outer].len() {
                if self.vecs[index.outer].len() == 0 {
                    // Chunk drained completely: drop it; `outer` now points
                    // at the following chunk (inner is 0 in this case).
                    self.vecs.remove(index.outer);
                } else {
                    // Reached the end of this chunk; continue in the next.
                    index.inner = 0;
                    index.outer += 1;
                }
            }
        }
        self.calc_len();
        res
    }
    /// Finds the first occurrence of `needle`, returning its byte offset.
    ///
    /// BUG FIX: this previously discarded the result of `find_slice_from`
    /// and returned `()`; it now forwards the match position. (Callers
    /// ignoring the return value are unaffected.)
    pub fn find_slice(&self, needle: &[u8]) -> Option<usize> {
        self.find_slice_from(0, needle)
    }
    /// Finds the first occurrence of `needle` at or after byte `from`,
    /// comparing via `util::iter_equals` at each candidate position.
    pub fn find_slice_from(&self, from: usize, needle: &[u8]) -> Option<usize> {
        let len = self.len();
        for i in from..self.len() {
            if util::iter_equals(self.iter_range(i, len), needle.iter()) {
                return Some(i);
            }
        }
        None
    }
}
// Byte indexing: seg[i]; panics on an out-of-bounds index (via pos_to_index).
impl ops::Index<usize> for Segment {
type Output = u8;
fn index<'a>(&'a self, _index: usize) -> &'a u8 {
let idx = self.pos_to_index(_index, false);
&self.vecs[idx.outer][idx.inner]
}
}
// Mutable byte indexing: seg[i] = b; same panic behavior as Index.
impl ops::IndexMut<usize> for Segment {
fn index_mut<'a>(&'a mut self, _index: usize) -> &'a mut u8 {
let idx = self.pos_to_index(_index, false);
&mut self.vecs[idx.outer][idx.inner]
}
}
// Debug output shows the raw chunk structure, not the flat byte sequence.
impl fmt::Debug for Segment {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.vecs.fmt(f)
}
}
impl<'a> Iterator for Indexes<'a> {
    type Item = Index;
    /// Yields each (outer, inner) position from the start index to the end
    /// of the segment.
    fn next(&mut self) -> Option<Index> {
        if self.index.outer >= self.seg.vecs.len() {
            return None;
        }
        let res = self.index;
        self.index.inner += 1;
        if self.index.inner >= self.seg.vecs[self.index.outer].len() {
            self.index.inner = 0;
            self.index.outer += 1;
        }
        Some(res)
    }
}
impl<'a> Iterator for Items<'a> {
    type Item = &'a u8;
    /// Yields each byte of the requested range.
    ///
    /// BUG FIX: `num_elem` (the remaining length of an `iter_range`
    /// request) was previously never consulted, so range iterators ran all
    /// the way to the end of the segment. It is now decremented per element
    /// and stops iteration at zero.
    fn next(&mut self) -> Option<&'a u8> {
        if let Some(n) = self.num_elem {
            if n == 0 {
                return None;
            }
            self.num_elem = Some(n - 1);
        }
        if self.index.outer >= self.seg.vecs.len() {
            return None;
        }
        let elem = {
            let vv = &self.seg.vecs[self.index.outer];
            &vv[self.index.inner]
        };
        self.index.inner += 1;
        if self.index.inner >= self.seg.vecs[self.index.outer].len() {
            self.index.inner = 0;
            self.index.outer += 1;
        }
        Some(elem)
    }
}
impl<'a> Iterator for MutItems<'a> {
    type Item = &'a mut u8;
    /// Mutable version of `Items::next`, including the same range-bound
    /// fix for `num_elem`.
    fn next(&mut self) -> Option<&'a mut u8> {
        if let Some(n) = self.num_elem {
            if n == 0 {
                return None;
            }
            self.num_elem = Some(n - 1);
        }
        if self.index.outer >= self.seg.vecs.len() {
            return None;
        }
        // A raw pointer decouples the returned &'a mut u8 from the shorter
        // reborrow of `self`.
        let elem_raw: *mut u8 = {
            let vv = &mut self.seg.vecs[self.index.outer];
            &mut vv[self.index.inner]
        };
        self.index.inner += 1;
        if self.index.inner >= self.seg.vecs[self.index.outer].len() {
            self.index.inner = 0;
            self.index.outer += 1;
        }
        // SAFETY: the pointer targets an element of a Vec owned by the
        // exclusively borrowed segment; the index advances past it so it is
        // never handed out twice, and this iterator never reallocates the
        // chunks while 'a is live.
        Some(unsafe { &mut *elem_raw })
    }
}
impl<'a> Iterator for Slices<'a> {
    type Item = &'a [u8];
    /// Yields each underlying chunk as one slice.
    fn next(&mut self) -> Option<&'a [u8]> {
        if self.outer >= self.seg.vecs.len() {
            None
        } else {
            let i = self.outer;
            self.outer += 1;
            Some(&self.seg.vecs[i])
        }
    }
}
#[test]
fn test_segment() {
    // BUG FIX: this called a nonexistent `insert_slice`; the insertion
    // method defined on Segment is `insert`. Also assert the resulting
    // length instead of checking nothing.
    let mut s = Segment::from_slice(&[1, 2, 3, 4]);
    s.insert(0, &[7, 7, 7, 7, 7]);
    assert_eq!(s.len(), 9);
}
Change const to upper case
use std::mem;
use std::iter;
use std::fmt;
use std::ops;
use super::util;
/// A growable byte buffer stored as a list of chunks (`vecs`) so that
/// inserts and removals in the middle only shift bytes within one chunk.
pub struct Segment {
    // Chunked backing storage; each inner Vec is one contiguous block.
    vecs: Vec<Vec<u8>>,
    // Cached total byte count across all chunks (kept fresh by `calc_len`).
    length: usize,
}
#[derive(Copy, Clone)]
// Two-level position: which chunk (`outer`) and the offset inside it.
struct Index {
    outer: usize,
    inner: usize,
}
// Iterator over `Index` coordinates, starting at `index`.
struct Indexes<'a> {
    seg: &'a Segment,
    index: Index,
}
/// Shared-reference byte iterator; `num_elem` is the element budget set by
/// `iter_range` (see `Items::next`).
pub struct Items<'a> {
    seg: &'a Segment,
    index: Index,
    num_elem: Option<usize>,
}
/// Mutable-reference byte iterator; same budget semantics as `Items`.
pub struct MutItems<'a> {
    seg: &'a mut Segment,
    index: Index,
    num_elem: Option<usize>,
}
/// Iterator over the raw chunks as `&[u8]` slices.
pub struct Slices<'a> {
    seg: &'a Segment,
    outer: usize,
}
// Chunk sizing policy: a chunk that grows past MAX_BLOCK_SIZE is split back
// into MIN_BLOCK_SIZE pages (see `Segment::prepare_insert`).
// `const` is preferred over an immutable `static` for plain compile-time
// values: no fixed memory location is needed.
const MIN_BLOCK_SIZE: usize = 1024 * 1024;
const MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024;
impl Segment {
    /// Returns the length of every underlying chunk (test/debug aid).
    pub fn _internal_debug(&self) -> Vec<usize> {
        self.vecs.iter().map(|v| v.len()).collect::<Vec<usize>>()
    }

    /// Creates an empty segment.
    pub fn new() -> Segment {
        Segment {
            vecs: Vec::new(),
            length: 0,
        }
    }

    /// Takes ownership of `values` as the segment's single chunk.
    pub fn from_vec(values: Vec<u8>) -> Segment {
        let len = values.len();
        Segment {
            vecs: vec![values],
            length: len,
        }
    }

    /// Copies `values` into a new single-chunk segment.
    pub fn from_slice(values: &[u8]) -> Segment {
        Segment {
            vecs: vec![values.into()],
            length: values.len(),
        }
    }

    /// Total number of bytes stored across all chunks.
    pub fn len(&self) -> usize {
        self.length
    }

    /// Recomputes the cached `length` after the chunk list changed.
    fn calc_len(&mut self) {
        self.length = self.vecs.iter().map(|v| v.len()).sum();
    }

    /// Translates a flat byte position into (chunk, in-chunk) coordinates.
    ///
    /// With `for_insert`, the one-past-the-end position of a chunk is also
    /// valid so an insert can land on a chunk boundary (or append at the
    /// very end). Panics when `pos` is out of bounds.
    fn pos_to_index(&self, pos: usize, for_insert: bool) -> Index {
        if pos == 0 {
            return Index { outer: 0, inner: 0 };
        }
        let mut cur_pos = pos;
        for (i, vec) in self.vecs.iter().enumerate() {
            if cur_pos < vec.len() || (for_insert && cur_pos == vec.len()) {
                return Index {
                    outer: i,
                    inner: cur_pos,
                };
            }
            cur_pos -= vec.len();
        }
        panic!("Position {} is out of bounds", pos);
    }

    /// Iterates over the bytes in `[from, to)`.
    pub fn iter_range<'a>(&'a self, from: usize, to: usize) -> Items<'a> {
        if to < from {
            panic!("to ({}) is smaller than from ({})!", to, from);
        }
        let idx = self.pos_to_index(from, false);
        Items {
            seg: self,
            index: idx,
            num_elem: Some(to - from),
        }
    }

    /// Iterates mutably over the bytes in `[from, to)`.
    pub fn mut_iter_range<'a>(&'a mut self, from: usize, to: usize) -> MutItems<'a> {
        if to < from {
            panic!("to ({}) is smaller than from ({})!", to, from);
        }
        let idx = self.pos_to_index(from, false);
        MutItems {
            seg: self,
            index: idx,
            num_elem: Some(to - from),
        }
    }

    /// Iterates over (outer, inner) coordinates starting at byte `from`.
    fn iter_index<'a>(&'a self, from: usize) -> Indexes<'a> {
        let index = self.pos_to_index(from, false);
        Indexes {
            seg: self,
            index: index,
        }
    }

    /// Iterates over the raw chunk slices.
    pub fn iter_slices<'a>(&'a self) -> Slices<'a> {
        Slices {
            seg: self,
            outer: 0,
        }
    }

    /// Ensures the chunk addressed by `index` is small enough to insert
    /// into, splitting oversized chunks down to `MIN_BLOCK_SIZE` pages,
    /// and returns the (possibly relocated) insertion coordinates.
    fn prepare_insert(&mut self, index: Index) -> Index {
        // TODO: Get self.vecs.get(index.outer) into a local variable without ruining lifetimes?
        if index.outer >= self.vecs.len() {
            self.vecs.push(Vec::new());
        }
        if self.vecs[index.outer].len() < MAX_BLOCK_SIZE {
            return index;
        }
        // First byte of the MIN_BLOCK_SIZE page containing the insertion
        // point within this chunk.
        let page_start_idx = (index.inner / MIN_BLOCK_SIZE) * MIN_BLOCK_SIZE;
        if page_start_idx == 0 {
            if self.vecs[index.outer].len() > MAX_BLOCK_SIZE {
                // Split the tail off so the target chunk shrinks to one page.
                // (The original had this mangled onto run-together lines.)
                let insert_vec: Vec<_> = self.vecs[index.outer][MIN_BLOCK_SIZE..].into();
                self.vecs.insert(index.outer + 1, insert_vec);
                self.vecs[index.outer].truncate(MIN_BLOCK_SIZE);
            }
            return index;
        } else {
            // Split at the page boundary and retry in the new tail chunk.
            let insert_vec: Vec<_> = self.vecs[index.outer][page_start_idx..].into();
            self.vecs.insert(index.outer + 1, insert_vec);
            self.vecs[index.outer].truncate(page_start_idx);
            return self.prepare_insert(Index {
                outer: index.outer + 1,
                inner: index.inner - page_start_idx,
            });
        }
    }

    /// Inserts `values` at byte position `offset`.
    pub fn insert(&mut self, offset: usize, values: &[u8]) {
        let mut index = self.pos_to_index(offset, true);
        index = self.prepare_insert(index);
        // Scope the mutable chunk borrow so `calc_len` can borrow again.
        {
            let vec = &mut self.vecs[index.outer];
            // Inserting in reverse at a fixed position keeps the values in
            // their original order.
            for val in values.into_iter().rev() {
                vec.insert(index.inner, *val);
            }
        }
        self.calc_len();
    }

    /// Removes the bytes in `[start_offset, end_offset)` and returns them.
    pub fn move_out_slice(&mut self, start_offset: usize, end_offset: usize) -> Vec<u8> {
        assert!(start_offset <= end_offset);
        let mut res = Vec::new();
        let mut index = self.pos_to_index(start_offset, false);
        let num_elem = end_offset - start_offset;
        for _ in 0..num_elem {
            let c = self.vecs[index.outer].remove(index.inner);
            res.push(c);
            if index.inner >= self.vecs[index.outer].len() {
                if self.vecs[index.outer].len() == 0 {
                    // Chunk drained completely: drop it; `outer` now points
                    // at what used to be the next chunk.
                    self.vecs.remove(index.outer);
                } else {
                    index.inner = 0;
                    index.outer += 1;
                }
            }
        }
        self.calc_len();
        res
    }

    /// Finds the first occurrence of `needle`, searching the whole segment.
    ///
    /// BUG FIX: the result of `find_slice_from` was previously computed and
    /// thrown away (the function returned `()`); the match position is now
    /// returned to the caller.
    pub fn find_slice(&self, needle: &[u8]) -> Option<usize> {
        self.find_slice_from(0, needle)
    }

    /// Finds the first occurrence of `needle` at or after position `from`.
    pub fn find_slice_from(&self, from: usize, needle: &[u8]) -> Option<usize> {
        let len = self.len();
        for i in from..self.len() {
            if util::iter_equals(self.iter_range(i, len), needle.iter()) {
                return Some(i);
            }
        }
        None
    }
}
impl ops::Index<usize> for Segment {
type Output = u8;
fn index<'a>(&'a self, _index: usize) -> &'a u8 {
let idx = self.pos_to_index(_index, false);
&self.vecs[idx.outer][idx.inner]
}
}
impl ops::IndexMut<usize> for Segment {
fn index_mut<'a>(&'a mut self, _index: usize) -> &'a mut u8 {
let idx = self.pos_to_index(_index, false);
&mut self.vecs[idx.outer][idx.inner]
}
}
impl fmt::Debug for Segment {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.vecs.fmt(f)
}
}
impl<'a> Iterator for Indexes<'a> {
type Item = Index;
fn next(&mut self) -> Option<Index> {
if self.index.outer >= self.seg.vecs.len() {
return None;
}
let res = self.index;
self.index.inner += 1;
if self.index.inner >= self.seg.vecs[self.index.outer].len() {
self.index.inner = 0;
self.index.outer += 1;
}
Some(res)
}
}
impl<'a> Iterator for Items<'a> {
    type Item = &'a u8;

    /// Yields byte references in order, honoring the element budget that
    /// `iter_range` stores in `num_elem`.
    ///
    /// BUG FIX: `num_elem` was never consulted, so `iter_range(from, to)`
    /// silently ignored `to` and iterated to the end of the segment. The
    /// budget is now decremented per item and iteration stops at zero.
    fn next(&mut self) -> Option<&'a u8> {
        if self.num_elem == Some(0) {
            return None;
        }
        if self.index.outer >= self.seg.vecs.len() {
            return None;
        }
        let elem = {
            let vv = &self.seg.vecs[self.index.outer];
            &vv[self.index.inner]
        };
        self.index.inner += 1;
        if self.index.inner >= self.seg.vecs[self.index.outer].len() {
            self.index.inner = 0;
            self.index.outer += 1;
        }
        if let Some(ref mut remaining) = self.num_elem {
            *remaining -= 1;
        }
        Some(elem)
    }
}
impl<'a> Iterator for MutItems<'a> {
    type Item = &'a mut u8;

    /// Yields mutable byte references in order, honoring the element budget
    /// stored in `num_elem` by `mut_iter_range`.
    ///
    /// BUG FIX: `num_elem` was never consulted, so `mut_iter_range(from, to)`
    /// ignored `to` and ran to the end of the segment; the budget is now
    /// decremented per item and iteration stops at zero.
    fn next(&mut self) -> Option<&'a mut u8> {
        if self.num_elem == Some(0) {
            return None;
        }
        if self.index.outer >= self.seg.vecs.len() {
            return None;
        }
        // Raw pointer detaches the item lifetime from the `&mut self`
        // borrow; each position is yielded at most once, so no aliasing.
        let elem_raw: *mut u8 = {
            let vv = &mut self.seg.vecs[self.index.outer];
            &mut vv[self.index.inner]
        };
        self.index.inner += 1;
        if self.index.inner >= self.seg.vecs[self.index.outer].len() {
            self.index.inner = 0;
            self.index.outer += 1;
        }
        if let Some(ref mut remaining) = self.num_elem {
            *remaining -= 1;
        }
        // SAFETY: points into `self.seg`, which outlives 'a, and this
        // position is never yielded again by this iterator.
        Some(unsafe { &mut *elem_raw })
    }
}
impl<'a> Iterator for Slices<'a> {
    type Item = &'a [u8];

    /// Yields each underlying chunk as a contiguous byte slice.
    fn next(&mut self) -> Option<&'a [u8]> {
        let chunk = self.seg.vecs.get(self.outer)?;
        self.outer += 1;
        Some(chunk.as_slice())
    }
}
#[test]
fn test_segment() {
    // BUG FIX: the original called `insert_slice`, which does not exist on
    // `Segment`; the insertion method is `insert`.
    let mut s = Segment::from_slice(&[1, 2, 3, 4]);
    s.insert(0, &[7, 7, 7, 7, 7]);
    assert_eq!(s.len(), 9);
    assert_eq!(s[0], 7);
    assert_eq!(s[5], 1);
}
|
#![allow(unsafe_code)]
#![allow(unused)]
use std::{
alloc::{alloc_zeroed, dealloc, Layout},
cmp::Ordering::{Equal, Greater, Less},
convert::{TryFrom, TryInto},
fmt,
mem::{align_of, size_of, ManuallyDrop},
num::NonZeroU64,
ops::{Deref, DerefMut, Index, IndexMut},
};
// Every table buffer is aligned for the in-place `Header` at offset 0.
const ALIGNMENT: usize = align_of::<Header>();
// Size of a u64 slot, used throughout the layout arithmetic.
const U64_SZ: usize = size_of::<u64>();
// Allocates a zeroed boxed byte slice holding `size` data bytes plus space
// for a `Header` struct at the beginning, aligned for `Header`.
fn aligned_boxed_slice(size: usize) -> Box<[u8]> {
    let size = size + size_of::<Header>();
    let layout = Layout::from_size_align(size, ALIGNMENT).unwrap();
    unsafe {
        // SAFETY: `layout` has a non-zero size (it always includes the
        // Header), and the resulting Box is only freed via `SSTable::drop`,
        // which deallocates with this same size/alignment.
        // NOTE(review): a null return from `alloc_zeroed` (OOM) is only
        // caught indirectly by `fatten`'s assert — confirm that is intended.
        let ptr = alloc_zeroed(layout);
        let fat_ptr = fatten(ptr, size);
        let ret = Box::from_raw(fat_ptr);
        assert_eq!(ret.len(), size);
        ret
    }
}
/// Builds a fat (`*mut [u8]`) slice pointer from a thin pointer and length.
/// <https://users.rust-lang.org/t/construct-fat-pointer-to-struct/29198/9>
#[allow(trivial_casts)]
fn fatten(data: *const u8, len: usize) -> *mut [u8] {
    // Requirements of slice::from_raw_parts.
    assert!(!data.is_null());
    assert!(isize::try_from(len).is_ok());
    // SAFETY: non-null and isize-sized checked above; the caller guarantees
    // `data` actually spans `len` bytes. `()` elements make the temporary
    // slice zero-sized, so no real memory is read here.
    let slice = unsafe { core::slice::from_raw_parts(data as *const (), len) };
    slice as *const [()] as *mut _
}
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub(crate) struct Header {
    // NB always lay out fields from largest to smallest
    // to properly pack the struct
    pub next: Option<NonZeroU64>,
    pub merging_child: Option<NonZeroU64>,
    // Byte lengths of the lo/hi bound keys recorded by `new`.
    lo_len: u64,
    hi_len: u64,
    // Some(len) when every key (resp. value) has the same non-zero length,
    // letting the layout skip the offset indirection table.
    fixed_key_length: Option<NonZeroU64>,
    fixed_value_length: Option<NonZeroU64>,
    pub children: u16,
    // Width in bytes of each offset-table slot (0 when the table is unused).
    offset_bytes: u8,
    pub prefix_len: u8,
    pub merging: bool,
    pub is_index: bool,
}
/// An immutable sorted string table: a single aligned allocation holding a
/// `Header` followed by the item data buffer.
pub(crate) struct SSTable(ManuallyDrop<Box<[u8]>>);
impl Drop for SSTable {
    fn drop(&mut self) {
        let box_ptr = self.0.as_mut_ptr();
        let layout = Layout::from_size_align(self.0.len(), ALIGNMENT).unwrap();
        unsafe {
            // SAFETY: the buffer came from `aligned_boxed_slice` with exactly
            // this size/alignment, and `ManuallyDrop` prevents the Box's own
            // destructor from running, so this is the only deallocation.
            dealloc(box_ptr, layout);
        }
    }
}
// Deref to `Header` so header fields read like fields of the table itself.
impl Deref for SSTable {
    type Target = Header;
    fn deref(&self) -> &Header {
        self.header()
    }
}
impl fmt::Debug for SSTable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SSTable")
            .field("header", self.header())
            .field("lo", &self.lo())
            .field("hi", &self.hi())
            .field("items", &self.iter().collect::<crate::Map<_, _>>())
            .finish()
    }
}
impl DerefMut for SSTable {
    fn deref_mut(&mut self) -> &mut Header {
        self.header_mut()
    }
}
impl SSTable {
pub fn new(
lo: &[u8],
hi: &[u8],
prefix_len: u8,
items: &[(&[u8], &[u8])],
) -> SSTable {
// determine if we need to use varints and offset
// indirection tables, or if everything is equal
// size we can skip this.
let mut key_lengths = Vec::with_capacity(items.len());
let mut value_lengths = Vec::with_capacity(items.len());
let mut keys_equal_length = true;
let mut values_equal_length = true;
for (k, v) in items {
key_lengths.push(k.len() as u64);
if let Some(first_sz) = key_lengths.first() {
keys_equal_length &= *first_sz == k.len() as u64;
}
value_lengths.push(v.len() as u64);
if let Some(first_sz) = value_lengths.first() {
values_equal_length &= *first_sz == v.len() as u64;
}
}
let fixed_key_length = if keys_equal_length {
if let Some(key_length) = key_lengths.first() {
if *key_length > 0 {
Some(NonZeroU64::new(*key_length).unwrap())
} else {
None
}
} else {
None
}
} else {
None
};
let fixed_value_length = if values_equal_length {
if let Some(value_length) = value_lengths.first() {
if *value_length > 0 {
Some(NonZeroU64::new(*value_length).unwrap())
} else {
None
}
} else {
None
}
} else {
None
};
let key_storage_size = if let Some(key_length) = fixed_key_length {
key_length.get() * (items.len() as u64)
} else {
let mut sum = 0;
for key_length in &key_lengths {
sum += key_length;
sum += varint_size(*key_length);
}
sum
};
let value_storage_size = if let Some(value_length) = fixed_value_length
{
value_length.get() * (items.len() as u64)
} else {
let mut sum = 0;
for value_length in &value_lengths {
sum += value_length;
sum += varint_size(*value_length);
}
sum
};
let (offsets_storage_size, offset_bytes) = if keys_equal_length
&& values_equal_length
{
(0, 0)
} else {
let max_offset_storage_size = (6 * items.len()) as u64;
let max_total_item_storage_size =
key_storage_size + value_storage_size + max_offset_storage_size;
let bytes_per_offset: u8 = match max_total_item_storage_size {
i if i < 256 => 1,
i if i < (1 << 16) => 2,
i if i < (1 << 24) => 3,
i if i < (1 << 32) => 4,
i if i < (1 << 40) => 5,
i if i < (1 << 48) => 6,
_ => unreachable!(),
};
(bytes_per_offset as u64 * items.len() as u64, bytes_per_offset)
};
let total_item_storage_size =
key_storage_size + value_storage_size + offsets_storage_size;
println!("allocating size of {}", total_item_storage_size);
let boxed_slice = aligned_boxed_slice(
usize::try_from(total_item_storage_size).unwrap(),
);
let mut ret = SSTable(ManuallyDrop::new(boxed_slice));
*ret.header_mut() = Header {
next: None,
merging_child: None,
lo_len: lo.len() as u64,
hi_len: hi.len() as u64,
fixed_key_length,
fixed_value_length,
offset_bytes,
children: u16::try_from(items.len()).unwrap(),
prefix_len: prefix_len,
merging: false,
is_index: true,
};
// we use either 0 or 1 offset tables.
// - if keys and values are all equal lengths, no offset table is
// required
// - if keys are equal length but values are not, we put an offset table
// at the beginning of the data buffer, then put each of the keys
// packed together, then varint-prefixed values which are addressed by
// the offset table
// - if keys and values are both different lengths, we put an offset
// table at the beginning of the data buffer, then varint-prefixed
// keys followed inline with varint-prefixed values.
//
// So, there are 4 possible layouts:
// 1. [fixed size keys] [fixed size values]
// - signified by fixed_key_length and fixed_value_length being Some
// 2. [offsets] [fixed size keys] [variable values]
// - fixed_key_length: Some, fixed_value_length: None
// 3. [offsets] [variable keys] [fixed-length values]
// - fixed_key_length: None, fixed_value_length: Some
// 4. [offsets] [variable keys followed by variable values]
// - fixed_key_length: None, fixed_value_length: None
let mut offset = 0_u64;
for (idx, (k, v)) in items.iter().enumerate() {
if !keys_equal_length || !values_equal_length {
ret.set_offset(idx, usize::try_from(offset).unwrap());
}
if !keys_equal_length {
offset += varint_size(k.len() as u64) + k.len() as u64;
}
if !values_equal_length {
offset += varint_size(v.len() as u64) + v.len() as u64;
}
ret.key_buf_for_offset_mut(idx).copy_from_slice(k);
ret.value_buf_for_offset_mut(idx).copy_from_slice(v);
}
ret
}
// Returns the OPEN ENDED buffer where a key may be placed: fixed-size keys
// are addressed arithmetically, variable ones via the offset table.
fn key_buf_for_offset_mut(&mut self, index: usize) -> &mut [u8] {
    match (self.fixed_key_length, self.fixed_value_length) {
        (Some(k_sz), Some(_)) | (Some(k_sz), None) => {
            let keys_buf = self.keys_buf_mut();
            &mut keys_buf[index * usize::try_from(k_sz.get()).unwrap()..]
        }
        (None, Some(_)) | (None, None) => {
            // find offset for key or combined kv offset
            let offset = self.offset(index);
            let keys_buf = self.keys_buf_mut();
            &mut keys_buf[offset..]
        }
    }
}
// Returns the OPEN ENDED buffer where a value may be placed.
//
// NB: it's important that this is only ever called after setting
// the key and its varint length prefix, as this needs to be parsed
// for case 4.
fn value_buf_for_offset_mut(&mut self, index: usize) -> &mut [u8] {
    match (self.fixed_key_length, self.fixed_value_length) {
        (Some(_), Some(v_sz)) | (None, Some(v_sz)) => {
            let values_buf = self.values_buf_mut();
            &mut values_buf[index * usize::try_from(v_sz.get()).unwrap()..]
        }
        (Some(_), None) | (None, None) => {
            // find combined kv offset, skip key bytes
            let offset = self.offset(index);
            let values_buf = self.values_buf_mut();
            &mut values_buf[offset..]
        }
    }
}
/// Reads the `index`th slot of the offset table.
///
/// BUG FIX: the stored bytes are the LITTLE-ENDIAN low bytes of the
/// offset (see `set_offset`), so they must be copied into the FRONT of
/// the u64 buffer; the old code copied them into the high bytes, which
/// scaled every decoded offset by 2^(8 * (8 - offset_bytes)).
fn offset(&self, index: usize) -> usize {
    let start = index * self.offset_bytes as usize;
    let end = start + self.offset_bytes as usize;
    let buf = &self.offsets_buf()[start..end];
    let mut le_usize_buf = [0u8; U64_SZ];
    le_usize_buf[..end - start].copy_from_slice(buf);
    usize::try_from(u64::from_le_bytes(le_usize_buf)).unwrap()
}
// Writes `offset` into slot `index` of the offset table, keeping only the
// low `offset_bytes` little-endian bytes.
fn set_offset(&mut self, index: usize, offset: usize) {
    let offset_bytes = self.offset_bytes as usize;
    let mut buf = self.offset_buf_for_offset_mut(index);
    let bytes = &offset.to_le_bytes()[..offset_bytes];
    buf.copy_from_slice(bytes);
}
// Exactly the `offset_bytes`-wide slot for `index` in the offset table.
fn offset_buf_for_offset_mut(&mut self, index: usize) -> &mut [u8] {
    let start = index * self.offset_bytes as usize;
    let end = start + self.offset_bytes as usize;
    &mut self.offsets_buf_mut()[start..end]
}
// TODO: key region of the data buffer (follows the offset table).
fn keys_buf_mut(&mut self) -> &mut [u8] {
    todo!()
}
// TODO: value region of the data buffer (follows the keys).
fn values_buf_mut(&mut self) -> &mut [u8] {
    todo!()
}
// Offset table lives at the front of the data buffer.
fn offsets_buf(&self) -> &[u8] {
    let offset_sz = self.children as usize * self.offset_bytes as usize;
    &self.data_buf()[..offset_sz]
}
fn offsets_buf_mut(&mut self) -> &mut [u8] {
    let offset_sz = self.children as usize * self.offset_bytes as usize;
    &mut self.data_buf_mut()[..offset_sz]
}
// Everything after the in-place Header.
fn data_buf(&self) -> &[u8] {
    &self.0[size_of::<Header>()..]
}
fn data_buf_mut(&mut self) -> &mut [u8] {
    &mut self.0[size_of::<Header>()..]
}
/// Returns a NEW table with `key`/`value` merged in (copy-on-write).
pub fn insert(&self, key: &[u8], value: &[u8]) -> SSTable {
    match self.find(&key[usize::from(self.prefix_len)..]) {
        Ok(offset) => {
            if self.is_index {
                panic!("already contained key being merged into index");
            }
            // TODO: rebuild with the value at `offset` replaced.
            todo!()
        }
        Err(prospective_offset) => {
            // TODO: rebuild with the new pair spliced in at
            // `prospective_offset`.
            todo!()
        }
    }
}
/// Returns a NEW table with `key` removed; panics when absent.
pub fn remove(&self, key: &[u8]) -> SSTable {
    let offset = self
        .find(&key[usize::from(self.prefix_len)..])
        .expect("called remove for non-present key");
    // TODO: rebuild without the entry at `offset`.
    todo!()
}
// TODO: split into two tables around a midpoint key.
pub fn split(&self) -> (SSTable, SSTable) {
    todo!()
}
// TODO: merge the entries of `other` into a new table.
pub fn merge(&self, other: &SSTable) -> SSTable {
    todo!()
}
// TODO: size-based split heuristic.
pub fn should_split(&self) -> bool {
    todo!()
}
// TODO: size-based merge heuristic.
pub fn should_merge(&self) -> bool {
    todo!()
}
fn header(&self) -> &Header {
    // SAFETY: the buffer begins with a Header-aligned, zero-initialized
    // Header (see `aligned_boxed_slice`) that `new` fully initializes.
    unsafe { &*(self.0.as_ptr() as *mut Header) }
}
fn header_mut(&mut self) -> &mut Header {
    // SAFETY: as in `header`; `&mut self` guarantees unique access.
    unsafe { &mut *(self.0.as_mut_ptr() as *mut Header) }
}
// Number of child entries (excludes the lo/hi boundary slots).
pub fn len(&self) -> usize {
    usize::from(self.children)
}
// Children plus the two boundary entries (`lo` at slot 0, `hi` at slot 1).
fn len_internal(&self) -> usize {
    self.len() + 2
}
/// Binary search over the children for `key` (prefix already stripped).
/// `Ok(i)` = exact match at child `i`; `Err(i)` = insertion point.
fn find(&self, key: &[u8]) -> Result<usize, usize> {
    let mut size = self.len();
    if size == 0 || key < self.index_child(0).0 {
        return Err(0);
    }
    let mut base = 0_usize;
    while size > 1 {
        let half = size / 2;
        let mid = base + half;
        // mid is always in [0, size), that means mid is >= 0 and < size.
        // mid >= 0: by definition
        // mid < size: mid = size / 2 + size / 4 + size / 8 ...
        let l = self.index_child(mid).0;
        let cmp = crate::fastcmp(l, key);
        base = if cmp == Greater { base } else { mid };
        size -= half;
    }
    // base is always in [0, size) because base <= mid.
    let l = self.index_child(base).0;
    let cmp = crate::fastcmp(l, key);
    if cmp == Equal { Ok(base) } else { Err(base + (cmp == Less) as usize) }
}
/// Least-upper-bound lookup: value of the last child whose key is <= `key`.
/// On a miss, `find` returns the insertion point, so step back one slot;
/// the `key >= lo` assert keeps `idx - 1` from underflowing.
fn get_lub(&self, key: &[u8]) -> &[u8] {
    assert!(key >= self.lo());
    match self.find(key) {
        Ok(idx) => self.index_child(idx).1,
        Err(idx) => self.index_child(idx - 1).1,
    }
}
/// Iterates (key, value) pairs of the children, skipping the lo/hi
/// boundary slots at internal indices 0 and 1.
fn iter(&self) -> impl Iterator<Item = (&[u8], &[u8])> {
    (2..)
        .take_while(move |idx| *idx < self.len_internal())
        .map(move |idx| self.index(idx))
}
// Inclusive lower-bound key of this table (internal slot 0).
fn lo(&self) -> &[u8] {
    self.index(0).0
}
// Exclusive upper-bound key of this table (internal slot 1).
fn hi(&self) -> &[u8] {
    self.index(1).0
}
// Child accessor: children live after the two boundary slots.
fn index_child(&self, idx: usize) -> (&[u8], &[u8]) {
    self.index(idx + 2)
}
// Splits an internal slot's raw bytes into (key, trailing u64 value).
fn index(&self, idx: usize) -> (&[u8], &[u8]) {
    assert!(
        idx < self.len_internal(),
        "index {} is not less than internal length of {}",
        idx,
        self.len_internal()
    );
    let raw = self.index_raw_kv(idx);
    let pivot = raw.len() - U64_SZ;
    let (key, pid_bytes) = raw.split_at(pivot);
    (key, pid_bytes)
}
// Reads slot `idx` assuming a u64-per-slot offset table followed by
// [u64 key-length][key][u64 value] records.
//
// NOTE(review): this layout does NOT match what `new` writes
// (variable-width offsets, varint length prefixes); one side must be
// brought in line before reads on a fresh table can work — confirm.
fn index_raw_kv(&self, idx: usize) -> &[u8] {
    assert!(idx <= self.len_internal());
    let data_buf = &self.0[size_of::<Header>()..];
    let offsets_len = self.len_internal() * U64_SZ;
    let (offsets, items) = data_buf.split_at(offsets_len);
    let offset_buf = &offsets[U64_SZ * idx..U64_SZ * (idx + 1)];
    let kv_offset =
        u64::from_le_bytes(offset_buf.try_into().unwrap()) as usize;
    let item_buf = &items[kv_offset..];
    let len_buf = &item_buf[..U64_SZ];
    let key_len = u64::from_le_bytes(len_buf.try_into().unwrap()) as usize;
    let val_len = U64_SZ;
    let start = U64_SZ;
    let end = (2 * U64_SZ) + key_len;
    &item_buf[start..end]
}
}
// Number of bytes the varint encoding of `int` occupies (1..=9).
// Thresholds mirror the encoder in `serialize_varint_into`.
fn varint_size(int: u64) -> u64 {
    match int {
        0..=240 => 1,
        241..=2287 => 2,
        2288..=67823 => 3,
        67824..=0x00FF_FFFF => 4,
        0x0100_0000..=0xFFFF_FFFF => 5,
        0x1_0000_0000..=0x00FF_FFFF_FFFF => 6,
        0x0100_0000_0000..=0xFFFF_FFFF_FFFF => 7,
        0x1_0000_0000_0000..=0x00FF_FFFF_FFFF_FFFF => 8,
        _ => 9,
    }
}
// Encodes `int` into the front of `buf` and returns how many bytes the
// varint consumed. Encoding: values <= 240 are a single byte; two- and
// three-byte forms use tags 241..=249; larger values use tag byte
// 250..=255 followed by 3..=8 little-endian payload bytes.
fn serialize_varint_into(int: u64, buf: &mut [u8]) -> usize {
    let le = int.to_le_bytes();
    if int <= 240 {
        buf[0] = le[0];
        1
    } else if int <= 2287 {
        let rem = int - 240;
        buf[0] = (rem / 256 + 241) as u8;
        buf[1] = (rem % 256) as u8;
        2
    } else if int <= 67823 {
        let rem = int - 2288;
        buf[0] = 249;
        buf[1] = (rem / 256) as u8;
        buf[2] = (rem % 256) as u8;
        3
    } else {
        // Payload width in bytes; the tag is 247 + payload (250..=255).
        let payload = match int {
            _ if int <= 0x00FF_FFFF => 3,
            _ if int <= 0xFFFF_FFFF => 4,
            _ if int <= 0x00FF_FFFF_FFFF => 5,
            _ if int <= 0xFFFF_FFFF_FFFF => 6,
            _ if int <= 0x00FF_FFFF_FFFF_FFFF => 7,
            _ => 8,
        };
        buf[0] = (247 + payload) as u8;
        buf[1..=payload].copy_from_slice(&le[..payload]);
        payload + 1
    }
}
// Returns the deserialized varint, along with how many bytes
// were taken up by the varint.
//
// BUG FIX: for tag bytes >= 250 the consumed size is the payload length
// PLUS the tag byte. `serialize_varint_into` writes `sz + 1` bytes for
// these encodings, but this function previously reported only `sz`,
// desynchronizing any caller that advances a cursor by the result.
fn deserialize_varint(buf: &[u8]) -> crate::Result<(u64, usize)> {
    if buf.is_empty() {
        return Err(crate::Error::corruption(None));
    }
    // NOTE(review): `buf[1..]` is indexed without a length check below, so
    // a truncated buffer panics instead of returning a corruption error —
    // confirm callers always supply enough bytes.
    let res = match buf[0] {
        0..=240 => (u64::from(buf[0]), 1),
        241..=248 => {
            let varint =
                240 + 256 * (u64::from(buf[0]) - 241) + u64::from(buf[1]);
            (varint, 2)
        }
        249 => {
            let varint = 2288 + 256 * u64::from(buf[1]) + u64::from(buf[2]);
            (varint, 3)
        }
        other => {
            // Tags 250..=255: `other - 247` little-endian payload bytes
            // follow the tag byte.
            let sz = other as usize - 247;
            let mut aligned = [0; 8];
            aligned[..sz].copy_from_slice(&buf[1..=sz]);
            let varint = u64::from_le_bytes(aligned);
            (varint, sz + 1)
        }
    };
    Ok(res)
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn simple() {
        // BUG FIX: this test did not typecheck — `items` has type
        // `&[(&[u8], &[u8])]`, but bare integers (42, 66) were passed as
        // values and compared against `get_lub`'s `&[u8]` result. Values
        // are now encoded as little-endian u64 byte arrays.
        let v42 = 42_u64.to_le_bytes();
        let v66 = 66_u64.to_le_bytes();
        let mut ir = SSTable::new(
            &[1],
            &[7],
            0,
            &[(&[1], &v42), (&[6, 6, 6], &v66)],
        );
        ir.next = Some(NonZeroU64::new(5).unwrap());
        ir.is_index = false;
        dbg!(ir.header());
        println!("ir: {:#?}", ir);
        assert_eq!(ir.get_lub(&[1]), &v42[..]);
        assert_eq!(ir.get_lub(&[2]), &v42[..]);
        assert_eq!(ir.get_lub(&[6]), &v42[..]);
        assert_eq!(ir.get_lub(&[7]), &v66[..]);
    }
}
Conditionally write varints into key and value slots
#![allow(unsafe_code)]
#![allow(unused)]
use std::{
alloc::{alloc_zeroed, dealloc, Layout},
cmp::Ordering::{Equal, Greater, Less},
convert::{TryFrom, TryInto},
fmt,
mem::{align_of, size_of, ManuallyDrop},
num::NonZeroU64,
ops::{Deref, DerefMut, Index, IndexMut},
};
// Every table buffer is aligned for the in-place `Header` at offset 0.
const ALIGNMENT: usize = align_of::<Header>();
// Size of a u64 slot, used throughout the layout arithmetic.
const U64_SZ: usize = size_of::<u64>();
// Allocates a zeroed boxed byte slice holding `size` data bytes plus space
// for a `Header` struct at the beginning, aligned for `Header`.
fn aligned_boxed_slice(size: usize) -> Box<[u8]> {
    let size = size + size_of::<Header>();
    let layout = Layout::from_size_align(size, ALIGNMENT).unwrap();
    unsafe {
        // SAFETY: `layout` has a non-zero size (it always includes the
        // Header), and the resulting Box is only freed via `SSTable::drop`,
        // which deallocates with this same size/alignment.
        // NOTE(review): a null return from `alloc_zeroed` (OOM) is only
        // caught indirectly by `fatten`'s assert — confirm that is intended.
        let ptr = alloc_zeroed(layout);
        let fat_ptr = fatten(ptr, size);
        let ret = Box::from_raw(fat_ptr);
        assert_eq!(ret.len(), size);
        ret
    }
}
/// Builds a fat (`*mut [u8]`) slice pointer from a thin pointer and length.
/// <https://users.rust-lang.org/t/construct-fat-pointer-to-struct/29198/9>
#[allow(trivial_casts)]
fn fatten(data: *const u8, len: usize) -> *mut [u8] {
    // Requirements of slice::from_raw_parts.
    assert!(!data.is_null());
    assert!(isize::try_from(len).is_ok());
    // SAFETY: non-null and isize-sized checked above; the caller guarantees
    // `data` actually spans `len` bytes. `()` elements make the temporary
    // slice zero-sized, so no real memory is read here.
    let slice = unsafe { core::slice::from_raw_parts(data as *const (), len) };
    slice as *const [()] as *mut _
}
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub(crate) struct Header {
    // NB always lay out fields from largest to smallest
    // to properly pack the struct
    pub next: Option<NonZeroU64>,
    pub merging_child: Option<NonZeroU64>,
    // Byte lengths of the lo/hi bound keys recorded by `new`.
    lo_len: u64,
    hi_len: u64,
    // Some(len) when every key (resp. value) has the same non-zero length,
    // letting the layout skip the offset indirection table.
    fixed_key_length: Option<NonZeroU64>,
    fixed_value_length: Option<NonZeroU64>,
    pub children: u16,
    // Width in bytes of each offset-table slot (0 when the table is unused).
    offset_bytes: u8,
    pub prefix_len: u8,
    pub merging: bool,
    pub is_index: bool,
}
/// An immutable sorted string table: a single aligned allocation holding a
/// `Header` followed by the item data buffer.
pub(crate) struct SSTable(ManuallyDrop<Box<[u8]>>);
impl Drop for SSTable {
    fn drop(&mut self) {
        let box_ptr = self.0.as_mut_ptr();
        let layout = Layout::from_size_align(self.0.len(), ALIGNMENT).unwrap();
        unsafe {
            // SAFETY: the buffer came from `aligned_boxed_slice` with exactly
            // this size/alignment, and `ManuallyDrop` prevents the Box's own
            // destructor from running, so this is the only deallocation.
            dealloc(box_ptr, layout);
        }
    }
}
// Deref to `Header` so header fields read like fields of the table itself.
impl Deref for SSTable {
    type Target = Header;
    fn deref(&self) -> &Header {
        self.header()
    }
}
impl fmt::Debug for SSTable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SSTable")
            .field("header", self.header())
            .field("lo", &self.lo())
            .field("hi", &self.hi())
            .field("items", &self.iter().collect::<crate::Map<_, _>>())
            .finish()
    }
}
impl DerefMut for SSTable {
    fn deref_mut(&mut self) -> &mut Header {
        self.header_mut()
    }
}
impl SSTable {
/// Constructs an immutable sorted table from pre-sorted `items`, bounded
/// by the `lo`/`hi` keys, with `prefix_len` shared key-prefix bytes
/// already stripped from the item keys.
///
/// FIX: removed the stray debug `println!` — library code must not write
/// to stdout on every allocation; also uses field-init shorthand for
/// `prefix_len`.
pub fn new(
    lo: &[u8],
    hi: &[u8],
    prefix_len: u8,
    items: &[(&[u8], &[u8])],
) -> SSTable {
    // determine if we need to use varints and offset
    // indirection tables, or if everything is equal
    // size we can skip this.
    let mut key_lengths = Vec::with_capacity(items.len());
    let mut value_lengths = Vec::with_capacity(items.len());
    let mut keys_equal_length = true;
    let mut values_equal_length = true;
    for (k, v) in items {
        key_lengths.push(k.len() as u64);
        if let Some(first_sz) = key_lengths.first() {
            keys_equal_length &= *first_sz == k.len() as u64;
        }
        value_lengths.push(v.len() as u64);
        if let Some(first_sz) = value_lengths.first() {
            values_equal_length &= *first_sz == v.len() as u64;
        }
    }
    // Some(len) when all keys share one non-zero length.
    let fixed_key_length = if keys_equal_length {
        if let Some(key_length) = key_lengths.first() {
            if *key_length > 0 {
                Some(NonZeroU64::new(*key_length).unwrap())
            } else {
                None
            }
        } else {
            None
        }
    } else {
        None
    };
    let fixed_value_length = if values_equal_length {
        if let Some(value_length) = value_lengths.first() {
            if *value_length > 0 {
                Some(NonZeroU64::new(*value_length).unwrap())
            } else {
                None
            }
        } else {
            None
        }
    } else {
        None
    };
    let key_storage_size = if let Some(key_length) = fixed_key_length {
        key_length.get() * (items.len() as u64)
    } else {
        // Variable-length keys each carry a varint length prefix.
        let mut sum = 0;
        for key_length in &key_lengths {
            sum += key_length;
            sum += varint_size(*key_length);
        }
        sum
    };
    let value_storage_size = if let Some(value_length) = fixed_value_length
    {
        value_length.get() * (items.len() as u64)
    } else {
        let mut sum = 0;
        for value_length in &value_lengths {
            sum += value_length;
            sum += varint_size(*value_length);
        }
        sum
    };
    let (offsets_storage_size, offset_bytes) = if keys_equal_length
        && values_equal_length
    {
        (0, 0)
    } else {
        // Size offsets pessimistically (6 bytes each) so the chosen width
        // can address the whole buffer.
        let max_offset_storage_size = (6 * items.len()) as u64;
        let max_total_item_storage_size =
            key_storage_size + value_storage_size + max_offset_storage_size;
        let bytes_per_offset: u8 = match max_total_item_storage_size {
            i if i < 256 => 1,
            i if i < (1 << 16) => 2,
            i if i < (1 << 24) => 3,
            i if i < (1 << 32) => 4,
            i if i < (1 << 40) => 5,
            i if i < (1 << 48) => 6,
            _ => unreachable!(),
        };
        (bytes_per_offset as u64 * items.len() as u64, bytes_per_offset)
    };
    let total_item_storage_size =
        key_storage_size + value_storage_size + offsets_storage_size;
    let boxed_slice = aligned_boxed_slice(
        usize::try_from(total_item_storage_size).unwrap(),
    );
    let mut ret = SSTable(ManuallyDrop::new(boxed_slice));
    *ret.header_mut() = Header {
        next: None,
        merging_child: None,
        lo_len: lo.len() as u64,
        hi_len: hi.len() as u64,
        fixed_key_length,
        fixed_value_length,
        offset_bytes,
        children: u16::try_from(items.len()).unwrap(),
        prefix_len,
        merging: false,
        is_index: true,
    };
    // We use either 0 or 1 offset tables, giving 4 possible layouts:
    // 1. [fixed size keys] [fixed size values]
    //    - fixed_key_length: Some, fixed_value_length: Some
    // 2. [offsets] [fixed size keys] [varint-prefixed values]
    //    - fixed_key_length: Some, fixed_value_length: None
    // 3. [offsets] [varint-prefixed keys] [fixed-length values]
    //    - fixed_key_length: None, fixed_value_length: Some
    // 4. [offsets] [varint-prefixed keys followed by varint-prefixed values]
    //    - fixed_key_length: None, fixed_value_length: None
    let mut offset = 0_u64;
    for (idx, (k, v)) in items.iter().enumerate() {
        if !keys_equal_length || !values_equal_length {
            ret.set_offset(idx, usize::try_from(offset).unwrap());
        }
        if !keys_equal_length {
            offset += varint_size(k.len() as u64) + k.len() as u64;
        }
        if !values_equal_length {
            offset += varint_size(v.len() as u64) + v.len() as u64;
        }
        // Write the varint length prefix for variable-length keys, then
        // copy into an exactly-sized subslice of the open-ended buffer.
        let mut key_buf = ret.key_buf_for_offset_mut(idx);
        if !keys_equal_length {
            let varint_bytes =
                serialize_varint_into(k.len() as u64, key_buf);
            key_buf = &mut key_buf[varint_bytes..];
        }
        key_buf[..k.len()].copy_from_slice(k);
        let mut value_buf = ret.value_buf_for_offset_mut(idx);
        if !values_equal_length {
            let varint_bytes =
                serialize_varint_into(v.len() as u64, value_buf);
            value_buf = &mut value_buf[varint_bytes..];
        }
        value_buf[..v.len()].copy_from_slice(v);
    }
    ret
}
// Returns the OPEN ENDED buffer where a key may be placed: fixed-size keys
// are addressed arithmetically, variable ones via the offset table.
fn key_buf_for_offset_mut(&mut self, index: usize) -> &mut [u8] {
    match (self.fixed_key_length, self.fixed_value_length) {
        (Some(k_sz), Some(_)) | (Some(k_sz), None) => {
            let keys_buf = self.keys_buf_mut();
            &mut keys_buf[index * usize::try_from(k_sz.get()).unwrap()..]
        }
        (None, Some(_)) | (None, None) => {
            // find offset for key or combined kv offset
            let offset = self.offset(index);
            let keys_buf = self.keys_buf_mut();
            &mut keys_buf[offset..]
        }
    }
}
// Returns the OPEN ENDED buffer where a value may be placed.
//
// NB: it's important that this is only ever called after setting
// the key and its varint length prefix, as this needs to be parsed
// for case 4.
fn value_buf_for_offset_mut(&mut self, index: usize) -> &mut [u8] {
    match (self.fixed_key_length, self.fixed_value_length) {
        (Some(_), Some(v_sz)) | (None, Some(v_sz)) => {
            let values_buf = self.values_buf_mut();
            &mut values_buf[index * usize::try_from(v_sz.get()).unwrap()..]
        }
        (Some(_), None) | (None, None) => {
            // find combined kv offset, skip key bytes
            let offset = self.offset(index);
            let values_buf = self.values_buf_mut();
            &mut values_buf[offset..]
        }
    }
}
/// Reads the `index`th slot of the offset table.
///
/// BUG FIX: the stored bytes are the LITTLE-ENDIAN low bytes of the
/// offset (see `set_offset`), so they must be copied into the FRONT of
/// the u64 buffer; the old code copied them into the high bytes, which
/// scaled every decoded offset by 2^(8 * (8 - offset_bytes)).
fn offset(&self, index: usize) -> usize {
    let start = index * self.offset_bytes as usize;
    let end = start + self.offset_bytes as usize;
    let buf = &self.offsets_buf()[start..end];
    let mut le_usize_buf = [0u8; U64_SZ];
    le_usize_buf[..end - start].copy_from_slice(buf);
    usize::try_from(u64::from_le_bytes(le_usize_buf)).unwrap()
}
// Writes `offset` into slot `index` of the offset table, keeping only the
// low `offset_bytes` little-endian bytes.
fn set_offset(&mut self, index: usize, offset: usize) {
    let offset_bytes = self.offset_bytes as usize;
    let mut buf = self.offset_buf_for_offset_mut(index);
    let bytes = &offset.to_le_bytes()[..offset_bytes];
    buf.copy_from_slice(bytes);
}
// Exactly the `offset_bytes`-wide slot for `index` in the offset table.
fn offset_buf_for_offset_mut(&mut self, index: usize) -> &mut [u8] {
    let start = index * self.offset_bytes as usize;
    let end = start + self.offset_bytes as usize;
    &mut self.offsets_buf_mut()[start..end]
}
// TODO: key region of the data buffer (follows the offset table).
fn keys_buf_mut(&mut self) -> &mut [u8] {
    todo!()
}
// TODO: value region of the data buffer (follows the keys).
fn values_buf_mut(&mut self) -> &mut [u8] {
    todo!()
}
// Offset table lives at the front of the data buffer.
fn offsets_buf(&self) -> &[u8] {
    let offset_sz = self.children as usize * self.offset_bytes as usize;
    &self.data_buf()[..offset_sz]
}
fn offsets_buf_mut(&mut self) -> &mut [u8] {
    let offset_sz = self.children as usize * self.offset_bytes as usize;
    &mut self.data_buf_mut()[..offset_sz]
}
// Everything after the in-place Header.
fn data_buf(&self) -> &[u8] {
    &self.0[size_of::<Header>()..]
}
fn data_buf_mut(&mut self) -> &mut [u8] {
    &mut self.0[size_of::<Header>()..]
}
/// Returns a NEW table with `key`/`value` merged in (copy-on-write).
pub fn insert(&self, key: &[u8], value: &[u8]) -> SSTable {
    match self.find(&key[usize::from(self.prefix_len)..]) {
        Ok(offset) => {
            if self.is_index {
                panic!("already contained key being merged into index");
            }
            // TODO: rebuild with the value at `offset` replaced.
            todo!()
        }
        Err(prospective_offset) => {
            // TODO: rebuild with the new pair spliced in at
            // `prospective_offset`.
            todo!()
        }
    }
}
/// Returns a NEW table with `key` removed; panics when absent.
pub fn remove(&self, key: &[u8]) -> SSTable {
    let offset = self
        .find(&key[usize::from(self.prefix_len)..])
        .expect("called remove for non-present key");
    // TODO: rebuild without the entry at `offset`.
    todo!()
}
// TODO: split into two tables around a midpoint key.
pub fn split(&self) -> (SSTable, SSTable) {
    todo!()
}
// TODO: merge the entries of `other` into a new table.
pub fn merge(&self, other: &SSTable) -> SSTable {
    todo!()
}
// TODO: size-based split heuristic.
pub fn should_split(&self) -> bool {
    todo!()
}
// TODO: size-based merge heuristic.
pub fn should_merge(&self) -> bool {
    todo!()
}
fn header(&self) -> &Header {
    // SAFETY: the buffer begins with a Header-aligned, zero-initialized
    // Header (see `aligned_boxed_slice`) that `new` fully initializes.
    unsafe { &*(self.0.as_ptr() as *mut Header) }
}
fn header_mut(&mut self) -> &mut Header {
    // SAFETY: as in `header`; `&mut self` guarantees unique access.
    unsafe { &mut *(self.0.as_mut_ptr() as *mut Header) }
}
// Number of child entries (excludes the lo/hi boundary slots).
pub fn len(&self) -> usize {
    usize::from(self.children)
}
// Children plus the two boundary entries (`lo` at slot 0, `hi` at slot 1).
fn len_internal(&self) -> usize {
    self.len() + 2
}
fn find(&self, key: &[u8]) -> Result<usize, usize> {
let mut size = self.len();
if size == 0 || key < self.index_child(0).0 {
return Err(0);
}
let mut base = 0_usize;
while size > 1 {
let half = size / 2;
let mid = base + half;
// mid is always in [0, size), that means mid is >= 0 and < size.
// mid >= 0: by definition
// mid < size: mid = size / 2 + size / 4 + size / 8 ...
let l = self.index_child(mid).0;
let cmp = crate::fastcmp(l, key);
base = if cmp == Greater { base } else { mid };
size -= half;
}
// base is always in [0, size) because base <= mid.
let l = self.index_child(base).0;
let cmp = crate::fastcmp(l, key);
if cmp == Equal { Ok(base) } else { Err(base + (cmp == Less) as usize) }
}
fn get_lub(&self, key: &[u8]) -> &[u8] {
assert!(key >= self.lo());
match self.find(key) {
Ok(idx) => self.index_child(idx).1,
Err(idx) => self.index_child(idx - 1).1,
}
}
/// Iterate over the child entries as `(key, value)` pairs, skipping
/// the `lo`/`hi` bound entries at internal indices 0 and 1.
fn iter(&self) -> impl Iterator<Item = (&[u8], &[u8])> {
    // `len_internal` is constant for the life of this borrow, so a
    // bounded range is equivalent to the former take_while chain.
    let end = self.len_internal();
    (2..end).map(move |idx| self.index(idx))
}
/// The low bound key of this table (internal index 0).
fn lo(&self) -> &[u8] {
    self.index(0).0
}

/// The high bound key of this table (internal index 1).
fn hi(&self) -> &[u8] {
    self.index(1).0
}

/// Look up child `idx`, offsetting past the two bound entries.
fn index_child(&self, idx: usize) -> (&[u8], &[u8]) {
    self.index(idx + 2)
}
/// Return the `(key, value)` pair at internal index `idx`.
///
/// The value is the fixed-width trailing `U64_SZ` bytes of the raw
/// entry; everything before it is the key.
fn index(&self, idx: usize) -> (&[u8], &[u8]) {
    assert!(
        idx < self.len_internal(),
        "index {} is not less than internal length of {}",
        idx,
        self.len_internal()
    );
    let raw = self.index_raw_kv(idx);
    // Split off the fixed-size value from the end of the raw entry.
    let pivot = raw.len() - U64_SZ;
    let (key, pid_bytes) = raw.split_at(pivot);
    (key, pid_bytes)
}
/// Return the raw bytes of entry `idx`: the key followed by the
/// fixed-width 8-byte value.
///
/// Layout after the `Header`: an offset table of `len_internal()`
/// little-endian u64s, then the items region. Each item is a u64 key
/// length, the key bytes, then the 8-byte value.
fn index_raw_kv(&self, idx: usize) -> &[u8] {
    assert!(idx <= self.len_internal());
    let data_buf = &self.0[size_of::<Header>()..];
    let offsets_len = self.len_internal() * U64_SZ;
    let (offsets, items) = data_buf.split_at(offsets_len);
    let offset_buf = &offsets[U64_SZ * idx..U64_SZ * (idx + 1)];
    let kv_offset =
        u64::from_le_bytes(offset_buf.try_into().unwrap()) as usize;
    let item_buf = &items[kv_offset..];
    let len_buf = &item_buf[..U64_SZ];
    let key_len = u64::from_le_bytes(len_buf.try_into().unwrap()) as usize;
    // Skip the u64 length prefix, then take the key plus the 8-byte
    // value. (The former unused `val_len` local has been removed.)
    let start = U64_SZ;
    let end = (2 * U64_SZ) + key_len;
    &item_buf[start..end]
}
}
/// Number of bytes `serialize_varint_into` will use to encode `int`
/// (1..=9, SQLite4-style varint scheme).
fn varint_size(int: u64) -> u64 {
    match int {
        0..=240 => 1,
        241..=2287 => 2,
        2288..=67823 => 3,
        67824..=0x00FF_FFFF => 4,
        0x0100_0000..=0xFFFF_FFFF => 5,
        0x1_0000_0000..=0x00FF_FFFF_FFFF => 6,
        0x0100_0000_0000..=0xFFFF_FFFF_FFFF => 7,
        0x1_0000_0000_0000..=0x00FF_FFFF_FFFF_FFFF => 8,
        _ => 9,
    }
}
// returns how many bytes the varint consumed
// returns how many bytes the varint consumed
//
// Encoding: values <= 240 are a single byte; 241..=2287 use a tag in
// 241..=248 plus one byte; 2288..=67823 use tag 249 plus two bytes;
// larger values use tag `247 + payload` (250..=255) followed by
// `payload` little-endian bytes, where payload is 3..=8.
// `buf` must be at least `varint_size(int)` bytes long.
fn serialize_varint_into(int: u64, buf: &mut [u8]) -> usize {
    if int <= 240 {
        // Single byte holds the value directly.
        buf[0] = int as u8;
        return 1;
    }
    if int <= 2287 {
        // Tag 241..=248 plus one remainder byte.
        let rem = int - 240;
        buf[0] = (rem / 256 + 241) as u8;
        buf[1] = (rem % 256) as u8;
        return 2;
    }
    if int <= 67823 {
        // Fixed tag 249 plus two remainder bytes.
        let rem = int - 2288;
        buf[0] = 249;
        buf[1] = (rem / 256) as u8;
        buf[2] = (rem % 256) as u8;
        return 3;
    }
    // Tags 250..=255: find the smallest payload length (3..=8) whose
    // little-endian bytes cover all significant bytes of `int`.
    let le = int.to_le_bytes();
    let mut payload = 3;
    while payload < 8 && int >= 1u64 << (8 * payload) {
        payload += 1;
    }
    buf[0] = (247 + payload) as u8;
    buf[1..=payload].copy_from_slice(&le[..payload]);
    payload + 1
}
// returns the deserialized varint, along with how many bytes
// were taken up by the varint
// returns the deserialized varint, along with how many bytes
// were taken up by the varint
//
// Inverse of `serialize_varint_into`; errors with corruption when
// `buf` is empty. Callers must supply at least `varint_size` bytes —
// short buffers for the multi-byte encodings will panic on indexing.
fn deserialize_varint(buf: &[u8]) -> crate::Result<(u64, usize)> {
    if buf.is_empty() {
        return Err(crate::Error::corruption(None));
    }
    let res = match buf[0] {
        // Single-byte encoding: the tag is the value.
        0..=240 => (u64::from(buf[0]), 1),
        // Two-byte encoding for 241..=2287.
        241..=248 => {
            let varint =
                240 + 256 * (u64::from(buf[0]) - 241) + u64::from(buf[1]);
            (varint, 2)
        }
        // Three-byte encoding for 2288..=67823.
        249 => {
            let varint = 2288 + 256 * u64::from(buf[1]) + u64::from(buf[2]);
            (varint, 3)
        }
        // Tags 250..=255 are followed by `tag - 247` little-endian
        // payload bytes, so the total consumed is `sz + 1` (the tag
        // byte plus the payload) — returning `sz` here was an
        // off-by-one that disagreed with `serialize_varint_into` and
        // `varint_size`.
        other => {
            let sz = other as usize - 247;
            let mut aligned = [0; 8];
            aligned[..sz].copy_from_slice(&buf[1..=sz]);
            let varint = u64::from_le_bytes(aligned);
            (varint, sz + 1)
        }
    };
    Ok(res)
}
#[cfg(test)]
mod test {
    use super::*;

    /// Smoke test for construction and `get_lub` lookups.
    ///
    /// NOTE(review): `get_lub` returns a byte slice, but these asserts
    /// compare it against bare integers (`42`, `66`) — this looks like
    /// a stale test that predates the current return type; confirm it
    /// still compiles/passes.
    #[test]
    fn simple() {
        let mut ir =
            SSTable::new(&[1], &[7], 0, &[(&[1], 42), (&[6, 6, 6], 66)]);
        ir.next = Some(NonZeroU64::new(5).unwrap());
        ir.is_index = false;
        dbg!(ir.header());
        println!("ir: {:#?}", ir);
        assert_eq!(ir.get_lub(&[1]), 42);
        assert_eq!(ir.get_lub(&[2]), 42);
        assert_eq!(ir.get_lub(&[6]), 42);
        assert_eq!(ir.get_lub(&[7]), 66);
    }
}
|
//! Implementation of a Micro Transport Protocol library.
//!
//! http://www.bittorrent.org/beps/bep_0029.html
//!
//! TODO
//! ----
//!
//! - congestion control
//! - proper connection closing
//! - automatically send FIN (or should it be RST?) on `drop` if not already closed
//! - setters and getters that hide header field endianness conversion
//! - SACK extension
//! - handle packet loss
#![crate_name = "utp"]
#![license = "MIT/ASL2"]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![feature(macro_rules, phase)]
#![deny(missing_doc)]
extern crate time;
#[phase(plugin, link)] extern crate log;
use std::io::net::udp::UdpSocket;
use std::io::net::ip::SocketAddr;
use std::io::IoResult;
use std::mem::transmute;
use std::rand::random;
use std::fmt;
// Size of the fixed uTP packet header, in bytes.
static HEADER_SIZE: uint = 20;

// For simplicity's sake, let us assume no packet will ever exceed the
// Ethernet maximum transfer unit of 1500 bytes.
static BUF_SIZE: uint = 1500;
// Assemble an unsigned integer of type `$t` from the bytes
// `$src[$start]..=$src[$end]` (the range here is inclusive).
// NOTE(review): precedence parses this as `($src[..] as $t) << (i*8)`,
// so the lowest index lands in the low byte; on a little-endian host
// that preserves the wire byte order in memory, which is why callers
// later read the fields with `Int::from_be` — confirm.
macro_rules! u8_to_unsigned_be(
    ($src:ident[$start:expr..$end:expr] -> $t:ty) => ({
        let mut result: $t = 0;
        for i in range(0u, $end-$start+1).rev() {
            result = result | $src[$start+i] as $t << i*8;
        }
        result
    })
)
/// Return current time in microseconds since the UNIX epoch.
///
/// Truncated to `u32` to match the 32-bit timestamp fields of the uTP
/// header, so the value wraps; consumers compare timestamps modulo
/// 2^32.
fn now_microseconds() -> u32 {
    let t = time::get_time();
    (t.sec * 1_000_000) as u32 + (t.nsec/1000) as u32
}
// Packet types from BEP 29; the discriminants are the on-wire values
// stored in the high nibble of `type_ver`.
#[allow(dead_code,non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
enum UtpPacketType {
    ST_DATA = 0,  // payload-carrying data packet
    ST_FIN = 1,   // connection teardown request
    ST_STATE = 2, // bare acknowledgement (no payload)
    ST_RESET = 3, // forcibly terminate the connection
    ST_SYN = 4,   // connection initiation
}
// The fixed 20-byte uTP packet header. All multi-byte fields are kept
// in network (big-endian) byte order in memory; `#[packed]` makes the
// struct layout match the wire format so `bytes()` can transmute it.
#[allow(dead_code)]
#[deriving(Clone)]
#[packed]
struct UtpPacketHeader {
    type_ver: u8, // type: u4, ver: u4
    extension: u8,
    connection_id: u16,
    timestamp_microseconds: u32,
    timestamp_difference_microseconds: u32,
    wnd_size: u32, // advertised receive window, bytes
    seq_nr: u16,
    ack_nr: u16,
}
impl UtpPacketHeader {
    /// Set type of packet to the specified type.
    ///
    /// The type lives in the high nibble of `type_ver`; the version in
    /// the low nibble is preserved.
    fn set_type(&mut self, t: UtpPacketType) {
        let version = 0x0F & self.type_ver;
        self.type_ver = t as u8 << 4 | version;
    }

    /// Read the packet type from the high nibble of `type_ver`.
    ///
    /// NOTE(review): transmuting an arbitrary nibble into the enum is
    /// undefined for values > 4 (e.g. a corrupt packet) — confirm
    /// inputs are validated upstream.
    fn get_type(&self) -> UtpPacketType {
        let t: UtpPacketType = unsafe { transmute(self.type_ver >> 4) };
        t
    }

    /// Protocol version from the low nibble of `type_ver` (1 for BEP 29).
    fn get_version(&self) -> u8 {
        self.type_ver & 0x0F
    }

    /// Return a copy of this header with `wnd_size` replaced by
    /// `new_wnd_size` (stored big-endian); `self` is untouched.
    fn wnd_size(&self, new_wnd_size: u32) -> UtpPacketHeader {
        UtpPacketHeader {
            wnd_size: new_wnd_size.to_be(),
            .. self.clone()
        }
    }

    /// Return packet header as a slice of bytes.
    ///
    /// Relies on `#[packed]` making the in-memory layout identical to
    /// the 20-byte wire format.
    fn bytes(&self) -> &[u8] {
        let buf: &[u8, ..HEADER_SIZE] = unsafe { transmute(self) };
        return buf.as_slice();
    }

    /// Header length in bytes (always `HEADER_SIZE`).
    fn len(&self) -> uint {
        return HEADER_SIZE;
    }

    /// Read byte buffer and return corresponding packet header.
    /// It assumes the fields are in network (big-endian) byte order,
    /// preserving it.
    fn decode(buf: &[u8]) -> UtpPacketHeader {
        UtpPacketHeader {
            type_ver: buf[0],
            extension: buf[1],
            connection_id: u8_to_unsigned_be!(buf[2..3] -> u16),
            timestamp_microseconds: u8_to_unsigned_be!(buf[4..7] -> u32),
            timestamp_difference_microseconds: u8_to_unsigned_be!(buf[8..11] -> u32),
            wnd_size: u8_to_unsigned_be!(buf[12..15] -> u32),
            seq_nr: u8_to_unsigned_be!(buf[16..17] -> u16),
            ack_nr: u8_to_unsigned_be!(buf[18..19] -> u16),
        }
    }
}
// Debug formatting: fields are stored big-endian, so each is converted
// to host order with `Int::from_be` before printing.
impl fmt::Show for UtpPacketHeader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "(type: {}, version: {}, extension: {}, \
                connection_id: {}, timestamp_microseconds: {}, \
                timestamp_difference_microseconds: {}, wnd_size: {}, \
                seq_nr: {}, ack_nr: {})",
                self.get_type(),
                Int::from_be(self.get_version()),
                Int::from_be(self.extension),
                Int::from_be(self.connection_id),
                Int::from_be(self.timestamp_microseconds),
                Int::from_be(self.timestamp_difference_microseconds),
                Int::from_be(self.wnd_size),
                Int::from_be(self.seq_nr),
                Int::from_be(self.ack_nr),
        )
    }
}
// A complete uTP packet: 20-byte header plus an arbitrary payload.
#[allow(dead_code)]
struct UtpPacket {
    header: UtpPacketHeader,
    payload: Vec<u8>,
}
impl UtpPacket {
    /// Construct a new, empty packet.
    ///
    /// Defaults to type `ST_DATA`, protocol version 1, all other
    /// fields zeroed and no payload.
    fn new() -> UtpPacket {
        UtpPacket {
            header: UtpPacketHeader {
                type_ver: ST_DATA as u8 << 4 | 1,
                extension: 0,
                connection_id: 0,
                timestamp_microseconds: 0,
                timestamp_difference_microseconds: 0,
                wnd_size: 0,
                seq_nr: 0,
                ack_nr: 0,
            },
            payload: Vec::new(),
        }
    }

    /// Set the packet type nibble in the header.
    fn set_type(&mut self, t: UtpPacketType) {
        self.header.set_type(t);
    }

    // TODO: Read up on pointers and ownership
    /// Read the packet type nibble from the header.
    fn get_type(&self) -> UtpPacketType {
        self.header.get_type()
    }

    /// Return a copy of this packet with the advertised window size
    /// replaced; `self` is untouched (payload is cloned).
    fn wnd_size(&self, new_wnd_size: u32) -> UtpPacket {
        UtpPacket {
            header: self.header.wnd_size(new_wnd_size),
            payload: self.payload.clone(),
        }
    }

    /// Serialize the packet (header then payload) into a byte vector.
    /// TODO: return slice
    fn bytes(&self) -> Vec<u8> {
        let mut buf = Vec::with_capacity(self.len());
        buf.push_all(self.header.bytes());
        buf.push_all(self.payload.as_slice());
        return buf;
    }

    /// Total on-wire length: header plus payload.
    fn len(&self) -> uint {
        self.header.len() + self.payload.len()
    }

    /// Decode a byte slice and construct the equivalent UtpPacket.
    ///
    /// Note that this method makes no attempt to guess the payload size, saving
    /// all except the initial 20 bytes corresponding to the header as payload.
    /// It's the caller's responsability to use an appropriately sized buffer.
    fn decode(buf: &[u8]) -> UtpPacket {
        UtpPacket {
            header: UtpPacketHeader::decode(buf),
            payload: Vec::from_slice(buf.slice(HEADER_SIZE, buf.len()))
        }
    }
}
// Manual Clone: the header is Copy-like (cloned by value), the payload
// vector is deep-copied.
impl Clone for UtpPacket {
    fn clone(&self) -> UtpPacket {
        UtpPacket {
            header: self.header,
            payload: self.payload.clone(),
        }
    }
}
// Packets format as their header only; the payload is not printed.
impl fmt::Show for UtpPacket {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.header.fmt(f)
    }
}
// Connection state machine for a UtpSocket.
#[allow(non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
enum UtpSocketState {
    CS_NEW,          // socket bound, no connection yet
    CS_CONNECTED,    // handshake complete
    CS_SYN_SENT,     // SYN sent, awaiting STATE reply
    CS_FIN_RECEIVED, // peer requested teardown
    CS_FIN_SENT,     // we requested teardown
    CS_RST_RECEIVED, // peer aborted the connection
    CS_CLOSED,       // connection fully closed
    CS_EOF,          // FIN processed; next read reports end-of-file
}
/// A uTP (Micro Transport Protocol) socket.
pub struct UtpSocket {
    socket: UdpSocket,
    connected_to: SocketAddr,
    // Connection id used on packets we send.
    sender_connection_id: u16,
    // Connection id expected on packets we receive.
    receiver_connection_id: u16,
    // Sequence number of the next packet we send.
    seq_nr: u16,
    // Sequence number of the last in-order packet we received.
    ack_nr: u16,
    state: UtpSocketState,
    // Received but not acknowledged packets
    incoming_buffer: Vec<UtpPacket>,
    // Sent but not yet acknowledged packets
    send_buffer: Vec<UtpPacket>,
    // Consecutive duplicate ACKs seen (3 triggers retransmission).
    duplicate_ack_count: uint,
    // Highest ACK number the peer has confirmed.
    last_acked: u16,
}
// Build a STATE (ACK) reply for `$header` and send it to `$src`.
// NOTE(review): the expansion references `self` and `try!`, so this
// macro is only usable inside `UtpSocket` methods that return
// `IoResult` — confirm old-macro hygiene permits capturing `self`.
macro_rules! reply_with_ack(
    ($header:expr, $src:expr) => ({
        let resp = self.prepare_reply($header, ST_STATE).wnd_size(BUF_SIZE as u32);
        try!(self.socket.send_to(resp.bytes().as_slice(), $src));
        debug!("sent {}", resp.header);
    })
)
impl UtpSocket {
/// Create a UTP socket from the given address.
///
/// Picks a random receiver connection id; the sender id is always
/// `receiver id + 1`, per the uTP handshake convention.
#[unstable]
pub fn bind(addr: SocketAddr) -> IoResult<UtpSocket> {
    let skt = UdpSocket::bind(addr);
    let connection_id = random::<u16>();
    match skt {
        Ok(x) => Ok(UtpSocket {
            socket: x,
            // Placeholder until `connect` or an incoming SYN sets the
            // real peer address.
            connected_to: addr,
            receiver_connection_id: connection_id,
            sender_connection_id: connection_id + 1,
            seq_nr: 1,
            ack_nr: 0,
            state: CS_NEW,
            incoming_buffer: Vec::new(),
            send_buffer: Vec::new(),
            duplicate_ack_count: 0,
            last_acked: 0,
        }),
        Err(e) => Err(e)
    }
}
/// Open a uTP connection to a remote host by hostname or IP address.
///
/// Sends a SYN carrying the receiver connection id, then blocks until
/// a reply arrives; a STATE reply completes the handshake, anything
/// else fails with `ConnectionFailed`.
/// NOTE(review): a recv error calls `fail!` (task panic) instead of
/// returning `Err` — confirm this is intended.
#[unstable]
pub fn connect(mut self, other: SocketAddr) -> IoResult<UtpSocket> {
    use std::io::{IoError, ConnectionFailed};
    self.connected_to = other;
    assert_eq!(self.receiver_connection_id + 1, self.sender_connection_id);

    let mut packet = UtpPacket::new();
    packet.set_type(ST_SYN);
    packet.header.connection_id = self.receiver_connection_id.to_be();
    packet.header.seq_nr = self.seq_nr.to_be();
    packet.header.timestamp_microseconds = now_microseconds().to_be();

    // Send packet
    let dst = self.connected_to;
    let _result = self.socket.send_to(packet.bytes().as_slice(), dst);
    debug!("sent {}", packet.header);

    self.state = CS_SYN_SENT;

    let mut buf = [0, ..BUF_SIZE];
    let (_len, addr) = match self.socket.recv_from(buf) {
        Ok(v) => v,
        Err(e) => fail!("{}", e),
    };
    // The handshake reply is a bare STATE packet: header only.
    assert!(_len == HEADER_SIZE);
    assert!(addr == self.connected_to);

    let packet = UtpPacket::decode(buf.slice_to(_len));
    if packet.get_type() != ST_STATE {
        return Err(IoError {
            kind: ConnectionFailed,
            desc: "The remote peer sent an incorrect reply",
            detail: None,
        });
    }
    // Record the peer's sequence number as our ack baseline.
    self.ack_nr = Int::from_be(packet.header.seq_nr);

    debug!("connected to: {} {}", addr, self.connected_to);

    self.state = CS_CONNECTED;
    self.seq_nr += 1;

    Ok(self)
}
/// Gracefully close connection to peer.
///
/// This method allows both peers to receive all packets still in
/// flight.
///
/// Sends a FIN and blocks until a reply arrives; asserts the reply is
/// a STATE (ACK) packet before marking the socket closed.
#[unstable]
pub fn close(&mut self) -> IoResult<()> {
    let mut packet = UtpPacket::new();
    packet.header.connection_id = self.sender_connection_id.to_be();
    packet.header.seq_nr = self.seq_nr.to_be();
    packet.header.ack_nr = self.ack_nr.to_be();
    packet.header.timestamp_microseconds = now_microseconds().to_be();
    packet.set_type(ST_FIN);

    // Send FIN
    let dst = self.connected_to;
    try!(self.socket.send_to(packet.bytes().as_slice(), dst));
    debug!("sent {}", packet);
    self.state = CS_FIN_SENT;

    // Receive JAKE
    let mut buf = [0u8, ..BUF_SIZE];
    try!(self.socket.recv_from(buf));
    let resp = UtpPacket::decode(buf);
    debug!("received {}", resp);
    assert!(resp.get_type() == ST_STATE);

    // Set socket state
    self.state = CS_CLOSED;

    Ok(())
}
/// Receive data from socket.
///
/// On success, returns the number of bytes read and the sender's address.
/// Returns CS_EOF after receiving a FIN packet when the remaining
/// inflight packets are consumed. Subsequent calls return CS_CLOSED.
#[unstable]
pub fn recv_from(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
    use std::cmp::min;
    use std::io::{IoError, EndOfFile, Closed};

    // EOF was reached on a previous call: report it once, then close.
    if self.state == CS_EOF {
        self.state = CS_CLOSED;
        return Err(IoError {
            kind: EndOfFile,
            desc: "End of file reached",
            detail: None,
        });
    }

    if self.state == CS_CLOSED {
        return Err(IoError {
            kind: Closed,
            desc: "Connection closed",
            detail: None,
        });
    }

    let mut b = [0, ..BUF_SIZE + HEADER_SIZE];
    let (read, src) = try!(self.socket.recv_from(b));
    let packet = UtpPacket::decode(b.slice_to(read));
    debug!("received {}", packet.header);

    if packet.get_type() == ST_RESET {
        use std::io::{IoError, ConnectionReset};
        return Err(IoError {
            kind: ConnectionReset,
            desc: "Remote host aborted connection (incorrect connection id)",
            detail: None,
        });
    }

    // TODO: move this to handle_packet?
    if packet.get_type() == ST_SYN {
        // An incoming SYN fixes who our peer is.
        self.connected_to = src;
    }

    // Check if the packet is out of order (that is, it's sequence number
    // does not immediately follow the ACK number)
    if packet.get_type() != ST_STATE && packet.get_type() != ST_SYN
        && self.ack_nr + 1 < Int::from_be(packet.header.seq_nr) {
        debug!("current ack_nr ({}) is behind received packet seq_nr ({})",
               self.ack_nr, Int::from_be(packet.header.seq_nr));

        // Add to buffer but do not acknowledge until all packets between
        // ack_nr + 1 and curr_packet.seq_nr - 1 are received
        self.insert_into_buffer(packet);
        return Ok((0, self.connected_to));
    }

    match self.handle_packet(packet.clone()) {
        Some(pkt) => {
            let pkt = pkt.wnd_size(BUF_SIZE as u32);
            try!(self.socket.send_to(pkt.bytes().as_slice(), src));
            debug!("sent {}", pkt.header);
        },
        None => {}
    };

    // Copy the payload into the caller's buffer, truncating if needed.
    // NOTE(review): `read - HEADER_SIZE` underflows if a datagram
    // shorter than the header arrives — confirm inputs are trusted.
    for i in range(0u, min(buf.len(), read - HEADER_SIZE)) {
        buf[i] = b[i + HEADER_SIZE];
    }

    // Empty buffer if possible
    let mut read = read - HEADER_SIZE;
    while !self.incoming_buffer.is_empty() &&
        self.ack_nr + 1 == Int::from_be(self.incoming_buffer[0].header.seq_nr) {
        let packet = self.incoming_buffer.shift().unwrap();
        debug!("Removing packet from buffer: {}", packet);

        // NOTE(review): `buf[read]` is not bounds-checked here; a
        // small caller buffer plus queued packets would panic — TODO
        // confirm callers always pass BUF_SIZE-sized buffers.
        for i in range(0u, packet.payload.len()) {
            buf[read] = packet.payload[i];
            read += 1;
        }

        self.ack_nr = Int::from_be(packet.header.seq_nr);
    }

    Ok((read, src))
}
// Backwards-compatible alias kept for older callers.
#[allow(missing_doc)]
#[deprecated = "renamed to `recv_from`"]
pub fn recvfrom(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
    self.recv_from(buf)
}
/// Build a reply packet of type `t` for the received header
/// `original`: stamps current time, the timestamp delta, our sender
/// connection id, and the current seq/ack numbers (all big-endian).
///
/// NOTE(review): `self_t_micro - other_t_micro` can wrap when the
/// peer's clock is ahead of ours — confirm wrapped deltas are
/// acceptable to consumers.
fn prepare_reply(&self, original: &UtpPacketHeader, t: UtpPacketType) -> UtpPacket {
    let mut resp = UtpPacket::new();
    resp.set_type(t);
    let self_t_micro: u32 = now_microseconds();
    let other_t_micro: u32 = Int::from_be(original.timestamp_microseconds);
    resp.header.timestamp_microseconds = self_t_micro.to_be();
    resp.header.timestamp_difference_microseconds = (self_t_micro - other_t_micro).to_be();
    resp.header.connection_id = self.sender_connection_id.to_be();
    resp.header.seq_nr = self.seq_nr.to_be();
    resp.header.ack_nr = self.ack_nr.to_be();

    resp
}
/// Send data on socket to the given address. Returns nothing on success.
//
// # Implementation details
//
// This method inserts packets into the send buffer and keeps trying to
// advance the send window until an ACK corresponding to the last packet is
// received.
//
// Note that the buffer passed to `send_to` might exceed the maximum packet
// size, which will result in the data being split over several packets.
#[unstable]
pub fn send_to(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
    use std::io::{IoError, Closed};

    if self.state == CS_CLOSED {
        return Err(IoError {
            kind: Closed,
            desc: "Connection closed",
            detail: None,
        });
    }

    // Split the input into MTU-sized DATA packets; each is buffered
    // for possible retransmission before being sent.
    for chunk in buf.chunks(BUF_SIZE) {
        let mut packet = UtpPacket::new();
        packet.set_type(ST_DATA);
        packet.payload = Vec::from_slice(chunk);
        packet.header.timestamp_microseconds = now_microseconds().to_be();
        packet.header.seq_nr = self.seq_nr.to_be();
        packet.header.ack_nr = self.ack_nr.to_be();
        packet.header.connection_id = self.sender_connection_id.to_be();

        debug!("Pushing packet into send buffer: {}", packet);
        self.send_buffer.push(packet.clone());
        try!(self.socket.send_to(packet.bytes().as_slice(), dst));
        self.seq_nr += 1;
    }

    // Consume acknowledgements until latest packet
    // (blocks; ACK bookkeeping happens inside recv_from/handle_packet).
    let mut buf = [0, ..BUF_SIZE];
    while self.last_acked < self.seq_nr - 1 {
        try!(self.recv_from(buf));
    }

    Ok(())
}
// Backwards-compatible alias kept for older callers.
#[allow(missing_doc)]
#[deprecated = "renamed to `send_to`"]
pub fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
    self.send_to(buf, dst)
}
/// Handle incoming packet, updating socket state accordingly.
///
/// Returns appropriate reply packet, if needed.
fn handle_packet(&mut self, packet: UtpPacket) -> Option<UtpPacket> {
    // Reset connection if connection id doesn't match and this isn't a SYN
    if packet.get_type() != ST_SYN &&
       !(Int::from_be(packet.header.connection_id) == self.sender_connection_id ||
       Int::from_be(packet.header.connection_id) == self.receiver_connection_id) {
        return Some(self.prepare_reply(&packet.header, ST_RESET));
    }

    // Acknowledge only if the packet strictly follows the previous one
    if self.ack_nr + 1 == Int::from_be(packet.header.seq_nr) {
        self.ack_nr = Int::from_be(packet.header.seq_nr);
    }

    match packet.header.get_type() {
        ST_SYN => { // Respond with an ACK and populate own fields
            // Update socket information for new connections
            // (mirror of the initiator's ids: ours are swapped).
            self.ack_nr = Int::from_be(packet.header.seq_nr);
            self.seq_nr = random();
            self.receiver_connection_id = Int::from_be(packet.header.connection_id) + 1;
            self.sender_connection_id = Int::from_be(packet.header.connection_id);
            self.state = CS_CONNECTED;

            Some(self.prepare_reply(&packet.header, ST_STATE))
        }
        ST_DATA => Some(self.prepare_reply(&packet.header, ST_STATE)),
        ST_FIN => {
            self.state = CS_FIN_RECEIVED;
            // TODO: check if no packets are missing
            // If all packets are received
            // NOTE(review): CS_FIN_RECEIVED is immediately overwritten
            // with CS_EOF — the missing-packet check above is not yet
            // implemented.
            self.state = CS_EOF;

            Some(self.prepare_reply(&packet.header, ST_STATE))
        }
        ST_STATE => {
            // Track duplicate ACKs for fast retransmit.
            if packet.header.ack_nr == Int::from_be(self.last_acked) {
                self.duplicate_ack_count += 1;
            } else {
                self.last_acked = Int::from_be(packet.header.ack_nr);
                self.duplicate_ack_count = 1;
            }

            // Three duplicate ACKs, must resend packets since `ack_nr + 1`
            if self.duplicate_ack_count == 3 {
                assert!(!self.send_buffer.is_empty());
                match self.send_buffer.iter().position(|pkt| Int::from_be(pkt.header.seq_nr) == Int::from_be(packet.header.ack_nr) + 1) {
                    None => fail!("Received request to resend packets since {} but none was found in send buffer!", Int::from_be(packet.header.ack_nr) + 1),
                    Some(position) => {
                        for _ in range(0u, position + 1) {
                            let to_send = self.send_buffer.shift().unwrap();
                            debug!("resending: {}", to_send);
                            self.socket.send_to(to_send.bytes().as_slice(), self.connected_to);
                        }
                    },
                }
            }

            // Success, advance send window by dropping acknowledged
            // packets from the retransmission buffer.
            while !self.send_buffer.is_empty() &&
                Int::from_be(self.send_buffer[0].header.seq_nr) <= self.last_acked {
                self.send_buffer.shift();
            }

            None
        },
        ST_RESET => /* TODO */ None,
    }
}
/// Insert a packet into the socket's buffer.
///
/// The packet is inserted in such a way that the buffer is
/// ordered ascendingly by their sequence number. This allows
/// storing packets that were received out of order.
///
/// Linear scan for the first entry with seq_nr >= the new packet's;
/// a duplicate seq_nr is inserted *before* the existing entry.
fn insert_into_buffer(&mut self, packet: UtpPacket) {
    let mut i = 0;
    for pkt in self.incoming_buffer.iter() {
        if Int::from_be(pkt.header.seq_nr) >= Int::from_be(packet.header.seq_nr) {
            break;
        }
        i += 1;
    }
    self.incoming_buffer.insert(i, packet);
}
}
// Manual Clone: shares the underlying UDP socket handle and copies the
// connection identity/sequence state, but deliberately does NOT copy
// the incoming/send buffers or ACK bookkeeping — the clone starts with
// empty buffers.
impl Clone for UtpSocket {
    fn clone(&self) -> UtpSocket {
        UtpSocket {
            socket: self.socket.clone(),
            connected_to: self.connected_to,
            receiver_connection_id: self.receiver_connection_id,
            sender_connection_id: self.sender_connection_id,
            seq_nr: self.seq_nr,
            ack_nr: self.ack_nr,
            state: self.state,
            incoming_buffer: Vec::new(),
            send_buffer: Vec::new(),
            duplicate_ack_count: 0,
            last_acked: 0,
        }
    }
}
/// Stream interface for UtpSocket.
pub struct UtpStream {
    // The underlying datagram-level socket this stream wraps.
    socket: UtpSocket,
}
impl UtpStream {
    /// Create a uTP stream listening on the given address.
    #[unstable]
    pub fn bind(addr: SocketAddr) -> IoResult<UtpStream> {
        let socket = UtpSocket::bind(addr);
        match socket {
            Ok(s) => Ok(UtpStream { socket: s }),
            Err(e) => Err(e),
        }
    }

    /// Open a uTP connection to a remote host by hostname or IP address.
    ///
    /// Binds a local socket on 127.0.0.1 with an OS-assigned port
    /// before connecting to `dst`.
    #[unstable]
    pub fn connect(dst: SocketAddr) -> IoResult<UtpStream> {
        use std::io::net::ip::Ipv4Addr;

        // Port 0 means the operating system gets to choose it
        let my_addr = SocketAddr { ip: Ipv4Addr(127,0,0,1), port: 0 };
        let socket = match UtpSocket::bind(my_addr) {
            Ok(s) => s,
            Err(e) => return Err(e),
        };

        match socket.connect(dst) {
            Ok(socket) => Ok(UtpStream { socket: socket }),
            Err(e) => Err(e),
        }
    }

    /// Gracefully close connection to peer.
    ///
    /// This method allows both peers to receive all packets still in
    /// flight.
    #[unstable]
    pub fn close(&mut self) -> IoResult<()> {
        self.socket.close()
    }
}
// Reader adapter: a read is a single recv_from; the sender address is
// discarded.
impl Reader for UtpStream {
    fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
        match self.socket.recv_from(buf) {
            Ok((read, _src)) => Ok(read),
            Err(e) => Err(e),
        }
    }
}
// Writer adapter: chunks the buffer to BUF_SIZE and sends each chunk
// to the connected peer.
impl Writer for UtpStream {
    fn write(&mut self, buf: &[u8]) -> IoResult<()> {
        let dst = self.socket.connected_to;
        for chunk in buf.chunks(BUF_SIZE) {
            try!(self.socket.send_to(chunk, dst));
        }
        Ok(())
    }
}
#[cfg(test)]
mod test {
use super::{UtpSocket, UtpPacket};
use super::{ST_STATE, ST_FIN, ST_DATA, ST_RESET, ST_SYN};
use super::{BUF_SIZE, HEADER_SIZE};
use super::{CS_CONNECTED, CS_NEW, CS_CLOSED, CS_EOF};
use std::rand::random;
    // Like assert_eq! but reports expected/actual with task failure.
    macro_rules! expect_eq(
        ($left:expr, $right:expr) => (
            if !($left == $right) {
                fail!("expected {}, got {}", $right, $left);
            }
        );
    )

    // Unwrap an IoResult or fail the test with the error message.
    macro_rules! iotry(
        ($e:expr) => (match $e { Ok(e) => e, Err(e) => fail!("{}", e) })
    )
#[test]
fn test_packet_decode() {
let buf = [0x21, 0x00, 0x41, 0xa8, 0x99, 0x2f, 0xd0, 0x2a, 0x9f, 0x4a,
0x26, 0x21, 0x00, 0x10, 0x00, 0x00, 0x3a, 0xf2, 0x6c, 0x79];
let pkt = UtpPacket::decode(buf);
assert_eq!(pkt.header.get_version(), 1);
assert_eq!(pkt.header.get_type(), ST_STATE);
assert_eq!(pkt.header.extension, 0);
assert_eq!(Int::from_be(pkt.header.connection_id), 16808);
assert_eq!(Int::from_be(pkt.header.timestamp_microseconds), 2570047530);
assert_eq!(Int::from_be(pkt.header.timestamp_difference_microseconds), 2672436769);
assert_eq!(Int::from_be(pkt.header.wnd_size), ::std::num::pow(2u32, 20));
assert_eq!(Int::from_be(pkt.header.seq_nr), 15090);
assert_eq!(Int::from_be(pkt.header.ack_nr), 27769);
assert_eq!(pkt.len(), buf.len());
assert!(pkt.payload.is_empty());
}
#[test]
fn test_packet_encode() {
let payload = Vec::from_slice("Hello\n".as_bytes());
let (timestamp, timestamp_diff): (u32, u32) = (15270793, 1707040186);
let (connection_id, seq_nr, ack_nr): (u16, u16, u16) = (16808, 15090, 17096);
let window_size: u32 = 1048576;
let mut pkt = UtpPacket::new();
pkt.set_type(ST_DATA);
pkt.header.timestamp_microseconds = timestamp.to_be();
pkt.header.timestamp_difference_microseconds = timestamp_diff.to_be();
pkt.header.connection_id = connection_id.to_be();
pkt.header.seq_nr = seq_nr.to_be();
pkt.header.ack_nr = ack_nr.to_be();
pkt.header.wnd_size = window_size.to_be();
pkt.payload = payload.clone();
let header = pkt.header;
let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89,
0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00,
0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c,
0x6f, 0x0a];
assert_eq!(pkt.len(), buf.len());
assert_eq!(pkt.len(), HEADER_SIZE + payload.len());
assert_eq!(pkt.payload, payload);
assert_eq!(header.get_version(), 1);
assert_eq!(header.get_type(), ST_DATA);
assert_eq!(header.extension, 0);
assert_eq!(Int::from_be(header.connection_id), connection_id);
assert_eq!(Int::from_be(header.seq_nr), seq_nr);
assert_eq!(Int::from_be(header.ack_nr), ack_nr);
assert_eq!(Int::from_be(header.wnd_size), window_size);
assert_eq!(Int::from_be(header.timestamp_microseconds), timestamp);
assert_eq!(Int::from_be(header.timestamp_difference_microseconds), timestamp_diff);
assert_eq!(pkt.bytes(), Vec::from_slice(buf));
}
#[test]
fn test_reversible() {
let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89,
0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00,
0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c,
0x6f, 0x0a];
assert_eq!(UtpPacket::decode(buf).bytes().as_slice(), buf);
}
    /// End-to-end handshake over loopback: client connects in a child
    /// task, server accepts, and both sides' ids/state are checked.
    #[test]
    fn test_socket_ipv4() {
        use std::io::test::next_test_ip4;

        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());

        let client = iotry!(UtpSocket::bind(clientAddr));
        let mut server = iotry!(UtpSocket::bind(serverAddr));

        assert!(server.state == CS_NEW);
        assert!(client.state == CS_NEW);

        // Check proper difference in client's send connection id and receive connection id
        assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);

        spawn(proc() {
            let client = iotry!(client.connect(serverAddr));
            assert!(client.state == CS_CONNECTED);
            assert_eq!(client.connected_to, serverAddr);
            drop(client);
        });

        let mut buf = [0u8, ..BUF_SIZE];
        match server.recv_from(buf) {
            e => println!("{}", e),
        }
        // After establishing a new connection, the server's ids are a mirror of the client's.
        assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);
        assert_eq!(server.connected_to, clientAddr);

        assert!(server.state == CS_CONNECTED);
        drop(server);
    }
    /// After the peer sends FIN, recv_from must report EndOfFile once
    /// (CS_EOF) and Closed on every call after that (CS_CLOSED).
    #[test]
    fn test_recvfrom_on_closed_socket() {
        use std::io::test::next_test_ip4;
        use std::io::{Closed, EndOfFile};

        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());

        let client = iotry!(UtpSocket::bind(clientAddr));
        let mut server = iotry!(UtpSocket::bind(serverAddr));

        assert!(server.state == CS_NEW);
        assert!(client.state == CS_NEW);

        spawn(proc() {
            let mut client = iotry!(client.connect(serverAddr));
            assert!(client.state == CS_CONNECTED);
            assert_eq!(client.close(), Ok(()));
            drop(client);
        });

        // Make the server listen for incoming connections
        let mut buf = [0u8, ..BUF_SIZE];
        let _resp = server.recv_from(buf);
        assert!(server.state == CS_CONNECTED);

        // Closing the connection is fine
        match server.recv_from(buf) {
            Err(e) => fail!("{}", e),
            _ => {},
        }
        expect_eq!(server.state, CS_EOF);

        // Trying to listen on the socket after closing it raises an
        // EOF error
        match server.recv_from(buf) {
            Err(e) => expect_eq!(e.kind, EndOfFile),
            v => fail!("expected {}, got {}", EndOfFile, v),
        }
        expect_eq!(server.state, CS_CLOSED);

        // Trying again raises a Closed error
        match server.recv_from(buf) {
            Err(e) => expect_eq!(e.kind, Closed),
            v => fail!("expected {}, got {}", Closed, v),
        }

        drop(server);
    }
    /// send_to on a closed socket must fail with a Closed error.
    #[test]
    fn test_sendto_on_closed_socket() {
        use std::io::test::next_test_ip4;
        use std::io::Closed;

        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());

        let client = iotry!(UtpSocket::bind(clientAddr));
        let mut server = iotry!(UtpSocket::bind(serverAddr));

        assert!(server.state == CS_NEW);
        assert!(client.state == CS_NEW);

        spawn(proc() {
            let client = iotry!(client.connect(serverAddr));
            assert!(client.state == CS_CONNECTED);
            let mut buf = [0u8, ..BUF_SIZE];
            let mut client = client;
            iotry!(client.recv_from(buf));
        });

        // Make the server listen for incoming connections
        let mut buf = [0u8, ..BUF_SIZE];
        let (_read, _src) = iotry!(server.recv_from(buf));
        assert!(server.state == CS_CONNECTED);

        iotry!(server.close());
        expect_eq!(server.state, CS_CLOSED);

        // Trying to send to the socket after closing it raises an
        // error
        match server.send_to(buf, clientAddr) {
            Err(e) => expect_eq!(e.kind, Closed),
            v => fail!("expected {}, got {}", Closed, v),
        }

        drop(server);
    }
    /// STATE replies to SYN and FIN should not advance the client's
    /// acknowledgement number (STATE packets carry no payload).
    #[test]
    fn test_acks_on_socket() {
        use std::io::test::next_test_ip4;
        let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
        let (tx, rx) = channel();

        let client = iotry!(UtpSocket::bind(clientAddr));
        let server = iotry!(UtpSocket::bind(serverAddr));

        spawn(proc() {
            // Make the server listen for incoming connections
            let mut server = server;
            let mut buf = [0u8, ..BUF_SIZE];
            let _resp = server.recv_from(buf);
            // Report the server's sequence number back to the client
            // task for comparison.
            tx.send(server.seq_nr);

            // Close the connection
            iotry!(server.recv_from(buf));

            drop(server);
        });

        let mut client = iotry!(client.connect(serverAddr));
        assert!(client.state == CS_CONNECTED);
        let sender_seq_nr = rx.recv();
        let ack_nr = client.ack_nr;
        assert!(ack_nr != 0);
        assert!(ack_nr == sender_seq_nr);
        assert_eq!(client.close(), Ok(()));

        // The reply to both connect (SYN) and close (FIN) should be
        // STATE packets, which don't increase the sequence number
        // and, hence, the receiver's acknowledgement number.
        assert!(client.ack_nr == ack_nr);
        drop(client);
    }
#[test]
fn test_handle_packet() {
use std::io::test::next_test_ip4;
//fn test_connection_setup() {
let initial_connection_id: u16 = random();
let sender_connection_id = initial_connection_id + 1;
let serverAddr = next_test_ip4();
let mut socket = iotry!(UtpSocket::bind(serverAddr));
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_SYN);
packet.header.connection_id = initial_connection_id.to_be();
let sent = packet.header;
// Do we have a response?
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
// Is is of the correct type?
let response = response.unwrap();
assert!(response.get_type() == ST_STATE);
// Same connection id on both ends during connection establishment
assert!(response.header.connection_id == sent.connection_id);
// Response acknowledges SYN
assert!(response.header.ack_nr == sent.seq_nr);
// No payload?
assert!(response.payload.is_empty());
//}
// ---------------------------------
// fn test_connection_usage() {
let old_packet = packet;
let old_response = response;
let mut packet = UtpPacket::new();
packet.set_type(ST_DATA);
packet.header.connection_id = sender_connection_id.to_be();
packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
packet.header.ack_nr = old_response.header.seq_nr;
let sent = packet.header;
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
let response = response.unwrap();
assert!(response.get_type() == ST_STATE);
// Sender (i.e., who initated connection and sent SYN) has connection id
// equal to initial connection id + 1
// Receiver (i.e., who accepted connection) has connection id equal to
// initial connection id
assert!(Int::from_be(response.header.connection_id) == initial_connection_id);
assert!(Int::from_be(response.header.connection_id) == Int::from_be(sent.connection_id) - 1);
// Previous packets should be ack'ed
assert!(Int::from_be(response.header.ack_nr) == Int::from_be(sent.seq_nr));
// Responses with no payload should not increase the sequence number
assert!(response.payload.is_empty());
assert!(Int::from_be(response.header.seq_nr) == Int::from_be(old_response.header.seq_nr));
// }
//fn test_connection_teardown() {
let old_packet = packet;
let old_response = response;
let mut packet = UtpPacket::new();
packet.set_type(ST_FIN);
packet.header.connection_id = sender_connection_id.to_be();
packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
packet.header.ack_nr = old_response.header.seq_nr;
let sent = packet.header;
let response = socket.handle_packet(packet);
assert!(response.is_some());
let response = response.unwrap();
assert!(response.get_type() == ST_STATE);
// FIN packets have no payload but the sequence number shouldn't increase
assert!(Int::from_be(sent.seq_nr) == Int::from_be(old_packet.header.seq_nr) + 1);
// Nor should the ACK packet's sequence number
assert!(response.header.seq_nr == old_response.header.seq_nr);
// FIN should be acknowledged
assert!(response.header.ack_nr == sent.seq_nr);
//}
}
// After the handshake, a bare ST_STATE packet acts as a keepalive: the
// socket must swallow it (and an identical duplicate) without replying.
#[test]
fn test_response_to_keepalive_ack() {
use std::io::test::next_test_ip4;
// Boilerplate test setup
let initial_connection_id: u16 = random();
let serverAddr = next_test_ip4();
let mut socket = iotry!(UtpSocket::bind(serverAddr));
// Establish connection
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_SYN);
packet.header.connection_id = initial_connection_id.to_be();
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
let response = response.unwrap();
assert!(response.get_type() == ST_STATE);
let old_packet = packet;
let old_response = response;
// Now, send a keepalive packet
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_STATE);
packet.header.connection_id = initial_connection_id.to_be();
packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
packet.header.ack_nr = old_response.header.seq_nr;
// ST_STATE packets are handled internally and never answered
let response = socket.handle_packet(packet.clone());
assert!(response.is_none());
// Send a second keepalive packet, identical to the previous one
let response = socket.handle_packet(packet.clone());
assert!(response.is_none());
}
// A packet carrying an unknown connection id must be answered with an
// ST_RESET that echoes the offending packet's seq_nr in its ack_nr.
#[test]
fn test_response_to_wrong_connection_id() {
use std::io::test::next_test_ip4;
// Boilerplate test setup
let initial_connection_id: u16 = random();
let serverAddr = next_test_ip4();
let mut socket = iotry!(UtpSocket::bind(serverAddr));
// Establish connection
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_SYN);
packet.header.connection_id = initial_connection_id.to_be();
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
assert!(response.unwrap().get_type() == ST_STATE);
// Now, disrupt connection with a packet with an incorrect connection id
// NOTE(review): byte-swapping only produces a *different* id when the two
// bytes of the random id differ; an id like 0xABAB makes this check
// vacuous — confirm whether that flakiness is acceptable.
let new_connection_id = initial_connection_id.to_le();
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_STATE);
packet.header.connection_id = new_connection_id;
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
let response = response.unwrap();
assert!(response.get_type() == ST_RESET);
assert!(response.header.ack_nr == packet.header.seq_nr);
}
// Smoke test for the stream interface: a client connects and immediately
// closes; the server's read_to_end must terminate cleanly.
#[test]
fn test_utp_stream() {
use super::UtpStream;
use std::io::test::next_test_ip4;
let serverAddr = next_test_ip4();
let mut server = iotry!(UtpStream::bind(serverAddr));
spawn(proc() {
let mut client = iotry!(UtpStream::connect(serverAddr));
iotry!(client.close());
});
iotry!(server.read_to_end());
}
// A payload that fits in a single packet must round-trip intact through
// the stream interface.
#[test]
fn test_utp_stream_small_data() {
use super::UtpStream;
use std::io::test::next_test_ip4;
// Fits in a packet
static len: uint = 1024;
let data = Vec::from_fn(len, |idx| idx as u8);
expect_eq!(len, data.len());
let d = data.clone();
let serverAddr = next_test_ip4();
// Bind result is not unwrapped here — presumably relies on the old
// std `impl Reader for IoResult<R>`; confirm intent.
let mut server = UtpStream::bind(serverAddr);
spawn(proc() {
let mut client = iotry!(UtpStream::connect(serverAddr));
iotry!(client.write(d.as_slice()));
iotry!(client.close());
});
let read = iotry!(server.read_to_end());
assert!(!read.is_empty());
expect_eq!(read.len(), data.len());
expect_eq!(read, data);
}
// A payload larger than one packet (1 MiB) must be split across multiple
// packets and still round-trip intact through the stream interface.
#[test]
fn test_utp_stream_large_data() {
use super::UtpStream;
use std::io::test::next_test_ip4;
// Has to be sent over several packets
static len: uint = 1024 * 1024;
let data = Vec::from_fn(len, |idx| idx as u8);
expect_eq!(len, data.len());
let d = data.clone();
let serverAddr = next_test_ip4();
let mut server = UtpStream::bind(serverAddr);
spawn(proc() {
let mut client = iotry!(UtpStream::connect(serverAddr));
iotry!(client.write(d.as_slice()));
iotry!(client.close());
});
let read = iotry!(server.read_to_end());
assert!(!read.is_empty());
expect_eq!(read.len(), data.len());
expect_eq!(read, data);
}
// After the peer closes the connection and read_to_end drains it, a
// further read must fail with the Closed error kind.
#[test]
fn test_utp_stream_successive_reads() {
use super::UtpStream;
use std::io::test::next_test_ip4;
use std::io::Closed;
static len: uint = 1024;
let data: Vec<u8> = Vec::from_fn(len, |idx| idx as u8);
expect_eq!(len, data.len());
let d = data.clone();
let serverAddr = next_test_ip4();
let mut server = UtpStream::bind(serverAddr);
spawn(proc() {
let mut client = iotry!(UtpStream::connect(serverAddr));
iotry!(client.write(d.as_slice()));
iotry!(client.close());
});
iotry!(server.read_to_end());
// Connection is fully drained; the next read must report Closed
let mut buf = [0u8, ..4096];
match server.read(buf) {
Err(ref e) if e.kind == Closed => {},
_ => fail!("should have failed with Closed"),
};
}
// Deliver two consecutive data packets to handle_packet in reverse
// seq_nr order: the out-of-order packet must not be acknowledged, while
// the gap-filling in-order packet must elicit a reply.
#[test]
fn test_unordered_packets() {
use std::io::test::next_test_ip4;
// Boilerplate test setup
let initial_connection_id: u16 = random();
let serverAddr = next_test_ip4();
let mut socket = iotry!(UtpSocket::bind(serverAddr));
// Establish connection
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_SYN);
packet.header.connection_id = initial_connection_id.to_be();
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
let response = response.unwrap();
assert!(response.get_type() == ST_STATE);
let old_packet = packet;
let old_response = response;
let mut window: Vec<UtpPacket> = Vec::new();
// Build two consecutive data packets: seq_nr + 1 and seq_nr + 2
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_DATA);
packet.header.connection_id = initial_connection_id.to_be();
packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
packet.header.ack_nr = old_response.header.seq_nr;
packet.payload = vec!(1,2,3);
window.push(packet);
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_DATA);
packet.header.connection_id = initial_connection_id.to_be();
packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 2).to_be();
packet.header.ack_nr = old_response.header.seq_nr;
packet.payload = vec!(4,5,6);
window.push(packet);
// Send packets in reverse order
let response = socket.handle_packet(window[1].clone());
assert!(response.is_some());
let response = response.unwrap();
// The skipped-ahead packet must not be acknowledged yet
assert!(response.header.ack_nr != window[1].header.seq_nr);
let response = socket.handle_packet(window[0].clone());
assert!(response.is_some());
}
// End-to-end: hand-craft four data packets plus a FIN, deliver them over
// raw UDP in scrambled order, and check the server's stream still yields
// the bytes in the correct order.
#[test]
fn test_socket_unordered_packets() {
use std::io::test::next_test_ip4;
use super::UtpStream;
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
let client = iotry!(UtpSocket::bind(clientAddr));
let mut server = iotry!(UtpSocket::bind(serverAddr));
assert!(server.state == CS_NEW);
assert!(client.state == CS_NEW);
// Check proper difference in client's send connection id and receive connection id
assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);
spawn(proc() {
let client = iotry!(client.connect(serverAddr));
assert!(client.state == CS_CONNECTED);
let mut s = client.socket;
let mut window: Vec<UtpPacket> = Vec::new();
let mut i = 0;
// Four 3-byte data packets with seq_nr offsets 0 through 3
for data in Vec::from_fn(12, |idx| idx as u8 + 1).as_slice().chunks(3) {
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_DATA);
packet.header.connection_id = client.sender_connection_id.to_be();
packet.header.seq_nr = (client.seq_nr + i).to_be();
packet.header.ack_nr = client.ack_nr.to_be();
packet.payload = Vec::from_slice(data);
window.push(packet);
i += 1;
}
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_FIN);
packet.header.connection_id = client.sender_connection_id.to_be();
// NOTE(review): `+ 2` gives the FIN the same seq_nr as the third data
// packet (data used offsets 0..3); `+ 4` looks intended — confirm.
packet.header.seq_nr = (client.seq_nr + 2).to_be();
packet.header.ack_nr = client.ack_nr.to_be();
window.push(packet);
// Deliver data packets in reverse order, then the FIN
iotry!(s.send_to(window[3].bytes().as_slice(), serverAddr));
iotry!(s.send_to(window[2].bytes().as_slice(), serverAddr));
iotry!(s.send_to(window[1].bytes().as_slice(), serverAddr));
iotry!(s.send_to(window[0].bytes().as_slice(), serverAddr));
iotry!(s.send_to(window[4].bytes().as_slice(), serverAddr));
// Drain a couple of ACK datagrams so the task doesn't exit early
for _ in range(0u, 2) {
let mut buf = [0, ..BUF_SIZE];
iotry!(s.recv_from(buf));
}
});
let mut buf = [0u8, ..BUF_SIZE];
match server.recv_from(buf) {
e => println!("{}", e),
}
// After establishing a new connection, the server's ids are a mirror of the client's.
assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);
assert!(server.state == CS_CONNECTED);
let mut stream = UtpStream { socket: server };
let expected: Vec<u8> = Vec::from_fn(12, |idx| idx as u8 + 1);
match stream.read_to_end() {
Ok(data) => {
expect_eq!(data.len(), expected.len());
expect_eq!(data, expected);
},
Err(e) => fail!("{}", e),
}
}
// A SYN must be acknowledged directly and never parked in the server's
// incoming (out-of-order) buffer, even though its seq_nr won't match the
// pristine socket's ack_nr.
#[test]
fn test_socket_should_not_buffer_syn_packets() {
use std::io::test::next_test_ip4;
use std::io::net::udp::UdpSocket;
use super::UtpSocket;
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
let server = iotry!(UtpSocket::bind(serverAddr));
let client = iotry!(UdpSocket::bind(clientAddr));
// Captured raw SYN header (20 bytes, no payload)
let test_syn_raw = [0x41, 0x00, 0x41, 0xa7, 0x00, 0x00, 0x00,
0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x3a,
0xf1, 0x00, 0x00];
let test_syn_pkt = UtpPacket::decode(test_syn_raw);
let seq_nr = Int::from_be(test_syn_pkt.header.seq_nr);
spawn(proc() {
let mut client = client;
iotry!(client.send_to(test_syn_raw, serverAddr));
client.set_timeout(Some(10));
let mut buf = [0, ..BUF_SIZE];
let packet = match client.recv_from(buf) {
Ok((nread, _src)) => UtpPacket::decode(buf.slice_to(nread)),
Err(e) => fail!("{}", e),
};
// The reply must acknowledge the SYN's sequence number
expect_eq!(packet.header.ack_nr, seq_nr.to_be());
drop(client);
});
let mut server = server;
let mut buf = [0, ..20];
iotry!(server.recv_from(buf));
assert!(server.ack_nr != 0);
expect_eq!(server.ack_nr, seq_nr);
// The SYN must have been handled, not buffered
assert!(server.incoming_buffer.is_empty());
}
// Sending the same ACK three times (a "triple ACK") must make the peer
// retransmit everything starting at ack_nr + 1.
#[test]
fn test_response_to_triple_ack() {
use std::io::test::next_test_ip4;
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
let mut server = iotry!(UtpSocket::bind(serverAddr));
let client = iotry!(UtpSocket::bind(clientAddr));
// Fits in a packet
static len: uint = 1024;
let data = Vec::from_fn(len, |idx| idx as u8);
let d = data.clone();
expect_eq!(len, data.len());
spawn(proc() {
let mut client = iotry!(client.connect(serverAddr));
iotry!(client.send_to(d.as_slice(), serverAddr));
iotry!(client.close());
});
let mut buf = [0, ..BUF_SIZE];
// Expect SYN
iotry!(server.recv_from(buf));
// Receive data
let mut data_packet;
match server.socket.recv_from(buf) {
Ok((read, _src)) => {
data_packet = UtpPacket::decode(buf.slice_to(read));
assert!(data_packet.get_type() == ST_DATA);
expect_eq!(data_packet.payload, data);
assert_eq!(data_packet.payload.len(), data.len());
},
Err(e) => fail!("{}", e),
}
let data_packet = data_packet;
// Send triple ACK
// Pretend the data packet was never seen by acking seq_nr - 1
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_STATE);
packet.header.seq_nr = server.seq_nr.to_be();
packet.header.ack_nr = (Int::from_be(data_packet.header.seq_nr) - 1).to_be();
packet.header.connection_id = server.sender_connection_id.to_be();
for _ in range(0u, 3) {
iotry!(server.socket.send_to(packet.bytes().as_slice(), clientAddr));
}
// Receive data again and check that it's the same we reported as missing
match server.socket.recv_from(buf) {
Ok((0, _)) => fail!("Received 0 bytes from socket"),
Ok((read, _src)) => {
let packet = UtpPacket::decode(buf.slice_to(read));
assert_eq!(packet.get_type(), ST_DATA);
assert_eq!(Int::from_be(packet.header.seq_nr), Int::from_be(data_packet.header.seq_nr));
assert!(packet.payload == data_packet.payload);
let response = server.handle_packet(packet).unwrap();
iotry!(server.socket.send_to(response.bytes().as_slice(), server.connected_to));
},
Err(e) => fail!("{}", e),
}
// Receive close
iotry!(server.recv_from(buf));
}
}
Delegate splitting buffer into packets to UtpSocket.
//! Implementation of a Micro Transport Protocol library.
//!
//! http://www.bittorrent.org/beps/bep_0029.html
//!
//! TODO
//! ----
//!
//! - congestion control
//! - proper connection closing
//! - automatically send FIN (or should it be RST?) on `drop` if not already closed
//! - setters and getters that hide header field endianness conversion
//! - SACK extension
//! - handle packet loss
#![crate_name = "utp"]
#![license = "MIT/ASL2"]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![feature(macro_rules, phase)]
#![deny(missing_doc)]
extern crate time;
#[phase(plugin, link)] extern crate log;
use std::io::net::udp::UdpSocket;
use std::io::net::ip::SocketAddr;
use std::io::IoResult;
use std::mem::transmute;
use std::rand::random;
use std::fmt;
// Size in bytes of the fixed uTP packet header (fields only, no payload).
static HEADER_SIZE: uint = 20;
// For simplicity's sake, let us assume no packet will ever exceed the
// Ethernet maximum transfer unit of 1500 bytes.
static BUF_SIZE: uint = 1500;
// Build an unsigned integer of type $t from the byte slice
// $src[$start..$end] (inclusive bounds), taking $src[$start] as the LEAST
// significant byte.
// NOTE(review): despite the `_be` name, bytes are combined low-to-high;
// together with the later `Int::from_be` conversions this only
// round-trips wire data correctly on little-endian hosts — confirm.
macro_rules! u8_to_unsigned_be(
($src:ident[$start:expr..$end:expr] -> $t:ty) => ({
let mut result: $t = 0;
for i in range(0u, $end-$start+1).rev() {
result = result | $src[$start+i] as $t << i*8;
}
result
})
)
/// Return current time in microseconds since the UNIX epoch.
///
/// The value deliberately wraps in `u32`, matching the uTP header's
/// 32-bit timestamp fields.
fn now_microseconds() -> u32 {
    let now = time::get_time();
    let seconds_part = (now.sec * 1_000_000) as u32;
    let micros_part = (now.nsec / 1000) as u32;
    seconds_part + micros_part
}
#[allow(dead_code,non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
// Packet types from the uTP wire format; the discriminant is the value
// stored in the header's type nibble.
enum UtpPacketType {
ST_DATA = 0, // regular data packet
ST_FIN = 1, // last packet of the connection
ST_STATE = 2, // state packet: an ACK carrying no payload
ST_RESET = 3, // forcibly terminate the connection
ST_SYN = 4, // connection initiation
}
#[allow(dead_code)]
#[deriving(Clone)]
#[packed]
// Fixed 20-byte uTP packet header. Multi-byte fields are kept in network
// (big-endian) byte order in memory; callers convert at the boundaries
// with `Int::from_be`/`to_be`. The #[packed] layout is what allows
// `bytes()` to reinterpret the struct as the wire format.
struct UtpPacketHeader {
type_ver: u8, // type: u4, ver: u4
extension: u8,
connection_id: u16,
timestamp_microseconds: u32,
timestamp_difference_microseconds: u32,
wnd_size: u32,
seq_nr: u16,
ack_nr: u16,
}
impl UtpPacketHeader {
/// Set type of packet to the specified type.
fn set_type(&mut self, t: UtpPacketType) {
// Keep the low (version) nibble, replace the high (type) nibble.
let version = 0x0F & self.type_ver;
self.type_ver = t as u8 << 4 | version;
}
/// Return the packet type stored in the high nibble of `type_ver`.
fn get_type(&self) -> UtpPacketType {
// transmute assumes the nibble is a valid discriminant (0 through 4);
// a corrupt packet would produce an invalid enum value.
let t: UtpPacketType = unsafe { transmute(self.type_ver >> 4) };
t
}
/// Return the protocol version from the low nibble of `type_ver`.
fn get_version(&self) -> u8 {
self.type_ver & 0x0F
}
/// Builder-style copy of this header with the window size replaced
/// (stored in network byte order).
fn wnd_size(&self, new_wnd_size: u32) -> UtpPacketHeader {
UtpPacketHeader {
wnd_size: new_wnd_size.to_be(),
.. self.clone()
}
}
/// Return packet header as a slice of bytes.
fn bytes(&self) -> &[u8] {
// Relies on the #[packed] struct layout matching the 20-byte wire format.
let buf: &[u8, ..HEADER_SIZE] = unsafe { transmute(self) };
return buf.as_slice();
}
/// Length of the encoded header in bytes (always HEADER_SIZE).
fn len(&self) -> uint {
return HEADER_SIZE;
}
/// Read byte buffer and return corresponding packet header.
/// It assumes the fields are in network (big-endian) byte order,
/// preserving it. Indexing panics if `buf` is shorter than HEADER_SIZE.
fn decode(buf: &[u8]) -> UtpPacketHeader {
UtpPacketHeader {
type_ver: buf[0],
extension: buf[1],
connection_id: u8_to_unsigned_be!(buf[2..3] -> u16),
timestamp_microseconds: u8_to_unsigned_be!(buf[4..7] -> u32),
timestamp_difference_microseconds: u8_to_unsigned_be!(buf[8..11] -> u32),
wnd_size: u8_to_unsigned_be!(buf[12..15] -> u32),
seq_nr: u8_to_unsigned_be!(buf[16..17] -> u16),
ack_nr: u8_to_unsigned_be!(buf[18..19] -> u16),
}
}
}
impl fmt::Show for UtpPacketHeader {
// Human-readable dump of every header field, converting the multi-byte
// fields out of network byte order for display. `Int::from_be` on the
// single-byte version/extension values is a no-op.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "(type: {}, version: {}, extension: {}, \
connection_id: {}, timestamp_microseconds: {}, \
timestamp_difference_microseconds: {}, wnd_size: {}, \
seq_nr: {}, ack_nr: {})",
self.get_type(),
Int::from_be(self.get_version()),
Int::from_be(self.extension),
Int::from_be(self.connection_id),
Int::from_be(self.timestamp_microseconds),
Int::from_be(self.timestamp_difference_microseconds),
Int::from_be(self.wnd_size),
Int::from_be(self.seq_nr),
Int::from_be(self.ack_nr),
)
}
}
/// A complete uTP packet: fixed-size header plus variable-length payload.
#[allow(dead_code)]
struct UtpPacket {
header: UtpPacketHeader,
payload: Vec<u8>,
}
impl UtpPacket {
/// Construct a new, empty packet.
///
/// Defaults to type ST_DATA, protocol version 1, all other fields zero.
fn new() -> UtpPacket {
UtpPacket {
header: UtpPacketHeader {
type_ver: ST_DATA as u8 << 4 | 1,
extension: 0,
connection_id: 0,
timestamp_microseconds: 0,
timestamp_difference_microseconds: 0,
wnd_size: 0,
seq_nr: 0,
ack_nr: 0,
},
payload: Vec::new(),
}
}
/// Set the packet type in the header's type nibble.
fn set_type(&mut self, t: UtpPacketType) {
self.header.set_type(t);
}
// TODO: Read up on pointers and ownership
/// Return the packet type from the header's type nibble.
fn get_type(&self) -> UtpPacketType {
self.header.get_type()
}
/// Builder-style copy of this packet with the window size replaced;
/// the payload is cloned.
fn wnd_size(&self, new_wnd_size: u32) -> UtpPacket {
UtpPacket {
header: self.header.wnd_size(new_wnd_size),
payload: self.payload.clone(),
}
}
/// Serialize header followed by payload into a fresh byte vector.
/// TODO: return slice
fn bytes(&self) -> Vec<u8> {
let mut buf = Vec::with_capacity(self.len());
buf.push_all(self.header.bytes());
buf.push_all(self.payload.as_slice());
return buf;
}
/// Total encoded size: header plus payload bytes.
fn len(&self) -> uint {
self.header.len() + self.payload.len()
}
/// Decode a byte slice and construct the equivalent UtpPacket.
///
/// Note that this method makes no attempt to guess the payload size, saving
/// all except the initial 20 bytes corresponding to the header as payload.
/// It's the caller's responsibility to use an appropriately sized buffer.
fn decode(buf: &[u8]) -> UtpPacket {
UtpPacket {
header: UtpPacketHeader::decode(buf),
payload: Vec::from_slice(buf.slice(HEADER_SIZE, buf.len()))
}
}
}
impl Clone for UtpPacket {
    /// Deep-copy a packet: the header is plain-old-data and is copied by
    /// value, while the payload vector is cloned.
    fn clone(&self) -> UtpPacket {
        let copied_payload = self.payload.clone();
        UtpPacket {
            payload: copied_payload,
            header: self.header,
        }
    }
}
impl fmt::Show for UtpPacket {
    /// Format a packet by displaying its header; the payload is omitted.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        write!(formatter, "{}", self.header)
    }
}
#[allow(non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
// Connection life-cycle states for a UtpSocket.
enum UtpSocketState {
CS_NEW, // socket bound, no connection yet
CS_CONNECTED, // handshake complete, data may flow
CS_SYN_SENT, // SYN sent, awaiting the peer's ST_STATE reply
CS_FIN_RECEIVED, // peer announced end of stream
CS_FIN_SENT, // we announced end of stream
CS_RST_RECEIVED, // peer aborted the connection
CS_CLOSED, // connection fully shut down
CS_EOF, // FIN processed; next read reports end-of-file
}
/// A uTP (Micro Transport Protocol) socket.
pub struct UtpSocket {
socket: UdpSocket, // underlying UDP transport
connected_to: SocketAddr, // current peer (bind address until connected)
sender_connection_id: u16, // id stamped on outgoing packets (host order)
receiver_connection_id: u16, // id expected on incoming packets (host order)
seq_nr: u16, // next sequence number to send (host order)
ack_nr: u16, // last in-order sequence number received (host order)
state: UtpSocketState,
// Received but not acknowledged packets
incoming_buffer: Vec<UtpPacket>,
// Sent but not yet acknowledged packets
send_buffer: Vec<UtpPacket>,
duplicate_ack_count: uint, // consecutive duplicate ACKs; 3 triggers resend
last_acked: u16, // highest ack_nr received from the peer (host order)
}
// Build an ST_STATE reply to $header and send it to $src over this
// socket's UDP transport, propagating I/O errors via `try!`.
// NOTE(review): the expansion references `self`, so the macro is only
// usable inside UtpSocket methods (pre-hygiene macro behavior — confirm
// it still expands where used).
macro_rules! reply_with_ack(
($header:expr, $src:expr) => ({
let resp = self.prepare_reply($header, ST_STATE).wnd_size(BUF_SIZE as u32);
try!(self.socket.send_to(resp.bytes().as_slice(), $src));
debug!("sent {}", resp.header);
})
)
impl UtpSocket {
/// Create a UTP socket from the given address.
///
/// The receive connection id is chosen at random; the send id is the
/// receive id plus one, as the handshake assertions below require.
#[unstable]
pub fn bind(addr: SocketAddr) -> IoResult<UtpSocket> {
let skt = UdpSocket::bind(addr);
let connection_id = random::<u16>();
match skt {
Ok(x) => Ok(UtpSocket {
socket: x,
connected_to: addr,
receiver_connection_id: connection_id,
sender_connection_id: connection_id + 1,
seq_nr: 1,
ack_nr: 0,
state: CS_NEW,
incoming_buffer: Vec::new(),
send_buffer: Vec::new(),
duplicate_ack_count: 0,
last_acked: 0,
}),
Err(e) => Err(e)
}
}
/// Open a uTP connection to a remote host by hostname or IP address.
///
/// Sends a SYN carrying the receive connection id, then blocks for the
/// peer's reply; a non-ST_STATE reply yields ConnectionFailed.
/// NOTE(review): a socket read error fails the task instead of being
/// returned, and a lost SYN is never retransmitted — no timeout here.
#[unstable]
pub fn connect(mut self, other: SocketAddr) -> IoResult<UtpSocket> {
use std::io::{IoError, ConnectionFailed};
self.connected_to = other;
assert_eq!(self.receiver_connection_id + 1, self.sender_connection_id);
let mut packet = UtpPacket::new();
packet.set_type(ST_SYN);
packet.header.connection_id = self.receiver_connection_id.to_be();
packet.header.seq_nr = self.seq_nr.to_be();
packet.header.timestamp_microseconds = now_microseconds().to_be();
// Send packet
let dst = self.connected_to;
let _result = self.socket.send_to(packet.bytes().as_slice(), dst);
debug!("sent {}", packet.header);
self.state = CS_SYN_SENT;
let mut buf = [0, ..BUF_SIZE];
let (_len, addr) = match self.socket.recv_from(buf) {
Ok(v) => v,
Err(e) => fail!("{}", e),
};
// The handshake ACK is expected to carry a bare header, no payload
assert!(_len == HEADER_SIZE);
assert!(addr == self.connected_to);
let packet = UtpPacket::decode(buf.slice_to(_len));
if packet.get_type() != ST_STATE {
return Err(IoError {
kind: ConnectionFailed,
desc: "The remote peer sent an incorrect reply",
detail: None,
});
}
// Mirror the peer's sequence number so later packets can be acked
self.ack_nr = Int::from_be(packet.header.seq_nr);
debug!("connected to: {} {}", addr, self.connected_to);
self.state = CS_CONNECTED;
// The SYN consumed a sequence number
self.seq_nr += 1;
Ok(self)
}
/// Gracefully close connection to peer.
///
/// This method allows both peers to receive all packets still in
/// flight.
#[unstable]
pub fn close(&mut self) -> IoResult<()> {
let mut packet = UtpPacket::new();
packet.header.connection_id = self.sender_connection_id.to_be();
packet.header.seq_nr = self.seq_nr.to_be();
packet.header.ack_nr = self.ack_nr.to_be();
packet.header.timestamp_microseconds = now_microseconds().to_be();
packet.set_type(ST_FIN);
// Send FIN
let dst = self.connected_to;
try!(self.socket.send_to(packet.bytes().as_slice(), dst));
debug!("sent {}", packet);
self.state = CS_FIN_SENT;
// Receive JAKE
let mut buf = [0u8, ..BUF_SIZE];
try!(self.socket.recv_from(buf));
let resp = UtpPacket::decode(buf);
debug!("received {}", resp);
// The peer must acknowledge the FIN with an ST_STATE packet
assert!(resp.get_type() == ST_STATE);
// Set socket state
self.state = CS_CLOSED;
Ok(())
}
/// Receive data from socket.
///
/// On success, returns the number of bytes read and the sender's address.
/// Returns CS_EOF after receiving a FIN packet when the remaining
/// inflight packets are consumed. Subsequent calls return CS_CLOSED.
#[unstable]
pub fn recv_from(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
use std::cmp::min;
use std::io::{IoError, EndOfFile, Closed};
// First call after the FIN was fully processed reports EndOfFile once...
if self.state == CS_EOF {
self.state = CS_CLOSED;
return Err(IoError {
kind: EndOfFile,
desc: "End of file reached",
detail: None,
});
}
// ...and every call after that reports Closed.
if self.state == CS_CLOSED {
return Err(IoError {
kind: Closed,
desc: "Connection closed",
detail: None,
});
}
let mut b = [0, ..BUF_SIZE + HEADER_SIZE];
let (read, src) = try!(self.socket.recv_from(b));
// NOTE(review): a datagram shorter than HEADER_SIZE makes the
// `read - HEADER_SIZE` arithmetic below underflow — confirm whether
// truncated datagrams can reach this point.
let packet = UtpPacket::decode(b.slice_to(read));
debug!("received {}", packet.header);
if packet.get_type() == ST_RESET {
use std::io::{IoError, ConnectionReset};
return Err(IoError {
kind: ConnectionReset,
desc: "Remote host aborted connection (incorrect connection id)",
detail: None,
});
}
// TODO: move this to handle_packet?
if packet.get_type() == ST_SYN {
self.connected_to = src;
}
// Check if the packet is out of order (that is, it's sequence number
// does not immediately follow the ACK number)
if packet.get_type() != ST_STATE && packet.get_type() != ST_SYN
&& self.ack_nr + 1 < Int::from_be(packet.header.seq_nr) {
debug!("current ack_nr ({}) is behind received packet seq_nr ({})",
self.ack_nr, Int::from_be(packet.header.seq_nr));
// Add to buffer but do not acknowledge until all packets between
// ack_nr + 1 and curr_packet.seq_nr - 1 are received
self.insert_into_buffer(packet);
return Ok((0, self.connected_to));
}
// In-order packet: update state and send whatever reply is due
match self.handle_packet(packet.clone()) {
Some(pkt) => {
let pkt = pkt.wnd_size(BUF_SIZE as u32);
try!(self.socket.send_to(pkt.bytes().as_slice(), src));
debug!("sent {}", pkt.header);
},
None => {}
};
// Copy this packet's payload into the caller's buffer
for i in range(0u, min(buf.len(), read - HEADER_SIZE)) {
buf[i] = b[i + HEADER_SIZE];
}
// Empty buffer if possible
// NOTE(review): the drain below writes `buf[read]` without checking
// `buf.len()`, so a small caller buffer can cause an index failure.
let mut read = read - HEADER_SIZE;
while !self.incoming_buffer.is_empty() &&
self.ack_nr + 1 == Int::from_be(self.incoming_buffer[0].header.seq_nr) {
let packet = self.incoming_buffer.shift().unwrap();
debug!("Removing packet from buffer: {}", packet);
for i in range(0u, packet.payload.len()) {
buf[read] = packet.payload[i];
read += 1;
}
self.ack_nr = Int::from_be(packet.header.seq_nr);
}
Ok((read, src))
}
#[allow(missing_doc)]
#[deprecated = "renamed to `recv_from`"]
pub fn recvfrom(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
self.recv_from(buf)
}
/// Build a reply packet of type `t` answering `original`: stamps the
/// current timestamp, the timestamp delta, and this socket's connection
/// id, seq_nr and ack_nr, all in network byte order.
fn prepare_reply(&self, original: &UtpPacketHeader, t: UtpPacketType) -> UtpPacket {
let mut resp = UtpPacket::new();
resp.set_type(t);
let self_t_micro: u32 = now_microseconds();
let other_t_micro: u32 = Int::from_be(original.timestamp_microseconds);
resp.header.timestamp_microseconds = self_t_micro.to_be();
resp.header.timestamp_difference_microseconds = (self_t_micro - other_t_micro).to_be();
resp.header.connection_id = self.sender_connection_id.to_be();
resp.header.seq_nr = self.seq_nr.to_be();
resp.header.ack_nr = self.ack_nr.to_be();
resp
}
/// Send data on socket to the given address. Returns nothing on success.
//
// # Implementation details
//
// This method inserts packets into the send buffer and keeps trying to
// advance the send window until an ACK corresponding to the last packet is
// received.
//
// Note that the buffer passed to `send_to` might exceed the maximum packet
// size, which will result in the data being split over several packets.
#[unstable]
pub fn send_to(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
use std::io::{IoError, Closed};
if self.state == CS_CLOSED {
return Err(IoError {
kind: Closed,
desc: "Connection closed",
detail: None,
});
}
// NOTE(review): each chunk carries up to BUF_SIZE payload bytes, so
// the datagram (payload + 20-byte header) exceeds BUF_SIZE; recv_from
// compensates with a BUF_SIZE + HEADER_SIZE buffer, but chunking by
// BUF_SIZE - HEADER_SIZE would be less surprising — confirm.
for chunk in buf.chunks(BUF_SIZE) {
let mut packet = UtpPacket::new();
packet.set_type(ST_DATA);
packet.payload = Vec::from_slice(chunk);
packet.header.timestamp_microseconds = now_microseconds().to_be();
packet.header.seq_nr = self.seq_nr.to_be();
packet.header.ack_nr = self.ack_nr.to_be();
packet.header.connection_id = self.sender_connection_id.to_be();
debug!("Pushing packet into send buffer: {}", packet);
// Keep a copy for possible retransmission on triple ACK
self.send_buffer.push(packet.clone());
try!(self.socket.send_to(packet.bytes().as_slice(), dst));
self.seq_nr += 1;
}
// Consume acknowledgements until latest packet
let mut buf = [0, ..BUF_SIZE];
while self.last_acked < self.seq_nr - 1 {
try!(self.recv_from(buf));
}
Ok(())
}
#[allow(missing_doc)]
#[deprecated = "renamed to `send_to`"]
pub fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
self.send_to(buf, dst)
}
/// Handle incoming packet, updating socket state accordingly.
///
/// Returns appropriate reply packet, if needed.
fn handle_packet(&mut self, packet: UtpPacket) -> Option<UtpPacket> {
// Reset connection if connection id doesn't match and this isn't a SYN
if packet.get_type() != ST_SYN &&
!(Int::from_be(packet.header.connection_id) == self.sender_connection_id ||
Int::from_be(packet.header.connection_id) == self.receiver_connection_id) {
return Some(self.prepare_reply(&packet.header, ST_RESET));
}
// Acknowledge only if the packet strictly follows the previous one
if self.ack_nr + 1 == Int::from_be(packet.header.seq_nr) {
self.ack_nr = Int::from_be(packet.header.seq_nr);
}
match packet.header.get_type() {
ST_SYN => { // Respond with an ACK and populate own fields
// Update socket information for new connections
self.ack_nr = Int::from_be(packet.header.seq_nr);
self.seq_nr = random();
// Derive our ids from the id carried in the SYN: send with it,
// receive on it + 1 (the mirror of the initiator's pair).
self.receiver_connection_id = Int::from_be(packet.header.connection_id) + 1;
self.sender_connection_id = Int::from_be(packet.header.connection_id);
self.state = CS_CONNECTED;
Some(self.prepare_reply(&packet.header, ST_STATE))
}
ST_DATA => Some(self.prepare_reply(&packet.header, ST_STATE)),
ST_FIN => {
self.state = CS_FIN_RECEIVED;
// TODO: check if no packets are missing
// If all packets are received
self.state = CS_EOF;
Some(self.prepare_reply(&packet.header, ST_STATE))
}
ST_STATE => {
// NOTE(review): `last_acked` is already host order, so
// `Int::from_be` here byteswaps it. The equality still holds
// (both sides end up byteswapped on little-endian hosts), but
// `Int::from_be(packet.header.ack_nr) == self.last_acked`
// would state the intent directly.
if packet.header.ack_nr == Int::from_be(self.last_acked) {
self.duplicate_ack_count += 1;
} else {
self.last_acked = Int::from_be(packet.header.ack_nr);
self.duplicate_ack_count = 1;
}
// Three duplicate ACKs, must resend packets since `ack_nr + 1`
if self.duplicate_ack_count == 3 {
assert!(!self.send_buffer.is_empty());
match self.send_buffer.iter().position(|pkt| Int::from_be(pkt.header.seq_nr) == Int::from_be(packet.header.ack_nr) + 1) {
None => fail!("Received request to resend packets since {} but none was found in send buffer!", Int::from_be(packet.header.ack_nr) + 1),
Some(position) => {
// NOTE(review): resent packets are removed from the send
// buffer and the send_to result is ignored, so they can
// never be retransmitted a second time — confirm intent.
for _ in range(0u, position + 1) {
let to_send = self.send_buffer.shift().unwrap();
debug!("resending: {}", to_send);
self.socket.send_to(to_send.bytes().as_slice(), self.connected_to);
}
},
}
}
// Success, advance send window
while !self.send_buffer.is_empty() &&
Int::from_be(self.send_buffer[0].header.seq_nr) <= self.last_acked {
self.send_buffer.shift();
}
None
},
ST_RESET => /* TODO */ None,
}
}
/// Insert a packet into the socket's buffer.
///
/// The packet is inserted in such a way that the buffer is
/// ordered ascendingly by their sequence number. This allows
/// storing packets that were received out of order.
// NOTE(review): a packet whose seq_nr already exists is inserted before
// the existing copy rather than dropped; the drain loop in recv_from
// advances ack_nr past the first copy, stranding the duplicate — confirm
// duplicates cannot reach this point.
fn insert_into_buffer(&mut self, packet: UtpPacket) {
let mut i = 0;
for pkt in self.incoming_buffer.iter() {
if Int::from_be(pkt.header.seq_nr) >= Int::from_be(packet.header.seq_nr) {
break;
}
i += 1;
}
self.incoming_buffer.insert(i, packet);
}
}
impl Clone for UtpSocket {
// Clones share the underlying UDP socket handle and connection
// ids/sequence state, but start with *fresh* (empty) packet buffers and
// zeroed ACK bookkeeping — this is a deliberate reset, not a deep copy.
// NOTE(review): confirm no caller expects a clone to inherit buffered
// packets or the duplicate-ACK counter.
fn clone(&self) -> UtpSocket {
UtpSocket {
socket: self.socket.clone(),
connected_to: self.connected_to,
receiver_connection_id: self.receiver_connection_id,
sender_connection_id: self.sender_connection_id,
seq_nr: self.seq_nr,
ack_nr: self.ack_nr,
state: self.state,
incoming_buffer: Vec::new(),
send_buffer: Vec::new(),
duplicate_ack_count: 0,
last_acked: 0,
}
}
}
/// Stream interface for UtpSocket.
///
/// Thin Reader/Writer wrapper that hides addresses and packetization.
pub struct UtpStream {
socket: UtpSocket, // underlying datagram-oriented uTP socket
}
impl UtpStream {
    /// Create a uTP stream listening on the given address.
    #[unstable]
    pub fn bind(addr: SocketAddr) -> IoResult<UtpStream> {
        match UtpSocket::bind(addr) {
            Ok(inner) => Ok(UtpStream { socket: inner }),
            Err(err) => Err(err),
        }
    }
    /// Open a uTP connection to a remote host by hostname or IP address.
    #[unstable]
    pub fn connect(dst: SocketAddr) -> IoResult<UtpStream> {
        use std::io::net::ip::Ipv4Addr;
        // Port 0 means the operating system gets to choose it
        let local = SocketAddr { ip: Ipv4Addr(127,0,0,1), port: 0 };
        let unconnected = match UtpSocket::bind(local) {
            Ok(sock) => sock,
            Err(err) => return Err(err),
        };
        match unconnected.connect(dst) {
            Ok(connected) => Ok(UtpStream { socket: connected }),
            Err(err) => Err(err),
        }
    }
    /// Gracefully close connection to peer.
    ///
    /// This method allows both peers to receive all packets still in
    /// flight.
    #[unstable]
    pub fn close(&mut self) -> IoResult<()> {
        self.socket.close()
    }
}
impl Reader for UtpStream {
    /// Read available data from the underlying socket into `buf`,
    /// discarding the sender's address.
    fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
        let received = self.socket.recv_from(buf);
        match received {
            Ok((nread, _addr)) => Ok(nread),
            Err(err) => Err(err),
        }
    }
}
impl Writer for UtpStream {
    /// Send the whole buffer to the peer this stream is connected to.
    fn write(&mut self, buf: &[u8]) -> IoResult<()> {
        let peer = self.socket.connected_to;
        self.socket.send_to(buf, peer)
    }
}
#[cfg(test)]
mod test {
use super::{UtpSocket, UtpPacket};
use super::{ST_STATE, ST_FIN, ST_DATA, ST_RESET, ST_SYN};
use super::{BUF_SIZE, HEADER_SIZE};
use super::{CS_CONNECTED, CS_NEW, CS_CLOSED, CS_EOF};
use std::rand::random;
// Like assert_eq!, but the failure message prints the expected and actual
// values via their Show implementations.
macro_rules! expect_eq(
($left:expr, $right:expr) => (
if !($left == $right) {
fail!("expected {}, got {}", $right, $left);
}
);
)
// Unwrap an IoResult, failing the current task with the error on Err.
macro_rules! iotry(
($e:expr) => (match $e { Ok(e) => e, Err(e) => fail!("{}", e) })
)
// Decode a captured 20-byte ST_STATE header and verify every field,
// converting multi-byte values out of network byte order first.
#[test]
fn test_packet_decode() {
let buf = [0x21, 0x00, 0x41, 0xa8, 0x99, 0x2f, 0xd0, 0x2a, 0x9f, 0x4a,
0x26, 0x21, 0x00, 0x10, 0x00, 0x00, 0x3a, 0xf2, 0x6c, 0x79];
let pkt = UtpPacket::decode(buf);
assert_eq!(pkt.header.get_version(), 1);
assert_eq!(pkt.header.get_type(), ST_STATE);
assert_eq!(pkt.header.extension, 0);
assert_eq!(Int::from_be(pkt.header.connection_id), 16808);
assert_eq!(Int::from_be(pkt.header.timestamp_microseconds), 2570047530);
assert_eq!(Int::from_be(pkt.header.timestamp_difference_microseconds), 2672436769);
assert_eq!(Int::from_be(pkt.header.wnd_size), ::std::num::pow(2u32, 20));
assert_eq!(Int::from_be(pkt.header.seq_nr), 15090);
assert_eq!(Int::from_be(pkt.header.ack_nr), 27769);
// A header-only buffer must decode to an empty payload
assert_eq!(pkt.len(), buf.len());
assert!(pkt.payload.is_empty());
}
// Build a data packet field by field and verify its encoded byte layout
// against a hand-computed reference buffer.
#[test]
fn test_packet_encode() {
let payload = Vec::from_slice("Hello\n".as_bytes());
let (timestamp, timestamp_diff): (u32, u32) = (15270793, 1707040186);
let (connection_id, seq_nr, ack_nr): (u16, u16, u16) = (16808, 15090, 17096);
let window_size: u32 = 1048576;
let mut pkt = UtpPacket::new();
pkt.set_type(ST_DATA);
pkt.header.timestamp_microseconds = timestamp.to_be();
pkt.header.timestamp_difference_microseconds = timestamp_diff.to_be();
pkt.header.connection_id = connection_id.to_be();
pkt.header.seq_nr = seq_nr.to_be();
pkt.header.ack_nr = ack_nr.to_be();
pkt.header.wnd_size = window_size.to_be();
pkt.payload = payload.clone();
let header = pkt.header;
// Expected wire format: 20-byte header followed by "Hello\n"
let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89,
0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00,
0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c,
0x6f, 0x0a];
assert_eq!(pkt.len(), buf.len());
assert_eq!(pkt.len(), HEADER_SIZE + payload.len());
assert_eq!(pkt.payload, payload);
assert_eq!(header.get_version(), 1);
assert_eq!(header.get_type(), ST_DATA);
assert_eq!(header.extension, 0);
assert_eq!(Int::from_be(header.connection_id), connection_id);
assert_eq!(Int::from_be(header.seq_nr), seq_nr);
assert_eq!(Int::from_be(header.ack_nr), ack_nr);
assert_eq!(Int::from_be(header.wnd_size), window_size);
assert_eq!(Int::from_be(header.timestamp_microseconds), timestamp);
assert_eq!(Int::from_be(header.timestamp_difference_microseconds), timestamp_diff);
assert_eq!(pkt.bytes(), Vec::from_slice(buf));
}
// decode followed by bytes must reproduce the original wire bytes exactly.
#[test]
fn test_reversible() {
let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89,
0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00,
0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c,
0x6f, 0x0a];
assert_eq!(UtpPacket::decode(buf).bytes().as_slice(), buf);
}
// Connection establishment over IPv4: the client connects in a child task
// while the server blocks in recv_from; afterwards the server's connection
// ids must mirror the client's and both ends must be CS_CONNECTED.
#[test]
fn test_socket_ipv4() {
use std::io::test::next_test_ip4;
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
let client = iotry!(UtpSocket::bind(clientAddr));
let mut server = iotry!(UtpSocket::bind(serverAddr));
assert!(server.state == CS_NEW);
assert!(client.state == CS_NEW);
// Check proper difference in client's send connection id and receive connection id
assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);
spawn(proc() {
let client = iotry!(client.connect(serverAddr));
assert!(client.state == CS_CONNECTED);
assert_eq!(client.connected_to, serverAddr);
drop(client);
});
let mut buf = [0u8, ..BUF_SIZE];
// recv_from drives the handshake; the result itself is only logged.
match server.recv_from(buf) {
e => println!("{}", e),
}
// After establishing a new connection, the server's ids are a mirror of the client's.
assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);
assert_eq!(server.connected_to, clientAddr);
assert!(server.state == CS_CONNECTED);
drop(server);
}
// State machine on receive after remote close: first recv after FIN leaves
// the socket in CS_EOF, the next recv reports EndOfFile and moves to
// CS_CLOSED, and any further recv reports Closed.
#[test]
fn test_recvfrom_on_closed_socket() {
use std::io::test::next_test_ip4;
use std::io::{Closed, EndOfFile};
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
let client = iotry!(UtpSocket::bind(clientAddr));
let mut server = iotry!(UtpSocket::bind(serverAddr));
assert!(server.state == CS_NEW);
assert!(client.state == CS_NEW);
spawn(proc() {
let mut client = iotry!(client.connect(serverAddr));
assert!(client.state == CS_CONNECTED);
assert_eq!(client.close(), Ok(()));
drop(client);
});
// Make the server listen for incoming connections
let mut buf = [0u8, ..BUF_SIZE];
let _resp = server.recv_from(buf);
assert!(server.state == CS_CONNECTED);
// Closing the connection is fine
match server.recv_from(buf) {
Err(e) => fail!("{}", e),
_ => {},
}
expect_eq!(server.state, CS_EOF);
// Trying to listen on the socket after closing it raises an
// EOF error
match server.recv_from(buf) {
Err(e) => expect_eq!(e.kind, EndOfFile),
v => fail!("expected {}, got {}", EndOfFile, v),
}
expect_eq!(server.state, CS_CLOSED);
// Trying again raises a Closed error
match server.recv_from(buf) {
Err(e) => expect_eq!(e.kind, Closed),
v => fail!("expected {}, got {}", Closed, v),
}
drop(server);
}
// Sending on a locally-closed socket must fail with a Closed error.
#[test]
fn test_sendto_on_closed_socket() {
use std::io::test::next_test_ip4;
use std::io::Closed;
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
let client = iotry!(UtpSocket::bind(clientAddr));
let mut server = iotry!(UtpSocket::bind(serverAddr));
assert!(server.state == CS_NEW);
assert!(client.state == CS_NEW);
spawn(proc() {
let client = iotry!(client.connect(serverAddr));
assert!(client.state == CS_CONNECTED);
let mut buf = [0u8, ..BUF_SIZE];
let mut client = client;
// Keep the peer alive so the server's close() handshake completes.
iotry!(client.recv_from(buf));
});
// Make the server listen for incoming connections
let mut buf = [0u8, ..BUF_SIZE];
let (_read, _src) = iotry!(server.recv_from(buf));
assert!(server.state == CS_CONNECTED);
iotry!(server.close());
expect_eq!(server.state, CS_CLOSED);
// Trying to send to the socket after closing it raises an
// error
match server.send_to(buf, clientAddr) {
Err(e) => expect_eq!(e.kind, Closed),
v => fail!("expected {}, got {}", Closed, v),
}
drop(server);
}
// The client's ack number must track the server's sequence number, and
// STATE-only exchanges (replies to SYN and FIN) must not advance it.
#[test]
fn test_acks_on_socket() {
use std::io::test::next_test_ip4;
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
// Channel used to hand the server's seq_nr back to the main task.
let (tx, rx) = channel();
let client = iotry!(UtpSocket::bind(clientAddr));
let server = iotry!(UtpSocket::bind(serverAddr));
spawn(proc() {
// Make the server listen for incoming connections
let mut server = server;
let mut buf = [0u8, ..BUF_SIZE];
let _resp = server.recv_from(buf);
tx.send(server.seq_nr);
// Close the connection
iotry!(server.recv_from(buf));
drop(server);
});
let mut client = iotry!(client.connect(serverAddr));
assert!(client.state == CS_CONNECTED);
let sender_seq_nr = rx.recv();
let ack_nr = client.ack_nr;
assert!(ack_nr != 0);
assert!(ack_nr == sender_seq_nr);
assert_eq!(client.close(), Ok(()));
// The reply to both connect (SYN) and close (FIN) should be
// STATE packets, which don't increase the sequence number
// and, hence, the receiver's acknowledgement number.
assert!(client.ack_nr == ack_nr);
drop(client);
}
// Exercises handle_packet() directly (no network) through the three phases
// of a connection: SYN/STATE setup, a DATA exchange, and FIN teardown.
#[test]
fn test_handle_packet() {
use std::io::test::next_test_ip4;
//fn test_connection_setup() {
let initial_connection_id: u16 = random();
let sender_connection_id = initial_connection_id + 1;
let serverAddr = next_test_ip4();
let mut socket = iotry!(UtpSocket::bind(serverAddr));
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_SYN);
packet.header.connection_id = initial_connection_id.to_be();
let sent = packet.header;
// Do we have a response?
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
// Is it of the correct type?
let response = response.unwrap();
assert!(response.get_type() == ST_STATE);
// Same connection id on both ends during connection establishment
assert!(response.header.connection_id == sent.connection_id);
// Response acknowledges SYN
assert!(response.header.ack_nr == sent.seq_nr);
// No payload?
assert!(response.payload.is_empty());
//}
// ---------------------------------
// fn test_connection_usage() {
let old_packet = packet;
let old_response = response;
let mut packet = UtpPacket::new();
packet.set_type(ST_DATA);
packet.header.connection_id = sender_connection_id.to_be();
packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
packet.header.ack_nr = old_response.header.seq_nr;
let sent = packet.header;
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
let response = response.unwrap();
assert!(response.get_type() == ST_STATE);
// Sender (i.e., who initated connection and sent SYN) has connection id
// equal to initial connection id + 1
// Receiver (i.e., who accepted connection) has connection id equal to
// initial connection id
assert!(Int::from_be(response.header.connection_id) == initial_connection_id);
assert!(Int::from_be(response.header.connection_id) == Int::from_be(sent.connection_id) - 1);
// Previous packets should be ack'ed
assert!(Int::from_be(response.header.ack_nr) == Int::from_be(sent.seq_nr));
// Responses with no payload should not increase the sequence number
assert!(response.payload.is_empty());
assert!(Int::from_be(response.header.seq_nr) == Int::from_be(old_response.header.seq_nr));
// }
//fn test_connection_teardown() {
let old_packet = packet;
let old_response = response;
let mut packet = UtpPacket::new();
packet.set_type(ST_FIN);
packet.header.connection_id = sender_connection_id.to_be();
packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
packet.header.ack_nr = old_response.header.seq_nr;
let sent = packet.header;
let response = socket.handle_packet(packet);
assert!(response.is_some());
let response = response.unwrap();
assert!(response.get_type() == ST_STATE);
// FIN packets have no payload but the sequence number shouldn't increase
assert!(Int::from_be(sent.seq_nr) == Int::from_be(old_packet.header.seq_nr) + 1);
// Nor should the ACK packet's sequence number
assert!(response.header.seq_nr == old_response.header.seq_nr);
// FIN should be acknowledged
assert!(response.header.ack_nr == sent.seq_nr);
//}
}
// Keepalive packets (STATE with an unchanged seq/ack) must be silently
// absorbed: handle_packet returns None both the first and second time.
#[test]
fn test_response_to_keepalive_ack() {
use std::io::test::next_test_ip4;
// Boilerplate test setup
let initial_connection_id: u16 = random();
let serverAddr = next_test_ip4();
let mut socket = iotry!(UtpSocket::bind(serverAddr));
// Establish connection
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_SYN);
packet.header.connection_id = initial_connection_id.to_be();
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
let response = response.unwrap();
assert!(response.get_type() == ST_STATE);
let old_packet = packet;
let old_response = response;
// Now, send a keepalive packet
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_STATE);
packet.header.connection_id = initial_connection_id.to_be();
packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
packet.header.ack_nr = old_response.header.seq_nr;
let response = socket.handle_packet(packet.clone());
assert!(response.is_none());
// Send a second keepalive packet, identical to the previous one
let response = socket.handle_packet(packet.clone());
assert!(response.is_none());
}
// A packet carrying a wrong connection id must be answered with ST_RESET.
// The bogus id is produced by byte-swapping the real one via to_le().
#[test]
fn test_response_to_wrong_connection_id() {
use std::io::test::next_test_ip4;
// Boilerplate test setup
let initial_connection_id: u16 = random();
let serverAddr = next_test_ip4();
let mut socket = iotry!(UtpSocket::bind(serverAddr));
// Establish connection
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_SYN);
packet.header.connection_id = initial_connection_id.to_be();
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
assert!(response.unwrap().get_type() == ST_STATE);
// Now, disrupt connection with a packet with an incorrect connection id
let new_connection_id = initial_connection_id.to_le();
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_STATE);
packet.header.connection_id = new_connection_id;
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
let response = response.unwrap();
assert!(response.get_type() == ST_RESET);
assert!(response.header.ack_nr == packet.header.seq_nr);
}
// Smoke test for the UtpStream wrapper: connect, close immediately, and
// make sure read_to_end on the server side terminates.
#[test]
fn test_utp_stream() {
use super::UtpStream;
use std::io::test::next_test_ip4;
let serverAddr = next_test_ip4();
let mut server = iotry!(UtpStream::bind(serverAddr));
spawn(proc() {
let mut client = iotry!(UtpStream::connect(serverAddr));
iotry!(client.close());
});
iotry!(server.read_to_end());
}
// Stream transfer of a payload that fits in a single packet (1 KiB).
#[test]
fn test_utp_stream_small_data() {
use super::UtpStream;
use std::io::test::next_test_ip4;
// Fits in a packet
static len: uint = 1024;
let data = Vec::from_fn(len, |idx| idx as u8);
expect_eq!(len, data.len());
let d = data.clone();
let serverAddr = next_test_ip4();
let mut server = UtpStream::bind(serverAddr);
spawn(proc() {
let mut client = iotry!(UtpStream::connect(serverAddr));
iotry!(client.write(d.as_slice()));
iotry!(client.close());
});
let read = iotry!(server.read_to_end());
assert!(!read.is_empty());
expect_eq!(read.len(), data.len());
expect_eq!(read, data);
}
// Stream transfer of a payload that spans many packets (1 MiB), checking
// that fragmentation and reassembly preserve the byte sequence.
#[test]
fn test_utp_stream_large_data() {
use super::UtpStream;
use std::io::test::next_test_ip4;
// Has to be sent over several packets
static len: uint = 1024 * 1024;
let data = Vec::from_fn(len, |idx| idx as u8);
expect_eq!(len, data.len());
let d = data.clone();
let serverAddr = next_test_ip4();
let mut server = UtpStream::bind(serverAddr);
spawn(proc() {
let mut client = iotry!(UtpStream::connect(serverAddr));
iotry!(client.write(d.as_slice()));
iotry!(client.close());
});
let read = iotry!(server.read_to_end());
assert!(!read.is_empty());
expect_eq!(read.len(), data.len());
expect_eq!(read, data);
}
// After read_to_end drains a closed stream, a subsequent read must fail
// with a Closed error rather than block or return data.
#[test]
fn test_utp_stream_successive_reads() {
use super::UtpStream;
use std::io::test::next_test_ip4;
use std::io::Closed;
static len: uint = 1024;
let data: Vec<u8> = Vec::from_fn(len, |idx| idx as u8);
expect_eq!(len, data.len());
let d = data.clone();
let serverAddr = next_test_ip4();
let mut server = UtpStream::bind(serverAddr);
spawn(proc() {
let mut client = iotry!(UtpStream::connect(serverAddr));
iotry!(client.write(d.as_slice()));
iotry!(client.close());
});
iotry!(server.read_to_end());
let mut buf = [0u8, ..4096];
match server.read(buf) {
Err(ref e) if e.kind == Closed => {},
_ => fail!("should have failed with Closed"),
};
}
// Out-of-order delivery at the handle_packet level: a packet with seq_nr+2
// arriving before seq_nr+1 must be acked conservatively (ack_nr lags behind
// its seq_nr) until the gap is filled.
#[test]
fn test_unordered_packets() {
use std::io::test::next_test_ip4;
// Boilerplate test setup
let initial_connection_id: u16 = random();
let serverAddr = next_test_ip4();
let mut socket = iotry!(UtpSocket::bind(serverAddr));
// Establish connection
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_SYN);
packet.header.connection_id = initial_connection_id.to_be();
let response = socket.handle_packet(packet.clone());
assert!(response.is_some());
let response = response.unwrap();
assert!(response.get_type() == ST_STATE);
let old_packet = packet;
let old_response = response;
let mut window: Vec<UtpPacket> = Vec::new();
// Build two consecutive DATA packets (seq_nr+1 and seq_nr+2).
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_DATA);
packet.header.connection_id = initial_connection_id.to_be();
packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
packet.header.ack_nr = old_response.header.seq_nr;
packet.payload = vec!(1,2,3);
window.push(packet);
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_DATA);
packet.header.connection_id = initial_connection_id.to_be();
packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 2).to_be();
packet.header.ack_nr = old_response.header.seq_nr;
packet.payload = vec!(4,5,6);
window.push(packet);
// Send packets in reverse order
let response = socket.handle_packet(window[1].clone());
assert!(response.is_some());
let response = response.unwrap();
// The out-of-order packet must not be acked as if it were in sequence.
assert!(response.header.ack_nr != window[1].header.seq_nr);
let response = socket.handle_packet(window[0].clone());
assert!(response.is_some());
}
// End-to-end reordering test: four DATA packets plus a FIN are sent over a
// raw socket in scrambled order; the server must still reassemble the
// original 12-byte payload.
#[test]
fn test_socket_unordered_packets() {
use std::io::test::next_test_ip4;
use super::UtpStream;
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
let client = iotry!(UtpSocket::bind(clientAddr));
let mut server = iotry!(UtpSocket::bind(serverAddr));
assert!(server.state == CS_NEW);
assert!(client.state == CS_NEW);
// Check proper difference in client's send connection id and receive connection id
assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);
spawn(proc() {
let client = iotry!(client.connect(serverAddr));
assert!(client.state == CS_CONNECTED);
let mut s = client.socket;
let mut window: Vec<UtpPacket> = Vec::new();
let mut i = 0;
// Four 3-byte chunks become DATA packets with seq_nr + 0..3.
for data in Vec::from_fn(12, |idx| idx as u8 + 1).as_slice().chunks(3) {
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_DATA);
packet.header.connection_id = client.sender_connection_id.to_be();
packet.header.seq_nr = (client.seq_nr + i).to_be();
packet.header.ack_nr = client.ack_nr.to_be();
packet.payload = Vec::from_slice(data);
window.push(packet);
i += 1;
}
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_FIN);
packet.header.connection_id = client.sender_connection_id.to_be();
// NOTE(review): seq_nr + 2 duplicates the third DATA packet's sequence
// number (data packets use seq_nr + 0..3); presumably this should be
// seq_nr + 4 — confirm against UtpSocket's reordering logic.
packet.header.seq_nr = (client.seq_nr + 2).to_be();
packet.header.ack_nr = client.ack_nr.to_be();
window.push(packet);
iotry!(s.send_to(window[3].bytes().as_slice(), serverAddr));
iotry!(s.send_to(window[2].bytes().as_slice(), serverAddr));
iotry!(s.send_to(window[1].bytes().as_slice(), serverAddr));
iotry!(s.send_to(window[0].bytes().as_slice(), serverAddr));
iotry!(s.send_to(window[4].bytes().as_slice(), serverAddr));
for _ in range(0u, 2) {
let mut buf = [0, ..BUF_SIZE];
iotry!(s.recv_from(buf));
}
});
let mut buf = [0u8, ..BUF_SIZE];
match server.recv_from(buf) {
e => println!("{}", e),
}
// After establishing a new connection, the server's ids are a mirror of the client's.
assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);
assert!(server.state == CS_CONNECTED);
let mut stream = UtpStream { socket: server };
let expected: Vec<u8> = Vec::from_fn(12, |idx| idx as u8 + 1);
match stream.read_to_end() {
Ok(data) => {
expect_eq!(data.len(), expected.len());
expect_eq!(data, expected);
},
Err(e) => fail!("{}", e),
}
}
// A raw SYN delivered over a plain UdpSocket must be acked but never stored
// in the incoming buffer (SYNs carry no data to deliver).
#[test]
fn test_socket_should_not_buffer_syn_packets() {
use std::io::test::next_test_ip4;
use std::io::net::udp::UdpSocket;
use super::UtpSocket;
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
let server = iotry!(UtpSocket::bind(serverAddr));
let client = iotry!(UdpSocket::bind(clientAddr));
// Pre-encoded SYN packet used as the raw datagram.
let test_syn_raw = [0x41, 0x00, 0x41, 0xa7, 0x00, 0x00, 0x00,
0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x3a,
0xf1, 0x00, 0x00];
let test_syn_pkt = UtpPacket::decode(test_syn_raw);
let seq_nr = Int::from_be(test_syn_pkt.header.seq_nr);
spawn(proc() {
let mut client = client;
iotry!(client.send_to(test_syn_raw, serverAddr));
client.set_timeout(Some(10));
let mut buf = [0, ..BUF_SIZE];
let packet = match client.recv_from(buf) {
Ok((nread, _src)) => UtpPacket::decode(buf.slice_to(nread)),
Err(e) => fail!("{}", e),
};
expect_eq!(packet.header.ack_nr, seq_nr.to_be());
drop(client);
});
let mut server = server;
let mut buf = [0, ..20];
iotry!(server.recv_from(buf));
assert!(server.ack_nr != 0);
expect_eq!(server.ack_nr, seq_nr);
assert!(server.incoming_buffer.is_empty());
}
// Fast-retransmit behaviour: after the server sends three duplicate ACKs
// for the packet preceding the data, the client must resend the same DATA
// packet (same seq_nr and payload).
#[test]
fn test_response_to_triple_ack() {
use std::io::test::next_test_ip4;
let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
let mut server = iotry!(UtpSocket::bind(serverAddr));
let client = iotry!(UtpSocket::bind(clientAddr));
// Fits in a packet
static len: uint = 1024;
let data = Vec::from_fn(len, |idx| idx as u8);
let d = data.clone();
expect_eq!(len, data.len());
spawn(proc() {
let mut client = iotry!(client.connect(serverAddr));
iotry!(client.send_to(d.as_slice(), serverAddr));
iotry!(client.close());
});
let mut buf = [0, ..BUF_SIZE];
// Expect SYN
iotry!(server.recv_from(buf));
// Receive data
let mut data_packet;
match server.socket.recv_from(buf) {
Ok((read, _src)) => {
data_packet = UtpPacket::decode(buf.slice_to(read));
assert!(data_packet.get_type() == ST_DATA);
expect_eq!(data_packet.payload, data);
assert_eq!(data_packet.payload.len(), data.len());
},
Err(e) => fail!("{}", e),
}
let data_packet = data_packet;
// Send triple ACK
let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
packet.set_type(ST_STATE);
packet.header.seq_nr = server.seq_nr.to_be();
packet.header.ack_nr = (Int::from_be(data_packet.header.seq_nr) - 1).to_be();
packet.header.connection_id = server.sender_connection_id.to_be();
for _ in range(0u, 3) {
iotry!(server.socket.send_to(packet.bytes().as_slice(), clientAddr));
}
// Receive data again and check that it's the same we reported as missing
match server.socket.recv_from(buf) {
Ok((0, _)) => fail!("Received 0 bytes from socket"),
Ok((read, _src)) => {
let packet = UtpPacket::decode(buf.slice_to(read));
assert_eq!(packet.get_type(), ST_DATA),
assert_eq!(Int::from_be(packet.header.seq_nr), Int::from_be(data_packet.header.seq_nr));
assert!(packet.payload == data_packet.payload);
let response = server.handle_packet(packet).unwrap();
iotry!(server.socket.send_to(response.bytes().as_slice(), server.connected_to));
},
Err(e) => fail!("{}", e),
}
// Receive close
iotry!(server.recv_from(buf));
}
}
|
// Filebuffer -- Fast and simple file reading
// Copyright 2016 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
//! This mod contains the platform-specific implementations for Windows based on the winapi crate.
use std::fs;
use std::io;
use std::os;
use std::os::windows::io::AsRawHandle;
use std::ptr;
extern crate winapi;
// Windows-specific state that must outlive the mapped view.
#[derive(Debug)]
pub struct PlatformData {
// On Windows, the file must be kept open for the lifetime of the mapping.
#[allow(dead_code)] // The field is not dead, the destructor is effectful.
file: fs::File,
// Handle returned by CreateFileMappingW; null when nothing was mapped.
mapping_handle: winapi::um::winnt::HANDLE,
}
impl Drop for PlatformData {
// Closes the file-mapping handle (if one was created); the view itself is
// released separately via unmap_file.
fn drop (&mut self) {
if self.mapping_handle != ptr::null_mut() {
let success = unsafe { winapi::um::handleapi::CloseHandle(self.mapping_handle) };
// CloseHandle returns nonzero on success.
assert!(success != 0);
}
}
}
/// Memory-maps `file` read-only and returns (pointer, length, platform data).
///
/// An empty file is not mapped at all; a null pointer with length 0 is
/// returned instead. Errors are reported via `io::Error::last_os_error()`.
pub fn map_file(file: fs::File) -> io::Result<(*const u8, usize, PlatformData)> {
    let file_handle = file.as_raw_handle();
    let length = try!(file.metadata()).len();
    if length > usize::max_value() as u64 {
        return Err(io::Error::new(io::ErrorKind::Other, "file is larger than address space"));
    }
    let mut platform_data = PlatformData {
        file: file,
        mapping_handle: ptr::null_mut(),
    };
    // Don't try to map anything if the file is empty.
    if length == 0 {
        return Ok((ptr::null(), 0, platform_data));
    }
    // Memory-mapping a file on Windows is a two-step process: first we create a file mapping
    // object, and then we create a view of that mapping in the virtual address space.
    platform_data.mapping_handle = unsafe {
        winapi::um::memoryapi::CreateFileMappingW(
            // `RawHandle` is a distinct type from winapi's `HANDLE` alias, so the
            // handle has to be cast explicitly; passing it uncast fails to compile.
            file_handle as *mut winapi::ctypes::c_void,
            ptr::null_mut(), // Use default security policy.
            winapi::um::winnt::PAGE_READONLY, // The memory will be read-only.
            0, 0, // The mapping size is the size of the file.
            ptr::null_mut() // The mapping does not have a name.
        )
    };
    if platform_data.mapping_handle == ptr::null_mut() {
        return Err(io::Error::last_os_error());
    }
    let result = unsafe {
        winapi::um::memoryapi::MapViewOfFile(
            platform_data.mapping_handle,
            winapi::um::memoryapi::FILE_MAP_READ, // The memory mapping will be read-only.
            0, 0, // Start offset of the mapping is 0.
            length as winapi::shared::basetsd::SIZE_T // Map the entire file.
        )
    };
    if result == ptr::null_mut() {
        Err(io::Error::last_os_error())
    } else {
        Ok((result as *const u8, length as usize, platform_data))
    }
}
/// Releases the view created by `map_file`; the mapping handle itself is
/// closed by `PlatformData`'s destructor.
pub fn unmap_file(buffer: *const u8, _length: usize) {
    let success = unsafe {
        // The pointer must be cast to winapi's own `c_void`; `std::os::raw::c_void`
        // is a different type and does not unify with the winapi signature.
        winapi::um::memoryapi::UnmapViewOfFile(buffer as *mut winapi::ctypes::c_void)
    };
    // UnmapViewOfFile returns nonzero on success.
    assert!(success != 0);
}
/// Fills `residency` with per-page residency flags; see also `unix::get_resident`.
///
/// Windows has no `mincore()` equivalent, so this implementation cannot tell
/// which pages are actually resident and optimistically reports all of them as
/// such. (`VirtualQuery()` is the closest API, but the `state` field of the
/// `MEMORY_BASIC_INFORMATION` it fills does not expose residency.)
pub fn get_resident(_buffer: *const u8, _length: usize, residency: &mut [bool]) {
    // Lie and pretend everything is resident.
    for flag in residency.iter_mut() {
        *flag = true;
    }
}
/// Hints the OS to page in `length` bytes at `buffer`; see also `unix::prefetch`.
pub fn prefetch(buffer: *const u8, length: usize) {
    let mut entry = winapi::um::memoryapi::WIN32_MEMORY_RANGE_ENTRY {
        // The struct's PVOID field is winapi's own `c_void`, not
        // `std::os::raw::c_void`; using the std type fails to compile.
        VirtualAddress: buffer as *mut winapi::ctypes::c_void,
        NumberOfBytes: length as winapi::shared::basetsd::SIZE_T,
    };
    unsafe {
        let current_process_handle = winapi::um::processthreadsapi::GetCurrentProcess();
        winapi::um::memoryapi::PrefetchVirtualMemory(
            current_process_handle, // Prefetch for the current process.
            1, &mut entry, // An array of length 1 that contains `entry`.
            0 // Reserved flag that must be 0.
        );
    }
    // The return value of `PrefetchVirtualMemory` is ignored. MSDN says the function may fail if
    // the system is under memory pressure. (It is not entirely clear whether "fail" means
    // "returns a nonzero value", but I assume it does.)
}
/// Returns the system page size in bytes, as reported by `GetSystemInfo`.
pub fn get_page_size() -> usize {
    // Zero-initialize the `SYSTEM_INFO` struct; `GetSystemInfo` will fill it.
    // Listing the fields by name is brittle against winapi's struct layout
    // (the first two fields live inside a union wrapper there), so use
    // `mem::zeroed` instead of a struct literal.
    let mut sysinfo: winapi::um::sysinfoapi::SYSTEM_INFO = unsafe { std::mem::zeroed() };
    unsafe { winapi::um::sysinfoapi::GetSystemInfo(&mut sysinfo); }
    sysinfo.dwPageSize as usize
}
Fix more build errors on Windows
This is kind of difficult to debug without access to a Windows machine.
I have to push, wait for CI to fail, fix, ... Hopefully all errors are
resolved now.
// Filebuffer -- Fast and simple file reading
// Copyright 2016 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
//! This mod contains the platform-specific implementations for Windows based on the winapi crate.
use std::fs;
use std::io;
use std::mem;
use std::os;
use std::os::windows::io::AsRawHandle;
use std::ptr;
extern crate winapi;
// Per-mapping Windows state; dropping it closes the mapping handle.
#[derive(Debug)]
pub struct PlatformData {
// On Windows, the file must be kept open for the lifetime of the mapping.
#[allow(dead_code)] // The field is not dead, the destructor is effectful.
file: fs::File,
// CreateFileMappingW handle; null means no mapping was created (empty file).
mapping_handle: winapi::um::winnt::HANDLE,
}
impl Drop for PlatformData {
// Releases the mapping handle; the mapped view is torn down by unmap_file.
fn drop (&mut self) {
if self.mapping_handle != ptr::null_mut() {
let success = unsafe { winapi::um::handleapi::CloseHandle(self.mapping_handle) };
// Nonzero return indicates success.
assert!(success != 0);
}
}
}
/// Memory-maps `file` read-only, returning (pointer, length, platform data).
/// Empty files are not mapped; a null pointer with length 0 is returned.
pub fn map_file(file: fs::File) -> io::Result<(*const u8, usize, PlatformData)> {
let file_handle = file.as_raw_handle();
let length = try!(file.metadata()).len();
if length > usize::max_value() as u64 {
return Err(io::Error::new(io::ErrorKind::Other, "file is larger than address space"));
}
let mut platform_data = PlatformData {
file: file,
mapping_handle: ptr::null_mut(),
};
// Don't try to map anything if the file is empty.
if length == 0 {
return Ok((ptr::null(), 0, platform_data));
}
// Memory-mapping a file on Windows is a two-step process: first we create a file mapping
// object, and then we create a view of that mapping in the virtual address space.
platform_data.mapping_handle = unsafe {
winapi::um::memoryapi::CreateFileMappingW(
// Cast: std's RawHandle is a distinct type from winapi's HANDLE alias.
file_handle as *mut winapi::ctypes::c_void,
ptr::null_mut(), // Use default security policy.
winapi::um::winnt::PAGE_READONLY, // The memory will be read-only.
0, 0, // The mapping size is the size of the file.
ptr::null_mut() // The mapping does not have a name.
)
};
if platform_data.mapping_handle == ptr::null_mut() {
return Err(io::Error::last_os_error());
}
let result = unsafe {
winapi::um::memoryapi::MapViewOfFile(
platform_data.mapping_handle,
winapi::um::memoryapi::FILE_MAP_READ, // The memory mapping will be read-only.
0, 0, // Start offset of the mapping is 0.
length as winapi::shared::basetsd::SIZE_T // Map the entire file.
)
};
if result == ptr::null_mut() {
Err(io::Error::last_os_error())
} else {
Ok((result as *const u8, length as usize, platform_data))
}
}
/// Unmaps the view created by `map_file`; the mapping handle is closed by
/// `PlatformData::drop`.
pub fn unmap_file(buffer: *const u8, _length: usize) {
let success = unsafe {
winapi::um::memoryapi::UnmapViewOfFile(buffer as *mut winapi::ctypes::c_void)
};
// Nonzero return indicates success.
assert!(success != 0);
}
/// Reports page residency for the mapped range; see also `unix::get_resident`.
///
/// There is no Windows counterpart to `mincore()`: `VirtualQuery()` is the
/// nearest API, but the `state` member of `MEMORY_BASIC_INFORMATION` does not
/// say whether a page is resident. The only honest option short of failing is
/// to claim everything is resident, so that is what this does.
pub fn get_resident(_buffer: *const u8, _length: usize, residency: &mut [bool]) {
    // Mark every page as resident.
    residency.iter_mut().for_each(|page| *page = true);
}
/// Asks the OS to page in `length` bytes at `buffer`; see also `unix::prefetch`.
pub fn prefetch(buffer: *const u8, length: usize) {
let mut entry = winapi::um::memoryapi::WIN32_MEMORY_RANGE_ENTRY {
VirtualAddress: buffer as *mut winapi::ctypes::c_void,
NumberOfBytes: length as winapi::shared::basetsd::SIZE_T,
};
unsafe {
let current_process_handle = winapi::um::processthreadsapi::GetCurrentProcess();
winapi::um::memoryapi::PrefetchVirtualMemory(
current_process_handle, // Prefetch for the current process.
1, &mut entry, // An array of length 1 that contains `entry`.
0 // Reserved flag that must be 0.
);
}
// The return value of `PrefetchVirtualMemory` is ignored. MSDN says the function may fail if
// the system is under memory pressure. (It is not entirely clear whether "fail" means
// "returns a nonzero value", but I assume it does.)
}
/// Returns the system page size in bytes via `GetSystemInfo`.
pub fn get_page_size() -> usize {
// Fill the `SYSTEM_INFO` struct with zeroes. It will be filled by
// `GetSystemInfo` later but Rust requires it to be initialized.
let mut sysinfo: winapi::um::sysinfoapi::SYSTEM_INFO = unsafe { mem::zeroed() };
unsafe { winapi::um::sysinfoapi::GetSystemInfo(&mut sysinfo); }
sysinfo.dwPageSize as usize
}
|
use jsonpath_lib::select;
use lazy_static::lazy_static;
use regex::{Regex, RegexBuilder};
use serde_json::Value;
use std::{env, fmt, fs};
mod cache;
mod config;
mod error;
use cache::Cache;
use config::parse_config;
use error::CkError;
/// Entry point: parse the CLI config, run every `@` command found in the
/// template file, and report all failures (exiting nonzero if any occurred).
fn main() -> Result<(), String> {
let config = parse_config(env::args().collect());
let mut failed = Vec::new();
let mut cache = Cache::new(&config.doc_dir);
let commands = get_commands(&config.template)
.map_err(|_| format!("Jsondocck failed for {}", &config.template))?;
// Run every command, collecting failures instead of stopping at the first.
for command in commands {
if let Err(e) = check_command(command, &mut cache) {
failed.push(e);
}
}
if failed.is_empty() {
Ok(())
} else {
// Print each failure before returning the overall error.
for i in failed {
eprintln!("{}", i);
}
Err(format!("Jsondocck failed for {}", &config.template))
}
}
/// A single parsed `@` directive from the template file.
#[derive(Debug)]
pub struct Command {
// True when written as `@!cmd` (the check must NOT match).
negated: bool,
// Which check to perform.
kind: CommandKind,
// Shlex-split arguments following the command name.
args: Vec<String>,
// 1-based line number in the template, for diagnostics.
lineno: usize,
}
/// The supported check kinds: `@has` (existence/value match) and
/// `@count` (exact number of jsonpath matches).
#[derive(Debug)]
pub enum CommandKind {
Has,
Count,
}
impl CommandKind {
    /// Checks that `args` is well-formed for this command kind, reporting any
    /// problem via `print_err`. Returns `true` when the command may be run.
    ///
    /// `command_num` is the zero-based index of the command in the file (used
    /// to reject `-`, the "previous path" shorthand, on the very first
    /// command); `lineno` is the 1-based template line for diagnostics.
    fn validate(&self, args: &[String], command_num: usize, lineno: usize) -> bool {
        // Arity: @has takes 1 to 3 arguments, @count takes exactly 3.
        let count = match self {
            CommandKind::Has => (1..=3).contains(&args.len()),
            CommandKind::Count => 3 == args.len(),
        };
        if !count {
            print_err(&format!("Incorrect number of arguments to `@{}`", self), lineno);
            return false;
        }
        if args[0] == "-" && command_num == 0 {
            // `-` refers to the previous command's path, which does not exist yet.
            // Plain literal: no `format!` needed (clippy::useless_format).
            print_err("Tried to use the previous path in the first command", lineno);
            return false;
        }
        if let CommandKind::Count = self {
            // The expected count must parse now so check_command can unwrap later.
            if args[2].parse::<usize>().is_err() {
                print_err("Third argument to @count must be a valid usize", lineno);
                return false;
            }
        }
        true
    }
}
impl fmt::Display for CommandKind {
    /// Renders the command name exactly as it appears after `@` in templates.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            CommandKind::Has => "has",
            CommandKind::Count => "count",
        })
    }
}
// Compiled once: matches an `@command` directive anywhere after whitespace.
// Capture groups: `invalid` flags the malformed `!@cmd` spelling, `negated`
// is the `!` of a proper `@!cmd`, `cmd` is the command name, and `args` is
// the raw remainder of the line (split later with shlex).
lazy_static! {
static ref LINE_PATTERN: Regex = RegexBuilder::new(
r#"
\s(?P<invalid>!?)@(?P<negated>!?)
(?P<cmd>[A-Za-z]+(?:-[A-Za-z]+)*)
(?P<args>.*)$
"#
)
.ignore_whitespace(true)
.unicode(true)
.build()
.unwrap();
}
/// Prints an "Invalid command" diagnostic for template line `lineno` to stderr.
fn print_err(msg: &str, lineno: usize) {
eprintln!("Invalid command: {} on line {}", msg, lineno)
}
/// Get a list of commands from a file. Does the work of ensuring the commands
/// are syntactically valid.
///
/// Scans every line of `template` for the `@command` pattern; returns Err(())
/// if any command is malformed (all errors are printed before returning).
fn get_commands(template: &str) -> Result<Vec<Command>, ()> {
let mut commands = Vec::new();
let mut errors = false;
let file = fs::read_to_string(template).unwrap();
for (lineno, line) in file.split('\n').enumerate() {
// Report 1-based line numbers.
let lineno = lineno + 1;
// Lines without a command directive are silently skipped.
let cap = match LINE_PATTERN.captures(line) {
Some(c) => c,
None => continue,
};
let negated = cap.name("negated").unwrap().as_str() == "!";
let cmd = cap.name("cmd").unwrap().as_str();
let cmd = match cmd {
"has" => CommandKind::Has,
"count" => CommandKind::Count,
_ => {
print_err(&format!("Unrecognized command name `@{}`", cmd), lineno);
errors = true;
continue;
}
};
// `!@cmd` is a common typo for `@!cmd`; reject it with a hint.
if let Some(m) = cap.name("invalid") {
if m.as_str() == "!" {
print_err(
&format!(
"`!@{0}{1}`, (help: try with `@!{1}`)",
if negated { "!" } else { "" },
cmd,
),
lineno,
);
errors = true;
continue;
}
}
// Shell-style splitting of the argument tail.
let args = cap.name("args").map_or(vec![], |m| shlex::split(m.as_str()).unwrap());
if !cmd.validate(&args, commands.len(), lineno) {
errors = true;
continue;
}
commands.push(Command { negated, kind: cmd, args, lineno })
}
if !errors { Ok(commands) } else { Err(()) }
}
/// Performs the actual work of ensuring a command passes. Generally assumes the command
/// is syntactically valid.
///
/// Returns Ok(()) when the check's outcome agrees with its negation flag,
/// Err(CkError) otherwise (or on cache/file errors propagated by `?`).
fn check_command(command: Command, cache: &mut Cache) -> Result<(), CkError> {
let result = match command.kind {
CommandKind::Has => {
match command.args.len() {
// @has <path> = file existence
1 => cache.get_file(&command.args[0]).is_ok(),
// @has <path> <jsonpath> = check path exists
2 => {
let val = cache.get_value(&command.args[0])?;
match select(&val, &command.args[1]) {
Ok(results) => !results.is_empty(),
Err(_) => false,
}
}
// @has <path> <jsonpath> <value> = check *any* item matched by path equals value
3 => {
let val = cache.get_value(&command.args[0])?;
match select(&val, &command.args[1]) {
Ok(results) => {
// The third argument is parsed as JSON; validate() does not check
// this, so a malformed value panics here via unwrap.
let pat: Value = serde_json::from_str(&command.args[2]).unwrap();
!results.is_empty() && results.into_iter().any(|val| *val == pat)
}
Err(_) => false,
}
}
_ => unreachable!(),
}
}
CommandKind::Count => {
// @count <path> <jsonpath> <count> = Check that the jsonpath matches exactly [count] times
assert_eq!(command.args.len(), 3);
let expected: usize = command.args[2].parse().unwrap();
let val = cache.get_value(&command.args[0])?;
match select(&val, &command.args[1]) {
Ok(results) => results.len() == expected,
Err(_) => false,
}
}
};
// A check fails when its outcome equals its negation flag: a negated
// command that matched, or a plain command that didn't.
if result == command.negated {
if command.negated {
Err(CkError::FailedCheck(
format!(
"`@!{} {}` matched when it shouldn't",
command.kind,
command.args.join(" ")
),
command,
))
} else {
// FIXME: In the future, try 'peeling back' each step, and see at what level the match failed
Err(CkError::FailedCheck(
format!(
"`@{} {}` didn't match when it should",
command.kind,
command.args.join(" ")
),
command,
))
}
} else {
Ok(())
}
}
Rollup merge of #81320 - CraftSpider:jsondoc-errors, r=jyn514
Make bad shlex parsing a pretty error
Closes #81319
Old Output:
<details><summary>Backtrace</summary>
<p>
```
thread 'main' panicked at 'called `Option::unwrap()` on a `None` value', src\too
ls\jsondocck\src\main.rs:152:81
stack backtrace:
0: 0x7ff79a011405 - std::backtrace_rs::backtrace::dbghelp::trace
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\..\..\backtrace\src\backtrace\dbghelp.rs:98
1: 0x7ff79a011405 - std::backtrace_rs::backtrace::trace_unsynchronized
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\..\..\backtrace\src\backtrace\mod.rs:66
2: 0x7ff79a011405 - std::sys_common::backtrace::_print_fmt
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\sys_common\backtrace.rs:67
3: 0x7ff79a011405 - std::sys_common::backtrace::_print::{{impl}}::fmt
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\sys_common\backtrace.rs:46
4: 0x7ff79a026c7b - core::fmt::write
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\core\src\fmt\mod.rs:1078
5: 0x7ff79a00e74d - std::io::Write::write_fmt<std::sys::windows::stdio::S
tderr>
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\io\mod.rs:1519
6: 0x7ff79a01413d - std::sys_common::backtrace::_print
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\sys_common\backtrace.rs:49
7: 0x7ff79a01413d - std::sys_common::backtrace::print
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\sys_common\backtrace.rs:36
8: 0x7ff79a01413d - std::panicking::default_hook::{{closure}}
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\panicking.rs:208
9: 0x7ff79a013c4a - std::panicking::default_hook
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\panicking.rs:225
10: 0x7ff79a014a7e - std::panicking::rust_panic_with_hook
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\panicking.rs:591
11: 0x7ff79a014573 - std::panicking::begin_panic_handler::{{closure}}
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\panicking.rs:495
12: 0x7ff79a011ddf - std::sys_common::backtrace::__rust_end_short_backtrac
e<closure-0,!>
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\sys_common\backtrace.rs:141
13: 0x7ff79a0144f9 - std::panicking::begin_panic_handler
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\panicking.rs:493
14: 0x7ff79a025230 - core::panicking::panic_fmt
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\core\src\panicking.rs:92
15: 0x7ff79a02517c - core::panicking::panic
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\core\src\panicking.rs:50
16: 0x7ff799f5245f - indexmap::map::core::raw::<impl indexmap::map::core::
IndexMapCore<K,V>>::get_index_of::had34e726f99bd999
17: 0x7ff799f48fea - std::sys_common::backtrace::__rust_begin_short_backtr
ace::h1ac92efa44350e74
18: 0x7ff799f41015 - std::rt::lang_start::{{closure}}::hdfe733a6a1ad9a18
19: 0x7ff79a014c34 - core::ops::function::impls::{{impl}}::call_once
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\library\core\src\ops\function.rs:280
20: 0x7ff79a014c34 - std::panicking::try::do_call
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\panicking.rs:379
21: 0x7ff79a014c34 - std::panicking::try
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\panicking.rs:343
22: 0x7ff79a014c34 - std::panic::catch_unwind
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\panic.rs:396
23: 0x7ff79a014c34 - std::rt::lang_start_internal
at /rustc/05b6023675d77979637b04a350c85903fbf5925
7\/library\std\src\rt.rs:51
24: 0x7ff799f536a7 - main
25: 0x7ff79a02d788 - invoke_main
at d:\A01\_work\6\s\src\vctools\crt\vcstartup\src
\startup\exe_common.inl:78
26: 0x7ff79a02d788 - __scrt_common_main_seh
at d:\A01\_work\6\s\src\vctools\crt\vcstartup\src
\startup\exe_common.inl:288
27: 0x7ffe6bf47034 - BaseThreadInitThunk
28: 0x7ffe6c89d241 - RtlUserThreadStart
```
</p>
</details>
New Output:
```
Invalid command: Invalid arguments to shlex::split: ` - "$foo` on line 26
```
I've hit this a couple of times; this change makes debugging a little nicer.
use jsonpath_lib::select;
use lazy_static::lazy_static;
use regex::{Regex, RegexBuilder};
use serde_json::Value;
use std::{env, fmt, fs};
mod cache;
mod config;
mod error;
use cache::Cache;
use config::parse_config;
use error::CkError;
/// Entry point: parse the config, run every command from the template against
/// the doc cache, and report all failures before exiting non-zero.
fn main() -> Result<(), String> {
    let config = parse_config(env::args().collect());
    let mut cache = Cache::new(&config.doc_dir);
    let commands = get_commands(&config.template)
        .map_err(|_| format!("Jsondocck failed for {}", &config.template))?;
    // Run every command, remembering each failure rather than bailing early.
    let failed: Vec<_> = commands
        .into_iter()
        .filter_map(|command| check_command(command, &mut cache).err())
        .collect();
    if failed.is_empty() {
        return Ok(());
    }
    for error in failed {
        eprintln!("{}", error);
    }
    Err(format!("Jsondocck failed for {}", &config.template))
}
/// A single parsed `@` directive from a test template.
#[derive(Debug)]
pub struct Command {
    /// Whether the command was written `@!cmd` (check is expected to fail).
    negated: bool,
    /// Which check to perform.
    kind: CommandKind,
    /// Shell-style (shlex) split arguments.
    args: Vec<String>,
    /// 1-based source line, kept for error reporting.
    lineno: usize,
}
/// The set of directive names recognized in templates.
#[derive(Debug)]
pub enum CommandKind {
    /// `@has <path> [jsonpath [value]]`
    Has,
    /// `@count <path> <jsonpath> <count>`
    Count,
}
impl CommandKind {
    /// Checks that `args` is a plausible argument list for this command at
    /// template line `lineno`; prints an error and returns `false` otherwise.
    fn validate(&self, args: &[String], command_num: usize, lineno: usize) -> bool {
        let arity_ok = match self {
            CommandKind::Has => matches!(args.len(), 1..=3),
            CommandKind::Count => args.len() == 3,
        };
        if !arity_ok {
            print_err(&format!("Incorrect number of arguments to `@{}`", self), lineno);
            return false;
        }
        // `-` means "reuse the previous command's path", which the very first
        // command cannot do.
        if command_num == 0 && args[0] == "-" {
            print_err("Tried to use the previous path in the first command", lineno);
            return false;
        }
        if matches!(self, CommandKind::Count) && args[2].parse::<usize>().is_err() {
            print_err("Third argument to @count must be a valid usize", lineno);
            return false;
        }
        true
    }
}
impl fmt::Display for CommandKind {
    /// Writes the command name exactly as it appears in templates.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            CommandKind::Has => "has",
            CommandKind::Count => "count",
        })
    }
}
lazy_static! {
    // Matches a jsondocck directive anywhere on a line: some whitespace, an
    // optional stray `!` *before* the `@` (captured as `invalid` so it can be
    // diagnosed), the `@`, optional negation after it (`negated`), the command
    // name (`cmd`, letters with optional dashed segments), and the rest of the
    // line as raw arguments (`args`).
    static ref LINE_PATTERN: Regex = RegexBuilder::new(
        r#"
        \s(?P<invalid>!?)@(?P<negated>!?)
        (?P<cmd>[A-Za-z]+(?:-[A-Za-z]+)*)
        (?P<args>.*)$
        "#
    )
    // `ignore_whitespace` lets the pattern above be written multi-line.
    .ignore_whitespace(true)
    .unicode(true)
    .build()
    .unwrap();
}
/// Reports a malformed command at template line `lineno` on stderr.
fn print_err(msg: &str, lineno: usize) {
    let rendered = format!("Invalid command: {} on line {}", msg, lineno);
    eprintln!("{}", rendered)
}
/// Get a list of commands from a file. Does the work of ensuring the commands
/// are syntactically valid.
fn get_commands(template: &str) -> Result<Vec<Command>, ()> {
    let mut commands = Vec::new();
    let mut errors = false;
    let file = fs::read_to_string(template).unwrap();
    for (lineno, line) in file.split('\n').enumerate() {
        // Report 1-based line numbers to the user.
        let lineno = lineno + 1;
        // Lines that don't look like `@command` directives are ignored.
        let cap = match LINE_PATTERN.captures(line) {
            Some(c) => c,
            None => continue,
        };
        let negated = cap.name("negated").unwrap().as_str() == "!";
        let cmd = cap.name("cmd").unwrap().as_str();
        let cmd = match cmd {
            "has" => CommandKind::Has,
            "count" => CommandKind::Count,
            _ => {
                print_err(&format!("Unrecognized command name `@{}`", cmd), lineno);
                errors = true;
                continue;
            }
        };
        // A `!` *before* the `@` is a typo for negation; point the user at `@!`.
        if let Some(m) = cap.name("invalid") {
            if m.as_str() == "!" {
                print_err(
                    &format!(
                        "`!@{0}{1}`, (help: try with `@!{1}`)",
                        if negated { "!" } else { "" },
                        cmd,
                    ),
                    lineno,
                );
                errors = true;
                continue;
            }
        }
        // `shlex::split` returns `None` for malformed input (e.g. unbalanced
        // quotes); surface that as a normal error instead of panicking.
        let args = cap.name("args").map_or(Some(vec![]), |m| shlex::split(m.as_str()));
        let args = match args {
            Some(args) => args,
            None => {
                print_err(
                    &format!(
                        "Invalid arguments to shlex::split: `{}`",
                        cap.name("args").unwrap().as_str()
                    ),
                    lineno,
                );
                errors = true;
                continue;
            }
        };
        if !cmd.validate(&args, commands.len(), lineno) {
            errors = true;
            continue;
        }
        commands.push(Command { negated, kind: cmd, args, lineno })
    }
    // Parsing continues past errors so every problem is reported in one run.
    if !errors { Ok(commands) } else { Err(()) }
}
/// Performs the actual work of ensuring a command passes. Generally assumes the command
/// is syntactically valid.
fn check_command(command: Command, cache: &mut Cache) -> Result<(), CkError> {
    // Raw outcome of the check, before negation is applied.
    let result = match command.kind {
        CommandKind::Has => {
            match command.args.len() {
                // @has <path> = file existence
                1 => cache.get_file(&command.args[0]).is_ok(),
                // @has <path> <jsonpath> = check path exists
                2 => {
                    let val = cache.get_value(&command.args[0])?;
                    match select(&val, &command.args[1]) {
                        Ok(results) => !results.is_empty(),
                        // A malformed jsonpath simply fails the check.
                        Err(_) => false,
                    }
                }
                // @has <path> <jsonpath> <value> = check *any* item matched by path equals value
                3 => {
                    let val = cache.get_value(&command.args[0])?;
                    match select(&val, &command.args[1]) {
                        Ok(results) => {
                            // args[2] was not validated as JSON at parse time,
                            // so this unwrap can still panic on bad templates.
                            let pat: Value = serde_json::from_str(&command.args[2]).unwrap();
                            !results.is_empty() && results.into_iter().any(|val| *val == pat)
                        }
                        Err(_) => false,
                    }
                }
                // Arity was already checked by `CommandKind::validate`.
                _ => unreachable!(),
            }
        }
        CommandKind::Count => {
            // @count <path> <jsonpath> <count> = Check that the jsonpath matches exactly [count] times
            assert_eq!(command.args.len(), 3);
            // `validate` guaranteed args[2] parses as usize.
            let expected: usize = command.args[2].parse().unwrap();
            let val = cache.get_value(&command.args[0])?;
            match select(&val, &command.args[1]) {
                Ok(results) => results.len() == expected,
                Err(_) => false,
            }
        }
    };
    // A command fails when its outcome equals its negation flag:
    // plain `@cmd` fails on false, `@!cmd` fails on true.
    if result == command.negated {
        if command.negated {
            Err(CkError::FailedCheck(
                format!(
                    "`@!{} {}` matched when it shouldn't",
                    command.kind,
                    command.args.join(" ")
                ),
                command,
            ))
        } else {
            // FIXME: In the future, try 'peeling back' each step, and see at what level the match failed
            Err(CkError::FailedCheck(
                format!(
                    "`@{} {}` didn't match when it should",
                    command.kind,
                    command.args.join(" ")
                ),
                command,
            ))
        }
    } else {
        Ok(())
    }
}
|
//! ESMTP features
use crate::transport::smtp::{
authentication::Mechanism, error::Error, response::Response, util::XText,
};
use std::{
collections::HashSet,
fmt::{self, Display, Formatter},
net::{Ipv4Addr, Ipv6Addr},
result::Result,
};
/// Default client id, used when no hostname is available
const DEFAULT_DOMAIN_CLIENT_ID: &str = "localhost";
/// Client identifier, the parameter to `EHLO`
///
/// Identifies the client to the server at the start of an SMTP session.
#[derive(PartialEq, Eq, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum ClientId {
    /// A fully-qualified domain name
    Domain(String),
    /// An IPv4 address
    Ipv4(Ipv4Addr),
    /// An IPv6 address
    Ipv6(Ipv6Addr),
}
impl Display for ClientId {
    /// Domains print verbatim; IP addresses use their own `Display` impls.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            ClientId::Domain(value) => f.write_str(value),
            ClientId::Ipv4(value) => Display::fmt(value, f),
            ClientId::Ipv6(value) => Display::fmt(value, f),
        }
    }
}
impl ClientId {
    /// Creates a new `ClientId` from a fully qualified domain name
    pub fn new(domain: String) -> ClientId {
        ClientId::Domain(domain)
    }
    /// Defines a `ClientId` with the current hostname, or `localhost` if the hostname could
    /// not be found
    #[cfg(feature = "hostname")]
    pub fn hostname() -> ClientId {
        // Fall back to the default id when the OS gives no name or the name
        // is not valid UTF-8.
        let name = hostname::get()
            .ok()
            .and_then(|name| name.into_string().ok())
            .unwrap_or_else(|| DEFAULT_DOMAIN_CLIENT_ID.to_string());
        ClientId::Domain(name)
    }
    /// Defines a `ClientId` with the default `localhost` hostname
    #[cfg(not(feature = "hostname"))]
    pub fn hostname() -> ClientId {
        ClientId::Domain(DEFAULT_DOMAIN_CLIENT_ID.to_string())
    }
}
/// Supported ESMTP keywords
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum Extension {
    /// 8BITMIME keyword
    ///
    /// RFC 6152: <https://tools.ietf.org/html/rfc6152>
    EightBitMime,
    /// SMTPUTF8 keyword
    ///
    /// RFC 6531: <https://tools.ietf.org/html/rfc6531>
    SmtpUtfEight,
    /// STARTTLS keyword
    ///
    /// RFC 2487: <https://tools.ietf.org/html/rfc2487>
    StartTls,
    /// AUTH mechanism
    Authentication(Mechanism),
}
impl Display for Extension {
    /// Renders the keyword exactly as advertised in an EHLO response.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            Extension::EightBitMime => f.write_str("8BITMIME"),
            Extension::SmtpUtfEight => f.write_str("SMTPUTF8"),
            Extension::StartTls => f.write_str("STARTTLS"),
            Extension::Authentication(mechanism) => write!(f, "AUTH {}", mechanism),
        }
    }
}
/// Contains information about an SMTP server, as learned from its banner and
/// `EHLO` response
#[derive(Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ServerInfo {
    /// Server name
    ///
    /// The name given in the server banner
    pub name: String,
    /// ESMTP features supported by the server
    ///
    /// It contains the features supported by the server and known by the `Extension` module.
    pub features: HashSet<Extension>,
}
impl Display for ServerInfo {
    /// Prints `<name> with <features>`, where the features are the debug view
    /// of the set, or `no supported features` when the set is empty.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "{} with ", self.name)?;
        if self.features.is_empty() {
            f.write_str("no supported features")
        } else {
            write!(f, "{:?}", self.features)
        }
    }
}
impl ServerInfo {
    /// Parses a EHLO response to create a `ServerInfo`
    ///
    /// The first word of the response is taken as the server name; each
    /// following line is scanned for extension keywords known to this module.
    /// Unknown keywords (e.g. `SIZE`) are ignored.
    pub fn from_response(response: &Response) -> Result<ServerInfo, Error> {
        let name = match response.first_word() {
            Some(name) => name,
            None => return Err(Error::ResponseParsing("Could not read server name")),
        };
        let mut features: HashSet<Extension> = HashSet::new();
        for line in response.message.as_slice() {
            // The first token is the keyword; the rest are its parameters.
            // FIX: taking the token through `match` also skips blank and
            // whitespace-only lines. The previous `line.is_empty()` guard let
            // a line of spaces through, and `split.next().unwrap()` panicked.
            let mut split = line.split_whitespace();
            let keyword = match split.next() {
                Some(keyword) => keyword,
                None => continue,
            };
            match keyword {
                "8BITMIME" => {
                    features.insert(Extension::EightBitMime);
                }
                "SMTPUTF8" => {
                    features.insert(Extension::SmtpUtfEight);
                }
                "STARTTLS" => {
                    features.insert(Extension::StartTls);
                }
                "AUTH" => {
                    // The remaining tokens name mechanisms; keep only those
                    // this crate implements.
                    for mechanism in split {
                        match mechanism {
                            "PLAIN" => {
                                features.insert(Extension::Authentication(Mechanism::Plain));
                            }
                            "LOGIN" => {
                                features.insert(Extension::Authentication(Mechanism::Login));
                            }
                            "XOAUTH2" => {
                                features.insert(Extension::Authentication(Mechanism::Xoauth2));
                            }
                            _ => (),
                        }
                    }
                }
                _ => (),
            }
        }
        Ok(ServerInfo {
            name: name.to_string(),
            features,
        })
    }
    /// Checks if the server supports an ESMTP feature
    pub fn supports_feature(&self, keyword: Extension) -> bool {
        self.features.contains(&keyword)
    }
    /// Checks if the server supports an authentication mechanism
    pub fn supports_auth_mechanism(&self, mechanism: Mechanism) -> bool {
        self.features
            .contains(&Extension::Authentication(mechanism))
    }
    /// Gets a compatible mechanism from list
    ///
    /// Returns the first mechanism in `mechanisms` that the server advertises.
    pub fn get_auth_mechanism(&self, mechanisms: &[Mechanism]) -> Option<Mechanism> {
        mechanisms
            .iter()
            .copied()
            .find(|mechanism| self.supports_auth_mechanism(*mechanism))
    }
}
/// A `MAIL FROM` extension parameter
///
/// Rendered as `KEYWORD` or `KEYWORD=value` by the `Display` impl below.
#[derive(PartialEq, Eq, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum MailParameter {
    /// `BODY` parameter
    Body(MailBodyParameter),
    /// `SIZE` parameter
    Size(usize),
    /// `SMTPUTF8` parameter
    SmtpUtfEight,
    /// Custom parameter
    Other {
        /// Parameter keyword
        keyword: String,
        /// Parameter value
        value: Option<String>,
    },
}
impl Display for MailParameter {
    /// Renders the parameter as sent on the wire; custom values are
    /// xtext-encoded via `XText`.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            MailParameter::Body(value) => write!(f, "BODY={}", value),
            MailParameter::Size(size) => write!(f, "SIZE={}", size),
            MailParameter::SmtpUtfEight => f.write_str("SMTPUTF8"),
            MailParameter::Other { keyword, value: Some(value) } => {
                write!(f, "{}={}", keyword, XText(value))
            }
            MailParameter::Other { keyword, value: None } => f.write_str(keyword),
        }
    }
}
/// Values for the `BODY` parameter to `MAIL FROM`
///
/// Written as `BODY=<value>` by `MailParameter::Body`.
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum MailBodyParameter {
    /// `7BIT`
    SevenBit,
    /// `8BITMIME`
    EightBitMime,
}
impl Display for MailBodyParameter {
    /// Renders the wire keyword for the body encoding.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let keyword = match self {
            MailBodyParameter::SevenBit => "7BIT",
            MailBodyParameter::EightBitMime => "8BITMIME",
        };
        f.write_str(keyword)
    }
}
/// A `RCPT TO` extension parameter
///
/// Currently only custom (`Other`) parameters are supported.
#[derive(PartialEq, Eq, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum RcptParameter {
    /// Custom parameter
    Other {
        /// Parameter keyword
        keyword: String,
        /// Parameter value
        value: Option<String>,
    },
}
impl Display for RcptParameter {
    /// Renders `KEYWORD` or `KEYWORD=value`, xtext-encoding the value.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // Irrefutable: `Other` is the only variant today.
        let RcptParameter::Other { keyword, value } = self;
        match value {
            Some(value) => write!(f, "{}={}", keyword, XText(value)),
            None => f.write_str(keyword),
        }
    }
}
#[cfg(test)]
mod test {
    use super::{ClientId, Extension, ServerInfo};
    use crate::transport::smtp::{
        authentication::Mechanism,
        response::{Category, Code, Detail, Response, Severity},
    };
    use std::collections::HashSet;
    // `ClientId::Domain` displays as the bare domain string.
    #[test]
    fn test_clientid_fmt() {
        assert_eq!(
            format!("{}", ClientId::new("test".to_string())),
            "test".to_string()
        );
    }
    // Extensions display as their EHLO keywords.
    #[test]
    fn test_extension_fmt() {
        assert_eq!(
            format!("{}", Extension::EightBitMime),
            "8BITMIME".to_string()
        );
        assert_eq!(
            format!("{}", Extension::Authentication(Mechanism::Plain)),
            "AUTH PLAIN".to_string()
        );
    }
    // `ServerInfo` displays its name plus a debug view of its feature set.
    #[test]
    fn test_serverinfo_fmt() {
        let mut eightbitmime = HashSet::new();
        assert!(eightbitmime.insert(Extension::EightBitMime));
        assert_eq!(
            format!(
                "{}",
                ServerInfo {
                    name: "name".to_string(),
                    features: eightbitmime.clone(),
                }
            ),
            "name with {EightBitMime}".to_string()
        );
        let empty = HashSet::new();
        assert_eq!(
            format!(
                "{}",
                ServerInfo {
                    name: "name".to_string(),
                    features: empty,
                }
            ),
            "name with no supported features".to_string()
        );
        let mut plain = HashSet::new();
        assert!(plain.insert(Extension::Authentication(Mechanism::Plain)));
        assert_eq!(
            format!(
                "{}",
                ServerInfo {
                    name: "name".to_string(),
                    features: plain.clone(),
                }
            ),
            "name with {Authentication(Plain)}".to_string()
        );
    }
    // EHLO parsing: known keywords are collected; unknown ones (SIZE,
    // CRAM-MD5, OTHER) are ignored.
    #[test]
    fn test_serverinfo() {
        let response = Response::new(
            Code::new(
                Severity::PositiveCompletion,
                Category::Unspecified4,
                Detail::One,
            ),
            vec![
                "me".to_string(),
                "8BITMIME".to_string(),
                "SIZE 42".to_string(),
            ],
        );
        let mut features = HashSet::new();
        assert!(features.insert(Extension::EightBitMime));
        let server_info = ServerInfo {
            name: "me".to_string(),
            features,
        };
        assert_eq!(ServerInfo::from_response(&response).unwrap(), server_info);
        assert!(server_info.supports_feature(Extension::EightBitMime));
        assert!(!server_info.supports_feature(Extension::StartTls));
        let response2 = Response::new(
            Code::new(
                Severity::PositiveCompletion,
                Category::Unspecified4,
                Detail::One,
            ),
            vec![
                "me".to_string(),
                "AUTH PLAIN CRAM-MD5 XOAUTH2 OTHER".to_string(),
                "8BITMIME".to_string(),
                "SIZE 42".to_string(),
            ],
        );
        let mut features2 = HashSet::new();
        assert!(features2.insert(Extension::EightBitMime));
        assert!(features2.insert(Extension::Authentication(Mechanism::Plain),));
        assert!(features2.insert(Extension::Authentication(Mechanism::Xoauth2),));
        let server_info2 = ServerInfo {
            name: "me".to_string(),
            features: features2,
        };
        assert_eq!(ServerInfo::from_response(&response2).unwrap(), server_info2);
        assert!(server_info2.supports_feature(Extension::EightBitMime));
        assert!(server_info2.supports_auth_mechanism(Mechanism::Plain));
        assert!(!server_info2.supports_feature(Extension::StartTls));
    }
}
chore: simplify ClientId::hostname
//! ESMTP features
use crate::transport::smtp::{
authentication::Mechanism, error::Error, response::Response, util::XText,
};
use std::{
collections::HashSet,
fmt::{self, Display, Formatter},
net::{Ipv4Addr, Ipv6Addr},
result::Result,
};
/// Default client id, used when no hostname is available
const DEFAULT_DOMAIN_CLIENT_ID: &str = "localhost";
/// Client identifier, the parameter to `EHLO`
///
/// Identifies the client to the server at the start of an SMTP session.
#[derive(PartialEq, Eq, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum ClientId {
    /// A fully-qualified domain name
    Domain(String),
    /// An IPv4 address
    Ipv4(Ipv4Addr),
    /// An IPv6 address
    Ipv6(Ipv6Addr),
}
impl Display for ClientId {
    /// Domains print verbatim; IP addresses use their own `Display` impls.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            ClientId::Domain(value) => f.write_str(value),
            ClientId::Ipv4(value) => Display::fmt(value, f),
            ClientId::Ipv6(value) => Display::fmt(value, f),
        }
    }
}
impl ClientId {
    /// Creates a new `ClientId` from a fully qualified domain name
    pub fn new(domain: String) -> ClientId {
        ClientId::Domain(domain)
    }
    /// Defines a `ClientId` with the current hostname, or `localhost` if hostname could not be
    /// found
    pub fn hostname() -> ClientId {
        #[cfg(feature = "hostname")]
        // Fall back to the default id when the OS gives no name or the name
        // is not valid UTF-8.
        return ClientId::Domain(match hostname::get().ok().and_then(|s| s.into_string().ok()) {
            Some(name) => name,
            None => DEFAULT_DOMAIN_CLIENT_ID.to_string(),
        });
        #[cfg(not(feature = "hostname"))]
        return ClientId::Domain(DEFAULT_DOMAIN_CLIENT_ID.to_string());
    }
}
/// Supported ESMTP keywords
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum Extension {
    /// 8BITMIME keyword
    ///
    /// RFC 6152: <https://tools.ietf.org/html/rfc6152>
    EightBitMime,
    /// SMTPUTF8 keyword
    ///
    /// RFC 6531: <https://tools.ietf.org/html/rfc6531>
    SmtpUtfEight,
    /// STARTTLS keyword
    ///
    /// RFC 2487: <https://tools.ietf.org/html/rfc2487>
    StartTls,
    /// AUTH mechanism
    Authentication(Mechanism),
}
impl Display for Extension {
    /// Renders the keyword exactly as advertised in an EHLO response.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            Extension::EightBitMime => f.write_str("8BITMIME"),
            Extension::SmtpUtfEight => f.write_str("SMTPUTF8"),
            Extension::StartTls => f.write_str("STARTTLS"),
            Extension::Authentication(mechanism) => write!(f, "AUTH {}", mechanism),
        }
    }
}
/// Contains information about an SMTP server, as learned from its banner and
/// `EHLO` response
#[derive(Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ServerInfo {
    /// Server name
    ///
    /// The name given in the server banner
    pub name: String,
    /// ESMTP features supported by the server
    ///
    /// It contains the features supported by the server and known by the `Extension` module.
    pub features: HashSet<Extension>,
}
impl Display for ServerInfo {
    /// Prints `<name> with <features>`, where the features are the debug view
    /// of the set, or `no supported features` when the set is empty.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "{} with ", self.name)?;
        if self.features.is_empty() {
            f.write_str("no supported features")
        } else {
            write!(f, "{:?}", self.features)
        }
    }
}
impl ServerInfo {
    /// Parses a EHLO response to create a `ServerInfo`
    ///
    /// The first word of the response is taken as the server name; each
    /// following line is scanned for extension keywords known to this module.
    /// Unknown keywords (e.g. `SIZE`) are ignored.
    pub fn from_response(response: &Response) -> Result<ServerInfo, Error> {
        let name = match response.first_word() {
            Some(name) => name,
            None => return Err(Error::ResponseParsing("Could not read server name")),
        };
        let mut features: HashSet<Extension> = HashSet::new();
        for line in response.message.as_slice() {
            // The first token is the keyword; the rest are its parameters.
            // FIX: taking the token through `match` also skips blank and
            // whitespace-only lines. The previous `line.is_empty()` guard let
            // a line of spaces through, and `split.next().unwrap()` panicked.
            let mut split = line.split_whitespace();
            let keyword = match split.next() {
                Some(keyword) => keyword,
                None => continue,
            };
            match keyword {
                "8BITMIME" => {
                    features.insert(Extension::EightBitMime);
                }
                "SMTPUTF8" => {
                    features.insert(Extension::SmtpUtfEight);
                }
                "STARTTLS" => {
                    features.insert(Extension::StartTls);
                }
                "AUTH" => {
                    // The remaining tokens name mechanisms; keep only those
                    // this crate implements.
                    for mechanism in split {
                        match mechanism {
                            "PLAIN" => {
                                features.insert(Extension::Authentication(Mechanism::Plain));
                            }
                            "LOGIN" => {
                                features.insert(Extension::Authentication(Mechanism::Login));
                            }
                            "XOAUTH2" => {
                                features.insert(Extension::Authentication(Mechanism::Xoauth2));
                            }
                            _ => (),
                        }
                    }
                }
                _ => (),
            }
        }
        Ok(ServerInfo {
            name: name.to_string(),
            features,
        })
    }
    /// Checks if the server supports an ESMTP feature
    pub fn supports_feature(&self, keyword: Extension) -> bool {
        self.features.contains(&keyword)
    }
    /// Checks if the server supports an authentication mechanism
    pub fn supports_auth_mechanism(&self, mechanism: Mechanism) -> bool {
        self.features
            .contains(&Extension::Authentication(mechanism))
    }
    /// Gets a compatible mechanism from list
    ///
    /// Returns the first mechanism in `mechanisms` that the server advertises.
    pub fn get_auth_mechanism(&self, mechanisms: &[Mechanism]) -> Option<Mechanism> {
        mechanisms
            .iter()
            .copied()
            .find(|mechanism| self.supports_auth_mechanism(*mechanism))
    }
}
/// A `MAIL FROM` extension parameter
///
/// Rendered as `KEYWORD` or `KEYWORD=value` by the `Display` impl below.
#[derive(PartialEq, Eq, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum MailParameter {
    /// `BODY` parameter
    Body(MailBodyParameter),
    /// `SIZE` parameter
    Size(usize),
    /// `SMTPUTF8` parameter
    SmtpUtfEight,
    /// Custom parameter
    Other {
        /// Parameter keyword
        keyword: String,
        /// Parameter value
        value: Option<String>,
    },
}
impl Display for MailParameter {
    /// Renders the parameter as sent on the wire; custom values are
    /// xtext-encoded via `XText`.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            MailParameter::Body(value) => write!(f, "BODY={}", value),
            MailParameter::Size(size) => write!(f, "SIZE={}", size),
            MailParameter::SmtpUtfEight => f.write_str("SMTPUTF8"),
            MailParameter::Other { keyword, value: Some(value) } => {
                write!(f, "{}={}", keyword, XText(value))
            }
            MailParameter::Other { keyword, value: None } => f.write_str(keyword),
        }
    }
}
/// Values for the `BODY` parameter to `MAIL FROM`
///
/// Written as `BODY=<value>` by `MailParameter::Body`.
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum MailBodyParameter {
    /// `7BIT`
    SevenBit,
    /// `8BITMIME`
    EightBitMime,
}
impl Display for MailBodyParameter {
    /// Renders the wire keyword for the body encoding.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let keyword = match self {
            MailBodyParameter::SevenBit => "7BIT",
            MailBodyParameter::EightBitMime => "8BITMIME",
        };
        f.write_str(keyword)
    }
}
/// A `RCPT TO` extension parameter
///
/// Currently only custom (`Other`) parameters are supported.
#[derive(PartialEq, Eq, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum RcptParameter {
    /// Custom parameter
    Other {
        /// Parameter keyword
        keyword: String,
        /// Parameter value
        value: Option<String>,
    },
}
impl Display for RcptParameter {
    /// Renders `KEYWORD` or `KEYWORD=value`, xtext-encoding the value.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // Irrefutable: `Other` is the only variant today.
        let RcptParameter::Other { keyword, value } = self;
        match value {
            Some(value) => write!(f, "{}={}", keyword, XText(value)),
            None => f.write_str(keyword),
        }
    }
}
#[cfg(test)]
mod test {
    use super::{ClientId, Extension, ServerInfo};
    use crate::transport::smtp::{
        authentication::Mechanism,
        response::{Category, Code, Detail, Response, Severity},
    };
    use std::collections::HashSet;
    // `ClientId::Domain` displays as the bare domain string.
    #[test]
    fn test_clientid_fmt() {
        assert_eq!(
            format!("{}", ClientId::new("test".to_string())),
            "test".to_string()
        );
    }
    // Extensions display as their EHLO keywords.
    #[test]
    fn test_extension_fmt() {
        assert_eq!(
            format!("{}", Extension::EightBitMime),
            "8BITMIME".to_string()
        );
        assert_eq!(
            format!("{}", Extension::Authentication(Mechanism::Plain)),
            "AUTH PLAIN".to_string()
        );
    }
    // `ServerInfo` displays its name plus a debug view of its feature set.
    #[test]
    fn test_serverinfo_fmt() {
        let mut eightbitmime = HashSet::new();
        assert!(eightbitmime.insert(Extension::EightBitMime));
        assert_eq!(
            format!(
                "{}",
                ServerInfo {
                    name: "name".to_string(),
                    features: eightbitmime.clone(),
                }
            ),
            "name with {EightBitMime}".to_string()
        );
        let empty = HashSet::new();
        assert_eq!(
            format!(
                "{}",
                ServerInfo {
                    name: "name".to_string(),
                    features: empty,
                }
            ),
            "name with no supported features".to_string()
        );
        let mut plain = HashSet::new();
        assert!(plain.insert(Extension::Authentication(Mechanism::Plain)));
        assert_eq!(
            format!(
                "{}",
                ServerInfo {
                    name: "name".to_string(),
                    features: plain.clone(),
                }
            ),
            "name with {Authentication(Plain)}".to_string()
        );
    }
    // EHLO parsing: known keywords are collected; unknown ones (SIZE,
    // CRAM-MD5, OTHER) are ignored.
    #[test]
    fn test_serverinfo() {
        let response = Response::new(
            Code::new(
                Severity::PositiveCompletion,
                Category::Unspecified4,
                Detail::One,
            ),
            vec![
                "me".to_string(),
                "8BITMIME".to_string(),
                "SIZE 42".to_string(),
            ],
        );
        let mut features = HashSet::new();
        assert!(features.insert(Extension::EightBitMime));
        let server_info = ServerInfo {
            name: "me".to_string(),
            features,
        };
        assert_eq!(ServerInfo::from_response(&response).unwrap(), server_info);
        assert!(server_info.supports_feature(Extension::EightBitMime));
        assert!(!server_info.supports_feature(Extension::StartTls));
        let response2 = Response::new(
            Code::new(
                Severity::PositiveCompletion,
                Category::Unspecified4,
                Detail::One,
            ),
            vec![
                "me".to_string(),
                "AUTH PLAIN CRAM-MD5 XOAUTH2 OTHER".to_string(),
                "8BITMIME".to_string(),
                "SIZE 42".to_string(),
            ],
        );
        let mut features2 = HashSet::new();
        assert!(features2.insert(Extension::EightBitMime));
        assert!(features2.insert(Extension::Authentication(Mechanism::Plain),));
        assert!(features2.insert(Extension::Authentication(Mechanism::Xoauth2),));
        let server_info2 = ServerInfo {
            name: "me".to_string(),
            features: features2,
        };
        assert_eq!(ServerInfo::from_response(&response2).unwrap(), server_info2);
        assert!(server_info2.supports_feature(Extension::EightBitMime));
        assert!(server_info2.supports_auth_mechanism(Mechanism::Plain));
        assert!(!server_info2.supports_feature(Extension::StartTls));
    }
}
|
use value::AMQPValue;
use std::collections::BTreeMap;
use std::fmt;
/// The type tag of an AMQP field-table value.
///
/// Each variant corresponds to one identifier character; see `from_id` /
/// `get_id` for the mapping.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub enum AMQPType {
    Boolean,
    ShortShortInt,
    ShortShortUInt,
    ShortInt,
    ShortUInt,
    LongInt,
    LongUInt,
    LongLongInt,
    LongLongUInt,
    Float,
    Double,
    DecimalValue,
    LongString,
    FieldArray,
    Timestamp,
    FieldTable,
    Void,
    // TODO: byte array (x)
}
impl AMQPType {
    /// Parses a type from its field-table identifier character.
    ///
    /// Returns `None` for unrecognized identifiers.
    pub fn from_id(id: char) -> Option<AMQPType> {
        match id {
            't' => Some(AMQPType::Boolean),
            'b' => Some(AMQPType::ShortShortInt),
            'B' => Some(AMQPType::ShortShortUInt),
            /* FIX: also accept the specification's 'U' for short int.
             * Specs says 'U', RabbitMQ says 's' (which means ShortString in specs) */
            's' => Some(AMQPType::ShortInt),
            'U' => Some(AMQPType::ShortInt),
            'u' => Some(AMQPType::ShortUInt),
            'I' => Some(AMQPType::LongInt),
            'i' => Some(AMQPType::LongUInt),
            /* RabbitMQ treats both 'l' and 'L' as LongLongInt and ignores LongLongUInt */
            'L' => Some(AMQPType::LongLongInt),
            'l' => Some(AMQPType::LongLongInt),
            'f' => Some(AMQPType::Float),
            'd' => Some(AMQPType::Double),
            'D' => Some(AMQPType::DecimalValue),
            'S' => Some(AMQPType::LongString),
            'A' => Some(AMQPType::FieldArray),
            'T' => Some(AMQPType::Timestamp),
            'F' => Some(AMQPType::FieldTable),
            'V' => Some(AMQPType::Void),
            _ => None,
        }
    }
    /// Returns the identifier character used when serializing this type.
    ///
    /// Not a strict inverse of `from_id`: both `LongLongInt` and
    /// `LongLongUInt` serialize as 'l', and `ShortInt` serializes as the
    /// RabbitMQ-style 's'.
    pub fn get_id(&self) -> char {
        match *self {
            AMQPType::Boolean => 't',
            AMQPType::ShortShortInt => 'b',
            AMQPType::ShortShortUInt => 'B',
            AMQPType::ShortInt => 's',
            AMQPType::ShortUInt => 'u',
            AMQPType::LongInt => 'I',
            AMQPType::LongUInt => 'i',
            /* RabbitMQ treats both 'l' and 'L' as LongLongInt and ignores LongLongUInt */
            AMQPType::LongLongInt => 'l',
            AMQPType::LongLongUInt => 'l',
            AMQPType::Float => 'f',
            AMQPType::Double => 'd',
            AMQPType::DecimalValue => 'D',
            AMQPType::LongString => 'S',
            AMQPType::FieldArray => 'A',
            AMQPType::Timestamp => 'T',
            AMQPType::FieldTable => 'F',
            AMQPType::Void => 'V',
        }
    }
    /// Returns the variant name as a `String` (delegates to `Display`).
    pub fn to_string(&self) -> String {
        format!("{}", self)
    }
}
impl fmt::Display for AMQPType {
    /// Displays the variant name, delegating to the derived `Debug` output.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
// Rust representations of the AMQP scalar types...
pub type Boolean = bool;
pub type ShortShortInt = i8;
pub type ShortShortUInt = u8;
pub type ShortInt = i16;
pub type ShortUInt = u16;
pub type LongInt = i32;
pub type LongUInt = u32;
pub type LongLongInt = i64;
pub type LongLongUInt = u64;
pub type Float = f32;
pub type Double = f64;
pub type LongString = String;
// ...and the container types, which hold arbitrary `AMQPValue`s.
pub type FieldArray = Vec<AMQPValue>;
// Timestamps travel as 64-bit unsigned integers.
pub type Timestamp = LongLongUInt;
pub type FieldTable = BTreeMap<LongString, AMQPValue>;
pub type Void = ();
/// An AMQP decimal value.
// NOTE(review): presumably represents `value` scaled by 10^-`scale` per the
// AMQP decimal encoding — confirm against the spec / serialization code.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct DecimalValue {
    /// Number of decimal digits
    pub scale: ShortShortUInt,
    /// Unscaled integer value
    pub value: LongUInt,
}
#[cfg(test)]
mod test {
    use super::*;
    // `from_id` maps identifier chars to types; unknown chars yield `None`.
    #[test]
    fn test_type_from_id() {
        assert_eq!(AMQPType::from_id('T'), Some(AMQPType::Timestamp));
        assert_eq!(AMQPType::from_id('S'), Some(AMQPType::LongString));
        assert_eq!(AMQPType::from_id('s'), Some(AMQPType::ShortInt));
        assert_eq!(AMQPType::from_id('z'), None);
    }
    // `to_string` renders the variant name via the `Display` impl.
    #[test]
    fn test_type_to_string() {
        assert_eq!(AMQPType::Boolean.to_string(), "Boolean");
        assert_eq!(AMQPType::Void.to_string(), "Void");
    }
}
types: still handle 'U' for types
Signed-off-by: Marc-Antoine Perennou <07f76cf0511c79b361712839686f3cee8c75791c@Perennou.com>
use value::AMQPValue;
use std::collections::BTreeMap;
use std::fmt;
/// The type tag of an AMQP field-table value.
///
/// Each variant corresponds to one identifier character; see `from_id` /
/// `get_id` for the mapping.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub enum AMQPType {
    Boolean,
    ShortShortInt,
    ShortShortUInt,
    ShortInt,
    ShortUInt,
    LongInt,
    LongUInt,
    LongLongInt,
    LongLongUInt,
    Float,
    Double,
    DecimalValue,
    LongString,
    FieldArray,
    Timestamp,
    FieldTable,
    Void,
    // TODO: byte array (x)
}
impl AMQPType {
    /// Decodes a wire-format type identifier character into an `AMQPType`,
    /// returning `None` for unknown identifiers.
    pub fn from_id(id: char) -> Option<AMQPType> {
        let t = match id {
            't' => AMQPType::Boolean,
            'b' => AMQPType::ShortShortInt,
            'B' => AMQPType::ShortShortUInt,
            /* Specs says 'U', RabbitMQ says 's' (which means ShortString in specs) */
            's' | 'U' => AMQPType::ShortInt,
            'u' => AMQPType::ShortUInt,
            'I' => AMQPType::LongInt,
            'i' => AMQPType::LongUInt,
            /* RabbitMQ treats both 'l' and 'L' as LongLongInt and ignores LongLongUInt */
            'L' | 'l' => AMQPType::LongLongInt,
            'f' => AMQPType::Float,
            'd' => AMQPType::Double,
            'D' => AMQPType::DecimalValue,
            'S' => AMQPType::LongString,
            'A' => AMQPType::FieldArray,
            'T' => AMQPType::Timestamp,
            'F' => AMQPType::FieldTable,
            'V' => AMQPType::Void,
            _ => return None,
        };
        Some(t)
    }
    /// Encodes this type as its wire-format identifier character.
    pub fn get_id(&self) -> char {
        match *self {
            AMQPType::Boolean => 't',
            AMQPType::ShortShortInt => 'b',
            AMQPType::ShortShortUInt => 'B',
            /* Specs says 'U', RabbitMQ says 's' (which means ShortString in specs) */
            AMQPType::ShortInt => 's',
            AMQPType::ShortUInt => 'u',
            AMQPType::LongInt => 'I',
            AMQPType::LongUInt => 'i',
            /* RabbitMQ treats both 'l' and 'L' as LongLongInt and ignores LongLongUInt */
            AMQPType::LongLongInt | AMQPType::LongLongUInt => 'l',
            AMQPType::Float => 'f',
            AMQPType::Double => 'd',
            AMQPType::DecimalValue => 'D',
            AMQPType::LongString => 'S',
            AMQPType::FieldArray => 'A',
            AMQPType::Timestamp => 'T',
            AMQPType::FieldTable => 'F',
            AMQPType::Void => 'V',
        }
    }
    /// Renders this type as its variant name (e.g. "Boolean") via `Display`.
    pub fn to_string(&self) -> String {
        format!("{}", self)
    }
}
// Display shows the plain variant name by delegating to the derived Debug format.
impl fmt::Display for AMQPType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
// Rust representations of the AMQP field-table value types.
pub type Boolean = bool;
pub type ShortShortInt = i8;
pub type ShortShortUInt = u8;
pub type ShortInt = i16;
pub type ShortUInt = u16;
pub type LongInt = i32;
pub type LongUInt = u32;
pub type LongLongInt = i64;
pub type LongLongUInt = u64;
pub type Float = f32;
pub type Double = f64;
pub type LongString = String;
pub type FieldArray = Vec<AMQPValue>;
// Timestamp is an unsigned 64-bit value (presumably seconds since epoch — confirm against the AMQP spec).
pub type Timestamp = LongLongUInt;
pub type FieldTable = BTreeMap<LongString, AMQPValue>;
pub type Void = ();
// Fixed-point decimal: raw `value` with a decimal `scale` factor.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct DecimalValue {
pub scale: ShortShortUInt,
pub value: LongUInt,
}
// Unit tests for the id <-> type mapping and Display-based stringification.
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_type_from_id() {
assert_eq!(AMQPType::from_id('T'), Some(AMQPType::Timestamp));
assert_eq!(AMQPType::from_id('S'), Some(AMQPType::LongString));
assert_eq!(AMQPType::from_id('s'), Some(AMQPType::ShortInt));
assert_eq!(AMQPType::from_id('z'), None);
}
#[test]
fn test_type_to_string() {
assert_eq!(AMQPType::Boolean.to_string(), "Boolean");
assert_eq!(AMQPType::Void.to_string(), "Void");
}
}
|
//! A module for user processes
// So far it just contains some test code
use super::Process;
use super::ready_queue;
use super::super::vga::window::{Window, Color};
use super::super::data_structures::concurrency::{StaticSemaphore};
// useful constants
// VGA text-mode dimensions used for the full-screen window.
const ROWS: usize = 25;
const COLS: usize = 80;
// Some test semaphores
// NOTE(review): lowercase `static mut` names violate Rust naming conventions;
// renaming would touch every user below, so they are left as-is.
// `current` is the next border cell to paint; access is serialised by `s2`.
static mut current: (usize, usize) = (0,0);
// `s1` counts completed worker iterations; `s2` guards `current` and the window.
static mut s1: StaticSemaphore = StaticSemaphore::new(1);
static mut s2: StaticSemaphore = StaticSemaphore::new(1);
// Some test routines
/// Test driver process: spawns `loop_proc` workers that walk the screen border
/// and repeatedly checks that per-process virtual memory stays isolated.
pub fn run(this: &Process) -> usize {
let mut w0 = Window::new(COLS, ROWS, (0, 0));
let mut msg = Window::new(60, 4, (1,1));
// Write this process's pid into its own address space; if paging is correct,
// the value must survive even though workers write the same virtual address.
unsafe { *(0xf00000 as *mut usize) = this.pid; }
w0.set_bg(Color::LightBlue);
w0.paint();
msg.set_cursor((0,0));
msg.set_bg(Color::LightGray);
msg.set_fg(Color::Black);
msg.put_str("<-- If semaphores work correctly, then only this block \
should be red when all loop_procs finish running.\n\nYay :)");
// 2*(ROWS+COLS)-4 = 206 border cells; run three full laps around the border.
for _ in 0..206*3 {
ready_queue::make_ready(Process::new("loop_proc", super::user::run2));
// Wait for one worker to finish (each worker ups `s1` exactly once).
unsafe { s1.down(); }
// test vm
if unsafe { *(0xf00000 as *mut usize) } != this.pid {
panic!("Oh no! *0xf00000 should be {} but is {}",
this.pid, unsafe { *(0xf00000 as *mut usize) });
}
}
0
}
// Step one cell counter-clockwise along the border of the ROWS x COLS grid;
// the inverse of `get_next`.
fn get_prev((r, c): (usize, usize)) -> (usize, usize) {
    match (r, c) {
        // top edge: move left
        (0, c) if c > 0 => (0, c - 1),
        // right edge: move up
        (r, c) if c == COLS - 1 => (r - 1, c),
        // bottom edge: move right
        (r, c) if r == ROWS - 1 => (r, c + 1),
        // left edge: move down
        (r, c) => (r + 1, c),
    }
}
// Step one cell clockwise along the border of the ROWS x COLS grid.
fn get_next((r, c): (usize, usize)) -> (usize, usize) {
    match (r, c) {
        // top edge (before the corner): move right
        (0, c) if c < COLS - 1 => (0, c + 1),
        // right edge: move down
        (r, c) if c == COLS - 1 && r < ROWS - 1 => (r + 1, c),
        // bottom edge: move left
        (r, c) if r == ROWS - 1 && c > 0 => (r, c - 1),
        // left edge: move up
        (r, c) => (r - 1, c),
    }
}
/// Worker process: advances the shared border cursor one cell, erasing the
/// previous cell and drawing the current one in red, then signals the driver.
fn run2(this: &Process) -> usize {
// Mark our address space, then verify the write stuck (VM isolation check).
unsafe { *(0xf00000 as *mut usize) = this.pid; }
// test vm
if unsafe { *(0xf00000 as *mut usize) } != this.pid {
panic!("Oh no! *0xf00000 should be {} but is {}",
this.pid, unsafe { *(0xf00000 as *mut usize) });
}
// `s2` serialises access to the shared `current` cursor and the VGA window.
unsafe { s2.down(); }
let mut w = Window::new(COLS,ROWS, (0,0));
let me = unsafe { current };
let prev = get_prev(me);
unsafe {
current = get_next(me);
}
printf!("Erase ({},{}) ", prev.0, prev.1);
w.set_bg(Color::LightBlue);
w.set_cursor(prev);
w.put_char(' ');
printf!("Draw ({},{})\n", me.0,me.1);
w.set_bg(Color::Red);
w.set_cursor(me);
w.put_char(' ');
unsafe { s2.up(); }
// test vm
if unsafe { *(0xf00000 as *mut usize) } != this.pid {
panic!("Oh no! *0xf00000 should be {} but is {}",
this.pid, unsafe { *(0xf00000 as *mut usize) });
}
// Tell the driver (`run`) that one worker iteration completed.
unsafe { s1.up(); }
0
}
Minor change
//! A module for user processes
// So far it just contains some test code
use super::Process;
use super::ready_queue;
use super::super::vga::window::{Window, Color};
use super::super::data_structures::concurrency::{StaticSemaphore};
// useful constants
// VGA text-mode dimensions used for the full-screen window.
const ROWS: usize = 25;
const COLS: usize = 80;
// Some test semaphores
// NOTE(review): lowercase `static mut` names violate Rust naming conventions;
// renaming would touch every user below, so they are left as-is.
// `current` is the next border cell to paint; access is serialised by `s2`.
static mut current: (usize, usize) = (0,0);
// `s1` counts completed worker iterations; `s2` guards `current` and the window.
static mut s1: StaticSemaphore = StaticSemaphore::new(1);
static mut s2: StaticSemaphore = StaticSemaphore::new(1);
// Some test routines
/// Test driver process: spawns `loop_proc` workers that walk the screen border
/// and repeatedly checks that per-process virtual memory stays isolated.
/// Prints "Yay :)" only after every worker has completed.
pub fn run(this: &Process) -> usize {
let mut w0 = Window::new(COLS, ROWS, (0, 0));
let mut msg = Window::new(60, 4, (1,1));
// Write this process's pid into its own address space; if paging is correct,
// the value must survive even though workers write the same virtual address.
unsafe { *(0xf00000 as *mut usize) = this.pid; }
w0.set_bg(Color::LightBlue);
w0.paint();
msg.set_cursor((0,0));
msg.set_bg(Color::LightGray);
msg.set_fg(Color::Black);
msg.put_str("<-- If semaphores work correctly, then only this block \
should be red when all loop_procs finish running.");
// 2*(ROWS+COLS)-4 = 206 border cells; run three full laps around the border.
for _ in 0..206*3 {
ready_queue::make_ready(Process::new("loop_proc", super::user::run2));
// Wait for one worker to finish (each worker ups `s1` exactly once).
unsafe { s1.down(); }
// test vm
if unsafe { *(0xf00000 as *mut usize) } != this.pid {
panic!("Oh no! *0xf00000 should be {} but is {}",
this.pid, unsafe { *(0xf00000 as *mut usize) });
}
}
msg.put_str("\n\nYay :)");
0
}
// Step one cell counter-clockwise along the border of the ROWS x COLS grid;
// the inverse of `get_next`.
fn get_prev((r, c): (usize, usize)) -> (usize, usize) {
    match (r, c) {
        // top edge: move left
        (0, c) if c > 0 => (0, c - 1),
        // right edge: move up
        (r, c) if c == COLS - 1 => (r - 1, c),
        // bottom edge: move right
        (r, c) if r == ROWS - 1 => (r, c + 1),
        // left edge: move down
        (r, c) => (r + 1, c),
    }
}
// Step one cell clockwise along the border of the ROWS x COLS grid.
fn get_next((r, c): (usize, usize)) -> (usize, usize) {
    match (r, c) {
        // top edge (before the corner): move right
        (0, c) if c < COLS - 1 => (0, c + 1),
        // right edge: move down
        (r, c) if c == COLS - 1 && r < ROWS - 1 => (r + 1, c),
        // bottom edge: move left
        (r, c) if r == ROWS - 1 && c > 0 => (r, c - 1),
        // left edge: move up
        (r, c) => (r - 1, c),
    }
}
/// Worker process: advances the shared border cursor one cell, erasing the
/// previous cell and drawing the current one in red, then signals the driver.
fn run2(this: &Process) -> usize {
// Mark our address space, then verify the write stuck (VM isolation check).
unsafe { *(0xf00000 as *mut usize) = this.pid; }
// test vm
if unsafe { *(0xf00000 as *mut usize) } != this.pid {
panic!("Oh no! *0xf00000 should be {} but is {}",
this.pid, unsafe { *(0xf00000 as *mut usize) });
}
// `s2` serialises access to the shared `current` cursor and the VGA window.
unsafe { s2.down(); }
let mut w = Window::new(COLS,ROWS, (0,0));
let me = unsafe { current };
let prev = get_prev(me);
unsafe {
current = get_next(me);
}
printf!("Erase ({},{}) ", prev.0, prev.1);
w.set_bg(Color::LightBlue);
w.set_cursor(prev);
w.put_char(' ');
printf!("Draw ({},{})\n", me.0,me.1);
w.set_bg(Color::Red);
w.set_cursor(me);
w.put_char(' ');
unsafe { s2.up(); }
// test vm
if unsafe { *(0xf00000 as *mut usize) } != this.pid {
panic!("Oh no! *0xf00000 should be {} but is {}",
this.pid, unsafe { *(0xf00000 as *mut usize) });
}
// Tell the driver (`run`) that one worker iteration completed.
unsafe { s1.up(); }
0
}
|
#![allow(clippy::enum_variant_names, clippy::useless_format, clippy::too_many_arguments)]
use std::collections::VecDeque;
use std::fmt::Write;
use std::path::{Path, PathBuf};
use std::process::{Command, ExitStatus};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
pub use color_eyre;
use color_eyre::eyre::Result;
use colored::*;
use parser::{ErrorMatch, Pattern};
use regex::Regex;
use rustc_stderr::{Level, Message};
use crate::parser::{Comments, Condition};
mod parser;
mod rustc_stderr;
#[cfg(test)]
mod tests;
/// Global configuration for one test-suite run.
#[derive(Debug)]
pub struct Config {
/// Arguments passed to the binary that is executed.
pub args: Vec<String>,
/// `None` to run on the host, otherwise a target triple
pub target: Option<String>,
/// Filters applied to stderr output before processing it
pub stderr_filters: Filter,
/// Filters applied to stdout output before processing it
pub stdout_filters: Filter,
/// The folder in which to start searching for .rs files
pub root_dir: PathBuf,
/// Expected outcome (pass/panic/fail) for every test in this run.
pub mode: Mode,
/// The compiler/driver binary invoked for each test file.
pub program: PathBuf,
/// What to do when actual output differs from the stored reference files.
pub output_conflict_handling: OutputConflictHandling,
/// Only run tests with one of these strings in their path/name
pub path_filter: Vec<String>,
}
/// Strategy for mismatches between actual output and `.stderr`/`.stdout` files.
#[derive(Debug)]
pub enum OutputConflictHandling {
/// The default: emit a diff of the expected/actual output.
Error,
/// Ignore mismatches in the stderr/stdout files.
Ignore,
/// Instead of erroring if the stderr/stdout differs from the expected
/// automatically replace it with the found output (after applying filters).
Bless,
}
/// Regex/replacement pairs applied to output before comparison.
pub type Filter = Vec<(Regex, &'static str)>;
/// Runs every `.rs` test under `config.root_dir` on a pool of worker threads,
/// then prints a report. On any failure this prints details and terminates the
/// process with exit code 1 instead of returning.
pub fn run_tests(config: Config) -> Result<()> {
eprintln!(" Compiler flags: {:?}", config.args);
// Get the triple with which to run the tests
let target = config.target.clone().unwrap_or_else(|| config.get_host());
// A channel for files to process
let (submit, receive) = crossbeam::channel::unbounded();
// Some statistics and failure reports.
let failures = Mutex::new(vec![]);
let succeeded = AtomicUsize::default();
let ignored = AtomicUsize::default();
let filtered = AtomicUsize::default();
crossbeam::scope(|s| -> Result<()> {
// Create a thread that is in charge of walking the directory and submitting jobs.
// It closes the channel when it is done.
s.spawn(|_| {
let mut todo = VecDeque::new();
todo.push_back(config.root_dir.clone());
while let Some(path) = todo.pop_front() {
if path.is_dir() {
// Enqueue everything inside this directory.
// We want it sorted, to have some control over scheduling of slow tests.
let mut entries =
std::fs::read_dir(path).unwrap().collect::<Result<Vec<_>, _>>().unwrap();
entries.sort_by_key(|e| e.file_name());
for entry in entries {
todo.push_back(entry.path());
}
} else if path.extension().map(|ext| ext == "rs").unwrap_or(false) {
// Forward .rs files to the test workers.
submit.send(path).unwrap();
}
}
// There will be no more jobs. This signals the workers to quit.
// (This also ensures `submit` is moved into this closure.)
drop(submit);
});
let mut threads = vec![];
// Create N worker threads that receive files to test.
for _ in 0..std::thread::available_parallelism().unwrap().get() {
threads.push(s.spawn(|_| -> Result<()> {
for path in &receive {
// Command-line path filter: skip files not matching any filter string.
if !config.path_filter.is_empty() {
let path_display = path.display().to_string();
if !config.path_filter.iter().any(|filter| path_display.contains(filter)) {
filtered.fetch_add(1, Ordering::Relaxed);
continue;
}
}
let comments = Comments::parse_file(&path)?;
// Ignore file if only/ignore rules do (not) apply
if !test_file_conditions(&comments, &target, &config) {
ignored.fetch_add(1, Ordering::Relaxed);
eprintln!(
"{} ... {}",
path.display(),
"ignored (in-test comment)".yellow()
);
continue;
}
// Run the test for all revisions
for revision in
comments.revisions.clone().unwrap_or_else(|| vec![String::new()])
{
let (m, errors, stderr) =
run_test(&path, &config, &target, &revision, &comments);
// Using a single `eprintln!` to prevent messages from threads from getting intermingled.
let mut msg = format!("{} ", path.display());
if !revision.is_empty() {
write!(msg, "(revision `{revision}`) ").unwrap();
}
write!(msg, "... ").unwrap();
if errors.is_empty() {
eprintln!("{msg}{}", "ok".green());
succeeded.fetch_add(1, Ordering::Relaxed);
} else {
eprintln!("{msg}{}", "FAILED".red().bold());
failures.lock().unwrap().push((
path.clone(),
m,
revision,
errors,
stderr,
));
}
}
}
Ok(())
}));
}
// Propagate panics and the first I/O/parse error from any worker.
for thread in threads {
thread.join().unwrap()?;
}
Ok(())
})
.unwrap()?;
// Print all errors in a single thread to show reliable output
let failures = failures.into_inner().unwrap();
let succeeded = succeeded.load(Ordering::Relaxed);
let ignored = ignored.load(Ordering::Relaxed);
let filtered = filtered.load(Ordering::Relaxed);
if !failures.is_empty() {
for (path, miri, revision, errors, stderr) in &failures {
eprintln!();
eprint!("{}", path.display().to_string().underline());
if !revision.is_empty() {
eprint!(" (revision `{}`)", revision);
}
eprint!(" {}", "FAILED".red());
eprintln!();
eprintln!("command: {:?}", miri);
eprintln!();
let mut dump_stderr = true;
for error in errors {
match error {
Error::ExitStatus(mode, exit_status) => eprintln!("{mode:?} got {exit_status}"),
Error::PatternNotFound { pattern, definition_line } => {
match pattern {
Pattern::SubString(s) =>
eprintln!("substring `{s}` {} in stderr output", "not found".red()),
Pattern::Regex(r) =>
eprintln!("`/{r}/` does {} stderr output", "not match".red()),
}
eprintln!(
"expected because of pattern here: {}:{definition_line}",
path.display().to_string().bold()
);
}
Error::NoPatternsFound => {
eprintln!("{}", "no error patterns found in failure test".red());
}
Error::PatternFoundInPassTest =>
eprintln!("{}", "error pattern found in success test".red()),
Error::OutputDiffers { path, actual, expected } => {
// The stderr diff already shows the output; skip the raw dump below.
if path.extension().unwrap() == "stderr" {
dump_stderr = false;
}
eprintln!("actual output differed from expected {}", path.display());
eprintln!("{}", pretty_assertions::StrComparison::new(expected, actual));
eprintln!()
}
Error::ErrorsWithoutPattern { path: None, msgs } => {
eprintln!(
"There were {} unmatched diagnostics that occurred outside the testfile and had not pattern",
msgs.len(),
);
for Message { level, message } in msgs {
eprintln!(" {level:?}: {message}")
}
}
Error::ErrorsWithoutPattern { path: Some((path, line)), msgs } => {
eprintln!(
"There were {} unmatched diagnostics at {}:{line}",
msgs.len(),
path.display()
);
for Message { level, message } in msgs {
eprintln!(" {level:?}: {message}")
}
}
}
eprintln!();
}
// Unless we already dumped the stderr via an OutputDiffers diff, let's dump it here.
if dump_stderr {
eprintln!("actual stderr:");
eprintln!("{}", stderr);
eprintln!();
}
}
eprintln!("{}", "failures:".red().underline());
for (path, _miri, _revision, _errors, _stderr) in &failures {
eprintln!(" {}", path.display());
}
eprintln!();
eprintln!(
"test result: {}. {} tests failed, {} tests passed, {} ignored, {} filtered out",
"FAIL".red(),
failures.len().to_string().red().bold(),
succeeded.to_string().green(),
ignored.to_string().yellow(),
filtered.to_string().yellow(),
);
std::process::exit(1);
}
eprintln!();
eprintln!(
"test result: {}. {} tests passed, {} ignored, {} filtered out",
"ok".green(),
succeeded.to_string().green(),
ignored.to_string().yellow(),
filtered.to_string().yellow(),
);
eprintln!();
Ok(())
}
/// Failure modes a single test run can report.
#[derive(Debug)]
enum Error {
/// Got an invalid exit status for the given mode.
ExitStatus(Mode, ExitStatus),
/// A required error pattern was never seen in the output.
PatternNotFound {
pattern: Pattern,
/// Line in the test file where the pattern comment was written.
definition_line: usize,
},
/// A ui test checking for failure does not have any failure patterns
NoPatternsFound,
/// A ui test checking for success has failure patterns
PatternFoundInPassTest,
/// Stderr/Stdout differed from the `.stderr`/`.stdout` file present.
OutputDiffers {
path: PathBuf,
actual: String,
expected: String,
},
/// Diagnostics were emitted for which no annotation pattern matched.
ErrorsWithoutPattern {
msgs: Vec<Message>,
/// `None` when the diagnostics could not be tied to a file/line.
path: Option<(PathBuf, usize)>,
},
}
/// All failures collected for one test invocation.
type Errors = Vec<Error>;
/// Builds and executes the test command for one file/revision, then checks the
/// result. Returns the command (for error reporting), all collected errors,
/// and the rendered stderr.
fn run_test(
path: &Path,
config: &Config,
target: &str,
revision: &str,
comments: &Comments,
) -> (Command, Errors, String) {
// Run miri
// (`miri` is the historical name; this is whatever `config.program` points at.)
let mut miri = Command::new(&config.program);
miri.args(config.args.iter());
miri.arg(path);
// Revisions are selected via a --cfg flag matching the revision name.
if !revision.is_empty() {
miri.arg(format!("--cfg={revision}"));
}
miri.arg("--error-format=json");
for arg in &comments.compile_flags {
miri.arg(arg);
}
for (k, v) in &comments.env_vars {
miri.env(k, v);
}
let output = miri.output().expect("could not execute miri");
// First check the exit status against the expected mode...
let mut errors = config.mode.ok(output.status);
// ...then compare stdout/stderr and in-file annotations.
let stderr = check_test_result(
path,
config,
target,
revision,
comments,
&mut errors,
&output.stdout,
&output.stderr,
);
(miri, errors, stderr)
}
/// Compares a finished test's stdout/stderr against the stored reference files
/// and the in-file error annotations, appending any mismatch to `errors`.
/// Returns the rendered (human-readable) stderr.
fn check_test_result(
path: &Path,
config: &Config,
target: &str,
revision: &str,
comments: &Comments,
errors: &mut Errors,
stdout: &[u8],
stderr: &[u8],
) -> String {
// Always remove annotation comments from stderr.
let diagnostics = rustc_stderr::process(path, stderr);
let stdout = std::str::from_utf8(stdout).unwrap();
// Check output files (if any)
// Reference files are per-revision: `<revision>.<ext>` when a revision is set.
let revised = |extension: &str| {
if revision.is_empty() {
extension.to_string()
} else {
format!("{}.{}", revision, extension)
}
};
// Check output files against actual output
check_output(
&diagnostics.rendered,
path,
errors,
revised("stderr"),
target,
&config.stderr_filters,
config,
comments,
);
check_output(
stdout,
path,
errors,
revised("stdout"),
target,
&config.stdout_filters,
config,
comments,
);
// Check error annotations in the source against output
check_annotations(
diagnostics.messages,
diagnostics.messages_from_unknown_file_or_line,
path,
errors,
config,
revision,
comments,
);
diagnostics.rendered
}
/// Matches the diagnostics emitted by the test against the `//~`-style
/// annotations and `error-pattern` comments, pushing an `Error` for every
/// unmatched annotation and every unannotated diagnostic of high enough level.
/// `messages` is indexed by source line; matched messages are removed so each
/// diagnostic can satisfy at most one annotation.
fn check_annotations(
mut messages: Vec<Vec<Message>>,
mut messages_from_unknown_file_or_line: Vec<Message>,
path: &Path,
errors: &mut Errors,
config: &Config,
revision: &str,
comments: &Comments,
) {
if let Some((ref error_pattern, definition_line)) = comments.error_pattern {
// first check the diagnostics messages outside of our file. We check this first, so that
// you can mix in-file annotations with //@error-pattern annotations, even if there is overlap
// in the messages.
if let Some(i) = messages_from_unknown_file_or_line
.iter()
.position(|msg| error_pattern.matches(&msg.message))
{
messages_from_unknown_file_or_line.remove(i);
} else {
errors.push(Error::PatternNotFound { pattern: error_pattern.clone(), definition_line });
}
}
// The order on `Level` is such that `Error` is the highest level.
// We will ensure that *all* diagnostics of level at least `lowest_annotation_level`
// are matched.
let mut lowest_annotation_level = Level::Error;
for &ErrorMatch { ref pattern, revision: ref rev, definition_line, line, level } in
&comments.error_matches
{
// Annotations restricted to a different revision are skipped entirely.
if let Some(rev) = rev {
if rev != revision {
continue;
}
}
// If we found a diagnostic with a level annotation, make sure that all
// diagnostics of that level have annotations, even if we don't end up finding a matching diagnostic
// for this pattern.
lowest_annotation_level = std::cmp::min(lowest_annotation_level, level);
if let Some(msgs) = messages.get_mut(line) {
let found =
msgs.iter().position(|msg| pattern.matches(&msg.message) && msg.level == level);
if let Some(found) = found {
msgs.remove(found);
continue;
}
}
errors.push(Error::PatternNotFound { pattern: pattern.clone(), definition_line });
}
// Keep only diagnostics severe enough that they *must* be annotated.
let filter = |msgs: Vec<Message>| -> Vec<_> {
msgs.into_iter()
.filter(|msg| {
msg.level
>= comments.require_annotations_for_level.unwrap_or(lowest_annotation_level)
})
.collect()
};
let messages_from_unknown_file_or_line = filter(messages_from_unknown_file_or_line);
if !messages_from_unknown_file_or_line.is_empty() {
errors.push(Error::ErrorsWithoutPattern {
path: None,
msgs: messages_from_unknown_file_or_line,
});
}
for (line, msgs) in messages.into_iter().enumerate() {
let msgs = filter(msgs);
if !msgs.is_empty() {
errors
.push(Error::ErrorsWithoutPattern { path: Some((path.to_path_buf(), line)), msgs });
}
}
// Finally sanity-check the annotations against the test's expected mode.
match (config.mode, comments.error_pattern.is_some() || !comments.error_matches.is_empty()) {
(Mode::Pass, true) | (Mode::Panic, true) => errors.push(Error::PatternFoundInPassTest),
(Mode::Fail, false) => errors.push(Error::NoPatternsFound),
_ => {}
}
}
/// Compares one output stream (stderr or stdout, selected by `kind`) against
/// its reference file after normalization, honoring the configured conflict
/// handling (error, bless, or ignore).
fn check_output(
output: &str,
path: &Path,
errors: &mut Errors,
kind: String,
target: &str,
filters: &Filter,
config: &Config,
comments: &Comments,
) {
let output = normalize(path, output, filters, comments);
let path = output_path(path, comments, kind, target);
match config.output_conflict_handling {
// Bless: overwrite the reference file (or delete it if output is empty).
OutputConflictHandling::Bless =>
if output.is_empty() {
let _ = std::fs::remove_file(path);
} else {
std::fs::write(path, &output).unwrap();
},
OutputConflictHandling::Error => {
// A missing reference file is treated as expecting empty output.
let expected_output = std::fs::read_to_string(&path).unwrap_or_default();
if output != expected_output {
errors.push(Error::OutputDiffers {
path,
actual: output,
expected: expected_output,
});
}
}
OutputConflictHandling::Ignore => {}
}
}
/// Computes the reference-file path for an output stream, inserting the
/// pointer width (e.g. `64bit.`) when the test keeps per-bitwidth stderr.
fn output_path(path: &Path, comments: &Comments, kind: String, target: &str) -> PathBuf {
    let extension = if comments.stderr_per_bitwidth {
        format!("{}bit.{kind}", get_pointer_width(target))
    } else {
        kind
    };
    path.with_extension(extension)
}
/// Evaluates a single `only`/`ignore` condition against the current target.
fn test_condition(condition: &Condition, target: &str, config: &Config) -> bool {
    match condition {
        // Pointer width of the target triple must match exactly.
        Condition::Bitwidth(bits) => *bits == get_pointer_width(target),
        // Substring match on the target triple.
        Condition::Target(t) => target.contains(t),
        // True only when no cross-compilation target was configured.
        Condition::OnHost => config.target.is_none(),
    }
}
/// Returns whether according to the in-file conditions, this file should be run.
fn test_file_conditions(comments: &Comments, target: &str, config: &Config) -> bool {
    let is_ignored = comments.ignore.iter().any(|c| test_condition(c, target, config));
    let only_satisfied = comments.only.iter().all(|c| test_condition(c, target, config));
    !is_ignored && only_satisfied
}
// Taken 1:1 from compiletest-rs
/// Derives the pointer width in bits from a target triple name.
fn get_pointer_width(triple: &str) -> u8 {
    // "gnux32"/"gnu_ilp32" are 32-bit ABIs on 64-bit hardware; s390x has no
    // "64" in its name but is 64-bit.
    let is_64bit = (triple.contains("64")
        && !triple.ends_with("gnux32")
        && !triple.ends_with("gnu_ilp32"))
        || triple.starts_with("s390x");
    if is_64bit {
        64
    } else if triple.starts_with("avr") {
        16
    } else {
        32
    }
}
/// Applies path and test-specific normalizations to output text so it can be
/// compared against checked-in reference files regardless of machine paths.
fn normalize(path: &Path, text: &str, filters: &Filter, comments: &Comments) -> String {
// Useless paths
let mut text = text.replace(&path.parent().unwrap().display().to_string(), "$DIR");
if let Some(lib_path) = option_env!("RUSTC_LIB_PATH") {
text = text.replace(lib_path, "RUSTLIB");
}
// Global filters are applied first, then the per-test normalizations.
for (regex, replacement) in filters.iter() {
text = regex.replace_all(&text, *replacement).to_string();
}
for (from, to) in &comments.normalize_stderr {
text = from.replace_all(&text, to).to_string();
}
text
}
impl Config {
/// Determines the host triple by invoking `self.program` and parsing its
/// rustc-style version output.
fn get_host(&self) -> String {
rustc_version::VersionMeta::for_command(std::process::Command::new(&self.program))
.expect("failed to parse rustc version info")
.host
}
}
/// The expected outcome of running a single test.
#[derive(Copy, Clone, Debug)]
pub enum Mode {
// The test passes a full execution of the rustc driver
Pass,
// The rustc driver panicked
Panic,
// The rustc driver emitted an error
Fail,
}
impl Mode {
    /// Checks `status` against the exit code this mode expects
    /// (`Pass` -> 0, `Panic` -> 101, `Fail` -> 1).
    ///
    /// Returns an empty error list on a match and a single
    /// `Error::ExitStatus` otherwise.
    fn ok(self, status: ExitStatus) -> Errors {
        // `ExitStatus::code()` is `None` when the process was terminated by a
        // signal (e.g. SIGSEGV/SIGKILL on Unix); the previous `unwrap()` made
        // the whole test runner panic in that case. A signal death is never an
        // expected outcome, so report it as an exit-status failure instead.
        match (status.code(), self) {
            (Some(0), Mode::Pass) | (Some(1), Mode::Fail) | (Some(101), Mode::Panic) => vec![],
            _ => vec![Error::ExitStatus(self, status)],
        }
    }
}
Auto merge of #2391 - RalfJung:stderr, r=oli-obk
on an error, always print the unnormalized stderr
Currently we skip this if a stderr diff was printed, but the stderr diff is normalized, so e.g. one cannot learn line numbers from it.
Is there a way to get the diff to only print the parts that differ, like a usual `diff -u`? Currently it always seems to print the full output, so with a long stderr we now get doubly long test results even if the diff is actually rather small.
#![allow(clippy::enum_variant_names, clippy::useless_format, clippy::too_many_arguments)]
use std::collections::VecDeque;
use std::fmt::Write;
use std::path::{Path, PathBuf};
use std::process::{Command, ExitStatus};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
pub use color_eyre;
use color_eyre::eyre::Result;
use colored::*;
use parser::{ErrorMatch, Pattern};
use regex::Regex;
use rustc_stderr::{Level, Message};
use crate::parser::{Comments, Condition};
mod parser;
mod rustc_stderr;
#[cfg(test)]
mod tests;
/// Global configuration for one test-suite run.
#[derive(Debug)]
pub struct Config {
/// Arguments passed to the binary that is executed.
pub args: Vec<String>,
/// `None` to run on the host, otherwise a target triple
pub target: Option<String>,
/// Filters applied to stderr output before processing it
pub stderr_filters: Filter,
/// Filters applied to stdout output before processing it
pub stdout_filters: Filter,
/// The folder in which to start searching for .rs files
pub root_dir: PathBuf,
/// Expected outcome (pass/panic/fail) for every test in this run.
pub mode: Mode,
/// The compiler/driver binary invoked for each test file.
pub program: PathBuf,
/// What to do when actual output differs from the stored reference files.
pub output_conflict_handling: OutputConflictHandling,
/// Only run tests with one of these strings in their path/name
pub path_filter: Vec<String>,
}
/// Strategy for mismatches between actual output and `.stderr`/`.stdout` files.
#[derive(Debug)]
pub enum OutputConflictHandling {
/// The default: emit a diff of the expected/actual output.
Error,
/// Ignore mismatches in the stderr/stdout files.
Ignore,
/// Instead of erroring if the stderr/stdout differs from the expected
/// automatically replace it with the found output (after applying filters).
Bless,
}
/// Regex/replacement pairs applied to output before comparison.
pub type Filter = Vec<(Regex, &'static str)>;
/// Runs every `.rs` test under `config.root_dir` on a pool of worker threads,
/// then prints a report. On any failure this prints details (including the
/// full unnormalized stderr) and terminates the process with exit code 1
/// instead of returning.
pub fn run_tests(config: Config) -> Result<()> {
eprintln!(" Compiler flags: {:?}", config.args);
// Get the triple with which to run the tests
let target = config.target.clone().unwrap_or_else(|| config.get_host());
// A channel for files to process
let (submit, receive) = crossbeam::channel::unbounded();
// Some statistics and failure reports.
let failures = Mutex::new(vec![]);
let succeeded = AtomicUsize::default();
let ignored = AtomicUsize::default();
let filtered = AtomicUsize::default();
crossbeam::scope(|s| -> Result<()> {
// Create a thread that is in charge of walking the directory and submitting jobs.
// It closes the channel when it is done.
s.spawn(|_| {
let mut todo = VecDeque::new();
todo.push_back(config.root_dir.clone());
while let Some(path) = todo.pop_front() {
if path.is_dir() {
// Enqueue everything inside this directory.
// We want it sorted, to have some control over scheduling of slow tests.
let mut entries =
std::fs::read_dir(path).unwrap().collect::<Result<Vec<_>, _>>().unwrap();
entries.sort_by_key(|e| e.file_name());
for entry in entries {
todo.push_back(entry.path());
}
} else if path.extension().map(|ext| ext == "rs").unwrap_or(false) {
// Forward .rs files to the test workers.
submit.send(path).unwrap();
}
}
// There will be no more jobs. This signals the workers to quit.
// (This also ensures `submit` is moved into this closure.)
drop(submit);
});
let mut threads = vec![];
// Create N worker threads that receive files to test.
for _ in 0..std::thread::available_parallelism().unwrap().get() {
threads.push(s.spawn(|_| -> Result<()> {
for path in &receive {
// Command-line path filter: skip files not matching any filter string.
if !config.path_filter.is_empty() {
let path_display = path.display().to_string();
if !config.path_filter.iter().any(|filter| path_display.contains(filter)) {
filtered.fetch_add(1, Ordering::Relaxed);
continue;
}
}
let comments = Comments::parse_file(&path)?;
// Ignore file if only/ignore rules do (not) apply
if !test_file_conditions(&comments, &target, &config) {
ignored.fetch_add(1, Ordering::Relaxed);
eprintln!(
"{} ... {}",
path.display(),
"ignored (in-test comment)".yellow()
);
continue;
}
// Run the test for all revisions
for revision in
comments.revisions.clone().unwrap_or_else(|| vec![String::new()])
{
let (m, errors, stderr) =
run_test(&path, &config, &target, &revision, &comments);
// Using a single `eprintln!` to prevent messages from threads from getting intermingled.
let mut msg = format!("{} ", path.display());
if !revision.is_empty() {
write!(msg, "(revision `{revision}`) ").unwrap();
}
write!(msg, "... ").unwrap();
if errors.is_empty() {
eprintln!("{msg}{}", "ok".green());
succeeded.fetch_add(1, Ordering::Relaxed);
} else {
eprintln!("{msg}{}", "FAILED".red().bold());
failures.lock().unwrap().push((
path.clone(),
m,
revision,
errors,
stderr,
));
}
}
}
Ok(())
}));
}
// Propagate panics and the first I/O/parse error from any worker.
for thread in threads {
thread.join().unwrap()?;
}
Ok(())
})
.unwrap()?;
// Print all errors in a single thread to show reliable output
let failures = failures.into_inner().unwrap();
let succeeded = succeeded.load(Ordering::Relaxed);
let ignored = ignored.load(Ordering::Relaxed);
let filtered = filtered.load(Ordering::Relaxed);
if !failures.is_empty() {
for (path, miri, revision, errors, stderr) in &failures {
eprintln!();
eprint!("{}", path.display().to_string().underline().bold());
if !revision.is_empty() {
eprint!(" (revision `{}`)", revision);
}
eprint!(" {}", "FAILED:".red().bold());
eprintln!();
eprintln!("command: {:?}", miri);
eprintln!();
for error in errors {
match error {
Error::ExitStatus(mode, exit_status) => eprintln!("{mode:?} got {exit_status}"),
Error::PatternNotFound { pattern, definition_line } => {
match pattern {
Pattern::SubString(s) =>
eprintln!("substring `{s}` {} in stderr output", "not found".red()),
Pattern::Regex(r) =>
eprintln!("`/{r}/` does {} stderr output", "not match".red()),
}
eprintln!(
"expected because of pattern here: {}:{definition_line}",
path.display().to_string().bold()
);
}
Error::NoPatternsFound => {
eprintln!("{}", "no error patterns found in failure test".red());
}
Error::PatternFoundInPassTest =>
eprintln!("{}", "error pattern found in success test".red()),
Error::OutputDiffers { path, actual, expected } => {
eprintln!("actual output differed from expected {}", path.display());
eprintln!("{}", pretty_assertions::StrComparison::new(expected, actual));
eprintln!()
}
Error::ErrorsWithoutPattern { path: None, msgs } => {
eprintln!(
"There were {} unmatched diagnostics that occurred outside the testfile and had not pattern",
msgs.len(),
);
for Message { level, message } in msgs {
eprintln!(" {level:?}: {message}")
}
}
Error::ErrorsWithoutPattern { path: Some((path, line)), msgs } => {
eprintln!(
"There were {} unmatched diagnostics at {}:{line}",
msgs.len(),
path.display()
);
for Message { level, message } in msgs {
eprintln!(" {level:?}: {message}")
}
}
}
eprintln!();
}
// Always dump the raw (unnormalized) stderr: the diffs above are
// normalized and e.g. do not contain real line numbers.
eprintln!("full stderr:");
eprintln!("{}", stderr);
eprintln!();
}
eprintln!("{}", "FAILURES:".red().underline().bold());
for (path, _miri, _revision, _errors, _stderr) in &failures {
eprintln!(" {}", path.display());
}
eprintln!();
eprintln!(
"test result: {}. {} tests failed, {} tests passed, {} ignored, {} filtered out",
"FAIL".red(),
failures.len().to_string().red().bold(),
succeeded.to_string().green(),
ignored.to_string().yellow(),
filtered.to_string().yellow(),
);
std::process::exit(1);
}
eprintln!();
eprintln!(
"test result: {}. {} tests passed, {} ignored, {} filtered out",
"ok".green(),
succeeded.to_string().green(),
ignored.to_string().yellow(),
filtered.to_string().yellow(),
);
eprintln!();
Ok(())
}
/// Failure modes a single test run can report.
#[derive(Debug)]
enum Error {
/// Got an invalid exit status for the given mode.
ExitStatus(Mode, ExitStatus),
/// A required error pattern was never seen in the output.
PatternNotFound {
pattern: Pattern,
/// Line in the test file where the pattern comment was written.
definition_line: usize,
},
/// A ui test checking for failure does not have any failure patterns
NoPatternsFound,
/// A ui test checking for success has failure patterns
PatternFoundInPassTest,
/// Stderr/Stdout differed from the `.stderr`/`.stdout` file present.
OutputDiffers {
path: PathBuf,
actual: String,
expected: String,
},
/// Diagnostics were emitted for which no annotation pattern matched.
ErrorsWithoutPattern {
msgs: Vec<Message>,
/// `None` when the diagnostics could not be tied to a file/line.
path: Option<(PathBuf, usize)>,
},
}
/// All failures collected for one test invocation.
type Errors = Vec<Error>;
/// Builds and executes the test command for one file/revision, then checks the
/// result. Returns the command (for error reporting), all collected errors,
/// and the rendered stderr.
fn run_test(
path: &Path,
config: &Config,
target: &str,
revision: &str,
comments: &Comments,
) -> (Command, Errors, String) {
// Run miri
// (`miri` is the historical name; this is whatever `config.program` points at.)
let mut miri = Command::new(&config.program);
miri.args(config.args.iter());
miri.arg(path);
// Revisions are selected via a --cfg flag matching the revision name.
if !revision.is_empty() {
miri.arg(format!("--cfg={revision}"));
}
miri.arg("--error-format=json");
for arg in &comments.compile_flags {
miri.arg(arg);
}
for (k, v) in &comments.env_vars {
miri.env(k, v);
}
let output = miri.output().expect("could not execute miri");
// First check the exit status against the expected mode...
let mut errors = config.mode.ok(output.status);
// ...then compare stdout/stderr and in-file annotations.
let stderr = check_test_result(
path,
config,
target,
revision,
comments,
&mut errors,
&output.stdout,
&output.stderr,
);
(miri, errors, stderr)
}
/// Compares a finished test's stdout/stderr against the stored reference files
/// and the in-file error annotations, appending any mismatch to `errors`.
/// Returns the rendered (human-readable) stderr.
fn check_test_result(
path: &Path,
config: &Config,
target: &str,
revision: &str,
comments: &Comments,
errors: &mut Errors,
stdout: &[u8],
stderr: &[u8],
) -> String {
// Always remove annotation comments from stderr.
let diagnostics = rustc_stderr::process(path, stderr);
let stdout = std::str::from_utf8(stdout).unwrap();
// Check output files (if any)
// Reference files are per-revision: `<revision>.<ext>` when a revision is set.
let revised = |extension: &str| {
if revision.is_empty() {
extension.to_string()
} else {
format!("{}.{}", revision, extension)
}
};
// Check output files against actual output
check_output(
&diagnostics.rendered,
path,
errors,
revised("stderr"),
target,
&config.stderr_filters,
config,
comments,
);
check_output(
stdout,
path,
errors,
revised("stdout"),
target,
&config.stdout_filters,
config,
comments,
);
// Check error annotations in the source against output
check_annotations(
diagnostics.messages,
diagnostics.messages_from_unknown_file_or_line,
path,
errors,
config,
revision,
comments,
);
diagnostics.rendered
}
/// Matches the error annotations and `//@error-pattern` directives of the test
/// against the diagnostics actually emitted, pushing a `PatternNotFound` /
/// `ErrorsWithoutPattern` error for every mismatch.
fn check_annotations(
    mut messages: Vec<Vec<Message>>,
    mut messages_from_unknown_file_or_line: Vec<Message>,
    path: &Path,
    errors: &mut Errors,
    config: &Config,
    revision: &str,
    comments: &Comments,
) {
    if let Some((ref error_pattern, definition_line)) = comments.error_pattern {
        // first check the diagnostics messages outside of our file. We check this first, so that
        // you can mix in-file annotations with //@error-pattern annotations, even if there is overlap
        // in the messages.
        if let Some(i) = messages_from_unknown_file_or_line
            .iter()
            .position(|msg| error_pattern.matches(&msg.message))
        {
            // Consume the matched message so it cannot satisfy another pattern.
            messages_from_unknown_file_or_line.remove(i);
        } else {
            errors.push(Error::PatternNotFound { pattern: error_pattern.clone(), definition_line });
        }
    }
    // The order on `Level` is such that `Error` is the highest level.
    // We will ensure that *all* diagnostics of level at least `lowest_annotation_level`
    // are matched.
    let mut lowest_annotation_level = Level::Error;
    for &ErrorMatch { ref pattern, revision: ref rev, definition_line, line, level } in
        &comments.error_matches
    {
        // Annotations tagged with a revision only apply to that revision.
        if let Some(rev) = rev {
            if rev != revision {
                continue;
            }
        }
        // If we found a diagnostic with a level annotation, make sure that all
        // diagnostics of that level have annotations, even if we don't end up finding a matching diagnostic
        // for this pattern.
        lowest_annotation_level = std::cmp::min(lowest_annotation_level, level);
        if let Some(msgs) = messages.get_mut(line) {
            let found =
                msgs.iter().position(|msg| pattern.matches(&msg.message) && msg.level == level);
            if let Some(found) = found {
                // Consume the matched message so it cannot satisfy another pattern.
                msgs.remove(found);
                continue;
            }
        }
        errors.push(Error::PatternNotFound { pattern: pattern.clone(), definition_line });
    }
    // Any diagnostics at or above the required level that survived the
    // matching above are unaccounted for and get reported below.
    let filter = |msgs: Vec<Message>| -> Vec<_> {
        msgs.into_iter()
            .filter(|msg| {
                msg.level
                    >= comments.require_annotations_for_level.unwrap_or(lowest_annotation_level)
            })
            .collect()
    };
    let messages_from_unknown_file_or_line = filter(messages_from_unknown_file_or_line);
    if !messages_from_unknown_file_or_line.is_empty() {
        errors.push(Error::ErrorsWithoutPattern {
            path: None,
            msgs: messages_from_unknown_file_or_line,
        });
    }
    for (line, msgs) in messages.into_iter().enumerate() {
        let msgs = filter(msgs);
        if !msgs.is_empty() {
            errors
                .push(Error::ErrorsWithoutPattern { path: Some((path.to_path_buf(), line)), msgs });
        }
    }
    // Sanity-check the declared mode against the presence of failure patterns.
    match (config.mode, comments.error_pattern.is_some() || !comments.error_matches.is_empty()) {
        (Mode::Pass, true) | (Mode::Panic, true) => errors.push(Error::PatternFoundInPassTest),
        (Mode::Fail, false) => errors.push(Error::NoPatternsFound),
        _ => {}
    }
}
/// Compares one output stream (stderr or stdout) against its reference file,
/// honoring the configured conflict handling (bless / error / ignore).
fn check_output(
    output: &str,
    path: &Path,
    errors: &mut Errors,
    kind: String,
    target: &str,
    filters: &Filter,
    config: &Config,
    comments: &Comments,
) {
    // Normalize paths, apply filters, then locate the reference file.
    let output = normalize(path, output, filters, comments);
    let path = output_path(path, comments, kind, target);
    match config.output_conflict_handling {
        // Blessing: overwrite the reference file with the new output;
        // an empty output removes the reference file entirely.
        OutputConflictHandling::Bless =>
            if output.is_empty() {
                let _ = std::fs::remove_file(path);
            } else {
                std::fs::write(path, &output).unwrap();
            },
        OutputConflictHandling::Error => {
            // A missing reference file is treated as expecting empty output.
            let expected_output = std::fs::read_to_string(&path).unwrap_or_default();
            if output != expected_output {
                errors.push(Error::OutputDiffers {
                    path,
                    actual: output,
                    expected: expected_output,
                });
            }
        }
        OutputConflictHandling::Ignore => {}
    }
}
/// Computes the reference-file path for the given output `kind`
/// ("stderr"/"stdout").
///
/// When the test opts into bitwidth-specific references, the target's pointer
/// width becomes part of the extension (e.g. `64bit.stderr`).
fn output_path(path: &Path, comments: &Comments, kind: String, target: &str) -> PathBuf {
    let extension = if comments.stderr_per_bitwidth {
        format!("{}bit.{kind}", get_pointer_width(target))
    } else {
        kind
    };
    path.with_extension(extension)
}
/// Evaluates a single `only-*`/`ignore-*` condition for the given target.
fn test_condition(condition: &Condition, target: &str, config: &Config) -> bool {
    match condition {
        // Host condition: true when we are not cross-compiling.
        Condition::OnHost => config.target.is_none(),
        // Matches targets with the given pointer width.
        Condition::Bitwidth(expected) => *expected == get_pointer_width(target),
        // Substring match on the target triple.
        Condition::Target(needle) => target.contains(needle),
    }
}
/// Returns whether according to the in-file conditions, this file should be run.
///
/// A file runs when no `ignore` condition matches and every `only`
/// condition matches.
fn test_file_conditions(comments: &Comments, target: &str, config: &Config) -> bool {
    let ignored = comments.ignore.iter().any(|c| test_condition(c, target, config));
    !ignored && comments.only.iter().all(|c| test_condition(c, target, config))
}
// Taken 1:1 from compiletest-rs
/// Derives the pointer width in bits from a target triple.
fn get_pointer_width(triple: &str) -> u8 {
    // x32-style ABIs keep 32-bit pointers despite a "64" in the triple.
    let ilp32 = triple.ends_with("gnux32") || triple.ends_with("gnu_ilp32");
    if triple.starts_with("s390x") || (triple.contains("64") && !ilp32) {
        64
    } else if triple.starts_with("avr") {
        16
    } else {
        32
    }
}
/// Applies path and filter normalizations to captured output so that
/// reference files are machine-independent.
fn normalize(path: &Path, text: &str, filters: &Filter, comments: &Comments) -> String {
    // Hide the test's directory so reference files are location-independent.
    let dir = path.parent().unwrap().display().to_string();
    let mut normalized = text.replace(&dir, "$DIR");
    // Hide the rustc sysroot path, if one was baked in at build time.
    if let Some(lib_path) = option_env!("RUSTC_LIB_PATH") {
        normalized = normalized.replace(lib_path, "RUSTLIB");
    }
    // Global filters first, then the per-test normalization rules.
    for (regex, replacement) in filters.iter() {
        normalized = regex.replace_all(&normalized, *replacement).to_string();
    }
    for (from, to) in &comments.normalize_stderr {
        normalized = from.replace_all(&normalized, to).to_string();
    }
    normalized
}
impl Config {
    /// Queries the configured `program` (a rustc-compatible driver) for its
    /// version metadata and returns the host triple it reports.
    fn get_host(&self) -> String {
        rustc_version::VersionMeta::for_command(std::process::Command::new(&self.program))
            .expect("failed to parse rustc version info")
            .host
    }
}
/// The expected outcome of running a test program.
#[derive(Copy, Clone, Debug)]
pub enum Mode {
    /// The test passes a full execution of the rustc driver
    Pass,
    /// The rustc driver panicked
    Panic,
    /// The rustc driver emitted an error
    Fail,
}
impl Mode {
    /// Checks the process exit status against the expected outcome for this
    /// mode, returning an `Error::ExitStatus` on mismatch.
    fn ok(self, status: ExitStatus) -> Errors {
        // `status.code()` is `None` when the process was terminated by a
        // signal (e.g. SIGSEGV on Unix); the previous `unwrap()` would panic
        // there instead of reporting the test failure. Treat it as a
        // mismatch like any other unexpected exit status.
        match (status.code(), self) {
            (Some(1), Mode::Fail) | (Some(101), Mode::Panic) | (Some(0), Mode::Pass) => vec![],
            _ => vec![Error::ExitStatus(self, status)],
        }
    }
}
|
use opengles; // FIXME: Should only be for tests.
import glut::{check_loop, create_window, destroy_window, init, init_display_mode, swap_buffers};
import opengles::gl2::{ARRAY_BUFFER, COLOR_BUFFER_BIT, COMPILE_STATUS};
import opengles::gl2::{FRAGMENT_SHADER, LINK_STATUS, NO_ERROR, STATIC_DRAW};
import opengles::gl2::{TRIANGLE_STRIP, VERTEX_SHADER, GLclampf, GLenum};
import opengles::gl2::{GLsizei, GLuint, attach_shader, bind_buffer};
import opengles::gl2::{buffer_data, create_program, clear, clear_color};
import opengles::gl2::{compile_shader, create_shader, draw_arrays};
import opengles::gl2::{enable_vertex_attrib_array, gen_buffers};
import opengles::gl2::{get_attrib_location, get_error, get_program_iv};
import opengles::gl2::{get_shader_info_log, get_shader_iv};
import opengles::gl2::{get_uniform_location, link_program, shader_source};
import opengles::gl2::{use_program, vertex_attrib_pointer_f32};
import comm::{chan, peek, port, recv, send, Chan, Port};
import io::println;
import ptr::{addr_of, null};
import str::bytes;
import task::TaskBuilder;
import vec::unsafe::to_ptr;
fn fragment_shader_source() -> ~str {
~"
#ifdef GLES2
precision mediump float;
#endif
void main(void) {
gl_FragColor = vec4(1.0, 1.0, 1.0, 1.0);
}
"
}
fn vertex_shader_source() -> ~str {
~"
attribute vec3 aVertexPosition;
/*uniform mat4 uMVMatrix;
uniform mat4 uPMatrix;*/
void main(void) {
gl_Position = /*uPMatrix * uMVMatrix **/
vec4(aVertexPosition, 1.0);
}
"
}
fn load_shader(source_str: ~str, shader_type: GLenum) -> GLuint {
let shader_id = create_shader(shader_type);
shader_source(shader_id, ~[bytes(source_str)]);
compile_shader(shader_id);
if get_error() != NO_ERROR {
println(#fmt("error: %d", get_error() as int));
fail ~"failed to compile shader with error";
}
if get_shader_iv(shader_id, COMPILE_STATUS) == (0 as GLint) {
println(get_shader_info_log(shader_id));
fail ~"failed to compile shader";
}
return shader_id;
}
struct shader_program {
let program: GLuint;
let aVertexPosition: c_int;
/*let uPMatrix: c_int;
let uMVMatrix: c_int;*/
new(program: GLuint) {
self.program = program;
self.aVertexPosition = get_attrib_location(program, ~"aVertexPosition");
/*self.uPMatrix = get_uniform_location(program, "uPMatrix");
self.uMVMatrix = get_uniform_location(program, "uMVMatrix");*/
enable_vertex_attrib_array(self.aVertexPosition as GLuint);
}
}
fn init_shaders() -> shader_program {
let vertex_shader = load_shader(vertex_shader_source(), VERTEX_SHADER);
let fragment_shader = load_shader(fragment_shader_source(),
FRAGMENT_SHADER);
let program = create_program();
attach_shader(program, vertex_shader);
attach_shader(program, fragment_shader);
link_program(program);
if get_program_iv(program, LINK_STATUS) == (0 as GLint) {
fail ~"failed to initialize program";
}
use_program(program);
return shader_program(program);
}
fn init_buffers() -> GLuint {
let triangle_vertex_buffer = gen_buffers(1 as GLsizei)[0];
bind_buffer(ARRAY_BUFFER, triangle_vertex_buffer);
let vertices = ~[
0.0f32, 1.0f32, 0.0f32,
1.0f32, 0.0f32, 0.0f32,
0.0f32, 0.0f32, 0.0f32
];
buffer_data(ARRAY_BUFFER, vertices, STATIC_DRAW);
return triangle_vertex_buffer;
}
fn draw_scene(shader_program: shader_program, vertex_buffer: GLuint) {
clear_color(0.0f32, 0.0f32, 1.0f32, 1.0f32);
clear(COLOR_BUFFER_BIT);
bind_buffer(ARRAY_BUFFER, vertex_buffer);
vertex_attrib_pointer_f32(shader_program.aVertexPosition as GLuint,
3 as GLint, false, 0 as GLsizei, 0 as GLuint);
draw_arrays(TRIANGLE_STRIP, 0 as GLint, 3 as GLint);
}
fn display_callback() {
let program = init_shaders();
let vertex_buffer = init_buffers();
draw_scene(program, vertex_buffer);
swap_buffers();
}
#[test]
fn test_triangle_and_square() unsafe {
let builder = task::task().sched_mode(task::PlatformThread);
let po: Port<()> = port();
let ch = chan(po);
let _result_ch: Chan<()> = builder.spawn_listener(|_port| {
init();
init_display_mode(0 as c_uint);
let window = create_window(~"Rust GLUT");
display_func(display_callback);
let wakeup = port();
let wakeup_chan = chan(wakeup);
timer_func(1000, || send(wakeup_chan, ()));
loop {
check_loop();
if peek(wakeup) {
recv(wakeup);
send(ch, ());
destroy_window(window);
break;
}
}
});
recv(po);
}
Fix tests
use opengles; // FIXME: Should only be for tests.
import glut::{check_loop, create_window, destroy_window, init, init_display_mode, swap_buffers};
import opengles::gl2::{ARRAY_BUFFER, COLOR_BUFFER_BIT, COMPILE_STATUS};
import opengles::gl2::{FRAGMENT_SHADER, LINK_STATUS, NO_ERROR, STATIC_DRAW};
import opengles::gl2::{TRIANGLE_STRIP, VERTEX_SHADER, GLclampf, GLenum};
import opengles::gl2::{GLsizei, GLuint, attach_shader, bind_buffer};
import opengles::gl2::{buffer_data, create_program, clear, clear_color};
import opengles::gl2::{compile_shader, create_shader, draw_arrays};
import opengles::gl2::{enable_vertex_attrib_array, gen_buffers};
import opengles::gl2::{get_attrib_location, get_error, get_program_iv};
import opengles::gl2::{get_shader_info_log, get_shader_iv};
import opengles::gl2::{get_uniform_location, link_program, shader_source};
import opengles::gl2::{use_program, vertex_attrib_pointer_f32};
import comm::{Chan, peek, Port, recv, send};
import io::println;
import ptr::{addr_of, null};
import str::to_bytes;
import task::TaskBuilder;
import vec::unsafe::to_ptr;
fn fragment_shader_source() -> ~str {
~"
#ifdef GLES2
precision mediump float;
#endif
void main(void) {
gl_FragColor = vec4(1.0, 1.0, 1.0, 1.0);
}
"
}
fn vertex_shader_source() -> ~str {
~"
attribute vec3 aVertexPosition;
/*uniform mat4 uMVMatrix;
uniform mat4 uPMatrix;*/
void main(void) {
gl_Position = /*uPMatrix * uMVMatrix **/
vec4(aVertexPosition, 1.0);
}
"
}
// Compiles a GLSL shader of the given type and returns its GL object id.
// Fails the task if compilation reports an error.
// (Archaic pre-1.0 Rust dialect: `~str`, `fail`, `#fmt`.)
fn load_shader(source_str: ~str, shader_type: GLenum) -> GLuint {
    // Create a GL shader object and upload the GLSL source into it.
    let shader_id = create_shader(shader_type);
    shader_source(shader_id, ~[to_bytes(source_str)]);
    compile_shader(shader_id);
    // NOTE(review): `get_error` is called twice here; reading the GL error
    // typically clears it, so the second call may print NO_ERROR — confirm
    // this is intended.
    if get_error() != NO_ERROR {
        println(#fmt("error: %d", get_error() as int));
        fail ~"failed to compile shader with error";
    }
    // A COMPILE_STATUS of 0 means compilation failed; dump the info log.
    if get_shader_iv(shader_id, COMPILE_STATUS) == (0 as GLint) {
        println(get_shader_info_log(shader_id));
        fail ~"failed to compile shader";
    }
    return shader_id;
}
struct shader_program {
let program: GLuint;
let aVertexPosition: c_int;
/*let uPMatrix: c_int;
let uMVMatrix: c_int;*/
new(program: GLuint) {
self.program = program;
self.aVertexPosition = get_attrib_location(program, ~"aVertexPosition");
/*self.uPMatrix = get_uniform_location(program, "uPMatrix");
self.uMVMatrix = get_uniform_location(program, "uMVMatrix");*/
enable_vertex_attrib_array(self.aVertexPosition as GLuint);
}
}
fn init_shaders() -> shader_program {
let vertex_shader = load_shader(vertex_shader_source(), VERTEX_SHADER);
let fragment_shader = load_shader(fragment_shader_source(),
FRAGMENT_SHADER);
let program = create_program();
attach_shader(program, vertex_shader);
attach_shader(program, fragment_shader);
link_program(program);
if get_program_iv(program, LINK_STATUS) == (0 as GLint) {
fail ~"failed to initialize program";
}
use_program(program);
return shader_program(program);
}
fn init_buffers() -> GLuint {
let triangle_vertex_buffer = gen_buffers(1 as GLsizei)[0];
bind_buffer(ARRAY_BUFFER, triangle_vertex_buffer);
let vertices = ~[
0.0f32, 1.0f32, 0.0f32,
1.0f32, 0.0f32, 0.0f32,
0.0f32, 0.0f32, 0.0f32
];
buffer_data(ARRAY_BUFFER, vertices, STATIC_DRAW);
return triangle_vertex_buffer;
}
fn draw_scene(shader_program: shader_program, vertex_buffer: GLuint) {
clear_color(0.0f32, 0.0f32, 1.0f32, 1.0f32);
clear(COLOR_BUFFER_BIT);
bind_buffer(ARRAY_BUFFER, vertex_buffer);
vertex_attrib_pointer_f32(shader_program.aVertexPosition as GLuint,
3 as GLint, false, 0 as GLsizei, 0 as GLuint);
draw_arrays(TRIANGLE_STRIP, 0 as GLint, 3 as GLint);
}
fn display_callback() {
let program = init_shaders();
let vertex_buffer = init_buffers();
draw_scene(program, vertex_buffer);
swap_buffers();
}
#[test]
fn test_triangle_and_square() unsafe {
let builder = task::task().sched_mode(task::PlatformThread);
let po: Port<()> = Port();
let ch = Chan(po);
let _result_ch: Chan<()> = builder.spawn_listener(|_port| {
init();
init_display_mode(0 as c_uint);
let window = create_window(~"Rust GLUT");
display_func(display_callback);
let wakeup = Port();
let wakeup_chan = Chan(wakeup);
timer_func(1000, || send(wakeup_chan, ()));
loop {
check_loop();
if peek(wakeup) {
recv(wakeup);
send(ch, ());
destroy_window(window);
break;
}
}
});
recv(po);
}
|
#[feature(globs)];
extern mod OpenCL;
use OpenCL::CL::*;
use OpenCL::CL::ll::*;
use std::ptr;
use std::io;
use std::sys;
use std::libc;
use std::vec;
use std::cast;
#[fixed_stack_segment]
fn main()
{
unsafe
{
let ker =
~"__kernel void vector_add(__global const long *A,
__global const long *B,
__global long *C) {
int i = get_global_id(0);
C[i] = A[i] + B[i];
}";
let sz = 8;
let vec_a = ~[0, 1, 2, -3, 4, 5, 6, 7];
let vec_b = ~[-7, -6, 5, -4, 0, -1, 2, 3];
let p_id: cl_platform_id = ptr::null();
let device: cl_device_id = ptr::null();
let np: cl_uint = 0;
let nd: cl_uint = 0;
let mut r: cl_int;
// Get the platform and device information
clGetPlatformIDs(1,
ptr::to_unsafe_ptr(&p_id),
ptr::to_unsafe_ptr(&np));
r = clGetDeviceIDs(p_id,
CL_DEVICE_TYPE_CPU,
1,
ptr::to_unsafe_ptr(&device),
ptr::to_unsafe_ptr(&nd));
if r != CL_SUCCESS as cl_int
{ io::println(fmt!("Can't get device ID. [%?]", r)); }
// Create OpenCL context and command queue
let ctx = clCreateContext(ptr::null(),
nd,
ptr::to_unsafe_ptr(&device),
cast::transmute(ptr::null::<&fn ()>()),
ptr::null(),
ptr::to_unsafe_ptr(&r));
let cque = clCreateCommandQueue(ctx, device, 0, ptr::to_unsafe_ptr(&r));
// Create memory buffers
let A = clCreateBuffer(ctx,
CL_MEM_READ_ONLY,
(sz * sys::size_of::<int>()) as libc::size_t,
ptr::null(),
ptr::to_unsafe_ptr(&r));
let B = clCreateBuffer(ctx,
CL_MEM_READ_ONLY,
(sz * sys::size_of::<int>()) as libc::size_t,
ptr::null(),
ptr::to_unsafe_ptr(&r));
let C = clCreateBuffer(ctx,
CL_MEM_WRITE_ONLY,
(sz * sys::size_of::<int>()) as libc::size_t,
ptr::null(),
ptr::to_unsafe_ptr(&r));
// Copy lists into memory buffers
clEnqueueWriteBuffer(cque,
A,
CL_TRUE,
0,
(sz * sys::size_of::<int>()) as libc::size_t,
vec::raw::to_ptr(vec_a) as *libc::c_void,
0,
ptr::null(),
ptr::null());
clEnqueueWriteBuffer(cque,
B,
CL_TRUE,
0,
(sz * sys::size_of::<int>()) as libc::size_t,
vec::raw::to_ptr(vec_b) as *libc::c_void,
0,
ptr::null(),
ptr::null());
// Create a program from the kernel and build it
do ker.as_imm_buf |bytes, len|
{
let prog = clCreateProgramWithSource(ctx,
1,
ptr::to_unsafe_ptr(&(bytes as *libc::c_char)),
ptr::to_unsafe_ptr(&(len as libc::size_t)),
ptr::to_unsafe_ptr(&r));
r = clBuildProgram(prog,
nd,
ptr::to_unsafe_ptr(&device),
ptr::null(),
cast::transmute(ptr::null::<&fn ()>()),
ptr::null());
if r != CL_SUCCESS as cl_int
{ io::println(fmt!("Unable to build program [%?].", r)); }
// Create the OpenCL kernel
do "vector_add".as_imm_buf() |bytes, _|
{
let kernel = clCreateKernel(prog, bytes as *libc::c_char, ptr::to_unsafe_ptr(&r));
if r != CL_SUCCESS as cl_int
{ io::println(fmt!("Unable to create kernel [%?].", r)); }
// Set the arguments of the kernel
clSetKernelArg(kernel,
0,
sys::size_of::<cl_mem>() as libc::size_t,
ptr::to_unsafe_ptr(&A) as *libc::c_void);
clSetKernelArg(kernel,
1,
sys::size_of::<cl_mem>() as libc::size_t,
ptr::to_unsafe_ptr(&B) as *libc::c_void);
clSetKernelArg(kernel,
2,
sys::size_of::<cl_mem>() as libc::size_t,
ptr::to_unsafe_ptr(&C) as *libc::c_void);
let global_item_size: libc::size_t = (sz * sys::size_of::<int>()) as libc::size_t;
let local_item_size: libc::size_t = 64;
// Execute the OpenCL kernel on the list
clEnqueueNDRangeKernel(cque,
kernel,
1,
ptr::null(),
ptr::to_unsafe_ptr(&global_item_size),
ptr::to_unsafe_ptr(&local_item_size),
0,
ptr::null(),
ptr::null());
// Now let's read back the new list from the device
let buf = libc::malloc((sz * sys::size_of::<int>()) as libc::size_t);
clEnqueueReadBuffer(cque,
C,
CL_TRUE,
0,
(sz * sys::size_of::<int>()) as libc::size_t,
buf,
0,
ptr::null(),
ptr::null());
let vec_c = vec::from_buf(buf as *int, sz);
libc::free(buf);
io::println(fmt!(" %?", vec_a));
io::println(fmt!("+ %?", vec_b));
io::println(fmt!("= %?", vec_c));
// Cleanup
clReleaseKernel(kernel);
clReleaseProgram(prog);
clReleaseMemObject(C);
clReleaseMemObject(B);
clReleaseMemObject(A);
clReleaseCommandQueue(cque);
clReleaseContext(ctx);
}
}
}
}
s/CPU/ALL/
#[feature(globs)];
extern mod OpenCL;
use OpenCL::CL::*;
use OpenCL::CL::ll::*;
use std::ptr;
use std::io;
use std::sys;
use std::libc;
use std::vec;
use std::cast;
#[fixed_stack_segment]
fn main()
{
unsafe
{
let ker =
~"__kernel void vector_add(__global const long *A,
__global const long *B,
__global long *C) {
int i = get_global_id(0);
C[i] = A[i] + B[i];
}";
let sz = 8;
let vec_a = ~[0, 1, 2, -3, 4, 5, 6, 7];
let vec_b = ~[-7, -6, 5, -4, 0, -1, 2, 3];
let p_id: cl_platform_id = ptr::null();
let device: cl_device_id = ptr::null();
let np: cl_uint = 0;
let nd: cl_uint = 0;
let mut r: cl_int;
// Get the platform and device information
clGetPlatformIDs(1,
ptr::to_unsafe_ptr(&p_id),
ptr::to_unsafe_ptr(&np));
r = clGetDeviceIDs(p_id,
CL_DEVICE_TYPE_ALL,
1,
ptr::to_unsafe_ptr(&device),
ptr::to_unsafe_ptr(&nd));
if r != CL_SUCCESS as cl_int
{ io::println(fmt!("Can't get device ID. [%?]", r)); }
// Create OpenCL context and command queue
let ctx = clCreateContext(ptr::null(),
nd,
ptr::to_unsafe_ptr(&device),
cast::transmute(ptr::null::<&fn ()>()),
ptr::null(),
ptr::to_unsafe_ptr(&r));
let cque = clCreateCommandQueue(ctx, device, 0, ptr::to_unsafe_ptr(&r));
// Create memory buffers
let A = clCreateBuffer(ctx,
CL_MEM_READ_ONLY,
(sz * sys::size_of::<int>()) as libc::size_t,
ptr::null(),
ptr::to_unsafe_ptr(&r));
let B = clCreateBuffer(ctx,
CL_MEM_READ_ONLY,
(sz * sys::size_of::<int>()) as libc::size_t,
ptr::null(),
ptr::to_unsafe_ptr(&r));
let C = clCreateBuffer(ctx,
CL_MEM_WRITE_ONLY,
(sz * sys::size_of::<int>()) as libc::size_t,
ptr::null(),
ptr::to_unsafe_ptr(&r));
// Copy lists into memory buffers
clEnqueueWriteBuffer(cque,
A,
CL_TRUE,
0,
(sz * sys::size_of::<int>()) as libc::size_t,
vec::raw::to_ptr(vec_a) as *libc::c_void,
0,
ptr::null(),
ptr::null());
clEnqueueWriteBuffer(cque,
B,
CL_TRUE,
0,
(sz * sys::size_of::<int>()) as libc::size_t,
vec::raw::to_ptr(vec_b) as *libc::c_void,
0,
ptr::null(),
ptr::null());
// Create a program from the kernel and build it
do ker.as_imm_buf |bytes, len|
{
let prog = clCreateProgramWithSource(ctx,
1,
ptr::to_unsafe_ptr(&(bytes as *libc::c_char)),
ptr::to_unsafe_ptr(&(len as libc::size_t)),
ptr::to_unsafe_ptr(&r));
r = clBuildProgram(prog,
nd,
ptr::to_unsafe_ptr(&device),
ptr::null(),
cast::transmute(ptr::null::<&fn ()>()),
ptr::null());
if r != CL_SUCCESS as cl_int
{ io::println(fmt!("Unable to build program [%?].", r)); }
// Create the OpenCL kernel
do "vector_add".as_imm_buf() |bytes, _|
{
let kernel = clCreateKernel(prog, bytes as *libc::c_char, ptr::to_unsafe_ptr(&r));
if r != CL_SUCCESS as cl_int
{ io::println(fmt!("Unable to create kernel [%?].", r)); }
// Set the arguments of the kernel
clSetKernelArg(kernel,
0,
sys::size_of::<cl_mem>() as libc::size_t,
ptr::to_unsafe_ptr(&A) as *libc::c_void);
clSetKernelArg(kernel,
1,
sys::size_of::<cl_mem>() as libc::size_t,
ptr::to_unsafe_ptr(&B) as *libc::c_void);
clSetKernelArg(kernel,
2,
sys::size_of::<cl_mem>() as libc::size_t,
ptr::to_unsafe_ptr(&C) as *libc::c_void);
let global_item_size: libc::size_t = (sz * sys::size_of::<int>()) as libc::size_t;
let local_item_size: libc::size_t = 64;
// Execute the OpenCL kernel on the list
clEnqueueNDRangeKernel(cque,
kernel,
1,
ptr::null(),
ptr::to_unsafe_ptr(&global_item_size),
ptr::to_unsafe_ptr(&local_item_size),
0,
ptr::null(),
ptr::null());
// Now let's read back the new list from the device
let buf = libc::malloc((sz * sys::size_of::<int>()) as libc::size_t);
clEnqueueReadBuffer(cque,
C,
CL_TRUE,
0,
(sz * sys::size_of::<int>()) as libc::size_t,
buf,
0,
ptr::null(),
ptr::null());
let vec_c = vec::from_buf(buf as *int, sz);
libc::free(buf);
io::println(fmt!(" %?", vec_a));
io::println(fmt!("+ %?", vec_b));
io::println(fmt!("= %?", vec_c));
// Cleanup
clReleaseKernel(kernel);
clReleaseProgram(prog);
clReleaseMemObject(C);
clReleaseMemObject(B);
clReleaseMemObject(A);
clReleaseCommandQueue(cque);
clReleaseContext(ctx);
}
}
}
}
|
use ::prelude::*;
use std::rc::Rc;
use syn::*;
use ast_converters::*;
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Direction { In, Out, Retval }
pub struct TypeConversion {
/// Possible temporary values that need to be kept alive for the duration
/// of the conversion result usage.
pub temporary: Option<TokenStream>,
/// Conversion result value. Possibly referencing the temporary value.
pub value : TokenStream,
}
#[derive(PartialEq, Eq, Debug)]
pub struct ModelTypeSystemConfig {
pub effective_system : ModelTypeSystem,
pub is_default : bool,
}
impl ModelTypeSystemConfig {
    /// Returns a name unique to this type-system configuration.
    ///
    /// The default configuration keeps `base` as is; any other configuration
    /// gets the effective type system appended as a suffix.
    pub fn get_unique_name( &self, base : &str ) -> String {
        if self.is_default {
            base.to_string()
        } else {
            format!( "{}_{:?}", base, self.effective_system )
        }
    }
}
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub enum ModelTypeSystem {
/// COM Automation compatible type system.
Automation,
/// Raw type system.
Raw,
}
impl ModelTypeSystem {
/// Converts the model type system into public type system tokens.
pub fn as_typesystem_tokens( self ) -> TokenStream {
match self {
ModelTypeSystem::Automation =>
quote!( ::intercom::TypeSystem::Automation ),
ModelTypeSystem::Raw =>
quote!( ::intercom::TypeSystem::Raw ),
}
}
}
/// Type usage context.
pub struct TypeContext {
dir: Direction,
type_system: ModelTypeSystem,
}
impl TypeContext {
pub fn new( dir : Direction, type_system : ModelTypeSystem ) -> TypeContext {
TypeContext { dir, type_system }
}
}
/// Defines Type-specific logic for handling the various parameter types in the
/// Rust/COM interface.
pub trait TypeHandler {
    /// The Rust type.
    fn rust_ty( &self ) -> Type;
    /// The COM type. Defaults to the Rust type itself.
    fn com_ty( &self ) -> Type
    {
        self.rust_ty()
    }
    /// Converts a COM parameter named by the ident into a Rust type.
    ///
    /// The default conversion delegates to `Into` and needs no temporary.
    fn com_to_rust(
        &self, ident : &Ident
    ) -> TypeConversion
    {
        TypeConversion {
            temporary: None,
            value: quote!( #ident.into() ),
        }
    }
    /// Converts a Rust parameter named by the ident into a COM type.
    ///
    /// The default conversion delegates to `Into` and needs no temporary.
    fn rust_to_com(
        &self, ident : &Ident
    ) -> TypeConversion
    {
        TypeConversion {
            temporary: None,
            value: quote!( #ident.into() )
        }
    }
    /// Gets the default value for the type.
    fn default_value( &self ) -> TokenStream
    {
        match self.rust_ty() {
            Type::Path( ref p ) => {
                let ident = p.path.get_ident().unwrap();
                let name = ident.to_string();
                match name.as_ref() {
                    // Raw pointer-like types default to a null pointer
                    // instead of `Default::default()`.
                    "c_void"
                    | "RawComPtr"
                    => quote!( ::std::ptr::null_mut() ),
                    _ => quote!( Default::default() )
                }
            },
            _ => quote!( Default::default() )
        }
    }
    /// Gets the type system the handler serves if the handler is type system specific. Returns
    /// None if the handler is type system agnostic.
    fn type_system( &self ) -> Option<ModelTypeSystem> { None }
}
/// Identity parameter handler.
///
/// No special logic.
struct IdentityParam( Type );
impl TypeHandler for IdentityParam {
fn rust_ty( &self ) -> Type { self.0.clone() }
}
/// `ComItf` parameter handler. Supports `ComItf` Rust type and ensures the this
/// to/from `RawComPtr` COM type.
struct ComItfParam { ty: Type, context: TypeContext }
impl TypeHandler for ComItfParam {
fn rust_ty( &self ) -> Type { self.ty.clone() }
/// The COM type.
fn com_ty( &self ) -> Type
{
use syn;
let rust_ty = self.rust_ty();
let itf_ty = match rust_ty {
syn::Type::Path( path ) =>
match path.path.segments.last().unwrap().value().arguments {
syn::PathArguments::AngleBracketed( ref ab ) =>
match ab.args.last().unwrap().value() {
syn::GenericArgument::Type( ref t ) => t.clone(),
_ => panic!( "ComItf generic argument must be type" ),
},
_ => panic!( "ComItf type parameter must be angle bracketed" ),
},
_ => panic!( "ComItf type parameter must be a type path" ),
};
parse_quote!( ::intercom::raw::InterfacePtr< #itf_ty > )
}
fn default_value( &self ) -> TokenStream
{
quote!( ::intercom::raw::InterfacePtr::new( ::std::ptr::null_mut() ) )
}
/// Converts a COM parameter named by the ident into a Rust type.
fn com_to_rust(
&self, ident : &Ident
) -> TypeConversion
{
let ts = self.context.type_system.as_typesystem_tokens();
TypeConversion {
temporary: None,
value: quote!( ::intercom::ComItf::wrap( #ident.ptr, #ts ) ),
}
}
/// Converts a Rust parameter named by the ident into a COM type.
fn rust_to_com(
&self, ident : &Ident
) -> TypeConversion
{
let ts = self.context.type_system.as_typesystem_tokens();
TypeConversion {
temporary: None,
value: quote!( ::intercom::ComItf::ptr( &#ident.into(), #ts ) )
}
}
}
/// String parameter handler. Converts between Rust String and COM BSTR types.
struct StringParam { ty: Type, context: TypeContext }
impl TypeHandler for StringParam
{
fn rust_ty( &self ) -> Type { self.ty.clone() }
fn com_ty( &self ) -> Type
{
match self.context.dir {
Direction::In => parse_quote!( ::intercom::raw::InBSTR ),
Direction::Out | Direction::Retval => parse_quote!( ::intercom::raw::OutBSTR ),
}
}
fn com_to_rust( &self, ident : &Ident ) -> TypeConversion
{
match self.context.dir {
Direction::In => {
let target_ty = self.rust_ty();
let intermediate_ty = quote!( &::intercom::BStr );
let to_intermediate = quote!( ::intercom::BStr::from_ptr( #ident ) );
let as_trait = quote!( < #target_ty as ::intercom::FromWithTemporary< #intermediate_ty > > );
let temp_ident = Ident::new( &format!( "__{}_temporary", ident.to_string() ), Span::call_site() );
TypeConversion {
temporary: Some( quote!( let mut #temp_ident = #as_trait::to_temporary( #to_intermediate )?; ) ),
value: quote!( #as_trait::from_temporary( &mut #temp_ident )? ),
}
},
Direction::Out | Direction::Retval => {
TypeConversion {
temporary: None,
value: quote!( ::intercom::BString::from_ptr( #ident ).com_into()? ),
}
},
}
}
fn rust_to_com( &self, ident : &Ident ) -> TypeConversion
{
match self.context.dir {
Direction::In => {
let target_ty = self.rust_ty();
let intermediate_ty = quote!( &::intercom::BStr );
let as_trait = quote!( < #intermediate_ty as ::intercom::FromWithTemporary< #target_ty > > );
let temp_ident = Ident::new( &format!( "__{}_temporary", ident.to_string() ), Span::call_site() );
TypeConversion {
temporary: Some( quote!( let mut #temp_ident = #as_trait::to_temporary( #ident )?; ) ),
value: quote!( #as_trait::from_temporary( &mut #temp_ident )?.as_ptr() ),
}
},
Direction::Out | Direction::Retval => {
TypeConversion {
temporary: None,
value: quote!( ::intercom::BString::from( #ident ).into_ptr() ),
}
},
}
}
fn default_value( &self ) -> TokenStream
{
quote!( ::std::ptr::null_mut() )
}
/// String parameters differ between the type systems.
fn type_system( &self ) -> Option<ModelTypeSystem> {
Some( self.context.type_system )
}
}
/// Resolves the `TypeHandler` to use.
///
/// Parses `arg_ty` into the crate's type model and dispatches on the
/// outermost type name. Panics if the type cannot be parsed.
pub fn get_ty_handler(
    arg_ty : &Type,
    context : TypeContext,
) -> Rc<TypeHandler>
{
    let type_info = ::type_parser::parse( arg_ty )
        .unwrap_or_else( || panic!( "Type {:?} could not be parsed.", arg_ty ) );
    map_by_name(
        type_info.get_name().as_ref(), type_info.original.clone(),
        context )
}
/// Selects type handler based on the name of the type.
///
/// String-like types share one handler, interface pointers get their own,
/// and anything unrecognized falls back to the pass-through identity handler.
fn map_by_name(
    name: &str,
    original_type: Type,
    context: TypeContext,
) -> Rc<TypeHandler> {
    let handler: Rc<TypeHandler> = match name {
        "BString" | "BStr" | "String" | "str" =>
            Rc::new( StringParam { ty: original_type, context } ),
        "ComItf" =>
            Rc::new( ComItfParam { ty: original_type, context } ),
        // Unknown type: pass the value through unchanged.
        _ => Rc::new( IdentityParam( original_type ) ),
    };
    handler
}
Improve comments on ComItfParam com_ty
use ::prelude::*;
use std::rc::Rc;
use syn::*;
use ast_converters::*;
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Direction { In, Out, Retval }
pub struct TypeConversion {
/// Possible temporary values that need to be kept alive for the duration
/// of the conversion result usage.
pub temporary: Option<TokenStream>,
/// Conversion result value. Possibly referencing the temporary value.
pub value : TokenStream,
}
#[derive(PartialEq, Eq, Debug)]
pub struct ModelTypeSystemConfig {
pub effective_system : ModelTypeSystem,
pub is_default : bool,
}
impl ModelTypeSystemConfig {
pub fn get_unique_name( &self, base : &str ) -> String {
match self.is_default {
true => base.to_string(),
false => format!( "{}_{:?}", base, self.effective_system ),
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub enum ModelTypeSystem {
/// COM Automation compatible type system.
Automation,
/// Raw type system.
Raw,
}
impl ModelTypeSystem {
/// Converts the model type system into public type system tokens.
pub fn as_typesystem_tokens( self ) -> TokenStream {
match self {
ModelTypeSystem::Automation =>
quote!( ::intercom::TypeSystem::Automation ),
ModelTypeSystem::Raw =>
quote!( ::intercom::TypeSystem::Raw ),
}
}
}
/// Type usage context.
pub struct TypeContext {
dir: Direction,
type_system: ModelTypeSystem,
}
impl TypeContext {
pub fn new( dir : Direction, type_system : ModelTypeSystem ) -> TypeContext {
TypeContext { dir, type_system }
}
}
/// Defines Type-specific logic for handling the various parameter types in the
/// Rust/COM interface.
pub trait TypeHandler {
    /// The Rust type.
    fn rust_ty( &self ) -> Type;
    /// The COM type.
    ///
    /// Defaults to the Rust type for types that need no conversion.
    fn com_ty( &self ) -> Type
    {
        self.rust_ty()
    }
    /// Converts a COM parameter named by the ident into a Rust type.
    ///
    /// Default implementation delegates to `Into`, with no temporary value.
    fn com_to_rust(
        &self, ident : &Ident
    ) -> TypeConversion
    {
        TypeConversion {
            temporary: None,
            value: quote!( #ident.into() ),
        }
    }
    /// Converts a Rust parameter named by the ident into a COM type.
    ///
    /// Default implementation delegates to `Into`, with no temporary value.
    fn rust_to_com(
        &self, ident : &Ident
    ) -> TypeConversion
    {
        TypeConversion {
            temporary: None,
            value: quote!( #ident.into() )
        }
    }
    /// Gets the default value for the type.
    fn default_value( &self ) -> TokenStream
    {
        match self.rust_ty() {
            Type::Path( ref p ) => {
                let ident = p.path.get_ident().unwrap();
                let name = ident.to_string();
                match name.as_ref() {
                    // Pointer-like types default to a null pointer rather
                    // than `Default::default()`.
                    "c_void"
                    | "RawComPtr"
                    => quote!( ::std::ptr::null_mut() ),
                    _ => quote!( Default::default() )
                }
            },
            _ => quote!( Default::default() )
        }
    }
    /// Gets the type system the handler serves if the handler is type system specific. Returns
    /// None if the handler is type system agnostic.
    fn type_system( &self ) -> Option<ModelTypeSystem> { None }
}
/// Identity parameter handler.
///
/// No special logic. The parameter passes through unchanged using the
/// `TypeHandler` default implementations; only the Rust type is stored.
struct IdentityParam( Type );
impl TypeHandler for IdentityParam {
    fn rust_ty( &self ) -> Type { self.0.clone() }
}
/// `ComItf` parameter handler. Supports the `ComItf` Rust type and handles
/// conversion to/from the `RawComPtr` COM type.
struct ComItfParam { ty: Type, context: TypeContext }
impl TypeHandler for ComItfParam {
    fn rust_ty( &self ) -> Type { self.ty.clone() }
    /// The COM type.
    fn com_ty( &self ) -> Type
    {
        let rust_ty = self.rust_ty();
        // Extract the interface type T from the ComItf<T> type definition
        // NOTE(review): `.last().unwrap().value()` is the syn 0.x Punctuated
        // pair API, while `default_value` in the trait uses `get_ident`
        // (syn 1.0) — confirm which syn version the crate pins.
        use syn;
        let itf_ty = match rust_ty {
            syn::Type::Path( path ) =>
                match path.path.segments.last().unwrap().value().arguments {
                    syn::PathArguments::AngleBracketed( ref ab ) =>
                        match ab.args.last().unwrap().value() {
                            syn::GenericArgument::Type( ref t ) => t.clone(),
                            _ => panic!( "ComItf generic argument must be type" ),
                        },
                    _ => panic!( "ComItf type parameter must be angle bracketed" ),
                },
            _ => panic!( "ComItf type parameter must be a type path" ),
        };
        // Construct the final InterfacePtr<T> type.
        parse_quote!( ::intercom::raw::InterfacePtr< #itf_ty > )
    }
    /// Null interface pointer as the default value.
    fn default_value( &self ) -> TokenStream
    {
        quote!( ::intercom::raw::InterfacePtr::new( ::std::ptr::null_mut() ) )
    }
    /// Converts a COM parameter named by the ident into a Rust type.
    fn com_to_rust(
        &self, ident : &Ident
    ) -> TypeConversion
    {
        // The wrap/ptr calls need the type system to pick the right vtable.
        let ts = self.context.type_system.as_typesystem_tokens();
        TypeConversion {
            temporary: None,
            value: quote!( ::intercom::ComItf::wrap( #ident.ptr, #ts ) ),
        }
    }
    /// Converts a Rust parameter named by the ident into a COM type.
    fn rust_to_com(
        &self, ident : &Ident
    ) -> TypeConversion
    {
        let ts = self.context.type_system.as_typesystem_tokens();
        TypeConversion {
            temporary: None,
            value: quote!( ::intercom::ComItf::ptr( &#ident.into(), #ts ) )
        }
    }
}
/// String parameter handler. Converts between Rust String and COM BSTR types.
struct StringParam { ty: Type, context: TypeContext }
impl TypeHandler for StringParam
{
    fn rust_ty( &self ) -> Type { self.ty.clone() }
    /// In-parameters and out-parameters use different raw BSTR wrappers.
    fn com_ty( &self ) -> Type
    {
        match self.context.dir {
            Direction::In => parse_quote!( ::intercom::raw::InBSTR ),
            Direction::Out | Direction::Retval => parse_quote!( ::intercom::raw::OutBSTR ),
        }
    }
    fn com_to_rust( &self, ident : &Ident ) -> TypeConversion
    {
        match self.context.dir {
            Direction::In => {
                // In-parameters borrow the caller's BSTR: go through a
                // `&BStr` intermediate and a named temporary so the borrow
                // lives long enough for the call.
                let target_ty = self.rust_ty();
                let intermediate_ty = quote!( &::intercom::BStr );
                let to_intermediate = quote!( ::intercom::BStr::from_ptr( #ident ) );
                let as_trait = quote!( < #target_ty as ::intercom::FromWithTemporary< #intermediate_ty > > );
                let temp_ident = Ident::new( &format!( "__{}_temporary", ident.to_string() ), Span::call_site() );
                TypeConversion {
                    temporary: Some( quote!( let mut #temp_ident = #as_trait::to_temporary( #to_intermediate )?; ) ),
                    value: quote!( #as_trait::from_temporary( &mut #temp_ident )? ),
                }
            },
            Direction::Out | Direction::Retval => {
                // Out-parameters take ownership of the BSTR.
                TypeConversion {
                    temporary: None,
                    value: quote!( ::intercom::BString::from_ptr( #ident ).com_into()? ),
                }
            },
        }
    }
    fn rust_to_com( &self, ident : &Ident ) -> TypeConversion
    {
        match self.context.dir {
            Direction::In => {
                // Mirror of `com_to_rust`: borrow the Rust string as a BSTR
                // via a temporary, then hand out the raw pointer.
                let target_ty = self.rust_ty();
                let intermediate_ty = quote!( &::intercom::BStr );
                let as_trait = quote!( < #intermediate_ty as ::intercom::FromWithTemporary< #target_ty > > );
                let temp_ident = Ident::new( &format!( "__{}_temporary", ident.to_string() ), Span::call_site() );
                TypeConversion {
                    temporary: Some( quote!( let mut #temp_ident = #as_trait::to_temporary( #ident )?; ) ),
                    value: quote!( #as_trait::from_temporary( &mut #temp_ident )?.as_ptr() ),
                }
            },
            Direction::Out | Direction::Retval => {
                // Ownership of the allocated BSTR transfers to the caller.
                TypeConversion {
                    temporary: None,
                    value: quote!( ::intercom::BString::from( #ident ).into_ptr() ),
                }
            },
        }
    }
    /// Null BSTR as the default value.
    fn default_value( &self ) -> TokenStream
    {
        quote!( ::std::ptr::null_mut() )
    }
    /// String parameters differ between the type systems.
    fn type_system( &self ) -> Option<ModelTypeSystem> {
        Some( self.context.type_system )
    }
}
/// Resolves the `TypeHandler` to use for the given argument type.
pub fn get_ty_handler(
    arg_ty : &Type,
    context : TypeContext,
) -> Rc<TypeHandler>
{
    // A type the parser does not understand is a programming error in the
    // COM definition, so fail loudly.
    let type_info = match ::type_parser::parse( arg_ty ) {
        Some( info ) => info,
        None => panic!( "Type {:?} could not be parsed.", arg_ty ),
    };

    let name = type_info.get_name();
    map_by_name( name.as_ref(), type_info.original.clone(), context )
}
/// Selects the type handler based on the unqualified name of the type.
fn map_by_name(
    name: &str,
    original_type: Type,
    context: TypeContext,
) -> Rc<TypeHandler> {
    // Interface pointers get dedicated wrap/unwrap logic.
    if name == "ComItf" {
        return Rc::new( ComItfParam { ty: original_type, context } );
    }

    // All string-like types share the BSTR conversion handler.
    if [ "BString", "BStr", "String", "str" ].contains( &name ) {
        return Rc::new( StringParam { ty: original_type, context } );
    }

    // Anything unrecognized passes through unchanged.
    Rc::new( IdentityParam( original_type ) )
}
|
extern crate bio;
extern crate gte;
use gte::{GffType, GffReader,
ExonFeatureKind as EFK, Strand};
use Strand::*;
static SINGLE_GENE_GTF: &'static str = include_str!("data/single_gene.gtf");
#[test]
fn gtf_reader_multiple_transcripts() {
    let mut reader = GffReader::from_reader(SINGLE_GENE_GTF.as_bytes(), GffType::GTF2);
    // `transcripts()` takes no arguments in the current gte API; the old
    // five-argument form no longer compiles.
    let mut transcripts = reader.transcripts().expect("transcripts");

    // First transcript: two exons on the forward strand of chr2.
    let trx1 = transcripts.next().expect("a transcript result").expect("a transcript");
    assert_eq!(trx1.seq_name(), "chr2");
    assert_eq!(trx1.start(), 176188578);
    assert_eq!(trx1.end(), 176190907);
    assert_eq!(trx1.strand(), &Forward);
    assert_eq!(trx1.exons().len(), 2);

    // Expected ((start, end), kind) per feature of exon 1.
    let trx1_exon1 = [
        ((176188578, 176188801), EFK::UTR5),
        ((176188801, 176188804), EFK::StartCodon { frame: Some(0) }),
        ((176188801, 176189453), EFK::CDS { frame: Some(0) })];
    for (eidx, feat) in trx1.exons()[0].features().iter().enumerate() {
        assert_eq!(feat.start(), (trx1_exon1[eidx].0).0);
        assert_eq!(feat.end(), (trx1_exon1[eidx].0).1);
        assert_eq!(feat.kind(), &trx1_exon1[eidx].1)
    }

    // Expected features of exon 2.
    let trx1_exon2 = [
        ((176189807, 176190139), EFK::CDS { frame: Some(2) }),
        ((176190139, 176190142), EFK::StopCodon { frame: Some(0) }),
        ((176190142, 176190907), EFK::UTR3)];
    for (eidx, feat) in trx1.exons()[1].features().iter().enumerate() {
        assert_eq!(feat.start(), (trx1_exon2[eidx].0).0);
        assert_eq!(feat.end(), (trx1_exon2[eidx].0).1);
        assert_eq!(feat.kind(), &trx1_exon2[eidx].1)
    }

    // Second transcript: a single exon without annotated features.
    let trx2 = transcripts.next().expect("a transcript result").expect("a transcript");
    assert_eq!(trx2.seq_name(), "chr2");
    assert_eq!(trx2.start(), 176188842);
    assert_eq!(trx2.end(), 176188901);
    assert_eq!(trx2.strand(), &Forward);
    assert_eq!(trx2.exons().len(), 1);
    assert_eq!(trx2.exons()[0].features().len(), 0);

    // The fixture contains no further transcripts.
    assert!(transcripts.next().is_none());
}
Fix .transcripts() call in test
extern crate bio;
extern crate gte;
use gte::{GffType, GffReader,
ExonFeatureKind as EFK, Strand};
use Strand::*;
static SINGLE_GENE_GTF: &'static str = include_str!("data/single_gene.gtf");
#[test]
fn gtf_reader_multiple_transcripts() {
    // Stream transcripts out of the bundled single-gene GTF fixture.
    let mut reader = GffReader::from_reader(SINGLE_GENE_GTF.as_bytes(), GffType::GTF2);
    let mut transcripts = reader.transcripts().expect("transcripts");

    // First transcript: two exons on the forward strand of chr2.
    let first = transcripts.next().expect("a transcript result").expect("a transcript");
    assert_eq!(first.seq_name(), "chr2");
    assert_eq!(first.start(), 176188578);
    assert_eq!(first.end(), 176190907);
    assert_eq!(first.strand(), &Forward);
    assert_eq!(first.exons().len(), 2);

    // Expected ((start, end), kind) triples for the first exon's features.
    let expected_exon1 = [
        ((176188578, 176188801), EFK::UTR5),
        ((176188801, 176188804), EFK::StartCodon { frame: Some(0) }),
        ((176188801, 176189453), EFK::CDS { frame: Some(0) })];
    for (idx, feat) in first.exons()[0].features().iter().enumerate() {
        assert_eq!(feat.start(), (expected_exon1[idx].0).0);
        assert_eq!(feat.end(), (expected_exon1[idx].0).1);
        assert_eq!(feat.kind(), &expected_exon1[idx].1);
    }

    // Expected feature layout of the second exon.
    let expected_exon2 = [
        ((176189807, 176190139), EFK::CDS { frame: Some(2) }),
        ((176190139, 176190142), EFK::StopCodon { frame: Some(0) }),
        ((176190142, 176190907), EFK::UTR3)];
    for (idx, feat) in first.exons()[1].features().iter().enumerate() {
        assert_eq!(feat.start(), (expected_exon2[idx].0).0);
        assert_eq!(feat.end(), (expected_exon2[idx].0).1);
        assert_eq!(feat.kind(), &expected_exon2[idx].1);
    }

    // Second transcript: one exon without annotated features.
    let second = transcripts.next().expect("a transcript result").expect("a transcript");
    assert_eq!(second.seq_name(), "chr2");
    assert_eq!(second.start(), 176188842);
    assert_eq!(second.end(), 176188901);
    assert_eq!(second.strand(), &Forward);
    assert_eq!(second.exons().len(), 1);
    assert_eq!(second.exons()[0].features().len(), 0);

    // The fixture holds exactly two transcripts.
    assert!(transcripts.next().is_none());
}
|
// Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use crate::{
accept_type::AcceptType,
accounts::Account,
bcs_payload::Bcs,
context::Context,
failpoint::fail_point_poem,
generate_error_response, generate_success_response,
page::Page,
response::{
api_disabled, transaction_not_found_by_hash, transaction_not_found_by_version,
BadRequestError, BasicError, BasicErrorWith404, BasicResponse, BasicResponseStatus,
BasicResult, BasicResultWith404, InsufficientStorageError, InternalError,
},
ApiTags,
};
use anyhow::{anyhow, Context as AnyhowContext};
use aptos_api_types::{
verify_function_identifier, verify_module_identifier, Address, AptosError, AptosErrorCode,
AsConverter, EncodeSubmissionRequest, GasEstimation, HashValue, HexEncodedBytes, LedgerInfo,
MoveType, PendingTransaction, SubmitTransactionRequest, Transaction, TransactionData,
TransactionOnChainData, TransactionsBatchSingleSubmissionFailure,
TransactionsBatchSubmissionResult, UserTransaction, VerifyInput, VerifyInputWithRecursion,
MAX_RECURSIVE_TYPES_ALLOWED, U64,
};
use aptos_crypto::{hash::CryptoHash, signing_message};
use aptos_types::{
account_config::CoinStoreResource,
account_view::AccountView,
mempool_status::MempoolStatusCode,
transaction::{
ExecutionStatus, RawTransaction, RawTransactionWithData, SignedTransaction,
TransactionPayload, TransactionStatus,
},
vm_status::StatusCode,
};
use aptos_vm::AptosVM;
use poem_openapi::{
param::{Path, Query},
payload::Json,
ApiRequest, OpenApi,
};
use std::sync::Arc;
// Response and error types for the transaction submission endpoints.
// Single submission accepts with 202.
generate_success_response!(SubmitTransactionResponse, (202, Accepted));
generate_error_response!(
    SubmitTransactionError,
    (400, BadRequest),
    (403, Forbidden),
    (413, PayloadTooLarge),
    (500, Internal),
    (503, ServiceUnavailable),
    (507, InsufficientStorage)
);
type SubmitTransactionResult<T> =
    poem::Result<SubmitTransactionResponse<T>, SubmitTransactionError>;
// Batch submission can additionally report partial acceptance with 206.
generate_success_response!(
    SubmitTransactionsBatchResponse,
    (202, Accepted),
    (206, AcceptedPartial)
);
type SubmitTransactionsBatchResult<T> =
    poem::Result<SubmitTransactionsBatchResponse<T>, SubmitTransactionError>;
// Simulation reuses the basic response but shares the submission error set.
type SimulateTransactionResult<T> = poem::Result<BasicResponse<T>, SubmitTransactionError>;
// TODO: Consider making both content types accept either
// SubmitTransactionRequest or SignedTransaction, the way
// it is now is quite confusing.
// We need a custom type here because we use different types for each of the
// content types possible for the POST data.
// The Content-Type header selects the variant: JSON carries a
// SubmitTransactionRequest, BCS carries raw SignedTransaction bytes.
#[derive(ApiRequest, Debug)]
pub enum SubmitTransactionPost {
    #[oai(content_type = "application/json")]
    Json(Json<SubmitTransactionRequest>),
    // TODO: Since I don't want to impl all the Poem derives on SignedTransaction,
    // find a way to at least indicate in the spec that it expects a SignedTransaction.
    // TODO: https://github.com/aptos-labs/aptos-core/issues/2275
    #[oai(content_type = "application/x.aptos.signed_transaction+bcs")]
    Bcs(Bcs),
}
impl VerifyInput for SubmitTransactionPost {
    // JSON payloads are validated field-by-field; BCS payloads are opaque
    // bytes at this point and are checked after deserialization.
    fn verify(&self) -> anyhow::Result<()> {
        if let SubmitTransactionPost::Json(inner) = self {
            inner.0.verify()
        } else {
            Ok(())
        }
    }
}
// We need a custom type here because we use different types for each of the
// content types possible for the POST data.
// Batch variant of SubmitTransactionPost: JSON carries a list of requests,
// BCS carries concatenated SignedTransaction bytes.
#[derive(ApiRequest, Debug)]
pub enum SubmitTransactionsBatchPost {
    #[oai(content_type = "application/json")]
    Json(Json<Vec<SubmitTransactionRequest>>),
    // TODO: Since I don't want to impl all the Poem derives on SignedTransaction,
    // find a way to at least indicate in the spec that it expects a SignedTransaction.
    // TODO: https://github.com/aptos-labs/aptos-core/issues/2275
    #[oai(content_type = "application/x.aptos.signed_transaction+bcs")]
    Bcs(Bcs),
}
impl VerifyInput for SubmitTransactionsBatchPost {
    // Verifies every JSON request, stopping at the first invalid one.
    // BCS payloads are opaque here and are validated after decoding.
    fn verify(&self) -> anyhow::Result<()> {
        match self {
            SubmitTransactionsBatchPost::Json(inner) =>
                inner.0.iter().try_for_each(|request| request.verify()),
            SubmitTransactionsBatchPost::Bcs(_) => Ok(()),
        }
    }
}
/// API for interacting with transactions
pub struct TransactionsApi {
    // Shared server context handle used by every endpoint below.
    pub context: Arc<Context>,
}
#[OpenApi]
impl TransactionsApi {
    /// Get transactions
    ///
    /// Retrieve on-chain committed transactions. The page size and start can be provided to
    /// get a specific sequence of transactions.
    ///
    /// If the version has been pruned, then a 410 will be returned
    #[oai(
        path = "/transactions",
        method = "get",
        operation_id = "get_transactions",
        tag = "ApiTags::Transactions"
    )]
    async fn get_transactions(
        &self,
        accept_type: AcceptType,
        /// Ledger version to start list of transactions
        ///
        /// If not provided, defaults to showing the latest transactions
        start: Query<Option<U64>>,
        /// Max number of transactions to retrieve.
        ///
        /// If not provided, defaults to default page size
        limit: Query<Option<u16>>,
    ) -> BasicResultWith404<Vec<Transaction>> {
        fail_point_poem("endpoint_get_transactions")?;
        self.context
            .check_api_output_enabled("Get transactions", &accept_type)?;
        // Clamp paging to the node's configured maximum page size.
        let page = Page::new(
            start.0.map(|v| v.0),
            limit.0,
            self.context.max_transactions_page_size(),
        );
        self.list(&accept_type, page)
    }
    /// Get transaction by hash
    ///
    /// Look up a transaction by its hash. This is the same hash that is returned
    /// by the API when submitting a transaction (see PendingTransaction).
    ///
    /// When given a transaction hash, the server first looks for the transaction
    /// in storage (on-chain, committed). If no on-chain transaction is found, it
    /// looks the transaction up by hash in the mempool (pending, not yet committed).
    ///
    /// To create a transaction hash by yourself, do the following:
    /// 1. Hash message bytes: "RawTransaction" bytes + BCS bytes of [Transaction](https://aptos-labs.github.io/aptos-core/aptos_types/transaction/enum.Transaction.html).
    /// 2. Apply hash algorithm `SHA3-256` to the hash message bytes.
    /// 3. Hex-encode the hash bytes with `0x` prefix.
    // TODO: Include a link to an example of how to do this ^
    #[oai(
        path = "/transactions/by_hash/:txn_hash",
        method = "get",
        operation_id = "get_transaction_by_hash",
        tag = "ApiTags::Transactions"
    )]
    async fn get_transaction_by_hash(
        &self,
        accept_type: AcceptType,
        /// Hash of transaction to retrieve
        txn_hash: Path<HashValue>,
        // TODO: Use a new request type that can't return 507.
    ) -> BasicResultWith404<Transaction> {
        fail_point_poem("endpoint_transaction_by_hash")?;
        self.context
            .check_api_output_enabled("Get transactions by hash", &accept_type)?;
        // Storage-then-mempool lookup lives in the inner helper.
        self.get_transaction_by_hash_inner(&accept_type, txn_hash.0)
            .await
    }
    /// Get transaction by version
    ///
    /// Retrieves a transaction by a given version. If the version has been pruned, a 410 will
    /// be returned.
    #[oai(
        path = "/transactions/by_version/:txn_version",
        method = "get",
        operation_id = "get_transaction_by_version",
        tag = "ApiTags::Transactions"
    )]
    async fn get_transaction_by_version(
        &self,
        accept_type: AcceptType,
        /// Version of transaction to retrieve
        txn_version: Path<U64>,
    ) -> BasicResultWith404<Transaction> {
        fail_point_poem("endpoint_transaction_by_version")?;
        self.context
            .check_api_output_enabled("Get transactions by version", &accept_type)?;
        // Version lookup and 404 handling live in the inner helper.
        self.get_transaction_by_version_inner(&accept_type, txn_version.0)
            .await
    }
    /// Get account transactions
    ///
    /// Retrieves transactions from an account. If the start version is too far in the past
    /// a 410 will be returned.
    ///
    /// If no start version is given, it will start at 0
    #[oai(
        path = "/accounts/:address/transactions",
        method = "get",
        operation_id = "get_account_transactions",
        tag = "ApiTags::Transactions"
    )]
    // NOTE(review): fn name says "accounts" but the operation_id says
    // "account" — the operation_id is the public contract, so renaming the
    // fn for consistency would be harmless; confirm before changing.
    async fn get_accounts_transactions(
        &self,
        accept_type: AcceptType,
        /// Address of account with or without a `0x` prefix
        address: Path<Address>,
        /// Ledger version to start list of transactions
        ///
        /// If not provided, defaults to showing the latest transactions
        start: Query<Option<U64>>,
        /// Max number of transactions to retrieve.
        ///
        /// If not provided, defaults to default page size
        limit: Query<Option<u16>>,
    ) -> BasicResultWith404<Vec<Transaction>> {
        fail_point_poem("endpoint_get_accounts_transactions")?;
        self.context
            .check_api_output_enabled("Get account transactions", &accept_type)?;
        // Clamp paging to the node's configured maximum page size.
        let page = Page::new(
            start.0.map(|v| v.0),
            limit.0,
            self.context.max_transactions_page_size(),
        );
        self.list_by_account(&accept_type, page, address.0)
    }
/// Submit transaction
///
/// This endpoint accepts transaction submissions in two formats.
///
/// To submit a transaction as JSON, you must submit a SubmitTransactionRequest.
/// To build this request, do the following:
///
/// 1. Encode the transaction as BCS. If you are using a language that has
/// native BCS support, make sure of that library. If not, you may take
/// advantage of /transactions/encode_submission. When using this
/// endpoint, make sure you trust the node you're talking to, as it is
/// possible they could manipulate your request.
/// 2. Sign the encoded transaction and use it to create a TransactionSignature.
/// 3. Submit the request. Make sure to use the "application/json" Content-Type.
///
/// To submit a transaction as BCS, you must submit a SignedTransaction
/// encoded as BCS. See SignedTransaction in types/src/transaction/mod.rs.
/// Make sure to use the `application/x.aptos.signed_transaction+bcs` Content-Type.
// TODO: Point to examples of both of these flows, in multiple languages.
#[oai(
path = "/transactions",
method = "post",
operation_id = "submit_transaction",
tag = "ApiTags::Transactions"
)]
async fn submit_transaction(
&self,
accept_type: AcceptType,
data: SubmitTransactionPost,
) -> SubmitTransactionResult<PendingTransaction> {
data.verify()
.context("Submitted transaction invalid'")
.map_err(|err| {
SubmitTransactionError::bad_request_with_code_no_info(
err,
AptosErrorCode::InvalidInput,
)
})?;
fail_point_poem("endpoint_submit_transaction")?;
self.context
.check_api_output_enabled("Submit transaction", &accept_type)?;
if !self.context.node_config.api.transaction_submission_enabled {
return Err(api_disabled("Submit transaction"));
}
let ledger_info = self.context.get_latest_ledger_info()?;
let signed_transaction = self.get_signed_transaction(&ledger_info, data)?;
self.create(&accept_type, &ledger_info, signed_transaction)
.await
}
    /// Submit batch transactions
    ///
    /// This allows you to submit multiple transactions. The response has three outcomes:
    ///
    /// 1. All transactions succeed, and it will return a 202
    /// 2. Some transactions succeed, and it will return the failed transactions and a 206
    /// 3. No transactions succeed, and it will also return the failed transactions and a 206
    ///
    /// To submit a transaction as JSON, you must submit a SubmitTransactionRequest.
    /// To build this request, do the following:
    ///
    /// 1. Encode the transaction as BCS. If you are using a language that has
    /// native BCS support, make sure to use that library. If not, you may take
    /// advantage of /transactions/encode_submission. When using this
    /// endpoint, make sure you trust the node you're talking to, as it is
    /// possible they could manipulate your request.
    /// 2. Sign the encoded transaction and use it to create a TransactionSignature.
    /// 3. Submit the request. Make sure to use the "application/json" Content-Type.
    ///
    /// To submit a transaction as BCS, you must submit a SignedTransaction
    /// encoded as BCS. See SignedTransaction in types/src/transaction/mod.rs.
    /// Make sure to use the `application/x.aptos.signed_transaction+bcs` Content-Type.
    #[oai(
        path = "/transactions/batch",
        method = "post",
        operation_id = "submit_batch_transactions",
        tag = "ApiTags::Transactions"
    )]
    async fn submit_transactions_batch(
        &self,
        accept_type: AcceptType,
        data: SubmitTransactionsBatchPost,
    ) -> SubmitTransactionsBatchResult<TransactionsBatchSubmissionResult> {
        // Validate every request in the batch before doing any heavier work.
        data.verify()
            .context("Submitted transactions invalid")
            .map_err(|err| {
                SubmitTransactionError::bad_request_with_code_no_info(
                    err,
                    AptosErrorCode::InvalidInput,
                )
            })?;
        fail_point_poem("endpoint_submit_batch_transactions")?;
        self.context
            .check_api_output_enabled("Submit batch transactions", &accept_type)?;
        if !self.context.node_config.api.transaction_submission_enabled {
            return Err(api_disabled("Submit batch transaction"));
        }
        let ledger_info = self.context.get_latest_ledger_info()?;
        let signed_transactions_batch = self.get_signed_transactions_batch(&ledger_info, data)?;
        // Reject oversized batches with a 400 rather than truncating.
        if self.context.max_submit_transaction_batch_size() < signed_transactions_batch.len() {
            return Err(SubmitTransactionError::bad_request_with_code(
                &format!(
                    "Submitted too many transactions: {}, while limit is {}",
                    signed_transactions_batch.len(),
                    self.context.max_submit_transaction_batch_size(),
                ),
                AptosErrorCode::InvalidInput,
                &ledger_info,
            ));
        }
        self.create_batch(&accept_type, &ledger_info, signed_transactions_batch)
            .await
    }
    /// Simulate transaction
    ///
    /// The output of the transaction will have the exact transaction outputs and events that running
    /// an actual signed transaction would have. However, it will not have the associated state
    /// hashes, as they are not updated in storage. This can be used to estimate the maximum gas
    /// units for a submitted transaction.
    ///
    /// To use this, you must:
    /// - Create a SignedTransaction with a zero-padded signature.
    /// - Submit a SubmitTransactionRequest containing a UserTransactionRequest containing that signature.
    ///
    /// To use this endpoint with BCS, you must submit a SignedTransaction
    /// encoded as BCS. See SignedTransaction in types/src/transaction/mod.rs.
    #[oai(
        path = "/transactions/simulate",
        method = "post",
        operation_id = "simulate_transaction",
        tag = "ApiTags::Transactions"
    )]
    async fn simulate_transaction(
        &self,
        accept_type: AcceptType,
        /// If set to true, the max gas value in the transaction will be ignored
        /// and the maximum possible gas will be used
        estimate_max_gas_amount: Query<Option<bool>>,
        /// If set to true, the gas unit price in the transaction will be ignored
        /// and the estimated value will be used
        estimate_gas_unit_price: Query<Option<bool>>,
        data: SubmitTransactionPost,
    ) -> SimulateTransactionResult<Vec<UserTransaction>> {
        data.verify()
            .context("Simulated transaction invalid")
            .map_err(|err| {
                SubmitTransactionError::bad_request_with_code_no_info(
                    err,
                    AptosErrorCode::InvalidInput,
                )
            })?;
        fail_point_poem("endpoint_simulate_transaction")?;
        self.context
            .check_api_output_enabled("Simulate transaction", &accept_type)?;
        if !self.context.node_config.api.transaction_simulation_enabled {
            return Err(api_disabled("Simulate transaction"));
        }
        let ledger_info = self.context.get_latest_ledger_info()?;
        let mut signed_transaction = self.get_signed_transaction(&ledger_info, data)?;
        // Optionally replace the caller-supplied gas unit price with the
        // node's current estimate.
        let estimated_gas_unit_price = if estimate_gas_unit_price.0.unwrap_or_default() {
            Some(self.context.estimate_gas_price(&ledger_info)?)
        } else {
            None
        };
        // If estimate max gas amount is provided, we will just make it the maximum value
        let estimated_max_gas_amount = if estimate_max_gas_amount.0.unwrap_or_default() {
            // Retrieve max possible gas units
            let gas_params = self.context.get_gas_schedule(&ledger_info)?;
            let max_number_of_gas_units = u64::from(gas_params.txn.maximum_number_of_gas_units);
            // Retrieve account balance to determine max gas available
            let account_state = self
                .context
                .get_account_state(
                    signed_transaction.sender(),
                    ledger_info.version(),
                    &ledger_info,
                )?
                .ok_or_else(|| {
                    SubmitTransactionError::bad_request_with_code(
                        "Account not found",
                        AptosErrorCode::InvalidInput,
                        &ledger_info,
                    )
                })?;
            let coin_store: CoinStoreResource = account_state
                .get_coin_store_resource()
                .and_then(|inner| {
                    inner.ok_or_else(|| {
                        anyhow!(
                            "No coin store found for account {}",
                            signed_transaction.sender()
                        )
                    })
                })
                .map_err(|err| {
                    SubmitTransactionError::internal_with_code(
                        format!("Failed to get coin store resource {}", err),
                        AptosErrorCode::InternalError,
                        &ledger_info,
                    )
                })?;
            // Prefer the estimated price (if requested above) when converting
            // the account balance into affordable gas units.
            let gas_unit_price =
                estimated_gas_unit_price.unwrap_or_else(|| signed_transaction.gas_unit_price());
            // With 0 gas price, we set it to max gas units, since we can't divide by 0
            let max_account_gas_units = if gas_unit_price == 0 {
                coin_store.coin()
            } else {
                coin_store.coin() / gas_unit_price
            };
            // Minimum of the max account and the max total needs to be used for estimation
            Some(std::cmp::min(
                max_account_gas_units,
                max_number_of_gas_units,
            ))
        } else {
            None
        };
        // If there is an estimation of either, replace the values
        if estimated_max_gas_amount.is_some() || estimated_gas_unit_price.is_some() {
            signed_transaction = override_gas_parameters(
                &signed_transaction,
                estimated_max_gas_amount,
                estimated_gas_unit_price,
            );
        }
        self.simulate(&accept_type, ledger_info, signed_transaction)
            .await
    }
    /// Encode submission
    ///
    /// This endpoint accepts an EncodeSubmissionRequest, which internally is a
    /// UserTransactionRequestInner (and optionally secondary signers) encoded
    /// as JSON, validates the request format, and then returns that request
    /// encoded in BCS. The client can then use this to create a transaction
    /// signature to be used in a SubmitTransactionRequest, which it then
    /// passes to the /transactions POST endpoint.
    ///
    /// To be clear, this endpoint makes it possible to submit transaction
    /// requests to the API from languages that do not have library support for
    /// BCS. If you are using an SDK that has BCS support, such as the official
    /// Rust, TypeScript, or Python SDKs, you do not need to use this endpoint.
    ///
    /// To sign a message using the response from this endpoint:
    /// - Decode the hex encoded string in the response to bytes.
    /// - Sign the bytes to create the signature.
    /// - Use that as the signature field in something like Ed25519Signature, which you then use to build a TransactionSignature.
    //
    // TODO: Link an example of how to do this. Use externalDoc.
    #[oai(
        path = "/transactions/encode_submission",
        method = "post",
        operation_id = "encode_submission",
        tag = "ApiTags::Transactions"
    )]
    async fn encode_submission(
        &self,
        accept_type: AcceptType,
        data: Json<EncodeSubmissionRequest>,
        // TODO: Use a new request type that can't return 507 but still returns all the other necessary errors.
    ) -> BasicResult<HexEncodedBytes> {
        // Validate the request before touching any node state.
        data.0
            .verify()
            .context("'UserTransactionRequest' invalid")
            .map_err(|err| {
                BasicError::bad_request_with_code_no_info(err, AptosErrorCode::InvalidInput)
            })?;
        fail_point_poem("endpoint_encode_submission")?;
        self.context
            .check_api_output_enabled("Encode submission", &accept_type)?;
        if !self.context.node_config.api.encode_submission_enabled {
            return Err(api_disabled("Encode submission"));
        }
        self.get_signing_message(&accept_type, data.0)
    }
/// Estimate gas price
///
/// Currently, the gas estimation is handled by taking the median of the last 100,000 transactions
/// If a user wants to prioritize their transaction and is willing to pay, they can pay more
/// than the gas price. If they're willing to wait longer, they can pay less. Note that the
/// gas price moves with the fee market, and should only increase when demand outweighs supply.
///
/// If there have been no transactions in the last 100,000 transactions, the price will be 1.
#[oai(
path = "/estimate_gas_price",
method = "get",
operation_id = "estimate_gas_price",
tag = "ApiTags::Transactions"
)]
async fn estimate_gas_price(&self, accept_type: AcceptType) -> BasicResult<GasEstimation> {
fail_point_poem("endpoint_encode_submission")?;
self.context
.check_api_output_enabled("Estimate gas price", &accept_type)?;
let latest_ledger_info = self.context.get_latest_ledger_info()?;
let estimated_gas_price = self.context.estimate_gas_price(&latest_ledger_info)?;
// TODO: Do we want to give more than just a single gas price? Percentiles?
let gas_estimation = GasEstimation {
gas_estimate: estimated_gas_price,
};
match accept_type {
AcceptType::Json => BasicResponse::try_from_json((
gas_estimation,
&latest_ledger_info,
BasicResponseStatus::Ok,
)),
AcceptType::Bcs => BasicResponse::try_from_bcs((
gas_estimation,
&latest_ledger_info,
BasicResponseStatus::Ok,
)),
}
}
}
impl TransactionsApi {
    /// List all transactions paging by ledger version
    ///
    /// Resolves the page against the latest ledger version, reads the raw
    /// transactions from storage, and renders them per the accept type.
    fn list(&self, accept_type: &AcceptType, page: Page) -> BasicResultWith404<Vec<Transaction>> {
        let latest_ledger_info = self.context.get_latest_ledger_info()?;
        let ledger_version = latest_ledger_info.version();
        let limit = page.limit(&latest_ledger_info)?;
        let start_version = page.compute_start(limit, ledger_version, &latest_ledger_info)?;
        let data = self
            .context
            .get_transactions(start_version, limit, ledger_version)
            .context("Failed to read raw transactions from storage")
            .map_err(|err| {
                BasicErrorWith404::internal_with_code(
                    err,
                    AptosErrorCode::InternalError,
                    &latest_ledger_info,
                )
            })?;
        match accept_type {
            AcceptType::Json => {
                // JSON rendering needs block timestamps to annotate the txns.
                let timestamp = self
                    .context
                    .get_block_timestamp(&latest_ledger_info, start_version)?;
                BasicResponse::try_from_json((
                    self.context.render_transactions_sequential(
                        &latest_ledger_info,
                        data,
                        timestamp,
                    )?,
                    &latest_ledger_info,
                    BasicResponseStatus::Ok,
                ))
            }
            AcceptType::Bcs => {
                BasicResponse::try_from_bcs((data, &latest_ledger_info, BasicResponseStatus::Ok))
            }
        }
    }
    // Looks a transaction up by hash (committed storage first, then mempool)
    // and converts it into the response format. A missing transaction maps
    // to a 404 rather than an internal error.
    async fn get_transaction_by_hash_inner(
        &self,
        accept_type: &AcceptType,
        hash: HashValue,
    ) -> BasicResultWith404<Transaction> {
        let ledger_info = self.context.get_latest_ledger_info()?;
        let txn_data = self
            .get_by_hash(hash.into(), &ledger_info)
            .await
            .context(format!("Failed to get transaction by hash {}", hash))
            .map_err(|err| {
                BasicErrorWith404::internal_with_code(
                    err,
                    AptosErrorCode::InternalError,
                    &ledger_info,
                )
            })?
            .context(format!("Failed to find transaction with hash: {}", hash))
            .map_err(|_| transaction_not_found_by_hash(hash, &ledger_info))?;
        self.get_transaction_inner(accept_type, txn_data, &ledger_info)
            .await
    }
    // Looks a transaction up by ledger version and converts it into the
    // response format. Versions beyond the latest ledger version map to 404.
    async fn get_transaction_by_version_inner(
        &self,
        accept_type: &AcceptType,
        version: U64,
    ) -> BasicResultWith404<Transaction> {
        let ledger_info = self.context.get_latest_ledger_info()?;
        let txn_data = self
            .get_by_version(version.0, &ledger_info)
            .context(format!("Failed to get transaction by version {}", version))
            .map_err(|err| {
                BasicErrorWith404::internal_with_code(
                    err,
                    AptosErrorCode::InternalError,
                    &ledger_info,
                )
            })?
            .context(format!(
                "Failed to find transaction at version: {}",
                version
            ))
            .map_err(|_| transaction_not_found_by_version(version.0, &ledger_info))?;
        self.get_transaction_inner(accept_type, txn_data, &ledger_info)
            .await
    }
/// Converts a transaction into the outgoing type
async fn get_transaction_inner(
&self,
accept_type: &AcceptType,
transaction_data: TransactionData,
ledger_info: &LedgerInfo,
) -> BasicResultWith404<Transaction> {
match accept_type {
AcceptType::Json => {
let resolver = self.context.move_resolver_poem(ledger_info)?;
let transaction = match transaction_data {
TransactionData::OnChain(txn) => {
let timestamp =
self.context.get_block_timestamp(ledger_info, txn.version)?;
resolver
.as_converter(self.context.db.clone())
.try_into_onchain_transaction(timestamp, txn)
.context("Failed to convert on chain transaction to Transaction")
.map_err(|err| {
BasicErrorWith404::internal_with_code(
err,
AptosErrorCode::InternalError,
ledger_info,
)
})?
}
TransactionData::Pending(txn) => resolver
.as_converter(self.context.db.clone())
.try_into_pending_transaction(*txn)
.context("Failed to convert on pending transaction to Transaction")
.map_err(|err| {
BasicErrorWith404::internal_with_code(
err,
AptosErrorCode::InternalError,
ledger_info,
)
})?,
};
BasicResponse::try_from_json((transaction, ledger_info, BasicResponseStatus::Ok))
}
AcceptType::Bcs => BasicResponse::try_from_bcs((
transaction_data,
ledger_info,
BasicResponseStatus::Ok,
)),
}
}
/// Retrieves a transaction by ledger version
fn get_by_version(
&self,
version: u64,
ledger_info: &LedgerInfo,
) -> anyhow::Result<Option<TransactionData>> {
if version > ledger_info.version() {
return Ok(None);
}
Ok(Some(
self.context
.get_transaction_by_version(version, ledger_info.version())?
.into(),
))
}
/// Retrieves a transaction by hash. First the node tries to find the transaction
/// in the DB. If the transaction is found there, it means the transaction is
/// committed. If it is not found there, it looks in mempool. If it is found there,
/// it means the transaction is still pending.
async fn get_by_hash(
&self,
hash: aptos_crypto::HashValue,
ledger_info: &LedgerInfo,
) -> anyhow::Result<Option<TransactionData>> {
let from_db = self
.context
.get_transaction_by_hash(hash, ledger_info.version())?;
Ok(match from_db {
None => self
.context
.get_pending_transaction_by_hash(hash)
.await?
.map(|t| t.into()),
_ => from_db.map(|t| t.into()),
})
}
    /// List all transactions for an account
    ///
    /// Verifies the account exists (so unknown accounts fail early rather
    /// than returning an empty list), then pages through its transactions.
    fn list_by_account(
        &self,
        accept_type: &AcceptType,
        page: Page,
        address: Address,
    ) -> BasicResultWith404<Vec<Transaction>> {
        // Verify the account exists
        let account = Account::new(self.context.clone(), address, None)?;
        account.account_state()?;
        // Reuse the ledger info the account lookup was made against so the
        // pagination is consistent with the existence check above.
        let latest_ledger_info = account.latest_ledger_info;
        // TODO: Return more specific errors from within this function.
        let data = self.context.get_account_transactions(
            address.into(),
            page.start_option(),
            page.limit(&latest_ledger_info)?,
            latest_ledger_info.version(),
            &latest_ledger_info,
        )?;
        match accept_type {
            // JSON renders the raw data into API types via the converter.
            AcceptType::Json => BasicResponse::try_from_json((
                self.context
                    .render_transactions_non_sequential(&latest_ledger_info, data)?,
                &latest_ledger_info,
                BasicResponseStatus::Ok,
            )),
            // BCS returns the stored representation without rendering.
            AcceptType::Bcs => {
                BasicResponse::try_from_bcs((data, &latest_ledger_info, BasicResponseStatus::Ok))
            }
        }
    }
    /// Parses a single signed transaction
    ///
    /// BCS input is deserialized directly into a `SignedTransaction` (with a
    /// bounded recursion depth) and its payload identifiers/type args are
    /// validated; JSON input is converted through the Move resolver. All
    /// failures map to a 400 with `InvalidInput`.
    fn get_signed_transaction(
        &self,
        ledger_info: &LedgerInfo,
        data: SubmitTransactionPost,
    ) -> Result<SignedTransaction, SubmitTransactionError> {
        match data {
            SubmitTransactionPost::Bcs(data) => {
                // Bound recursion depth to protect against maliciously
                // nested BCS input.
                let signed_transaction: SignedTransaction =
                    bcs::from_bytes_with_limit(&data.0, MAX_RECURSIVE_TYPES_ALLOWED as usize)
                        .context("Failed to deserialize input into SignedTransaction")
                        .map_err(|err| {
                            SubmitTransactionError::bad_request_with_code(
                                err,
                                AptosErrorCode::InvalidInput,
                                ledger_info,
                            )
                        })?;
                // Verify the signed transaction
                match signed_transaction.payload() {
                    TransactionPayload::EntryFunction(entry_function) => {
                        // Module and function names must be valid Move identifiers.
                        verify_module_identifier(entry_function.module().name().as_str())
                            .context("Transaction entry function module invalid")
                            .map_err(|err| {
                                SubmitTransactionError::bad_request_with_code(
                                    err,
                                    AptosErrorCode::InvalidInput,
                                    ledger_info,
                                )
                            })?;
                        verify_function_identifier(entry_function.function().as_str())
                            .context("Transaction entry function name invalid")
                            .map_err(|err| {
                                SubmitTransactionError::bad_request_with_code(
                                    err,
                                    AptosErrorCode::InvalidInput,
                                    ledger_info,
                                )
                            })?;
                        // Each type argument is checked for bounded recursion
                        // and validity.
                        for arg in entry_function.ty_args() {
                            let arg: MoveType = arg.into();
                            arg.verify(0)
                                .context("Transaction entry function type arg invalid")
                                .map_err(|err| {
                                    SubmitTransactionError::bad_request_with_code(
                                        err,
                                        AptosErrorCode::InvalidInput,
                                        ledger_info,
                                    )
                                })?;
                        }
                    }
                    TransactionPayload::Script(script) => {
                        // An empty script can never execute; reject it up front.
                        if script.code().is_empty() {
                            return Err(SubmitTransactionError::bad_request_with_code(
                                "Script payload bytecode must not be empty",
                                AptosErrorCode::InvalidInput,
                                ledger_info,
                            ));
                        }
                        for arg in script.ty_args() {
                            let arg = MoveType::from(arg);
                            arg.verify(0)
                                .context("Transaction script function type arg invalid")
                                .map_err(|err| {
                                    SubmitTransactionError::bad_request_with_code(
                                        err,
                                        AptosErrorCode::InvalidInput,
                                        ledger_info,
                                    )
                                })?;
                        }
                    }
                    // Module bundles get no extra validation here.
                    TransactionPayload::ModuleBundle(_) => {}
                }
                // TODO: Verify script args?
                Ok(signed_transaction)
            }
            SubmitTransactionPost::Json(data) => self
                .context
                .move_resolver_poem(ledger_info)?
                .as_converter(self.context.db.clone())
                .try_into_signed_transaction_poem(data.0, self.context.chain_id())
                .context("Failed to create SignedTransaction from SubmitTransactionRequest")
                .map_err(|err| {
                    SubmitTransactionError::bad_request_with_code(
                        err,
                        AptosErrorCode::InvalidInput,
                        ledger_info,
                    )
                }),
        }
    }
/// Parses a batch of signed transactions
fn get_signed_transactions_batch(
&self,
ledger_info: &LedgerInfo,
data: SubmitTransactionsBatchPost,
) -> Result<Vec<SignedTransaction>, SubmitTransactionError> {
match data {
SubmitTransactionsBatchPost::Bcs(data) => {
let signed_transactions = bcs::from_bytes(&data.0)
.context("Failed to deserialize input into SignedTransaction")
.map_err(|err| {
SubmitTransactionError::bad_request_with_code(
err,
AptosErrorCode::InvalidInput,
ledger_info,
)
})?;
Ok(signed_transactions)
}
SubmitTransactionsBatchPost::Json(data) => data
.0
.into_iter()
.enumerate()
.map(|(index, txn)| {
self.context
.move_resolver_poem(ledger_info)?
.as_converter(self.context.db.clone())
.try_into_signed_transaction_poem(txn, self.context.chain_id())
.context(format!("Failed to create SignedTransaction from SubmitTransactionRequest at position {}", index))
.map_err(|err| {
SubmitTransactionError::bad_request_with_code(
err,
AptosErrorCode::InvalidInput,
ledger_info,
)
})
})
.collect(),
}
}
    /// Submits a single transaction, and converts mempool codes to errors
    ///
    /// Maps each `MempoolStatusCode` to the corresponding `AptosErrorCode`
    /// so callers can translate it into an HTTP status. The match is
    /// exhaustive on purpose: adding a mempool status forces a decision here.
    async fn create_internal(&self, txn: SignedTransaction) -> Result<(), AptosError> {
        let (mempool_status, vm_status_opt) = self
            .context
            .submit_transaction(txn)
            .await
            .context("Mempool failed to initially evaluate submitted transaction")
            .map_err(|err| {
                aptos_api_types::AptosError::new_with_error_code(err, AptosErrorCode::InternalError)
            })?;
        match mempool_status.code {
            MempoolStatusCode::Accepted => Ok(()),
            // Both overload conditions surface as "mempool is full".
            MempoolStatusCode::MempoolIsFull | MempoolStatusCode::TooManyTransactions => {
                Err(AptosError::new_with_error_code(
                    &mempool_status.message,
                    AptosErrorCode::MempoolIsFull,
                ))
            }
            MempoolStatusCode::VmError => {
                // Include the concrete VM status when mempool provided one.
                if let Some(status) = vm_status_opt {
                    Err(AptosError::new_with_vm_status(
                        format!(
                            "Invalid transaction: Type: {:?} Code: {:?}",
                            status.status_type(),
                            status
                        ),
                        AptosErrorCode::VmError,
                        status,
                    ))
                } else {
                    Err(AptosError::new_with_vm_status(
                        "Invalid transaction: unknown",
                        AptosErrorCode::VmError,
                        StatusCode::UNKNOWN_STATUS,
                    ))
                }
            }
            MempoolStatusCode::InvalidSeqNumber => Err(AptosError::new_with_error_code(
                mempool_status.message,
                AptosErrorCode::SequenceNumberTooOld,
            )),
            MempoolStatusCode::InvalidUpdate => Err(AptosError::new_with_error_code(
                mempool_status.message,
                AptosErrorCode::InvalidTransactionUpdate,
            )),
            MempoolStatusCode::UnknownStatus => Err(AptosError::new_with_error_code(
                format!("Transaction was rejected with status {}", mempool_status,),
                AptosErrorCode::InternalError,
            )),
        }
    }
    /// Submits a single transaction
    ///
    /// On acceptance, JSON responses include the `PendingTransaction` (so the
    /// caller gets the hash); BCS responses are empty because the hash can be
    /// recomputed from the submitted bytes. On rejection, the error code from
    /// `create_internal` is mapped to the appropriate HTTP error class.
    async fn create(
        &self,
        accept_type: &AcceptType,
        ledger_info: &LedgerInfo,
        txn: SignedTransaction,
    ) -> SubmitTransactionResult<PendingTransaction> {
        match self.create_internal(txn.clone()).await {
            Ok(()) => match accept_type {
                AcceptType::Json => {
                    let resolver = self
                        .context
                        .move_resolver()
                        .context("Failed to read latest state checkpoint from DB")
                        .map_err(|e| {
                            SubmitTransactionError::internal_with_code(
                                e,
                                AptosErrorCode::InternalError,
                                ledger_info,
                            )
                        })?;
                    // We provide the pending transaction so that users have the hash associated
                    let pending_txn = resolver
                        .as_converter(self.context.db.clone())
                        .try_into_pending_transaction_poem(txn)
                        .context("Failed to build PendingTransaction from mempool response, even though it said the request was accepted")
                        .map_err(|err| SubmitTransactionError::internal_with_code(
                            err,
                            AptosErrorCode::InternalError,
                            ledger_info,
                        ))?;
                    SubmitTransactionResponse::try_from_json((
                        pending_txn,
                        ledger_info,
                        SubmitTransactionResponseStatus::Accepted,
                    ))
                }
                // With BCS, we don't return the pending transaction for efficiency, because there
                // is no new information. The hash can be retrieved by hashing the original
                // transaction.
                AcceptType::Bcs => SubmitTransactionResponse::try_from_bcs((
                    (),
                    ledger_info,
                    SubmitTransactionResponseStatus::Accepted,
                )),
            },
            // Map the submission error code onto the matching HTTP error.
            Err(error) => match error.error_code {
                AptosErrorCode::InternalError => Err(
                    SubmitTransactionError::internal_from_aptos_error(error, ledger_info),
                ),
                // Caller-fixable problems are 400s.
                AptosErrorCode::VmError
                | AptosErrorCode::SequenceNumberTooOld
                | AptosErrorCode::InvalidTransactionUpdate => Err(
                    SubmitTransactionError::bad_request_from_aptos_error(error, ledger_info),
                ),
                // A full mempool maps to 507 Insufficient Storage.
                AptosErrorCode::MempoolIsFull => Err(
                    SubmitTransactionError::insufficient_storage_from_aptos_error(
                        error,
                        ledger_info,
                    ),
                ),
                // Anything unexpected is treated as internal.
                _ => Err(SubmitTransactionError::internal_from_aptos_error(
                    error,
                    ledger_info,
                )),
            },
        }
    }
/// Submits a batch of transactions
async fn create_batch(
&self,
accept_type: &AcceptType,
ledger_info: &LedgerInfo,
txns: Vec<SignedTransaction>,
) -> SubmitTransactionsBatchResult<TransactionsBatchSubmissionResult> {
// Iterate through transactions keeping track of failures
let mut txn_failures = Vec::new();
for (idx, txn) in txns.iter().enumerate() {
if let Err(error) = self.create_internal(txn.clone()).await {
txn_failures.push(TransactionsBatchSingleSubmissionFailure {
error,
transaction_index: idx,
})
}
}
// Return the possible failures, and have a different success code for partial success
let response_status = if txn_failures.is_empty() {
SubmitTransactionsBatchResponseStatus::Accepted
} else {
// TODO: This should really throw an error if all fail
SubmitTransactionsBatchResponseStatus::AcceptedPartial
};
SubmitTransactionsBatchResponse::try_from_rust_value((
TransactionsBatchSubmissionResult {
transaction_failures: txn_failures,
},
ledger_info,
response_status,
accept_type,
))
}
    // TODO: This function leverages a lot of types from aptos_types, use the
    // local API types and just return those directly, instead of converting
    // from these types in render_transactions.
    /// Simulate a transaction in the VM
    ///
    /// Note: this returns a `Vec<UserTransaction>`, but for backwards compatibility, this can't
    /// be removed even though, there is only one possible transaction
    pub async fn simulate(
        &self,
        accept_type: &AcceptType,
        ledger_info: LedgerInfo,
        txn: SignedTransaction,
    ) -> SimulateTransactionResult<Vec<UserTransaction>> {
        // Transactions shouldn't have a valid signature or this could be used to attack
        if txn.signature_is_valid() {
            return Err(SubmitTransactionError::bad_request_with_code(
                "Simulated transactions must have a non-valid signature",
                AptosErrorCode::InvalidInput,
                &ledger_info,
            ));
        }
        // Simulate transaction
        let move_resolver = self.context.move_resolver_poem(&ledger_info)?;
        let (_, output_ext) = AptosVM::simulate_signed_transaction(&txn, &move_resolver);
        let version = ledger_info.version();
        // Apply transaction outputs to build up a transaction
        // TODO: while `into_transaction_output_with_status()` should never fail
        // to apply deltas, we should propagate errors properly. Fix this when
        // VM error handling is fixed.
        let output = output_ext.into_transaction_output(&move_resolver);
        // Ensure that all known statuses return their values in the output (even if they aren't supposed to)
        let exe_status = match output.status().clone() {
            TransactionStatus::Keep(exec_status) => exec_status,
            TransactionStatus::Discard(status) => ExecutionStatus::MiscellaneousError(Some(status)),
            _ => ExecutionStatus::MiscellaneousError(None),
        };
        // Build up a transaction from the outputs
        // All state hashes are invalid, and will be filled with 0s
        let txn = aptos_types::transaction::Transaction::UserTransaction(txn);
        let zero_hash = aptos_crypto::HashValue::zero();
        let info = aptos_types::transaction::TransactionInfo::new(
            txn.hash(),
            zero_hash,
            zero_hash,
            None,
            output.gas_used(),
            exe_status,
        );
        // Assemble a pseudo on-chain transaction so it can be rendered with
        // the same machinery as committed transactions.
        let simulated_txn = TransactionOnChainData {
            version,
            transaction: txn,
            info,
            events: output.events().to_vec(),
            accumulator_root_hash: zero_hash,
            changes: output.write_set().clone(),
        };
        match accept_type {
            AcceptType::Json => {
                let transactions = self
                    .context
                    .render_transactions_non_sequential(&ledger_info, vec![simulated_txn])?;
                // Users can only make requests to simulate UserTransactions, so unpack
                // the Vec<Transaction> into Vec<UserTransaction>.
                let mut user_transactions = Vec::new();
                for transaction in transactions.into_iter() {
                    match transaction {
                        Transaction::UserTransaction(user_txn) => user_transactions.push(*user_txn),
                        _ => {
                            // Should be impossible; treated as an internal error.
                            return Err(SubmitTransactionError::internal_with_code(
                                "Simulation transaction resulted in a non-UserTransaction",
                                AptosErrorCode::InternalError,
                                &ledger_info,
                            ))
                        }
                    }
                }
                BasicResponse::try_from_json((
                    user_transactions,
                    &ledger_info,
                    BasicResponseStatus::Ok,
                ))
            }
            AcceptType::Bcs => {
                BasicResponse::try_from_bcs((simulated_txn, &ledger_info, BasicResponseStatus::Ok))
            }
        }
    }
    /// Encode message as BCS
    ///
    /// Converts a JSON transaction request into the exact byte string the
    /// client must sign. Supports both single-signer and multi-agent
    /// (secondary signers) transactions. Only JSON output is allowed.
    pub fn get_signing_message(
        &self,
        accept_type: &AcceptType,
        request: EncodeSubmissionRequest,
    ) -> BasicResult<HexEncodedBytes> {
        // We don't want to encourage people to use this API if they can sign the request directly
        if accept_type == &AcceptType::Bcs {
            return Err(BasicError::bad_request_with_code_no_info(
                "BCS is not supported for encode submission",
                AptosErrorCode::BcsNotSupported,
            ));
        }
        let ledger_info = self.context.get_latest_ledger_info()?;
        let resolver = self.context.move_resolver_poem(&ledger_info)?;
        // Convert the JSON request into a RawTransaction first.
        let raw_txn: RawTransaction = resolver
            .as_converter(self.context.db.clone())
            .try_into_raw_transaction_poem(request.transaction, self.context.chain_id())
            .context("The given transaction is invalid")
            .map_err(|err| {
                BasicError::bad_request_with_code(err, AptosErrorCode::InvalidInput, &ledger_info)
            })?;
        // Multi-agent transactions sign a different message shape that
        // includes the secondary signer addresses.
        let raw_message = match request.secondary_signers {
            Some(secondary_signer_addresses) => signing_message(
                &RawTransactionWithData::new_multi_agent(
                    raw_txn,
                    secondary_signer_addresses
                        .into_iter()
                        .map(|v| v.into())
                        .collect(),
                ),
            )
            .context("Invalid transaction to generate signing message")
            .map_err(|err| {
                BasicError::bad_request_with_code(err, AptosErrorCode::InvalidInput, &ledger_info)
            })?,
            None => raw_txn
                .signing_message()
                .context("Invalid transaction to generate signing message")
                .map_err(|err| {
                    BasicError::bad_request_with_code(
                        err,
                        AptosErrorCode::InvalidInput,
                        &ledger_info,
                    )
                })?,
        };
        BasicResponse::try_from_json((
            HexEncodedBytes::from(raw_message),
            &ledger_info,
            BasicResponseStatus::Ok,
        ))
    }
}
/// Rebuilds a signed transaction with the given gas parameters substituted.
///
/// Any parameter passed as `None` keeps the original transaction's value.
/// The original authenticator is reattached unchanged, which is fine for
/// simulation where the signature is not expected to be valid.
fn override_gas_parameters(
    signed_txn: &SignedTransaction,
    max_gas_amount: Option<u64>,
    gas_unit_price: Option<u64>,
) -> SignedTransaction {
    // Rebuild the raw transaction, swapping in overrides where provided.
    let raw_txn = RawTransaction::new(
        signed_txn.sender(),
        signed_txn.sequence_number(),
        signed_txn.payload().clone(),
        max_gas_amount.unwrap_or(signed_txn.max_gas_amount()),
        gas_unit_price.unwrap_or(signed_txn.gas_unit_price()),
        signed_txn.expiration_timestamp_secs(),
        signed_txn.chain_id(),
    );

    // TODO: Check that signature is null, this would just be helpful for downstream use
    SignedTransaction::new_with_authenticator(raw_txn, signed_txn.authenticator())
}
[api] Fix estimation error for too little funds

Right now, simulation with too little gas will put your max gas amount
below the minimum gas units, which will cause it to error with
MAX_GAS_UNITS_BELOW_MIN_TRANSACTION_GAS_UNITS. This change fixes it to
report INSUFFICIENT_BALANCE_FOR_TRANSACTION_FEE instead, which makes
more sense when you don't have enough money.
// Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use crate::{
accept_type::AcceptType,
accounts::Account,
bcs_payload::Bcs,
context::Context,
failpoint::fail_point_poem,
generate_error_response, generate_success_response,
page::Page,
response::{
api_disabled, transaction_not_found_by_hash, transaction_not_found_by_version,
BadRequestError, BasicError, BasicErrorWith404, BasicResponse, BasicResponseStatus,
BasicResult, BasicResultWith404, InsufficientStorageError, InternalError,
},
ApiTags,
};
use anyhow::{anyhow, Context as AnyhowContext};
use aptos_api_types::{
verify_function_identifier, verify_module_identifier, Address, AptosError, AptosErrorCode,
AsConverter, EncodeSubmissionRequest, GasEstimation, HashValue, HexEncodedBytes, LedgerInfo,
MoveType, PendingTransaction, SubmitTransactionRequest, Transaction, TransactionData,
TransactionOnChainData, TransactionsBatchSingleSubmissionFailure,
TransactionsBatchSubmissionResult, UserTransaction, VerifyInput, VerifyInputWithRecursion,
MAX_RECURSIVE_TYPES_ALLOWED, U64,
};
use aptos_crypto::{hash::CryptoHash, signing_message};
use aptos_types::{
account_config::CoinStoreResource,
account_view::AccountView,
mempool_status::MempoolStatusCode,
transaction::{
ExecutionStatus, RawTransaction, RawTransactionWithData, SignedTransaction,
TransactionPayload, TransactionStatus,
},
vm_status::StatusCode,
};
use aptos_vm::AptosVM;
use poem_openapi::{
param::{Path, Query},
payload::Json,
ApiRequest, OpenApi,
};
use std::sync::Arc;
// Response/error types for transaction submission. The macros generate
// poem-openapi response enums with the listed HTTP status variants.
generate_success_response!(SubmitTransactionResponse, (202, Accepted));
generate_error_response!(
    SubmitTransactionError,
    (400, BadRequest),
    (403, Forbidden),
    (413, PayloadTooLarge),
    (500, Internal),
    (503, ServiceUnavailable),
    (507, InsufficientStorage)
);
// Result alias for single-transaction submission endpoints.
type SubmitTransactionResult<T> =
    poem::Result<SubmitTransactionResponse<T>, SubmitTransactionError>;
// Batch submission distinguishes full (202) from partial (206) acceptance.
generate_success_response!(
    SubmitTransactionsBatchResponse,
    (202, Accepted),
    (206, AcceptedPartial)
);
type SubmitTransactionsBatchResult<T> =
    poem::Result<SubmitTransactionsBatchResponse<T>, SubmitTransactionError>;
// Simulation reuses the basic response but with submission-style errors.
type SimulateTransactionResult<T> = poem::Result<BasicResponse<T>, SubmitTransactionError>;
// TODO: Consider making both content types accept either
// SubmitTransactionRequest or SignedTransaction, the way
// it is now is quite confusing.
// We need a custom type here because we use different types for each of the
// content types possible for the POST data.
/// Request body for submitting a single transaction, in either JSON or
/// BCS form depending on the Content-Type header.
#[derive(ApiRequest, Debug)]
pub enum SubmitTransactionPost {
    #[oai(content_type = "application/json")]
    Json(Json<SubmitTransactionRequest>),
    // TODO: Since I don't want to impl all the Poem derives on SignedTransaction,
    // find a way to at least indicate in the spec that it expects a SignedTransaction.
    // TODO: https://github.com/aptos-labs/aptos-core/issues/2275
    #[oai(content_type = "application/x.aptos.signed_transaction+bcs")]
    Bcs(Bcs),
}
impl VerifyInput for SubmitTransactionPost {
    /// Validates the JSON request body; BCS bodies are validated later,
    /// after deserialization into a `SignedTransaction`.
    fn verify(&self) -> anyhow::Result<()> {
        if let SubmitTransactionPost::Json(inner) = self {
            inner.0.verify()
        } else {
            Ok(())
        }
    }
}
// We need a custom type here because we use different types for each of the
// content types possible for the POST data.
/// Request body for submitting a batch of transactions, in either JSON or
/// BCS form depending on the Content-Type header.
#[derive(ApiRequest, Debug)]
pub enum SubmitTransactionsBatchPost {
    #[oai(content_type = "application/json")]
    Json(Json<Vec<SubmitTransactionRequest>>),
    // TODO: Since I don't want to impl all the Poem derives on SignedTransaction,
    // find a way to at least indicate in the spec that it expects a SignedTransaction.
    // TODO: https://github.com/aptos-labs/aptos-core/issues/2275
    #[oai(content_type = "application/x.aptos.signed_transaction+bcs")]
    Bcs(Bcs),
}
impl VerifyInput for SubmitTransactionsBatchPost {
    /// Validates every JSON request in the batch, stopping at the first
    /// failure; BCS bodies are validated later, after deserialization.
    fn verify(&self) -> anyhow::Result<()> {
        match self {
            SubmitTransactionsBatchPost::Json(inner) => {
                inner.0.iter().try_for_each(|request| request.verify())
            }
            SubmitTransactionsBatchPost::Bcs(_) => Ok(()),
        }
    }
}
/// API for interacting with transactions
pub struct TransactionsApi {
    // Shared node context: DB access, mempool handle, and node config.
    pub context: Arc<Context>,
}
#[OpenApi]
impl TransactionsApi {
    /// Get transactions
    ///
    /// Retrieve on-chain committed transactions. The page size and start can be provided to
    /// get a specific sequence of transactions.
    ///
    /// If the version has been pruned, then a 410 will be returned
    #[oai(
        path = "/transactions",
        method = "get",
        operation_id = "get_transactions",
        tag = "ApiTags::Transactions"
    )]
    async fn get_transactions(
        &self,
        accept_type: AcceptType,
        /// Ledger version to start list of transactions
        ///
        /// If not provided, defaults to showing the latest transactions
        start: Query<Option<U64>>,
        /// Max number of transactions to retrieve.
        ///
        /// If not provided, defaults to default page size
        limit: Query<Option<u16>>,
    ) -> BasicResultWith404<Vec<Transaction>> {
        fail_point_poem("endpoint_get_transactions")?;
        self.context
            .check_api_output_enabled("Get transactions", &accept_type)?;
        // Clamp/validate pagination against the configured maximum page size.
        let page = Page::new(
            start.0.map(|v| v.0),
            limit.0,
            self.context.max_transactions_page_size(),
        );
        self.list(&accept_type, page)
    }
    /// Get transaction by hash
    ///
    /// Look up a transaction by its hash. This is the same hash that is returned
    /// by the API when submitting a transaction (see PendingTransaction).
    ///
    /// When given a transaction hash, the server first looks for the transaction
    /// in storage (on-chain, committed). If no on-chain transaction is found, it
    /// looks the transaction up by hash in the mempool (pending, not yet committed).
    ///
    /// To create a transaction hash by yourself, do the following:
    /// 1. Hash message bytes: "RawTransaction" bytes + BCS bytes of [Transaction](https://aptos-labs.github.io/aptos-core/aptos_types/transaction/enum.Transaction.html).
    /// 2. Apply hash algorithm `SHA3-256` to the hash message bytes.
    /// 3. Hex-encode the hash bytes with `0x` prefix.
    // TODO: Include a link to an example of how to do this ^
    #[oai(
        path = "/transactions/by_hash/:txn_hash",
        method = "get",
        operation_id = "get_transaction_by_hash",
        tag = "ApiTags::Transactions"
    )]
    async fn get_transaction_by_hash(
        &self,
        accept_type: AcceptType,
        /// Hash of transaction to retrieve
        txn_hash: Path<HashValue>,
        // TODO: Use a new request type that can't return 507.
    ) -> BasicResultWith404<Transaction> {
        fail_point_poem("endpoint_transaction_by_hash")?;
        self.context
            .check_api_output_enabled("Get transactions by hash", &accept_type)?;
        // Delegates to the inner helper which checks storage then mempool.
        self.get_transaction_by_hash_inner(&accept_type, txn_hash.0)
            .await
    }
    /// Get transaction by version
    ///
    /// Retrieves a transaction by a given version. If the version has been pruned, a 410 will
    /// be returned.
    #[oai(
        path = "/transactions/by_version/:txn_version",
        method = "get",
        operation_id = "get_transaction_by_version",
        tag = "ApiTags::Transactions"
    )]
    async fn get_transaction_by_version(
        &self,
        accept_type: AcceptType,
        /// Version of transaction to retrieve
        txn_version: Path<U64>,
    ) -> BasicResultWith404<Transaction> {
        fail_point_poem("endpoint_transaction_by_version")?;
        self.context
            .check_api_output_enabled("Get transactions by version", &accept_type)?;
        // Delegates to the inner helper which performs the storage lookup.
        self.get_transaction_by_version_inner(&accept_type, txn_version.0)
            .await
    }
    /// Get account transactions
    ///
    /// Retrieves transactions from an account. If the start version is too far in the past
    /// a 410 will be returned.
    ///
    /// If no start version is given, it will start at 0
    #[oai(
        path = "/accounts/:address/transactions",
        method = "get",
        operation_id = "get_account_transactions",
        tag = "ApiTags::Transactions"
    )]
    async fn get_accounts_transactions(
        &self,
        accept_type: AcceptType,
        /// Address of account with or without a `0x` prefix
        address: Path<Address>,
        /// Ledger version to start list of transactions
        ///
        /// If not provided, defaults to showing the latest transactions
        start: Query<Option<U64>>,
        /// Max number of transactions to retrieve.
        ///
        /// If not provided, defaults to default page size
        limit: Query<Option<u16>>,
    ) -> BasicResultWith404<Vec<Transaction>> {
        fail_point_poem("endpoint_get_accounts_transactions")?;
        self.context
            .check_api_output_enabled("Get account transactions", &accept_type)?;
        // Clamp/validate pagination against the configured maximum page size.
        let page = Page::new(
            start.0.map(|v| v.0),
            limit.0,
            self.context.max_transactions_page_size(),
        );
        self.list_by_account(&accept_type, page, address.0)
    }
/// Submit transaction
///
/// This endpoint accepts transaction submissions in two formats.
///
/// To submit a transaction as JSON, you must submit a SubmitTransactionRequest.
/// To build this request, do the following:
///
/// 1. Encode the transaction as BCS. If you are using a language that has
/// native BCS support, make sure of that library. If not, you may take
/// advantage of /transactions/encode_submission. When using this
/// endpoint, make sure you trust the node you're talking to, as it is
/// possible they could manipulate your request.
/// 2. Sign the encoded transaction and use it to create a TransactionSignature.
/// 3. Submit the request. Make sure to use the "application/json" Content-Type.
///
/// To submit a transaction as BCS, you must submit a SignedTransaction
/// encoded as BCS. See SignedTransaction in types/src/transaction/mod.rs.
/// Make sure to use the `application/x.aptos.signed_transaction+bcs` Content-Type.
// TODO: Point to examples of both of these flows, in multiple languages.
#[oai(
path = "/transactions",
method = "post",
operation_id = "submit_transaction",
tag = "ApiTags::Transactions"
)]
async fn submit_transaction(
&self,
accept_type: AcceptType,
data: SubmitTransactionPost,
) -> SubmitTransactionResult<PendingTransaction> {
data.verify()
.context("Submitted transaction invalid'")
.map_err(|err| {
SubmitTransactionError::bad_request_with_code_no_info(
err,
AptosErrorCode::InvalidInput,
)
})?;
fail_point_poem("endpoint_submit_transaction")?;
self.context
.check_api_output_enabled("Submit transaction", &accept_type)?;
if !self.context.node_config.api.transaction_submission_enabled {
return Err(api_disabled("Submit transaction"));
}
let ledger_info = self.context.get_latest_ledger_info()?;
let signed_transaction = self.get_signed_transaction(&ledger_info, data)?;
self.create(&accept_type, &ledger_info, signed_transaction)
.await
}
    /// Submit batch transactions
    ///
    /// This allows you to submit multiple transactions. The response has three outcomes:
    ///
    /// 1. All transactions succeed, and it will return a 202
    /// 2. Some transactions succeed, and it will return the failed transactions and a 206
    /// 3. No transactions succeed, and it will also return the failed transactions and a 206
    ///
    /// To submit a transaction as JSON, you must submit a SubmitTransactionRequest.
    /// To build this request, do the following:
    ///
    /// 1. Encode the transaction as BCS. If you are using a language that has
    /// native BCS support, make sure to use that library. If not, you may take
    /// advantage of /transactions/encode_submission. When using this
    /// endpoint, make sure you trust the node you're talking to, as it is
    /// possible they could manipulate your request.
    /// 2. Sign the encoded transaction and use it to create a TransactionSignature.
    /// 3. Submit the request. Make sure to use the "application/json" Content-Type.
    ///
    /// To submit a transaction as BCS, you must submit a SignedTransaction
    /// encoded as BCS. See SignedTransaction in types/src/transaction/mod.rs.
    /// Make sure to use the `application/x.aptos.signed_transaction+bcs` Content-Type.
    #[oai(
        path = "/transactions/batch",
        method = "post",
        operation_id = "submit_batch_transactions",
        tag = "ApiTags::Transactions"
    )]
    async fn submit_transactions_batch(
        &self,
        accept_type: AcceptType,
        data: SubmitTransactionsBatchPost,
    ) -> SubmitTransactionsBatchResult<TransactionsBatchSubmissionResult> {
        data.verify()
            .context("Submitted transactions invalid")
            .map_err(|err| {
                SubmitTransactionError::bad_request_with_code_no_info(
                    err,
                    AptosErrorCode::InvalidInput,
                )
            })?;
        fail_point_poem("endpoint_submit_batch_transactions")?;
        self.context
            .check_api_output_enabled("Submit batch transactions", &accept_type)?;
        // Batch submission can be disabled by node configuration.
        if !self.context.node_config.api.transaction_submission_enabled {
            return Err(api_disabled("Submit batch transaction"));
        }
        let ledger_info = self.context.get_latest_ledger_info()?;
        let signed_transactions_batch = self.get_signed_transactions_batch(&ledger_info, data)?;
        // Reject oversized batches after parsing, when the true count is known.
        if self.context.max_submit_transaction_batch_size() < signed_transactions_batch.len() {
            return Err(SubmitTransactionError::bad_request_with_code(
                &format!(
                    "Submitted too many transactions: {}, while limit is {}",
                    signed_transactions_batch.len(),
                    self.context.max_submit_transaction_batch_size(),
                ),
                AptosErrorCode::InvalidInput,
                &ledger_info,
            ));
        }
        self.create_batch(&accept_type, &ledger_info, signed_transactions_batch)
            .await
    }
    /// Simulate transaction
    ///
    /// The output of the transaction will have the exact transaction outputs and events that running
    /// an actual signed transaction would have. However, it will not have the associated state
    /// hashes, as they are not updated in storage. This can be used to estimate the maximum gas
    /// units for a submitted transaction.
    ///
    /// To use this, you must:
    /// - Create a SignedTransaction with a zero-padded signature.
    /// - Submit a SubmitTransactionRequest containing a UserTransactionRequest containing that signature.
    ///
    /// To use this endpoint with BCS, you must submit a SignedTransaction
    /// encoded as BCS. See SignedTransaction in types/src/transaction/mod.rs.
    #[oai(
        path = "/transactions/simulate",
        method = "post",
        operation_id = "simulate_transaction",
        tag = "ApiTags::Transactions"
    )]
    async fn simulate_transaction(
        &self,
        accept_type: AcceptType,
        /// If set to true, the max gas value in the transaction will be ignored
        /// and the maximum possible gas will be used
        estimate_max_gas_amount: Query<Option<bool>>,
        /// If set to true, the gas unit price in the transaction will be ignored
        /// and the estimated value will be used
        estimate_gas_unit_price: Query<Option<bool>>,
        data: SubmitTransactionPost,
    ) -> SimulateTransactionResult<Vec<UserTransaction>> {
        data.verify()
            .context("Simulated transaction invalid")
            .map_err(|err| {
                SubmitTransactionError::bad_request_with_code_no_info(
                    err,
                    AptosErrorCode::InvalidInput,
                )
            })?;
        fail_point_poem("endpoint_simulate_transaction")?;
        self.context
            .check_api_output_enabled("Simulate transaction", &accept_type)?;
        // Simulation can be disabled by node configuration.
        if !self.context.node_config.api.transaction_simulation_enabled {
            return Err(api_disabled("Simulate transaction"));
        }
        let ledger_info = self.context.get_latest_ledger_info()?;
        let mut signed_transaction = self.get_signed_transaction(&ledger_info, data)?;
        // Optionally estimate the gas unit price from recent transactions.
        let estimated_gas_unit_price = if estimate_gas_unit_price.0.unwrap_or_default() {
            Some(self.context.estimate_gas_price(&ledger_info)?)
        } else {
            None
        };
        // If estimate max gas amount is provided, we will just make it the maximum value
        let estimated_max_gas_amount = if estimate_max_gas_amount.0.unwrap_or_default() {
            // Retrieve max possible gas units
            let gas_params = self.context.get_gas_schedule(&ledger_info)?;
            // Minimum gas, converted from internal units to external gas units
            // via the scaling factor.
            let min_number_of_gas_units = u64::from(gas_params.txn.min_transaction_gas_units)
                / u64::from(gas_params.txn.gas_unit_scaling_factor);
            let max_number_of_gas_units = u64::from(gas_params.txn.maximum_number_of_gas_units);
            // Retrieve account balance to determine max gas available
            let account_state = self
                .context
                .get_account_state(
                    signed_transaction.sender(),
                    ledger_info.version(),
                    &ledger_info,
                )?
                .ok_or_else(|| {
                    SubmitTransactionError::bad_request_with_code(
                        "Account not found",
                        AptosErrorCode::InvalidInput,
                        &ledger_info,
                    )
                })?;
            let coin_store: CoinStoreResource = account_state
                .get_coin_store_resource()
                .and_then(|inner| {
                    inner.ok_or_else(|| {
                        anyhow!(
                            "No coin store found for account {}",
                            signed_transaction.sender()
                        )
                    })
                })
                .map_err(|err| {
                    SubmitTransactionError::internal_with_code(
                        format!("Failed to get coin store resource {}", err),
                        AptosErrorCode::InternalError,
                        &ledger_info,
                    )
                })?;
            let gas_unit_price =
                estimated_gas_unit_price.unwrap_or_else(|| signed_transaction.gas_unit_price());
            // With 0 gas price, we set it to max gas units, since we can't divide by 0
            let max_account_gas_units = if gas_unit_price == 0 {
                coin_store.coin()
            } else {
                coin_store.coin() / gas_unit_price
            };
            // To give better error messaging, we should not go below the minimum number of gas units
            // (otherwise the VM reports MAX_GAS_UNITS_BELOW_MIN_TRANSACTION_GAS_UNITS
            // instead of an insufficient-balance error).
            let max_account_gas_units =
                std::cmp::max(min_number_of_gas_units, max_account_gas_units);
            // Minimum of the max account and the max total needs to be used for estimation
            Some(std::cmp::min(
                max_account_gas_units,
                max_number_of_gas_units,
            ))
        } else {
            None
        };
        // If there is an estimation of either, replace the values
        if estimated_max_gas_amount.is_some() || estimated_gas_unit_price.is_some() {
            signed_transaction = override_gas_parameters(
                &signed_transaction,
                estimated_max_gas_amount,
                estimated_gas_unit_price,
            );
        }
        self.simulate(&accept_type, ledger_info, signed_transaction)
            .await
    }
/// Encode submission
///
/// This endpoint accepts an EncodeSubmissionRequest, which internally is a
/// UserTransactionRequestInner (and optionally secondary signers) encoded
/// as JSON, validates the request format, and then returns that request
/// encoded in BCS. The client can then use this to create a transaction
/// signature to be used in a SubmitTransactionRequest, which it then
/// passes to the /transactions POST endpoint.
///
/// To be clear, this endpoint makes it possible to submit transaction
/// requests to the API from languages that do not have library support for
/// BCS. If you are using an SDK that has BCS support, such as the official
/// Rust, TypeScript, or Python SDKs, you do not need to use this endpoint.
///
/// To sign a message using the response from this endpoint:
/// - Decode the hex encoded string in the response to bytes.
/// - Sign the bytes to create the signature.
/// - Use that as the signature field in something like Ed25519Signature, which you then use to build a TransactionSignature.
//
// TODO: Link an example of how to do this. Use externalDoc.
#[oai(
path = "/transactions/encode_submission",
method = "post",
operation_id = "encode_submission",
tag = "ApiTags::Transactions"
)]
async fn encode_submission(
&self,
accept_type: AcceptType,
data: Json<EncodeSubmissionRequest>,
// TODO: Use a new request type that can't return 507 but still returns all the other necessary errors.
) -> BasicResult<HexEncodedBytes> {
// Reject malformed requests before doing any other work; validation
// failures surface as 400s with InvalidInput.
data.0
.verify()
.context("'UserTransactionRequest' invalid")
.map_err(|err| {
BasicError::bad_request_with_code_no_info(err, AptosErrorCode::InvalidInput)
})?;
// Test-only failure-injection hook for this endpoint.
fail_point_poem("endpoint_encode_submission")?;
self.context
.check_api_output_enabled("Encode submission", &accept_type)?;
// This endpoint can be disabled via node configuration independently
// of the rest of the API.
if !self.context.node_config.api.encode_submission_enabled {
return Err(api_disabled("Encode submission"));
}
// Delegates the actual BCS encoding (and the JSON-only accept-type
// restriction) to the shared helper.
self.get_signing_message(&accept_type, data.0)
}
/// Estimate gas price
///
/// Currently, the gas estimation is handled by taking the median of the last 100,000 transactions
/// If a user wants to prioritize their transaction and is willing to pay, they can pay more
/// than the gas price. If they're willing to wait longer, they can pay less. Note that the
/// gas price moves with the fee market, and should only increase when demand outweighs supply.
///
/// If there have been no transactions in the last 100,000 transactions, the price will be 1.
#[oai(
path = "/estimate_gas_price",
method = "get",
operation_id = "estimate_gas_price",
tag = "ApiTags::Transactions"
)]
async fn estimate_gas_price(&self, accept_type: AcceptType) -> BasicResult<GasEstimation> {
// Fail point renamed: this previously reused "endpoint_encode_submission"
// (a copy-paste from the endpoint above), which made it impossible to
// inject failures into this endpoint independently.
fail_point_poem("endpoint_estimate_gas_price")?;
self.context
.check_api_output_enabled("Estimate gas price", &accept_type)?;
let latest_ledger_info = self.context.get_latest_ledger_info()?;
// The estimation itself lives in the shared context so other endpoints
// can reuse it.
let estimated_gas_price = self.context.estimate_gas_price(&latest_ledger_info)?;
// TODO: Do we want to give more than just a single gas price? Percentiles?
let gas_estimation = GasEstimation {
gas_estimate: estimated_gas_price,
};
match accept_type {
AcceptType::Json => BasicResponse::try_from_json((
gas_estimation,
&latest_ledger_info,
BasicResponseStatus::Ok,
)),
AcceptType::Bcs => BasicResponse::try_from_bcs((
gas_estimation,
&latest_ledger_info,
BasicResponseStatus::Ok,
)),
}
}
}
impl TransactionsApi {
/// List all transactions paging by ledger version
fn list(&self, accept_type: &AcceptType, page: Page) -> BasicResultWith404<Vec<Transaction>> {
    let latest_ledger_info = self.context.get_latest_ledger_info()?;
    let ledger_version = latest_ledger_info.version();
    // Resolve the requested page against the current ledger height.
    let limit = page.limit(&latest_ledger_info)?;
    let start_version = page.compute_start(limit, ledger_version, &latest_ledger_info)?;
    let txns = self
        .context
        .get_transactions(start_version, limit, ledger_version)
        .context("Failed to read raw transactions from storage")
        .map_err(|err| {
            BasicErrorWith404::internal_with_code(
                err,
                AptosErrorCode::InternalError,
                &latest_ledger_info,
            )
        })?;
    // BCS callers get the raw storage representation directly.
    if let AcceptType::Bcs = accept_type {
        return BasicResponse::try_from_bcs((
            txns,
            &latest_ledger_info,
            BasicResponseStatus::Ok,
        ));
    }
    // JSON rendering needs the block timestamp of the first version in the page.
    let timestamp = self
        .context
        .get_block_timestamp(&latest_ledger_info, start_version)?;
    let rendered =
        self.context
            .render_transactions_sequential(&latest_ledger_info, txns, timestamp)?;
    BasicResponse::try_from_json((rendered, &latest_ledger_info, BasicResponseStatus::Ok))
}
/// Looks up a transaction by hash and renders it for the requested accept type.
async fn get_transaction_by_hash_inner(
    &self,
    accept_type: &AcceptType,
    hash: HashValue,
) -> BasicResultWith404<Transaction> {
    let ledger_info = self.context.get_latest_ledger_info()?;
    // A lookup failure is an internal error; a missing transaction maps to 404.
    let lookup = self
        .get_by_hash(hash.into(), &ledger_info)
        .await
        .context(format!("Failed to get transaction by hash {}", hash))
        .map_err(|err| {
            BasicErrorWith404::internal_with_code(
                err,
                AptosErrorCode::InternalError,
                &ledger_info,
            )
        })?;
    let txn_data = lookup
        .context(format!("Failed to find transaction with hash: {}", hash))
        .map_err(|_| transaction_not_found_by_hash(hash, &ledger_info))?;
    self.get_transaction_inner(accept_type, txn_data, &ledger_info)
        .await
}
/// Looks up a transaction by ledger version and renders it for the requested accept type.
async fn get_transaction_by_version_inner(
    &self,
    accept_type: &AcceptType,
    version: U64,
) -> BasicResultWith404<Transaction> {
    let ledger_info = self.context.get_latest_ledger_info()?;
    // A storage failure is an internal error; an absent version maps to 404.
    let lookup = self
        .get_by_version(version.0, &ledger_info)
        .context(format!("Failed to get transaction by version {}", version))
        .map_err(|err| {
            BasicErrorWith404::internal_with_code(
                err,
                AptosErrorCode::InternalError,
                &ledger_info,
            )
        })?;
    let txn_data = lookup
        .context(format!(
            "Failed to find transaction at version: {}",
            version
        ))
        .map_err(|_| transaction_not_found_by_version(version.0, &ledger_info))?;
    self.get_transaction_inner(accept_type, txn_data, &ledger_info)
        .await
}
/// Converts a transaction into the outgoing type
///
/// JSON responses require converting the stored form through the Move
/// resolver; BCS responses return the transaction data untouched.
async fn get_transaction_inner(
&self,
accept_type: &AcceptType,
transaction_data: TransactionData,
ledger_info: &LedgerInfo,
) -> BasicResultWith404<Transaction> {
match accept_type {
AcceptType::Json => {
let resolver = self.context.move_resolver_poem(ledger_info)?;
// Committed and pending transactions convert through different paths:
// only committed ones have a version (and therefore a block timestamp).
let transaction = match transaction_data {
TransactionData::OnChain(txn) => {
let timestamp =
self.context.get_block_timestamp(ledger_info, txn.version)?;
resolver
.as_converter(self.context.db.clone())
.try_into_onchain_transaction(timestamp, txn)
.context("Failed to convert on chain transaction to Transaction")
.map_err(|err| {
BasicErrorWith404::internal_with_code(
err,
AptosErrorCode::InternalError,
ledger_info,
)
})?
}
TransactionData::Pending(txn) => resolver
.as_converter(self.context.db.clone())
.try_into_pending_transaction(*txn)
.context("Failed to convert on pending transaction to Transaction")
.map_err(|err| {
BasicErrorWith404::internal_with_code(
err,
AptosErrorCode::InternalError,
ledger_info,
)
})?,
};
BasicResponse::try_from_json((transaction, ledger_info, BasicResponseStatus::Ok))
}
// BCS clients can decode the stored representation themselves.
AcceptType::Bcs => BasicResponse::try_from_bcs((
transaction_data,
ledger_info,
BasicResponseStatus::Ok,
)),
}
}
/// Retrieves a transaction by ledger version
///
/// Returns `None` for versions past the current ledger height instead of
/// querying storage for something that cannot exist yet.
fn get_by_version(
    &self,
    version: u64,
    ledger_info: &LedgerInfo,
) -> anyhow::Result<Option<TransactionData>> {
    let latest_version = ledger_info.version();
    if version > latest_version {
        return Ok(None);
    }
    let txn = self
        .context
        .get_transaction_by_version(version, latest_version)?;
    Ok(Some(txn.into()))
}
/// Retrieves a transaction by hash. First the node tries to find the transaction
/// in the DB. If the transaction is found there, it means the transaction is
/// committed. If it is not found there, it looks in mempool. If it is found there,
/// it means the transaction is still pending.
async fn get_by_hash(
    &self,
    hash: aptos_crypto::HashValue,
    ledger_info: &LedgerInfo,
) -> anyhow::Result<Option<TransactionData>> {
    // Committed transactions live in the DB; check there first.
    if let Some(committed) = self
        .context
        .get_transaction_by_hash(hash, ledger_info.version())?
    {
        return Ok(Some(committed.into()));
    }
    // Not committed — fall back to mempool for a still-pending transaction.
    let pending = self.context.get_pending_transaction_by_hash(hash).await?;
    Ok(pending.map(|t| t.into()))
}
/// List all transactions for an account
fn list_by_account(
    &self,
    accept_type: &AcceptType,
    page: Page,
    address: Address,
) -> BasicResultWith404<Vec<Transaction>> {
    // Verify the account exists
    let account = Account::new(self.context.clone(), address, None)?;
    account.account_state()?;
    let latest_ledger_info = account.latest_ledger_info;
    // TODO: Return more specific errors from within this function.
    let txns = self.context.get_account_transactions(
        address.into(),
        page.start_option(),
        page.limit(&latest_ledger_info)?,
        latest_ledger_info.version(),
        &latest_ledger_info,
    )?;
    // BCS callers receive the stored representation as-is.
    if let AcceptType::Bcs = accept_type {
        return BasicResponse::try_from_bcs((
            txns,
            &latest_ledger_info,
            BasicResponseStatus::Ok,
        ));
    }
    let rendered = self
        .context
        .render_transactions_non_sequential(&latest_ledger_info, txns)?;
    BasicResponse::try_from_json((rendered, &latest_ledger_info, BasicResponseStatus::Ok))
}
/// Parses a single signed transaction
///
/// BCS bodies are deserialized (with a bounded recursion depth) and their
/// payload identifiers/type arguments validated; JSON bodies are converted
/// through the Move resolver. All validation failures are 400s.
fn get_signed_transaction(
&self,
ledger_info: &LedgerInfo,
data: SubmitTransactionPost,
) -> Result<SignedTransaction, SubmitTransactionError> {
match data {
SubmitTransactionPost::Bcs(data) => {
// Bound deserialization depth so adversarial input cannot blow the stack.
let signed_transaction: SignedTransaction =
bcs::from_bytes_with_limit(&data.0, MAX_RECURSIVE_TYPES_ALLOWED as usize)
.context("Failed to deserialize input into SignedTransaction")
.map_err(|err| {
SubmitTransactionError::bad_request_with_code(
err,
AptosErrorCode::InvalidInput,
ledger_info,
)
})?;
// Verify the signed transaction
match signed_transaction.payload() {
TransactionPayload::EntryFunction(entry_function) => {
// Module name, function name, and every type argument must all
// be well-formed identifiers/types.
verify_module_identifier(entry_function.module().name().as_str())
.context("Transaction entry function module invalid")
.map_err(|err| {
SubmitTransactionError::bad_request_with_code(
err,
AptosErrorCode::InvalidInput,
ledger_info,
)
})?;
verify_function_identifier(entry_function.function().as_str())
.context("Transaction entry function name invalid")
.map_err(|err| {
SubmitTransactionError::bad_request_with_code(
err,
AptosErrorCode::InvalidInput,
ledger_info,
)
})?;
for arg in entry_function.ty_args() {
let arg: MoveType = arg.into();
arg.verify(0)
.context("Transaction entry function type arg invalid")
.map_err(|err| {
SubmitTransactionError::bad_request_with_code(
err,
AptosErrorCode::InvalidInput,
ledger_info,
)
})?;
}
}
TransactionPayload::Script(script) => {
// Empty bytecode is rejected outright; type args are validated
// the same way as for entry functions.
if script.code().is_empty() {
return Err(SubmitTransactionError::bad_request_with_code(
"Script payload bytecode must not be empty",
AptosErrorCode::InvalidInput,
ledger_info,
));
}
for arg in script.ty_args() {
let arg = MoveType::from(arg);
arg.verify(0)
.context("Transaction script function type arg invalid")
.map_err(|err| {
SubmitTransactionError::bad_request_with_code(
err,
AptosErrorCode::InvalidInput,
ledger_info,
)
})?;
}
}
// Module bundles carry no identifiers to validate here.
TransactionPayload::ModuleBundle(_) => {}
}
// TODO: Verify script args?
Ok(signed_transaction)
}
SubmitTransactionPost::Json(data) => self
.context
.move_resolver_poem(ledger_info)?
.as_converter(self.context.db.clone())
.try_into_signed_transaction_poem(data.0, self.context.chain_id())
.context("Failed to create SignedTransaction from SubmitTransactionRequest")
.map_err(|err| {
SubmitTransactionError::bad_request_with_code(
err,
AptosErrorCode::InvalidInput,
ledger_info,
)
}),
}
}
/// Parses a batch of signed transactions
fn get_signed_transactions_batch(
&self,
ledger_info: &LedgerInfo,
data: SubmitTransactionsBatchPost,
) -> Result<Vec<SignedTransaction>, SubmitTransactionError> {
match data {
SubmitTransactionsBatchPost::Bcs(data) => {
let signed_transactions = bcs::from_bytes(&data.0)
.context("Failed to deserialize input into SignedTransaction")
.map_err(|err| {
SubmitTransactionError::bad_request_with_code(
err,
AptosErrorCode::InvalidInput,
ledger_info,
)
})?;
Ok(signed_transactions)
}
SubmitTransactionsBatchPost::Json(data) => data
.0
.into_iter()
.enumerate()
.map(|(index, txn)| {
self.context
.move_resolver_poem(ledger_info)?
.as_converter(self.context.db.clone())
.try_into_signed_transaction_poem(txn, self.context.chain_id())
.context(format!("Failed to create SignedTransaction from SubmitTransactionRequest at position {}", index))
.map_err(|err| {
SubmitTransactionError::bad_request_with_code(
err,
AptosErrorCode::InvalidInput,
ledger_info,
)
})
})
.collect(),
}
}
/// Submits a single transaction, and converts mempool codes to errors
///
/// Maps each `MempoolStatusCode` to a distinct `AptosErrorCode` so the
/// HTTP layer (see `create`) can choose the right status code.
async fn create_internal(&self, txn: SignedTransaction) -> Result<(), AptosError> {
let (mempool_status, vm_status_opt) = self
.context
.submit_transaction(txn)
.await
.context("Mempool failed to initially evaluate submitted transaction")
.map_err(|err| {
aptos_api_types::AptosError::new_with_error_code(err, AptosErrorCode::InternalError)
})?;
match mempool_status.code {
MempoolStatusCode::Accepted => Ok(()),
// Both "full" conditions collapse to the same client-facing error code.
MempoolStatusCode::MempoolIsFull | MempoolStatusCode::TooManyTransactions => {
Err(AptosError::new_with_error_code(
&mempool_status.message,
AptosErrorCode::MempoolIsFull,
))
}
MempoolStatusCode::VmError => {
// Include the VM status when mempool supplied one; otherwise report
// an unknown status so the caller still learns it was a VM rejection.
if let Some(status) = vm_status_opt {
Err(AptosError::new_with_vm_status(
format!(
"Invalid transaction: Type: {:?} Code: {:?}",
status.status_type(),
status
),
AptosErrorCode::VmError,
status,
))
} else {
Err(AptosError::new_with_vm_status(
"Invalid transaction: unknown",
AptosErrorCode::VmError,
StatusCode::UNKNOWN_STATUS,
))
}
}
MempoolStatusCode::InvalidSeqNumber => Err(AptosError::new_with_error_code(
mempool_status.message,
AptosErrorCode::SequenceNumberTooOld,
)),
MempoolStatusCode::InvalidUpdate => Err(AptosError::new_with_error_code(
mempool_status.message,
AptosErrorCode::InvalidTransactionUpdate,
)),
MempoolStatusCode::UnknownStatus => Err(AptosError::new_with_error_code(
format!("Transaction was rejected with status {}", mempool_status,),
AptosErrorCode::InternalError,
)),
}
}
/// Submits a single transaction
///
/// On acceptance, JSON responses include the full pending transaction (so
/// clients get the hash); BCS responses carry no body. On rejection, the
/// `AptosErrorCode` from `create_internal` selects the HTTP status.
async fn create(
&self,
accept_type: &AcceptType,
ledger_info: &LedgerInfo,
txn: SignedTransaction,
) -> SubmitTransactionResult<PendingTransaction> {
match self.create_internal(txn.clone()).await {
Ok(()) => match accept_type {
AcceptType::Json => {
let resolver = self
.context
.move_resolver()
.context("Failed to read latest state checkpoint from DB")
.map_err(|e| {
SubmitTransactionError::internal_with_code(
e,
AptosErrorCode::InternalError,
ledger_info,
)
})?;
// We provide the pending transaction so that users have the hash associated
let pending_txn = resolver
.as_converter(self.context.db.clone())
.try_into_pending_transaction_poem(txn)
.context("Failed to build PendingTransaction from mempool response, even though it said the request was accepted")
.map_err(|err| SubmitTransactionError::internal_with_code(
err,
AptosErrorCode::InternalError,
ledger_info,
))?;
SubmitTransactionResponse::try_from_json((
pending_txn,
ledger_info,
SubmitTransactionResponseStatus::Accepted,
))
}
// With BCS, we don't return the pending transaction for efficiency, because there
// is no new information. The hash can be retrieved by hashing the original
// transaction.
AcceptType::Bcs => SubmitTransactionResponse::try_from_bcs((
(),
ledger_info,
SubmitTransactionResponseStatus::Accepted,
)),
},
// Translate the error code chosen by create_internal into the
// corresponding HTTP-level error (500 / 400 / 507).
Err(error) => match error.error_code {
AptosErrorCode::InternalError => Err(
SubmitTransactionError::internal_from_aptos_error(error, ledger_info),
),
AptosErrorCode::VmError
| AptosErrorCode::SequenceNumberTooOld
| AptosErrorCode::InvalidTransactionUpdate => Err(
SubmitTransactionError::bad_request_from_aptos_error(error, ledger_info),
),
AptosErrorCode::MempoolIsFull => Err(
SubmitTransactionError::insufficient_storage_from_aptos_error(
error,
ledger_info,
),
),
// Anything unexpected is treated as an internal error.
_ => Err(SubmitTransactionError::internal_from_aptos_error(
error,
ledger_info,
)),
},
}
}
/// Submits a batch of transactions
///
/// Every transaction is submitted even if earlier ones fail; failures are
/// collected with their positions and reported alongside a partial-success
/// status when at least one submission was rejected.
async fn create_batch(
    &self,
    accept_type: &AcceptType,
    ledger_info: &LedgerInfo,
    txns: Vec<SignedTransaction>,
) -> SubmitTransactionsBatchResult<TransactionsBatchSubmissionResult> {
    // Submit each transaction in order, recording the index of every rejection.
    let mut transaction_failures = Vec::new();
    for (transaction_index, txn) in txns.iter().enumerate() {
        if let Err(error) = self.create_internal(txn.clone()).await {
            transaction_failures.push(TransactionsBatchSingleSubmissionFailure {
                error,
                transaction_index,
            })
        }
    }
    // Return the possible failures, and have a different success code for partial success
    // TODO: This should really throw an error if all fail
    let response_status = match transaction_failures.is_empty() {
        true => SubmitTransactionsBatchResponseStatus::Accepted,
        false => SubmitTransactionsBatchResponseStatus::AcceptedPartial,
    };
    SubmitTransactionsBatchResponse::try_from_rust_value((
        TransactionsBatchSubmissionResult {
            transaction_failures,
        },
        ledger_info,
        response_status,
        accept_type,
    ))
}
// TODO: This function leverages a lot of types from aptos_types, use the
// local API types and just return those directly, instead of converting
// from these types in render_transactions.
/// Simulate a transaction in the VM
///
/// Note: this returns a `Vec<UserTransaction>`, but for backwards compatibility, this can't
/// be removed even though, there is only one possible transaction
pub async fn simulate(
&self,
accept_type: &AcceptType,
ledger_info: LedgerInfo,
txn: SignedTransaction,
) -> SimulateTransactionResult<Vec<UserTransaction>> {
// Transactions shouldn't have a valid signature or this could be used to attack
if txn.signature_is_valid() {
return Err(SubmitTransactionError::bad_request_with_code(
"Simulated transactions must have a non-valid signature",
AptosErrorCode::InvalidInput,
&ledger_info,
));
}
// Simulate transaction
let move_resolver = self.context.move_resolver_poem(&ledger_info)?;
let (_, output_ext) = AptosVM::simulate_signed_transaction(&txn, &move_resolver);
let version = ledger_info.version();
// Apply transaction outputs to build up a transaction
// TODO: while `into_transaction_output_with_status()` should never fail
// to apply deltas, we should propagate errors properly. Fix this when
// VM error handling is fixed.
let output = output_ext.into_transaction_output(&move_resolver);
// Ensure that all known statuses return their values in the output (even if they aren't supposed to)
let exe_status = match output.status().clone() {
TransactionStatus::Keep(exec_status) => exec_status,
TransactionStatus::Discard(status) => ExecutionStatus::MiscellaneousError(Some(status)),
_ => ExecutionStatus::MiscellaneousError(None),
};
// Build up a transaction from the outputs
// All state hashes are invalid, and will be filled with 0s
let txn = aptos_types::transaction::Transaction::UserTransaction(txn);
let zero_hash = aptos_crypto::HashValue::zero();
let info = aptos_types::transaction::TransactionInfo::new(
txn.hash(),
zero_hash,
zero_hash,
None,
output.gas_used(),
exe_status,
);
// Present the simulated result as if it were an on-chain transaction at
// the current ledger version (with placeholder hashes, see above).
let simulated_txn = TransactionOnChainData {
version,
transaction: txn,
info,
events: output.events().to_vec(),
accumulator_root_hash: zero_hash,
changes: output.write_set().clone(),
};
match accept_type {
AcceptType::Json => {
let transactions = self
.context
.render_transactions_non_sequential(&ledger_info, vec![simulated_txn])?;
// Users can only make requests to simulate UserTransactions, so unpack
// the Vec<Transaction> into Vec<UserTransaction>.
let mut user_transactions = Vec::new();
for transaction in transactions.into_iter() {
match transaction {
Transaction::UserTransaction(user_txn) => user_transactions.push(*user_txn),
// Anything else indicates a rendering bug, not bad user input.
_ => {
return Err(SubmitTransactionError::internal_with_code(
"Simulation transaction resulted in a non-UserTransaction",
AptosErrorCode::InternalError,
&ledger_info,
))
}
}
}
BasicResponse::try_from_json((
user_transactions,
&ledger_info,
BasicResponseStatus::Ok,
))
}
AcceptType::Bcs => {
BasicResponse::try_from_bcs((simulated_txn, &ledger_info, BasicResponseStatus::Ok))
}
}
}
/// Encode message as BCS
///
/// Builds a RawTransaction from the request and returns its signing message
/// (multi-agent form when secondary signers are supplied). JSON only: BCS
/// clients can construct the signing message themselves.
pub fn get_signing_message(
&self,
accept_type: &AcceptType,
request: EncodeSubmissionRequest,
) -> BasicResult<HexEncodedBytes> {
// We don't want to encourage people to use this API if they can sign the request directly
if accept_type == &AcceptType::Bcs {
return Err(BasicError::bad_request_with_code_no_info(
"BCS is not supported for encode submission",
AptosErrorCode::BcsNotSupported,
));
}
let ledger_info = self.context.get_latest_ledger_info()?;
let resolver = self.context.move_resolver_poem(&ledger_info)?;
let raw_txn: RawTransaction = resolver
.as_converter(self.context.db.clone())
.try_into_raw_transaction_poem(request.transaction, self.context.chain_id())
.context("The given transaction is invalid")
.map_err(|err| {
BasicError::bad_request_with_code(err, AptosErrorCode::InvalidInput, &ledger_info)
})?;
// With secondary signers the multi-agent wrapper must be part of the
// signed bytes; without them the plain raw transaction is signed.
let raw_message = match request.secondary_signers {
Some(secondary_signer_addresses) => signing_message(
&RawTransactionWithData::new_multi_agent(
raw_txn,
secondary_signer_addresses
.into_iter()
.map(|v| v.into())
.collect(),
),
)
.context("Invalid transaction to generate signing message")
.map_err(|err| {
BasicError::bad_request_with_code(err, AptosErrorCode::InvalidInput, &ledger_info)
})?,
None => raw_txn
.signing_message()
.context("Invalid transaction to generate signing message")
.map_err(|err| {
BasicError::bad_request_with_code(
err,
AptosErrorCode::InvalidInput,
&ledger_info,
)
})?,
};
BasicResponse::try_from_json((
HexEncodedBytes::from(raw_message),
&ledger_info,
BasicResponseStatus::Ok,
))
}
}
/// Rebuilds `signed_txn` with the supplied gas parameters, keeping every
/// other field (and the original authenticator) unchanged. Fields for which
/// no override is given retain the transaction's existing values.
fn override_gas_parameters(
    signed_txn: &SignedTransaction,
    max_gas_amount: Option<u64>,
    gas_unit_price: Option<u64>,
) -> SignedTransaction {
    // Reassemble the raw transaction, substituting only the gas fields.
    let raw_txn = RawTransaction::new(
        signed_txn.sender(),
        signed_txn.sequence_number(),
        signed_txn.payload().clone(),
        max_gas_amount.unwrap_or_else(|| signed_txn.max_gas_amount()),
        gas_unit_price.unwrap_or_else(|| signed_txn.gas_unit_price()),
        signed_txn.expiration_timestamp_secs(),
        signed_txn.chain_id(),
    );
    // TODO: Check that signature is null, this would just be helpful for downstream use
    SignedTransaction::new_with_authenticator(raw_txn, signed_txn.authenticator())
}
|
use std::env;
use std::process::{Command, Stdio};
use std::path::PathBuf;
use std::io::{Read, Write};
use std::fs::File;
extern crate nom_bibtex;
use nom_bibtex::Bibtex;
mod text;
/// Read the entire contents of `filename` into a byte vector.
///
/// Panics if the file cannot be opened or read — acceptable in a test
/// helper, where a missing fixture should abort the test loudly.
fn read_file(filename: &str) -> Vec<u8> {
    // fs::read opens, sizes, and reads the file in one call, replacing the
    // manual File::open + read_to_end sequence.
    std::fs::read(filename).unwrap()
}
/// Locate the directory containing the compiled binaries under test.
///
/// The test executable lives in `target/<profile>/deps`, so the binaries
/// are two `parent()` hops up from the test binary itself.
fn get_bin_dir() -> PathBuf {
    let test_exe = env::current_exe().expect("test bin's directory");
    let deps_dir = test_exe.parent().expect("test bin's parent directory");
    deps_dir
        .parent()
        .expect("executable's directory")
        .to_path_buf()
}
fn cmd_aux2bib() -> Command {
let path;
if cfg!(not(windows)) {
path = get_bin_dir().join("aux2bib");
} else {
path = get_bin_dir().join("aux2bib.exe");
}
if !path.is_file() {
panic!("aux2bib binary {:?} was not found", path);
}
let mut cmd = Command::new(path);
cmd.env_clear().stderr(Stdio::piped()).stdout(
Stdio::piped(),
);
cmd
}
fn cmd_blg2bib() -> Command {
let path;
if cfg!(not(windows)) {
path = get_bin_dir().join("blg2bib");
} else {
path = get_bin_dir().join("blg2bib.exe");
}
if !path.is_file() {
panic!("blg2bib binary {:?} was not found", path);
}
let mut cmd = Command::new(path);
cmd.env_clear().stderr(Stdio::piped()).stdout(
Stdio::piped(),
);
cmd
}
#[cfg(not(windows))]
#[test]
fn aux2bib_runs() {
    // `--help` must spawn and exit successfully.
    let mut child = cmd_aux2bib()
        .arg("--help")
        .spawn()
        .expect("Failed to execute aux2bib");
    let status = child.wait().expect("Failed to wait on aux2bib");
    assert!(status.success());
}
#[cfg(not(windows))]
#[test]
fn blg2bib_runs() {
    // `--help` must spawn and exit successfully.
    let mut child = cmd_blg2bib()
        .arg("--help")
        .spawn()
        .expect("Failed to execute blg2bib");
    let status = child.wait().expect("Failed to wait on blg2bib");
    assert!(status.success());
}
#[cfg(not(windows))]
#[test]
fn aux2bib_stdin_empty() {
    // Feeding no input at all should succeed and produce no output.
    let mut child = cmd_aux2bib()
        .stdin(Stdio::piped())
        .spawn()
        .expect("Failed to execute aux2bib");
    child
        .stdin
        .as_mut()
        .expect("Failed to get stdin")
        .write_all(b"")
        .expect("Failed to write to stdin");
    // wait_with_output closes stdin before waiting, so the child sees EOF.
    let output = child.wait_with_output().expect("Failed to wait on aux2bib");
    assert!(output.status.success());
    assert_eq!(output.stdout, []);
}
#[cfg(not(windows))]
#[test]
fn blg2bib_stdin_empty() {
    // Feeding no input at all should succeed and produce no output.
    let mut child = cmd_blg2bib().stdin(Stdio::piped()).spawn().expect(
        "Failed to execute blg2bib",
    );
    {
        let stdin = child.stdin.as_mut().expect("Failed to get stdin");
        stdin.write_all(b"").expect("Failed to write to stdin");
    }
    // Fixed copy-paste: this waits on blg2bib, not aux2bib.
    let output = child.wait_with_output().expect("Failed to wait on blg2bib");
    assert!(output.status.success());
    assert_eq!(output.stdout, []);
}
#[cfg(not(windows))]
#[test]
fn aux2bib_stdin_bibtex() {
// Feed a BibTeX-generated .aux file on stdin and check the emitted bibliography.
let mut child = cmd_aux2bib().stdin(Stdio::piped()).spawn().expect(
"Failed to execute aux2bib",
);
{
let input = read_file("example_files/test_bibtex.aux");
let stdin = child.stdin.as_mut().expect("Failed to get stdin");
stdin.write_all(&input).expect("Failed to write to stdin");
}
let output = child.wait_with_output().expect("Failed to wait on aux2bib");
assert!(output.status.success());
// The tool's stdout must itself be parseable BibTeX.
let bibtex = Bibtex::parse(std::str::from_utf8(&output.stdout).unwrap())
.expect("Valid bibtex file content");
let bib = bibtex.bibliographies();
assert_eq!(2, bib.len());
// Output could conceivably change in future, so just check that some things are right
assert_eq!(bib[0].entry_type(), "article");
assert_eq!(bib[0].citation_key(), "Higgs:2014aqa");
assert_eq!(
bib[0].tags()[0],
("author".into(), "Higgs, Peter W.".into())
);
assert_eq!(bib[0].tags()[1], (
"title".into(),
"{Nobel Lecture: Evading the Goldstone theorem}"
.into(),
));
assert_eq!(bib[0].tags()[4], ("year".into(), "2014".into()));
assert_eq!(bib[1].entry_type(), "article");
assert_eq!(bib[1].citation_key(), "Higgs:2015mei");
assert_eq!(bib[1].tags()[0], ("author".into(), "Higgs, P. W.".into()));
assert_eq!(bib[1].tags()[1], (
"title".into(),
"{Evading the Goldstone theorem}".into(),
));
assert_eq!(bib[1].tags()[4], ("year".into(), "2015".into()));
}
#[cfg(not(windows))]
#[test]
fn aux2bib_stdin_biblatex() {
// Feed a biber-generated .aux file on stdin and check the emitted bibliography.
let mut child = cmd_aux2bib().stdin(Stdio::piped()).spawn().expect(
"Failed to execute aux2bib",
);
{
let input = read_file("example_files/test_biber.aux");
let stdin = child.stdin.as_mut().expect("Failed to get stdin");
stdin.write_all(&input).expect("Failed to write to stdin");
}
let output = child.wait_with_output().expect("Failed to wait on aux2bib");
assert!(output.status.success());
// Prepend text::MONTH_STRINGS before parsing — presumably the output
// references month abbreviations that need @string definitions to parse
// standalone (see mod text) — TODO confirm.
let bibtex_raw = &[
text::MONTH_STRINGS,
std::str::from_utf8(&output.stdout).unwrap(),
].join("\n");
let bibtex = Bibtex::parse(bibtex_raw).expect("Valid bibtex file content");
let bib = bibtex.bibliographies();
assert_eq!(4, bib.len());
// Output could conceivably change in future, so just check that some things are right
assert_eq!(bib[0].entry_type(), "article");
assert_eq!(bib[0].citation_key(), "Guth:1980zm");
assert_eq!(bib[0].tags()[0], ("author".into(), "Guth, Alan H.".into()));
assert_eq!(bib[0].tags()[1], (
"title".into(),
"{The Inflationary Universe: A Possible Solution to the\n Horizon and Flatness Problems}"
.into(),
));
assert_eq!(bib[0].tags()[4], ("year".into(), "1981".into()));
assert_eq!(bib[3].entry_type(), "ARTICLE");
assert_eq!(bib[3].citation_key(), "1982PhRvL..48.1220A");
assert_eq!(bib[3].tags()[1], (
"title".into(),
"{Cosmology for grand unified theories with radiatively induced symmetry breaking}"
.into(),
));
assert_eq!(bib[3].tags()[4], ("year".into(), "1982".into()));
}
#[cfg(not(windows))]
#[test]
fn blg2bib_stdin_bibtex() {
    // Feed a BibTeX .blg log on stdin and check the emitted bibliography.
    // Fixed copy-paste: both expect messages previously named aux2bib even
    // though this test drives blg2bib.
    let mut child = cmd_blg2bib().stdin(Stdio::piped()).spawn().expect(
        "Failed to execute blg2bib",
    );
    {
        let input = read_file("example_files/test_bibtex.blg");
        let stdin = child.stdin.as_mut().expect("Failed to get stdin");
        stdin.write_all(&input).expect("Failed to write to stdin");
    }
    let output = child.wait_with_output().expect("Failed to wait on blg2bib");
    assert!(output.status.success());
    // The tool's stdout must itself be parseable BibTeX.
    let bibtex = Bibtex::parse(std::str::from_utf8(&output.stdout).unwrap())
        .expect("Valid bibtex file content");
    let bib = bibtex.bibliographies();
    assert_eq!(2, bib.len());
    // Output could conceivably change in future, so just check that some things are right
    assert_eq!(bib[0].entry_type(), "article");
    assert_eq!(bib[0].citation_key(), "Higgs:2014aqa");
    assert_eq!(
        bib[0].tags()[0],
        ("author".into(), "Higgs, Peter W.".into())
    );
    assert_eq!(bib[0].tags()[1], (
        "title".into(),
        "{Nobel Lecture: Evading the Goldstone theorem}"
            .into(),
    ));
    assert_eq!(bib[0].tags()[4], ("year".into(), "2014".into()));
    assert_eq!(bib[1].entry_type(), "article");
    assert_eq!(bib[1].citation_key(), "Higgs:2015mei");
    assert_eq!(bib[1].tags()[0], ("author".into(), "Higgs, P. W.".into()));
    assert_eq!(bib[1].tags()[1], (
        "title".into(),
        "{Evading the Goldstone theorem}".into(),
    ));
    assert_eq!(bib[1].tags()[4], ("year".into(), "2015".into()));
}
#[cfg(not(windows))]
#[test]
fn blg2bib_stdin_biblatex() {
    // Feed a biber .blg log on stdin and check the emitted bibliography.
    let mut child = cmd_blg2bib().stdin(Stdio::piped()).spawn().expect(
        "Failed to execute blg2bib",
    );
    {
        let input = read_file("example_files/test_biber.blg");
        let stdin = child.stdin.as_mut().expect("Failed to get stdin");
        stdin.write_all(&input).expect("Failed to write to stdin");
    }
    // Fixed copy-paste: this waits on blg2bib, not aux2bib.
    let output = child.wait_with_output().expect("Failed to wait on blg2bib");
    assert!(output.status.success());
    // Prepend text::MONTH_STRINGS before parsing — presumably the output
    // references month abbreviations that need @string definitions to parse
    // standalone (see mod text) — TODO confirm.
    let bibtex_raw = &[
        text::MONTH_STRINGS,
        std::str::from_utf8(&output.stdout).unwrap(),
    ].join("\n");
    let bibtex = Bibtex::parse(bibtex_raw).expect("Valid bibtex file content");
    let bib = bibtex.bibliographies();
    assert_eq!(4, bib.len());
    // Output could conceivably change in future, so just check that some things are right
    assert_eq!(bib[0].entry_type(), "article");
    assert_eq!(bib[0].citation_key(), "Guth:1980zm");
    assert_eq!(bib[0].tags()[0], ("author".into(), "Guth, Alan H.".into()));
    assert_eq!(bib[0].tags()[1], (
        "title".into(),
        "{The Inflationary Universe: A Possible Solution to the\n Horizon and Flatness Problems}"
            .into(),
    ));
    assert_eq!(bib[0].tags()[4], ("year".into(), "1981".into()));
    assert_eq!(bib[3].entry_type(), "ARTICLE");
    assert_eq!(bib[3].citation_key(), "1982PhRvL..48.1220A");
    assert_eq!(bib[3].tags()[1], (
        "title".into(),
        "{Cosmology for grand unified theories with radiatively induced symmetry breaking}"
            .into(),
    ));
    assert_eq!(bib[3].tags()[4], ("year".into(), "1982".into()));
}
refactor: factor out tests of output from binaries
This enables further tests on the same input (e.g. reading from and
writing to files) much more easily.
use std::env;
use std::process::{Command, Stdio};
use std::path::PathBuf;
use std::io::{Read, Write};
use std::fs::File;
extern crate nom_bibtex;
use nom_bibtex::Bibtex;
mod text;
/// Read the entire contents of `filename` into a byte vector.
///
/// Panics if the file cannot be opened or read — acceptable in a test
/// helper, where a missing fixture should abort the test loudly.
fn read_file(filename: &str) -> Vec<u8> {
    // fs::read opens, sizes, and reads the file in one call, replacing the
    // manual File::open + read_to_end sequence.
    std::fs::read(filename).unwrap()
}
/// Locate the directory containing the compiled binaries under test.
///
/// The test executable lives in `target/<profile>/deps`, so the binaries
/// are two `parent()` hops up from the test binary itself.
fn get_bin_dir() -> PathBuf {
    let test_exe = env::current_exe().expect("test bin's directory");
    let deps_dir = test_exe.parent().expect("test bin's parent directory");
    deps_dir
        .parent()
        .expect("executable's directory")
        .to_path_buf()
}
fn cmd_aux2bib() -> Command {
let path;
if cfg!(not(windows)) {
path = get_bin_dir().join("aux2bib");
} else {
path = get_bin_dir().join("aux2bib.exe");
}
if !path.is_file() {
panic!("aux2bib binary {:?} was not found", path);
}
let mut cmd = Command::new(path);
cmd.env_clear().stderr(Stdio::piped()).stdout(
Stdio::piped(),
);
cmd
}
fn cmd_blg2bib() -> Command {
let path;
if cfg!(not(windows)) {
path = get_bin_dir().join("blg2bib");
} else {
path = get_bin_dir().join("blg2bib.exe");
}
if !path.is_file() {
panic!("blg2bib binary {:?} was not found", path);
}
let mut cmd = Command::new(path);
cmd.env_clear().stderr(Stdio::piped()).stdout(
Stdio::piped(),
);
cmd
}
/// Assert that parsed aux2bib output for the BibTeX fixture contains the
/// two expected Higgs entries with the right keys, authors, titles and years.
fn check_output_aux_bibtex(bibtex: &Bibtex) {
let bib = bibtex.bibliographies();
assert_eq!(2, bib.len());
// Output could conceivably change in future, so just check that some things are right
assert_eq!(bib[0].entry_type(), "article");
assert_eq!(bib[0].citation_key(), "Higgs:2014aqa");
assert_eq!(
bib[0].tags()[0],
("author".into(), "Higgs, Peter W.".into())
);
assert_eq!(bib[0].tags()[1], (
"title".into(),
"{Nobel Lecture: Evading the Goldstone theorem}"
.into(),
));
// Tag index 4 holds the year — presumably fixed by the fixture's field
// order; verify against the example file if entries change.
assert_eq!(bib[0].tags()[4], ("year".into(), "2014".into()));
assert_eq!(bib[1].entry_type(), "article");
assert_eq!(bib[1].citation_key(), "Higgs:2015mei");
assert_eq!(bib[1].tags()[0], ("author".into(), "Higgs, P. W.".into()));
assert_eq!(bib[1].tags()[1], (
"title".into(),
"{Evading the Goldstone theorem}".into(),
));
assert_eq!(bib[1].tags()[4], ("year".into(), "2015".into()));
}
/// Spot-check the BibTeX produced from `example_files/test_biber.aux`:
/// four entries; the first (Guth) and fourth are checked by key, title
/// and year.
fn check_output_aux_biblatex(bibtex: &Bibtex) {
    let bib = bibtex.bibliographies();
    assert_eq!(4, bib.len());
    // Output could conceivably change in future, so just check that some things are right
    assert_eq!(bib[0].entry_type(), "article");
    assert_eq!(bib[0].citation_key(), "Guth:1980zm");
    assert_eq!(bib[0].tags()[0], ("author".into(), "Guth, Alan H.".into()));
    assert_eq!(bib[0].tags()[1], (
        "title".into(),
        "{The Inflationary Universe: A Possible Solution to the\n Horizon and Flatness Problems}"
            .into(),
    ));
    assert_eq!(bib[0].tags()[4], ("year".into(), "1981".into()));
    // The fourth entry's type is upper-case "ARTICLE" in the source database.
    assert_eq!(bib[3].entry_type(), "ARTICLE");
    assert_eq!(bib[3].citation_key(), "1982PhRvL..48.1220A");
    assert_eq!(bib[3].tags()[1], (
        "title".into(),
        "{Cosmology for grand unified theories with radiatively induced symmetry breaking}"
            .into(),
    ));
    assert_eq!(bib[3].tags()[4], ("year".into(), "1982".into()));
}
/// Spot-check the BibTeX produced from `example_files/test_bibtex.blg`.
/// Currently identical to `check_output_aux_bibtex`; kept as a separate
/// function so the .blg expectations can diverge independently.
fn check_output_blg_bibtex(bibtex: &Bibtex) {
    let bib = bibtex.bibliographies();
    assert_eq!(2, bib.len());
    // Output could conceivably change in future, so just check that some things are right
    assert_eq!(bib[0].entry_type(), "article");
    assert_eq!(bib[0].citation_key(), "Higgs:2014aqa");
    assert_eq!(
        bib[0].tags()[0],
        ("author".into(), "Higgs, Peter W.".into())
    );
    assert_eq!(bib[0].tags()[1], (
        "title".into(),
        "{Nobel Lecture: Evading the Goldstone theorem}"
            .into(),
    ));
    assert_eq!(bib[0].tags()[4], ("year".into(), "2014".into()));
    assert_eq!(bib[1].entry_type(), "article");
    assert_eq!(bib[1].citation_key(), "Higgs:2015mei");
    assert_eq!(bib[1].tags()[0], ("author".into(), "Higgs, P. W.".into()));
    assert_eq!(bib[1].tags()[1], (
        "title".into(),
        "{Evading the Goldstone theorem}".into(),
    ));
    assert_eq!(bib[1].tags()[4], ("year".into(), "2015".into()));
}
/// Spot-check the BibTeX produced from `example_files/test_biber.blg`.
/// Currently identical to `check_output_aux_biblatex`; kept separate so
/// the .blg expectations can diverge independently.
fn check_output_blg_biblatex(bibtex: &Bibtex) {
    let bib = bibtex.bibliographies();
    assert_eq!(4, bib.len());
    // Output could conceivably change in future, so just check that some things are right
    assert_eq!(bib[0].entry_type(), "article");
    assert_eq!(bib[0].citation_key(), "Guth:1980zm");
    assert_eq!(bib[0].tags()[0], ("author".into(), "Guth, Alan H.".into()));
    assert_eq!(bib[0].tags()[1], (
        "title".into(),
        "{The Inflationary Universe: A Possible Solution to the\n Horizon and Flatness Problems}"
            .into(),
    ));
    assert_eq!(bib[0].tags()[4], ("year".into(), "1981".into()));
    assert_eq!(bib[3].entry_type(), "ARTICLE");
    assert_eq!(bib[3].citation_key(), "1982PhRvL..48.1220A");
    assert_eq!(bib[3].tags()[1], (
        "title".into(),
        "{Cosmology for grand unified theories with radiatively induced symmetry breaking}"
            .into(),
    ));
    assert_eq!(bib[3].tags()[4], ("year".into(), "1982".into()));
}
/// `aux2bib --help` should run and exit with a success status.
#[cfg(not(windows))]
#[test]
fn aux2bib_runs() {
    let mut cmd = cmd_aux2bib().arg("--help").spawn().expect(
        "Failed to execute aux2bib",
    );
    let error_code = cmd.wait().expect("Failed to wait on aux2bib");
    assert!(error_code.success());
}
/// `blg2bib --help` should run and exit with a success status.
#[cfg(not(windows))]
#[test]
fn blg2bib_runs() {
    let mut cmd = cmd_blg2bib().arg("--help").spawn().expect(
        "Failed to execute blg2bib",
    );
    let error_code = cmd.wait().expect("Failed to wait on blg2bib");
    assert!(error_code.success());
}
/// Empty stdin in, empty stdout out: aux2bib must succeed on no input.
#[cfg(not(windows))]
#[test]
fn aux2bib_stdin_stdout_empty() {
    let mut child = cmd_aux2bib().stdin(Stdio::piped()).spawn().expect(
        "Failed to execute aux2bib",
    );
    {
        // Inner scope ends the mutable borrow of `child` before
        // wait_with_output() consumes it.
        let stdin = child.stdin.as_mut().expect("Failed to get stdin");
        stdin.write_all(b"").expect("Failed to write to stdin");
    }
    let output = child.wait_with_output().expect("Failed to wait on aux2bib");
    assert!(output.status.success());
    assert_eq!(output.stdout, []);
}
/// Empty stdin in, empty stdout out: blg2bib must succeed on no input.
#[cfg(not(windows))]
#[test]
fn blg2bib_stdin_stdout_empty() {
    let mut child = cmd_blg2bib().stdin(Stdio::piped()).spawn().expect(
        "Failed to execute blg2bib",
    );
    {
        // Inner scope ends the mutable borrow of `child` before
        // wait_with_output() consumes it.
        let stdin = child.stdin.as_mut().expect("Failed to get stdin");
        stdin.write_all(b"").expect("Failed to write to stdin");
    }
    // Fixed copy-paste error: the message previously blamed aux2bib.
    let output = child.wait_with_output().expect("Failed to wait on blg2bib");
    assert!(output.status.success());
    assert_eq!(output.stdout, []);
}
/// Pipe a bibtex-generated .aux fixture through aux2bib and check that
/// stdout parses as BibTeX with the expected entries.
#[cfg(not(windows))]
#[test]
fn aux2bib_stdin_stdout_bibtex() {
    let mut child = cmd_aux2bib().stdin(Stdio::piped()).spawn().expect(
        "Failed to execute aux2bib",
    );
    {
        let input = read_file("example_files/test_bibtex.aux");
        let stdin = child.stdin.as_mut().expect("Failed to get stdin");
        stdin.write_all(&input).expect("Failed to write to stdin");
    }
    let output = child.wait_with_output().expect("Failed to wait on aux2bib");
    assert!(output.status.success());
    let bibtex = Bibtex::parse(std::str::from_utf8(&output.stdout).unwrap())
        .expect("Valid bibtex file content");
    check_output_aux_bibtex(&bibtex);
}
/// Pipe a biber-generated .aux fixture through aux2bib and check that
/// stdout parses as BibTeX with the expected entries.
#[cfg(not(windows))]
#[test]
fn aux2bib_stdin_stdout_biblatex() {
    let mut child = cmd_aux2bib().stdin(Stdio::piped()).spawn().expect(
        "Failed to execute aux2bib",
    );
    {
        let input = read_file("example_files/test_biber.aux");
        let stdin = child.stdin.as_mut().expect("Failed to get stdin");
        stdin.write_all(&input).expect("Failed to write to stdin");
    }
    let output = child.wait_with_output().expect("Failed to wait on aux2bib");
    assert!(output.status.success());
    // Prepend the month @string definitions before parsing — presumably
    // the biblatex output references month macros the parser cannot
    // otherwise resolve; TODO confirm against text::MONTH_STRINGS.
    let bibtex_raw = &[
        text::MONTH_STRINGS,
        std::str::from_utf8(&output.stdout).unwrap(),
    ].join("\n");
    let bibtex = Bibtex::parse(bibtex_raw).expect("Valid bibtex file content");
    check_output_aux_biblatex(&bibtex);
}
/// Pipe a bibtex-generated .blg fixture through blg2bib and check that
/// stdout parses as BibTeX with the expected entries.
#[cfg(not(windows))]
#[test]
fn blg2bib_stdin_stdout_bibtex() {
    // Fixed copy-paste errors: both expect messages previously said
    // "aux2bib" even though this test drives blg2bib.
    let mut child = cmd_blg2bib().stdin(Stdio::piped()).spawn().expect(
        "Failed to execute blg2bib",
    );
    {
        let input = read_file("example_files/test_bibtex.blg");
        let stdin = child.stdin.as_mut().expect("Failed to get stdin");
        stdin.write_all(&input).expect("Failed to write to stdin");
    }
    let output = child.wait_with_output().expect("Failed to wait on blg2bib");
    assert!(output.status.success());
    let bibtex = Bibtex::parse(std::str::from_utf8(&output.stdout).unwrap())
        .expect("Valid bibtex file content");
    check_output_blg_bibtex(&bibtex);
}
/// Pipe a biber-generated .blg fixture through blg2bib and check that
/// stdout (with month @string definitions prepended) parses as BibTeX.
#[cfg(not(windows))]
#[test]
fn blg2bib_stdin_stdout_biblatex() {
    let mut child = cmd_blg2bib().stdin(Stdio::piped()).spawn().expect(
        "Failed to execute blg2bib",
    );
    {
        let input = read_file("example_files/test_biber.blg");
        let stdin = child.stdin.as_mut().expect("Failed to get stdin");
        stdin.write_all(&input).expect("Failed to write to stdin");
    }
    // Fixed copy-paste error: the message previously blamed aux2bib.
    let output = child.wait_with_output().expect("Failed to wait on blg2bib");
    assert!(output.status.success());
    let bibtex_raw = &[
        text::MONTH_STRINGS,
        std::str::from_utf8(&output.stdout).unwrap(),
    ].join("\n");
    let bibtex = Bibtex::parse(bibtex_raw).expect("Valid bibtex file content");
    check_output_blg_biblatex(&bibtex);
}
|
extern crate arrayvec;
#[macro_use] extern crate matches;
use arrayvec::ArrayVec;
use arrayvec::ArrayString;
use std::mem;
use arrayvec::CapacityError;
use std::collections::HashMap;
/// Push three Vec<i32> payloads (each summing to 10, total length 8)
/// and check both shared-ref iteration and consuming into_iter.
#[test]
fn test_simple() {
    use std::ops::Add;
    let mut vec: ArrayVec<[Vec<i32>; 3]> = ArrayVec::new();
    vec.push(vec![1, 2, 3, 4]);
    vec.push(vec![10]);
    vec.push(vec![-1, 13, -2]);
    for elt in &vec {
        assert_eq!(elt.iter().fold(0, Add::add), 10);
    }
    let sum_len = vec.into_iter().map(|x| x.len()).fold(0, Add::add);
    assert_eq!(sum_len, 8);
}
#[test]
fn test_u16_index() {
const N: usize = 4096;
let mut vec: ArrayVec<[_; N]> = ArrayVec::new();
for _ in 0..N {
assert!(vec.try_push(1u8).is_ok());
}
assert!(vec.try_push(0).is_err());
assert_eq!(vec.len(), N);
}
/// into_iter is double-ended: front and back consumption interleave,
/// and size_hint stays exact as elements are taken.
#[test]
fn test_iter() {
    let mut iter = ArrayVec::from([1, 2, 3]).into_iter();
    assert_eq!(iter.size_hint(), (3, Some(3)));
    assert_eq!(iter.next_back(), Some(3));
    assert_eq!(iter.next(), Some(1));
    assert_eq!(iter.next_back(), Some(2));
    assert_eq!(iter.size_hint(), (0, Some(0)));
    assert_eq!(iter.next_back(), None);
}
#[test]
fn test_drop() {
use std::cell::Cell;
let flag = &Cell::new(0);
#[derive(Clone)]
struct Bump<'a>(&'a Cell<i32>);
impl<'a> Drop for Bump<'a> {
fn drop(&mut self) {
let n = self.0.get();
self.0.set(n + 1);
}
}
{
let mut array = ArrayVec::<[Bump; 128]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
}
assert_eq!(flag.get(), 2);
// test something with the nullable pointer optimization
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(vec![Bump(flag)]);
array.push(vec![Bump(flag), Bump(flag)]);
array.push(vec![]);
let push4 = array.try_push(vec![Bump(flag)]);
assert_eq!(flag.get(), 0);
drop(push4);
assert_eq!(flag.get(), 1);
drop(array.pop());
assert_eq!(flag.get(), 1);
drop(array.pop());
assert_eq!(flag.get(), 3);
}
assert_eq!(flag.get(), 4);
// test into_inner
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let inner = array.into_inner();
assert!(inner.is_ok());
assert_eq!(flag.get(), 0);
drop(inner);
assert_eq!(flag.get(), 3);
}
// test cloning into_iter
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let mut iter = array.into_iter();
assert_eq!(flag.get(), 0);
iter.next();
assert_eq!(flag.get(), 1);
let clone = iter.clone();
assert_eq!(flag.get(), 1);
drop(clone);
assert_eq!(flag.get(), 3);
drop(iter);
assert_eq!(flag.get(), 5);
}
}
/// collect()/extend() stop silently at capacity and leave unconsumed
/// items in the source iterator.
#[test]
fn test_extend() {
    let mut range = 0..10;
    // Collecting into a 5-capacity ArrayVec takes only the first 5 items...
    let mut array: ArrayVec<[_; 5]> = range.by_ref().collect();
    assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
    // ...leaving 5 as the next item in the iterator.
    assert_eq!(range.next(), Some(5));
    // Extending a full vec consumes nothing: 6 is still available after.
    array.extend(range.by_ref());
    assert_eq!(range.next(), Some(6));
    let mut array: ArrayVec<[_; 10]> = (0..3).collect();
    assert_eq!(&array[..], &[0, 1, 2]);
    array.extend(3..5);
    assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
}
#[test]
fn test_is_send_sync() {
let data = ArrayVec::<[Vec<i32>; 5]>::new();
&data as &Send;
&data as &Sync;
}
#[test]
fn test_compact_size() {
// Future rust will kill these drop flags!
// 4 elements size + 1 len + 1 enum tag + [1 drop flag]
type ByteArray = ArrayVec<[u8; 4]>;
println!("{}", mem::size_of::<ByteArray>());
assert!(mem::size_of::<ByteArray>() <= 8);
// 12 element size + 1 enum tag + 3 padding + 1 len + 1 drop flag + 2 padding
type QuadArray = ArrayVec<[u32; 3]>;
println!("{}", mem::size_of::<QuadArray>());
assert!(mem::size_of::<QuadArray>() <= 24);
}
#[test]
fn test_drain() {
let mut v = ArrayVec::from([0; 8]);
v.pop();
v.drain(0..7);
assert_eq!(&v[..], &[]);
v.extend(0..);
v.drain(1..4);
assert_eq!(&v[..], &[0, 4, 5, 6, 7]);
let u: ArrayVec<[_; 3]> = v.drain(1..4).rev().collect();
assert_eq!(&u[..], &[6, 5, 4]);
assert_eq!(&v[..], &[0, 7]);
v.drain(..);
assert_eq!(&v[..], &[]);
}
#[test]
fn test_retain() {
let mut v = ArrayVec::from([0; 8]);
for (i, elt) in v.iter_mut().enumerate() {
*elt = i;
}
v.retain(|_| true);
assert_eq!(&v[..], &[0, 1, 2, 3, 4, 5, 6, 7]);
v.retain(|elt| {
*elt /= 2;
*elt % 2 == 0
});
assert_eq!(&v[..], &[0, 0, 2, 2]);
v.retain(|_| false);
assert_eq!(&v[..], &[]);
}
#[test]
#[should_panic]
fn test_drain_oob() {
let mut v = ArrayVec::from([0; 8]);
v.pop();
v.drain(0..8);
}
#[test]
#[should_panic]
fn test_drop_panic() {
struct DropPanic;
impl Drop for DropPanic {
fn drop(&mut self) {
panic!("drop");
}
}
let mut array = ArrayVec::<[DropPanic; 1]>::new();
array.push(DropPanic);
}
#[test]
#[should_panic]
fn test_drop_panic_into_iter() {
struct DropPanic;
impl Drop for DropPanic {
fn drop(&mut self) {
panic!("drop");
}
}
let mut array = ArrayVec::<[DropPanic; 1]>::new();
array.push(DropPanic);
array.into_iter();
}
#[test]
fn test_insert() {
let mut v = ArrayVec::from([]);
assert_matches!(v.try_push(1), Err(_));
let mut v = ArrayVec::<[_; 3]>::new();
v.insert(0, 0);
v.insert(1, 1);
//let ret1 = v.try_insert(3, 3);
//assert_matches!(ret1, Err(InsertError::OutOfBounds(_)));
assert_eq!(&v[..], &[0, 1]);
v.insert(2, 2);
assert_eq!(&v[..], &[0, 1, 2]);
let ret2 = v.try_insert(1, 9);
assert_eq!(&v[..], &[0, 1, 2]);
assert_matches!(ret2, Err(_));
let mut v = ArrayVec::from([2]);
assert_matches!(v.try_insert(0, 1), Err(CapacityError { .. }));
assert_matches!(v.try_insert(1, 1), Err(CapacityError { .. }));
//assert_matches!(v.try_insert(2, 1), Err(CapacityError { .. }));
}
#[test]
fn test_into_inner_1() {
let mut v = ArrayVec::from([1, 2]);
v.pop();
let u = v.clone();
assert_eq!(v.into_inner(), Err(u));
}
#[test]
fn test_into_inner_2() {
let mut v = ArrayVec::<[String; 4]>::new();
v.push("a".into());
v.push("b".into());
v.push("c".into());
v.push("d".into());
assert_eq!(v.into_inner().unwrap(), ["a", "b", "c", "d"]);
}
#[test]
fn test_into_inner_3_() {
let mut v = ArrayVec::<[i32; 4]>::new();
v.extend(1..);
assert_eq!(v.into_inner().unwrap(), [1, 2, 3, 4]);
}
/// io::Write for ArrayVec<[u8; N]> accepts as many bytes as fit and
/// reports a short write count instead of an error when full.
#[test]
fn test_write() {
    use std::io::Write;
    let mut v = ArrayVec::<[_; 8]>::new();
    write!(&mut v, "\x01\x02\x03").unwrap();
    assert_eq!(&v[..], &[1, 2, 3]);
    // 16 bytes offered, only 5 slots free: write() returns 5.
    let r = v.write(&[9; 16]).unwrap();
    assert_eq!(r, 5);
    assert_eq!(&v[..], &[1, 2, 3, 9, 9, 9, 9, 9]);
}
#[test]
fn array_clone_from() {
let mut v = ArrayVec::<[_; 4]>::new();
v.push(vec![1, 2]);
v.push(vec![3, 4, 5]);
v.push(vec![6]);
let reference = v.to_vec();
let mut u = ArrayVec::<[_; 4]>::new();
u.clone_from(&v);
assert_eq!(&u, &reference[..]);
let mut t = ArrayVec::<[_; 4]>::new();
t.push(vec![97]);
t.push(vec![]);
t.push(vec![5, 6, 2]);
t.push(vec![2]);
t.clone_from(&v);
assert_eq!(&t, &reference[..]);
t.clear();
t.clone_from(&v);
assert_eq!(&t, &reference[..]);
}
#[test]
fn test_string() {
use std::error::Error;
let text = "hello world";
let mut s = ArrayString::<[_; 16]>::new();
s.try_push_str(text).unwrap();
assert_eq!(&s, text);
assert_eq!(text, &s);
// Make sure Hash / Eq / Borrow match up so we can use HashMap
let mut map = HashMap::new();
map.insert(s, 1);
assert_eq!(map[text], 1);
let mut t = ArrayString::<[_; 2]>::new();
assert!(t.try_push_str(text).is_err());
assert_eq!(&t, "");
t.push_str("ab");
// DerefMut
let tmut: &mut str = &mut t;
assert_eq!(tmut, "ab");
// Test Error trait / try
let t = || -> Result<(), Box<Error>> {
let mut t = ArrayString::<[_; 2]>::new();
try!(t.try_push_str(text));
Ok(())
}();
assert!(t.is_err());
}
#[test]
fn test_string_from() {
let text = "hello world";
// Test `from` constructor
let u = ArrayString::<[_; 11]>::from(text).unwrap();
assert_eq!(&u, text);
assert_eq!(u.len(), text.len());
}
#[test]
fn test_string_from_bytes() {
let text = "hello world";
let u = ArrayString::from_byte_string(b"hello world").unwrap();
assert_eq!(&u, text);
assert_eq!(u.len(), text.len());
}
#[test]
fn test_string_clone() {
let text = "hi";
let mut s = ArrayString::<[_; 4]>::new();
s.push_str("abcd");
let t = ArrayString::<[_; 4]>::from(text).unwrap();
s.clone_from(&t);
assert_eq!(&t, &s);
}
#[test]
fn test_string_push() {
let text = "abcαβγ";
let mut s = ArrayString::<[_; 8]>::new();
for c in text.chars() {
if let Err(_) = s.try_push(c) {
break;
}
}
assert_eq!("abcαβ", &s[..]);
s.push('x');
assert_eq!("abcαβx", &s[..]);
assert!(s.try_push('x').is_err());
}
#[test]
fn test_insert_at_length() {
let mut v = ArrayVec::<[_; 8]>::new();
let result1 = v.try_insert(0, "a");
let result2 = v.try_insert(1, "b");
assert!(result1.is_ok() && result2.is_ok());
assert_eq!(&v[..], &["a", "b"]);
}
#[should_panic]
#[test]
fn test_insert_out_of_bounds() {
let mut v = ArrayVec::<[_; 8]>::new();
let _ = v.try_insert(1, "test");
}
/*
* insert that pushes out the last
let mut u = ArrayVec::from([1, 2, 3, 4]);
let ret = u.try_insert(3, 99);
assert_eq!(&u[..], &[1, 2, 3, 99]);
assert_matches!(ret, Err(_));
let ret = u.try_insert(4, 77);
assert_eq!(&u[..], &[1, 2, 3, 99]);
assert_matches!(ret, Err(_));
*/
#[test]
fn test_drop_in_insert() {
use std::cell::Cell;
let flag = &Cell::new(0);
struct Bump<'a>(&'a Cell<i32>);
impl<'a> Drop for Bump<'a> {
fn drop(&mut self) {
let n = self.0.get();
self.0.set(n + 1);
}
}
flag.set(0);
{
let mut array = ArrayVec::<[_; 2]>::new();
array.push(Bump(flag));
array.insert(0, Bump(flag));
assert_eq!(flag.get(), 0);
let ret = array.try_insert(1, Bump(flag));
assert_eq!(flag.get(), 0);
assert_matches!(ret, Err(_));
drop(ret);
assert_eq!(flag.get(), 1);
}
assert_eq!(flag.get(), 3);
}
#[test]
fn test_pop_at() {
let mut v = ArrayVec::<[String; 4]>::new();
let s = String::from;
v.push(s("a"));
v.push(s("b"));
v.push(s("c"));
v.push(s("d"));
assert_eq!(v.pop_at(4), None);
assert_eq!(v.pop_at(1), Some(s("b")));
assert_eq!(v.pop_at(1), Some(s("c")));
assert_eq!(v.pop_at(2), None);
assert_eq!(&v[..], &["a", "d"]);
}
#[test]
fn test_sizes() {
let v = ArrayVec::from([0u8; 1 << 16]);
assert_eq!(vec![0u8; v.len()], &v[..]);
}
#[test]
fn test_default() {
use std::net;
let s: ArrayString<[u8; 4]> = Default::default();
// Something without `Default` implementation.
let v: ArrayVec<[net::TcpStream; 4]> = Default::default();
assert_eq!(s.len(), 0);
assert_eq!(v.len(), 0);
}
TEST: Add minimal tests for new array sizes
extern crate arrayvec;
#[macro_use] extern crate matches;
use arrayvec::ArrayVec;
use arrayvec::ArrayString;
use std::mem;
use arrayvec::CapacityError;
use std::collections::HashMap;
#[test]
fn test_simple() {
use std::ops::Add;
let mut vec: ArrayVec<[Vec<i32>; 3]> = ArrayVec::new();
vec.push(vec![1, 2, 3, 4]);
vec.push(vec![10]);
vec.push(vec![-1, 13, -2]);
for elt in &vec {
assert_eq!(elt.iter().fold(0, Add::add), 10);
}
let sum_len = vec.into_iter().map(|x| x.len()).fold(0, Add::add);
assert_eq!(sum_len, 8);
}
#[test]
fn test_u16_index() {
const N: usize = 4096;
let mut vec: ArrayVec<[_; N]> = ArrayVec::new();
for _ in 0..N {
assert!(vec.try_push(1u8).is_ok());
}
assert!(vec.try_push(0).is_err());
assert_eq!(vec.len(), N);
}
#[test]
fn test_iter() {
let mut iter = ArrayVec::from([1, 2, 3]).into_iter();
assert_eq!(iter.size_hint(), (3, Some(3)));
assert_eq!(iter.next_back(), Some(3));
assert_eq!(iter.next(), Some(1));
assert_eq!(iter.next_back(), Some(2));
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next_back(), None);
}
#[test]
fn test_drop() {
use std::cell::Cell;
let flag = &Cell::new(0);
#[derive(Clone)]
struct Bump<'a>(&'a Cell<i32>);
impl<'a> Drop for Bump<'a> {
fn drop(&mut self) {
let n = self.0.get();
self.0.set(n + 1);
}
}
{
let mut array = ArrayVec::<[Bump; 128]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
}
assert_eq!(flag.get(), 2);
// test something with the nullable pointer optimization
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(vec![Bump(flag)]);
array.push(vec![Bump(flag), Bump(flag)]);
array.push(vec![]);
let push4 = array.try_push(vec![Bump(flag)]);
assert_eq!(flag.get(), 0);
drop(push4);
assert_eq!(flag.get(), 1);
drop(array.pop());
assert_eq!(flag.get(), 1);
drop(array.pop());
assert_eq!(flag.get(), 3);
}
assert_eq!(flag.get(), 4);
// test into_inner
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let inner = array.into_inner();
assert!(inner.is_ok());
assert_eq!(flag.get(), 0);
drop(inner);
assert_eq!(flag.get(), 3);
}
// test cloning into_iter
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let mut iter = array.into_iter();
assert_eq!(flag.get(), 0);
iter.next();
assert_eq!(flag.get(), 1);
let clone = iter.clone();
assert_eq!(flag.get(), 1);
drop(clone);
assert_eq!(flag.get(), 3);
drop(iter);
assert_eq!(flag.get(), 5);
}
}
#[test]
fn test_extend() {
let mut range = 0..10;
let mut array: ArrayVec<[_; 5]> = range.by_ref().collect();
assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
assert_eq!(range.next(), Some(5));
array.extend(range.by_ref());
assert_eq!(range.next(), Some(6));
let mut array: ArrayVec<[_; 10]> = (0..3).collect();
assert_eq!(&array[..], &[0, 1, 2]);
array.extend(3..5);
assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
}
#[test]
fn test_is_send_sync() {
let data = ArrayVec::<[Vec<i32>; 5]>::new();
&data as &Send;
&data as &Sync;
}
#[test]
fn test_compact_size() {
// Future rust will kill these drop flags!
// 4 elements size + 1 len + 1 enum tag + [1 drop flag]
type ByteArray = ArrayVec<[u8; 4]>;
println!("{}", mem::size_of::<ByteArray>());
assert!(mem::size_of::<ByteArray>() <= 8);
// 12 element size + 1 enum tag + 3 padding + 1 len + 1 drop flag + 2 padding
type QuadArray = ArrayVec<[u32; 3]>;
println!("{}", mem::size_of::<QuadArray>());
assert!(mem::size_of::<QuadArray>() <= 24);
}
#[test]
fn test_drain() {
let mut v = ArrayVec::from([0; 8]);
v.pop();
v.drain(0..7);
assert_eq!(&v[..], &[]);
v.extend(0..);
v.drain(1..4);
assert_eq!(&v[..], &[0, 4, 5, 6, 7]);
let u: ArrayVec<[_; 3]> = v.drain(1..4).rev().collect();
assert_eq!(&u[..], &[6, 5, 4]);
assert_eq!(&v[..], &[0, 7]);
v.drain(..);
assert_eq!(&v[..], &[]);
}
#[test]
fn test_retain() {
let mut v = ArrayVec::from([0; 8]);
for (i, elt) in v.iter_mut().enumerate() {
*elt = i;
}
v.retain(|_| true);
assert_eq!(&v[..], &[0, 1, 2, 3, 4, 5, 6, 7]);
v.retain(|elt| {
*elt /= 2;
*elt % 2 == 0
});
assert_eq!(&v[..], &[0, 0, 2, 2]);
v.retain(|_| false);
assert_eq!(&v[..], &[]);
}
#[test]
#[should_panic]
fn test_drain_oob() {
let mut v = ArrayVec::from([0; 8]);
v.pop();
v.drain(0..8);
}
#[test]
#[should_panic]
fn test_drop_panic() {
struct DropPanic;
impl Drop for DropPanic {
fn drop(&mut self) {
panic!("drop");
}
}
let mut array = ArrayVec::<[DropPanic; 1]>::new();
array.push(DropPanic);
}
#[test]
#[should_panic]
fn test_drop_panic_into_iter() {
struct DropPanic;
impl Drop for DropPanic {
fn drop(&mut self) {
panic!("drop");
}
}
let mut array = ArrayVec::<[DropPanic; 1]>::new();
array.push(DropPanic);
array.into_iter();
}
#[test]
fn test_insert() {
let mut v = ArrayVec::from([]);
assert_matches!(v.try_push(1), Err(_));
let mut v = ArrayVec::<[_; 3]>::new();
v.insert(0, 0);
v.insert(1, 1);
//let ret1 = v.try_insert(3, 3);
//assert_matches!(ret1, Err(InsertError::OutOfBounds(_)));
assert_eq!(&v[..], &[0, 1]);
v.insert(2, 2);
assert_eq!(&v[..], &[0, 1, 2]);
let ret2 = v.try_insert(1, 9);
assert_eq!(&v[..], &[0, 1, 2]);
assert_matches!(ret2, Err(_));
let mut v = ArrayVec::from([2]);
assert_matches!(v.try_insert(0, 1), Err(CapacityError { .. }));
assert_matches!(v.try_insert(1, 1), Err(CapacityError { .. }));
//assert_matches!(v.try_insert(2, 1), Err(CapacityError { .. }));
}
#[test]
fn test_into_inner_1() {
let mut v = ArrayVec::from([1, 2]);
v.pop();
let u = v.clone();
assert_eq!(v.into_inner(), Err(u));
}
#[test]
fn test_into_inner_2() {
let mut v = ArrayVec::<[String; 4]>::new();
v.push("a".into());
v.push("b".into());
v.push("c".into());
v.push("d".into());
assert_eq!(v.into_inner().unwrap(), ["a", "b", "c", "d"]);
}
#[test]
fn test_into_inner_3_() {
let mut v = ArrayVec::<[i32; 4]>::new();
v.extend(1..);
assert_eq!(v.into_inner().unwrap(), [1, 2, 3, 4]);
}
#[test]
fn test_write() {
use std::io::Write;
let mut v = ArrayVec::<[_; 8]>::new();
write!(&mut v, "\x01\x02\x03").unwrap();
assert_eq!(&v[..], &[1, 2, 3]);
let r = v.write(&[9; 16]).unwrap();
assert_eq!(r, 5);
assert_eq!(&v[..], &[1, 2, 3, 9, 9, 9, 9, 9]);
}
#[test]
fn array_clone_from() {
let mut v = ArrayVec::<[_; 4]>::new();
v.push(vec![1, 2]);
v.push(vec![3, 4, 5]);
v.push(vec![6]);
let reference = v.to_vec();
let mut u = ArrayVec::<[_; 4]>::new();
u.clone_from(&v);
assert_eq!(&u, &reference[..]);
let mut t = ArrayVec::<[_; 4]>::new();
t.push(vec![97]);
t.push(vec![]);
t.push(vec![5, 6, 2]);
t.push(vec![2]);
t.clone_from(&v);
assert_eq!(&t, &reference[..]);
t.clear();
t.clone_from(&v);
assert_eq!(&t, &reference[..]);
}
#[test]
fn test_string() {
use std::error::Error;
let text = "hello world";
let mut s = ArrayString::<[_; 16]>::new();
s.try_push_str(text).unwrap();
assert_eq!(&s, text);
assert_eq!(text, &s);
// Make sure Hash / Eq / Borrow match up so we can use HashMap
let mut map = HashMap::new();
map.insert(s, 1);
assert_eq!(map[text], 1);
let mut t = ArrayString::<[_; 2]>::new();
assert!(t.try_push_str(text).is_err());
assert_eq!(&t, "");
t.push_str("ab");
// DerefMut
let tmut: &mut str = &mut t;
assert_eq!(tmut, "ab");
// Test Error trait / try
let t = || -> Result<(), Box<Error>> {
let mut t = ArrayString::<[_; 2]>::new();
try!(t.try_push_str(text));
Ok(())
}();
assert!(t.is_err());
}
#[test]
fn test_string_from() {
let text = "hello world";
// Test `from` constructor
let u = ArrayString::<[_; 11]>::from(text).unwrap();
assert_eq!(&u, text);
assert_eq!(u.len(), text.len());
}
#[test]
fn test_string_from_bytes() {
let text = "hello world";
let u = ArrayString::from_byte_string(b"hello world").unwrap();
assert_eq!(&u, text);
assert_eq!(u.len(), text.len());
}
#[test]
fn test_string_clone() {
let text = "hi";
let mut s = ArrayString::<[_; 4]>::new();
s.push_str("abcd");
let t = ArrayString::<[_; 4]>::from(text).unwrap();
s.clone_from(&t);
assert_eq!(&t, &s);
}
#[test]
fn test_string_push() {
let text = "abcαβγ";
let mut s = ArrayString::<[_; 8]>::new();
for c in text.chars() {
if let Err(_) = s.try_push(c) {
break;
}
}
assert_eq!("abcαβ", &s[..]);
s.push('x');
assert_eq!("abcαβx", &s[..]);
assert!(s.try_push('x').is_err());
}
#[test]
fn test_insert_at_length() {
let mut v = ArrayVec::<[_; 8]>::new();
let result1 = v.try_insert(0, "a");
let result2 = v.try_insert(1, "b");
assert!(result1.is_ok() && result2.is_ok());
assert_eq!(&v[..], &["a", "b"]);
}
#[should_panic]
#[test]
fn test_insert_out_of_bounds() {
let mut v = ArrayVec::<[_; 8]>::new();
let _ = v.try_insert(1, "test");
}
/*
* insert that pushes out the last
let mut u = ArrayVec::from([1, 2, 3, 4]);
let ret = u.try_insert(3, 99);
assert_eq!(&u[..], &[1, 2, 3, 99]);
assert_matches!(ret, Err(_));
let ret = u.try_insert(4, 77);
assert_eq!(&u[..], &[1, 2, 3, 99]);
assert_matches!(ret, Err(_));
*/
#[test]
fn test_drop_in_insert() {
use std::cell::Cell;
let flag = &Cell::new(0);
struct Bump<'a>(&'a Cell<i32>);
impl<'a> Drop for Bump<'a> {
fn drop(&mut self) {
let n = self.0.get();
self.0.set(n + 1);
}
}
flag.set(0);
{
let mut array = ArrayVec::<[_; 2]>::new();
array.push(Bump(flag));
array.insert(0, Bump(flag));
assert_eq!(flag.get(), 0);
let ret = array.try_insert(1, Bump(flag));
assert_eq!(flag.get(), 0);
assert_matches!(ret, Err(_));
drop(ret);
assert_eq!(flag.get(), 1);
}
assert_eq!(flag.get(), 3);
}
#[test]
fn test_pop_at() {
let mut v = ArrayVec::<[String; 4]>::new();
let s = String::from;
v.push(s("a"));
v.push(s("b"));
v.push(s("c"));
v.push(s("d"));
assert_eq!(v.pop_at(4), None);
assert_eq!(v.pop_at(1), Some(s("b")));
assert_eq!(v.pop_at(1), Some(s("c")));
assert_eq!(v.pop_at(2), None);
assert_eq!(&v[..], &["a", "d"]);
}
#[test]
fn test_sizes() {
let v = ArrayVec::from([0u8; 1 << 16]);
assert_eq!(vec![0u8; v.len()], &v[..]);
}
#[test]
fn test_default() {
use std::net;
let s: ArrayString<[u8; 4]> = Default::default();
// Something without `Default` implementation.
let v: ArrayVec<[net::TcpStream; 4]> = Default::default();
assert_eq!(s.len(), 0);
assert_eq!(v.len(), 0);
}
/// Minimal smoke test for the `array-sizes-33-128` feature: just prove
/// that `ArrayVec::from` compiles for lengths in the 33..=128 range.
#[cfg(feature="array-sizes-33-128")]
#[test]
fn test_sizes_33_128() {
    ArrayVec::from([0u8; 52]);
    ArrayVec::from([0u8; 127]);
}
/// Minimal smoke test for the `array-sizes-129-255` feature: prove that
/// `ArrayVec::from` compiles for lengths in the 129..=255 range.
#[cfg(feature="array-sizes-129-255")]
#[test]
fn test_sizes_129_255() {
    ArrayVec::from([0u8; 237]);
    ArrayVec::from([0u8; 255]);
}
|
/*!
This module contains *integration* tests. Their purpose is to test the CLI
interface. Namely, that passing a flag does what it says on the tin.
Tests for more fine grained behavior (like the search or the globber) should be
unit tests in their respective modules.
*/
#![allow(dead_code, unused_imports)]
use std::process::Command;
use workdir::WorkDir;
mod hay;
mod workdir;
// Generates a #[test] named `$name` that creates a fresh WorkDir seeded
// with the hay::SHERLOCK fixture (as file "sherlock"), builds a command
// with `$query` and `$path` as arguments, and hands (wd, cmd) to `$fun`
// for execution and assertions.
// Defaults: query = "Sherlock", path = "sherlock".
macro_rules! sherlock {
    ($name:ident, $fun:expr) => {
        sherlock!($name, "Sherlock", $fun);
    };
    ($name:ident, $query:expr, $fun:expr) => {
        sherlock!($name, $query, "sherlock", $fun);
    };
    ($name:ident, $query:expr, $path:expr, $fun:expr) => {
        #[test]
        fn $name() {
            let wd = WorkDir::new(stringify!($name));
            wd.create("sherlock", hay::SHERLOCK);
            let mut cmd = wd.command();
            cmd.arg($query).arg($path);
            $fun(wd, cmd);
        }
    };
}
// Like sherlock!, but starts from an empty WorkDir with no fixture file;
// `$fun` is responsible for creating whatever files it needs.
macro_rules! clean {
    ($name:ident, $query:expr, $path:expr, $fun:expr) => {
        #[test]
        fn $name() {
            let wd = WorkDir::new(stringify!($name));
            let mut cmd = wd.command();
            cmd.arg($query).arg($path);
            $fun(wd, cmd);
        }
    };
}
/// Convert a unix-style path to the platform's native separators:
/// `/` becomes `\` on Windows, and the string is returned unchanged
/// elsewhere.
fn path(unix: &str) -> String {
    match cfg!(windows) {
        true => unix.replace("/", "\\"),
        false => unix.to_string(),
    }
}
/// Normalize each unix-style path to the platform's separators and
/// return them sorted, for order-independent comparison in tests.
fn paths(unix: &[&str]) -> Vec<String> {
    let mut normalized: Vec<String> = unix.iter().map(|s| path(s)).collect();
    normalized.sort();
    normalized
}
/// Extract the file-path component (text before the first `:`) from each
/// line of search output, sorted for deterministic comparison.
fn paths_from_stdout(stdout: String) -> Vec<String> {
    let mut found: Vec<String> = stdout
        .lines()
        .map(|line| line.split(":").next().unwrap().to_string())
        .collect();
    found.sort();
    found
}
/// Sort the lines of `lines` (after trimming surrounding whitespace)
/// and rejoin them into a single newline-terminated string.
fn sort_lines(lines: &str) -> String {
    let mut sorted: Vec<String> = lines.trim().lines().map(String::from).collect();
    sorted.sort();
    format!("{}\n", sorted.join("\n"))
}
sherlock!(single_file, |wd: WorkDir, mut cmd| {
let lines: String = wd.stdout(&mut cmd);
let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(dir, "Sherlock", ".", |wd: WorkDir, mut cmd| {
let lines: String = wd.stdout(&mut cmd);
let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(line_numbers, |wd: WorkDir, mut cmd: Command| {
cmd.arg("-n");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
3:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(columns, |wd: WorkDir, mut cmd: Command| {
cmd.arg("--column");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
57:For the Doctor Watsons of this world, as opposed to the Sherlock
49:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(with_filename, |wd: WorkDir, mut cmd: Command| {
cmd.arg("-H");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(with_heading, |wd: WorkDir, mut cmd: Command| {
    // This forces the issue since --with-filename is disabled by default
    // when searching one file.
    cmd.arg("--with-filename").arg("--heading");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});
sherlock!(with_heading_default, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
// Search two or more and get --with-filename enabled by default.
// Use -j1 to get deterministic results.
wd.create("foo", "Sherlock Holmes lives on Baker Street.");
cmd.arg("-j1").arg("--heading");
let lines: String = wd.stdout(&mut cmd);
let expected1 = "\
foo
Sherlock Holmes lives on Baker Street.
sherlock
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
let expected2 = "\
sherlock
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
foo
Sherlock Holmes lives on Baker Street.
";
if lines != expected1 {
assert_eq!(lines, expected2);
} else {
assert_eq!(lines, expected1);
}
});
sherlock!(inverted, |wd: WorkDir, mut cmd: Command| {
cmd.arg("-v");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
Holmeses, success in the province of detective work must always
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
assert_eq!(lines, expected);
});
sherlock!(inverted_line_numbers, |wd: WorkDir, mut cmd: Command| {
cmd.arg("-n").arg("-v");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
2:Holmeses, success in the province of detective work must always
4:can extract a clew from a wisp of straw or a flake of cigar ash;
5:but Doctor Watson has to have it taken out for him and dusted,
6:and exhibited clearly, with a label attached.
";
assert_eq!(lines, expected);
});
sherlock!(case_insensitive, "sherlock", |wd: WorkDir, mut cmd: Command| {
cmd.arg("-i");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(word, "as", |wd: WorkDir, mut cmd: Command| {
cmd.arg("-w");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
";
assert_eq!(lines, expected);
});
sherlock!(literal, "()", "file", |wd: WorkDir, mut cmd: Command| {
wd.create("file", "blib\n()\nblab\n");
cmd.arg("-F");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "()\n");
});
sherlock!(quiet, |wd: WorkDir, mut cmd: Command| {
cmd.arg("-q");
let lines: String = wd.stdout(&mut cmd);
assert!(lines.is_empty());
});
sherlock!(replace, |wd: WorkDir, mut cmd: Command| {
cmd.arg("-r").arg("FooBar");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
For the Doctor Watsons of this world, as opposed to the FooBar
be, to a very large extent, the result of luck. FooBar Holmes
";
assert_eq!(lines, expected);
});
sherlock!(replace_groups, "([A-Z][a-z]+) ([A-Z][a-z]+)",
|wd: WorkDir, mut cmd: Command| {
cmd.arg("-r").arg("$2, $1");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
For the Watsons, Doctor of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Holmes, Sherlock
but Watson, Doctor has to have it taken out for him and dusted,
";
assert_eq!(lines, expected);
});
sherlock!(replace_named_groups, "(?P<first>[A-Z][a-z]+) (?P<last>[A-Z][a-z]+)",
|wd: WorkDir, mut cmd: Command| {
cmd.arg("-r").arg("$last, $first");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
For the Watsons, Doctor of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Holmes, Sherlock
but Watson, Doctor has to have it taken out for him and dusted,
";
assert_eq!(lines, expected);
});
sherlock!(file_types, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.create("file.py", "Sherlock");
wd.create("file.rs", "Sherlock");
cmd.arg("-t").arg("rust");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "file.rs:Sherlock\n");
});
sherlock!(file_types_all, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.create("file.py", "Sherlock");
cmd.arg("-t").arg("all");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "file.py:Sherlock\n");
});
sherlock!(file_types_negate, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.remove("sherlock");
wd.create("file.py", "Sherlock");
wd.create("file.rs", "Sherlock");
cmd.arg("-T").arg("rust");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "file.py:Sherlock\n");
});
sherlock!(file_types_negate_all, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
wd.create("file.py", "Sherlock");
cmd.arg("-T").arg("all");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
");
});
sherlock!(file_type_clear, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.create("file.py", "Sherlock");
wd.create("file.rs", "Sherlock");
cmd.arg("--type-clear").arg("rust").arg("-t").arg("rust");
wd.assert_err(&mut cmd);
});
sherlock!(file_type_add, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.create("file.py", "Sherlock");
wd.create("file.rs", "Sherlock");
wd.create("file.wat", "Sherlock");
cmd.arg("--type-add").arg("wat:*.wat").arg("-t").arg("wat");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "file.wat:Sherlock\n");
});
sherlock!(glob, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.create("file.py", "Sherlock");
wd.create("file.rs", "Sherlock");
cmd.arg("-g").arg("*.rs");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "file.rs:Sherlock\n");
});
sherlock!(glob_negate, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.remove("sherlock");
wd.create("file.py", "Sherlock");
wd.create("file.rs", "Sherlock");
cmd.arg("-g").arg("!*.rs");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "file.py:Sherlock\n");
});
sherlock!(count, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
cmd.arg("--count");
let lines: String = wd.stdout(&mut cmd);
let expected = "sherlock:2\n";
assert_eq!(lines, expected);
});
sherlock!(files_with_matches, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
cmd.arg("--files-with-matches");
let lines: String = wd.stdout(&mut cmd);
let expected = "sherlock\n";
assert_eq!(lines, expected);
});
sherlock!(after_context, |wd: WorkDir, mut cmd: Command| {
cmd.arg("-A").arg("1");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
";
assert_eq!(lines, expected);
});
sherlock!(after_context_line_numbers, |wd: WorkDir, mut cmd: Command| {
cmd.arg("-A").arg("1").arg("-n");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
2-Holmeses, success in the province of detective work must always
3:be, to a very large extent, the result of luck. Sherlock Holmes
4-can extract a clew from a wisp of straw or a flake of cigar ash;
";
assert_eq!(lines, expected);
});
sherlock!(before_context, |wd: WorkDir, mut cmd: Command| {
cmd.arg("-B").arg("1");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(before_context_line_numbers, |wd: WorkDir, mut cmd: Command| {
cmd.arg("-B").arg("1").arg("-n");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
2-Holmeses, success in the province of detective work must always
3:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(context, "world|attached", |wd: WorkDir, mut cmd: Command| {
cmd.arg("-C").arg("1");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
--
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
assert_eq!(lines, expected);
});
sherlock!(context_line_numbers, "world|attached",
|wd: WorkDir, mut cmd: Command| {
cmd.arg("-C").arg("1").arg("-n");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
2-Holmeses, success in the province of detective work must always
--
5-but Doctor Watson has to have it taken out for him and dusted,
6:and exhibited clearly, with a label attached.
";
assert_eq!(lines, expected);
});
sherlock!(ignore_hidden, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.remove("sherlock");
wd.create(".sherlock", hay::SHERLOCK);
wd.assert_err(&mut cmd);
});
sherlock!(no_ignore_hidden, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.remove("sherlock");
wd.create(".sherlock", hay::SHERLOCK);
cmd.arg("--hidden");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
.sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
.sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(ignore_git, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "sherlock\n");
wd.assert_err(&mut cmd);
});
sherlock!(ignore_generic, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".ignore", "sherlock\n");
wd.assert_err(&mut cmd);
});
sherlock!(ignore_ripgrep, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".rgignore", "sherlock\n");
wd.assert_err(&mut cmd);
});
sherlock!(no_ignore, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "sherlock\n");
cmd.arg("--no-ignore");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(ignore_git_parent, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.remove("sherlock");
wd.create(".gitignore", "sherlock\n");
wd.create_dir(".git");
wd.create_dir("foo");
wd.create("foo/sherlock", hay::SHERLOCK);
// Even though we search in foo/, which has no .gitignore, ripgrep will
// search parent directories and respect the gitignore files found.
cmd.current_dir(wd.path().join("foo"));
wd.assert_err(&mut cmd);
});
sherlock!(ignore_git_parent_stop, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
// This tests that searching parent directories for .gitignore files stops
// after it sees a .git directory. To test this, we create this directory
// hierarchy:
//
// .gitignore (contains `sherlock`)
// foo/
// .git
// bar/
// sherlock
//
// And we perform the search inside `foo/bar/`. ripgrep will stop looking
// for .gitignore files after it sees `foo/.git/`, and therefore not
// respect the top-level `.gitignore` containing `sherlock`.
wd.remove("sherlock");
wd.create(".gitignore", "sherlock\n");
wd.create_dir("foo");
wd.create_dir("foo/.git");
wd.create_dir("foo/bar");
wd.create("foo/bar/sherlock", hay::SHERLOCK);
cmd.current_dir(wd.path().join("foo").join("bar"));
let lines: String = wd.stdout(&mut cmd);
let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(ignore_ripgrep_parent_no_stop, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
// This is like the `ignore_git_parent_stop` test, except it checks that
// ripgrep *doesn't* stop checking for .rgignore files.
wd.remove("sherlock");
wd.create(".rgignore", "sherlock\n");
wd.create_dir("foo");
wd.create_dir("foo/.git");
wd.create_dir("foo/bar");
wd.create("foo/bar/sherlock", hay::SHERLOCK);
cmd.current_dir(wd.path().join("foo").join("bar"));
// The top-level .rgignore applies.
wd.assert_err(&mut cmd);
});
sherlock!(no_parent_ignore_git, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
// Set up a directory hierarchy like this:
//
// .gitignore
// foo/
// .gitignore
// sherlock
// watson
//
// Where `.gitignore` contains `sherlock` and `foo/.gitignore` contains
// `watson`.
//
// Now *do the search* from the foo directory. By default, ripgrep will
// search parent directories for .gitignore files. The --no-ignore-parent
// flag should prevent that. At the same time, the `foo/.gitignore` file
// will still be respected (since the search is happening in `foo/`).
//
// In other words, we should only see results from `sherlock`, not from
// `watson`.
wd.remove("sherlock");
wd.create(".gitignore", "sherlock\n");
wd.create_dir("foo");
wd.create("foo/.gitignore", "watson\n");
wd.create("foo/sherlock", hay::SHERLOCK);
wd.create("foo/watson", hay::SHERLOCK);
cmd.current_dir(wd.path().join("foo"));
cmd.arg("--no-ignore-parent");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(symlink_nofollow, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.remove("sherlock");
wd.create_dir("foo");
wd.create_dir("foo/bar");
wd.link_dir("foo/baz", "foo/bar/baz");
wd.create_dir("foo/baz");
wd.create("foo/baz/sherlock", hay::SHERLOCK);
cmd.current_dir(wd.path().join("foo/bar"));
wd.assert_err(&mut cmd);
});
sherlock!(symlink_follow, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.remove("sherlock");
wd.create_dir("foo");
wd.create_dir("foo/bar");
wd.create_dir("foo/baz");
wd.create("foo/baz/sherlock", hay::SHERLOCK);
wd.link_dir("foo/baz", "foo/bar/baz");
cmd.arg("-L");
cmd.current_dir(wd.path().join("foo/bar"));
let lines: String = wd.stdout(&mut cmd);
let expected = "\
baz/sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
baz/sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, path(expected));
});
sherlock!(unrestricted1, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "sherlock\n");
cmd.arg("-u");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(unrestricted2, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.remove("sherlock");
wd.create(".sherlock", hay::SHERLOCK);
cmd.arg("-uu");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
.sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
.sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
sherlock!(unrestricted3, "foo", ".", |wd: WorkDir, mut cmd: Command| {
wd.create("file", "foo\x00bar\nfoo\x00baz\n");
cmd.arg("-uuu");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "file:foo\x00bar\nfile:foo\x00baz\n");
});
// --vimgrep prints one result per *match* (not per line), each prefixed
// with file:line:column, so a line with two matches appears twice with
// different column numbers (columns are 1-based, as seen below).
sherlock!(vimgrep, "Sherlock|Watson", ".", |wd: WorkDir, mut cmd: Command| {
cmd.arg("--vimgrep");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
sherlock:1:16:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:1:57:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:3:49:be, to a very large extent, the result of luck. Sherlock Holmes
sherlock:5:12:but Doctor Watson has to have it taken out for him and dusted,
";
assert_eq!(lines, expected);
});
// See: https://github.com/BurntSushi/ripgrep/issues/16
// A trailing-slash pattern like `ghi/` in .gitignore matches directories
// at any depth, so both `ghi/` and `def/ghi/` are ignored. With every
// candidate file ignored, no matches are produced and the command exits
// unsuccessfully (hence assert_err).
clean!(regression_16, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "ghi/");
wd.create_dir("ghi");
wd.create_dir("def/ghi");
wd.create("ghi/toplevel.txt", "xyz");
wd.create("def/ghi/subdir.txt", "xyz");
wd.assert_err(&mut cmd);
});
// See: https://github.com/BurntSushi/ripgrep/issues/25
clean!(regression_25, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "/llvm/");
wd.create_dir("src/llvm");
wd.create("src/llvm/foo", "test");
let lines: String = wd.stdout(&mut cmd);
let expected = path("src/llvm/foo:test\n");
assert_eq!(lines, expected);
cmd.current_dir(wd.path().join("src"));
let lines: String = wd.stdout(&mut cmd);
let expected = path("llvm/foo:test\n");
assert_eq!(lines, expected);
});
// See: https://github.com/BurntSushi/ripgrep/issues/30
clean!(regression_30, "test", ".", |wd: WorkDir, mut cmd: Command| {
if cfg!(windows) {
wd.create(".gitignore", "vendor/**\n!vendor\\manifest");
} else {
wd.create(".gitignore", "vendor/**\n!vendor/manifest");
}
wd.create_dir("vendor");
wd.create("vendor/manifest", "test");
let lines: String = wd.stdout(&mut cmd);
let expected = path("vendor/manifest:test\n");
assert_eq!(lines, expected);
});
// See: https://github.com/BurntSushi/ripgrep/issues/49
clean!(regression_49, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "foo/bar");
wd.create_dir("test/foo/bar");
wd.create("test/foo/bar/baz", "test");
wd.assert_err(&mut cmd);
});
// See: https://github.com/BurntSushi/ripgrep/issues/50
clean!(regression_50, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "XXX/YYY/");
wd.create_dir("abc/def/XXX/YYY");
wd.create_dir("ghi/XXX/YYY");
wd.create("abc/def/XXX/YYY/bar", "test");
wd.create("ghi/XXX/YYY/bar", "test");
wd.assert_err(&mut cmd);
});
// See: https://github.com/BurntSushi/ripgrep/issues/65
clean!(regression_65, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "a/");
wd.create_dir("a");
wd.create("a/foo", "xyz");
wd.create("a/bar", "xyz");
wd.assert_err(&mut cmd);
});
// See: https://github.com/BurntSushi/ripgrep/issues/67
clean!(regression_67, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "/*\n!/dir");
wd.create_dir("dir");
wd.create_dir("foo");
wd.create("foo/bar", "test");
wd.create("dir/bar", "test");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, path("dir/bar:test\n"));
});
// See: https://github.com/BurntSushi/ripgrep/issues/87
clean!(regression_87, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "foo\n**no-vcs**");
wd.create("foo", "test");
wd.assert_err(&mut cmd);
});
// See: https://github.com/BurntSushi/ripgrep/issues/90
clean!(regression_90, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "!.foo");
wd.create(".foo", "test");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, ".foo:test\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/93
clean!(regression_93, r"(\d{1,3}\.){3}\d{1,3}", ".",
|wd: WorkDir, mut cmd: Command| {
wd.create("foo", "192.168.1.1");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "foo:192.168.1.1\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/99
clean!(regression_99, "test", ".",
|wd: WorkDir, mut cmd: Command| {
wd.create("foo1", "test");
wd.create("foo2", "zzz");
wd.create("bar", "test");
cmd.arg("-j1").arg("--heading");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(sort_lines(&lines), sort_lines("bar\ntest\n\nfoo1\ntest\n"));
});
// See: https://github.com/BurntSushi/ripgrep/issues/105
clean!(regression_105_part1, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create("foo", "zztest");
cmd.arg("--vimgrep");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "foo:1:3:zztest\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/105
clean!(regression_105_part2, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create("foo", "zztest");
cmd.arg("--column");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "foo:3:zztest\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/127
clean!(regression_127, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
// Set up a directory hierarchy like this:
//
// .gitignore
// foo/
// sherlock
// watson
//
// Where `.gitignore` contains `foo/sherlock`.
//
// ripgrep should ignore 'foo/sherlock' giving us results only from
// 'foo/watson' but on Windows ripgrep will include both 'foo/sherlock' and
// 'foo/watson' in the search results.
wd.create(".gitignore", "foo/sherlock\n");
wd.create_dir("foo");
wd.create("foo/sherlock", hay::SHERLOCK);
wd.create("foo/watson", hay::SHERLOCK);
let lines: String = wd.stdout(&mut cmd);
let expected = format!("\
{path}:For the Doctor Watsons of this world, as opposed to the Sherlock
{path}:be, to a very large extent, the result of luck. Sherlock Holmes
", path=path("foo/watson"));
assert_eq!(lines, expected);
});
// See: https://github.com/BurntSushi/ripgrep/issues/128
clean!(regression_128, "x", ".", |wd: WorkDir, mut cmd: Command| {
wd.create_bytes("foo", b"01234567\x0b\n\x0b\n\x0b\n\x0b\nx");
cmd.arg("-n");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "foo:5:x\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/131
//
// TODO(burntsushi): Darwin doesn't like this test for some reason.
#[cfg(not(target_os = "macos"))]
clean!(regression_131, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "TopÑapa");
wd.create("TopÑapa", "test");
wd.assert_err(&mut cmd);
});
// See: https://github.com/BurntSushi/ripgrep/issues/137
//
// TODO(burntsushi): Figure out why Windows gives "access denied" errors
// when trying to create a file symlink. For now, disable test on Windows.
#[cfg(not(windows))]
sherlock!(regression_137, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
wd.link_file("sherlock", "sym1");
wd.link_file("sherlock", "sym2");
cmd.arg("sym1");
cmd.arg("sym2");
cmd.arg("-j1");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
sym1:For the Doctor Watsons of this world, as opposed to the Sherlock
sym1:be, to a very large extent, the result of luck. Sherlock Holmes
sym2:For the Doctor Watsons of this world, as opposed to the Sherlock
sym2:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, path(expected));
});
// See: https://github.com/BurntSushi/ripgrep/issues/156
clean!(
regression_156,
r#"#(?:parse|include)\s*\(\s*(?:"|')[./A-Za-z_-]+(?:"|')"#,
"testcase.txt",
|wd: WorkDir, mut cmd: Command| {
const TESTCASE: &'static str = r#"#parse('widgets/foo_bar_macros.vm')
#parse ( 'widgets/mobile/foo_bar_macros.vm' )
#parse ("widgets/foobarhiddenformfields.vm")
#parse ( "widgets/foo_bar_legal.vm" )
#include( 'widgets/foo_bar_tips.vm' )
#include('widgets/mobile/foo_bar_macros.vm')
#include ("widgets/mobile/foo_bar_resetpw.vm")
#parse('widgets/foo-bar-macros.vm')
#parse ( 'widgets/mobile/foo-bar-macros.vm' )
#parse ("widgets/foo-bar-hiddenformfields.vm")
#parse ( "widgets/foo-bar-legal.vm" )
#include( 'widgets/foo-bar-tips.vm' )
#include('widgets/mobile/foo-bar-macros.vm')
#include ("widgets/mobile/foo-bar-resetpw.vm")
"#;
wd.create("testcase.txt", TESTCASE);
cmd.arg("-N");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, TESTCASE);
});
// See: https://github.com/BurntSushi/ripgrep/issues/184
clean!(regression_184, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", ".*");
wd.create_dir("foo/bar");
wd.create("foo/bar/baz", "test");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, format!("{}:test\n", path("foo/bar/baz")));
cmd.current_dir(wd.path().join("./foo/bar"));
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "baz:test\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/199
clean!(regression_199, r"\btest\b", ".", |wd: WorkDir, mut cmd: Command| {
wd.create("foo", "tEsT");
cmd.arg("--smart-case");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "foo:tEsT\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/206
clean!(regression_206, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create_dir("foo");
wd.create("foo/bar.txt", "test");
cmd.arg("-g").arg("*.txt");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, format!("{}:test\n", path("foo/bar.txt")));
});
// See: https://github.com/BurntSushi/ripgrep/issues/228
clean!(regression_228, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create_dir("foo");
cmd.arg("--ignore-file").arg("foo");
wd.assert_err(&mut cmd);
});
// See: https://github.com/BurntSushi/ripgrep/issues/20
sherlock!(feature_20_no_filename, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
cmd.arg("--no-filename");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
// See: https://github.com/BurntSushi/ripgrep/issues/45
sherlock!(feature_45_relative_cwd, "test", ".",
|wd: WorkDir, mut cmd: Command| {
wd.create(".not-an-ignore", "foo\n/bar");
wd.create_dir("bar");
wd.create_dir("baz/bar");
wd.create_dir("baz/baz/bar");
wd.create("bar/test", "test");
wd.create("baz/bar/test", "test");
wd.create("baz/baz/bar/test", "test");
wd.create("baz/foo", "test");
wd.create("baz/test", "test");
wd.create("foo", "test");
wd.create("test", "test");
// First, get a baseline without applying ignore rules.
let lines = paths_from_stdout(wd.stdout(&mut cmd));
assert_eq!(lines, paths(&[
"bar/test", "baz/bar/test", "baz/baz/bar/test", "baz/foo",
"baz/test", "foo", "test",
]));
// Now try again with the ignore file activated.
cmd.arg("--ignore-file").arg(".not-an-ignore");
let lines = paths_from_stdout(wd.stdout(&mut cmd));
assert_eq!(lines, paths(&[
"baz/bar/test", "baz/baz/bar/test", "baz/test", "test",
]));
// Now do it again, but inside the baz directory.
// Since the ignore file is interpreted relative to the CWD, this will
// cause the /bar anchored pattern to filter out baz/bar, which is a
// subtle difference between true parent ignore files and manually
// specified ignore files.
let mut cmd = wd.command();
cmd.arg("test").arg(".").arg("--ignore-file").arg("../.not-an-ignore");
cmd.current_dir(wd.path().join("baz"));
let lines = paths_from_stdout(wd.stdout(&mut cmd));
assert_eq!(lines, paths(&["baz/bar/test", "test"]));
});
// See: https://github.com/BurntSushi/ripgrep/issues/45
sherlock!(feature_45_precedence_with_others, "test", ".",
|wd: WorkDir, mut cmd: Command| {
wd.create(".not-an-ignore", "*.log");
wd.create(".ignore", "!imp.log");
wd.create("imp.log", "test");
wd.create("wat.log", "test");
cmd.arg("--ignore-file").arg(".not-an-ignore");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "imp.log:test\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/45
sherlock!(feature_45_precedence_internal, "test", ".",
|wd: WorkDir, mut cmd: Command| {
wd.create(".not-an-ignore1", "*.log");
wd.create(".not-an-ignore2", "!imp.log");
wd.create("imp.log", "test");
wd.create("wat.log", "test");
cmd.arg("--ignore-file").arg(".not-an-ignore1");
cmd.arg("--ignore-file").arg(".not-an-ignore2");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "imp.log:test\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/68
clean!(feature_68_no_ignore_vcs, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create(".gitignore", "foo");
wd.create(".ignore", "bar");
wd.create("foo", "test");
wd.create("bar", "test");
cmd.arg("--no-ignore-vcs");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "foo:test\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/70
sherlock!(feature_70_smart_case, "sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
cmd.arg("--smart-case");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
assert_eq!(lines, expected);
});
// See: https://github.com/BurntSushi/ripgrep/issues/89
sherlock!(feature_89_files_with_matches, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
cmd.arg("--null").arg("--files-with-matches");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "sherlock\x00");
});
// See: https://github.com/BurntSushi/ripgrep/issues/89
sherlock!(feature_89_count, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
cmd.arg("--null").arg("--count");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "sherlock\x002\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/89
sherlock!(feature_89_files, "NADA", ".",
|wd: WorkDir, mut cmd: Command| {
cmd.arg("--null").arg("--files");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "sherlock\x00");
});
// See: https://github.com/BurntSushi/ripgrep/issues/89
sherlock!(feature_89_match, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
cmd.arg("--null").arg("-C1");
let lines: String = wd.stdout(&mut cmd);
let expected = "\
sherlock\x00For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock\x00Holmeses, success in the province of detective work must always
sherlock\x00be, to a very large extent, the result of luck. Sherlock Holmes
sherlock\x00can extract a clew from a wisp of straw or a flake of cigar ash;
";
assert_eq!(lines, expected);
});
// See: https://github.com/BurntSushi/ripgrep/issues/109
clean!(feature_109_max_depth, "far", ".", |wd: WorkDir, mut cmd: Command| {
wd.create_dir("one");
wd.create("one/pass", "far");
wd.create_dir("one/too");
wd.create("one/too/many", "far");
cmd.arg("--maxdepth").arg("2");
let lines: String = wd.stdout(&mut cmd);
let expected = path("one/pass:far\n");
assert_eq!(lines, expected);
});
// See: https://github.com/BurntSushi/ripgrep/issues/124
clean!(feature_109_case_sensitive_part1, "test", ".",
|wd: WorkDir, mut cmd: Command| {
wd.create("foo", "tEsT");
cmd.arg("--smart-case").arg("--case-sensitive");
wd.assert_err(&mut cmd);
});
// See: https://github.com/BurntSushi/ripgrep/issues/124
clean!(feature_109_case_sensitive_part2, "test", ".",
|wd: WorkDir, mut cmd: Command| {
wd.create("foo", "tEsT");
cmd.arg("--ignore-case").arg("--case-sensitive");
wd.assert_err(&mut cmd);
});
// See: https://github.com/BurntSushi/ripgrep/issues/159
clean!(feature_159_works, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create("foo", "test\ntest");
cmd.arg("-m1");
let lines: String = wd.stdout(&mut cmd);
assert_eq!(lines, "foo:test\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/159
clean!(feature_159_zero_max, "test", ".", |wd: WorkDir, mut cmd: Command| {
wd.create("foo", "test\ntest");
cmd.arg("-m0");
wd.assert_err(&mut cmd);
});
/// A file containing NUL bytes is detected as binary; without `-a`,
/// ripgrep produces no results for it, so the command exits with an
/// error status (which `assert_err` checks).
#[test]
fn binary_nosearch() {
    let work = WorkDir::new("binary_nosearch");
    work.create("file", "foo\x00bar\nfoo\x00baz\n");

    let mut search = work.command();
    search.arg("foo").arg("file");
    work.assert_err(&mut search);
}
// The following two tests show a discrepancy in search results between
// searching with memory mapped files and stream searching. Stream searching
// uses a heuristic (that GNU grep also uses) where NUL bytes are replaced with
// the EOL terminator, which tends to avoid allocating large amounts of memory
// for really long "lines." The memory map searcher has no need to worry about
// such things, and more than that, it would be pretty hard for it to match
// the semantics of streaming search in this case.
//
// Binary files with lots of NULs aren't really part of the use case of ripgrep
// (or any other grep-like tool for that matter), so we shouldn't feel too bad
// about it.
/// With `-a` (treat binary as text) and `--mmap`, the memory-map searcher
/// reports the file's contents verbatim, NUL bytes included. See the
/// comment block above for why mmap and stream search can differ here.
#[test]
fn binary_search_mmap() {
    let work = WorkDir::new("binary_search_mmap");
    work.create("file", "foo\x00bar\nfoo\x00baz\n");

    let mut search = work.command();
    search.arg("-a").arg("--mmap").arg("foo").arg("file");
    let got: String = work.stdout(&mut search);
    assert_eq!(got, "foo\x00bar\nfoo\x00baz\n");
}
/// Same fixture as `binary_search_mmap`, but forcing the streaming
/// searcher via `--no-mmap`. For this input the output happens to agree
/// with the mmap searcher's.
#[test]
fn binary_search_no_mmap() {
    let work = WorkDir::new("binary_search_no_mmap");
    work.create("file", "foo\x00bar\nfoo\x00baz\n");

    let mut search = work.command();
    search.arg("-a").arg("--no-mmap").arg("foo").arg("file");
    let got: String = work.stdout(&mut search);
    assert_eq!(got, "foo\x00bar\nfoo\x00baz\n");
}
/// `--files` lists every searchable file without searching. Directory
/// traversal order is not deterministic, so either ordering of the two
/// files is accepted.
#[test]
fn files() {
    let work = WorkDir::new("files");
    work.create("file", "");
    work.create_dir("dir");
    work.create("dir/file", "");

    let mut list = work.command();
    list.arg("--files");
    let got: String = work.stdout(&mut list);
    let ordering_a = path("file\ndir/file\n");
    let ordering_b = path("dir/file\nfile\n");
    assert!(got == ordering_a || got == ordering_b);
}
// See: https://github.com/BurntSushi/ripgrep/issues/64
/// Regression test for issue #64: `--files` with an explicit path
/// argument must restrict the listing to that path only, not list
/// sibling directories as well.
#[test]
fn regression_64() {
    let work = WorkDir::new("regression_64");
    work.create_dir("dir");
    work.create_dir("foo");
    work.create("dir/abc", "");
    work.create("foo/abc", "");

    let mut list = work.command();
    list.arg("--files").arg("foo");
    let got: String = work.stdout(&mut list);
    assert_eq!(got, path("foo/abc\n"));
}
/// `--type-list` prints the built-in file-type definitions. The exact
/// list evolves over time, so we only require non-empty output.
#[test]
fn type_list() {
    let work = WorkDir::new("type_list");
    let mut list = work.command();
    list.arg("--type-list");
    let output: String = work.stdout(&mut list);
    assert!(!output.is_empty());
}
Disable symlink tests on Windows.
For reasons that are unclear, these tests pass on AppVeyor but fail in other
build systems, so just disable them on Windows for now.
See: https://github.com/rust-lang/rust/pull/37149
/*!
This module contains *integration* tests. Their purpose is to test the CLI
interface. Namely, that passing a flag does what it says on the tin.
Tests for more fine grained behavior (like the search or the globber) should be
unit tests in their respective modules.
*/
#![allow(dead_code, unused_imports)]
use std::process::Command;
use workdir::WorkDir;
mod hay;
mod workdir;
/// Generate a `#[test]` that runs ripgrep against the Sherlock haystack.
///
/// Creates a fresh `WorkDir` named after the test, writes `hay::SHERLOCK`
/// to a file named `sherlock`, builds a command with the given query and
/// path, and hands both to `$fun` for assertions. The query defaults to
/// "Sherlock" and the path defaults to the `sherlock` file itself.
macro_rules! sherlock {
    ($name:ident, $fun:expr) => {
        // Default query.
        sherlock!($name, "Sherlock", $fun);
    };
    ($name:ident, $query:expr, $fun:expr) => {
        // Default search path.
        sherlock!($name, $query, "sherlock", $fun);
    };
    ($name:ident, $query:expr, $path:expr, $fun:expr) => {
        #[test]
        fn $name() {
            let wd = WorkDir::new(stringify!($name));
            wd.create("sherlock", hay::SHERLOCK);
            let mut cmd = wd.command();
            cmd.arg($query).arg($path);
            $fun(wd, cmd);
        }
    };
}
/// Like `sherlock!`, but starts from an empty working directory: no
/// haystack file is created, so `$fun` fully controls the fixture files.
macro_rules! clean {
    ($name:ident, $query:expr, $path:expr, $fun:expr) => {
        #[test]
        fn $name() {
            let wd = WorkDir::new(stringify!($name));
            let mut cmd = wd.command();
            cmd.arg($query).arg($path);
            $fun(wd, cmd);
        }
    };
}
/// Normalize a `/`-separated path literal to the platform's separator.
/// On Windows every forward slash becomes a backslash; elsewhere the
/// input is returned unchanged (as an owned `String`).
fn path(unix: &str) -> String {
    match cfg!(windows) {
        true => unix.replace("/", "\\"),
        false => unix.to_string(),
    }
}
/// Convert each unix-style path literal to platform form (via `path`)
/// and return them sorted, for order-insensitive comparisons.
fn paths(unix: &[&str]) -> Vec<String> {
    let mut converted = Vec::with_capacity(unix.len());
    for p in unix {
        converted.push(path(p));
    }
    converted.sort();
    converted
}
/// Extract the file-path prefix (everything before the first `:`) from
/// each line of search output and return the paths sorted.
fn paths_from_stdout(stdout: String) -> Vec<String> {
    let mut paths = Vec::new();
    for line in stdout.lines() {
        let prefix = line.split(':').next().unwrap();
        paths.push(prefix.to_string());
    }
    paths.sort();
    paths
}
/// Sort the lines of `lines` lexicographically, trimming surrounding
/// whitespace and normalizing to a single trailing newline, so outputs
/// can be compared without depending on result order.
fn sort_lines(lines: &str) -> String {
    let mut sorted: Vec<&str> = lines.trim().lines().collect();
    sorted.sort();
    let mut out = sorted.join("\n");
    out.push('\n');
    out
}
// ---------------------------------------------------------------------
// Basic output formatting: filename prefixes, headings, line and column
// numbers.
// ---------------------------------------------------------------------

// A single-file search prints bare matching lines (no filename prefix).
sherlock!(single_file, |wd: WorkDir, mut cmd| {
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

// Searching a directory prefixes each match with its file path.
sherlock!(dir, "Sherlock", ".", |wd: WorkDir, mut cmd| {
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

// -n prefixes each match with its 1-based line number.
sherlock!(line_numbers, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-n");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
3:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

// --column prefixes each match with the 1-based column of the match.
sherlock!(columns, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("--column");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
57:For the Doctor Watsons of this world, as opposed to the Sherlock
49:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

// -H forces the filename prefix even for a single-file search.
sherlock!(with_filename, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-H");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

// --heading prints the filename once above its matches.
sherlock!(with_heading, |wd: WorkDir, mut cmd: Command| {
    // This forces the issue since --with-filename is disabled by default
    // when searching one file.
    cmd.arg("--with-filename").arg("--heading");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

// With two files, --heading is on by default; file order is not
// deterministic, so accept either ordering of the two groups.
sherlock!(with_heading_default, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
    // Search two or more and get --with-filename enabled by default.
    // Use -j1 to get deterministic results.
    wd.create("foo", "Sherlock Holmes lives on Baker Street.");
    cmd.arg("-j1").arg("--heading");
    let lines: String = wd.stdout(&mut cmd);
    let expected1 = "\
foo
Sherlock Holmes lives on Baker Street.

sherlock
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    let expected2 = "\
sherlock
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes

foo
Sherlock Holmes lives on Baker Street.
";
    if lines != expected1 {
        assert_eq!(lines, expected2);
    } else {
        assert_eq!(lines, expected1);
    }
});
// ---------------------------------------------------------------------
// Match-selection flags: inversion, case handling, word/literal match,
// quiet mode and replacements.
// ---------------------------------------------------------------------

// -v prints only the lines that do NOT match.
sherlock!(inverted, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-v");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
Holmeses, success in the province of detective work must always
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
    assert_eq!(lines, expected);
});

// -v combined with -n keeps line numbers on the non-matching lines.
sherlock!(inverted_line_numbers, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-n").arg("-v");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
2:Holmeses, success in the province of detective work must always
4:can extract a clew from a wisp of straw or a flake of cigar ash;
5:but Doctor Watson has to have it taken out for him and dusted,
6:and exhibited clearly, with a label attached.
";
    assert_eq!(lines, expected);
});

// -i matches the lowercase query against capitalized occurrences.
sherlock!(case_insensitive, "sherlock", |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-i");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

// -w matches "as" only as a whole word, not inside "ash"/"flake" etc.
sherlock!(word, "as", |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-w");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
";
    assert_eq!(lines, expected);
});

// -F treats the pattern as a literal, so "()" is not a regex group.
sherlock!(literal, "()", "file", |wd: WorkDir, mut cmd: Command| {
    wd.create("file", "blib\n()\nblab\n");
    cmd.arg("-F");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "()\n");
});

// -q suppresses all output (exit status alone reports the result).
sherlock!(quiet, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-q");
    let lines: String = wd.stdout(&mut cmd);
    assert!(lines.is_empty());
});

// -r substitutes every match with the replacement text.
sherlock!(replace, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-r").arg("FooBar");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
For the Doctor Watsons of this world, as opposed to the FooBar
be, to a very large extent, the result of luck. FooBar Holmes
";
    assert_eq!(lines, expected);
});

// Numbered capture groups ($1, $2) are available in the replacement.
sherlock!(replace_groups, "([A-Z][a-z]+) ([A-Z][a-z]+)",
|wd: WorkDir, mut cmd: Command| {
    cmd.arg("-r").arg("$2, $1");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
For the Watsons, Doctor of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Holmes, Sherlock
but Watson, Doctor has to have it taken out for him and dusted,
";
    assert_eq!(lines, expected);
});

// Named capture groups ($name) work in the replacement too.
sherlock!(replace_named_groups, "(?P<first>[A-Z][a-z]+) (?P<last>[A-Z][a-z]+)",
|wd: WorkDir, mut cmd: Command| {
    cmd.arg("-r").arg("$last, $first");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
For the Watsons, Doctor of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Holmes, Sherlock
but Watson, Doctor has to have it taken out for him and dusted,
";
    assert_eq!(lines, expected);
});
// ---------------------------------------------------------------------
// File-type filters (-t/-T/--type-*), globs (-g), and result summaries
// (--count, --files-with-matches).
// ---------------------------------------------------------------------

// -t restricts the search to files of the named type.
sherlock!(file_types, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create("file.py", "Sherlock");
    wd.create("file.rs", "Sherlock");
    cmd.arg("-t").arg("rust");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "file.rs:Sherlock\n");
});

// -t all searches only files matching any known type definition.
sherlock!(file_types_all, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create("file.py", "Sherlock");
    cmd.arg("-t").arg("all");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "file.py:Sherlock\n");
});

// -T excludes files of the named type.
sherlock!(file_types_negate, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.remove("sherlock");
    wd.create("file.py", "Sherlock");
    wd.create("file.rs", "Sherlock");
    cmd.arg("-T").arg("rust");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "file.py:Sherlock\n");
});

// -T all excludes every known type, leaving only untyped files.
sherlock!(file_types_negate_all, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
    wd.create("file.py", "Sherlock");
    cmd.arg("-T").arg("all");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
");
});

// --type-clear removes a type definition; -t on it then matches nothing,
// which ripgrep reports as an error exit.
sherlock!(file_type_clear, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create("file.py", "Sherlock");
    wd.create("file.rs", "Sherlock");
    cmd.arg("--type-clear").arg("rust").arg("-t").arg("rust");
    wd.assert_err(&mut cmd);
});

// --type-add defines a new type usable with -t in the same invocation.
sherlock!(file_type_add, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create("file.py", "Sherlock");
    wd.create("file.rs", "Sherlock");
    wd.create("file.wat", "Sherlock");
    cmd.arg("--type-add").arg("wat:*.wat").arg("-t").arg("wat");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "file.wat:Sherlock\n");
});

// -g includes only paths matching the glob.
sherlock!(glob, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create("file.py", "Sherlock");
    wd.create("file.rs", "Sherlock");
    cmd.arg("-g").arg("*.rs");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "file.rs:Sherlock\n");
});

// A leading ! negates the glob, excluding matching paths.
sherlock!(glob_negate, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.remove("sherlock");
    wd.create("file.py", "Sherlock");
    wd.create("file.rs", "Sherlock");
    cmd.arg("-g").arg("!*.rs");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "file.py:Sherlock\n");
});

// --count prints per-file match counts instead of matching lines.
sherlock!(count, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    cmd.arg("--count");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "sherlock:2\n";
    assert_eq!(lines, expected);
});

// --files-with-matches prints only the names of files containing a match.
sherlock!(files_with_matches, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    cmd.arg("--files-with-matches");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "sherlock\n";
    assert_eq!(lines, expected);
});
// ---------------------------------------------------------------------
// Context flags: -A (after), -B (before), -C (both). With -n, context
// lines use a `-` separator while matching lines use `:`.
// ---------------------------------------------------------------------

sherlock!(after_context, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-A").arg("1");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
";
    assert_eq!(lines, expected);
});

sherlock!(after_context_line_numbers, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-A").arg("1").arg("-n");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
2-Holmeses, success in the province of detective work must always
3:be, to a very large extent, the result of luck. Sherlock Holmes
4-can extract a clew from a wisp of straw or a flake of cigar ash;
";
    assert_eq!(lines, expected);
});

sherlock!(before_context, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-B").arg("1");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

sherlock!(before_context_line_numbers, |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-B").arg("1").arg("-n");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
2-Holmeses, success in the province of detective work must always
3:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

// Non-contiguous context regions are separated by a `--` line.
sherlock!(context, "world|attached", |wd: WorkDir, mut cmd: Command| {
    cmd.arg("-C").arg("1");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
--
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
    assert_eq!(lines, expected);
});

sherlock!(context_line_numbers, "world|attached",
|wd: WorkDir, mut cmd: Command| {
    cmd.arg("-C").arg("1").arg("-n");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
2-Holmeses, success in the province of detective work must always
--
5-but Doctor Watson has to have it taken out for him and dusted,
6:and exhibited clearly, with a label attached.
";
    assert_eq!(lines, expected);
});
// ---------------------------------------------------------------------
// Ignore-file semantics: hidden files, .gitignore/.ignore/.rgignore, and
// parent-directory ignore traversal. `assert_err` means "no matches".
// ---------------------------------------------------------------------

// Hidden (dot-prefixed) files are skipped by default.
sherlock!(ignore_hidden, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.remove("sherlock");
    wd.create(".sherlock", hay::SHERLOCK);
    wd.assert_err(&mut cmd);
});

// --hidden opts back in to searching dot files.
sherlock!(no_ignore_hidden, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.remove("sherlock");
    wd.create(".sherlock", hay::SHERLOCK);
    cmd.arg("--hidden");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
.sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
.sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

// Each of the three ignore-file flavors suppresses the match on its own.
sherlock!(ignore_git, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "sherlock\n");
    wd.assert_err(&mut cmd);
});

sherlock!(ignore_generic, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".ignore", "sherlock\n");
    wd.assert_err(&mut cmd);
});

sherlock!(ignore_ripgrep, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".rgignore", "sherlock\n");
    wd.assert_err(&mut cmd);
});

// --no-ignore disables all ignore files.
sherlock!(no_ignore, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "sherlock\n");
    cmd.arg("--no-ignore");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

sherlock!(ignore_git_parent, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.remove("sherlock");
    wd.create(".gitignore", "sherlock\n");
    wd.create_dir(".git");
    wd.create_dir("foo");
    wd.create("foo/sherlock", hay::SHERLOCK);
    // Even though we search in foo/, which has no .gitignore, ripgrep will
    // search parent directories and respect the gitignore files found.
    cmd.current_dir(wd.path().join("foo"));
    wd.assert_err(&mut cmd);
});

sherlock!(ignore_git_parent_stop, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
    // This tests that searching parent directories for .gitignore files stops
    // after it sees a .git directory. To test this, we create this directory
    // hierarchy:
    //
    // .gitignore (contains `sherlock`)
    // foo/
    //   .git
    //   bar/
    //     sherlock
    //
    // And we perform the search inside `foo/bar/`. ripgrep will stop looking
    // for .gitignore files after it sees `foo/.git/`, and therefore not
    // respect the top-level `.gitignore` containing `sherlock`.
    wd.remove("sherlock");
    wd.create(".gitignore", "sherlock\n");
    wd.create_dir("foo");
    wd.create_dir("foo/.git");
    wd.create_dir("foo/bar");
    wd.create("foo/bar/sherlock", hay::SHERLOCK);
    cmd.current_dir(wd.path().join("foo").join("bar"));
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

sherlock!(ignore_ripgrep_parent_no_stop, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
    // This is like the `ignore_git_parent_stop` test, except it checks that
    // ripgrep *doesn't* stop checking for .rgignore files.
    wd.remove("sherlock");
    wd.create(".rgignore", "sherlock\n");
    wd.create_dir("foo");
    wd.create_dir("foo/.git");
    wd.create_dir("foo/bar");
    wd.create("foo/bar/sherlock", hay::SHERLOCK);
    cmd.current_dir(wd.path().join("foo").join("bar"));
    // The top-level .rgignore applies.
    wd.assert_err(&mut cmd);
});
sherlock!(no_parent_ignore_git, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
    // Set up a directory hierarchy like this:
    //
    // .gitignore
    // foo/
    //   .gitignore
    //   sherlock
    //   watson
    //
    // Where `.gitignore` contains `sherlock` and `foo/.gitignore` contains
    // `watson`.
    //
    // Now *do the search* from the foo directory. By default, ripgrep will
    // search parent directories for .gitignore files. The --no-ignore-parent
    // flag should prevent that. At the same time, the `foo/.gitignore` file
    // will still be respected (since the search is happening in `foo/`).
    //
    // In other words, we should only see results from `sherlock`, not from
    // `watson`.
    wd.remove("sherlock");
    wd.create(".gitignore", "sherlock\n");
    wd.create_dir("foo");
    wd.create("foo/.gitignore", "watson\n");
    wd.create("foo/sherlock", hay::SHERLOCK);
    wd.create("foo/watson", hay::SHERLOCK);
    cmd.current_dir(wd.path().join("foo"));
    cmd.arg("--no-ignore-parent");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});
// Symlink tests are disabled on Windows; creating symlinks there needs
// special privileges and has been flaky outside AppVeyor.

// Symlinked directories are not followed by default.
#[cfg(not(windows))]
sherlock!(symlink_nofollow, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.remove("sherlock");
    wd.create_dir("foo");
    wd.create_dir("foo/bar");
    wd.link_dir("foo/baz", "foo/bar/baz");
    wd.create_dir("foo/baz");
    wd.create("foo/baz/sherlock", hay::SHERLOCK);
    cmd.current_dir(wd.path().join("foo/bar"));
    wd.assert_err(&mut cmd);
});

// -L opts in to following symlinked directories.
#[cfg(not(windows))]
sherlock!(symlink_follow, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.remove("sherlock");
    wd.create_dir("foo");
    wd.create_dir("foo/bar");
    wd.create_dir("foo/baz");
    wd.create("foo/baz/sherlock", hay::SHERLOCK);
    wd.link_dir("foo/baz", "foo/bar/baz");
    cmd.arg("-L");
    cmd.current_dir(wd.path().join("foo/bar"));
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
baz/sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
baz/sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, path(expected));
});
// -u levels progressively relax filtering:
// -u   => no ignore files; -uu  => also hidden files;
// -uuu => also search binary files as text.
sherlock!(unrestricted1, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "sherlock\n");
    cmd.arg("-u");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

sherlock!(unrestricted2, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.remove("sherlock");
    wd.create(".sherlock", hay::SHERLOCK);
    cmd.arg("-uu");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
.sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
.sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

sherlock!(unrestricted3, "foo", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create("file", "foo\x00bar\nfoo\x00baz\n");
    cmd.arg("-uuu");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "file:foo\x00bar\nfile:foo\x00baz\n");
});

// --vimgrep prints one line per match with file:line:column prefixes.
sherlock!(vimgrep, "Sherlock|Watson", ".", |wd: WorkDir, mut cmd: Command| {
    cmd.arg("--vimgrep");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock:1:16:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:1:57:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:3:49:be, to a very large extent, the result of luck. Sherlock Holmes
sherlock:5:12:but Doctor Watson has to have it taken out for him and dusted,
";
    assert_eq!(lines, expected);
});
// ---------------------------------------------------------------------
// Regression tests. Each is pinned to the GitHub issue it reproduces.
// ---------------------------------------------------------------------

// See: https://github.com/BurntSushi/ripgrep/issues/16
// A directory-only ignore pattern must match at any depth.
clean!(regression_16, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "ghi/");
    wd.create_dir("ghi");
    wd.create_dir("def/ghi");
    wd.create("ghi/toplevel.txt", "xyz");
    wd.create("def/ghi/subdir.txt", "xyz");
    wd.assert_err(&mut cmd);
});

// See: https://github.com/BurntSushi/ripgrep/issues/25
// A root-anchored pattern (/llvm/) must not match a nested src/llvm,
// regardless of which directory the search starts in.
clean!(regression_25, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "/llvm/");
    wd.create_dir("src/llvm");
    wd.create("src/llvm/foo", "test");
    let lines: String = wd.stdout(&mut cmd);
    let expected = path("src/llvm/foo:test\n");
    assert_eq!(lines, expected);
    cmd.current_dir(wd.path().join("src"));
    let lines: String = wd.stdout(&mut cmd);
    let expected = path("llvm/foo:test\n");
    assert_eq!(lines, expected);
});

// See: https://github.com/BurntSushi/ripgrep/issues/30
// A whitelist pattern (!vendor/manifest) must override vendor/**.
clean!(regression_30, "test", ".", |wd: WorkDir, mut cmd: Command| {
    if cfg!(windows) {
        wd.create(".gitignore", "vendor/**\n!vendor\\manifest");
    } else {
        wd.create(".gitignore", "vendor/**\n!vendor/manifest");
    }
    wd.create_dir("vendor");
    wd.create("vendor/manifest", "test");
    let lines: String = wd.stdout(&mut cmd);
    let expected = path("vendor/manifest:test\n");
    assert_eq!(lines, expected);
});

// See: https://github.com/BurntSushi/ripgrep/issues/49
// A pattern with a slash (foo/bar) must match below any prefix.
clean!(regression_49, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "foo/bar");
    wd.create_dir("test/foo/bar");
    wd.create("test/foo/bar/baz", "test");
    wd.assert_err(&mut cmd);
});

// See: https://github.com/BurntSushi/ripgrep/issues/50
// A directory pattern with a slash (XXX/YYY/) must match at any depth.
clean!(regression_50, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "XXX/YYY/");
    wd.create_dir("abc/def/XXX/YYY");
    wd.create_dir("ghi/XXX/YYY");
    wd.create("abc/def/XXX/YYY/bar", "test");
    wd.create("ghi/XXX/YYY/bar", "test");
    wd.assert_err(&mut cmd);
});

// See: https://github.com/BurntSushi/ripgrep/issues/65
// Ignoring a directory (a/) must ignore all of its contents.
clean!(regression_65, "xyz", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "a/");
    wd.create_dir("a");
    wd.create("a/foo", "xyz");
    wd.create("a/bar", "xyz");
    wd.assert_err(&mut cmd);
});

// See: https://github.com/BurntSushi/ripgrep/issues/67
// `/*` plus a whitelist (!/dir) should search only the whitelisted dir.
clean!(regression_67, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "/*\n!/dir");
    wd.create_dir("dir");
    wd.create_dir("foo");
    wd.create("foo/bar", "test");
    wd.create("dir/bar", "test");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, path("dir/bar:test\n"));
});

// See: https://github.com/BurntSushi/ripgrep/issues/87
// A pattern with a trailing `**` must not break earlier patterns.
clean!(regression_87, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "foo\n**no-vcs**");
    wd.create("foo", "test");
    wd.assert_err(&mut cmd);
});
// See: https://github.com/BurntSushi/ripgrep/issues/90
// A whitelisted hidden file (!.foo) must be searched.
clean!(regression_90, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "!.foo");
    wd.create(".foo", "test");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, ".foo:test\n");
});

// See: https://github.com/BurntSushi/ripgrep/issues/93
// A pattern with repetition counts ({1,3}) must be handled correctly.
clean!(regression_93, r"(\d{1,3}\.){3}\d{1,3}", ".",
|wd: WorkDir, mut cmd: Command| {
    wd.create("foo", "192.168.1.1");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "foo:192.168.1.1\n");
});

// See: https://github.com/BurntSushi/ripgrep/issues/99
// Files with no matches must not produce an empty heading block.
clean!(regression_99, "test", ".",
|wd: WorkDir, mut cmd: Command| {
    wd.create("foo1", "test");
    wd.create("foo2", "zzz");
    wd.create("bar", "test");
    cmd.arg("-j1").arg("--heading");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(sort_lines(&lines), sort_lines("bar\ntest\n\nfoo1\ntest\n"));
});

// See: https://github.com/BurntSushi/ripgrep/issues/105
// Column numbers must be 1-based, not 0-based.
clean!(regression_105_part1, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create("foo", "zztest");
    cmd.arg("--vimgrep");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "foo:1:3:zztest\n");
});

// See: https://github.com/BurntSushi/ripgrep/issues/105
clean!(regression_105_part2, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create("foo", "zztest");
    cmd.arg("--column");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "foo:3:zztest\n");
});

// See: https://github.com/BurntSushi/ripgrep/issues/127
clean!(regression_127, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    // Set up a directory hierarchy like this:
    //
    // .gitignore
    // foo/
    //   sherlock
    //   watson
    //
    // Where `.gitignore` contains `foo/sherlock`.
    //
    // ripgrep should ignore 'foo/sherlock' giving us results only from
    // 'foo/watson' but on Windows ripgrep will include both 'foo/sherlock' and
    // 'foo/watson' in the search results.
    wd.create(".gitignore", "foo/sherlock\n");
    wd.create_dir("foo");
    wd.create("foo/sherlock", hay::SHERLOCK);
    wd.create("foo/watson", hay::SHERLOCK);
    let lines: String = wd.stdout(&mut cmd);
    let expected = format!("\
{path}:For the Doctor Watsons of this world, as opposed to the Sherlock
{path}:be, to a very large extent, the result of luck. Sherlock Holmes
", path=path("foo/watson"));
    assert_eq!(lines, expected);
});

// See: https://github.com/BurntSushi/ripgrep/issues/128
// Vertical tabs (\x0b) are not line terminators; line counting must be
// based on \n only.
clean!(regression_128, "x", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create_bytes("foo", b"01234567\x0b\n\x0b\n\x0b\n\x0b\nx");
    cmd.arg("-n");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "foo:5:x\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/131
// Non-ASCII file names in ignore patterns must match.
//
// TODO(burntsushi): Darwin doesn't like this test for some reason.
#[cfg(not(target_os = "macos"))]
clean!(regression_131, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "TopÑapa");
    wd.create("TopÑapa", "test");
    wd.assert_err(&mut cmd);
});

// See: https://github.com/BurntSushi/ripgrep/issues/137
// Explicitly-given file symlinks must be searched, each under its own
// given name.
//
// TODO(burntsushi): Figure out why Windows gives "access denied" errors
// when trying to create a file symlink. For now, disable test on Windows.
#[cfg(not(windows))]
sherlock!(regression_137, "Sherlock", ".", |wd: WorkDir, mut cmd: Command| {
    wd.link_file("sherlock", "sym1");
    wd.link_file("sherlock", "sym2");
    cmd.arg("sym1");
    cmd.arg("sym2");
    cmd.arg("-j1");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
sym1:For the Doctor Watsons of this world, as opposed to the Sherlock
sym1:be, to a very large extent, the result of luck. Sherlock Holmes
sym2:For the Doctor Watsons of this world, as opposed to the Sherlock
sym2:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, path(expected));
});

// See: https://github.com/BurntSushi/ripgrep/issues/156
// A complex pattern matching every line; -N suppresses line numbers, so
// output should reproduce the file exactly.
clean!(
    regression_156,
    r#"#(?:parse|include)\s*\(\s*(?:"|')[./A-Za-z_-]+(?:"|')"#,
    "testcase.txt",
|wd: WorkDir, mut cmd: Command| {
    const TESTCASE: &'static str = r#"#parse('widgets/foo_bar_macros.vm')
#parse ( 'widgets/mobile/foo_bar_macros.vm' )
#parse ("widgets/foobarhiddenformfields.vm")
#parse ( "widgets/foo_bar_legal.vm" )
#include( 'widgets/foo_bar_tips.vm' )
#include('widgets/mobile/foo_bar_macros.vm')
#include ("widgets/mobile/foo_bar_resetpw.vm")
#parse('widgets/foo-bar-macros.vm')
#parse ( 'widgets/mobile/foo-bar-macros.vm' )
#parse ("widgets/foo-bar-hiddenformfields.vm")
#parse ( "widgets/foo-bar-legal.vm" )
#include( 'widgets/foo-bar-tips.vm' )
#include('widgets/mobile/foo-bar-macros.vm')
#include ("widgets/mobile/foo-bar-resetpw.vm")
"#;
    wd.create("testcase.txt", TESTCASE);
    cmd.arg("-N");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, TESTCASE);
});

// See: https://github.com/BurntSushi/ripgrep/issues/184
// `.*` in .gitignore must not accidentally ignore everything.
clean!(regression_184, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", ".*");
    wd.create_dir("foo/bar");
    wd.create("foo/bar/baz", "test");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, format!("{}:test\n", path("foo/bar/baz")));
    cmd.current_dir(wd.path().join("./foo/bar"));
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "baz:test\n");
});

// See: https://github.com/BurntSushi/ripgrep/issues/199
// --smart-case with a \b-bounded pattern must still be case-insensitive
// when the query is all-lowercase.
clean!(regression_199, r"\btest\b", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create("foo", "tEsT");
    cmd.arg("--smart-case");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "foo:tEsT\n");
});

// See: https://github.com/BurntSushi/ripgrep/issues/206
// A bare file glob (-g *.txt) must match files in subdirectories.
clean!(regression_206, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create_dir("foo");
    wd.create("foo/bar.txt", "test");
    cmd.arg("-g").arg("*.txt");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, format!("{}:test\n", path("foo/bar.txt")));
});

// See: https://github.com/BurntSushi/ripgrep/issues/228
// Passing a directory to --ignore-file is an error, not a silent no-op.
clean!(regression_228, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create_dir("foo");
    cmd.arg("--ignore-file").arg("foo");
    wd.assert_err(&mut cmd);
});
// ---------------------------------------------------------------------
// Feature tests, pinned to the issue that requested each feature.
// ---------------------------------------------------------------------

// See: https://github.com/BurntSushi/ripgrep/issues/20
// --no-filename suppresses path prefixes even for directory searches.
sherlock!(feature_20_no_filename, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
    cmd.arg("--no-filename");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

// See: https://github.com/BurntSushi/ripgrep/issues/45
// --ignore-file applies an arbitrary file's patterns relative to the CWD.
sherlock!(feature_45_relative_cwd, "test", ".",
|wd: WorkDir, mut cmd: Command| {
    wd.create(".not-an-ignore", "foo\n/bar");
    wd.create_dir("bar");
    wd.create_dir("baz/bar");
    wd.create_dir("baz/baz/bar");
    wd.create("bar/test", "test");
    wd.create("baz/bar/test", "test");
    wd.create("baz/baz/bar/test", "test");
    wd.create("baz/foo", "test");
    wd.create("baz/test", "test");
    wd.create("foo", "test");
    wd.create("test", "test");
    // First, get a baseline without applying ignore rules.
    let lines = paths_from_stdout(wd.stdout(&mut cmd));
    assert_eq!(lines, paths(&[
        "bar/test", "baz/bar/test", "baz/baz/bar/test", "baz/foo",
        "baz/test", "foo", "test",
    ]));
    // Now try again with the ignore file activated.
    cmd.arg("--ignore-file").arg(".not-an-ignore");
    let lines = paths_from_stdout(wd.stdout(&mut cmd));
    assert_eq!(lines, paths(&[
        "baz/bar/test", "baz/baz/bar/test", "baz/test", "test",
    ]));
    // Now do it again, but inside the baz directory.
    // Since the ignore file is interpreted relative to the CWD, this will
    // cause the /bar anchored pattern to filter out baz/bar, which is a
    // subtle difference between true parent ignore files and manually
    // specified ignore files.
    let mut cmd = wd.command();
    cmd.arg("test").arg(".").arg("--ignore-file").arg("../.not-an-ignore");
    cmd.current_dir(wd.path().join("baz"));
    let lines = paths_from_stdout(wd.stdout(&mut cmd));
    assert_eq!(lines, paths(&["baz/bar/test", "test"]));
});

// See: https://github.com/BurntSushi/ripgrep/issues/45
// A later .ignore whitelist must take precedence over an --ignore-file.
sherlock!(feature_45_precedence_with_others, "test", ".",
|wd: WorkDir, mut cmd: Command| {
    wd.create(".not-an-ignore", "*.log");
    wd.create(".ignore", "!imp.log");
    wd.create("imp.log", "test");
    wd.create("wat.log", "test");
    cmd.arg("--ignore-file").arg(".not-an-ignore");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "imp.log:test\n");
});

// See: https://github.com/BurntSushi/ripgrep/issues/45
// With multiple --ignore-file flags, later files override earlier ones.
sherlock!(feature_45_precedence_internal, "test", ".",
|wd: WorkDir, mut cmd: Command| {
    wd.create(".not-an-ignore1", "*.log");
    wd.create(".not-an-ignore2", "!imp.log");
    wd.create("imp.log", "test");
    wd.create("wat.log", "test");
    cmd.arg("--ignore-file").arg(".not-an-ignore1");
    cmd.arg("--ignore-file").arg(".not-an-ignore2");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "imp.log:test\n");
});
// See: https://github.com/BurntSushi/ripgrep/issues/68
// --no-ignore-vcs disables .gitignore but still honors .ignore.
clean!(feature_68_no_ignore_vcs, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create(".gitignore", "foo");
    wd.create(".ignore", "bar");
    wd.create("foo", "test");
    wd.create("bar", "test");
    cmd.arg("--no-ignore-vcs");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "foo:test\n");
});

// See: https://github.com/BurntSushi/ripgrep/issues/70
// --smart-case: an all-lowercase query searches case-insensitively.
sherlock!(feature_70_smart_case, "sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
    cmd.arg("--smart-case");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    assert_eq!(lines, expected);
});

// See: https://github.com/BurntSushi/ripgrep/issues/89
// --null terminates file names with NUL in each output mode.
sherlock!(feature_89_files_with_matches, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
    cmd.arg("--null").arg("--files-with-matches");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "sherlock\x00");
});

// See: https://github.com/BurntSushi/ripgrep/issues/89
sherlock!(feature_89_count, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
    cmd.arg("--null").arg("--count");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "sherlock\x002\n");
});

// See: https://github.com/BurntSushi/ripgrep/issues/89
sherlock!(feature_89_files, "NADA", ".",
|wd: WorkDir, mut cmd: Command| {
    cmd.arg("--null").arg("--files");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "sherlock\x00");
});

// See: https://github.com/BurntSushi/ripgrep/issues/89
sherlock!(feature_89_match, "Sherlock", ".",
|wd: WorkDir, mut cmd: Command| {
    cmd.arg("--null").arg("-C1");
    let lines: String = wd.stdout(&mut cmd);
    let expected = "\
sherlock\x00For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock\x00Holmeses, success in the province of detective work must always
sherlock\x00be, to a very large extent, the result of luck. Sherlock Holmes
sherlock\x00can extract a clew from a wisp of straw or a flake of cigar ash;
";
    assert_eq!(lines, expected);
});

// See: https://github.com/BurntSushi/ripgrep/issues/109
// --maxdepth limits directory recursion depth.
clean!(feature_109_max_depth, "far", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create_dir("one");
    wd.create("one/pass", "far");
    wd.create_dir("one/too");
    wd.create("one/too/many", "far");
    cmd.arg("--maxdepth").arg("2");
    let lines: String = wd.stdout(&mut cmd);
    let expected = path("one/pass:far\n");
    assert_eq!(lines, expected);
});

// See: https://github.com/BurntSushi/ripgrep/issues/124
// --case-sensitive overrides both --smart-case and --ignore-case.
clean!(feature_109_case_sensitive_part1, "test", ".",
|wd: WorkDir, mut cmd: Command| {
    wd.create("foo", "tEsT");
    cmd.arg("--smart-case").arg("--case-sensitive");
    wd.assert_err(&mut cmd);
});

// See: https://github.com/BurntSushi/ripgrep/issues/124
clean!(feature_109_case_sensitive_part2, "test", ".",
|wd: WorkDir, mut cmd: Command| {
    wd.create("foo", "tEsT");
    cmd.arg("--ignore-case").arg("--case-sensitive");
    wd.assert_err(&mut cmd);
});

// See: https://github.com/BurntSushi/ripgrep/issues/159
// -m caps the number of matches reported per file.
clean!(feature_159_works, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create("foo", "test\ntest");
    cmd.arg("-m1");
    let lines: String = wd.stdout(&mut cmd);
    assert_eq!(lines, "foo:test\n");
});

// See: https://github.com/BurntSushi/ripgrep/issues/159
// -m0 means "no matches allowed", so the search reports nothing found.
clean!(feature_159_zero_max, "test", ".", |wd: WorkDir, mut cmd: Command| {
    wd.create("foo", "test\ntest");
    cmd.arg("-m0");
    wd.assert_err(&mut cmd);
});
#[test]
fn binary_nosearch() {
    // A file containing NUL bytes is detected as binary; without -a the
    // search yields no results, which surfaces as a failing exit status.
    let dir = WorkDir::new("binary_nosearch");
    dir.create("file", "foo\x00bar\nfoo\x00baz\n");
    let mut search = dir.command();
    search.arg("foo").arg("file");
    dir.assert_err(&mut search);
}
// The following two tests show a discrepancy in search results between
// searching with memory mapped files and stream searching. Stream searching
// uses a heuristic (that GNU grep also uses) where NUL bytes are replaced with
// the EOL terminator, which tends to avoid allocating large amounts of memory
// for really long "lines." The memory map searcher has no need to worry about
// such things, and more than that, it would be pretty hard for it to match
// the semantics of streaming search in this case.
//
// Binary files with lots of NULs aren't really part of the use case of ripgrep
// (or any other grep-like tool for that matter), so we shouldn't feel too bad
// about it.
#[test]
fn binary_search_mmap() {
    // With -a (treat binary as text) and the memory-map searcher, NUL
    // bytes come back verbatim in the output.
    let dir = WorkDir::new("binary_search_mmap");
    dir.create("file", "foo\x00bar\nfoo\x00baz\n");
    let mut search = dir.command();
    search.arg("-a").arg("--mmap").arg("foo").arg("file");
    let output: String = dir.stdout(&mut search);
    assert_eq!(output, "foo\x00bar\nfoo\x00baz\n");
}
#[test]
fn binary_search_no_mmap() {
    // Same as binary_search_mmap but forcing the streaming searcher; the
    // two code paths must agree when -a is supplied.
    let dir = WorkDir::new("binary_search_no_mmap");
    dir.create("file", "foo\x00bar\nfoo\x00baz\n");
    let mut search = dir.command();
    search.arg("-a").arg("--no-mmap").arg("foo").arg("file");
    let output: String = dir.stdout(&mut search);
    assert_eq!(output, "foo\x00bar\nfoo\x00baz\n");
}
#[test]
fn files() {
    // --files lists every file under the working directory; traversal
    // order is unspecified, so both orderings are accepted.
    let dir = WorkDir::new("files");
    dir.create("file", "");
    dir.create_dir("dir");
    dir.create("dir/file", "");
    let mut listing = dir.command();
    listing.arg("--files");
    let output: String = dir.stdout(&mut listing);
    let accepted = [path("file\ndir/file\n"), path("dir/file\nfile\n")];
    assert!(accepted.contains(&output));
}
// See: https://github.com/BurntSushi/ripgrep/issues/64
#[test]
fn regression_64() {
    // Passing an explicit path argument must restrict --files to that
    // path only, not list the whole tree.
    let dir = WorkDir::new("regression_64");
    dir.create_dir("dir");
    dir.create_dir("foo");
    dir.create("dir/abc", "");
    dir.create("foo/abc", "");
    let mut listing = dir.command();
    listing.arg("--files").arg("foo");
    let output: String = dir.stdout(&mut listing);
    assert_eq!(output, path("foo/abc\n"));
}
#[test]
fn type_list() {
    // The built-in type list evolves over releases, so only require that
    // --type-list prints something.
    let dir = WorkDir::new("type_list");
    let mut listing = dir.command();
    listing.arg("--type-list");
    let output: String = dir.stdout(&mut listing);
    assert!(!output.is_empty());
}
|
extern crate memcache;
extern crate rand;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use std::iter;
use std::thread;
use std::thread::JoinHandle;
use std::time;
fn gen_random_key() -> String {
return iter::repeat(())
.map(|()| thread_rng().sample(Alphanumeric))
.take(10)
.collect::<String>();
}
#[test]
fn test() {
    // End-to-end check against several live memcached endpoints pooled
    // behind one client (plus a unix socket where available).
    let mut urls = vec![
        "memcache://localhost:12346?tcp_nodelay=true",
        "memcache://localhost:12347?timeout=10",
        "memcache://localhost:12348",
        "memcache://localhost:12349",
    ];
    if cfg!(unix) {
        urls.push("memcache:///tmp/memcached2.sock");
    }
    let mut client = memcache::Client::connect(urls).unwrap();
    client.version().unwrap();
    // flush() drops all data immediately...
    client.set("foo", "bar", 0).unwrap();
    client.flush().unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, None);
    // ...while flush_with_delay(3) keeps it readable until the delay elapses.
    client.set("foo", "bar", 0).unwrap();
    client.flush_with_delay(3).unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("bar")));
    thread::sleep(time::Duration::from_secs(4));
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, None);
    // 1000 random keys: exercises key hashing across all pooled connections.
    let mut keys: Vec<String> = Vec::new();
    for _ in 0..1000 {
        let key = gen_random_key();
        keys.push(key.clone());
        client.set(key.as_str(), "xxx", 0).unwrap();
    }
    for key in keys {
        let value: String = client.get(key.as_str()).unwrap().unwrap();
        assert_eq!(value, "xxx");
    }
}
#[test]
fn udp_test() {
    // Same command coverage as `test`, but over the UDP transport.
    let urls = vec!["memcache+udp://localhost:22345"];
    let mut client = memcache::Client::connect(urls).unwrap();
    client.version().unwrap();
    // flush semantics: immediate vs. delayed (see `test`).
    client.set("foo", "bar", 0).unwrap();
    client.flush().unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, None);
    client.set("foo", "bar", 0).unwrap();
    client.flush_with_delay(3).unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("bar")));
    thread::sleep(time::Duration::from_secs(4));
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, None);
    // add must fail while the key exists, succeed once it is deleted.
    client.set("foo", "bar", 0).unwrap();
    let value = client.add("foo", "baz", 0);
    assert_eq!(value.is_err(), true);
    client.delete("foo").unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, None);
    client.add("foo", "bar", 0).unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("bar")));
    // replace / append / prepend mutate the stored value in place.
    client.replace("foo", "baz", 0).unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("baz")));
    client.append("foo", "bar").unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("bazbar")));
    client.prepend("foo", "bar").unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("barbazbar")));
    // increment/decrement operate on numeric values stored as strings.
    client.set("fooo", 0, 0).unwrap();
    client.increment("fooo", 1).unwrap();
    let value: Option<String> = client.get("fooo").unwrap();
    assert_eq!(value, Some(String::from("1")));
    client.decrement("fooo", 1).unwrap();
    let value: Option<String> = client.get("fooo").unwrap();
    assert_eq!(value, Some(String::from("0")));
    // touch returns false for a missing key, true when the TTL was updated.
    assert_eq!(client.touch("foooo", 123).unwrap(), false);
    assert_eq!(client.touch("fooo", 12345).unwrap(), true);
    // gets is not supported for udp
    let value: Result<std::collections::HashMap<String, String>, _> = client.gets(vec!["foo", "fooo"]);
    assert_eq!(value.is_ok(), false);
    let mut keys: Vec<String> = Vec::new();
    for _ in 0..1000 {
        let key = gen_random_key();
        keys.push(key.clone());
        client.set(key.as_str(), "xxx", 0).unwrap();
    }
    for key in keys {
        let value: String = client.get(key.as_str()).unwrap().unwrap();
        assert_eq!(value, "xxx");
    }
    // test with multiple udp connections
    // Ten threads, each with its own client and key, re-running the whole
    // mutation sequence to look for cross-connection interference.
    let mut handles: Vec<Option<JoinHandle<_>>> = Vec::new();
    for i in 0..10 {
        handles.push(Some(thread::spawn(move || {
            let key = format!("key{}", i);
            let value = format!("value{}", i);
            let mut client = memcache::Client::connect("memcache://localhost:22345?udp=true").unwrap();
            for j in 0..50 {
                let value = format!("{}{}", value, j);
                client.set(key.as_str(), value.clone(), 0).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, Some(value.clone()));
                let result = client.add(key.as_str(), value.clone(), 0);
                assert_eq!(result.is_err(), true);
                client.delete(key.as_str()).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, None);
                client.add(key.as_str(), value.clone(), 0).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, Some(value.clone()));
                client.replace(key.as_str(), value.clone(), 0).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, Some(value.clone()));
                client.append(key.as_str(), value.clone()).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, Some(format!("{}{}", value, value)));
                client.prepend(key.as_str(), value.clone()).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, Some(format!("{}{}{}", value, value, value)));
            }
        })));
    }
    // Join every worker; a panic in any thread fails the test here.
    for i in 0..10 {
        handles[i].take().unwrap().join().unwrap();
    }
}
:white_check_mark: Run more tests on ascii
extern crate memcache;
extern crate rand;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use std::iter;
use std::thread;
use std::thread::JoinHandle;
use std::time;
fn gen_random_key() -> String {
return iter::repeat(())
.map(|()| thread_rng().sample(Alphanumeric))
.take(10)
.collect::<String>();
}
#[test]
fn test() {
    // End-to-end check against several live memcached endpoints pooled
    // behind one client; one endpoint forces the ascii protocol and a
    // unix socket is added where available.
    let mut urls = vec![
        "memcache://localhost:12346?tcp_nodelay=true",
        "memcache://localhost:12347?timeout=10",
        "memcache://localhost:12348?protocol=ascii",
        "memcache://localhost:12349?",
    ];
    if cfg!(unix) {
        urls.push("memcache:///tmp/memcached2.sock");
    }
    let mut client = memcache::Client::connect(urls).unwrap();
    client.version().unwrap();
    // flush() drops all data immediately...
    client.set("foo", "bar", 0).unwrap();
    client.flush().unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, None);
    // ...while flush_with_delay(3) keeps it readable until the delay elapses.
    client.set("foo", "bar", 0).unwrap();
    client.flush_with_delay(3).unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("bar")));
    thread::sleep(time::Duration::from_secs(4));
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, None);
    // 1000 random keys: exercises key hashing across all pooled connections.
    let mut keys: Vec<String> = Vec::new();
    for _ in 0..1000 {
        let key = gen_random_key();
        keys.push(key.clone());
        client.set(key.as_str(), "xxx", 0).unwrap();
    }
    for key in keys {
        let value: String = client.get(key.as_str()).unwrap().unwrap();
        assert_eq!(value, "xxx");
    }
}
#[test]
fn udp_test() {
    // Same command coverage as `test`, but over the UDP transport.
    let urls = vec!["memcache+udp://localhost:22345"];
    let mut client = memcache::Client::connect(urls).unwrap();
    client.version().unwrap();
    // flush semantics: immediate vs. delayed (see `test`).
    client.set("foo", "bar", 0).unwrap();
    client.flush().unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, None);
    client.set("foo", "bar", 0).unwrap();
    client.flush_with_delay(3).unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("bar")));
    thread::sleep(time::Duration::from_secs(4));
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, None);
    // add must fail while the key exists, succeed once it is deleted.
    client.set("foo", "bar", 0).unwrap();
    let value = client.add("foo", "baz", 0);
    assert_eq!(value.is_err(), true);
    client.delete("foo").unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, None);
    client.add("foo", "bar", 0).unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("bar")));
    // replace / append / prepend mutate the stored value in place.
    client.replace("foo", "baz", 0).unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("baz")));
    client.append("foo", "bar").unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("bazbar")));
    client.prepend("foo", "bar").unwrap();
    let value: Option<String> = client.get("foo").unwrap();
    assert_eq!(value, Some(String::from("barbazbar")));
    // increment/decrement operate on numeric values stored as strings.
    client.set("fooo", 0, 0).unwrap();
    client.increment("fooo", 1).unwrap();
    let value: Option<String> = client.get("fooo").unwrap();
    assert_eq!(value, Some(String::from("1")));
    client.decrement("fooo", 1).unwrap();
    let value: Option<String> = client.get("fooo").unwrap();
    assert_eq!(value, Some(String::from("0")));
    // touch returns false for a missing key, true when the TTL was updated.
    assert_eq!(client.touch("foooo", 123).unwrap(), false);
    assert_eq!(client.touch("fooo", 12345).unwrap(), true);
    // gets is not supported for udp
    let value: Result<std::collections::HashMap<String, String>, _> = client.gets(vec!["foo", "fooo"]);
    assert_eq!(value.is_ok(), false);
    let mut keys: Vec<String> = Vec::new();
    for _ in 0..1000 {
        let key = gen_random_key();
        keys.push(key.clone());
        client.set(key.as_str(), "xxx", 0).unwrap();
    }
    for key in keys {
        let value: String = client.get(key.as_str()).unwrap().unwrap();
        assert_eq!(value, "xxx");
    }
    // test with multiple udp connections
    // Ten threads, each with its own client and key, re-running the whole
    // mutation sequence to look for cross-connection interference.
    let mut handles: Vec<Option<JoinHandle<_>>> = Vec::new();
    for i in 0..10 {
        handles.push(Some(thread::spawn(move || {
            let key = format!("key{}", i);
            let value = format!("value{}", i);
            let mut client = memcache::Client::connect("memcache://localhost:22345?udp=true").unwrap();
            for j in 0..50 {
                let value = format!("{}{}", value, j);
                client.set(key.as_str(), value.clone(), 0).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, Some(value.clone()));
                let result = client.add(key.as_str(), value.clone(), 0);
                assert_eq!(result.is_err(), true);
                client.delete(key.as_str()).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, None);
                client.add(key.as_str(), value.clone(), 0).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, Some(value.clone()));
                client.replace(key.as_str(), value.clone(), 0).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, Some(value.clone()));
                client.append(key.as_str(), value.clone()).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, Some(format!("{}{}", value, value)));
                client.prepend(key.as_str(), value.clone()).unwrap();
                let result: Option<String> = client.get(key.as_str()).unwrap();
                assert_eq!(result, Some(format!("{}{}{}", value, value, value)));
            }
        })));
    }
    // Join every worker; a panic in any thread fails the test here.
    for i in 0..10 {
        handles[i].take().unwrap().join().unwrap();
    }
}
|
use neb::ram::types;
use neb::ram::chunk;
use std::*;
// Generates a #[test] named $name that round-trips boundary values of the
// numeric type $sym through the chunk-memory reader/writer `types::$io`.
macro_rules! test_nums {
    (
        $sym:ident, $io:ident, $name:ident
    ) => (
        #[test]
        fn $name () {
            // MIN/MAX plus small values; 255 exercises the top of a byte.
            let test_data = vec![$sym::MIN, $sym::MAX, 0, 1, 2, 255];
            let chunk = &chunk::init(1, 2048).list[0];
            for d in test_data {
                types::$io::write(d, chunk.addr);
                assert!(types::$io::read(chunk.addr) == d);
            }
        }
    );
}
// Smoke test: the module links and the harness runs.
#[test]
fn init () {
}
// Only i8 is wired up in this revision.
test_nums!(i8, i8_io, i8_test);
all primitive number tests
use neb::ram::types;
use neb::ram::chunk;
// Generates one test module per primitive numeric type $t, round-tripping
// boundary values through the chunk-memory reader/writer `types::$io`.
// Using a module (rather than a named fn) lets the same `test` fn name be
// reused for every type.
macro_rules! test_nums {
    (
        $t:ident, $io:ident
    ) => (
        mod $t {
            use neb::ram::types;
            use neb::ram::chunk;
            use std;
            #[test]
            fn test () {
                // MIN/MAX plus small values cast to $t; 255 exercises the
                // top of a byte (it wraps for i8 via the cast).
                let test_data = vec![std::$t::MIN, std::$t::MAX, 0 as $t, 1 as $t, 2 as $t, 255 as $t];
                let chunk = &chunk::init(1, 2048).list[0];
                for d in test_data {
                    types::$io::write(d, chunk.addr);
                    assert!(types::$io::read(chunk.addr) == d);
                }
            }
        }
    );
}
// Smoke test: the module links and the harness runs.
#[test]
fn init () {
}
// One round-trip test module per primitive numeric type.
test_nums!(i8, i8_io);
test_nums!(i16, i16_io);
test_nums!(i32, i32_io);
test_nums!(i64, i64_io);
test_nums!(u8, u8_io);
test_nums!(u16, u16_io);
test_nums!(u32, u32_io);
test_nums!(u64, u64_io);
test_nums!(isize, isize_io);
test_nums!(usize, usize_io);
test_nums!(f32, f32_io);
test_nums!(f64, f64_io);
use std::thread::{self,Thread,Builder};
use std::sync::mpsc::{self,channel,Receiver};
use mio::net::*;
use mio::*;
use mio_uds::UnixStream;
use mio::unix::UnixReady;
use std::collections::{HashMap,HashSet};
use std::os::unix::io::RawFd;
use std::os::unix::io::{FromRawFd,AsRawFd};
use std::io::{self,Read,ErrorKind};
use nom::HexDisplay;
use std::error::Error;
use slab::{Slab,Entry,VacantEntry};
use std::rc::Rc;
use std::cell::{RefCell,RefMut};
use std::net::{SocketAddr,Shutdown};
use std::str::FromStr;
use time::{Duration,precise_time_s};
use rand::random;
use uuid::Uuid;
use pool::{Pool,Checkout,Reset};
use sozu_command::buffer::Buffer;
use sozu_command::channel::Channel;
use sozu_command::scm_socket::ScmSocket;
use sozu_command::messages::{self,TcpFront,Order,OrderMessage,OrderMessageAnswer,OrderMessageStatus};
use network::{AppId,Backend,ClientResult,ConnectionError,RequiredEvents,Protocol,Readiness,SessionMetrics,
ProxyClient,ProxyConfiguration,AcceptError,BackendConnectAction,BackendConnectionStatus,
CloseResult};
use network::proxy::{Server,ProxyChannel,ListenToken,ClientToken,ListenClient};
use network::buffer_queue::BufferQueue;
use network::socket::{SocketHandler,SocketResult,server_bind};
use network::{http,https_rustls};
use network::protocol::{Pipe, ProtocolResult};
use network::protocol::proxy_protocol::ProxyProtocol;
use util::UnwrapLog;
// Token reserved for the listening socket itself (never assigned to clients).
const SERVER: Token = Token(0);
/// Lifecycle of a proxied TCP connection, from accept through teardown of
/// one or both ends.
#[derive(Debug,Clone,PartialEq,Eq)]
pub enum ConnectionStatus {
  Initial,
  ClientConnected,
  Connected,
  ClientClosed,
  ServerClosed,
  Closed
}
/// Protocol state of a client: either plain byte piping between frontend
/// and backend, or first emitting the proxy-protocol header toward the
/// backend (upgraded to `Pipe` afterwards — see `Client::upgrade`).
pub enum State {
  Pipe(Pipe<TcpStream>),
  ProxyProtocol(ProxyProtocol<TcpStream>),
}
/// One accepted TCP connection being proxied to a backend.
pub struct Client {
  sock:           TcpStream,                    // frontend socket as accepted
  backend:        Option<TcpStream>,            // backend socket once connected
  token:          Option<Token>,                // frontend poll token
  backend_token:  Option<Token>,                // backend poll token
  accept_token:   Token,                        // token of the listener that accepted us
  status:         ConnectionStatus,
  rx_count:       usize,
  tx_count:       usize,
  app_id:         Option<String>,               // application resolved from the listener
  request_id:     String,                       // uuid used as the log correlation id
  metrics:        SessionMetrics,
  protocol:       Option<State>,                // Option so it can be `take`n during upgrade
  // Buffers are only held here while in the ProxyProtocol state; they are
  // handed to the Pipe on upgrade (see `Client::new` / `Client::upgrade`).
  front_buf:      Option<Checkout<BufferQueue>>,
  back_buf:       Option<Checkout<BufferQueue>>,
}
impl Client {
fn new(sock: TcpStream, accept_token: Token, front_buf: Checkout<BufferQueue>,
back_buf: Checkout<BufferQueue>, send_proxy: bool) -> Client {
let s = sock.try_clone().expect("could not clone the socket");
let addr = sock.peer_addr().map(|s| s.ip()).ok();
let mut frontend_buffer = None;
let mut backend_buffer = None;
let protocol = if send_proxy {
frontend_buffer = Some(front_buf);
backend_buffer = Some(back_buf);
Some(State::ProxyProtocol(ProxyProtocol::new(s, None)))
} else {
Some(State::Pipe(Pipe::new(s, None, front_buf, back_buf, addr)))
};
Client {
sock: sock,
backend: None,
token: None,
backend_token: None,
accept_token: accept_token,
status: ConnectionStatus::Connected,
rx_count: 0,
tx_count: 0,
app_id: None,
request_id: Uuid::new_v4().hyphenated().to_string(),
metrics: SessionMetrics::new(),
protocol,
front_buf: frontend_buffer,
back_buf: backend_buffer,
}
}
fn log_request(&self) {
let client = match self.sock.peer_addr().ok() {
None => String::from("-"),
Some(SocketAddr::V4(addr)) => format!("{}", addr),
Some(SocketAddr::V6(addr)) => format!("{}", addr),
};
let backend = match self.backend.as_ref().and_then(|backend| backend.peer_addr().ok()) {
None => String::from("-"),
Some(SocketAddr::V4(addr)) => format!("{}", addr),
Some(SocketAddr::V6(addr)) => format!("{}", addr),
};
let response_time = self.metrics.response_time().num_milliseconds();
let service_time = self.metrics.service_time().num_milliseconds();
let app_id = self.app_id.clone().unwrap_or(String::from("-"));
record_request_time!(&app_id, response_time);
if let Some(backend_id) = self.metrics.backend_id.as_ref() {
if let Some(backend_response_time) = self.metrics.backend_response_time() {
record_backend_metrics!(app_id, backend_id, backend_response_time.num_milliseconds(),
self.metrics.backend_bin, self.metrics.backend_bout);
}
}
info!("{}{} -> {}\t{} {} {} {}",
self.log_context(), client, backend,
response_time, service_time, self.metrics.bin, self.metrics.bout);
}
fn front_hup(&mut self) -> ClientResult {
self.log_request();
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.front_hup(),
Some(State::ProxyProtocol(_)) => {
ClientResult::CloseClient
},
_ => unreachable!(),
}
}
fn back_hup(&mut self) -> ClientResult {
self.log_request();
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.back_hup(),
_ => ClientResult::CloseClient,
}
}
fn log_context(&self) -> String {
if let Some(ref app_id) = self.app_id {
format!("{}\t{}\t", self.request_id, app_id)
} else {
format!("{}\tunknown\t", self.request_id)
}
}
fn readable(&mut self) -> ClientResult {
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.readable(&mut self.metrics),
_ => ClientResult::Continue,
}
}
fn writable(&mut self) -> ClientResult {
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.writable(&mut self.metrics),
_ => ClientResult::Continue,
}
}
fn back_readable(&mut self) -> ClientResult {
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.back_readable(&mut self.metrics),
_ => ClientResult::Continue,
}
}
fn back_writable(&mut self) -> ClientResult {
let mut res = (ProtocolResult::Continue, ClientResult::Continue);
match self.protocol {
Some(State::Pipe(ref mut pipe)) => res.1 = pipe.back_writable(&mut self.metrics),
Some(State::ProxyProtocol(ref mut pp)) => {
res.0 = pp.back_writable().0;
res.1 = pp.back_writable().1;
}
_ => unreachable!(),
};
if let ProtocolResult::Upgrade = res.0 {
self.upgrade();
}
res.1
}
fn front_socket(&self) -> &TcpStream {
match self.protocol {
Some(State::Pipe(ref pipe)) => pipe.front_socket(),
Some(State::ProxyProtocol(ref pp)) => pp.front_socket(),
_ => unreachable!(),
}
}
fn back_socket(&self) -> Option<&TcpStream> {
match self.protocol {
Some(State::Pipe(ref pipe)) => pipe.back_socket(),
Some(State::ProxyProtocol(ref pp)) => pp.back_socket(),
_ => unreachable!(),
}
}
pub fn upgrade(&mut self) {
let protocol = self.protocol.take();
if let Some(State::ProxyProtocol(mut pp)) = protocol {
if self.back_buf.is_some() && self.front_buf.is_some() {
let mut backend_socket = pp.backend.take().unwrap();
let addr = backend_socket.peer_addr().map(|s| s.ip()).ok();
let front_token = pp.front_token();
let back_token = pp.back_token();
let mut pipe = Pipe::new(
pp.frontend.take(0).into_inner(),
Some(backend_socket),
self.front_buf.take().unwrap(),
self.back_buf.take().unwrap(),
addr,
);
pipe.readiness.front_readiness = pp.readiness.front_readiness;
pipe.readiness.back_readiness = pp.readiness.back_readiness;
if let Some(front_token) = front_token {
pipe.set_front_token(front_token);
}
if let Some(back_token) = back_token {
pipe.set_back_token(back_token);
}
self.protocol = Some(State::Pipe(pipe));
} else {
error!("Missing the frontend or backend buffer queue, we can't switch to a pipe");
}
}
}
fn readiness(&mut self) -> &mut Readiness {
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.readiness(),
Some(State::ProxyProtocol(ref mut pp)) => pp.readiness(),
_ => unreachable!(),
}
}
fn front_token(&self) -> Option<Token> {
match self.protocol {
Some(State::Pipe(ref pipe)) => pipe.front_token(),
Some(State::ProxyProtocol(ref pp)) => pp.front_token(),
_ => unreachable!()
}
}
fn back_token(&self) -> Option<Token> {
match self.protocol {
Some(State::Pipe(ref pipe)) => pipe.back_token(),
Some(State::ProxyProtocol(ref pp)) => pp.back_token(),
_ => unreachable!()
}
}
fn set_back_socket(&mut self, socket: TcpStream) {
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.set_back_socket(socket),
Some(State::ProxyProtocol(ref mut pp)) => pp.set_back_socket(socket),
_ => unreachable!()
}
}
fn set_front_token(&mut self, token: Token) {
self.token = Some(token);
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.set_front_token(token),
Some(State::ProxyProtocol(ref mut pp)) => pp.set_front_token(token),
_ => unreachable!()
}
}
fn set_back_token(&mut self, token: Token) {
self.backend_token = Some(token);
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.set_back_token(token),
Some(State::ProxyProtocol(ref mut pp)) => pp.set_back_token(token),
_ => unreachable!()
}
}
fn back_connected(&self) -> BackendConnectionStatus {
//FIXME: handle backends correctly when refactoring the TCP proxy
BackendConnectionStatus::Connected
}
fn set_back_connected(&mut self, _: BackendConnectionStatus) {
}
fn metrics(&mut self) -> &mut SessionMetrics {
&mut self.metrics
}
fn remove_backend(&mut self) -> (Option<String>, Option<SocketAddr>) {
let addr = self.backend.as_ref().and_then(|sock| sock.peer_addr().ok());
self.backend = None;
self.backend_token = None;
(self.app_id.clone(), addr)
}
}
impl ProxyClient for Client {
  /// Tears down both ends of the connection and reports which tokens and
  /// backends the server loop must release.
  fn close(&mut self, poll: &mut Poll) -> CloseResult {
    self.metrics.service_stop();
    // Results of shutdown/deregister are intentionally ignored: the peer
    // may already be gone by the time we close.
    self.front_socket().shutdown(Shutdown::Both);
    poll.deregister(self.front_socket());
    let mut result = CloseResult::default();
    if let Some(tk) = self.backend_token {
      result.tokens.push(tk)
    }
    if let (Some(app_id), Some(addr)) = self.remove_backend() {
      result.backends.push((app_id, addr.clone()));
    }
    if let Some(sock) = self.back_socket() {
      sock.shutdown(Shutdown::Both);
      poll.deregister(sock);
      decr!("backend.connections");
    }
    if let Some(tk) = self.token {
      result.tokens.push(tk)
    }
    result
  }

  /// Closes only the backend half, returning the freed (app_id, address)
  /// pair if a backend was attached.
  fn close_backend(&mut self, _: Token, poll: &mut Poll) -> Option<(String,SocketAddr)> {
    let mut res = None;
    if let (Some(app_id), Some(addr)) = self.remove_backend() {
      res = Some((app_id, addr.clone()));
    }
    if let Some(sock) = self.back_socket() {
      sock.shutdown(Shutdown::Both);
      poll.deregister(sock);
      decr!("backend.connections");
    }
    res
  }

  fn protocol(&self) -> Protocol {
    Protocol::TCP
  }

  /// Accumulates poll events into whichever side's readiness the token
  /// belongs to; events for unknown tokens are dropped.
  fn process_events(&mut self, token: Token, events: Ready) {
    if self.token == Some(token) {
      self.readiness().front_readiness = self.readiness().front_readiness | UnixReady::from(events);
    } else if self.backend_token == Some(token) {
      self.readiness().back_readiness = self.readiness().back_readiness | UnixReady::from(events);
    }
  }

  /// Drains ready events on both sockets until nothing is actionable.
  /// The loop is bounded to catch state machines that never make progress.
  fn ready(&mut self) -> ClientResult {
    let mut counter = 0;
    let max_loop_iterations = 100000;
    self.metrics().service_start();
    if self.back_connected() == BackendConnectionStatus::Connecting {
      if self.readiness().back_readiness.is_hup() {
        //retry connecting the backend
        //FIXME: there should probably be a circuit breaker per client too
        error!("error connecting to backend, trying again");
        self.metrics().service_stop();
        return ClientResult::ConnectBackend;
      } else if self.readiness().back_readiness != UnixReady::from(Ready::empty()) {
        // Any other event on a connecting backend means the connect finished.
        self.set_back_connected(BackendConnectionStatus::Connected);
      }
    }
    let token = self.token.clone();
    while counter < max_loop_iterations {
      // Only act on events we are both interested in and that are ready.
      let front_interest = self.readiness().front_interest & self.readiness().front_readiness;
      let back_interest  = self.readiness().back_interest & self.readiness().back_readiness;
      trace!("PROXY\t{:?} {:?} | front: {:?} | back: {:?} ", token, self.readiness(), front_interest, back_interest);
      if front_interest == UnixReady::from(Ready::empty()) && back_interest == UnixReady::from(Ready::empty()) {
        break;
      }
      if front_interest.is_readable() {
        let order = self.readable();
        trace!("front readable\tinterpreting client order {:?}", order);
        if order != ClientResult::Continue {
          return order;
        }
      }
      if back_interest.is_writable() {
        let order = self.back_writable();
        if order != ClientResult::Continue {
          return order;
        }
      }
      if back_interest.is_readable() {
        let order = self.back_readable();
        if order != ClientResult::Continue {
          return order;
        }
      }
      if front_interest.is_writable() {
        let order = self.writable();
        trace!("front writable\tinterpreting client order {:?}", order);
        if order != ClientResult::Continue {
          return order;
        }
      }
      if front_interest.is_hup() {
        let order = self.front_hup();
        match order {
          ClientResult::CloseClient => {
            return order;
          },
          _ => {
            // Consume the hup so we don't re-handle it next iteration.
            self.readiness().front_readiness.remove(UnixReady::hup());
            return order;
          }
        }
      }
      if back_interest.is_hup() {
        let order = self.back_hup();
        match order {
          ClientResult::CloseClient => {
            return order;
          },
          ClientResult::Continue => {
            // Keep draining buffered data to the frontend before closing.
            self.readiness().front_interest.insert(Ready::writable());
            if ! self.readiness().front_readiness.is_writable() {
              break;
            }
          },
          _ => {
            self.readiness().back_readiness.remove(UnixReady::hup());
            return order;
          }
        };
      }
      if front_interest.is_error() || back_interest.is_error() {
        if front_interest.is_error() {
          error!("PROXY client {:?} front error, disconnecting", self.token);
        } else {
          error!("PROXY client {:?} back error, disconnecting", self.token);
        }
        self.readiness().front_interest = UnixReady::from(Ready::empty());
        self.readiness().back_interest  = UnixReady::from(Ready::empty());
        return ClientResult::CloseClient;
      }
      counter += 1;
    }
    if counter == max_loop_iterations {
      error!("PROXY\thandling client {:?} went through {} iterations, there's a probable infinite loop bug, closing the connection", self.token, max_loop_iterations);
      let front_interest = self.readiness().front_interest & self.readiness().front_readiness;
      let back_interest  = self.readiness().back_interest & self.readiness().back_readiness;
      let token = self.token.clone();
      error!("PROXY\t{:?} readiness: {:?} | front: {:?} | back: {:?} ", token,
        self.readiness(), front_interest, back_interest);
      return ClientResult::CloseClient;
    }
    ClientResult::Continue
  }
}
/// A listening socket bound for one application, together with the backend
/// addresses traffic can be forwarded to.
pub struct ApplicationListener {
  app_id:         String,
  sock:           Option<TcpListener>,  // None once given back via give_back_listeners
  token:          Option<Token>,
  front_address:  SocketAddr,
  back_addresses: Vec<SocketAddr>,
}
/// Per-application options; currently only whether to emit the
/// proxy-protocol header toward the backend.
#[derive(Debug)]
pub struct ApplicationConfiguration {
  send_proxy: bool,
}
/// TCP proxy state: one listener per app (keyed both by app id and by the
/// listener's poll token), known backends, and the shared buffer pool.
pub struct ServerConfiguration {
  fronts:    HashMap<String, Token>,                  // app_id -> listener token
  backends:  HashMap<String, Vec<Backend>>,           // app_id -> backends
  listeners: HashMap<Token, ApplicationListener>,     // token -> listener
  configs:   HashMap<AppId, ApplicationConfiguration>,
  pool:      Rc<RefCell<Pool<BufferQueue>>>,
}
impl ServerConfiguration {
pub fn new(event_loop: &mut Poll, pool: Rc<RefCell<Pool<BufferQueue>>>,
mut tcp_listener: Vec<(AppId, TcpListener)>, mut tokens: Vec<Token>) -> (ServerConfiguration, HashSet<Token>) {
let mut configuration = ServerConfiguration {
backends: HashMap::new(),
listeners: HashMap::new(),
configs: HashMap::new(),
fronts: HashMap::new(),
pool: pool,
};
let mut listener_tokens = HashSet::new();
for ((app_id, listener), token) in tcp_listener.drain(..).zip(tokens.drain(..)) {
if let Ok(front) = listener.local_addr() {
let al = ApplicationListener {
app_id: app_id.clone(),
sock: Some(listener),
token: None,
front_address: front,
back_addresses: Vec::new(),
};
if let Some(_) = configuration.add_application_listener(&app_id, al, event_loop, token) {
listener_tokens.insert(token);
}
}
}
(configuration, listener_tokens)
}
pub fn give_back_listeners(&mut self) -> Vec<(String, TcpListener)> {
let res = self.listeners.values_mut()
.filter(|app_listener| app_listener.sock.is_some())
.map(|app_listener| (app_listener.app_id.clone(), app_listener.sock.take().unwrap()))
.collect();
self.listeners.clear();
res
}
fn add_application_listener(&mut self, app_id: &str, al: ApplicationListener, event_loop: &mut Poll, token: Token) -> Option<ListenToken> {
let front = al.front_address.clone();
self.listeners.insert(token, al);
self.fronts.insert(String::from(app_id), token);
if let Some(ref sock) = self.listeners[&token].sock {
event_loop.register(sock, token, Ready::readable(), PollOpt::edge());
}
info!("started TCP listener for app {} on port {}", app_id, front.port());
Some(ListenToken(token.0))
}
pub fn add_tcp_front(&mut self, app_id: &str, front: &SocketAddr, event_loop: &mut Poll, token: Token) -> Option<ListenToken> {
if self.fronts.contains_key(app_id) {
error!("TCP front already exists for app_id {}", app_id);
return None;
}
if let Ok(listener) = server_bind(front) {
let addresses: Vec<SocketAddr> = if let Some(ads) = self.backends.get(app_id) {
let v: Vec<SocketAddr> = ads.iter().map(|backend| backend.address).collect();
v
} else {
Vec::new()
};
let al = ApplicationListener {
app_id: String::from(app_id),
sock: Some(listener),
token: Some(token),
front_address: *front,
back_addresses: addresses,
};
self.add_application_listener(app_id, al, event_loop, token)
} else {
error!("could not declare listener for app {} on port {}", app_id, front.port());
None
}
}
/// Removes the TCP front (and its listener) for `app_id`, deregistering the
/// socket from the event loop.
///
/// Fix: the original removed the listener but left the `app_id -> token`
/// entry in `self.fronts`, so `add_tcp_front` would forever reject the same
/// app with "TCP front already exists". The front entry is now dropped too.
///
/// Returns the freed `ListenToken`, or `None` if no front/listener existed.
pub fn remove_tcp_front(&mut self, app_id: String, event_loop: &mut Poll) -> Option<ListenToken>{
  info!("removing tcp_front {:?}", app_id);
  // an app can't have two listeners, so one token is enough
  // (copy the token out first so the map borrow ends before we mutate)
  let opt_tok = self.fronts.get(&app_id).map(|tok| *tok);
  if let Some(tok) = opt_tok {
    if let Some(listener) = self.listeners.remove(&tok) {
      listener.sock.as_ref().map(|sock| event_loop.deregister(sock));
      // forget the front as well, otherwise the app_id can never be re-added
      self.fronts.remove(&app_id);
      warn!("removed server {:?}", tok);
      Some(ListenToken(tok.0))
    } else {
      None
    }
  } else {
    None
  }
}
/// Declares a backend server for `app_id` and, when a front exists, appends
/// its address to the listener's round-robin pool.
///
/// Improvements over the original:
/// - one `entry` lookup replaces the get_mut / get / insert triple lookup;
/// - the needless `BorrowMut` dance on an immutable `last()` element is gone;
/// - id numbering is uniform: `last().map(|b| b.id + 1).unwrap_or(0)` gives
///   the same ids for every reachable state (vectors never shrink here since
///   `remove_backend` is a stub, so a stored vector is never empty).
///
/// Returns the front's `ListenToken`, or `None` (with an error log) when the
/// app has no front yet. `event_loop` is currently unused, kept for
/// signature compatibility with the other mutators.
pub fn add_backend(&mut self, app_id: &str, backend_id: &str, backend_address: &SocketAddr, event_loop: &mut Poll) -> Option<ListenToken> {
  {
    let addrs = self.backends.entry(String::from(app_id)).or_insert_with(Vec::new);
    let id = addrs.last().map(|b| b.id + 1).unwrap_or(0);
    let backend = Backend::new(backend_id, *backend_address, id);
    // avoid duplicate declarations of the same backend
    if !addrs.contains(&backend) {
      addrs.push(backend);
    }
  }
  // copy the token out so the borrow on self.fronts ends here
  let opt_tok = self.fronts.get(app_id).map(|tok| *tok);
  if let Some(tok) = opt_tok {
    if let Some(listener) = self.listeners.get_mut(&tok) {
      listener.back_addresses.push(*backend_address);
    }
    Some(ListenToken(tok.0))
  } else {
    error!("No front for backend {} in app {}", backend_id, app_id);
    None
  }
}
/// Unimplemented stub: removing a backend from an app's pool is not
/// supported yet — the call always reports failure by returning `None`.
/// NOTE(review): because this never shrinks `self.backends`, backend
/// vectors in the map are never empty once created.
pub fn remove_backend(&mut self, app_id: &str, backend_address: &SocketAddr, event_loop: &mut Poll) -> Option<ListenToken>{
  // ToDo
  None
}
}
impl ProxyConfiguration<Client> for ServerConfiguration {
  /// Connects `client` to one of the app's backends, chosen at random,
  /// registers the backend socket on `poll` under `back_token`, and wires
  /// socket + token into the client.
  ///
  /// Errors: `NoBackendAvailable` when the listener has no backend address;
  /// `ToBeDefined` on an out-of-range index or a failed connect.
  fn connect_to_backend(&mut self, poll: &mut Poll, client: &mut Client, back_token: Token) ->Result<BackendConnectAction,ConnectionError> {
    let len = self.listeners[&client.accept_token].back_addresses.len();
    if len == 0 {
      error!("no backend available");
      return Err(ConnectionError::NoBackendAvailable);
    }
    // random pick over the address list (no weighting, no health checks here)
    let rnd = random::<usize>();
    let idx = rnd % len;
    client.app_id = Some(self.listeners[&client.accept_token].app_id.clone());
    let backend_addr = try!(self.listeners[&client.accept_token].back_addresses.get(idx).ok_or(ConnectionError::ToBeDefined));
    let stream = try!(TcpStream::connect(backend_addr).map_err(|_| ConnectionError::ToBeDefined));
    // best-effort: set_nodelay and register results are ignored
    stream.set_nodelay(true);
    poll.register(
      &stream,
      back_token,
      Ready::readable() | Ready::writable() | Ready::from(UnixReady::hup() | UnixReady::error()),
      PollOpt::edge()
    );
    client.set_back_token(back_token);
    client.set_back_socket(stream);
    incr!("backend.connections");
    Ok(BackendConnectAction::New)
  }
  /// Applies a configuration order from the command channel and answers
  /// with Ok / Error / Processing. Unsupported orders are logged and refused.
  fn notify(&mut self, event_loop: &mut Poll, message: OrderMessage) -> OrderMessageAnswer {
    match message.order {
      /*FIXME
      Order::AddTcpFront(tcp_front) => {
      let addr_string = tcp_front.ip_address + ":" + &tcp_front.port.to_string();
      if let Ok(front) = addr_string.parse() {
      if let Some(token) = self.add_tcp_front(&tcp_front.app_id, &front, event_loop) {
      OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
      } else {
      error!("Couldn't add tcp front");
      OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Error(String::from("cannot add tcp front")), data: None}
      }
      } else {
      error!("Couldn't parse tcp front address");
      OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Error(String::from("cannot parse the address")), data: None}
      }
      },
      */
      Order::RemoveTcpFront(front) => {
        trace!("{:?}", front);
        let _ = self.remove_tcp_front(front.app_id, event_loop);
        // answer is Ok even when no front matched (result is discarded)
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
      },
      Order::AddBackend(backend) => {
        let addr_string = backend.ip_address + ":" + &backend.port.to_string();
        // NOTE(review): unwrap panics on an unparsable address from the order
        let addr = &addr_string.parse().unwrap();
        if let Some(token) = self.add_backend(&backend.app_id, &backend.backend_id, addr, event_loop) {
          OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
        } else {
          OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Error(String::from("cannot add tcp backend")), data: None}
        }
      },
      Order::RemoveBackend(backend) => {
        trace!("{:?}", backend);
        let addr_string = backend.ip_address + ":" + &backend.port.to_string();
        // NOTE(review): unwrap panics on an unparsable address from the order
        let addr = &addr_string.parse().unwrap();
        if let Some(token) = self.remove_backend(&backend.app_id, addr, event_loop) {
          OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
        } else {
          // remove_backend is a stub today, so this is the branch always taken
          error!("Couldn't remove tcp backend");
          OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Error(String::from("cannot remove tcp backend")), data: None}
        }
      },
      Order::SoftStop => {
        // stop accepting (deregister listeners) but keep serving active
        // clients: answer Processing, not Ok
        info!("{} processing soft shutdown", message.id);
        for listener in self.listeners.values() {
          listener.sock.as_ref().map(|sock| event_loop.deregister(sock));
        }
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Processing, data: None}
      },
      Order::HardStop => {
        info!("{} hard shutdown", message.id);
        for listener in self.listeners.values() {
          listener.sock.as_ref().map(|sock| event_loop.deregister(sock));
        }
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
      },
      Order::Status => {
        info!("{} status", message.id);
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
      },
      Order::Logging(logging_filter) => {
        info!("{} changing logging filter to {}", message.id, logging_filter);
        ::logging::LOGGER.with(|l| {
          let directives = ::logging::parse_logging_spec(&logging_filter);
          l.borrow_mut().set_directives(directives);
        });
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None }
      },
      Order::AddApplication(application) => {
        // only the send_proxy flag is kept from the application description
        let config = ApplicationConfiguration {
          send_proxy: application.send_proxy,
        };
        self.configs.insert(application.app_id, config);
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None }
      },
      Order::RemoveApplication(_) => {
        // accepted but currently a no-op
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None }
      },
      command => {
        error!("{} unsupported message, ignoring {:?}", message.id, command);
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Error(String::from("unsupported message")), data: None}
      }
    }
  }
  /// Accepts one frontend connection on the listener identified by `token`,
  /// builds a `Client` (with proxy-protocol support if the app's config asks
  /// for it) and registers its socket under `client_token`.
  ///
  /// Errors: `TooManyClients` when the buffer pool is exhausted,
  /// `WouldBlock` when there is nothing to accept, `IoError` otherwise.
  fn accept(&mut self, token: ListenToken, poll: &mut Poll, client_token: Token)
    -> Result<(Rc<RefCell<Client>>, bool), AcceptError> {
    // two buffers (front + back) are checked out per client
    let mut p = (*self.pool).borrow_mut();
    if let (Some(front_buf), Some(back_buf)) = (p.checkout(), p.checkout()) {
      let internal_token = Token(token.0);//FIXME: ListenToken(token.0 - 2 - self.base_token);
      if self.listeners.contains_key(&internal_token) {
        let mut send_proxy = false;
        if let Some(config) = self.configs.get(&self.listeners[&internal_token].app_id) {
          send_proxy = config.send_proxy;
        }
        if let Some(ref listener) = self.listeners[&internal_token].sock.as_ref() {
          listener.accept().map(|(frontend_sock, _)| {
            frontend_sock.set_nodelay(true);
            let mut c = Client::new(frontend_sock, internal_token, front_buf, back_buf, send_proxy);
            incr_req!();
            // start with read + hup + error interest on the front side
            c.readiness().front_interest = UnixReady::from(Ready::readable()) | UnixReady::hup() | UnixReady::error();
            c.set_front_token(client_token);
            poll.register(
              c.front_socket(),
              client_token,
              Ready::readable() | Ready::writable() | Ready::from(UnixReady::hup() | UnixReady::error()),
              PollOpt::edge()
            );
            (Rc::new(RefCell::new(c)), true)
          }).map_err(|e| {
            match e.kind() {
              ErrorKind::WouldBlock => AcceptError::WouldBlock,
              other => {
                error!("accept() IO error: {:?}", e);
                AcceptError::IoError
              }
            }
          })
        } else {
          Err(AcceptError::IoError)
        }
      } else {
        Err(AcceptError::IoError)
      }
    } else {
      error!("could not get buffers from pool");
      Err(AcceptError::TooManyClients)
    }
  }
  /// Drains every listener's accept queue, immediately dropping (and thereby
  /// closing) each accepted connection. Used when the proxy refuses traffic.
  fn accept_flush(&mut self) {
    for ref listener in self.listeners.values() {
      if let Some(ref sock) = listener.sock.as_ref() {
        while sock.accept().is_ok() {
          error!("accepting and closing connection");
        }
      }
    }
  }
  /// Decrements the connection count of the backend matching `addr` for
  /// `app_id` (bookkeeping only; the socket is closed by the client).
  fn close_backend(&mut self, app_id: String, addr: &SocketAddr) {
    if let Some(app_backends) = self.backends.get_mut(&app_id) {
      if let Some(ref mut backend) = app_backends.iter_mut().find(|backend| &backend.address == addr) {
        backend.dec_connections();
      }
    }
  }
}
/// Spawns an example TCP proxy in a background thread and pre-configures it
/// with two fronts (ports 1234 and 1235) routing to a backend on port 5678.
/// Returns the command channel used to drive it. Only used by the tests.
pub fn start_example() -> Channel<OrderMessage,OrderMessageAnswer> {
  use network::proxy::ProxyClientCast;
  info!("listen for connections");
  let (mut command, channel) = Channel::generate(1000, 10000).expect("should create a channel");
  thread::spawn(move|| {
    info!("starting event loop");
    let mut poll = Poll::new().expect("could not create event loop");
    let max_buffers = 10;
    let buffer_size = 16384;
    // 2 buffers per client (front + back), hence 2 * max_buffers
    let pool = Rc::new(RefCell::new(
      Pool::with_capacity(2*max_buffers, 0, || BufferQueue::with_capacity(buffer_size))
    ));
    let mut clients: Slab<Rc<RefCell<ProxyClientCast>>,ClientToken> = Slab::with_capacity(max_buffers);
    // the first two slab slots are reserved: command channel and metrics
    {
      let entry = clients.vacant_entry().expect("client list should have enough room at startup");
      info!("taking token {:?} for channel", entry.index());
      entry.insert(Rc::new(RefCell::new(ListenClient { protocol: Protocol::HTTPListen })));
    }
    {
      let entry = clients.vacant_entry().expect("client list should have enough room at startup");
      info!("taking token {:?} for metrics", entry.index());
      entry.insert(Rc::new(RefCell::new(ListenClient { protocol: Protocol::HTTPListen })));
    }
    let (configuration, tokens) = ServerConfiguration::new(&mut poll, pool, Vec::new(), vec!());
    let (scm_server, scm_client) = UnixStream::pair().unwrap();
    let mut s = Server::new(poll, channel, ScmSocket::new(scm_server.as_raw_fd()),
      clients, None, None, Some(configuration), None, max_buffers);
    info!("will run");
    s.run();
    info!("ending event loop");
  });
  // declare front + backend for app "yolo" on port 1234
  {
    let front = TcpFront {
      app_id: String::from("yolo"),
      ip_address: String::from("127.0.0.1"),
      port: 1234,
    };
    let backend = messages::Backend {
      app_id: String::from("yolo"),
      backend_id: String::from("yolo-0"),
      ip_address: String::from("127.0.0.1"),
      port: 5678,
    };
    command.write_message(&OrderMessage { id: String::from("ID_YOLO1"), order: Order::AddTcpFront(front) });
    command.write_message(&OrderMessage { id: String::from("ID_YOLO2"), order: Order::AddBackend(backend) });
  }
  // second front for the same app on port 1235, same backend
  {
    let front = TcpFront {
      app_id: String::from("yolo"),
      ip_address: String::from("127.0.0.1"),
      port: 1235,
    };
    let backend = messages::Backend {
      app_id: String::from("yolo"),
      backend_id: String::from("yolo-0"),
      ip_address: String::from("127.0.0.1"),
      port: 5678,
    };
    command.write_message(&OrderMessage { id: String::from("ID_YOLO3"), order: Order::AddTcpFront(front) });
    command.write_message(&OrderMessage { id: String::from("ID_YOLO4"), order: Order::AddBackend(backend) });
  }
  command
}
/// Entry point for a TCP proxy worker: builds the event loop, buffer pool and
/// client slab, reserves the bookkeeping tokens, then runs the server until
/// the event loop stops. Blocks the calling thread for the server's lifetime.
pub fn start(max_buffers: usize, buffer_size:usize, channel: ProxyChannel) {
  use network::proxy::ProxyClientCast;
  let mut poll = Poll::new().expect("could not create event loop");
  // 2 buffers per client (front + back), hence 2 * max_buffers
  let pool = Rc::new(RefCell::new(
    Pool::with_capacity(2*max_buffers, 0, || BufferQueue::with_capacity(buffer_size))
  ));
  let mut clients: Slab<Rc<RefCell<ProxyClientCast>>,ClientToken> = Slab::with_capacity(max_buffers);
  // slot 0: command channel placeholder
  {
    let entry = clients.vacant_entry().expect("client list should have enough room at startup");
    info!("taking token {:?} for channel", entry.index());
    entry.insert(Rc::new(RefCell::new(ListenClient { protocol: Protocol::HTTPListen })));
  }
  // slot 1: metrics placeholder
  {
    let entry = clients.vacant_entry().expect("client list should have enough room at startup");
    info!("taking token {:?} for metrics", entry.index());
    entry.insert(Rc::new(RefCell::new(ListenClient { protocol: Protocol::HTTPListen })));
  }
  // slot 2: reserved for the TCP listener; its token seeds the configuration
  let token = {
    let entry = clients.vacant_entry().expect("client list should have enough room at startup");
    let e = entry.insert(Rc::new(RefCell::new(ListenClient { protocol: Protocol::HTTPListen })));
    Token(e.index().0)
  };
  let (configuration, tokens) = ServerConfiguration::new(&mut poll, pool, Vec::new(), vec!(token));
  let (scm_server, scm_client) = UnixStream::pair().unwrap();
  let mut server = Server::new(poll, channel, ScmSocket::new(scm_server.as_raw_fd()), clients, None, None, Some(configuration), None, max_buffers);
  info!("starting event loop");
  server.run();
  info!("ending event loop");
}
#[cfg(test)]
mod tests {
  use super::*;
  use std::net::{TcpListener, TcpStream, Shutdown};
  use std::io::{Read,Write};
  use std::time::Duration;
  use std::{thread,str};
  // End-to-end smoke test: starts the echo backend and the example proxy,
  // then checks that data sent by two clients comes back suffixed with
  // " END" by the backend. The sleeps give the proxy time to set up;
  // timing-sensitive by nature.
  #[allow(unused_mut, unused_must_use, unused_variables)]
  #[test]
  fn mi() {
    setup_test_logger!();
    thread::spawn(|| { start_server(); });
    let tx = start_example();
    thread::sleep(Duration::from_millis(300));
    let mut s1 = TcpStream::connect("127.0.0.1:1234").expect("could not parse address");
    let mut s3 = TcpStream::connect("127.0.0.1:1234").expect("could not parse address");
    thread::sleep(Duration::from_millis(300));
    let mut s2 = TcpStream::connect("127.0.0.1:1234").expect("could not parse address");
    s1.write(&b"hello"[..]);
    println!("s1 sent");
    s2.write(&b"pouet pouet"[..]);
    println!("s2 sent");
    thread::sleep(Duration::from_millis(500));
    let mut res = [0; 128];
    s1.write(&b"coucou"[..]);
    let mut sz1 = s1.read(&mut res[..]).expect("could not read from socket");
    println!("s1 received {:?}", str::from_utf8(&res[..sz1]));
    assert_eq!(&res[..sz1], &b"hello END"[..]);
    // s3 never sent anything; closing it must not disturb s1/s2
    s3.shutdown(Shutdown::Both);
    let sz2 = s2.read(&mut res[..]).expect("could not read from socket");
    println!("s2 received {:?}", str::from_utf8(&res[..sz2]));
    assert_eq!(&res[..sz2], &b"pouet pouet END"[..]);
    thread::sleep(Duration::from_millis(400));
    sz1 = s1.read(&mut res[..]).expect("could not read from socket");
    println!("s1 received again({}): {:?}", sz1, str::from_utf8(&res[..sz1]));
    assert_eq!(&res[..sz1], &b"coucou END"[..]);
    //assert!(false);
  }
  /*
  #[allow(unused_mut, unused_must_use, unused_variables)]
  #[test]
  fn concurrent() {
  use std::sync::mpsc;
  use time;
  let thread_nb = 127;
  thread::spawn(|| { start_server(); });
  start();
  thread::sleep_ms(300);
  let (tx, rx) = mpsc::channel();
  let begin = time::precise_time_s();
  for i in 0..thread_nb {
  let id = i;
  let tx = tx.clone();
  thread::Builder::new().name(id.to_string()).spawn(move || {
  let s = format!("[{}] Hello world!\n", id);
  let v: Vec<u8> = s.bytes().collect();
  if let Ok(mut conn) = TcpStream::connect("127.0.0.1:1234") {
  let mut res = [0; 128];
  for j in 0..10000 {
  conn.write(&v[..]);
  if j % 5 == 0 {
  if let Ok(sz) = conn.read(&mut res[..]) {
  //println!("[{}] received({}): {:?}", id, sz, str::from_utf8(&res[..sz]));
  } else {
  println!("failed reading");
  tx.send(());
  return;
  }
  }
  }
  tx.send(());
  return;
  } else {
  println!("failed connecting");
  tx.send(());
  return;
  }
  });
  }
  //thread::sleep_ms(5000);
  for i in 0..thread_nb {
  rx.recv();
  }
  let end = time::precise_time_s();
  println!("executed in {} seconds", end - begin);
  assert!(false);
  }
  */
  // Toy echo backend on 127.0.0.1:5678: writes each received chunk back,
  // waits 20ms, then appends " END". One thread per connection.
  #[allow(unused_mut, unused_must_use, unused_variables)]
  fn start_server() {
    let listener = TcpListener::bind("127.0.0.1:5678").expect("could not parse address");
    fn handle_client(stream: &mut TcpStream, id: u8) {
      let mut buf = [0; 128];
      let response = b" END";
      while let Ok(sz) = stream.read(&mut buf[..]) {
        if sz > 0 {
          println!("ECHO[{}] got \"{:?}\"", id, str::from_utf8(&buf[..sz]));
          stream.write(&buf[..sz]);
          thread::sleep(Duration::from_millis(20));
          stream.write(&response[..]);
        }
      }
    }
    let mut count = 0;
    thread::spawn(move|| {
      for conn in listener.incoming() {
        match conn {
          Ok(mut stream) => {
            thread::spawn(move|| {
              println!("got a new client: {}", count);
              handle_client(&mut stream, count)
            });
          }
          Err(e) => { println!("connection failed"); }
        }
        count += 1;
      }
    });
  }
}
handle set_back_connected in the TCP proxy
use std::thread::{self,Thread,Builder};
use std::sync::mpsc::{self,channel,Receiver};
use mio::net::*;
use mio::*;
use mio_uds::UnixStream;
use mio::unix::UnixReady;
use std::collections::{HashMap,HashSet};
use std::os::unix::io::RawFd;
use std::os::unix::io::{FromRawFd,AsRawFd};
use std::io::{self,Read,ErrorKind};
use nom::HexDisplay;
use std::error::Error;
use slab::{Slab,Entry,VacantEntry};
use std::rc::Rc;
use std::cell::{RefCell,RefMut};
use std::net::{SocketAddr,Shutdown};
use std::str::FromStr;
use time::{Duration,precise_time_s};
use rand::random;
use uuid::Uuid;
use pool::{Pool,Checkout,Reset};
use sozu_command::buffer::Buffer;
use sozu_command::channel::Channel;
use sozu_command::scm_socket::ScmSocket;
use sozu_command::messages::{self,TcpFront,Order,OrderMessage,OrderMessageAnswer,OrderMessageStatus};
use network::{AppId,Backend,ClientResult,ConnectionError,RequiredEvents,Protocol,Readiness,SessionMetrics,
ProxyClient,ProxyConfiguration,AcceptError,BackendConnectAction,BackendConnectionStatus,
CloseResult};
use network::proxy::{Server,ProxyChannel,ListenToken,ClientToken,ListenClient};
use network::buffer_queue::BufferQueue;
use network::socket::{SocketHandler,SocketResult,server_bind};
use network::{http,https_rustls};
use network::protocol::{Pipe, ProtocolResult};
use network::protocol::proxy_protocol::ProxyProtocol;
use util::UnwrapLog;
const SERVER: Token = Token(0);
/// Lifecycle of a proxied TCP connection pair (frontend + backend).
#[derive(Debug,Clone,PartialEq,Eq)]
pub enum ConnectionStatus {
  Initial,
  ClientConnected,
  Connected,
  ClientClosed,
  ServerClosed,
  Closed
}
/// Protocol state machine of a TCP client: either sending the proxy-protocol
/// header to the backend first, or plainly piping bytes in both directions.
/// `ProxyProtocol` upgrades to `Pipe` once the header has been written
/// (see `Client::upgrade`).
pub enum State {
  Pipe(Pipe<TcpStream>),
  ProxyProtocol(ProxyProtocol<TcpStream>),
}
/// One proxied TCP session: the accepted frontend socket, the optional
/// backend socket, their poll tokens, and the protocol state machine.
pub struct Client {
  // accepted frontend socket (the protocol state holds a try_clone of it)
  sock: TcpStream,
  backend: Option<TcpStream>,
  // frontend poll token
  token: Option<Token>,
  backend_token: Option<Token>,
  back_connected: BackendConnectionStatus,
  // token of the listener that accepted this client
  accept_token: Token,
  status: ConnectionStatus,
  rx_count: usize,
  tx_count: usize,
  app_id: Option<String>,
  // UUID used as log correlation id
  request_id: String,
  metrics: SessionMetrics,
  // Option so upgrade() can take() the state; None only transiently
  protocol: Option<State>,
  // buffers held aside while in ProxyProtocol state, handed to Pipe on upgrade
  front_buf: Option<Checkout<BufferQueue>>,
  back_buf: Option<Checkout<BufferQueue>>,
}
impl Client {
fn new(sock: TcpStream, accept_token: Token, front_buf: Checkout<BufferQueue>,
back_buf: Checkout<BufferQueue>, send_proxy: bool) -> Client {
let s = sock.try_clone().expect("could not clone the socket");
let addr = sock.peer_addr().map(|s| s.ip()).ok();
let mut frontend_buffer = None;
let mut backend_buffer = None;
let protocol = if send_proxy {
frontend_buffer = Some(front_buf);
backend_buffer = Some(back_buf);
Some(State::ProxyProtocol(ProxyProtocol::new(s, None)))
} else {
Some(State::Pipe(Pipe::new(s, None, front_buf, back_buf, addr)))
};
Client {
sock: sock,
backend: None,
token: None,
backend_token: None,
back_connected: BackendConnectionStatus::NotConnected,
accept_token: accept_token,
status: ConnectionStatus::Connected,
rx_count: 0,
tx_count: 0,
app_id: None,
request_id: Uuid::new_v4().hyphenated().to_string(),
metrics: SessionMetrics::new(),
protocol,
front_buf: frontend_buffer,
back_buf: backend_buffer,
}
}
fn log_request(&self) {
let client = match self.sock.peer_addr().ok() {
None => String::from("-"),
Some(SocketAddr::V4(addr)) => format!("{}", addr),
Some(SocketAddr::V6(addr)) => format!("{}", addr),
};
let backend = match self.backend.as_ref().and_then(|backend| backend.peer_addr().ok()) {
None => String::from("-"),
Some(SocketAddr::V4(addr)) => format!("{}", addr),
Some(SocketAddr::V6(addr)) => format!("{}", addr),
};
let response_time = self.metrics.response_time().num_milliseconds();
let service_time = self.metrics.service_time().num_milliseconds();
let app_id = self.app_id.clone().unwrap_or(String::from("-"));
record_request_time!(&app_id, response_time);
if let Some(backend_id) = self.metrics.backend_id.as_ref() {
if let Some(backend_response_time) = self.metrics.backend_response_time() {
record_backend_metrics!(app_id, backend_id, backend_response_time.num_milliseconds(),
self.metrics.backend_bin, self.metrics.backend_bout);
}
}
info!("{}{} -> {}\t{} {} {} {}",
self.log_context(), client, backend,
response_time, service_time, self.metrics.bin, self.metrics.bout);
}
fn front_hup(&mut self) -> ClientResult {
self.log_request();
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.front_hup(),
Some(State::ProxyProtocol(_)) => {
ClientResult::CloseClient
},
_ => unreachable!(),
}
}
fn back_hup(&mut self) -> ClientResult {
self.log_request();
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.back_hup(),
_ => ClientResult::CloseClient,
}
}
fn log_context(&self) -> String {
if let Some(ref app_id) = self.app_id {
format!("{}\t{}\t", self.request_id, app_id)
} else {
format!("{}\tunknown\t", self.request_id)
}
}
fn readable(&mut self) -> ClientResult {
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.readable(&mut self.metrics),
_ => ClientResult::Continue,
}
}
fn writable(&mut self) -> ClientResult {
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.writable(&mut self.metrics),
_ => ClientResult::Continue,
}
}
fn back_readable(&mut self) -> ClientResult {
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.back_readable(&mut self.metrics),
_ => ClientResult::Continue,
}
}
fn back_writable(&mut self) -> ClientResult {
let mut res = (ProtocolResult::Continue, ClientResult::Continue);
match self.protocol {
Some(State::Pipe(ref mut pipe)) => res.1 = pipe.back_writable(&mut self.metrics),
Some(State::ProxyProtocol(ref mut pp)) => {
res.0 = pp.back_writable().0;
res.1 = pp.back_writable().1;
}
_ => unreachable!(),
};
if let ProtocolResult::Upgrade = res.0 {
self.upgrade();
}
res.1
}
fn front_socket(&self) -> &TcpStream {
match self.protocol {
Some(State::Pipe(ref pipe)) => pipe.front_socket(),
Some(State::ProxyProtocol(ref pp)) => pp.front_socket(),
_ => unreachable!(),
}
}
fn back_socket(&self) -> Option<&TcpStream> {
match self.protocol {
Some(State::Pipe(ref pipe)) => pipe.back_socket(),
Some(State::ProxyProtocol(ref pp)) => pp.back_socket(),
_ => unreachable!(),
}
}
pub fn upgrade(&mut self) {
let protocol = self.protocol.take();
if let Some(State::ProxyProtocol(mut pp)) = protocol {
if self.back_buf.is_some() && self.front_buf.is_some() {
let mut backend_socket = pp.backend.take().unwrap();
let addr = backend_socket.peer_addr().map(|s| s.ip()).ok();
let front_token = pp.front_token();
let back_token = pp.back_token();
let mut pipe = Pipe::new(
pp.frontend.take(0).into_inner(),
Some(backend_socket),
self.front_buf.take().unwrap(),
self.back_buf.take().unwrap(),
addr,
);
pipe.readiness.front_readiness = pp.readiness.front_readiness;
pipe.readiness.back_readiness = pp.readiness.back_readiness;
if let Some(front_token) = front_token {
pipe.set_front_token(front_token);
}
if let Some(back_token) = back_token {
pipe.set_back_token(back_token);
}
self.protocol = Some(State::Pipe(pipe));
} else {
error!("Missing the frontend or backend buffer queue, we can't switch to a pipe");
}
}
}
fn readiness(&mut self) -> &mut Readiness {
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.readiness(),
Some(State::ProxyProtocol(ref mut pp)) => pp.readiness(),
_ => unreachable!(),
}
}
fn front_token(&self) -> Option<Token> {
match self.protocol {
Some(State::Pipe(ref pipe)) => pipe.front_token(),
Some(State::ProxyProtocol(ref pp)) => pp.front_token(),
_ => unreachable!()
}
}
fn back_token(&self) -> Option<Token> {
match self.protocol {
Some(State::Pipe(ref pipe)) => pipe.back_token(),
Some(State::ProxyProtocol(ref pp)) => pp.back_token(),
_ => unreachable!()
}
}
fn set_back_socket(&mut self, socket: TcpStream) {
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.set_back_socket(socket),
Some(State::ProxyProtocol(ref mut pp)) => pp.set_back_socket(socket),
_ => unreachable!()
}
}
fn set_front_token(&mut self, token: Token) {
self.token = Some(token);
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.set_front_token(token),
Some(State::ProxyProtocol(ref mut pp)) => pp.set_front_token(token),
_ => unreachable!()
}
}
fn set_back_token(&mut self, token: Token) {
self.backend_token = Some(token);
match self.protocol {
Some(State::Pipe(ref mut pipe)) => pipe.set_back_token(token),
Some(State::ProxyProtocol(ref mut pp)) => pp.set_back_token(token),
_ => unreachable!()
}
}
fn back_connected(&self) -> BackendConnectionStatus {
self.back_connected
}
fn set_back_connected(&mut self, status: BackendConnectionStatus) {
self.back_connected = status;
if status == BackendConnectionStatus::Connected {
if let Some(State::ProxyProtocol(ref mut pp)) = self.protocol {
pp.set_back_connected(BackendConnectionStatus::Connected);
}
}
}
fn metrics(&mut self) -> &mut SessionMetrics {
&mut self.metrics
}
fn remove_backend(&mut self) -> (Option<String>, Option<SocketAddr>) {
let addr = self.backend.as_ref().and_then(|sock| sock.peer_addr().ok());
self.backend = None;
self.backend_token = None;
(self.app_id.clone(), addr)
}
}
impl ProxyClient for Client {
  /// Shuts down and deregisters both sockets and returns the tokens and
  /// (app_id, backend address) pairs the server must release.
  fn close(&mut self, poll: &mut Poll) -> CloseResult {
    self.metrics.service_stop();
    // shutdown/deregister results are deliberately ignored (best effort)
    self.front_socket().shutdown(Shutdown::Both);
    poll.deregister(self.front_socket());
    let mut result = CloseResult::default();
    if let Some(tk) = self.backend_token {
      result.tokens.push(tk)
    }
    if let (Some(app_id), Some(addr)) = self.remove_backend() {
      result.backends.push((app_id, addr.clone()));
    }
    if let Some(sock) = self.back_socket() {
      sock.shutdown(Shutdown::Both);
      poll.deregister(sock);
      decr!("backend.connections");
    }
    if let Some(tk) = self.token {
      result.tokens.push(tk)
    }
    result
  }
  /// Closes only the backend side, returning (app_id, address) when a
  /// backend was attached. The token argument is unused.
  fn close_backend(&mut self, _: Token, poll: &mut Poll) -> Option<(String,SocketAddr)> {
    let mut res = None;
    if let (Some(app_id), Some(addr)) = self.remove_backend() {
      res = Some((app_id, addr.clone()));
    }
    if let Some(sock) = self.back_socket() {
      sock.shutdown(Shutdown::Both);
      poll.deregister(sock);
      decr!("backend.connections");
    }
    res
  }
  fn protocol(&self) -> Protocol {
    Protocol::TCP
  }
  /// Folds a poll event into the front or back readiness set, depending on
  /// which token the event was delivered for.
  fn process_events(&mut self, token: Token, events: Ready) {
    if self.token == Some(token) {
      self.readiness().front_readiness = self.readiness().front_readiness | UnixReady::from(events);
    } else if self.backend_token == Some(token) {
      self.readiness().back_readiness = self.readiness().back_readiness | UnixReady::from(events);
    }
  }
  /// Drives the session state machine: drains front/back readable, writable,
  /// hup and error events until nothing is ready or an order (close,
  /// reconnect, ...) must be returned to the server.
  fn ready(&mut self) -> ClientResult {
    let mut counter = 0;
    // safety valve against event-handling livelock
    let max_loop_iterations = 100000;
    self.metrics().service_start();
    if self.back_connected() == BackendConnectionStatus::Connecting {
      if self.readiness().back_readiness.is_hup() {
        //retry connecting the backend
        //FIXME: there should probably be a circuit breaker per client too
        error!("error connecting to backend, trying again");
        self.metrics().service_stop();
        return ClientResult::ConnectBackend;
      } else if self.readiness().back_readiness != UnixReady::from(Ready::empty()) {
        // any non-hup event on a connecting backend means it is now usable
        self.set_back_connected(BackendConnectionStatus::Connected);
      }
    }
    let token = self.token.clone();
    while counter < max_loop_iterations {
      // only act on events we are both interested in and that are ready
      let front_interest = self.readiness().front_interest & self.readiness().front_readiness;
      let back_interest = self.readiness().back_interest & self.readiness().back_readiness;
      trace!("PROXY\t{:?} {:?} | front: {:?} | back: {:?} ", token, self.readiness(), front_interest, back_interest);
      if front_interest == UnixReady::from(Ready::empty()) && back_interest == UnixReady::from(Ready::empty()) {
        break;
      }
      if front_interest.is_readable() {
        let order = self.readable();
        trace!("front readable\tinterpreting client order {:?}", order);
        if order != ClientResult::Continue {
          return order;
        }
      }
      if back_interest.is_writable() {
        let order = self.back_writable();
        if order != ClientResult::Continue {
          return order;
        }
      }
      if back_interest.is_readable() {
        let order = self.back_readable();
        if order != ClientResult::Continue {
          return order;
        }
      }
      if front_interest.is_writable() {
        let order = self.writable();
        trace!("front writable\tinterpreting client order {:?}", order);
        if order != ClientResult::Continue {
          return order;
        }
      }
      if front_interest.is_hup() {
        let order = self.front_hup();
        match order {
          ClientResult::CloseClient => {
            return order;
          },
          _ => {
            // hup handled: clear it so we don't re-enter this branch
            self.readiness().front_readiness.remove(UnixReady::hup());
            return order;
          }
        }
      }
      if back_interest.is_hup() {
        let order = self.back_hup();
        match order {
          ClientResult::CloseClient => {
            return order;
          },
          ClientResult::Continue => {
            // backend closed but data may remain: flush it to the front
            self.readiness().front_interest.insert(Ready::writable());
            if ! self.readiness().front_readiness.is_writable() {
              break;
            }
          },
          _ => {
            self.readiness().back_readiness.remove(UnixReady::hup());
            return order;
          }
        };
      }
      if front_interest.is_error() || back_interest.is_error() {
        if front_interest.is_error() {
          error!("PROXY client {:?} front error, disconnecting", self.token);
        } else {
          error!("PROXY client {:?} back error, disconnecting", self.token);
        }
        self.readiness().front_interest = UnixReady::from(Ready::empty());
        self.readiness().back_interest = UnixReady::from(Ready::empty());
        return ClientResult::CloseClient;
      }
      counter += 1;
    }
    if counter == max_loop_iterations {
      error!("PROXY\thandling client {:?} went through {} iterations, there's a probable infinite loop bug, closing the connection", self.token, max_loop_iterations);
      let front_interest = self.readiness().front_interest & self.readiness().front_readiness;
      let back_interest = self.readiness().back_interest & self.readiness().back_readiness;
      let token = self.token.clone();
      error!("PROXY\t{:?} readiness: {:?} | front: {:?} | back: {:?} ", token,
        self.readiness(), front_interest, back_interest);
      return ClientResult::CloseClient;
    }
    ClientResult::Continue
  }
}
/// A TCP front for one application: the listening socket (None once handed
/// back to a new worker), its poll token, and the backend address pool used
/// for load balancing.
pub struct ApplicationListener {
  app_id: String,
  // None after give_back_listeners() took the socket
  sock: Option<TcpListener>,
  token: Option<Token>,
  front_address: SocketAddr,
  back_addresses: Vec<SocketAddr>,
}
/// Per-application options; currently only whether to send the proxy
/// protocol header to backends.
#[derive(Debug)]
pub struct ApplicationConfiguration {
  send_proxy: bool,
}
/// State of the TCP proxy: front and backend routing tables, active
/// listeners keyed by poll token, per-app options, and the shared buffer
/// pool clients check their buffers out of.
pub struct ServerConfiguration {
  // app_id -> listener token (one front per app)
  fronts: HashMap<String, Token>,
  backends: HashMap<String, Vec<Backend>>,
  listeners: HashMap<Token, ApplicationListener>,
  configs: HashMap<AppId, ApplicationConfiguration>,
  pool: Rc<RefCell<Pool<BufferQueue>>>,
}
impl ServerConfiguration {
pub fn new(event_loop: &mut Poll, pool: Rc<RefCell<Pool<BufferQueue>>>,
mut tcp_listener: Vec<(AppId, TcpListener)>, mut tokens: Vec<Token>) -> (ServerConfiguration, HashSet<Token>) {
let mut configuration = ServerConfiguration {
backends: HashMap::new(),
listeners: HashMap::new(),
configs: HashMap::new(),
fronts: HashMap::new(),
pool: pool,
};
let mut listener_tokens = HashSet::new();
for ((app_id, listener), token) in tcp_listener.drain(..).zip(tokens.drain(..)) {
if let Ok(front) = listener.local_addr() {
let al = ApplicationListener {
app_id: app_id.clone(),
sock: Some(listener),
token: None,
front_address: front,
back_addresses: Vec::new(),
};
if let Some(_) = configuration.add_application_listener(&app_id, al, event_loop, token) {
listener_tokens.insert(token);
}
}
}
(configuration, listener_tokens)
}
pub fn give_back_listeners(&mut self) -> Vec<(String, TcpListener)> {
let res = self.listeners.values_mut()
.filter(|app_listener| app_listener.sock.is_some())
.map(|app_listener| (app_listener.app_id.clone(), app_listener.sock.take().unwrap()))
.collect();
self.listeners.clear();
res
}
fn add_application_listener(&mut self, app_id: &str, al: ApplicationListener, event_loop: &mut Poll, token: Token) -> Option<ListenToken> {
let front = al.front_address.clone();
self.listeners.insert(token, al);
self.fronts.insert(String::from(app_id), token);
if let Some(ref sock) = self.listeners[&token].sock {
event_loop.register(sock, token, Ready::readable(), PollOpt::edge());
}
info!("started TCP listener for app {} on port {}", app_id, front.port());
Some(ListenToken(token.0))
}
pub fn add_tcp_front(&mut self, app_id: &str, front: &SocketAddr, event_loop: &mut Poll, token: Token) -> Option<ListenToken> {
if self.fronts.contains_key(app_id) {
error!("TCP front already exists for app_id {}", app_id);
return None;
}
if let Ok(listener) = server_bind(front) {
let addresses: Vec<SocketAddr> = if let Some(ads) = self.backends.get(app_id) {
let v: Vec<SocketAddr> = ads.iter().map(|backend| backend.address).collect();
v
} else {
Vec::new()
};
let al = ApplicationListener {
app_id: String::from(app_id),
sock: Some(listener),
token: Some(token),
front_address: *front,
back_addresses: addresses,
};
self.add_application_listener(app_id, al, event_loop, token)
} else {
error!("could not declare listener for app {} on port {}", app_id, front.port());
None
}
}
/// Removes the listener associated with `app_id`, deregistering its socket
/// from the event loop.
///
/// Fix: also drops the app_id -> token mapping from `fronts`. The original
/// left a stale entry behind, so `add_tcp_front` for the same app would fail
/// forever with "TCP front already exists".
pub fn remove_tcp_front(&mut self, app_id: String, event_loop: &mut Poll) -> Option<ListenToken>{
  info!("removing tcp_front {:?}", app_id);
  // ToDo
  // Removes all listeners for the given app_id
  // an app can't have two listeners. Is this a problem?
  if let Some(&tok) = self.fronts.get(&app_id) {
    if self.listeners.contains_key(&tok) {
      self.listeners[&tok].sock.as_ref().map(|sock| event_loop.deregister(sock));
      self.listeners.remove(&tok);
      // keep `fronts` consistent with `listeners` so the front can be re-added
      self.fronts.remove(&app_id);
      warn!("removed server {:?}", tok);
      //self.listeners[tok].sock.shutdown(Shutdown::Both);
      Some(ListenToken(tok.0))
    } else {
      None
    }
  } else {
    None
  }
}
/// Registers a backend server for `app_id` and, when a front listener exists,
/// appends its address to that listener's backend pool.
///
/// Returns the front's `ListenToken` on success, `None` when the app has no
/// front yet. `event_loop` is currently unused but kept for interface
/// symmetry with the other mutators.
pub fn add_backend(&mut self, app_id: &str, backend_id: &str, backend_address: &SocketAddr, event_loop: &mut Poll) -> Option<ListenToken> {
  if let Some(addrs) = self.backends.get_mut(app_id) {
    // next id = last backend's id + 1 (ids are assigned in push order);
    // plain field access replaces the original BorrowMut contortion
    let id = addrs.last().map(|b| b.id).unwrap_or(0) + 1;
    let backend = Backend::new(backend_id, *backend_address, id);
    if !addrs.contains(&backend) {
      addrs.push(backend);
    }
  } else {
    // first backend for this app
    let backend = Backend::new(backend_id, *backend_address, 0);
    self.backends.insert(String::from(app_id), vec![backend]);
  }
  // Copy the token out (Token is Copy) so the borrow of `self.fronts` ends
  // before we mutate `self.listeners`. The original `.clone()` only cloned
  // an `Option<&Token>`, which did nothing useful.
  let opt_tok = self.fronts.get(app_id).cloned();
  if let Some(tok) = opt_tok {
    if let Some(listener) = self.listeners.get_mut(&tok) {
      listener.back_addresses.push(*backend_address);
    }
    Some(ListenToken(tok.0))
  } else {
    error!("No front for backend {} in app {}", backend_id, app_id);
    None
  }
}
/// Not implemented yet: backends are never detached from their listener's
/// `back_addresses`, and callers always receive `None` here.
pub fn remove_backend(&mut self, app_id: &str, backend_address: &SocketAddr, event_loop: &mut Poll) -> Option<ListenToken>{
  // ToDo
  None
}
}
impl ProxyConfiguration<Client> for ServerConfiguration {
  /// Picks a backend for `client` from the listener it was accepted on and
  /// opens a non-blocking TCP connection to it, registering the back socket
  /// for read/write/hup/error edge events under `back_token`.
  fn connect_to_backend(&mut self, poll: &mut Poll, client: &mut Client, back_token: Token) ->Result<BackendConnectAction,ConnectionError> {
    let len = self.listeners[&client.accept_token].back_addresses.len();
    if len == 0 {
      error!("no backend available");
      return Err(ConnectionError::NoBackendAvailable);
    }
    // uniform random backend selection; no load-based balancing here
    let rnd = random::<usize>();
    let idx = rnd % len;
    client.app_id = Some(self.listeners[&client.accept_token].app_id.clone());
    let backend_addr = try!(self.listeners[&client.accept_token].back_addresses.get(idx).ok_or(ConnectionError::ToBeDefined));
    let stream = try!(TcpStream::connect(backend_addr).map_err(|_| ConnectionError::ToBeDefined));
    // NOTE(review): the Results of set_nodelay() and register() are ignored
    stream.set_nodelay(true);
    poll.register(
      &stream,
      back_token,
      Ready::readable() | Ready::writable() | Ready::from(UnixReady::hup() | UnixReady::error()),
      PollOpt::edge()
    );
    client.set_back_token(back_token);
    client.set_back_socket(stream);
    // mio's connect is non-blocking: the connection is only "Connecting" until
    // the event loop reports the socket writable
    client.set_back_connected(BackendConnectionStatus::Connecting);
    incr!("backend.connections");
    Ok(BackendConnectAction::New)
  }
  /// Dispatches a configuration order (add/remove backend, stop, status,
  /// logging, application config) and answers with its status.
  fn notify(&mut self, event_loop: &mut Poll, message: OrderMessage) -> OrderMessageAnswer {
    match message.order {
      /*FIXME
      Order::AddTcpFront(tcp_front) => {
        let addr_string = tcp_front.ip_address + ":" + &tcp_front.port.to_string();
        if let Ok(front) = addr_string.parse() {
          if let Some(token) = self.add_tcp_front(&tcp_front.app_id, &front, event_loop) {
            OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
          } else {
            error!("Couldn't add tcp front");
            OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Error(String::from("cannot add tcp front")), data: None}
          }
        } else {
          error!("Couldn't parse tcp front address");
          OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Error(String::from("cannot parse the address")), data: None}
        }
      },
      */
      Order::RemoveTcpFront(front) => {
        trace!("{:?}", front);
        let _ = self.remove_tcp_front(front.app_id, event_loop);
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
      },
      Order::AddBackend(backend) => {
        let addr_string = backend.ip_address + ":" + &backend.port.to_string();
        // NOTE(review): unwrap() panics on an unparseable address from the
        // command channel
        let addr = &addr_string.parse().unwrap();
        if let Some(token) = self.add_backend(&backend.app_id, &backend.backend_id, addr, event_loop) {
          OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
        } else {
          OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Error(String::from("cannot add tcp backend")), data: None}
        }
      },
      Order::RemoveBackend(backend) => {
        trace!("{:?}", backend);
        let addr_string = backend.ip_address + ":" + &backend.port.to_string();
        let addr = &addr_string.parse().unwrap();
        if let Some(token) = self.remove_backend(&backend.app_id, addr, event_loop) {
          OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
        } else {
          error!("Couldn't remove tcp backend");
          OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Error(String::from("cannot remove tcp backend")), data: None}
        }
      },
      Order::SoftStop => {
        // stop accepting new connections, let the current ones finish:
        // answer Processing, not Ok
        info!("{} processing soft shutdown", message.id);
        for listener in self.listeners.values() {
          listener.sock.as_ref().map(|sock| event_loop.deregister(sock));
        }
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Processing, data: None}
      },
      Order::HardStop => {
        info!("{} hard shutdown", message.id);
        for listener in self.listeners.values() {
          listener.sock.as_ref().map(|sock| event_loop.deregister(sock));
        }
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
      },
      Order::Status => {
        info!("{} status", message.id);
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None}
      },
      Order::Logging(logging_filter) => {
        info!("{} changing logging filter to {}", message.id, logging_filter);
        ::logging::LOGGER.with(|l| {
          let directives = ::logging::parse_logging_spec(&logging_filter);
          l.borrow_mut().set_directives(directives);
        });
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None }
      },
      Order::AddApplication(application) => {
        let config = ApplicationConfiguration {
          send_proxy: application.send_proxy,
        };
        self.configs.insert(application.app_id, config);
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None }
      },
      Order::RemoveApplication(_) => {
        // accepted but currently a no-op
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Ok, data: None }
      },
      command => {
        error!("{} unsupported message, ignoring {:?}", message.id, command);
        OrderMessageAnswer{ id: message.id, status: OrderMessageStatus::Error(String::from("unsupported message")), data: None}
      }
    }
  }
  /// Accepts one frontend connection on the listener identified by `token`,
  /// allocates front/back buffers from the pool and registers the new client
  /// socket under `client_token`.
  fn accept(&mut self, token: ListenToken, poll: &mut Poll, client_token: Token)
    -> Result<(Rc<RefCell<Client>>, bool), AcceptError> {
    let mut p = (*self.pool).borrow_mut();
    // a client needs two buffers (front and back); refuse if the pool is dry
    if let (Some(front_buf), Some(back_buf)) = (p.checkout(), p.checkout()) {
      let internal_token = Token(token.0);//FIXME: ListenToken(token.0 - 2 - self.base_token);
      if self.listeners.contains_key(&internal_token) {
        // per-application option: send a PROXY protocol header to the backend
        let mut send_proxy = false;
        if let Some(config) = self.configs.get(&self.listeners[&internal_token].app_id) {
          send_proxy = config.send_proxy;
        }
        if let Some(ref listener) = self.listeners[&internal_token].sock.as_ref() {
          listener.accept().map(|(frontend_sock, _)| {
            frontend_sock.set_nodelay(true);
            let mut c = Client::new(frontend_sock, internal_token, front_buf, back_buf, send_proxy);
            incr_req!();
            c.readiness().front_interest = UnixReady::from(Ready::readable()) | UnixReady::hup() | UnixReady::error();
            c.set_front_token(client_token);
            poll.register(
              c.front_socket(),
              client_token,
              Ready::readable() | Ready::writable() | Ready::from(UnixReady::hup() | UnixReady::error()),
              PollOpt::edge()
            );
            (Rc::new(RefCell::new(c)), true)
          }).map_err(|e| {
            match e.kind() {
              // WouldBlock just means the accept queue is drained
              ErrorKind::WouldBlock => AcceptError::WouldBlock,
              other => {
                error!("accept() IO error: {:?}", e);
                AcceptError::IoError
              }
            }
          })
        } else {
          Err(AcceptError::IoError)
        }
      } else {
        Err(AcceptError::IoError)
      }
    } else {
      error!("could not get buffers from pool");
      Err(AcceptError::TooManyClients)
    }
  }
  /// Drains all pending connections on every listener, closing each one
  /// immediately (used when the proxy cannot take more clients).
  fn accept_flush(&mut self) {
    for ref listener in self.listeners.values() {
      if let Some(ref sock) = listener.sock.as_ref() {
        while sock.accept().is_ok() {
          error!("accepting and closing connection");
        }
      }
    }
  }
  /// Decrements the connection counter of the backend matching `addr`
  /// for the given application.
  fn close_backend(&mut self, app_id: String, addr: &SocketAddr) {
    if let Some(app_backends) = self.backends.get_mut(&app_id) {
      if let Some(ref mut backend) = app_backends.iter_mut().find(|backend| &backend.address == addr) {
        backend.dec_connections();
      }
    }
  }
}
/// Spawns a demo TCP proxy in a background thread and pre-loads it with two
/// example fronts/backends; returns the command channel used to drive it.
///
/// Only used by the tests below; the proxy listens on 127.0.0.1:1234/1235 and
/// forwards to 127.0.0.1:5678.
pub fn start_example() -> Channel<OrderMessage,OrderMessageAnswer> {
  use network::proxy::ProxyClientCast;
  info!("listen for connections");
  let (mut command, channel) = Channel::generate(1000, 10000).expect("should create a channel");
  thread::spawn(move|| {
    info!("starting event loop");
    let mut poll = Poll::new().expect("could not create event loop");
    let max_buffers = 10;
    let buffer_size = 16384;
    // two buffers (front + back) can be checked out per client
    let pool = Rc::new(RefCell::new(
      Pool::with_capacity(2*max_buffers, 0, || BufferQueue::with_capacity(buffer_size))
    ));
    let mut clients: Slab<Rc<RefCell<ProxyClientCast>>,ClientToken> = Slab::with_capacity(max_buffers);
    // the first two slab slots are reserved so their indices can be used as
    // the channel and metrics tokens
    {
      let entry = clients.vacant_entry().expect("client list should have enough room at startup");
      info!("taking token {:?} for channel", entry.index());
      entry.insert(Rc::new(RefCell::new(ListenClient { protocol: Protocol::HTTPListen })));
    }
    {
      let entry = clients.vacant_entry().expect("client list should have enough room at startup");
      info!("taking token {:?} for metrics", entry.index());
      entry.insert(Rc::new(RefCell::new(ListenClient { protocol: Protocol::HTTPListen })));
    }
    let (configuration, tokens) = ServerConfiguration::new(&mut poll, pool, Vec::new(), vec!());
    let (scm_server, scm_client) = UnixStream::pair().unwrap();
    let mut s = Server::new(poll, channel, ScmSocket::new(scm_server.as_raw_fd()),
      clients, None, None, Some(configuration), None, max_buffers);
    info!("will run");
    s.run();
    info!("ending event loop");
  });
  // configure a first front on port 1234 with one backend on port 5678
  {
    let front = TcpFront {
      app_id: String::from("yolo"),
      ip_address: String::from("127.0.0.1"),
      port: 1234,
    };
    let backend = messages::Backend {
      app_id: String::from("yolo"),
      backend_id: String::from("yolo-0"),
      ip_address: String::from("127.0.0.1"),
      port: 5678,
    };
    command.write_message(&OrderMessage { id: String::from("ID_YOLO1"), order: Order::AddTcpFront(front) });
    command.write_message(&OrderMessage { id: String::from("ID_YOLO2"), order: Order::AddBackend(backend) });
  }
  // and a second front on port 1235 pointing at the same backend
  {
    let front = TcpFront {
      app_id: String::from("yolo"),
      ip_address: String::from("127.0.0.1"),
      port: 1235,
    };
    let backend = messages::Backend {
      app_id: String::from("yolo"),
      backend_id: String::from("yolo-0"),
      ip_address: String::from("127.0.0.1"),
      port: 5678,
    };
    command.write_message(&OrderMessage { id: String::from("ID_YOLO3"), order: Order::AddTcpFront(front) });
    command.write_message(&OrderMessage { id: String::from("ID_YOLO4"), order: Order::AddBackend(backend) });
  }
  command
}
/// Standalone entry point for the TCP proxy: builds the event loop, buffer
/// pool and client slab, reserves the first slab slots for the channel,
/// metrics and the listener token, then runs the server loop until shutdown.
pub fn start(max_buffers: usize, buffer_size:usize, channel: ProxyChannel) {
  use network::proxy::ProxyClientCast;
  let mut poll = Poll::new().expect("could not create event loop");
  // two buffers (front + back) can be checked out per client
  let pool = Rc::new(RefCell::new(
    Pool::with_capacity(2*max_buffers, 0, || BufferQueue::with_capacity(buffer_size))
  ));
  let mut clients: Slab<Rc<RefCell<ProxyClientCast>>,ClientToken> = Slab::with_capacity(max_buffers);
  // slot 0: command channel token
  {
    let entry = clients.vacant_entry().expect("client list should have enough room at startup");
    info!("taking token {:?} for channel", entry.index());
    entry.insert(Rc::new(RefCell::new(ListenClient { protocol: Protocol::HTTPListen })));
  }
  // slot 1: metrics token
  {
    let entry = clients.vacant_entry().expect("client list should have enough room at startup");
    info!("taking token {:?} for metrics", entry.index());
    entry.insert(Rc::new(RefCell::new(ListenClient { protocol: Protocol::HTTPListen })));
  }
  // slot 2: reserved so its index doubles as the listener token
  let token = {
    let entry = clients.vacant_entry().expect("client list should have enough room at startup");
    let e = entry.insert(Rc::new(RefCell::new(ListenClient { protocol: Protocol::HTTPListen })));
    Token(e.index().0)
  };
  let (configuration, tokens) = ServerConfiguration::new(&mut poll, pool, Vec::new(), vec!(token));
  let (scm_server, scm_client) = UnixStream::pair().unwrap();
  let mut server = Server::new(poll, channel, ScmSocket::new(scm_server.as_raw_fd()), clients, None, None, Some(configuration), None, max_buffers);
  info!("starting event loop");
  server.run();
  info!("ending event loop");
}
#[cfg(test)]
mod tests {
  use super::*;
  use std::net::{TcpListener, TcpStream, Shutdown};
  use std::io::{Read,Write};
  use std::time::Duration;
  use std::{thread,str};
  /// End-to-end smoke test: starts the echo backend and the example proxy,
  /// then checks that data sent through ports 1234 is echoed back with the
  /// backend's " END" suffix. Relies on sleeps for ordering, so it is
  /// timing-sensitive.
  #[allow(unused_mut, unused_must_use, unused_variables)]
  #[test]
  fn mi() {
    setup_test_logger!();
    thread::spawn(|| { start_server(); });
    let tx = start_example();
    thread::sleep(Duration::from_millis(300));
    let mut s1 = TcpStream::connect("127.0.0.1:1234").expect("could not parse address");
    let mut s3 = TcpStream::connect("127.0.0.1:1234").expect("could not parse address");
    thread::sleep(Duration::from_millis(300));
    let mut s2 = TcpStream::connect("127.0.0.1:1234").expect("could not parse address");
    s1.write(&b"hello"[..]);
    println!("s1 sent");
    s2.write(&b"pouet pouet"[..]);
    println!("s2 sent");
    thread::sleep(Duration::from_millis(500));
    let mut res = [0; 128];
    s1.write(&b"coucou"[..]);
    let mut sz1 = s1.read(&mut res[..]).expect("could not read from socket");
    println!("s1 received {:?}", str::from_utf8(&res[..sz1]));
    assert_eq!(&res[..sz1], &b"hello END"[..]);
    // s3 never sent anything; closing it must not disturb s1/s2
    s3.shutdown(Shutdown::Both);
    let sz2 = s2.read(&mut res[..]).expect("could not read from socket");
    println!("s2 received {:?}", str::from_utf8(&res[..sz2]));
    assert_eq!(&res[..sz2], &b"pouet pouet END"[..]);
    thread::sleep(Duration::from_millis(400));
    sz1 = s1.read(&mut res[..]).expect("could not read from socket");
    println!("s1 received again({}): {:?}", sz1, str::from_utf8(&res[..sz1]));
    assert_eq!(&res[..sz1], &b"coucou END"[..]);
    //assert!(false);
  }
  /*
  #[allow(unused_mut, unused_must_use, unused_variables)]
  #[test]
  fn concurrent() {
    use std::sync::mpsc;
    use time;
    let thread_nb = 127;
    thread::spawn(|| { start_server(); });
    start();
    thread::sleep_ms(300);
    let (tx, rx) = mpsc::channel();
    let begin = time::precise_time_s();
    for i in 0..thread_nb {
      let id = i;
      let tx = tx.clone();
      thread::Builder::new().name(id.to_string()).spawn(move || {
        let s = format!("[{}] Hello world!\n", id);
        let v: Vec<u8> = s.bytes().collect();
        if let Ok(mut conn) = TcpStream::connect("127.0.0.1:1234") {
          let mut res = [0; 128];
          for j in 0..10000 {
            conn.write(&v[..]);
            if j % 5 == 0 {
              if let Ok(sz) = conn.read(&mut res[..]) {
                //println!("[{}] received({}): {:?}", id, sz, str::from_utf8(&res[..sz]));
              } else {
                println!("failed reading");
                tx.send(());
                return;
              }
            }
          }
          tx.send(());
          return;
        } else {
          println!("failed connecting");
          tx.send(());
          return;
        }
      });
    }
    //thread::sleep_ms(5000);
    for i in 0..thread_nb {
      rx.recv();
    }
    let end = time::precise_time_s();
    println!("executed in {} seconds", end - begin);
    assert!(false);
  }
  */
  /// Minimal echo backend on 127.0.0.1:5678: echoes every payload back and
  /// appends " END" after a short delay. One thread per accepted connection.
  #[allow(unused_mut, unused_must_use, unused_variables)]
  fn start_server() {
    let listener = TcpListener::bind("127.0.0.1:5678").expect("could not parse address");
    fn handle_client(stream: &mut TcpStream, id: u8) {
      let mut buf = [0; 128];
      let response = b" END";
      while let Ok(sz) = stream.read(&mut buf[..]) {
        if sz > 0 {
          println!("ECHO[{}] got \"{:?}\"", id, str::from_utf8(&buf[..sz]));
          stream.write(&buf[..sz]);
          thread::sleep(Duration::from_millis(20));
          stream.write(&response[..]);
        }
      }
    }
    let mut count = 0;
    thread::spawn(move|| {
      for conn in listener.incoming() {
        match conn {
          Ok(mut stream) => {
            thread::spawn(move|| {
              println!("got a new client: {}", count);
              handle_client(&mut stream, count)
            });
          }
          Err(e) => { println!("connection failed"); }
        }
        count += 1;
      }
    });
  }
}
|
use std::thread::{self,Thread,Builder};
use std::sync::mpsc::{self,channel,Receiver};
use std::sync::{Arc,Mutex};
use std::rc::{Rc,Weak};
use std::cell::RefCell;
use std::mem;
use mio::*;
use mio::tcp::*;
use mio::timer::Timeout;
use mio_uds::UnixStream;
use std::io::{self,Read,Write,ErrorKind,BufReader};
use std::collections::HashMap;
use std::error::Error;
use slab::Slab;
use pool::{Pool,Checkout};
use std::net::{IpAddr,SocketAddr};
use std::str::{FromStr, from_utf8, from_utf8_unchecked};
use time::{precise_time_s, precise_time_ns};
use rand::random;
use openssl::ssl::{self, SslContext, SslContextBuilder, SslMethod,
Ssl, SslRef, SslStream, SniError};
use openssl::x509::X509;
use openssl::dh::Dh;
use openssl::pkey::PKey;
use openssl::hash::MessageDigest;
use openssl::nid;
use nom::IResult;
use parser::http11::{HttpState,RequestState,ResponseState,RRequestLine,parse_request_until_stop,hostname_and_port};
use network::buffer::Buffer;
use network::buffer_queue::BufferQueue;
use network::{Backend,ClientResult,ServerMessage,ServerMessageStatus,ConnectionError,ProxyOrder,Protocol};
use network::proxy::{BackendConnectAction,Server,ProxyConfiguration,ProxyClient,
Readiness,ListenToken,FrontToken,BackToken,Channel};
use messages::{self,Command,TlsFront,TlsProxyConfiguration};
use network::http::{self,DefaultAnswers};
use network::socket::{SocketHandler,SocketResult,server_bind};
use network::trie::*;
use network::protocol::{ProtocolResult,TlsHandshake,Http,Pipe};
use command::CommandChannel;
// Aliases documenting which side of the proxy a mio Token indexes.
type BackendToken = Token;
type ClientToken = Token;
/// Routing entry for one HTTPS front: matches a hostname and path prefix to
/// an application, and records which certificate (by fingerprint) serves it.
#[derive(Debug,Clone,PartialEq,Eq)]
pub struct TlsApp {
  pub app_id: String,
  pub hostname: String,
  pub path_begin: String,
  pub cert_fingerprint: CertFingerprint,
}
/// Protocol state machine for a TLS client connection. Upgrades only move
/// forward: Handshake -> Http -> WebSocket (see `TlsClient::upgrade`).
pub enum State {
  Handshake(TlsHandshake),
  Http(Http<SslStream<TcpStream>>),
  WebSocket(Pipe<SslStream<TcpStream>>)
}
/// One client connection of the TLS proxy, wrapping the protocol state
/// machine plus front socket, tokens, timeouts and the shared buffer pool.
pub struct TlsClient {
  front: Option<TcpStream>,
  front_token: Option<Token>,
  front_timeout: Option<Timeout>,
  back_timeout: Option<Timeout>,
  // always Some between calls; taken temporarily during upgrade()
  protocol: Option<State>,
  public_address: Option<IpAddr>,
  ssl: Option<Ssl>,
  // Weak so clients do not keep the pool alive
  pool: Weak<RefCell<Pool<BufferQueue>>>,
}
impl TlsClient {
  /// Creates a client in the Handshake state for a freshly accepted socket.
  pub fn new(server_context: &str, ssl:Ssl, sock: TcpStream, pool: Weak<RefCell<Pool<BufferQueue>>>, public_address: Option<IpAddr>) -> Option<TlsClient> {
    //FIXME: we should not need to clone the socket. Maybe do the accept here instead of
    // in TlsHandshake?
    let s = sock.try_clone().unwrap();
    let handshake = TlsHandshake::new(server_context, ssl, s);
    Some(TlsClient {
      front: Some(sock),
      front_token: None,
      front_timeout: None,
      back_timeout: None,
      protocol: Some(State::Handshake(handshake)),
      public_address: public_address,
      ssl: None,
      pool: pool,
    })
  }
  /// Returns the HTTP protocol state, or None while still handshaking or
  /// after the websocket upgrade.
  pub fn http(&mut self) -> Option<&mut Http<SslStream<TcpStream>>> {
    if let State::Http(ref mut http) = *self.protocol.as_mut().unwrap() {
      Some(http)
    } else {
      None
    }
  }
  /// Advances the protocol state machine one step:
  /// Handshake -> Http (checking out buffers from the pool) or
  /// Http -> WebSocket (moving sockets and buffers into a Pipe).
  /// Returns false when the upgrade failed and the client must be closed.
  pub fn upgrade(&mut self) -> bool {
    // protocol is temporarily taken out of the Option to move it by value
    let protocol = self.protocol.take().unwrap();
    if let State::Handshake(handshake) = protocol {
      if let Some(pool) = self.pool.upgrade() {
        let mut p = pool.borrow_mut();
        if let (Some(front_buf), Some(back_buf)) = (p.checkout(), p.checkout()) {
          let mut http = Http::new(&handshake.server_context, handshake.stream.unwrap(), front_buf,
            back_buf, self.public_address.clone()).unwrap();
          // carry over readiness from the handshake
          http.readiness = handshake.readiness;
          http.readiness.front_interest = Ready::readable() | Ready::hup() | Ready::error();
          http.set_front_token(self.front_token.as_ref().unwrap().clone());
          self.ssl = handshake.ssl;
          self.protocol = Some(State::Http(http));
          return true;
        } else {
          error!("could not get buffers");
          //FIXME: must return an error and stop the connection here
        }
      }
      // NOTE(review): on this path `self.protocol` is left as None
      false
    } else if let State::Http(http) = protocol {
      info!("https switching to wss");
      let front_token = http.front_token().unwrap();
      let back_token = http.back_token().unwrap();
      let mut pipe = Pipe::new(&http.server_context, http.frontend, http.backend.unwrap(),
        http.front_buf, http.back_buf, http.public_address).unwrap();
      pipe.readiness.front_readiness = http.readiness.front_readiness;
      pipe.readiness.back_readiness = http.readiness.back_readiness;
      pipe.set_front_token(front_token);
      pipe.set_back_token(back_token);
      self.protocol = Some(State::WebSocket(pipe));
      true
    } else {
      // already a websocket: nothing to do
      self.protocol = Some(protocol);
      true
    }
  }
}
impl ProxyClient for TlsClient {
  fn front_socket(&self) -> &TcpStream {
    self.front.as_ref().unwrap()
  }
  // only the Http and WebSocket states have a backend socket
  fn back_socket(&self) -> Option<&TcpStream> {
    match *self.protocol.as_ref().unwrap() {
      State::Handshake(ref handshake) => None,
      State::Http(ref http) => http.back_socket(),
      State::WebSocket(ref pipe) => pipe.back_socket(),
    }
  }
  fn front_token(&self) -> Option<Token> {
    self.front_token
  }
  fn back_token(&self) -> Option<Token> {
    if let State::Http(ref http) = *self.protocol.as_ref().unwrap() {
      http.back_token()
    } else {
      None
    }
  }
  fn close(&mut self) {
    //println!("TLS closing[{:?}] temp->front: {:?}, temp->back: {:?}", self.token, *self.temp.front_buf, *self.temp.back_buf);
    self.http().map(|http| http.close());
  }
  fn log_context(&self) -> String {
    if let State::Http(ref http) = *self.protocol.as_ref().unwrap() {
      http.log_context()
    } else {
      "".to_string()
    }
  }
  // NOTE(review): panics if called while not in the Http state
  fn set_back_socket(&mut self, sock:TcpStream) {
    self.http().unwrap().set_back_socket(sock)
  }
  fn set_front_token(&mut self, token: Token) {
    self.front_token = Some(token);
    // forward the token to the inner protocol when it tracks one
    self.protocol.as_mut().map(|p| match *p {
      State::Http(ref mut http) => http.set_front_token(token),
      _ => {}
    });
  }
  fn set_back_token(&mut self, token: Token) {
    self.http().unwrap().set_back_token(token)
  }
  fn front_timeout(&mut self) -> Option<Timeout> {
    self.front_timeout.clone()
  }
  fn back_timeout(&mut self) -> Option<Timeout> {
    self.back_timeout.clone()
  }
  fn set_front_timeout(&mut self, timeout: Timeout) {
    self.front_timeout = Some(timeout)
  }
  fn set_back_timeout(&mut self, timeout: Timeout) {
    self.back_timeout = Some(timeout);
  }
  fn front_hup(&mut self) -> ClientResult {
    self.http().unwrap().front_hup()
  }
  fn back_hup(&mut self) -> ClientResult {
    self.http().unwrap().back_hup()
  }
  /// Front socket is readable: delegate to the current protocol; when the
  /// handshake finishes it requests an upgrade, after which the read is
  /// retried on the new Http state.
  fn readable(&mut self) -> ClientResult {
    let (upgrade, result) = match *self.protocol.as_mut().unwrap() {
      State::Handshake(ref mut handshake) => handshake.readable(),
      State::Http(ref mut http) => (ProtocolResult::Continue, http.readable()),
      State::WebSocket(ref mut pipe) => (ProtocolResult::Continue, pipe.readable()),
    };
    if upgrade == ProtocolResult::Continue {
      result
    } else {
      if self.upgrade() {
        match *self.protocol.as_mut().unwrap() {
          State::Http(ref mut http) => http.readable(),
          _ => result
        }
      } else {
        ClientResult::CloseClient
      }
    }
  }
  fn writable(&mut self) -> ClientResult {
    match *self.protocol.as_mut().unwrap() {
      // nothing to write during the handshake
      State::Handshake(ref mut handshake) => ClientResult::CloseClient,
      State::Http(ref mut http) => http.writable(),
      State::WebSocket(ref mut pipe) => pipe.writable(),
    }
  }
  /// Back socket is readable: delegate; an Http protocol switch (101) yields
  /// an upgrade to WebSocket, after which the read is retried on the pipe.
  fn back_readable(&mut self) -> ClientResult {
    let (upgrade, result) = match *self.protocol.as_mut().unwrap() {
      State::Http(ref mut http) => http.back_readable(),
      State::Handshake(ref mut handshake) => (ProtocolResult::Continue, ClientResult::CloseClient),
      State::WebSocket(ref mut pipe) => (ProtocolResult::Continue, pipe.back_readable()),
    };
    if upgrade == ProtocolResult::Continue {
      result
    } else {
      if self.upgrade() {
        match *self.protocol.as_mut().unwrap() {
          State::WebSocket(ref mut pipe) => pipe.back_readable(),
          _ => result
        }
      } else {
        ClientResult::CloseBothFailure
      }
    }
  }
  fn back_writable(&mut self) -> ClientResult {
    //self.http().unwrap().back_writable()
    match *self.protocol.as_mut().unwrap() {
      State::Handshake(ref mut handshake) => ClientResult::CloseClient,
      State::Http(ref mut http) => http.back_writable(),
      State::WebSocket(ref mut pipe) => pipe.back_writable(),
    }
  }
  fn remove_backend(&mut self) -> (Option<String>, Option<SocketAddr>) {
    self.http().unwrap().remove_backend()
  }
  fn readiness(&mut self) -> &mut Readiness {
    let r = match *self.protocol.as_mut().unwrap() {
      State::Handshake(ref mut handshake) => &mut handshake.readiness,
      State::Http(ref mut http) => http.readiness(),
      State::WebSocket(ref mut pipe) => &mut pipe.readiness,
    };
    //info!("current readiness: {:?}", r);
    r
  }
  fn protocol(&self) -> Protocol {
    Protocol::TLS
  }
}
/// Extracts the first Common Name (CN) entry from the certificate's subject,
/// decoded as UTF-8. Returns `None` when there is no CN entry or it is not
/// valid UTF-8.
fn get_cert_common_name(cert: &X509) -> Option<String> {
  let mut cn_entries = cert.subject_name().entries_by_nid(nid::COMMONNAME);
  match cn_entries.next() {
    Some(entry) => match entry.data().as_utf8() {
      Ok(utf8_name) => Some(String::from(&*utf8_name)),
      Err(_)        => None,
    },
    None => None,
  }
}
// String aliases documenting the role of keys in the routing tables below.
pub type AppId = String;
pub type HostName = String;
pub type PathBegin = String;
//maybe not the most efficient type for that
// SHA-256 fingerprint of a certificate, used as the key for TLS contexts.
pub type CertFingerprint = Vec<u8>;
/// One TLS context plus its PEM certificate, reference-counted by the number
/// of fronts that use it (see add_http_front / remove_http_front).
pub struct TlsData {
  context: SslContext,
  certificate: Vec<u8>,
  refcount: usize,
}
/// State of the HTTPS proxy: listening socket, routing tables, TLS contexts
/// shared with the SNI callback, and per-proxy defaults.
pub struct ServerConfiguration {
  listener: TcpListener,
  address: SocketAddr,
  instances: HashMap<AppId, Vec<Backend>>,
  fronts: HashMap<HostName, Vec<TlsApp>>,
  // hostname -> certificate fingerprint, shared with the SNI callback closure
  domains: Arc<Mutex<TrieNode<CertFingerprint>>>,
  default_context: TlsData,
  // fingerprint -> TLS context, shared with the SNI callback closure
  contexts: Arc<Mutex<HashMap<CertFingerprint,TlsData>>>,
  channel: Channel,
  pool: Rc<RefCell<Pool<BufferQueue>>>,
  answers: DefaultAnswers,
  // timeouts in milliseconds (hardcoded to 50000 in new())
  front_timeout: u64,
  back_timeout: u64,
  config: TlsProxyConfiguration,
  tag: String,
}
impl ServerConfiguration {
/// Builds the HTTPS proxy: creates the default TLS context (with its SNI
/// callback), registers the default front, binds the listening socket and
/// installs the default 404/503 answers.
///
/// Returns the bind error when the front address cannot be bound.
pub fn new(tag: String, config: TlsProxyConfiguration, channel: Channel, event_loop: &mut Poll, start_at: usize) -> io::Result<ServerConfiguration> {
  let contexts:HashMap<CertFingerprint,TlsData> = HashMap::new();
  let mut domains = TrieNode::root();
  let mut fronts = HashMap::new();
  // contexts and domains are shared with the SNI callback inside the default
  // context, hence the Arc<Mutex<..>> clones
  let rc_ctx = Arc::new(Mutex::new(contexts));
  let ref_ctx = rc_ctx.clone();
  let rc_domains = Arc::new(Mutex::new(domains));
  let ref_domains = rc_domains.clone();
  let cl_tag = tag.clone();
  let default_name = config.default_name.as_ref().map(|name| name.clone()).unwrap_or(String::new());
  // NOTE(review): unwrap() panics when the default certificate/key are invalid
  let (fingerprint, mut tls_data, names):(Vec<u8>,TlsData, Vec<String>) = Self::create_default_context(&config, ref_ctx, ref_domains, cl_tag, default_name).unwrap();
  let cert = X509::from_pem(&tls_data.certificate).unwrap();
  let common_name: Option<String> = get_cert_common_name(&cert);
  info!("{}\tgot common name: {:?}", &tag, common_name);
  let app = TlsApp {
    app_id: config.default_app_id.clone().unwrap_or(String::new()),
    hostname: config.default_name.clone().unwrap_or(String::new()),
    path_begin: String::new(),
    cert_fingerprint: fingerprint.clone(),
  };
  fronts.insert(config.default_name.clone().unwrap_or(String::from("")), vec![app]);
  match server_bind(&config.front) {
    Ok(listener) => {
      // level-triggered: keep getting accept events while connections are pending
      event_loop.register(&listener, Token(start_at), Ready::readable(), PollOpt::level());
      // fall back to built-in answers when none are configured
      let default = DefaultAnswers {
        NotFound: Vec::from(if config.answer_404.len() > 0 {
          config.answer_404.as_bytes()
        } else {
          &b"HTTP/1.1 404 Not Found\r\nCache-Control: no-cache\r\nConnection: close\r\n\r\n"[..]
        }),
        ServiceUnavailable: Vec::from(if config.answer_503.len() > 0 {
          config.answer_503.as_bytes()
        } else {
          &b"HTTP/1.1 503 your application is in deployment\r\nCache-Control: no-cache\r\nConnection: close\r\n\r\n"[..]
        }),
      };
      Ok(ServerConfiguration {
        listener: listener,
        address: config.front.clone(),
        instances: HashMap::new(),
        fronts: fronts,
        domains: rc_domains,
        default_context: tls_data,
        contexts: rc_ctx,
        channel: channel,
        pool: Rc::new(RefCell::new(
          Pool::with_capacity(2*config.max_connections, 0, || BufferQueue::with_capacity(config.buffer_size))
        )),
        front_timeout: 50000,
        back_timeout: 50000,
        answers: default,
        config: config,
        tag: tag,
      })
    },
    Err(e) => {
      error!("{}\tcould not create listener {:?}: {:?}", tag, config.front, e);
      Err(e)
    }
  }
}
/// Builds the default TLS context: hardened options (TLSv1.1+ only, no
/// compression/tickets, server cipher preference), the default certificate
/// and key, and an SNI callback that swaps in the per-domain context found
/// through the shared `domains`/`contexts` maps.
///
/// Returns the certificate fingerprint, the built context and the domain
/// names found in the certificate, or `None` on any OpenSSL failure.
pub fn create_default_context(config: &TlsProxyConfiguration, ref_ctx: Arc<Mutex<HashMap<CertFingerprint,TlsData>>>, ref_domains: Arc<Mutex<TrieNode<CertFingerprint>>>, tag: String, default_name: String) -> Option<(CertFingerprint,TlsData,Vec<String>)> {
  let ctx = SslContext::builder(SslMethod::tls());
  if let Err(e) = ctx {
    //return Err(io::Error::new(io::ErrorKind::Other, e.description()));
    return None
  }
  let mut context = ctx.unwrap();
  // disable legacy protocols and risky features
  let mut options = context.options();
  options.insert(ssl::SSL_OP_NO_SSLV2);
  options.insert(ssl::SSL_OP_NO_SSLV3);
  options.insert(ssl::SSL_OP_NO_TLSV1);
  options.insert(ssl::SSL_OP_NO_COMPRESSION);
  options.insert(ssl::SSL_OP_NO_TICKET);
  options.insert(ssl::SSL_OP_CIPHER_SERVER_PREFERENCE);
  let opt = context.set_options(options);
  context.set_cipher_list(&config.cipher_list);
  match Dh::get_2048_256() {
    Ok(dh) => context.set_tmp_dh(&dh),
    Err(e) => {
      //return Err(io::Error::new(io::ErrorKind::Other, e.description()))
      return None
    }
  };
  context.set_ecdh_auto(true);
  //FIXME: get the default cert and key from the configuration
  // fall back to the bundled self-signed certificate/key when none configured
  let cert_read = config.default_certificate.as_ref().map(|vec| &vec[..]).unwrap_or(&include_bytes!("../../../assets/certificate.pem")[..]);
  let key_read = config.default_key.as_ref().map(|vec| &vec[..]).unwrap_or(&include_bytes!("../../../assets/key.pem")[..]);
  if let Some(path) = config.default_certificate_chain.as_ref() {
    context.set_certificate_chain_file(path);
  }
  if let (Ok(cert), Ok(key)) = (X509::from_pem(&cert_read[..]), PKey::private_key_from_pem(&key_read[..])) {
    if let Ok(fingerprint) = cert.fingerprint(MessageDigest::sha256()) {
      // NOTE(review): the Results of set_certificate/set_private_key are ignored
      context.set_certificate(&cert);
      context.set_private_key(&key);
      // collect the DNS subject alternative names for domain routing
      let mut names: Vec<String> = cert.subject_alt_names().map(|names| {
        names.iter().filter_map(|general_name|
          general_name.dnsname().map(|name| String::from(name))
        ).collect()
      }).unwrap_or(vec!());
      info!("{}\tgot subject alt names: {:?}", &tag, names);
      {
        let mut domains = ref_domains.lock().unwrap();
        for name in &names {
          domains.domain_insert(name.clone().into_bytes(), fingerprint.clone());
        }
      }
      if let Some(common_name) = get_cert_common_name(&cert) {
        info!("got common name: {:?}", common_name);
        names.push(common_name);
      }
      // SNI callback: look up the fingerprint for the requested server name
      // and switch the connection to that certificate's context; fall through
      // to the default certificate otherwise
      context.set_servername_callback(move |ssl: &mut SslRef| {
        let contexts = ref_ctx.lock().unwrap();
        let domains = ref_domains.lock().unwrap();
        info!("{}\tref: {:?}", tag, ssl);
        if let Some(servername) = ssl.servername().map(|s| s.to_string()) {
          info!("checking servername: {}", servername);
          if &servername == &default_name {
            return Ok(());
          }
          info!("{}\tlooking for fingerprint for {:?}", tag, servername);
          if let Some(kv) = domains.domain_lookup(servername.as_bytes()) {
            info!("{}\tlooking for context for {:?} with fingerprint {:?}", tag, servername, kv.1);
            if let Some(ref tls_data) = contexts.get(&kv.1) {
              info!("{}\tfound context for {:?}", tag, servername);
              let context: &SslContext = &tls_data.context;
              if let Ok(()) = ssl.set_ssl_context(context) {
                info!("{}\tservername is now {:?}", tag, ssl.servername());
                return Ok(());
              } else {
                error!("{}\tno context found for {:?}", tag, servername);
              }
            }
          }
        } else {
          error!("{}\tgot no server name from ssl, answering with default one", tag);
        }
        //answer ok because we use the default certificate
        Ok(())
      });
      let tls_data = TlsData {
        context: context.build(),
        certificate: cert_read.to_vec(),
        refcount: 1,
      };
      Some((fingerprint, tls_data, names))
    } else {
      None
    }
  } else {
    None
  }
}
/// Adds an HTTPS front: builds a hardened TLS context from the front's
/// certificate and key, stores it (reference-counted) under the certificate's
/// SHA-256 fingerprint, indexes its CN and SANs in the domain trie, and
/// registers the routing entry. Returns false on any OpenSSL failure.
pub fn add_http_front(&mut self, http_front: TlsFront, event_loop: &mut Poll) -> bool {
  //FIXME: insert some error management with a Result here
  let c = SslContext::builder(SslMethod::tls());
  if c.is_err() { return false; }
  let mut ctx = c.unwrap();
  // same hardening as the default context: TLSv1.1+ only, no compression,
  // no session tickets, server-side cipher preference
  let mut options = ctx.options();
  options.insert(ssl::SSL_OP_NO_SSLV2);
  options.insert(ssl::SSL_OP_NO_SSLV3);
  options.insert(ssl::SSL_OP_NO_TLSV1);
  options.insert(ssl::SSL_OP_NO_COMPRESSION);
  options.insert(ssl::SSL_OP_NO_TICKET);
  options.insert(ssl::SSL_OP_CIPHER_SERVER_PREFERENCE);
  let opt = ctx.set_options(options);
  match Dh::get_2048_256() {
    Ok(dh) => ctx.set_tmp_dh(&dh),
    Err(e) => {
      return false;
    }
  };
  ctx.set_ecdh_auto(true);
  let mut cert_read = &http_front.certificate.as_bytes()[..];
  let mut key_read = &http_front.key.as_bytes()[..];
  // unparseable chain certificates are silently skipped
  let cert_chain: Vec<X509> = http_front.certificate_chain.iter().filter_map(|c| {
    X509::from_pem(c.as_bytes()).ok()
  }).collect();
  if let (Ok(cert), Ok(key)) = (X509::from_pem(&mut cert_read), PKey::private_key_from_pem(&mut key_read)) {
    //FIXME: would need more logs here
    //FIXME
    let fingerprint = cert.fingerprint(MessageDigest::sha256()).unwrap();
    let common_name: Option<String> = get_cert_common_name(&cert);
    info!("{}\tgot common name: {:?}", self.tag, common_name);
    let names: Vec<String> = cert.subject_alt_names().map(|names| {
      names.iter().filter_map(|general_name|
        general_name.dnsname().map(|name| String::from(name))
      ).collect()
    }).unwrap_or(vec!());
    info!("{}\tgot subject alt names: {:?}", self.tag, names);
    ctx.set_certificate(&cert);
    ctx.set_private_key(&key);
    // BUG FIX: the original built `cert_chain.iter().map(...)` and never
    // consumed the iterator; map() is lazy, so add_extra_chain_cert() never
    // ran and the intermediate certificates were silently dropped. Use a
    // plain loop for the side effect (consuming the Vec also avoids the
    // per-certificate clone).
    for chain_cert in cert_chain {
      ctx.add_extra_chain_cert(chain_cert);
    }
    let tls_data = TlsData {
      context: ctx.build(),
      certificate: cert_read.to_vec(),
      refcount: 1,
    };
    // if the name or the fingerprint are already used,
    // those insertions should fail, because it would be
    // from the same certificate
    // Add a refcount?
    //FIXME: this is blocking
    //this lock is only obtained from this thread, so is it alright?
    {
      let mut contexts = self.contexts.lock().unwrap();
      if contexts.contains_key(&fingerprint) {
        // same certificate added again: bump the refcount, drop tls_data
        contexts.get_mut(&fingerprint).map(|data| {
          data.refcount += 1;
        });
      } else {
        contexts.insert(fingerprint.clone(), tls_data);
      }
    }
    {
      let mut domains = self.domains.lock().unwrap();
      if let Some(name) = common_name {
        domains.domain_insert(name.into_bytes(), fingerprint.clone());
      }
      for name in names {
        domains.domain_insert(name.into_bytes(), fingerprint.clone());
      }
    }
    let app = TlsApp {
      app_id: http_front.app_id.clone(),
      hostname: http_front.hostname.clone(),
      path_begin: http_front.path_begin.clone(),
      cert_fingerprint: fingerprint.clone(),
    };
    if let Some(fronts) = self.fronts.get_mut(&http_front.hostname) {
      if ! fronts.contains(&app) {
        fronts.push(app.clone());
      }
    }
    if self.fronts.get(&http_front.hostname).is_none() {
      self.fronts.insert(http_front.hostname, vec![app]);
    }
    true
  } else {
    false
  }
}
/// Removes the front registered for `front.hostname` whose app_id matches
/// `front.app_id`, and decrements the refcount of the certificate that
/// front referenced. When the refcount reaches zero, the TLS context is
/// dropped and every name carried by the certificate (common name and
/// subject alt names, re-parsed from the stored PEM) is removed from the
/// domain lookup trie.
///
/// NOTE(review): `event_loop` is not used here — presumably kept for
/// signature symmetry with `add_http_front`; confirm before removing.
pub fn remove_http_front(&mut self, front: TlsFront, event_loop: &mut Poll) {
  info!("{}\tremoving http_front {:?}", self.tag, front);

  if let Some(fronts) = self.fronts.get_mut(&front.hostname) {
    // only the first front with a matching app_id is removed
    if let Some(pos) = fronts.iter().position(|f| &f.app_id == &front.app_id) {
      // shadows the argument: we need the stored front's cert_fingerprint
      let front = fronts.remove(pos);

      {
        // both locks are only taken from this thread and the SNI callback
        let mut contexts = self.contexts.lock().unwrap();
        let mut domains  = self.domains.lock().unwrap();

        // decrement the refcount; `None` means the fingerprint was unknown
        let must_delete = contexts.get_mut(&front.cert_fingerprint).map(|tls_data| {
          tls_data.refcount -= 1;
          tls_data.refcount == 0
        });

        if must_delete == Some(true) {
          if let Some(data) = contexts.remove(&front.cert_fingerprint) {
            // re-parse the stored PEM to recover the names to unregister
            if let Ok(cert) = X509::from_pem(&data.certificate) {
              let common_name: Option<String> = get_cert_common_name(&cert);
              //info!("got common name: {:?}", common_name);
              if let Some(name) = common_name {
                domains.domain_remove(&name.into_bytes());
              }

              let names: Vec<String> = cert.subject_alt_names().map(|names| {
                names.iter().filter_map(|general_name|
                  general_name.dnsname().map(|name| String::from(name))
                ).collect()
              }).unwrap_or(vec!());
              for name in names {
                domains.domain_remove(&name.into_bytes());
              }
            }
          }
        }
      }
    }
  }
}
/// Registers a backend address for `app_id`, creating the application's
/// backend list on first use. A duplicate address is ignored.
///
/// NOTE(review): `event_loop` is unused — kept for interface symmetry.
pub fn add_instance(&mut self, app_id: &str, instance_address: &SocketAddr, event_loop: &mut Poll) {
  // FIX: the original did a get_mut followed by a second lookup and built
  // the Backend twice; the entry API does one lookup and one construction
  let backend = Backend::new(*instance_address);
  let addrs = self.instances.entry(String::from(app_id)).or_insert_with(Vec::new);
  if !addrs.contains(&backend) {
    addrs.push(backend);
  }
}
/// Unregisters the backend at `instance_address` for `app_id`; logs an
/// error when the application has no registered instances.
///
/// `event_loop` is not used; it mirrors `add_instance`'s signature.
pub fn remove_instance(&mut self, app_id: &str, instance_address: &SocketAddr, event_loop: &mut Poll) {
  match self.instances.get_mut(app_id) {
    Some(backends) => {
      backends.retain(|backend| &backend.address != instance_address);
    },
    None => {
      error!("{}\tInstance was already removed", self.tag);
    }
  }
}
// ToDo factor out with http.rs
/// Returns the front for `host` whose `path_begin` is the longest prefix
/// of `uri`; on equal lengths the earliest front in the list wins.
pub fn frontend_from_request(&self, host: &str, uri: &str) -> Option<&TlsApp> {
  match self.fronts.get(host) {
    None => None,
    Some(http_fronts) => {
      // ToDo match on uri
      let mut best: Option<&TlsApp> = None;
      for candidate in http_fronts.iter().filter(|f| uri.starts_with(&f.path_begin)) {
        // replace only on a strictly longer prefix, so ties keep the
        // first match, as before
        let is_better = match best {
          None    => true,
          Some(b) => candidate.path_begin.len() > b.path_begin.len(),
        };
        if is_better {
          best = Some(candidate);
        }
      }
      best
    }
  }
}
/// Chooses a backend for the requested host/uri and opens a TCP connection
/// to it, picking randomly among the backends that can accept another
/// connection.
///
/// Errors: `HostNotFound` when no front matches the host/uri,
/// `NoBackendAvailable` when the application has no usable backend (the
/// client is also given the 503 answer in that case).
pub fn backend_from_request(&mut self, client: &mut TlsClient, host: &str, uri: &str) -> Result<TcpStream,ConnectionError> {
  trace!("{}\tlooking for backend for host: {}", self.tag, host);
  // strip an optional ":port" suffix before the front lookup
  let real_host = if let Some(h) = host.split(":").next() {
    h
  } else {
    host
  };
  trace!("{}\tlooking for backend for real host: {}", self.tag, real_host);

  if let Some(app_id) = self.frontend_from_request(real_host, uri).map(|ref front| front.app_id.clone()) {
    client.http().unwrap().app_id = Some(app_id.clone());
    // ToDo round-robin on instances
    if let Some(ref mut app_instances) = self.instances.get_mut(&app_id) {
      if app_instances.len() == 0 {
        client.http().unwrap().set_answer(&self.answers.ServiceUnavailable);
        return Err(ConnectionError::NoBackendAvailable);
      }

      let rnd = random::<usize>();
      let mut instances:Vec<&mut Backend> = app_instances.iter_mut().filter(|backend| backend.can_open()).collect();
      // FIX: `rnd % instances.len()` divided by zero (panic) when every
      // backend was filtered out by can_open(); answer 503 instead
      if instances.is_empty() {
        client.http().unwrap().set_answer(&self.answers.ServiceUnavailable);
        return Err(ConnectionError::NoBackendAvailable);
      }
      let idx = rnd % instances.len();

      info!("{}\tConnecting {} -> {:?}", client.http().unwrap().log_context(), host, instances.get(idx).map(|backend| (backend.address, backend.active_connections)));
      instances.get_mut(idx).ok_or(ConnectionError::NoBackendAvailable).and_then(|ref mut backend| {
        let conn = TcpStream::connect(&backend.address).map_err(|_| ConnectionError::NoBackendAvailable);
        if conn.is_ok() {
          // count the connection so can_open() stays accurate
          backend.inc_connections();
        }
        conn
      })
    } else {
      Err(ConnectionError::NoBackendAvailable)
    }
  } else {
    Err(ConnectionError::HostNotFound)
  }
}
}
/// Event-loop integration of the TLS proxy: accepting frontend sockets,
/// connecting clients to backends, and applying configuration orders
/// received on the command channel.
impl ProxyConfiguration<TlsClient> for ServerConfiguration {
  // Accepts one frontend connection and wraps it in a TlsClient that starts
  // in the handshake state, using the default TLS context (the SNI callback
  // may swap in another context later).
  fn accept(&mut self, token: ListenToken) -> Option<(TlsClient,bool)> {
    let accepted = self.listener.accept();

    if let Ok((frontend_sock, _)) = accepted {
      // NOTE(review): the Result of set_nodelay is ignored
      frontend_sock.set_nodelay(true);
      if let Ok(ssl) = Ssl::new(&self.default_context.context) {
        if let Some(c) = TlsClient::new("TLS", ssl, frontend_sock, Rc::downgrade(&self.pool), self.config.public_address) {
          return Some((c, false))
        }
      } else {
        error!("{}\tcould not create ssl context", self.tag);
      }
    } else {
      error!("{}\tcould not accept connection: {:?}", self.tag, accepted);
    }
    None
  }

  // Validates the Host header against the TLS SNI server name, then asks
  // backend_from_request for a backend connection and resets the HTTP state
  // for the new backend.
  fn connect_to_backend(&mut self, event_loop: &mut Poll, client: &mut TlsClient) -> Result<BackendConnectAction,ConnectionError> {
    let h = try!(client.http().unwrap().state().get_host().ok_or(ConnectionError::NoHostGiven));

    let host: &str = if let IResult::Done(i, (hostname, port)) = hostname_and_port(h.as_bytes()) {
      if i != &b""[..] {
        error!("{}\tinvalid remaining chars after hostname", self.tag);
        return Err(ConnectionError::ToBeDefined);
      }

      // it is alright to call from_utf8_unchecked,
      // we already verified that there are only ascii
      // chars in there
      let hostname_str = unsafe { from_utf8_unchecked(hostname) };

      //FIXME: what if we don't use SNI?
      let servername: Option<String> = client.http().unwrap().frontend.ssl().servername().map(|s| s.to_string());
      if servername.as_ref().map(|s| s.as_str()) != Some(hostname_str) {
        error!("{}\tTLS SNI hostname and Host header don't match", self.tag);
        return Err(ConnectionError::HostNotFound);
      }

      //FIXME: we should check that the port is right too
      // drop the explicit :443 so the front lookup sees the bare hostname
      if port == Some(&b"443"[..]) {
        hostname_str
      } else {
        &h
      }
    } else {
      error!("{}\thostname parsing failed", self.tag);
      return Err(ConnectionError::ToBeDefined);
    };

    let rl:RRequestLine = try!(client.http().unwrap().state().get_request_line().ok_or(ConnectionError::NoRequestLineGiven));
    // NOTE(review): this first binding only verifies that a front keep-alive
    // value exists; it is immediately shadowed by the real connection below
    let conn = try!(client.http().unwrap().state().get_front_keep_alive().ok_or(ConnectionError::ToBeDefined));
    let conn = self.backend_from_request(client, &host, &rl.uri);

    match conn {
      Ok(socket) => {
        // carry the parsed request over, but reset the response state for
        // the new backend
        let req_state         = client.http().unwrap().state().request.clone();
        let req_header_end    = client.http().unwrap().state().req_header_end;
        let res_header_end    = client.http().unwrap().state().res_header_end;
        let added_req_header  = client.http().unwrap().state().added_req_header.clone();
        let added_res_header  = client.http().unwrap().state().added_res_header.clone();
        // FIXME: is this still needed?
        client.http().unwrap().set_state(HttpState {
          req_header_end:   req_header_end,
          res_header_end:   res_header_end,
          request:          req_state,
          response:         Some(ResponseState::Initial),
          added_req_header: added_req_header,
          added_res_header: added_res_header,
        });

        client.set_back_socket(socket);
        //FIXME: implement keepalive
        Ok(BackendConnectAction::New)
      },
      Err(ConnectionError::NoBackendAvailable) => {
        client.http().unwrap().set_answer(&self.answers.ServiceUnavailable);
        Err(ConnectionError::NoBackendAvailable)
      },
      Err(ConnectionError::HostNotFound) => {
        client.http().unwrap().set_answer(&self.answers.NotFound);
        Err(ConnectionError::HostNotFound)
      },
      // NOTE(review): panicking with a non-string payload loses the error
      // message; consider panic!("{:?}", e) — TODO confirm intent
      e => panic!(e)
    }
  }

  // Applies one order from the master process and acknowledges it (or
  // reports an error) on the command channel.
  fn notify(&mut self, event_loop: &mut Poll, message: ProxyOrder) {
    trace!("{}\t{} notified", self.tag, message);
    match message.command {
      Command::AddTlsFront(front) => {
        //info!("TLS\t{} add front {:?}", id, front);
        self.add_http_front(front, event_loop);
        self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
      },
      Command::RemoveTlsFront(front) => {
        //info!("TLS\t{} remove front {:?}", id, front);
        self.remove_http_front(front, event_loop);
        self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
      },
      Command::AddInstance(instance) => {
        info!("{}\t{} add instance {:?}", self.tag, message.id, instance);
        let addr_string = instance.ip_address + ":" + &instance.port.to_string();
        let parsed:Option<SocketAddr> = addr_string.parse().ok();
        if let Some(addr) = parsed {
          self.add_instance(&instance.app_id, &addr, event_loop);
          self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
        } else {
          self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Error(String::from("cannot parse the address"))});
        }
      },
      Command::RemoveInstance(instance) => {
        info!("{}\t{} remove instance {:?}", self.tag, message.id, instance);
        let addr_string = instance.ip_address + ":" + &instance.port.to_string();
        let parsed:Option<SocketAddr> = addr_string.parse().ok();
        if let Some(addr) = parsed {
          self.remove_instance(&instance.app_id, &addr, event_loop);
          self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
        } else {
          self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Error(String::from("cannot parse the address"))});
        }
      },
      Command::HttpProxy(configuration) => {
        info!("{}\t{} modifying proxy configuration: {:?}", self.tag, message.id, configuration);
        self.front_timeout = configuration.front_timeout;
        self.back_timeout  = configuration.back_timeout;
        self.answers = DefaultAnswers {
          NotFound:           configuration.answer_404.into_bytes(),
          ServiceUnavailable: configuration.answer_503.into_bytes(),
        };
        self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
      },
      Command::SoftStop => {
        info!("{}\t{} processing soft shutdown", self.tag, message.id);
        // stop accepting new connections; in-flight clients keep running
        event_loop.deregister(&self.listener);
        self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Processing});
      },
      Command::HardStop => {
        info!("{}\t{} hard shutdown", self.tag, message.id);
        self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
      },
      command => {
        error!("{}\t{} unsupported message, ignoring {:?}", self.tag, message.id, command);
        self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Error(String::from("unsupported message"))});
      }
    }
  }

  // Decrements the connection count of the backend a client disconnected from.
  fn close_backend(&mut self, app_id: AppId, addr: &SocketAddr) {
    if let Some(app_instances) = self.instances.get_mut(&app_id) {
      if let Some(ref mut backend) = app_instances.iter_mut().find(|backend| &backend.address == addr) {
        backend.dec_connections();
      }
    }
  }

  fn front_timeout(&self) -> u64 {
    self.front_timeout
  }

  fn back_timeout(&self)  -> u64 {
    self.back_timeout
  }

  fn channel(&mut self) -> &mut Channel {
    &mut self.channel
  }
}
pub type TlsServer = Server<ServerConfiguration,TlsClient>;
/// Creates the event loop and the server configuration, then runs the TLS
/// proxy until the event loop stops. `channel` carries orders from the
/// master process.
///
/// NOTE(review): Poll::new() and ServerConfiguration::new() are unwrapped —
/// a bind failure panics the whole listener thread.
pub fn start_listener(tag: String, config: TlsProxyConfiguration, channel: Channel) {
  let mut event_loop = Poll::new().unwrap();
  let max_connections = config.max_connections;
  let max_listeners   = 1;

  // start at max_listeners + 1 because token(0) is the channel, and token(1) is the timer
  let configuration = ServerConfiguration::new(tag.clone(), config, channel, &mut event_loop, 1 + max_listeners).unwrap();
  let mut server = TlsServer::new(max_listeners, max_connections, configuration, event_loop);

  info!("{}\tstarting event loop", &tag);
  server.run();
  //event_loop.run(&mut server).unwrap();
  info!("{}\tending event loop", &tag);
}
#[cfg(test)]
mod tests {
extern crate tiny_http;
use super::*;
use std::collections::HashMap;
use std::net::{TcpListener, TcpStream, Shutdown};
use std::io::{Read,Write};
use std::{thread,str};
use std::sync::mpsc::channel;
use std::net::SocketAddr;
use std::str::FromStr;
use std::time::Duration;
use std::rc::{Rc,Weak};
use std::sync::{Arc,Mutex};
use std::cell::RefCell;
use messages::{Command,TlsFront,Instance};
use slab::Slab;
use pool::Pool;
use network::buffer::Buffer;
use network::buffer_queue::BufferQueue;
use network::{ProxyOrder,ServerMessage};
use network::http::DefaultAnswers;
use network::trie::TrieNode;
use openssl::ssl::{SslContext, SslMethod, Ssl, SslStream};
use openssl::x509::X509;
/*
#[allow(unused_mut, unused_must_use, unused_variables)]
#[test]
fn mi() {
thread::spawn(|| { start_server(); });
let front: SocketAddr = FromStr::from_str("127.0.0.1:1024").unwrap();
let (tx,rx) = channel::<ServerMessage>();
let (sender, jg) = start_listener(front, 10, 10, tx.clone());
let front = HttpFront { app_id: String::from("app_1"), hostname: String::from("localhost:1024"), path_begin: String::from("/") };
sender.send(ProxyOrder::Command(Command::AddHttpFront(front)));
let instance = Instance { app_id: String::from("app_1"), ip_address: String::from("127.0.0.1"), port: 1025 };
sender.send(ProxyOrder::Command(Command::AddInstance(instance)));
println!("test received: {:?}", rx.recv());
println!("test received: {:?}", rx.recv());
thread::sleep_ms(300);
let mut client = TcpStream::connect(("127.0.0.1", 1024)).unwrap();
// 5 seconds of timeout
client.set_read_timeout(Some(Duration::new(5,0)));
thread::sleep_ms(100);
let mut w = client.write(&b"GET / HTTP/1.1\r\nHost: localhost:1024\r\nConnection: Close\r\n\r\n"[..]);
println!("http client write: {:?}", w);
let mut buffer = [0;4096];
thread::sleep_ms(500);
let mut r = client.read(&mut buffer[..]);
println!("http client read: {:?}", r);
match r {
Err(e) => assert!(false, "client request should not fail. Error: {:?}",e),
Ok(sz) => {
// Read the Response.
println!("read response");
println!("Response: {}", str::from_utf8(&buffer[..]).unwrap());
//thread::sleep_ms(300);
//assert_eq!(&body, &"Hello World!"[..]);
assert_eq!(sz, 154);
//assert!(false);
}
}
}
use self::tiny_http::{ServerBuilder, Response};
#[allow(unused_mut, unused_must_use, unused_variables)]
fn start_server() {
thread::spawn(move|| {
let server = ServerBuilder::new().with_port(1025).build().unwrap();
println!("starting web server");
for request in server.incoming_requests() {
println!("backend web server got request -> method: {:?}, url: {:?}, headers: {:?}",
request.method(),
request.url(),
request.headers()
);
let response = Response::from_string("hello world");
request.respond(response);
println!("backend web server sent response");
}
});
}
*/
use mio::tcp;
#[test]
fn frontend_from_request_test() {
  let app_id1 = "app_1".to_owned();
  let app_id2 = "app_2".to_owned();
  let app_id3 = "app_3".to_owned();
  let uri1 = "/".to_owned();
  let uri2 = "/yolo".to_owned();
  let uri3 = "/yolo/swag".to_owned();

  let mut fronts = HashMap::new();
  fronts.insert("lolcatho.st".to_owned(), vec![
    TlsApp {
      app_id: app_id1, hostname: "lolcatho.st".to_owned(), path_begin: uri1,
      cert_fingerprint: vec!()
    },
    TlsApp {
      app_id: app_id2, hostname: "lolcatho.st".to_owned(), path_begin: uri2,
      cert_fingerprint: vec!()
    },
    TlsApp {
      app_id: app_id3, hostname: "lolcatho.st".to_owned(), path_begin: uri3,
      cert_fingerprint: vec!()
    }
  ]);
  fronts.insert("other.domain".to_owned(), vec![
    TlsApp {
      app_id: "app_1".to_owned(), hostname: "other.domain".to_owned(), path_begin: "/test".to_owned(),
      cert_fingerprint: vec!()
    },
  ]);

  let contexts   = HashMap::new();
  let rc_ctx     = Arc::new(Mutex::new(contexts));
  let domains    = TrieNode::root();
  let rc_domains = Arc::new(Mutex::new(domains));

  // FIX: this is a TLS proxy, so build a TLS context, not a DTLS one
  let context = SslContext::builder(SslMethod::tls()).unwrap();

  let (command, channel) = CommandChannel::generate(1000, 10000).expect("should create a channel");

  let tls_data = TlsData {
    context:     context.build(),
    certificate: vec!(),
    refcount:    0,
  };

  let front: SocketAddr = FromStr::from_str("127.0.0.1:1032").expect("test address 127.0.0.1:1032 should be parsed");
  let listener = tcp::TcpListener::bind(&front).expect("test address 127.0.0.1:1032 should be available");
  let server_config = ServerConfiguration {
    listener:        listener,
    address:         front,
    instances:       HashMap::new(),
    fronts:          fronts,
    domains:         rc_domains,
    default_context: tls_data,
    contexts:        rc_ctx,
    channel:         channel,
    pool:            Rc::new(RefCell::new(Pool::with_capacity(1, 0, || BufferQueue::with_capacity(16384)))),
    front_timeout:   5000,
    back_timeout:    5000,
    answers:         DefaultAnswers {
      NotFound: Vec::from(&b"HTTP/1.1 404 Not Found\r\n\r\n"[..]),
      ServiceUnavailable: Vec::from(&b"HTTP/1.1 503 your application is in deployment\r\n\r\n"[..]),
    },
    config: Default::default(),
    tag:    String::from("TLS"),
  };

  println!("TEST {}", line!());
  let frontend1 = server_config.frontend_from_request("lolcatho.st", "/");
  assert_eq!(frontend1.unwrap().app_id, "app_1");
  println!("TEST {}", line!());
  let frontend2 = server_config.frontend_from_request("lolcatho.st", "/test");
  assert_eq!(frontend2.unwrap().app_id, "app_1");
  println!("TEST {}", line!());
  let frontend3 = server_config.frontend_from_request("lolcatho.st", "/yolo/test");
  assert_eq!(frontend3.unwrap().app_id, "app_2");
  println!("TEST {}", line!());
  let frontend4 = server_config.frontend_from_request("lolcatho.st", "/yolo/swag");
  assert_eq!(frontend4.unwrap().app_id, "app_3");
  println!("TEST {}", line!());
  let frontend5 = server_config.frontend_from_request("domain", "/");
  assert_eq!(frontend5, None);
  // assert!(false);
}
}
do not use dtls for tests
use std::thread::{self,Thread,Builder};
use std::sync::mpsc::{self,channel,Receiver};
use std::sync::{Arc,Mutex};
use std::rc::{Rc,Weak};
use std::cell::RefCell;
use std::mem;
use mio::*;
use mio::tcp::*;
use mio::timer::Timeout;
use mio_uds::UnixStream;
use std::io::{self,Read,Write,ErrorKind,BufReader};
use std::collections::HashMap;
use std::error::Error;
use slab::Slab;
use pool::{Pool,Checkout};
use std::net::{IpAddr,SocketAddr};
use std::str::{FromStr, from_utf8, from_utf8_unchecked};
use time::{precise_time_s, precise_time_ns};
use rand::random;
use openssl::ssl::{self, SslContext, SslContextBuilder, SslMethod,
Ssl, SslRef, SslStream, SniError};
use openssl::x509::X509;
use openssl::dh::Dh;
use openssl::pkey::PKey;
use openssl::hash::MessageDigest;
use openssl::nid;
use nom::IResult;
use parser::http11::{HttpState,RequestState,ResponseState,RRequestLine,parse_request_until_stop,hostname_and_port};
use network::buffer::Buffer;
use network::buffer_queue::BufferQueue;
use network::{Backend,ClientResult,ServerMessage,ServerMessageStatus,ConnectionError,ProxyOrder,Protocol};
use network::proxy::{BackendConnectAction,Server,ProxyConfiguration,ProxyClient,
Readiness,ListenToken,FrontToken,BackToken,Channel};
use messages::{self,Command,TlsFront,TlsProxyConfiguration};
use network::http::{self,DefaultAnswers};
use network::socket::{SocketHandler,SocketResult,server_bind};
use network::trie::*;
use network::protocol::{ProtocolResult,TlsHandshake,Http,Pipe};
use command::CommandChannel;
// aliases making the role of each mio token explicit
type BackendToken = Token;
type ClientToken = Token;
/// A routing entry: requests for `hostname` whose path starts with
/// `path_begin` are routed to application `app_id`, served with the
/// certificate identified by `cert_fingerprint`.
#[derive(Debug,Clone,PartialEq,Eq)]
pub struct TlsApp {
  pub app_id:           String,
  pub hostname:         String,
  pub path_begin:       String,
  pub cert_fingerprint: CertFingerprint,
}
/// Protocol state machine of a TLS client: first the TLS handshake, then
/// HTTP over the TLS stream, then possibly a websocket-style pipe after a
/// protocol upgrade.
pub enum State {
  Handshake(TlsHandshake),
  Http(Http<SslStream<TcpStream>>),
  WebSocket(Pipe<SslStream<TcpStream>>)
}
/// A client connection of the TLS proxy; `protocol` holds the current
/// protocol state (see `State`).
pub struct TlsClient {
  front:          Option<TcpStream>,  // frontend socket
  front_token:    Option<Token>,
  front_timeout:  Option<Timeout>,
  back_timeout:   Option<Timeout>,
  // Option so upgrade() can take the state by value and swap in the next one
  protocol:       Option<State>,
  public_address: Option<IpAddr>,
  // set from the handshake state once it is consumed by upgrade()
  ssl:            Option<Ssl>,
  // buffer pool owned by the server; upgraded on demand
  pool:           Weak<RefCell<Pool<BufferQueue>>>,
}
impl TlsClient {
pub fn new(server_context: &str, ssl:Ssl, sock: TcpStream, pool: Weak<RefCell<Pool<BufferQueue>>>, public_address: Option<IpAddr>) -> Option<TlsClient> {
//FIXME: we should not need to clone the socket. Maybe do the accept here instead of
// in TlsHandshake?
let s = sock.try_clone().unwrap();
let handshake = TlsHandshake::new(server_context, ssl, s);
Some(TlsClient {
front: Some(sock),
front_token: None,
front_timeout: None,
back_timeout: None,
protocol: Some(State::Handshake(handshake)),
public_address: public_address,
ssl: None,
pool: pool,
})
}
pub fn http(&mut self) -> Option<&mut Http<SslStream<TcpStream>>> {
if let State::Http(ref mut http) = *self.protocol.as_mut().unwrap() {
Some(http)
} else {
None
}
}
pub fn upgrade(&mut self) -> bool {
let protocol = self.protocol.take().unwrap();
if let State::Handshake(handshake) = protocol {
if let Some(pool) = self.pool.upgrade() {
let mut p = pool.borrow_mut();
if let (Some(front_buf), Some(back_buf)) = (p.checkout(), p.checkout()) {
let mut http = Http::new(&handshake.server_context, handshake.stream.unwrap(), front_buf,
back_buf, self.public_address.clone()).unwrap();
http.readiness = handshake.readiness;
http.readiness.front_interest = Ready::readable() | Ready::hup() | Ready::error();
http.set_front_token(self.front_token.as_ref().unwrap().clone());
self.ssl = handshake.ssl;
self.protocol = Some(State::Http(http));
return true;
} else {
error!("could not get buffers");
//FIXME: must return an error and stop the connection here
}
}
false
} else if let State::Http(http) = protocol {
info!("https switching to wss");
let front_token = http.front_token().unwrap();
let back_token = http.back_token().unwrap();
let mut pipe = Pipe::new(&http.server_context, http.frontend, http.backend.unwrap(),
http.front_buf, http.back_buf, http.public_address).unwrap();
pipe.readiness.front_readiness = http.readiness.front_readiness;
pipe.readiness.back_readiness = http.readiness.back_readiness;
pipe.set_front_token(front_token);
pipe.set_back_token(back_token);
self.protocol = Some(State::WebSocket(pipe));
true
} else {
self.protocol = Some(protocol);
true
}
}
}
/// Delegates the proxy event-loop callbacks to whichever protocol state
/// (handshake, http, websocket) the client is currently in.
impl ProxyClient for TlsClient {
  fn front_socket(&self) -> &TcpStream {
    self.front.as_ref().unwrap()
  }

  // only the http and websocket states own a backend socket
  fn back_socket(&self)  -> Option<&TcpStream> {
    match *self.protocol.as_ref().unwrap() {
      State::Handshake(ref handshake) => None,
      State::Http(ref http)           => http.back_socket(),
      State::WebSocket(ref pipe)      => pipe.back_socket(),
    }
  }

  fn front_token(&self)  -> Option<Token> {
    self.front_token
  }

  fn back_token(&self)   -> Option<Token> {
    if let State::Http(ref http) = *self.protocol.as_ref().unwrap() {
      http.back_token()
    } else {
      None
    }
  }

  fn close(&mut self) {
    //println!("TLS closing[{:?}] temp->front: {:?}, temp->back: {:?}", self.token, *self.temp.front_buf, *self.temp.back_buf);
    self.http().map(|http| http.close());
  }

  fn log_context(&self)  -> String {
    if let State::Http(ref http) = *self.protocol.as_ref().unwrap() {
      http.log_context()
    } else {
      "".to_string()
    }
  }

  fn set_back_socket(&mut self, sock:TcpStream) {
    self.http().unwrap().set_back_socket(sock)
  }

  fn set_front_token(&mut self, token: Token) {
    self.front_token = Some(token);
    // the http state keeps its own copy of the front token
    self.protocol.as_mut().map(|p| match *p {
      State::Http(ref mut http) => http.set_front_token(token),
      _                         => {}
    });
  }

  fn set_back_token(&mut self, token: Token) {
    self.http().unwrap().set_back_token(token)
  }

  fn front_timeout(&mut self) -> Option<Timeout> {
    self.front_timeout.clone()
  }

  fn back_timeout(&mut self)  -> Option<Timeout> {
    self.back_timeout.clone()
  }

  fn set_front_timeout(&mut self, timeout: Timeout) {
    self.front_timeout = Some(timeout)
  }

  fn set_back_timeout(&mut self, timeout: Timeout) {
    self.back_timeout = Some(timeout);
  }

  fn front_hup(&mut self)     -> ClientResult {
    self.http().unwrap().front_hup()
  }

  fn back_hup(&mut self)      -> ClientResult {
    self.http().unwrap().back_hup()
  }

  // a frontend read may complete the handshake: upgrade to the http state
  // and process the event there
  fn readable(&mut self)      -> ClientResult {
    let (upgrade, result) = match *self.protocol.as_mut().unwrap() {
      State::Handshake(ref mut handshake) => handshake.readable(),
      State::Http(ref mut http)           => (ProtocolResult::Continue, http.readable()),
      State::WebSocket(ref mut pipe)      => (ProtocolResult::Continue, pipe.readable()),
    };

    if upgrade == ProtocolResult::Continue {
      result
    } else {
      if self.upgrade() {
        match *self.protocol.as_mut().unwrap() {
          State::Http(ref mut http) => http.readable(),
          _ => result
        }
      } else {
        ClientResult::CloseClient
      }
    }
  }

  fn writable(&mut self)      -> ClientResult {
    match *self.protocol.as_mut().unwrap() {
      State::Handshake(ref mut handshake) => ClientResult::CloseClient,
      State::Http(ref mut http)           => http.writable(),
      State::WebSocket(ref mut pipe)      => pipe.writable(),
    }
  }

  // a backend read may trigger the http -> websocket upgrade
  fn back_readable(&mut self) -> ClientResult {
    let (upgrade, result) = match *self.protocol.as_mut().unwrap() {
      State::Http(ref mut http)           => http.back_readable(),
      State::Handshake(ref mut handshake) => (ProtocolResult::Continue, ClientResult::CloseClient),
      State::WebSocket(ref mut pipe)      => (ProtocolResult::Continue, pipe.back_readable()),
    };

    if upgrade == ProtocolResult::Continue {
      result
    } else {
      if self.upgrade() {
        match *self.protocol.as_mut().unwrap() {
          State::WebSocket(ref mut pipe) => pipe.back_readable(),
          _ => result
        }
      } else {
        ClientResult::CloseBothFailure
      }
    }
  }

  fn back_writable(&mut self) -> ClientResult {
    //self.http().unwrap().back_writable()
    match *self.protocol.as_mut().unwrap() {
      State::Handshake(ref mut handshake) => ClientResult::CloseClient,
      State::Http(ref mut http)           => http.back_writable(),
      State::WebSocket(ref mut pipe)      => pipe.back_writable(),
    }
  }

  fn remove_backend(&mut self) -> (Option<String>, Option<SocketAddr>) {
    self.http().unwrap().remove_backend()
  }

  fn readiness(&mut self)      -> &mut Readiness {
    let r = match *self.protocol.as_mut().unwrap() {
      State::Handshake(ref mut handshake) => &mut handshake.readiness,
      State::Http(ref mut http)           => http.readiness(),
      State::WebSocket(ref mut pipe)      => &mut pipe.readiness,
    };
    //info!("current readiness: {:?}", r);
    r
  }

  fn protocol(&self)           -> Protocol {
    Protocol::TLS
  }
}
/// Extracts the first common-name (CN) entry of the certificate's subject,
/// as an owned `String`; `None` when there is no CN or it is not UTF-8.
fn get_cert_common_name(cert: &X509) -> Option<String> {
  match cert.subject_name().entries_by_nid(nid::COMMONNAME).next() {
    Some(entry) => match entry.data().as_utf8() {
      Ok(name) => Some(String::from(&*name)),
      Err(_)   => None,
    },
    None => None,
  }
}
pub type AppId     = String;
pub type HostName  = String;
pub type PathBegin = String;
//maybe not the most efficient type for that
// SHA-256 digest of a certificate; key of the contexts map and the domain trie
pub type CertFingerprint = Vec<u8>;
/// A TLS context together with the PEM certificate it was built from,
/// reference-counted by the number of fronts using the certificate.
pub struct TlsData {
  context:     SslContext,
  certificate: Vec<u8>,  // PEM bytes, kept to re-parse the names on removal
  refcount:    usize,
}
/// State of the TLS proxy: the listening socket, the routing tables
/// (fronts per hostname, certificate domain trie), the backend instances,
/// the TLS contexts keyed by certificate fingerprint and the command
/// channel to the master process.
pub struct ServerConfiguration {
  listener:        TcpListener,
  address:         SocketAddr,
  instances:       HashMap<AppId, Vec<Backend>>,
  fronts:          HashMap<HostName, Vec<TlsApp>>,
  // shared with the SNI callback installed on the default context
  domains:         Arc<Mutex<TrieNode<CertFingerprint>>>,
  default_context: TlsData,
  // shared with the SNI callback as well
  contexts:        Arc<Mutex<HashMap<CertFingerprint,TlsData>>>,
  channel:         Channel,
  pool:            Rc<RefCell<Pool<BufferQueue>>>,
  answers:         DefaultAnswers,
  front_timeout:   u64,
  back_timeout:    u64,
  config:          TlsProxyConfiguration,
  tag:             String,
}
impl ServerConfiguration {
/// Builds the proxy configuration: creates the default TLS context (with
/// its SNI callback), registers the default application front, and binds
/// the listening socket on the event loop. Returns the bind error when the
/// front address cannot be bound.
pub fn new(tag: String, config: TlsProxyConfiguration, channel: Channel, event_loop: &mut Poll, start_at: usize) -> io::Result<ServerConfiguration> {
  let contexts:HashMap<CertFingerprint,TlsData> = HashMap::new();
  let mut domains = TrieNode::root();
  let mut fronts  = HashMap::new();
  let rc_ctx      = Arc::new(Mutex::new(contexts));
  let ref_ctx     = rc_ctx.clone();
  let rc_domains  = Arc::new(Mutex::new(domains));
  let ref_domains = rc_domains.clone();
  let cl_tag      = tag.clone();
  let default_name = config.default_name.as_ref().map(|name| name.clone()).unwrap_or(String::new());

  // NOTE(review): these unwraps panic when the default certificate or key
  // is invalid — TODO return an io::Error instead
  let (fingerprint, mut tls_data, names):(Vec<u8>,TlsData, Vec<String>) = Self::create_default_context(&config, ref_ctx, ref_domains, cl_tag, default_name).unwrap();
  let cert = X509::from_pem(&tls_data.certificate).unwrap();

  let common_name: Option<String> = get_cert_common_name(&cert);
  info!("{}\tgot common name: {:?}", &tag, common_name);

  // the default front matches the default hostname with any path
  let app = TlsApp {
    app_id:           config.default_app_id.clone().unwrap_or(String::new()),
    hostname:         config.default_name.clone().unwrap_or(String::new()),
    path_begin:       String::new(),
    cert_fingerprint: fingerprint.clone(),
  };
  fronts.insert(config.default_name.clone().unwrap_or(String::from("")), vec![app]);

  match server_bind(&config.front) {
    Ok(listener) => {
      event_loop.register(&listener, Token(start_at), Ready::readable(), PollOpt::level());

      // configured 404/503 answers, with built-in fallbacks
      let default = DefaultAnswers {
        NotFound: Vec::from(if config.answer_404.len() > 0 {
          config.answer_404.as_bytes()
        } else {
          &b"HTTP/1.1 404 Not Found\r\nCache-Control: no-cache\r\nConnection: close\r\n\r\n"[..]
        }),
        ServiceUnavailable: Vec::from(if config.answer_503.len() > 0 {
          config.answer_503.as_bytes()
        } else {
          &b"HTTP/1.1 503 your application is in deployment\r\nCache-Control: no-cache\r\nConnection: close\r\n\r\n"[..]
        }),
      };

      Ok(ServerConfiguration {
        listener:        listener,
        address:         config.front.clone(),
        instances:       HashMap::new(),
        fronts:          fronts,
        domains:         rc_domains,
        default_context: tls_data,
        contexts:        rc_ctx,
        channel:         channel,
        // two buffers (front and back) are checked out per client
        pool:            Rc::new(RefCell::new(
          Pool::with_capacity(2*config.max_connections, 0, || BufferQueue::with_capacity(config.buffer_size))
        )),
        front_timeout:   50000,
        back_timeout:    50000,
        answers:         default,
        config:          config,
        tag:             tag,
      })
    },
    Err(e) => {
      error!("{}\tcould not create listener {:?}: {:?}", tag, config.front, e);
      Err(e)
    }
  }
}
/// Builds the default TLS context from the configured (or bundled) default
/// certificate and key: hardens the context options, registers the
/// certificate's subject alt names in the domain trie, and installs the
/// SNI callback that swaps in the per-certificate context matching the
/// requested server name. Returns the certificate fingerprint, the context
/// data and the names carried by the certificate, or `None` on any failure.
pub fn create_default_context(config: &TlsProxyConfiguration, ref_ctx: Arc<Mutex<HashMap<CertFingerprint,TlsData>>>, ref_domains: Arc<Mutex<TrieNode<CertFingerprint>>>, tag: String, default_name: String) -> Option<(CertFingerprint,TlsData,Vec<String>)> {
  let ctx = SslContext::builder(SslMethod::tls());
  if let Err(e) = ctx {
    //return Err(io::Error::new(io::ErrorKind::Other, e.description()));
    return None
  }

  let mut context = ctx.unwrap();

  // disable old protocols, compression and session tickets; prefer the
  // server's cipher order
  let mut options = context.options();
  options.insert(ssl::SSL_OP_NO_SSLV2);
  options.insert(ssl::SSL_OP_NO_SSLV3);
  options.insert(ssl::SSL_OP_NO_TLSV1);
  options.insert(ssl::SSL_OP_NO_COMPRESSION);
  options.insert(ssl::SSL_OP_NO_TICKET);
  options.insert(ssl::SSL_OP_CIPHER_SERVER_PREFERENCE);
  let opt = context.set_options(options);

  context.set_cipher_list(&config.cipher_list);

  match Dh::get_2048_256() {
    Ok(dh) => context.set_tmp_dh(&dh),
    Err(e) => {
      //return Err(io::Error::new(io::ErrorKind::Other, e.description()))
      return None
    }
  };

  context.set_ecdh_auto(true);

  //FIXME: get the default cert and key from the configuration
  let cert_read = config.default_certificate.as_ref().map(|vec| &vec[..]).unwrap_or(&include_bytes!("../../../assets/certificate.pem")[..]);
  let key_read  = config.default_key.as_ref().map(|vec| &vec[..]).unwrap_or(&include_bytes!("../../../assets/key.pem")[..]);
  if let Some(path) = config.default_certificate_chain.as_ref() {
    context.set_certificate_chain_file(path);
  }

  if let (Ok(cert), Ok(key)) = (X509::from_pem(&cert_read[..]), PKey::private_key_from_pem(&key_read[..])) {
    if let Ok(fingerprint) = cert.fingerprint(MessageDigest::sha256()) {
      context.set_certificate(&cert);
      context.set_private_key(&key);

      let mut names: Vec<String> = cert.subject_alt_names().map(|names| {
        names.iter().filter_map(|general_name|
          general_name.dnsname().map(|name| String::from(name))
        ).collect()
      }).unwrap_or(vec!());

      info!("{}\tgot subject alt names: {:?}", &tag, names);
      {
        // scoped: the SNI callback below locks this same mutex
        let mut domains = ref_domains.lock().unwrap();
        for name in &names {
          domains.domain_insert(name.clone().into_bytes(), fingerprint.clone());
        }
      }

      if let Some(common_name) = get_cert_common_name(&cert) {
        info!("got common name: {:?}", common_name);
        names.push(common_name);
      }

      // Called by openssl when a ClientHello carries a server name: look up
      // the matching certificate context via the domain trie and swap it in;
      // fall back to the default certificate on any miss.
      context.set_servername_callback(move |ssl: &mut SslRef| {
        let contexts = ref_ctx.lock().unwrap();
        let domains  = ref_domains.lock().unwrap();
        info!("{}\tref: {:?}", tag, ssl);
        if let Some(servername) = ssl.servername().map(|s| s.to_string()) {
          info!("checking servername: {}", servername);
          if &servername == &default_name {
            return Ok(());
          }
          info!("{}\tlooking for fingerprint for {:?}", tag, servername);
          if let Some(kv) = domains.domain_lookup(servername.as_bytes()) {
            info!("{}\tlooking for context for {:?} with fingerprint {:?}", tag, servername, kv.1);
            if let Some(ref tls_data) = contexts.get(&kv.1) {
              info!("{}\tfound context for {:?}", tag, servername);
              let context: &SslContext = &tls_data.context;
              if let Ok(()) = ssl.set_ssl_context(context) {
                info!("{}\tservername is now {:?}", tag, ssl.servername());
                return Ok(());
              } else {
                error!("{}\tno context found for {:?}", tag, servername);
              }
            }
          }
        } else {
          error!("{}\tgot no server name from ssl, answering with default one", tag);
        }
        //answer ok because we use the default certificate
        Ok(())
      });

      let tls_data = TlsData {
        context:     context.build(),
        certificate: cert_read.to_vec(),
        refcount:    1,
      };
      Some((fingerprint, tls_data, names))
    } else {
      None
    }
  } else {
    None
  }
}
/// Registers a TLS frontend: builds a hardened SSL context from the front's
/// PEM certificate and key, indexes the context by certificate fingerprint
/// and by every name on the certificate (CN + subject alt names), and records
/// the (hostname, path_begin) route.
///
/// Returns `false` when the context cannot be built, the DH parameters fail,
/// or the PEM data is invalid. `event_loop` is currently unused here.
pub fn add_http_front(&mut self, http_front: TlsFront, event_loop: &mut Poll) -> bool {
  let c = SslContext::builder(SslMethod::tls());
  if c.is_err() { return false; }
  let mut ctx = c.unwrap();

  // Harden the context: disable legacy protocols, compression and session
  // tickets, and prefer the server's cipher ordering.
  let mut options = ctx.options();
  options.insert(ssl::SSL_OP_NO_SSLV2);
  options.insert(ssl::SSL_OP_NO_SSLV3);
  options.insert(ssl::SSL_OP_NO_TLSV1);
  options.insert(ssl::SSL_OP_NO_COMPRESSION);
  options.insert(ssl::SSL_OP_NO_TICKET);
  options.insert(ssl::SSL_OP_CIPHER_SERVER_PREFERENCE);
  ctx.set_options(options);

  match Dh::get_2048_256() {
    Ok(dh) => ctx.set_tmp_dh(&dh),
    Err(_) => {
      return false;
    }
  };
  ctx.set_ecdh_auto(true);

  let mut cert_read = &http_front.certificate.as_bytes()[..];
  let mut key_read = &http_front.key.as_bytes()[..];
  // Parse the intermediate chain; unparseable entries are silently skipped.
  let cert_chain: Vec<X509> = http_front.certificate_chain.iter().filter_map(|c| {
    X509::from_pem(c.as_bytes()).ok()
  }).collect();

  if let (Ok(cert), Ok(key)) = (X509::from_pem(&mut cert_read), PKey::private_key_from_pem(&mut key_read)) {
    // BUGFIX: do not panic when the fingerprint cannot be computed,
    // just reject the front (the previous code used unwrap()).
    let fingerprint = match cert.fingerprint(MessageDigest::sha256()) {
      Ok(f)  => f,
      Err(_) => return false,
    };
    let common_name: Option<String> = get_cert_common_name(&cert);
    info!("{}\tgot common name: {:?}", self.tag, common_name);

    let names: Vec<String> = cert.subject_alt_names().map(|names| {
      names.iter().filter_map(|general_name|
        general_name.dnsname().map(|name| String::from(name))
      ).collect()
    }).unwrap_or(vec!());
    info!("{}\tgot subject alt names: {:?}", self.tag, names);

    ctx.set_certificate(&cert);
    ctx.set_private_key(&key);
    // BUGFIX: the previous code built a lazy `iter().map(...)` that was never
    // consumed, so intermediate certificates were silently dropped.
    for chain_cert in cert_chain {
      ctx.add_extra_chain_cert(chain_cert);
    }

    let tls_data = TlsData {
      context:     ctx.build(),
      certificate: cert_read.to_vec(),
      refcount:    1,
    };

    // If the fingerprint is already known, this is the same certificate:
    // bump the refcount instead of replacing the context.
    //FIXME: this is blocking
    //this lock is only obtained from this thread, so is it alright?
    {
      let mut contexts = self.contexts.lock().unwrap();
      if contexts.contains_key(&fingerprint) {
        contexts.get_mut(&fingerprint).map(|data| {
          data.refcount += 1;
        });
      } else {
        contexts.insert(fingerprint.clone(), tls_data);
      }
    }
    {
      let mut domains = self.domains.lock().unwrap();
      if let Some(name) = common_name {
        domains.domain_insert(name.into_bytes(), fingerprint.clone());
      }
      for name in names {
        domains.domain_insert(name.into_bytes(), fingerprint.clone());
      }
    }

    let app = TlsApp {
      app_id:           http_front.app_id.clone(),
      hostname:         http_front.hostname.clone(),
      path_begin:       http_front.path_begin.clone(),
      cert_fingerprint: fingerprint.clone(),
    };

    // Single entry-API lookup instead of get_mut + get + insert.
    let fronts = self.fronts.entry(http_front.hostname).or_insert(vec!());
    if !fronts.contains(&app) {
      fronts.push(app);
    }
    true
  } else {
    false
  }
}
/// Unregisters a TLS frontend: drops the route for the front's app, and when
/// the last route referencing a certificate disappears (refcount reaches 0),
/// removes its SSL context and all of its domain-trie entries.
pub fn remove_http_front(&mut self, front: TlsFront, event_loop: &mut Poll) {
  info!("{}\tremoving http_front {:?}", self.tag, front);

  // Pull the matching route out of the per-hostname list, if any.
  let removed_front = self.fronts.get_mut(&front.hostname).and_then(|fronts| {
    fronts.iter()
      .position(|f| &f.app_id == &front.app_id)
      .map(|pos| fronts.remove(pos))
  });

  if let Some(removed) = removed_front {
    let mut contexts = self.contexts.lock().unwrap();
    let mut domains  = self.domains.lock().unwrap();

    // Decrement the certificate refcount; `Some(true)` means this was the
    // last front using that certificate.
    let was_last_reference = contexts.get_mut(&removed.cert_fingerprint).map(|tls_data| {
      tls_data.refcount -= 1;
      tls_data.refcount == 0
    });

    if was_last_reference == Some(true) {
      if let Some(data) = contexts.remove(&removed.cert_fingerprint) {
        if let Ok(cert) = X509::from_pem(&data.certificate) {
          // Remove the common name and every subject alt name from the trie.
          if let Some(name) = get_cert_common_name(&cert) {
            domains.domain_remove(&name.into_bytes());
          }
          let alt_names: Vec<String> = cert.subject_alt_names().map(|names| {
            names.iter().filter_map(|general_name|
              general_name.dnsname().map(String::from)
            ).collect()
          }).unwrap_or(vec!());
          for name in alt_names {
            domains.domain_remove(&name.into_bytes());
          }
        }
      }
    }
  }
}
/// Adds a backend instance for `app_id`, ignoring duplicates.
/// `event_loop` is currently unused here.
pub fn add_instance(&mut self, app_id: &str, instance_address: &SocketAddr, event_loop: &mut Poll) {
  let backend = Backend::new(*instance_address);
  // Single entry-API lookup: the previous code looked the key up twice
  // (get_mut then get/insert) and constructed the Backend twice.
  let addrs = self.instances.entry(String::from(app_id)).or_insert_with(Vec::new);
  if !addrs.contains(&backend) {
    addrs.push(backend);
  }
}
pub fn remove_instance(&mut self, app_id: &str, instance_address: &SocketAddr, event_loop: &mut Poll) {
if let Some(instances) = self.instances.get_mut(app_id) {
instances.retain(|backend| &backend.address != instance_address);
} else {
error!("{}\tInstance was already removed", self.tag);
}
}
// ToDo factor out with http.rs
/// Resolves the frontend for a (host, uri) pair: among the fronts registered
/// for `host` whose `path_begin` is a prefix of `uri`, returns the one with
/// the longest prefix (first registered wins on equal lengths).
pub fn frontend_from_request(&self, host: &str, uri: &str) -> Option<&TlsApp> {
  self.fronts.get(host).and_then(|http_fronts| {
    http_fronts.iter()
      .filter(|f| uri.starts_with(&f.path_begin)) // ToDo match on uri
      .fold(None, |best: Option<&TlsApp>, f| {
        match best {
          // keep the current best on ties or shorter prefixes
          Some(b) if f.path_begin.len() <= b.path_begin.len() => Some(b),
          _ => Some(f),
        }
      })
  })
}
/// Picks a backend connection for the request: resolves the app via
/// `frontend_from_request`, then connects to a random backend that can still
/// accept connections. Sets the client's 503 answer when no backend is
/// available and its app_id as a side effect.
pub fn backend_from_request(&mut self, client: &mut TlsClient, host: &str, uri: &str) -> Result<TcpStream,ConnectionError> {
  trace!("{}\tlooking for backend for host: {}", self.tag, host);
  // `host` may carry a port (e.g. "example.com:443"); route on the name only.
  // split() always yields at least one item, so unwrap_or is just a fallback.
  let real_host = host.split(":").next().unwrap_or(host);
  trace!("{}\tlooking for backend for real host: {}", self.tag, real_host);

  if let Some(app_id) = self.frontend_from_request(real_host, uri).map(|ref front| front.app_id.clone()) {
    client.http().unwrap().app_id = Some(app_id.clone());
    // ToDo round-robin on instances
    if let Some(ref mut app_instances) = self.instances.get_mut(&app_id) {
      if app_instances.len() == 0 {
        client.http().unwrap().set_answer(&self.answers.ServiceUnavailable);
        return Err(ConnectionError::NoBackendAvailable);
      }
      let mut instances:Vec<&mut Backend> = app_instances.iter_mut().filter(|backend| backend.can_open()).collect();
      // BUGFIX: all instances may be saturated (can_open() == false for every
      // backend); the previous code then computed `rnd % 0` and panicked.
      if instances.is_empty() {
        client.http().unwrap().set_answer(&self.answers.ServiceUnavailable);
        return Err(ConnectionError::NoBackendAvailable);
      }
      let idx = random::<usize>() % instances.len();
      info!("{}\tConnecting {} -> {:?}", client.http().unwrap().log_context(), host, instances.get(idx).map(|backend| (backend.address, backend.active_connections)));
      instances.get_mut(idx).ok_or(ConnectionError::NoBackendAvailable).and_then(|ref mut backend| {
        let conn = TcpStream::connect(&backend.address).map_err(|_| ConnectionError::NoBackendAvailable);
        if conn.is_ok() {
          // count the connection only once it actually opened
          backend.inc_connections();
        }
        conn
      })
    } else {
      Err(ConnectionError::NoBackendAvailable)
    }
  } else {
    Err(ConnectionError::HostNotFound)
  }
}
}
impl ProxyConfiguration<TlsClient> for ServerConfiguration {
/// Accepts a pending TCP connection and wraps it in a `TlsClient` using the
/// default SSL context; SNI-based context selection happens later in the
/// servername callback. The returned bool is passed through to the server
/// (false here — presumably "not readable yet"; TODO confirm its meaning).
fn accept(&mut self, token: ListenToken) -> Option<(TlsClient,bool)> {
let accepted = self.listener.accept();
if let Ok((frontend_sock, _)) = accepted {
// NOTE(review): the Result of set_nodelay is ignored — confirm best-effort is intended
frontend_sock.set_nodelay(true);
if let Ok(ssl) = Ssl::new(&self.default_context.context) {
if let Some(c) = TlsClient::new("TLS", ssl, frontend_sock, Rc::downgrade(&self.pool), self.config.public_address) {
return Some((c, false))
}
} else {
error!("{}\tcould not create ssl context", self.tag);
}
} else {
error!("{}\tcould not accept connection: {:?}", self.tag, accepted);
}
None
}
/// Validates the Host header against the TLS SNI servername, then opens a
/// connection to a backend for the request and resets the client's HTTP
/// state for the new backend.
fn connect_to_backend(&mut self, event_loop: &mut Poll, client: &mut TlsClient) -> Result<BackendConnectAction,ConnectionError> {
let h = try!(client.http().unwrap().state().get_host().ok_or(ConnectionError::NoHostGiven));
let host: &str = if let IResult::Done(i, (hostname, port)) = hostname_and_port(h.as_bytes()) {
if i != &b""[..] {
error!("{}\tinvalid remaining chars after hostname", self.tag);
return Err(ConnectionError::ToBeDefined);
}
// it is alright to call from_utf8_unchecked,
// we already verified that there are only ascii
// chars in there
let hostname_str = unsafe { from_utf8_unchecked(hostname) };
//FIXME: what if we don't use SNI?
let servername: Option<String> = client.http().unwrap().frontend.ssl().servername().map(|s| s.to_string());
if servername.as_ref().map(|s| s.as_str()) != Some(hostname_str) {
error!("{}\tTLS SNI hostname and Host header don't match", self.tag);
return Err(ConnectionError::HostNotFound);
}
//FIXME: we should check that the port is right too
// On the default HTTPS port, route on the bare hostname; otherwise keep
// the full host:port string.
if port == Some(&b"443"[..]) {
hostname_str
} else {
&h
}
} else {
error!("{}\thostname parsing failed", self.tag);
return Err(ConnectionError::ToBeDefined);
};
let rl:RRequestLine = try!(client.http().unwrap().state().get_request_line().ok_or(ConnectionError::NoRequestLineGiven));
// This first binding only validates that keep-alive information is present;
// its value is unused and immediately shadowed by the real connection below.
let conn = try!(client.http().unwrap().state().get_front_keep_alive().ok_or(ConnectionError::ToBeDefined));
let conn = self.backend_from_request(client, &host, &rl.uri);
match conn {
Ok(socket) => {
// Carry the request-side state over, but reset the response side for
// the fresh backend connection.
let req_state = client.http().unwrap().state().request.clone();
let req_header_end = client.http().unwrap().state().req_header_end;
let res_header_end = client.http().unwrap().state().res_header_end;
let added_req_header = client.http().unwrap().state().added_req_header.clone();
let added_res_header = client.http().unwrap().state().added_res_header.clone();
// FIXME: is this still needed?
client.http().unwrap().set_state(HttpState {
req_header_end: req_header_end,
res_header_end: res_header_end,
request: req_state,
response: Some(ResponseState::Initial),
added_req_header: added_req_header,
added_res_header: added_res_header,
});
client.set_back_socket(socket);
//FIXME: implement keepalive
Ok(BackendConnectAction::New)
},
Err(ConnectionError::NoBackendAvailable) => {
client.http().unwrap().set_answer(&self.answers.ServiceUnavailable);
Err(ConnectionError::NoBackendAvailable)
},
Err(ConnectionError::HostNotFound) => {
client.http().unwrap().set_answer(&self.answers.NotFound);
Err(ConnectionError::HostNotFound)
},
// NOTE(review): panicking on any other error takes down the whole event
// loop — consider propagating the error instead.
e => panic!(e)
}
}
/// Applies a configuration order from the command channel and answers with
/// a status message on the same channel.
fn notify(&mut self, event_loop: &mut Poll, message: ProxyOrder) {
trace!("{}\t{} notified", self.tag, message);
match message.command {
Command::AddTlsFront(front) => {
//info!("TLS\t{} add front {:?}", id, front);
self.add_http_front(front, event_loop);
self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
},
Command::RemoveTlsFront(front) => {
//info!("TLS\t{} remove front {:?}", id, front);
self.remove_http_front(front, event_loop);
self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
},
Command::AddInstance(instance) => {
info!("{}\t{} add instance {:?}", self.tag, message.id, instance);
let addr_string = instance.ip_address + ":" + &instance.port.to_string();
let parsed:Option<SocketAddr> = addr_string.parse().ok();
if let Some(addr) = parsed {
self.add_instance(&instance.app_id, &addr, event_loop);
self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
} else {
self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Error(String::from("cannot parse the address"))});
}
},
Command::RemoveInstance(instance) => {
info!("{}\t{} remove instance {:?}", self.tag, message.id, instance);
let addr_string = instance.ip_address + ":" + &instance.port.to_string();
let parsed:Option<SocketAddr> = addr_string.parse().ok();
if let Some(addr) = parsed {
self.remove_instance(&instance.app_id, &addr, event_loop);
self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
} else {
self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Error(String::from("cannot parse the address"))});
}
},
Command::HttpProxy(configuration) => {
info!("{}\t{} modifying proxy configuration: {:?}", self.tag, message.id, configuration);
self.front_timeout = configuration.front_timeout;
self.back_timeout = configuration.back_timeout;
self.answers = DefaultAnswers {
NotFound: configuration.answer_404.into_bytes(),
ServiceUnavailable: configuration.answer_503.into_bytes(),
};
self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
},
Command::SoftStop => {
info!("{}\t{} processing soft shutdown", self.tag, message.id);
// Stop accepting new connections; existing ones keep running.
event_loop.deregister(&self.listener);
self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Processing});
},
Command::HardStop => {
info!("{}\t{} hard shutdown", self.tag, message.id);
self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Ok});
},
command => {
error!("{}\t{} unsupported message, ignoring {:?}", self.tag, message.id, command);
self.channel.write_message(&ServerMessage{ id: message.id, status: ServerMessageStatus::Error(String::from("unsupported message"))});
}
}
}
/// Releases one connection slot on the backend matching `addr` for `app_id`.
fn close_backend(&mut self, app_id: AppId, addr: &SocketAddr) {
if let Some(app_instances) = self.instances.get_mut(&app_id) {
if let Some(ref mut backend) = app_instances.iter_mut().find(|backend| &backend.address == addr) {
backend.dec_connections();
}
}
}
/// Frontend timeout in milliseconds (presumably — TODO confirm unit).
fn front_timeout(&self) -> u64 {
self.front_timeout
}
/// Backend timeout in milliseconds (presumably — TODO confirm unit).
fn back_timeout(&self)  -> u64 {
self.back_timeout
}
/// Command channel used to answer configuration orders.
fn channel(&mut self) -> &mut Channel {
&mut self.channel
}
}
/// The TLS proxy event-loop server: `Server` specialized for the TLS
/// configuration and client types defined in this module.
pub type TlsServer = Server<ServerConfiguration,TlsClient>;
/// Creates the event loop and the TLS server configuration, then runs the
/// server until shutdown. Panics (with context) if the event loop or the
/// configuration cannot be created — both are unrecoverable at startup.
pub fn start_listener(tag: String, config: TlsProxyConfiguration, channel: Channel) {
  let mut event_loop = Poll::new().expect("could not create event loop");
  let max_connections = config.max_connections;
  let max_listeners   = 1;

  // start at max_listeners + 1 because token(0) is the channel, and token(1) is the timer
  let configuration = ServerConfiguration::new(tag.clone(), config, channel, &mut event_loop, 1 + max_listeners)
    .expect("could not create TLS server configuration");
  let mut server = TlsServer::new(max_listeners, max_connections, configuration, event_loop);

  info!("{}\tstarting event loop", &tag);
  server.run();
  info!("{}\tending event loop", &tag);
}
#[cfg(test)]
mod tests {
extern crate tiny_http;
use super::*;
use std::collections::HashMap;
use std::net::{TcpListener, TcpStream, Shutdown};
use std::io::{Read,Write};
use std::{thread,str};
use std::sync::mpsc::channel;
use std::net::SocketAddr;
use std::str::FromStr;
use std::time::Duration;
use std::rc::{Rc,Weak};
use std::sync::{Arc,Mutex};
use std::cell::RefCell;
use messages::{Command,TlsFront,Instance};
use slab::Slab;
use pool::Pool;
use network::buffer::Buffer;
use network::buffer_queue::BufferQueue;
use network::{ProxyOrder,ServerMessage};
use network::http::DefaultAnswers;
use network::trie::TrieNode;
use openssl::ssl::{SslContext, SslMethod, Ssl, SslStream};
use openssl::x509::X509;
// The block below is a disabled end-to-end test (proxy + tiny_http backend);
// kept for reference.
/*
#[allow(unused_mut, unused_must_use, unused_variables)]
#[test]
fn mi() {
thread::spawn(|| { start_server(); });
let front: SocketAddr = FromStr::from_str("127.0.0.1:1024").unwrap();
let (tx,rx) = channel::<ServerMessage>();
let (sender, jg) = start_listener(front, 10, 10, tx.clone());
let front = HttpFront { app_id: String::from("app_1"), hostname: String::from("localhost:1024"), path_begin: String::from("/") };
sender.send(ProxyOrder::Command(Command::AddHttpFront(front)));
let instance = Instance { app_id: String::from("app_1"), ip_address: String::from("127.0.0.1"), port: 1025 };
sender.send(ProxyOrder::Command(Command::AddInstance(instance)));
println!("test received: {:?}", rx.recv());
println!("test received: {:?}", rx.recv());
thread::sleep_ms(300);
let mut client = TcpStream::connect(("127.0.0.1", 1024)).unwrap();
// 5 seconds of timeout
client.set_read_timeout(Some(Duration::new(5,0)));
thread::sleep_ms(100);
let mut w = client.write(&b"GET / HTTP/1.1\r\nHost: localhost:1024\r\nConnection: Close\r\n\r\n"[..]);
println!("http client write: {:?}", w);
let mut buffer = [0;4096];
thread::sleep_ms(500);
let mut r = client.read(&mut buffer[..]);
println!("http client read: {:?}", r);
match r {
Err(e) => assert!(false, "client request should not fail. Error: {:?}",e),
Ok(sz) => {
// Read the Response.
println!("read response");
println!("Response: {}", str::from_utf8(&buffer[..]).unwrap());
//thread::sleep_ms(300);
//assert_eq!(&body, &"Hello World!"[..]);
assert_eq!(sz, 154);
//assert!(false);
}
}
}
use self::tiny_http::{ServerBuilder, Response};
#[allow(unused_mut, unused_must_use, unused_variables)]
fn start_server() {
thread::spawn(move|| {
let server = ServerBuilder::new().with_port(1025).build().unwrap();
println!("starting web server");
for request in server.incoming_requests() {
println!("backend web server got request -> method: {:?}, url: {:?}, headers: {:?}",
request.method(),
request.url(),
request.headers()
);
let response = Response::from_string("hello world");
request.respond(response);
println!("backend web server sent response");
}
});
}
*/
use mio::tcp;
/// Builds a ServerConfiguration by hand with three routes on "lolcatho.st"
/// and one on "other.domain", then checks that frontend_from_request picks
/// the front with the longest matching path prefix (and None for an
/// unknown host).
#[test]
fn frontend_from_request_test() {
let app_id1 = "app_1".to_owned();
let app_id2 = "app_2".to_owned();
let app_id3 = "app_3".to_owned();
let uri1 = "/".to_owned();
let uri2 = "/yolo".to_owned();
let uri3 = "/yolo/swag".to_owned();
let mut fronts = HashMap::new();
fronts.insert("lolcatho.st".to_owned(), vec![
TlsApp {
app_id: app_id1, hostname: "lolcatho.st".to_owned(), path_begin: uri1,
cert_fingerprint: vec!()
},
TlsApp {
app_id: app_id2, hostname: "lolcatho.st".to_owned(), path_begin: uri2,
cert_fingerprint: vec!()
},
TlsApp {
app_id: app_id3, hostname: "lolcatho.st".to_owned(), path_begin: uri3,
cert_fingerprint: vec!()
}
]);
fronts.insert("other.domain".to_owned(), vec![
TlsApp {
app_id: "app_1".to_owned(), hostname: "other.domain".to_owned(), path_begin: "/test".to_owned(),
cert_fingerprint: vec!()
},
]);
let contexts = HashMap::new();
let rc_ctx = Arc::new(Mutex::new(contexts));
let domains = TrieNode::root();
let rc_domains = Arc::new(Mutex::new(domains));
let context = SslContext::builder(SslMethod::tls()).unwrap();
// NOTE(review): CommandChannel is not among the imports visible here — confirm it resolves
let (command, channel) = CommandChannel::generate(1000, 10000).expect("should create a channel");
let tls_data = TlsData {
context: context.build(),
certificate: vec!(),
refcount: 0,
};
let front: SocketAddr = FromStr::from_str("127.0.0.1:1032").expect("test address 127.0.0.1:1032 should be parsed");
let listener = tcp::TcpListener::bind(&front).expect("test address 127.0.0.1:1032 should be available");
let server_config = ServerConfiguration {
listener: listener,
address: front,
instances: HashMap::new(),
fronts: fronts,
domains: rc_domains,
default_context: tls_data,
contexts: rc_ctx,
channel: channel,
pool: Rc::new(RefCell::new(Pool::with_capacity(1, 0, || BufferQueue::with_capacity(16384)))),
front_timeout: 5000,
back_timeout: 5000,
answers: DefaultAnswers {
NotFound: Vec::from(&b"HTTP/1.1 404 Not Found\r\n\r\n"[..]),
ServiceUnavailable: Vec::from(&b"HTTP/1.1 503 your application is in deployment\r\n\r\n"[..]),
},
config: Default::default(),
tag: String::from("TLS"),
};
println!("TEST {}", line!());
let frontend1 = server_config.frontend_from_request("lolcatho.st", "/");
assert_eq!(frontend1.unwrap().app_id, "app_1");
println!("TEST {}", line!());
let frontend2 = server_config.frontend_from_request("lolcatho.st", "/test");
assert_eq!(frontend2.unwrap().app_id, "app_1");
println!("TEST {}", line!());
let frontend3 = server_config.frontend_from_request("lolcatho.st", "/yolo/test");
assert_eq!(frontend3.unwrap().app_id, "app_2");
println!("TEST {}", line!());
let frontend4 = server_config.frontend_from_request("lolcatho.st", "/yolo/swag");
assert_eq!(frontend4.unwrap().app_id, "app_3");
println!("TEST {}", line!());
let frontend5 = server_config.frontend_from_request("domain", "/");
assert_eq!(frontend5, None);
// assert!(false);
}
}
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use self::Destination::*;
use syntax_pos::{COMMAND_LINE_SP, DUMMY_SP, FileMap, Span, MultiSpan, CharPos};
use {Level, CodeSuggestion, DiagnosticBuilder, SubDiagnostic, CodeMapper};
use RenderSpan::*;
use snippet::{StyledString, Style, Annotation, Line};
use styled_buffer::StyledBuffer;
use std::io::prelude::*;
use std::io;
use std::ops;
use std::rc::Rc;
use term;
/// Emitter trait for emitting errors.
///
/// Implementors render a fully-built diagnostic to some destination
/// (a terminal, a raw writer, ...).
pub trait Emitter {
/// Emit a structured diagnostic.
fn emit(&mut self, db: &DiagnosticBuilder);
}
impl Emitter for EmitterWriter {
fn emit(&mut self, db: &DiagnosticBuilder) {
// Work on clones so the diagnostic itself is left untouched.
let mut primary_span = db.span.clone();
let mut children = db.children.clone();
// Relocate spans pointing into <*macros> to their use sites before rendering.
self.fix_multispans_in_std_macros(&mut primary_span, &mut children);
self.emit_messages_default(&db.level, &db.message, &db.code, &primary_span, &children);
}
}
/// maximum number of lines we will print for each error; arbitrary.
pub const MAX_HIGHLIGHT_LINES: usize = 6;
/// When diagnostics written to stderr should be colored.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ColorConfig {
/// Color only when stderr is attached to a terminal.
Auto,
/// Always emit color codes.
Always,
/// Never emit color codes.
Never,
}
impl ColorConfig {
    /// Whether diagnostics should be colored under this setting.
    /// `Auto` defers to whether stderr is attached to a terminal.
    fn use_color(&self) -> bool {
        if let ColorConfig::Auto = *self {
            stderr_isatty()
        } else {
            *self == ColorConfig::Always
        }
    }
}
/// Writes rendered diagnostics to `dst`, resolving spans to source locations
/// through the optional code map `cm`.
pub struct EmitterWriter {
dst: Destination,
cm: Option<Rc<CodeMapper>>,
}
/// All annotated lines of a single file referenced by one diagnostic.
struct FileWithAnnotatedLines {
file: Rc<FileMap>,
lines: Vec<Line>,
}
/// Do not use this for messages that end in `\n` – use `println_maybe_styled` instead. See
/// `EmitterWriter::print_maybe_styled` for details.
macro_rules! print_maybe_styled {
($dst: expr, $style: expr, $($arg: tt)*) => {
$dst.print_maybe_styled(format_args!($($arg)*), $style, false)
}
}
// Like `print_maybe_styled`, but requests a trailing newline (last arg `true`).
macro_rules! println_maybe_styled {
($dst: expr, $style: expr, $($arg: tt)*) => {
$dst.print_maybe_styled(format_args!($($arg)*), $style, true)
}
}
impl EmitterWriter {
pub fn stderr(color_config: ColorConfig, code_map: Option<Rc<CodeMapper>>) -> EmitterWriter {
if color_config.use_color() {
let dst = Destination::from_stderr();
EmitterWriter {
dst: dst,
cm: code_map,
}
} else {
EmitterWriter {
dst: Raw(Box::new(io::stderr())),
cm: code_map,
}
}
}
/// Creates an emitter that writes to an arbitrary destination (e.g. a buffer
/// in tests) instead of stderr. Output is always raw (uncolored).
pub fn new(dst: Box<Write + Send>, code_map: Option<Rc<CodeMapper>>) -> EmitterWriter {
EmitterWriter {
dst: Raw(dst),
cm: code_map,
}
}
/// Groups the labeled spans of `msp` by file and by line, producing one
/// `FileWithAnnotatedLines` per referenced file with per-line annotation
/// lists (column ranges, primary flag, label). Multi-line spans are
/// minimized to a single character on their first line. Returns an empty
/// vector when no code map is available.
fn preprocess_annotations(&self, msp: &MultiSpan) -> Vec<FileWithAnnotatedLines> {
// Inserts `ann` into the entry for (file, line_index), creating the file
// and/or line entry on first sight and keeping lines sorted.
fn add_annotation_to_file(file_vec: &mut Vec<FileWithAnnotatedLines>,
file: Rc<FileMap>,
line_index: usize,
ann: Annotation) {
for slot in file_vec.iter_mut() {
// Look through each of our files for the one we're adding to
if slot.file.name == file.name {
// See if we already have a line for it
for line_slot in &mut slot.lines {
if line_slot.line_index == line_index {
line_slot.annotations.push(ann);
return;
}
}
// We don't have a line yet, create one
slot.lines.push(Line {
line_index: line_index,
annotations: vec![ann],
});
slot.lines.sort();
return;
}
}
// This is the first time we're seeing the file
file_vec.push(FileWithAnnotatedLines {
file: file,
lines: vec![Line {
line_index: line_index,
annotations: vec![ann],
}],
});
}
let mut output = vec![];
if let Some(ref cm) = self.cm {
for span_label in msp.span_labels() {
// Skip synthetic spans that have no real source location.
if span_label.span == DUMMY_SP || span_label.span == COMMAND_LINE_SP {
continue;
}
let lo = cm.lookup_char_pos(span_label.span.lo);
let mut hi = cm.lookup_char_pos(span_label.span.hi);
let mut is_minimized = false;
// If the span is multi-line, simplify down to the span of one character
if lo.line != hi.line {
hi.line = lo.line;
hi.col = CharPos(lo.col.0 + 1);
is_minimized = true;
}
// Watch out for "empty spans". If we get a span like 6..6, we
// want to just display a `^` at 6, so convert that to
// 6..7. This is degenerate input, but it's best to degrade
// gracefully -- and the parser likes to supply a span like
// that for EOF, in particular.
if lo.col == hi.col {
hi.col = CharPos(lo.col.0 + 1);
}
add_annotation_to_file(&mut output,
lo.file,
lo.line,
Annotation {
start_col: lo.col.0,
end_col: hi.col.0,
is_primary: span_label.is_primary,
is_minimized: is_minimized,
label: span_label.label.clone(),
});
}
}
output
}
/// Renders one annotated source line into `buffer`: the source text with its
/// line number and gutter, an underline row (`^` for primary, `-` for
/// secondary annotations), and label rows below, drawing `|` connectors for
/// labels that cannot sit on the underline row. `width_offset` is the column
/// where source text starts (after the line-number gutter).
fn render_source_line(&self,
buffer: &mut StyledBuffer,
file: Rc<FileMap>,
line: &Line,
width_offset: usize) {
let source_string = file.get_line(line.line_index - 1)
.unwrap_or("");
let line_offset = buffer.num_lines();
// First create the source line we will highlight.
buffer.puts(line_offset, width_offset, &source_string, Style::Quotation);
buffer.puts(line_offset,
0,
&(line.line_index.to_string()),
Style::LineNumber);
draw_col_separator(buffer, line_offset, width_offset - 2);
if line.annotations.is_empty() {
return;
}
// We want to display like this:
//
//      vec.push(vec.pop().unwrap());
//      ---      ^^^               _ previous borrow ends here
//      |        |
//      |        error occurs here
//      previous borrow of `vec` occurs here
//
// But there are some weird edge cases to be aware of:
//
//      vec.push(vec.pop().unwrap());
//      --------                    - previous borrow ends here
//      ||
//      |this makes no sense
//      previous borrow of `vec` occurs here
//
// For this reason, we group the lines into "highlight lines"
// and "annotations lines", where the highlight lines have the `~`.
// Sort the annotations by (start, end col)
let mut annotations = line.annotations.clone();
annotations.sort();
// Next, create the highlight line.
for annotation in &annotations {
for p in annotation.start_col..annotation.end_col {
if annotation.is_primary {
buffer.putc(line_offset + 1,
width_offset + p,
'^',
Style::UnderlinePrimary);
if !annotation.is_minimized {
buffer.set_style(line_offset, width_offset + p, Style::UnderlinePrimary);
}
} else {
buffer.putc(line_offset + 1,
width_offset + p,
'-',
Style::UnderlineSecondary);
if !annotation.is_minimized {
buffer.set_style(line_offset, width_offset + p, Style::UnderlineSecondary);
}
}
}
}
draw_col_separator(buffer, line_offset + 1, width_offset - 2);
// Now we are going to write labels in. To start, we'll exclude
// the annotations with no labels.
let (labeled_annotations, unlabeled_annotations): (Vec<_>, _) = annotations.into_iter()
.partition(|a| a.label.is_some());
// If there are no annotations that need text, we're done.
if labeled_annotations.is_empty() {
return;
}
// Now add the text labels. We try, when possible, to stick the rightmost
// annotation at the end of the highlight line:
//
//      vec.push(vec.pop().unwrap());
//      ---      ---               - previous borrow ends here
//
// But sometimes that's not possible because one of the other
// annotations overlaps it. For example, from the test
// `span_overlap_label`, we have the following annotations
// (written on distinct lines for clarity):
//
//      fn foo(x: u32) {
//      --------------
//             -
//
// In this case, we can't stick the rightmost-most label on
// the highlight line, or we would get:
//
//      fn foo(x: u32) {
//      -------- x_span
//      |
//      fn_span
//
// which is totally weird. Instead we want:
//
//      fn foo(x: u32) {
//      --------------
//      |      |
//      |      x_span
//      fn_span
//
// which is...less weird, at least. In fact, in general, if
// the rightmost span overlaps with any other span, we should
// use the "hang below" version, so we can at least make it
// clear where the span *starts*.
let mut labeled_annotations = &labeled_annotations[..];
// unwrap is safe: labeled_annotations was checked non-empty above.
match labeled_annotations.split_last().unwrap() {
(last, previous) => {
if previous.iter()
.chain(&unlabeled_annotations)
.all(|a| !overlaps(a, last)) {
// append the label afterwards; we keep it in a separate
// string
let highlight_label: String = format!(" {}", last.label.as_ref().unwrap());
if last.is_primary {
buffer.append(line_offset + 1, &highlight_label, Style::LabelPrimary);
} else {
buffer.append(line_offset + 1, &highlight_label, Style::LabelSecondary);
}
labeled_annotations = previous;
}
}
}
// If that's the last annotation, we're done
if labeled_annotations.is_empty() {
return;
}
for (index, annotation) in labeled_annotations.iter().enumerate() {
// Leave:
// - 1 extra line
// - One line for each thing that comes after
let comes_after = labeled_annotations.len() - index - 1;
let blank_lines = 3 + comes_after;
// For each blank line, draw a `|` at our column. The
// text ought to be long enough for this.
for index in 2..blank_lines {
if annotation.is_primary {
buffer.putc(line_offset + index,
width_offset + annotation.start_col,
'|',
Style::UnderlinePrimary);
} else {
buffer.putc(line_offset + index,
width_offset + annotation.start_col,
'|',
Style::UnderlineSecondary);
}
draw_col_separator(buffer, line_offset + index, width_offset - 2);
}
if annotation.is_primary {
buffer.puts(line_offset + blank_lines,
width_offset + annotation.start_col,
annotation.label.as_ref().unwrap(),
Style::LabelPrimary);
} else {
buffer.puts(line_offset + blank_lines,
width_offset + annotation.start_col,
annotation.label.as_ref().unwrap(),
Style::LabelSecondary);
}
draw_col_separator(buffer, line_offset + blank_lines, width_offset - 2);
}
}
fn get_multispan_max_line_num(&mut self, msp: &MultiSpan) -> usize {
let mut max = 0;
if let Some(ref cm) = self.cm {
for primary_span in msp.primary_spans() {
if primary_span != &DUMMY_SP && primary_span != &COMMAND_LINE_SP {
let hi = cm.lookup_char_pos(primary_span.hi);
if hi.line > max {
max = hi.line;
}
}
}
for span_label in msp.span_labels() {
if span_label.span != DUMMY_SP && span_label.span != COMMAND_LINE_SP {
let hi = cm.lookup_char_pos(span_label.span.hi);
if hi.line > max {
max = hi.line;
}
}
}
}
max
}
fn get_max_line_num(&mut self, span: &MultiSpan, children: &Vec<SubDiagnostic>) -> usize {
let mut max = 0;
let primary = self.get_multispan_max_line_num(span);
max = if primary > max { primary } else { max };
for sub in children {
let sub_result = self.get_multispan_max_line_num(&sub.span);
max = if sub_result > max { primary } else { max };
}
max
}
// This "fixes" MultiSpans that contain Spans that are pointing to locations inside of
// <*macros>. Since these locations are often difficult to read, we move these Spans from
// <*macros> to their corresponding use site.
//
// Returns true when at least one span was relocated, so the caller can add a
// "this error originates in a macro" note. No-op without a code map.
fn fix_multispan_in_std_macros(&mut self, span: &mut MultiSpan) -> bool {
let mut spans_updated = false;
if let Some(ref cm) = self.cm {
// (original span, replacement span) pairs collected before mutating.
let mut before_after: Vec<(Span, Span)> = vec![];
let mut new_labels: Vec<(Span, String)> = vec![];
// First, find all the spans in <*macros> and point instead at their use site
for sp in span.primary_spans() {
if (*sp == COMMAND_LINE_SP) || (*sp == DUMMY_SP) {
continue;
}
if cm.span_to_filename(sp.clone()).contains("macros>") {
let v = cm.macro_backtrace(sp.clone());
if let Some(use_site) = v.last() {
before_after.push((sp.clone(), use_site.call_site.clone()));
}
}
for trace in cm.macro_backtrace(sp.clone()).iter().rev() {
// Only show macro locations that are local
// and display them like a span_note
if let Some(def_site) = trace.def_site_span {
if (def_site == COMMAND_LINE_SP) || (def_site == DUMMY_SP) {
continue;
}
// Check to make sure we're not in any <*macros>
if !cm.span_to_filename(def_site).contains("macros>") &&
!trace.macro_decl_name.starts_with("#[") {
new_labels.push((trace.call_site,
"in this macro invocation".to_string()));
break;
}
}
}
}
for (label_span, label_text) in new_labels {
span.push_span_label(label_span, label_text);
}
// Same relocation pass for labeled spans (including labels just added).
for sp_label in span.span_labels() {
if (sp_label.span == COMMAND_LINE_SP) || (sp_label.span == DUMMY_SP) {
continue;
}
if cm.span_to_filename(sp_label.span.clone()).contains("macros>") {
let v = cm.macro_backtrace(sp_label.span.clone());
if let Some(use_site) = v.last() {
before_after.push((sp_label.span.clone(), use_site.call_site.clone()));
}
}
}
// After we have them, make sure we replace these 'bad' def sites with their use sites
for (before, after) in before_after {
span.replace(before, after);
spans_updated = true;
}
}
spans_updated
}
// This does a small "fix" for multispans by looking to see if it can find any that
// point directly at <*macros>. Since these are often difficult to read, this
// will change the span to point at the use site.
fn fix_multispans_in_std_macros(&mut self,
span: &mut MultiSpan,
children: &mut Vec<SubDiagnostic>) {
let mut spans_updated = self.fix_multispan_in_std_macros(span);
for child in children.iter_mut() {
spans_updated |= self.fix_multispan_in_std_macros(&mut child.span);
}
if spans_updated {
children.push(SubDiagnostic {
level: Level::Note,
message: "this error originates in a macro outside of the current crate"
.to_string(),
span: MultiSpan::new(),
render_span: None,
});
}
}
fn emit_message_default(&mut self,
msp: &MultiSpan,
msg: &str,
code: &Option<String>,
level: &Level,
max_line_num_len: usize,
is_secondary: bool)
-> io::Result<()> {
let mut buffer = StyledBuffer::new();
if msp.primary_spans().is_empty() && msp.span_labels().is_empty() && is_secondary {
// This is a secondary message with no span info
for _ in 0..max_line_num_len {
buffer.prepend(0, " ", Style::NoStyle);
}
draw_note_separator(&mut buffer, 0, max_line_num_len + 1);
buffer.append(0, &level.to_string(), Style::HeaderMsg);
buffer.append(0, ": ", Style::NoStyle);
buffer.append(0, msg, Style::NoStyle);
} else {
buffer.append(0, &level.to_string(), Style::Level(level.clone()));
match code {
&Some(ref code) => {
buffer.append(0, "[", Style::Level(level.clone()));
buffer.append(0, &code, Style::Level(level.clone()));
buffer.append(0, "]", Style::Level(level.clone()));
}
_ => {}
}
buffer.append(0, ": ", Style::HeaderMsg);
buffer.append(0, msg, Style::HeaderMsg);
}
// Preprocess all the annotations so that they are grouped by file and by line number
// This helps us quickly iterate over the whole message (including secondary file spans)
let mut annotated_files = self.preprocess_annotations(msp);
// Make sure our primary file comes first
let primary_lo = if let (Some(ref cm), Some(ref primary_span)) =
(self.cm.as_ref(), msp.primary_span().as_ref()) {
if primary_span != &&DUMMY_SP && primary_span != &&COMMAND_LINE_SP {
cm.lookup_char_pos(primary_span.lo)
} else {
try!(emit_to_destination(&buffer.render(), level, &mut self.dst));
return Ok(());
}
} else {
// If we don't have span information, emit and exit
emit_to_destination(&buffer.render(), level, &mut self.dst)?;
return Ok(());
};
if let Ok(pos) =
annotated_files.binary_search_by(|x| x.file.name.cmp(&primary_lo.file.name)) {
annotated_files.swap(0, pos);
}
// Print out the annotate source lines that correspond with the error
for annotated_file in annotated_files {
// print out the span location and spacer before we print the annotated source
// to do this, we need to know if this span will be primary
let is_primary = primary_lo.file.name == annotated_file.file.name;
if is_primary {
// remember where we are in the output buffer for easy reference
let buffer_msg_line_offset = buffer.num_lines();
buffer.prepend(buffer_msg_line_offset, "--> ", Style::LineNumber);
let loc = primary_lo.clone();
buffer.append(buffer_msg_line_offset,
&format!("{}:{}:{}", loc.file.name, loc.line, loc.col.0 + 1),
Style::LineAndColumn);
for _ in 0..max_line_num_len {
buffer.prepend(buffer_msg_line_offset, " ", Style::NoStyle);
}
} else {
// remember where we are in the output buffer for easy reference
let buffer_msg_line_offset = buffer.num_lines();
// Add spacing line
draw_col_separator(&mut buffer, buffer_msg_line_offset, max_line_num_len + 1);
// Then, the secondary file indicator
buffer.prepend(buffer_msg_line_offset + 1, "::: ", Style::LineNumber);
buffer.append(buffer_msg_line_offset + 1,
&annotated_file.file.name,
Style::LineAndColumn);
for _ in 0..max_line_num_len {
buffer.prepend(buffer_msg_line_offset + 1, " ", Style::NoStyle);
}
}
// Put in the spacer between the location and annotated source
let buffer_msg_line_offset = buffer.num_lines();
draw_col_separator_no_space(&mut buffer, buffer_msg_line_offset, max_line_num_len + 1);
// Next, output the annotate source for this file
for line_idx in 0..annotated_file.lines.len() {
self.render_source_line(&mut buffer,
annotated_file.file.clone(),
&annotated_file.lines[line_idx],
3 + max_line_num_len);
// check to see if we need to print out or elide lines that come between
// this annotated line and the next one
if line_idx < (annotated_file.lines.len() - 1) {
let line_idx_delta = annotated_file.lines[line_idx + 1].line_index -
annotated_file.lines[line_idx].line_index;
if line_idx_delta > 2 {
let last_buffer_line_num = buffer.num_lines();
buffer.puts(last_buffer_line_num, 0, "...", Style::LineNumber);
} else if line_idx_delta == 2 {
let unannotated_line = annotated_file.file
.get_line(annotated_file.lines[line_idx].line_index)
.unwrap_or("");
let last_buffer_line_num = buffer.num_lines();
buffer.puts(last_buffer_line_num,
0,
&(annotated_file.lines[line_idx + 1].line_index - 1)
.to_string(),
Style::LineNumber);
draw_col_separator(&mut buffer, last_buffer_line_num, 1 + max_line_num_len);
buffer.puts(last_buffer_line_num,
3 + max_line_num_len,
&unannotated_line,
Style::Quotation);
}
}
}
}
// final step: take our styled buffer, render it, then output it
try!(emit_to_destination(&buffer.render(), level, &mut self.dst));
Ok(())
}
/// Render a code suggestion: a `<level>: <msg>` header followed by up to
/// `MAX_HIGHLIGHT_LINES` lines of the spliced replacement source, without
/// line numbers but leaving room for them so the snippet lines up with the
/// error rendered just before it. No-op when no code map is available.
fn emit_suggestion_default(&mut self,
                           suggestion: &CodeSuggestion,
                           level: &Level,
                           msg: &str,
                           max_line_num_len: usize)
                           -> io::Result<()> {
    use std::borrow::Borrow;
    let primary_span = suggestion.msp.primary_span().unwrap();
    if let Some(ref cm) = self.cm {
        let mut buffer = StyledBuffer::new();
        buffer.append(0, &level.to_string(), Style::Level(level.clone()));
        buffer.append(0, ": ", Style::HeaderMsg);
        buffer.append(0, msg, Style::HeaderMsg);
        let lines = cm.span_to_lines(primary_span).unwrap();
        assert!(!lines.lines.is_empty());
        let complete = suggestion.splice_lines(cm.borrow());
        // print the suggestion without any line numbers, but leave
        // space for them. This helps with lining up with previous
        // snippets from the actual error being reported.
        let mut lines = complete.lines();
        let mut row_num = 1;
        for line in lines.by_ref().take(MAX_HIGHLIGHT_LINES) {
            draw_col_separator(&mut buffer, row_num, max_line_num_len + 1);
            buffer.append(row_num, line, Style::NoStyle);
            row_num += 1;
        }
        // if we elided some lines, add an ellipsis
        if let Some(_) = lines.next() {
            buffer.append(row_num, "...", Style::NoStyle);
        }
        try!(emit_to_destination(&buffer.render(), level, &mut self.dst));
    }
    Ok(())
}
/// Emit the top-level message followed by all of its sub-diagnostics, then a
/// trailing blank line, flushing the destination at the end. Any write failure
/// is fatal: an emitter that cannot emit has nothing useful left to do.
fn emit_messages_default(&mut self,
                         level: &Level,
                         message: &String,
                         code: &Option<String>,
                         span: &MultiSpan,
                         children: &Vec<SubDiagnostic>) {
    // Width of the widest line number mentioned anywhere in the diagnostic,
    // so every section aligns its gutter identically.
    let max_line_num = self.get_max_line_num(span, children);
    let max_line_num_len = max_line_num.to_string().len();
    match self.emit_message_default(span, message, code, level, max_line_num_len, false) {
        Ok(()) => {
            if !children.is_empty() {
                // Separator row between the primary message and its children.
                let mut buffer = StyledBuffer::new();
                draw_col_separator_no_space(&mut buffer, 0, max_line_num_len + 1);
                if let Err(e) = emit_to_destination(&buffer.render(), level, &mut self.dst) {
                    panic!("failed to emit error: {}", e);
                }
            }
            for child in children {
                // Dispatch on how the child asked to be rendered, collecting the
                // result so the failure handling is written only once.
                let rendered = match child.render_span {
                    Some(FullSpan(ref msp)) => {
                        self.emit_message_default(msp,
                                                  &child.message,
                                                  &None,
                                                  &child.level,
                                                  max_line_num_len,
                                                  true)
                    }
                    Some(Suggestion(ref cs)) => {
                        self.emit_suggestion_default(cs,
                                                     &child.level,
                                                     &child.message,
                                                     max_line_num_len)
                    }
                    None => {
                        self.emit_message_default(&child.span,
                                                  &child.message,
                                                  &None,
                                                  &child.level,
                                                  max_line_num_len,
                                                  true)
                    }
                };
                if let Err(e) = rendered {
                    panic!("failed to emit error: {}", e);
                }
            }
        }
        Err(e) => panic!("failed to emit error: {}", e),
    }
    // Blank line after the whole diagnostic, then force everything out.
    if let Err(e) = write!(&mut self.dst, "\n") {
        panic!("failed to emit error: {}", e);
    }
    if let Err(e) = self.dst.flush() {
        panic!("failed to emit error: {}", e);
    }
}
}
/// Draw the "| " gutter separator (line-number style) at the given buffer cell.
fn draw_col_separator(buffer: &mut StyledBuffer, line: usize, col: usize) {
    buffer.puts(line, col, "| ", Style::LineNumber);
}
/// Draw a bare "|" gutter separator (no trailing space) at the given buffer cell.
fn draw_col_separator_no_space(buffer: &mut StyledBuffer, line: usize, col: usize) {
    buffer.puts(line, col, "|", Style::LineNumber);
}
/// Draw the "= " marker used in front of span-less secondary notes.
fn draw_note_separator(buffer: &mut StyledBuffer, line: usize, col: usize) {
    buffer.puts(line, col, "= ", Style::LineNumber);
}
/// Two annotations overlap when either one's start column lies inside the
/// other's half-open `[start_col, end_col)` column range.
fn overlaps(a1: &Annotation, a2: &Annotation) -> bool {
    let a2_covers_a1 = (a2.start_col..a2.end_col).syntex_contains(a1.start_col);
    let a1_covers_a2 = (a1.start_col..a1.end_col).syntex_contains(a2.start_col);
    a2_covers_a1 || a1_covers_a2
}
/// Local stand-in for `Range::contains`: half-open membership test
/// `start <= item < end`, usable with any `PartialOrd` index type.
trait SyntexContains<Idx> {
    /// Return `true` when `item` lies inside the half-open range.
    fn syntex_contains(&self, item: Idx) -> bool;
}

impl<Idx> SyntexContains<Idx> for ops::Range<Idx>
    where Idx: PartialOrd
{
    fn syntex_contains(&self, item: Idx) -> bool {
        self.start <= item && item < self.end
    }
}
/// Write a fully rendered styled buffer to `dst`: for every styled fragment,
/// apply its style, write its text, then reset attributes; each buffer row
/// ends with a newline and the whole message is flushed at the end.
fn emit_to_destination(rendered_buffer: &Vec<Vec<StyledString>>,
                       lvl: &Level,
                       dst: &mut Destination)
                       -> io::Result<()> {
    use lock;
    // In order to prevent error message interleaving, where multiple error lines get intermixed
    // when multiple compiler processes error simultaneously, we emit errors with additional
    // steps.
    //
    // On Unix systems, we write into a buffered terminal rather than directly to a terminal. When
    // the .flush() is called we take the buffer created from the buffered writes and write it at
    // one shot. Because the Unix systems use ANSI for the colors, which is a text-based styling
    // scheme, this buffered approach works and maintains the styling.
    //
    // On Windows, styling happens through calls to a terminal API. This prevents us from using the
    // same buffering approach. Instead, we use a global Windows mutex, which we acquire long
    // enough to output the full error message, then we release.
    let _buffer_lock = lock::acquire_global_lock("rustc_errors");
    for line in rendered_buffer {
        for part in line {
            try!(dst.apply_style(lvl.clone(), part.style));
            try!(write!(dst, "{}", part.text));
            try!(dst.reset_attrs());
        }
        try!(write!(dst, "\n"));
    }
    try!(dst.flush());
    Ok(())
}
#[cfg(unix)]
/// Return `true` when stderr is attached to a terminal (POSIX `isatty`).
fn stderr_isatty() -> bool {
    use libc;
    unsafe { libc::isatty(libc::STDERR_FILENO) != 0 }
}
#[cfg(windows)]
/// Return `true` when stderr is attached to a console on Windows.
// NOTE(review): relies on `GetConsoleMode` succeeding only for real console
// handles — a redirected stderr makes the call fail, yielding `false`.
fn stderr_isatty() -> bool {
    type DWORD = u32;
    type BOOL = i32;
    type HANDLE = *mut u8;
    // STD_ERROR_HANDLE is defined by the Win32 API as (DWORD)-12.
    const STD_ERROR_HANDLE: DWORD = -12i32 as DWORD;
    extern "system" {
        fn GetStdHandle(which: DWORD) -> HANDLE;
        fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: *mut DWORD) -> BOOL;
    }
    unsafe {
        let handle = GetStdHandle(STD_ERROR_HANDLE);
        let mut out = 0;
        GetConsoleMode(handle, &mut out) != 0
    }
}
/// A `term` terminal whose underlying writer is `BufferedWriter`, so an entire
/// styled message can be accumulated in memory and flushed to stderr at once.
pub type BufferedStderr = term::Terminal<Output = BufferedWriter> + Send;
/// Where rendered diagnostics are sent.
pub enum Destination {
    /// An unbuffered styled terminal.
    Terminal(Box<term::StderrTerminal>),
    /// A styled terminal writing through `BufferedWriter` (see `from_stderr`).
    BufferedTerminal(Box<BufferedStderr>),
    /// Any plain writer; `apply_style`/`reset_attrs` are no-ops for this variant.
    Raw(Box<Write + Send>),
}
/// Buffered writer gives us a way on Unix to buffer up an entire error message
/// before we output it. This helps to prevent interleaving of multiple error
/// messages when multiple compiler processes error simultaneously.
pub struct BufferedWriter {
    buffer: Vec<u8>,
}

impl BufferedWriter {
    // note: we use _new because the conditional compilation at its use site may make this
    // this function unused on some platforms
    fn _new() -> BufferedWriter {
        BufferedWriter { buffer: Vec::new() }
    }
}

impl Write for BufferedWriter {
    /// Accumulate `buf` in memory; nothing reaches stderr until `flush`.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.buffer.extend_from_slice(buf);
        Ok(buf.len())
    }

    /// Write the accumulated bytes to stderr in one shot, then reset the
    /// buffer regardless of whether the write succeeded.
    fn flush(&mut self) -> io::Result<()> {
        let mut stderr = io::stderr();
        let result = stderr
            .write_all(&self.buffer)
            .and_then(|_| stderr.flush());
        self.buffer.clear();
        result
    }
}
impl Destination {
    #[cfg(not(windows))]
    /// When not on Windows, prefer the buffered terminal so that we can buffer an entire error
    /// to be emitted at one time.
    fn from_stderr() -> Destination {
        let stderr: Option<Box<BufferedStderr>> =
            term::TerminfoTerminal::new(BufferedWriter::_new())
                .map(|t| Box::new(t) as Box<BufferedStderr>);
        match stderr {
            Some(t) => BufferedTerminal(t),
            // No terminfo entry: fall back to plain, unstyled stderr.
            None => Raw(Box::new(io::stderr())),
        }
    }
    #[cfg(windows)]
    /// Return a normal, unbuffered terminal when on Windows.
    fn from_stderr() -> Destination {
        // Try terminfo first, then the native Win32 console, then raw stderr.
        let stderr: Option<Box<term::StderrTerminal>> = term::TerminfoTerminal::new(io::stderr())
            .map(|t| Box::new(t) as Box<term::StderrTerminal>)
            .or_else(|| {
                term::WinConsole::new(io::stderr())
                    .ok()
                    .map(|t| Box::new(t) as Box<term::StderrTerminal>)
            });
        match stderr {
            Some(t) => Terminal(t),
            None => Raw(Box::new(io::stderr())),
        }
    }
    /// Set the terminal attributes appropriate for rendering `style` at
    /// severity `lvl`; styles that render as plain text set nothing.
    fn apply_style(&mut self, lvl: Level, style: Style) -> io::Result<()> {
        match style {
            Style::FileNameStyle | Style::LineAndColumn => {}
            Style::LineNumber => {
                try!(self.start_attr(term::Attr::Bold));
                // Windows consoles render BRIGHT_BLUE poorly, so use cyan there.
                if cfg!(windows) {
                    try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_CYAN)));
                } else {
                    try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_BLUE)));
                }
            }
            Style::ErrorCode => {
                try!(self.start_attr(term::Attr::Bold));
                try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_MAGENTA)));
            }
            Style::Quotation => {}
            Style::OldSchoolNote => {
                try!(self.start_attr(term::Attr::Bold));
                try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_GREEN)));
            }
            Style::OldSchoolNoteText | Style::HeaderMsg => {
                try!(self.start_attr(term::Attr::Bold));
                if cfg!(windows) {
                    try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_WHITE)));
                }
            }
            Style::UnderlinePrimary | Style::LabelPrimary => {
                // Primary underlines/labels take the color of the diagnostic level.
                try!(self.start_attr(term::Attr::Bold));
                try!(self.start_attr(term::Attr::ForegroundColor(lvl.color())));
            }
            Style::UnderlineSecondary |
            Style::LabelSecondary => {
                try!(self.start_attr(term::Attr::Bold));
                if cfg!(windows) {
                    try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_CYAN)));
                } else {
                    try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_BLUE)));
                }
            }
            Style::NoStyle => {}
            Style::Level(l) => {
                try!(self.start_attr(term::Attr::Bold));
                try!(self.start_attr(term::Attr::ForegroundColor(l.color())));
            }
        }
        Ok(())
    }
    /// Apply a single terminal attribute; silently ignored for `Raw` sinks.
    fn start_attr(&mut self, attr: term::Attr) -> io::Result<()> {
        match *self {
            Terminal(ref mut t) => {
                try!(t.attr(attr));
            }
            BufferedTerminal(ref mut t) => {
                try!(t.attr(attr));
            }
            Raw(_) => {}
        }
        Ok(())
    }
    /// Reset all terminal attributes; silently ignored for `Raw` sinks.
    fn reset_attrs(&mut self) -> io::Result<()> {
        match *self {
            Terminal(ref mut t) => {
                try!(t.reset());
            }
            BufferedTerminal(ref mut t) => {
                try!(t.reset());
            }
            Raw(_) => {}
        }
        Ok(())
    }
}
impl Write for Destination {
    /// Forward writes to whichever underlying sink this destination wraps.
    fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
        match *self {
            Terminal(ref mut term) => term.write(bytes),
            BufferedTerminal(ref mut term) => term.write(bytes),
            Raw(ref mut raw) => raw.write(bytes),
        }
    }

    /// Forward flushes likewise; for `BufferedTerminal` this is the point
    /// where the whole buffered message actually reaches stderr.
    fn flush(&mut self) -> io::Result<()> {
        match *self {
            Terminal(ref mut term) => term.flush(),
            BufferedTerminal(ref mut term) => term.flush(),
            Raw(ref mut raw) => raw.flush(),
        }
    }
}
Remove a stray `?` operator; this file consistently uses `try!` for error propagation.
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use self::Destination::*;
use syntax_pos::{COMMAND_LINE_SP, DUMMY_SP, FileMap, Span, MultiSpan, CharPos};
use {Level, CodeSuggestion, DiagnosticBuilder, SubDiagnostic, CodeMapper};
use RenderSpan::*;
use snippet::{StyledString, Style, Annotation, Line};
use styled_buffer::StyledBuffer;
use std::io::prelude::*;
use std::io;
use std::ops;
use std::rc::Rc;
use term;
/// Emitter trait for emitting errors. Implementations decide how and where a
/// diagnostic is rendered.
pub trait Emitter {
    /// Emit a structured diagnostic.
    fn emit(&mut self, db: &DiagnosticBuilder);
}
impl Emitter for EmitterWriter {
    /// Render `db` to this writer's destination. Works on clones of the span
    /// and children so the macro-span fixups never mutate the diagnostic itself.
    fn emit(&mut self, db: &DiagnosticBuilder) {
        let mut children = db.children.clone();
        let mut primary_span = db.span.clone();
        self.fix_multispans_in_std_macros(&mut primary_span, &mut children);
        self.emit_messages_default(&db.level, &db.message, &db.code, &primary_span, &children);
    }
}
/// maximum number of lines we will print for each error; arbitrary.
/// Suggestions longer than this are truncated with a trailing "..." row.
pub const MAX_HIGHLIGHT_LINES: usize = 6;
/// Whether diagnostics should be rendered with terminal colors.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ColorConfig {
    /// Color only when stderr is a terminal.
    Auto,
    /// Always color, even when output is redirected.
    Always,
    /// Never color.
    Never,
}
impl ColorConfig {
    /// Decide whether styled output should be produced for this configuration;
    /// `Auto` defers to whether stderr is attached to a terminal.
    fn use_color(&self) -> bool {
        match *self {
            ColorConfig::Auto => stderr_isatty(),
            ColorConfig::Always => true,
            ColorConfig::Never => false,
        }
    }
}
/// An `Emitter` that renders diagnostics as styled text.
pub struct EmitterWriter {
    // Sink for rendered output (terminal, buffered terminal, or raw writer).
    dst: Destination,
    // Code map used to resolve spans to file/line/column; `None` disables all
    // span-dependent rendering.
    cm: Option<Rc<CodeMapper>>,
}
/// One source file together with its annotated lines, grouped so the renderer
/// can walk a diagnostic one file at a time.
struct FileWithAnnotatedLines {
    file: Rc<FileMap>,
    lines: Vec<Line>,
}
/// Do not use this for messages that end in `\n` – use `println_maybe_styled` instead. See
/// `EmitterWriter::print_maybe_styled` for details.
// The trailing `false` distinguishes this from the `println` variant below,
// which passes `true` for the same final argument.
macro_rules! print_maybe_styled {
    ($dst: expr, $style: expr, $($arg: tt)*) => {
        $dst.print_maybe_styled(format_args!($($arg)*), $style, false)
    }
}
/// Line-terminated counterpart of `print_maybe_styled`; passes `true` as the
/// final argument where the `print` variant passes `false`.
macro_rules! println_maybe_styled {
    ($dst: expr, $style: expr, $($arg: tt)*) => {
        $dst.print_maybe_styled(format_args!($($arg)*), $style, true)
    }
}
impl EmitterWriter {
/// Build an `EmitterWriter` targeting stderr, honoring `color_config`:
/// a styled destination when color is enabled, plain raw stderr otherwise.
pub fn stderr(color_config: ColorConfig, code_map: Option<Rc<CodeMapper>>) -> EmitterWriter {
    let dst = if color_config.use_color() {
        Destination::from_stderr()
    } else {
        Raw(Box::new(io::stderr()))
    };
    EmitterWriter {
        dst: dst,
        cm: code_map,
    }
}
/// Build an `EmitterWriter` that writes unstyled output to an arbitrary sink.
pub fn new(dst: Box<Write + Send>, code_map: Option<Rc<CodeMapper>>) -> EmitterWriter {
    EmitterWriter {
        dst: Raw(dst),
        cm: code_map,
    }
}
/// Convert every labeled span in `msp` into an `Annotation` and group the
/// annotations by file and then by line, so the renderer can iterate the
/// whole message one file at a time. Returns an empty vector when there is
/// no code map. Multi-line spans are minimized to one character on their
/// first line (`is_minimized` records this).
fn preprocess_annotations(&self, msp: &MultiSpan) -> Vec<FileWithAnnotatedLines> {
    // Insert `ann` at (file, line_index), creating the file and/or line entry
    // on first sight; lines stay sorted within their file.
    fn add_annotation_to_file(file_vec: &mut Vec<FileWithAnnotatedLines>,
                              file: Rc<FileMap>,
                              line_index: usize,
                              ann: Annotation) {
        for slot in file_vec.iter_mut() {
            // Look through each of our files for the one we're adding to
            if slot.file.name == file.name {
                // See if we already have a line for it
                for line_slot in &mut slot.lines {
                    if line_slot.line_index == line_index {
                        line_slot.annotations.push(ann);
                        return;
                    }
                }
                // We don't have a line yet, create one
                slot.lines.push(Line {
                    line_index: line_index,
                    annotations: vec![ann],
                });
                slot.lines.sort();
                return;
            }
        }
        // This is the first time we're seeing the file
        file_vec.push(FileWithAnnotatedLines {
            file: file,
            lines: vec![Line {
                line_index: line_index,
                annotations: vec![ann],
            }],
        });
    }
    let mut output = vec![];
    if let Some(ref cm) = self.cm {
        for span_label in msp.span_labels() {
            if span_label.span == DUMMY_SP || span_label.span == COMMAND_LINE_SP {
                continue;
            }
            let lo = cm.lookup_char_pos(span_label.span.lo);
            let mut hi = cm.lookup_char_pos(span_label.span.hi);
            let mut is_minimized = false;
            // If the span is multi-line, simplify down to the span of one character
            if lo.line != hi.line {
                hi.line = lo.line;
                hi.col = CharPos(lo.col.0 + 1);
                is_minimized = true;
            }
            // Watch out for "empty spans". If we get a span like 6..6, we
            // want to just display a `^` at 6, so convert that to
            // 6..7. This is degenerate input, but it's best to degrade
            // gracefully -- and the parser likes to supply a span like
            // that for EOF, in particular.
            if lo.col == hi.col {
                hi.col = CharPos(lo.col.0 + 1);
            }
            add_annotation_to_file(&mut output,
                                   lo.file,
                                   lo.line,
                                   Annotation {
                                       start_col: lo.col.0,
                                       end_col: hi.col.0,
                                       is_primary: span_label.is_primary,
                                       is_minimized: is_minimized,
                                       label: span_label.label.clone(),
                                   });
        }
    }
    output
}
/// Render one annotated source line into `buffer`: the quoted source text with
/// its line number and gutter, a highlight line of `^`/`-` markers beneath it,
/// and then the annotation labels, hanging them below on `|` columns whenever
/// the rightmost label cannot be appended inline. `width_offset` is the column
/// where source text starts (line-number width plus gutter).
fn render_source_line(&self,
                      buffer: &mut StyledBuffer,
                      file: Rc<FileMap>,
                      line: &Line,
                      width_offset: usize) {
    // `line_index` is 1-based; `get_line` takes a 0-based index.
    let source_string = file.get_line(line.line_index - 1)
        .unwrap_or("");
    let line_offset = buffer.num_lines();
    // First create the source line we will highlight.
    buffer.puts(line_offset, width_offset, &source_string, Style::Quotation);
    buffer.puts(line_offset,
                0,
                &(line.line_index.to_string()),
                Style::LineNumber);
    draw_col_separator(buffer, line_offset, width_offset - 2);
    if line.annotations.is_empty() {
        return;
    }
    // We want to display like this:
    //
    //      vec.push(vec.pop().unwrap());
    //      ---      ^^^               _ previous borrow ends here
    //      |        |
    //      |        error occurs here
    //      previous borrow of `vec` occurs here
    //
    // But there are some weird edge cases to be aware of:
    //
    //      vec.push(vec.pop().unwrap());
    //      --------                 - previous borrow ends here
    //      ||
    //      |this makes no sense
    //      previous borrow of `vec` occurs here
    //
    // For this reason, we group the lines into "highlight lines"
    // and "annotations lines", where the highlight lines have the `~`.
    // Sort the annotations by (start, end col)
    let mut annotations = line.annotations.clone();
    annotations.sort();
    // Next, create the highlight line.
    for annotation in &annotations {
        for p in annotation.start_col..annotation.end_col {
            if annotation.is_primary {
                buffer.putc(line_offset + 1,
                            width_offset + p,
                            '^',
                            Style::UnderlinePrimary);
                if !annotation.is_minimized {
                    buffer.set_style(line_offset, width_offset + p, Style::UnderlinePrimary);
                }
            } else {
                buffer.putc(line_offset + 1,
                            width_offset + p,
                            '-',
                            Style::UnderlineSecondary);
                if !annotation.is_minimized {
                    buffer.set_style(line_offset, width_offset + p, Style::UnderlineSecondary);
                }
            }
        }
    }
    draw_col_separator(buffer, line_offset + 1, width_offset - 2);
    // Now we are going to write labels in. To start, we'll exclude
    // the annotations with no labels.
    let (labeled_annotations, unlabeled_annotations): (Vec<_>, _) = annotations.into_iter()
        .partition(|a| a.label.is_some());
    // If there are no annotations that need text, we're done.
    if labeled_annotations.is_empty() {
        return;
    }
    // Now add the text labels. We try, when possible, to stick the rightmost
    // annotation at the end of the highlight line:
    //
    //      vec.push(vec.pop().unwrap());
    //      ---      ---               - previous borrow ends here
    //
    // But sometimes that's not possible because one of the other
    // annotations overlaps it. For example, from the test
    // `span_overlap_label`, we have the following annotations
    // (written on distinct lines for clarity):
    //
    //      fn foo(x: u32) {
    //      --------------
    //             -
    //
    // In this case, we can't stick the rightmost-most label on
    // the highlight line, or we would get:
    //
    //      fn foo(x: u32) {
    //      -------- x_span
    //      |
    //      fn_span
    //
    // which is totally weird. Instead we want:
    //
    //      fn foo(x: u32) {
    //      --------------
    //      |      |
    //      |      x_span
    //      fn_span
    //
    // which is...less weird, at least. In fact, in general, if
    // the rightmost span overlaps with any other span, we should
    // use the "hang below" version, so we can at least make it
    // clear where the span *starts*.
    let mut labeled_annotations = &labeled_annotations[..];
    match labeled_annotations.split_last().unwrap() {
        (last, previous) => {
            // Inline the last label only when nothing else overlaps it.
            if previous.iter()
                .chain(&unlabeled_annotations)
                .all(|a| !overlaps(a, last)) {
                // append the label afterwards; we keep it in a separate
                // string
                let highlight_label: String = format!(" {}", last.label.as_ref().unwrap());
                if last.is_primary {
                    buffer.append(line_offset + 1, &highlight_label, Style::LabelPrimary);
                } else {
                    buffer.append(line_offset + 1, &highlight_label, Style::LabelSecondary);
                }
                labeled_annotations = previous;
            }
        }
    }
    // If that's the last annotation, we're done
    if labeled_annotations.is_empty() {
        return;
    }
    for (index, annotation) in labeled_annotations.iter().enumerate() {
        // Leave:
        // - 1 extra line
        // - One line for each thing that comes after
        let comes_after = labeled_annotations.len() - index - 1;
        let blank_lines = 3 + comes_after;
        // For each blank line, draw a `|` at our column. The
        // text ought to be long enough for this.
        for index in 2..blank_lines {
            if annotation.is_primary {
                buffer.putc(line_offset + index,
                            width_offset + annotation.start_col,
                            '|',
                            Style::UnderlinePrimary);
            } else {
                buffer.putc(line_offset + index,
                            width_offset + annotation.start_col,
                            '|',
                            Style::UnderlineSecondary);
            }
            draw_col_separator(buffer, line_offset + index, width_offset - 2);
        }
        if annotation.is_primary {
            buffer.puts(line_offset + blank_lines,
                        width_offset + annotation.start_col,
                        annotation.label.as_ref().unwrap(),
                        Style::LabelPrimary);
        } else {
            buffer.puts(line_offset + blank_lines,
                        width_offset + annotation.start_col,
                        annotation.label.as_ref().unwrap(),
                        Style::LabelSecondary);
        }
        draw_col_separator(buffer, line_offset + blank_lines, width_offset - 2);
    }
}
/// Highest 1-based source line referenced by any usable span in `msp`;
/// 0 when there is no code map or every span is a dummy/command-line span.
fn get_multispan_max_line_num(&mut self, msp: &MultiSpan) -> usize {
    let mut max = 0;
    if let Some(ref cm) = self.cm {
        for primary_span in msp.primary_spans() {
            if primary_span != &DUMMY_SP && primary_span != &COMMAND_LINE_SP {
                let hi = cm.lookup_char_pos(primary_span.hi);
                max = ::std::cmp::max(max, hi.line);
            }
        }
        for span_label in msp.span_labels() {
            if span_label.span != DUMMY_SP && span_label.span != COMMAND_LINE_SP {
                let hi = cm.lookup_char_pos(span_label.span.hi);
                max = ::std::cmp::max(max, hi.line);
            }
        }
    }
    max
}
/// Highest source line referenced by the primary span or any child span,
/// used to size the line-number gutter.
///
/// Fix: the original wrote `max = if sub_result > max { primary } else { max }`,
/// assigning the PRIMARY span's maximum instead of the child's (`sub_result`).
/// A child whose span reached a later line than the primary therefore produced
/// a too-small line-number width.
fn get_max_line_num(&mut self, span: &MultiSpan, children: &Vec<SubDiagnostic>) -> usize {
    let mut max = self.get_multispan_max_line_num(span);
    for sub in children {
        let sub_result = self.get_multispan_max_line_num(&sub.span);
        if sub_result > max {
            max = sub_result;
        }
    }
    max
}
// This "fixes" MultiSpans that contain Spans that are pointing to locations inside of
// <*macros>. Since these locations are often difficult to read, we move these Spans from
// <*macros> to their corresponding use site.
// Returns `true` when at least one span was actually replaced, so the caller
// can append an explanatory note. No-op (returns `false`) without a code map.
fn fix_multispan_in_std_macros(&mut self, span: &mut MultiSpan) -> bool {
    let mut spans_updated = false;
    if let Some(ref cm) = self.cm {
        // Pairs of (span inside <*macros>, use-site span it should become).
        let mut before_after: Vec<(Span, Span)> = vec![];
        // Extra "in this macro invocation" labels to attach afterwards.
        let mut new_labels: Vec<(Span, String)> = vec![];
        // First, find all the spans in <*macros> and point instead at their use site
        for sp in span.primary_spans() {
            if (*sp == COMMAND_LINE_SP) || (*sp == DUMMY_SP) {
                continue;
            }
            if cm.span_to_filename(sp.clone()).contains("macros>") {
                let v = cm.macro_backtrace(sp.clone());
                if let Some(use_site) = v.last() {
                    before_after.push((sp.clone(), use_site.call_site.clone()));
                }
            }
            for trace in cm.macro_backtrace(sp.clone()).iter().rev() {
                // Only show macro locations that are local
                // and display them like a span_note
                if let Some(def_site) = trace.def_site_span {
                    if (def_site == COMMAND_LINE_SP) || (def_site == DUMMY_SP) {
                        continue;
                    }
                    // Check to make sure we're not in any <*macros>
                    if !cm.span_to_filename(def_site).contains("macros>") &&
                       !trace.macro_decl_name.starts_with("#[") {
                        new_labels.push((trace.call_site,
                                         "in this macro invocation".to_string()));
                        break;
                    }
                }
            }
        }
        for (label_span, label_text) in new_labels {
            span.push_span_label(label_span, label_text);
        }
        // Secondary (labeled) spans get the same <*macros> -> use-site treatment.
        for sp_label in span.span_labels() {
            if (sp_label.span == COMMAND_LINE_SP) || (sp_label.span == DUMMY_SP) {
                continue;
            }
            if cm.span_to_filename(sp_label.span.clone()).contains("macros>") {
                let v = cm.macro_backtrace(sp_label.span.clone());
                if let Some(use_site) = v.last() {
                    before_after.push((sp_label.span.clone(), use_site.call_site.clone()));
                }
            }
        }
        // After we have them, make sure we replace these 'bad' def sites with their use sites
        for (before, after) in before_after {
            span.replace(before, after);
            spans_updated = true;
        }
    }
    spans_updated
}
// This does a small "fix" for multispans by looking to see if it can find any that
// point directly at <*macros>. Since these are often difficult to read, this
// will change the span to point at the use site.
fn fix_multispans_in_std_macros(&mut self,
                                span: &mut MultiSpan,
                                children: &mut Vec<SubDiagnostic>) {
    // Fix the primary multispan and every child span; `|=` keeps calling the
    // helper for each child even after one has already reported an update.
    let mut spans_updated = self.fix_multispan_in_std_macros(span);
    for child in children.iter_mut() {
        spans_updated |= self.fix_multispan_in_std_macros(&mut child.span);
    }
    if spans_updated {
        // Tell the user why the reported spans differ from their own source.
        children.push(SubDiagnostic {
            level: Level::Note,
            message: "this error originates in a macro outside of the current crate"
                .to_string(),
            span: MultiSpan::new(),
            render_span: None,
        });
    }
}
/// Render one message (primary or secondary) plus every annotated source
/// section that belongs to it into a fresh `StyledBuffer`, then emit it.
/// A secondary message with no span information becomes a `= <level>: <msg>`
/// row; otherwise the header is `<level>[code]: <msg>` followed by one
/// annotated block per file that carries spans (primary file first).
fn emit_message_default(&mut self,
                        msp: &MultiSpan,
                        msg: &str,
                        code: &Option<String>,
                        level: &Level,
                        max_line_num_len: usize,
                        is_secondary: bool)
                        -> io::Result<()> {
    let mut buffer = StyledBuffer::new();
    if msp.primary_spans().is_empty() && msp.span_labels().is_empty() && is_secondary {
        // This is a secondary message with no span info
        for _ in 0..max_line_num_len {
            buffer.prepend(0, " ", Style::NoStyle);
        }
        draw_note_separator(&mut buffer, 0, max_line_num_len + 1);
        buffer.append(0, &level.to_string(), Style::HeaderMsg);
        buffer.append(0, ": ", Style::NoStyle);
        buffer.append(0, msg, Style::NoStyle);
    } else {
        buffer.append(0, &level.to_string(), Style::Level(level.clone()));
        match code {
            &Some(ref code) => {
                buffer.append(0, "[", Style::Level(level.clone()));
                buffer.append(0, &code, Style::Level(level.clone()));
                buffer.append(0, "]", Style::Level(level.clone()));
            }
            _ => {}
        }
        buffer.append(0, ": ", Style::HeaderMsg);
        buffer.append(0, msg, Style::HeaderMsg);
    }
    // Preprocess all the annotations so that they are grouped by file and by line number
    // This helps us quickly iterate over the whole message (including secondary file spans)
    let mut annotated_files = self.preprocess_annotations(msp);
    // Make sure our primary file comes first
    let primary_lo = if let (Some(ref cm), Some(ref primary_span)) =
        (self.cm.as_ref(), msp.primary_span().as_ref()) {
        if primary_span != &&DUMMY_SP && primary_span != &&COMMAND_LINE_SP {
            cm.lookup_char_pos(primary_span.lo)
        } else {
            // Dummy/command-line span: emit just the header and stop.
            try!(emit_to_destination(&buffer.render(), level, &mut self.dst));
            return Ok(());
        }
    } else {
        // If we don't have span information, emit and exit
        try!(emit_to_destination(&buffer.render(), level, &mut self.dst));
        return Ok(());
    };
    if let Ok(pos) =
        annotated_files.binary_search_by(|x| x.file.name.cmp(&primary_lo.file.name)) {
        annotated_files.swap(0, pos);
    }
    // Print out the annotate source lines that correspond with the error
    for annotated_file in annotated_files {
        // print out the span location and spacer before we print the annotated source
        // to do this, we need to know if this span will be primary
        let is_primary = primary_lo.file.name == annotated_file.file.name;
        if is_primary {
            // remember where we are in the output buffer for easy reference
            let buffer_msg_line_offset = buffer.num_lines();
            buffer.prepend(buffer_msg_line_offset, "--> ", Style::LineNumber);
            let loc = primary_lo.clone();
            buffer.append(buffer_msg_line_offset,
                          &format!("{}:{}:{}", loc.file.name, loc.line, loc.col.0 + 1),
                          Style::LineAndColumn);
            for _ in 0..max_line_num_len {
                buffer.prepend(buffer_msg_line_offset, " ", Style::NoStyle);
            }
        } else {
            // remember where we are in the output buffer for easy reference
            let buffer_msg_line_offset = buffer.num_lines();
            // Add spacing line
            draw_col_separator(&mut buffer, buffer_msg_line_offset, max_line_num_len + 1);
            // Then, the secondary file indicator
            buffer.prepend(buffer_msg_line_offset + 1, "::: ", Style::LineNumber);
            buffer.append(buffer_msg_line_offset + 1,
                          &annotated_file.file.name,
                          Style::LineAndColumn);
            for _ in 0..max_line_num_len {
                buffer.prepend(buffer_msg_line_offset + 1, " ", Style::NoStyle);
            }
        }
        // Put in the spacer between the location and annotated source
        let buffer_msg_line_offset = buffer.num_lines();
        draw_col_separator_no_space(&mut buffer, buffer_msg_line_offset, max_line_num_len + 1);
        // Next, output the annotate source for this file
        for line_idx in 0..annotated_file.lines.len() {
            self.render_source_line(&mut buffer,
                                    annotated_file.file.clone(),
                                    &annotated_file.lines[line_idx],
                                    3 + max_line_num_len);
            // check to see if we need to print out or elide lines that come between
            // this annotated line and the next one
            if line_idx < (annotated_file.lines.len() - 1) {
                let line_idx_delta = annotated_file.lines[line_idx + 1].line_index -
                                     annotated_file.lines[line_idx].line_index;
                if line_idx_delta > 2 {
                    let last_buffer_line_num = buffer.num_lines();
                    buffer.puts(last_buffer_line_num, 0, "...", Style::LineNumber);
                } else if line_idx_delta == 2 {
                    // Exactly one line between two annotated lines: print it
                    // unannotated rather than eliding it.
                    let unannotated_line = annotated_file.file
                        .get_line(annotated_file.lines[line_idx].line_index)
                        .unwrap_or("");
                    let last_buffer_line_num = buffer.num_lines();
                    buffer.puts(last_buffer_line_num,
                                0,
                                &(annotated_file.lines[line_idx + 1].line_index - 1)
                                    .to_string(),
                                Style::LineNumber);
                    draw_col_separator(&mut buffer, last_buffer_line_num, 1 + max_line_num_len);
                    buffer.puts(last_buffer_line_num,
                                3 + max_line_num_len,
                                &unannotated_line,
                                Style::Quotation);
                }
            }
        }
    }
    // final step: take our styled buffer, render it, then output it
    try!(emit_to_destination(&buffer.render(), level, &mut self.dst));
    Ok(())
}
/// Render a code suggestion: a `<level>: <msg>` header followed by up to
/// `MAX_HIGHLIGHT_LINES` lines of the spliced replacement source, leaving
/// room for (but not printing) line numbers so it aligns with the error
/// snippet above. No-op when no code map is available.
fn emit_suggestion_default(&mut self,
                           suggestion: &CodeSuggestion,
                           level: &Level,
                           msg: &str,
                           max_line_num_len: usize)
                           -> io::Result<()> {
    use std::borrow::Borrow;
    let primary_span = suggestion.msp.primary_span().unwrap();
    if let Some(ref cm) = self.cm {
        let mut buffer = StyledBuffer::new();
        // Header row: "<level>: <msg>".
        buffer.append(0, &level.to_string(), Style::Level(level.clone()));
        buffer.append(0, ": ", Style::HeaderMsg);
        buffer.append(0, msg, Style::HeaderMsg);
        let lines = cm.span_to_lines(primary_span).unwrap();
        assert!(!lines.lines.is_empty());
        let complete = suggestion.splice_lines(cm.borrow());
        // print the suggestion without any line numbers, but leave
        // space for them. This helps with lining up with previous
        // snippets from the actual error being reported.
        let mut lines = complete.lines();
        let mut row_num = 1;
        for line in lines.by_ref().take(MAX_HIGHLIGHT_LINES) {
            draw_col_separator(&mut buffer, row_num, max_line_num_len + 1);
            buffer.append(row_num, line, Style::NoStyle);
            row_num += 1;
        }
        // if we elided some lines, add an ellipsis
        if lines.next().is_some() {
            buffer.append(row_num, "...", Style::NoStyle);
        }
        try!(emit_to_destination(&buffer.render(), level, &mut self.dst));
    }
    Ok(())
}
/// Emits the main diagnostic followed by all of its sub-diagnostics
/// (notes, spanned children, suggestions), then a terminating blank line.
///
/// Any I/O failure while emitting aborts the process with a panic, since a
/// compiler that cannot report errors cannot do anything useful.
fn emit_messages_default(&mut self,
                         level: &Level,
                         message: &String,
                         code: &Option<String>,
                         span: &MultiSpan,
                         children: &Vec<SubDiagnostic>) {
    // Line-number gutter width is determined by the largest line number
    // appearing anywhere in this diagnostic.
    let max_line_num = self.get_max_line_num(span, children);
    let max_line_num_len = max_line_num.to_string().len();

    match self.emit_message_default(span, message, code, level, max_line_num_len, false) {
        Ok(()) => {
            if !children.is_empty() {
                // Print a bare "|" separator line between the main message
                // and its children.
                let mut buffer = StyledBuffer::new();
                draw_col_separator_no_space(&mut buffer, 0, max_line_num_len + 1);
                if let Err(e) = emit_to_destination(&buffer.render(), level, &mut self.dst) {
                    panic!("failed to emit error: {}", e);
                }
            }
            for child in children {
                // Dispatch on the child's render span kind, collecting the
                // emission result so the error handling is written once.
                let emitted = match child.render_span {
                    Some(FullSpan(ref msp)) => {
                        self.emit_message_default(msp,
                                                  &child.message,
                                                  &None,
                                                  &child.level,
                                                  max_line_num_len,
                                                  true)
                    }
                    Some(Suggestion(ref cs)) => {
                        self.emit_suggestion_default(cs,
                                                     &child.level,
                                                     &child.message,
                                                     max_line_num_len)
                    }
                    None => {
                        self.emit_message_default(&child.span,
                                                  &child.message,
                                                  &None,
                                                  &child.level,
                                                  max_line_num_len,
                                                  true)
                    }
                };
                if let Err(e) = emitted {
                    panic!("failed to emit error: {}", e);
                }
            }
        }
        Err(e) => panic!("failed to emit error: {}", e),
    }

    // Trailing newline, then flush so the whole diagnostic is written out.
    if let Err(e) = write!(&mut self.dst, "\n") {
        panic!("failed to emit error: {}", e);
    } else if let Err(e) = self.dst.flush() {
        panic!("failed to emit error: {}", e);
    }
}
}
/// Draws the "| " gutter separator between the line-number column and the
/// quoted source text.
fn draw_col_separator(buffer: &mut StyledBuffer, line: usize, col: usize) {
    buffer.puts(line, col, "| ", Style::LineNumber);
}

/// Same as `draw_col_separator`, but without the trailing space — used when
/// nothing follows the separator on that row.
fn draw_col_separator_no_space(buffer: &mut StyledBuffer, line: usize, col: usize) {
    buffer.puts(line, col, "|", Style::LineNumber);
}

/// Draws the "= " marker that introduces a note line.
fn draw_note_separator(buffer: &mut StyledBuffer, line: usize, col: usize) {
    buffer.puts(line, col, "= ", Style::LineNumber);
}
/// Returns `true` when the two annotations' column ranges overlap.
///
/// Two half-open column ranges `[start, end)` overlap exactly when either
/// one's start column falls inside the other's range.
fn overlaps(a1: &Annotation, a2: &Annotation) -> bool {
    let a1_starts_inside_a2 = (a2.start_col..a2.end_col).syntex_contains(a1.start_col);
    let a2_starts_inside_a1 = (a1.start_col..a1.end_col).syntex_contains(a2.start_col);
    a1_starts_inside_a2 || a2_starts_inside_a1
}
/// Local stand-in for `Range::contains`, which was not yet available as a
/// stable std method when this code was written.
trait SyntexContains<Idx> {
    fn syntex_contains(&self, item: Idx) -> bool;
}

impl<Idx> SyntexContains<Idx> for ops::Range<Idx>
    where Idx: PartialOrd
{
    /// `true` when `item` lies in the half-open interval `[start, end)`.
    fn syntex_contains(&self, item: Idx) -> bool {
        item >= self.start && item < self.end
    }
}
/// Writes a rendered `StyledBuffer` to `dst`: for every styled fragment it
/// applies the style, writes the text, and resets attributes; each buffer row
/// becomes one output line, and the destination is flushed once at the end so
/// the whole message appears atomically.
fn emit_to_destination(rendered_buffer: &Vec<Vec<StyledString>>,
                       lvl: &Level,
                       dst: &mut Destination)
                       -> io::Result<()> {
    use lock;
    // In order to prevent error message interleaving, where multiple error lines get intermixed
    // when multiple compiler processes error simultaneously, we emit errors with additional
    // steps.
    //
    // On Unix systems, we write into a buffered terminal rather than directly to a terminal. When
    // the .flush() is called we take the buffer created from the buffered writes and write it at
    // one shot. Because the Unix systems use ANSI for the colors, which is a text-based styling
    // scheme, this buffered approach works and maintains the styling.
    //
    // On Windows, styling happens through calls to a terminal API. This prevents us from using the
    // same buffering approach. Instead, we use a global Windows mutex, which we acquire long
    // enough to output the full error message, then we release.
    let _buffer_lock = lock::acquire_global_lock("rustc_errors");
    for line in rendered_buffer {
        for part in line {
            try!(dst.apply_style(lvl.clone(), part.style));
            try!(write!(dst, "{}", part.text));
            try!(dst.reset_attrs());
        }
        try!(write!(dst, "\n"));
    }
    try!(dst.flush());
    Ok(())
}
/// Reports whether stderr is attached to a terminal (Unix: via `isatty(3)`).
#[cfg(unix)]
fn stderr_isatty() -> bool {
    use libc;
    let fd = libc::STDERR_FILENO;
    unsafe { libc::isatty(fd) != 0 }
}
/// Reports whether stderr is attached to a console (Windows).
#[cfg(windows)]
fn stderr_isatty() -> bool {
    // Minimal hand-rolled Win32 declarations so we avoid a winapi dependency.
    type DWORD = u32;
    type BOOL = i32;
    type HANDLE = *mut u8;
    // The STD_ERROR_HANDLE pseudo-handle id for GetStdHandle.
    const STD_ERROR_HANDLE: DWORD = -12i32 as DWORD;
    extern "system" {
        fn GetStdHandle(which: DWORD) -> HANDLE;
        fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: *mut DWORD) -> BOOL;
    }
    unsafe {
        let handle = GetStdHandle(STD_ERROR_HANDLE);
        let mut out = 0;
        // GetConsoleMode only succeeds (returns nonzero) for console handles,
        // so its return value doubles as an "is a console" test; the mode
        // value itself is discarded.
        GetConsoleMode(handle, &mut out) != 0
    }
}
/// A stderr terminal whose output goes through a `BufferedWriter`, so an
/// entire message can be accumulated before being written out.
pub type BufferedStderr = term::Terminal<Output = BufferedWriter> + Send;

/// Where rendered diagnostics are written.
pub enum Destination {
    /// An unbuffered styled terminal (used on Windows — see `from_stderr`).
    Terminal(Box<term::StderrTerminal>),
    /// A styled terminal wrapped in `BufferedWriter` (used elsewhere).
    BufferedTerminal(Box<BufferedStderr>),
    /// A plain writer; no styling is applied.
    Raw(Box<Write + Send>),
}
/// Accumulates an entire error message in memory before it is emitted.
/// This helps to prevent interleaving of multiple error messages when
/// multiple compiler processes report errors simultaneously.
pub struct BufferedWriter {
    buffer: Vec<u8>,
}

impl BufferedWriter {
    // note: we use _new because the conditional compilation at its use site
    // may make this function unused on some platforms
    fn _new() -> BufferedWriter {
        BufferedWriter { buffer: Vec::new() }
    }
}
impl Write for BufferedWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
for b in buf {
self.buffer.push(*b);
}
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
let mut stderr = io::stderr();
let result = (|| {
try!(stderr.write_all(&self.buffer));
stderr.flush()
})();
self.buffer.clear();
result
}
}
impl Destination {
    #[cfg(not(windows))]
    /// When not on Windows, prefer the buffered terminal so that we can buffer an entire error
    /// to be emitted at one time.
    fn from_stderr() -> Destination {
        let stderr: Option<Box<BufferedStderr>> =
            term::TerminfoTerminal::new(BufferedWriter::_new())
                .map(|t| Box::new(t) as Box<BufferedStderr>);
        match stderr {
            Some(t) => BufferedTerminal(t),
            // No terminfo terminal available: fall back to plain stderr.
            None => Raw(Box::new(io::stderr())),
        }
    }

    #[cfg(windows)]
    /// Return a normal, unbuffered terminal when on Windows.
    fn from_stderr() -> Destination {
        // Try a terminfo terminal first, then fall back to the Windows
        // console API, then to raw unstyled stderr.
        let stderr: Option<Box<term::StderrTerminal>> = term::TerminfoTerminal::new(io::stderr())
            .map(|t| Box::new(t) as Box<term::StderrTerminal>)
            .or_else(|| {
                term::WinConsole::new(io::stderr())
                    .ok()
                    .map(|t| Box::new(t) as Box<term::StderrTerminal>)
            });
        match stderr {
            Some(t) => Terminal(t),
            None => Raw(Box::new(io::stderr())),
        }
    }

    /// Applies the terminal attributes (bold, foreground color) that
    /// correspond to a rendering `Style`. `lvl` supplies the color used for
    /// primary underlines/labels and level headers.
    ///
    /// NOTE(review): the cfg!(windows) branches pick BRIGHT_CYAN instead of
    /// BRIGHT_BLUE — presumably for legibility on Windows consoles; confirm.
    fn apply_style(&mut self, lvl: Level, style: Style) -> io::Result<()> {
        match style {
            Style::FileNameStyle | Style::LineAndColumn => {}
            Style::LineNumber => {
                try!(self.start_attr(term::Attr::Bold));
                if cfg!(windows) {
                    try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_CYAN)));
                } else {
                    try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_BLUE)));
                }
            }
            Style::ErrorCode => {
                try!(self.start_attr(term::Attr::Bold));
                try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_MAGENTA)));
            }
            Style::Quotation => {}
            Style::OldSchoolNote => {
                try!(self.start_attr(term::Attr::Bold));
                try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_GREEN)));
            }
            Style::OldSchoolNoteText | Style::HeaderMsg => {
                try!(self.start_attr(term::Attr::Bold));
                if cfg!(windows) {
                    try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_WHITE)));
                }
            }
            Style::UnderlinePrimary | Style::LabelPrimary => {
                try!(self.start_attr(term::Attr::Bold));
                // Primary highlights use the diagnostic level's own color.
                try!(self.start_attr(term::Attr::ForegroundColor(lvl.color())));
            }
            Style::UnderlineSecondary |
            Style::LabelSecondary => {
                try!(self.start_attr(term::Attr::Bold));
                if cfg!(windows) {
                    try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_CYAN)));
                } else {
                    try!(self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_BLUE)));
                }
            }
            Style::NoStyle => {}
            Style::Level(l) => {
                try!(self.start_attr(term::Attr::Bold));
                try!(self.start_attr(term::Attr::ForegroundColor(l.color())));
            }
        }
        Ok(())
    }

    /// Sets a single terminal attribute; a no-op for `Raw` destinations.
    fn start_attr(&mut self, attr: term::Attr) -> io::Result<()> {
        match *self {
            Terminal(ref mut t) => {
                try!(t.attr(attr));
            }
            BufferedTerminal(ref mut t) => {
                try!(t.attr(attr));
            }
            Raw(_) => {}
        }
        Ok(())
    }

    /// Clears all previously applied attributes; a no-op for `Raw`.
    fn reset_attrs(&mut self) -> io::Result<()> {
        match *self {
            Terminal(ref mut t) => {
                try!(t.reset());
            }
            BufferedTerminal(ref mut t) => {
                try!(t.reset());
            }
            Raw(_) => {}
        }
        Ok(())
    }
}
impl Write for Destination {
    /// Forwards the write to whichever underlying sink this destination wraps.
    fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
        match *self {
            Raw(ref mut writer) => writer.write(bytes),
            Terminal(ref mut term) => term.write(bytes),
            BufferedTerminal(ref mut term) => term.write(bytes),
        }
    }

    /// Forwards the flush to the underlying sink.
    fn flush(&mut self) -> io::Result<()> {
        match *self {
            Raw(ref mut writer) => writer.flush(),
            Terminal(ref mut term) => term.flush(),
            BufferedTerminal(ref mut term) => term.flush(),
        }
    }
}
|
use libc;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::ptr;
use std::rc::Rc;
use allowed_url::AllowedUrl;
use handle::{self, RsvgHandle};
use node::{Node, RsvgNode};
use util::utf8_cstr;
/// Opaque type handed out to C code; the actual data lives in `Defs`.
pub enum RsvgDefs {}

pub struct Defs {
    // id -> node map for elements defined in this handle's document.
    nodes: HashMap<String, Rc<Node>>,
    // Cache of externally-referenced documents, keyed by canonicalized URL.
    externs: HashMap<String, *const RsvgHandle>,
}
impl Defs {
    pub fn new() -> Defs {
        Defs {
            nodes: Default::default(),
            externs: Default::default(),
        }
    }

    /// Registers `node` under `id`. If the id is already present, the
    /// existing node is kept: the first definition of an id wins.
    pub fn insert(&mut self, id: &str, node: &Rc<Node>) {
        self.nodes.entry(id.to_string()).or_insert(node.clone());
    }

    /// Returns a node from a URI reference, or `None`
    ///
    /// This may return a node within the same RSVG handle, or a node in a secondary RSVG
    /// handle that is referenced by the current one. If the element's id is not found,
    /// returns `None`.
    pub fn lookup(&mut self, handle: *const RsvgHandle, name: &str) -> Option<&Rc<Node>> {
        if let Ok(reference) = Reference::parse(name) {
            match reference {
                // A bare URI with no fragment cannot name an element.
                Reference::PlainUri(_) => None,
                // "#id": look up in this document's defs.
                Reference::FragmentId(fragment) => self.nodes.get(fragment),
                // "uri#id": load (or reuse) the external document, then
                // look the id up in *its* defs.
                Reference::UriWithFragmentId(href, fragment) => {
                    match self.get_extern_handle(handle, href) {
                        Ok(extern_handle) => handle::get_defs(extern_handle).nodes.get(fragment),
                        Err(()) => None,
                    }
                }
            }
        } else {
            None
        }
    }

    /// Resolves `href` against the handle's base URL and returns a handle for
    /// the external document, loading it on first use and caching it in
    /// `self.externs` afterwards.
    fn get_extern_handle(
        &mut self,
        handle: *const RsvgHandle,
        href: &str,
    ) -> Result<*const RsvgHandle, ()> {
        let aurl =
            AllowedUrl::from_href(href, handle::get_base_url(handle).as_ref()).map_err(|_| ())?;
        match self.externs.entry(aurl.url().as_str().to_string()) {
            Entry::Occupied(e) => Ok(*(e.get())),
            Entry::Vacant(e) => {
                let extern_handle = handle::load_extern(handle, e.key())?;
                e.insert(extern_handle);
                Ok(extern_handle)
            }
        }
    }
}
/// Represents a possibly non-canonical URI with an optional fragment identifier
///
/// Sometimes in SVG element references (e.g. the `href` in the `<feImage>`
/// element) we must decide between referencing an external file, or using a
/// plain fragment identifier like `href="#foo"` as a reference to an SVG
/// element in the same file as the one being processed. This enum makes that
/// distinction.
#[derive(Debug, PartialEq)]
pub enum Reference<'a> {
    PlainUri(&'a str),
    FragmentId(&'a str),
    UriWithFragmentId(&'a str, &'a str),
}

impl<'a> Reference<'a> {
    /// Splits `s` on its last `#` and classifies the pieces.
    ///
    /// Returns `Err(())` when a present URI or fragment part is empty,
    /// e.g. for `""`, `"#"`, or `"uri#"`.
    pub fn parse(s: &str) -> Result<Reference, ()> {
        match s.rfind('#') {
            // No '#' at all: the whole string is a plain URI.
            None => {
                if s.is_empty() {
                    Err(())
                } else {
                    Ok(Reference::PlainUri(s))
                }
            }
            // Leading '#': everything after it is a fragment id.
            Some(0) => {
                let fragment = &s[1..];
                if fragment.is_empty() {
                    Err(())
                } else {
                    Ok(Reference::FragmentId(fragment))
                }
            }
            // '#' in the middle: URI plus fragment id.
            Some(pos) => {
                let (uri, fragment) = (&s[..pos], &s[(pos + 1)..]);
                if uri.is_empty() || fragment.is_empty() {
                    Err(())
                } else {
                    Ok(Reference::UriWithFragmentId(uri, fragment))
                }
            }
        }
    }
}
/// C entry point: frees a `Defs` previously handed out to C code.
///
/// NOTE(review): `defs` must have originally come from `Box::into_raw` on a
/// `Defs` for reconstructing the `Box` here to be sound — confirm with the
/// C-side allocator.
#[no_mangle]
pub extern "C" fn rsvg_defs_free(defs: *mut RsvgDefs) {
    assert!(!defs.is_null());
    unsafe {
        let defs = { &mut *(defs as *mut Defs) };
        // Reconstitute the Box; dropping it frees the Defs.
        Box::from_raw(defs);
    }
}
/// C entry point: looks up `name` (a URI reference) in the defs and returns
/// a pointer to the node, or NULL when the reference does not resolve.
///
/// The returned pointer borrows from `defs`; no new reference is created.
#[no_mangle]
pub extern "C" fn rsvg_defs_lookup(
    defs: *mut RsvgDefs,
    handle: *const RsvgHandle,
    name: *const libc::c_char,
) -> *const RsvgNode {
    assert!(!defs.is_null());
    assert!(!name.is_null());
    // Assumes `defs` points to a live Defs and `name` is a NUL-terminated
    // UTF-8 string — TODO confirm with the C callers.
    let defs = unsafe { &mut *(defs as *mut Defs) };
    let name = unsafe { utf8_cstr(name) };
    match defs.lookup(handle, name) {
        Some(n) => n as *const RsvgNode,
        None => ptr::null(),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Each of the three Reference variants parses from its textual form.
    #[test]
    fn reference_kinds() {
        assert_eq!(Reference::parse("uri"), Ok(Reference::PlainUri("uri")));
        assert_eq!(
            Reference::parse("#fragment"),
            Ok(Reference::FragmentId("fragment"))
        );
        assert_eq!(
            Reference::parse("uri#fragment"),
            Ok(Reference::UriWithFragmentId("uri", "fragment"))
        );
    }

    // Empty URI and/or empty fragment parts are rejected.
    #[test]
    fn reference_errors() {
        assert!(Reference::parse("").is_err());
        assert!(Reference::parse("#").is_err());
        assert!(Reference::parse("uri#").is_err());
    }
}
Reference: store owned Strings internally, not string slices
This prepares for callers owning the Reference value themselves, instead of
having Defs::lookup() create it implicitly.
use libc;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::ptr;
use std::rc::Rc;
use allowed_url::AllowedUrl;
use handle::{self, RsvgHandle};
use node::{Node, RsvgNode};
use util::utf8_cstr;
/// Opaque type handed out to C code; the actual data lives in `Defs`.
pub enum RsvgDefs {}

pub struct Defs {
    // id -> node map for elements defined in this handle's document.
    nodes: HashMap<String, Rc<Node>>,
    // Cache of externally-referenced documents, keyed by canonicalized URL.
    externs: HashMap<String, *const RsvgHandle>,
}
impl Defs {
    pub fn new() -> Defs {
        Defs {
            nodes: Default::default(),
            externs: Default::default(),
        }
    }

    /// Registers `node` under `id`. If the id is already present, the
    /// existing node is kept: the first definition of an id wins.
    pub fn insert(&mut self, id: &str, node: &Rc<Node>) {
        self.nodes.entry(id.to_string()).or_insert(node.clone());
    }

    /// Returns a node from a URI reference, or `None`
    ///
    /// This may return a node within the same RSVG handle, or a node in a secondary RSVG
    /// handle that is referenced by the current one. If the element's id is not found,
    /// returns `None`.
    pub fn lookup(&mut self, handle: *const RsvgHandle, name: &str) -> Option<&Rc<Node>> {
        if let Ok(reference) = Reference::parse(name) {
            match reference {
                // A bare URI with no fragment cannot name an element.
                Reference::PlainUri(_) => None,
                // "#id": look up in this document's defs.
                Reference::FragmentId(fragment) => self.nodes.get(&fragment),
                // "uri#id": load (or reuse) the external document, then
                // look the id up in *its* defs.
                Reference::UriWithFragmentId(href, fragment) => {
                    match self.get_extern_handle(handle, &href) {
                        Ok(extern_handle) => handle::get_defs(extern_handle).nodes.get(&fragment),
                        Err(()) => None,
                    }
                }
            }
        } else {
            None
        }
    }

    /// Resolves `href` against the handle's base URL and returns a handle for
    /// the external document, loading it on first use and caching it in
    /// `self.externs` afterwards.
    fn get_extern_handle(
        &mut self,
        handle: *const RsvgHandle,
        href: &str,
    ) -> Result<*const RsvgHandle, ()> {
        let aurl =
            AllowedUrl::from_href(href, handle::get_base_url(handle).as_ref()).map_err(|_| ())?;
        match self.externs.entry(aurl.url().as_str().to_string()) {
            Entry::Occupied(e) => Ok(*(e.get())),
            Entry::Vacant(e) => {
                let extern_handle = handle::load_extern(handle, e.key())?;
                e.insert(extern_handle);
                Ok(extern_handle)
            }
        }
    }
}
/// Represents a possibly non-canonical URI with an optional fragment identifier
///
/// Sometimes in SVG element references (e.g. the `href` in the `<feImage>`
/// element) we must decide between referencing an external file, or using a
/// plain fragment identifier like `href="#foo"` as a reference to an SVG
/// element in the same file as the one being processed. This enum makes that
/// distinction.
#[derive(Debug, PartialEq)]
pub enum Reference {
    PlainUri(String),
    FragmentId(String),
    UriWithFragmentId(String, String),
}

impl Reference {
    /// Splits `s` on its last `#` and classifies the pieces into one of the
    /// three `Reference` variants, copying them into owned `String`s.
    ///
    /// Returns `Err(())` when a present URI or fragment part is empty,
    /// e.g. for `""`, `"#"`, or `"uri#"`.
    pub fn parse(s: &str) -> Result<Reference, ()> {
        let (uri, fragment) = match s.rfind('#') {
            None => (Some(s), None),
            Some(0) => (None, Some(&s[1..])),
            Some(p) => (Some(&s[..p]), Some(&s[(p + 1)..])),
        };
        // Positive guards with is_empty() replace the original chain of
        // `len() == 0` error arms; everything not explicitly valid is Err.
        match (uri, fragment) {
            (Some(u), None) if !u.is_empty() => Ok(Reference::PlainUri(u.to_string())),
            (None, Some(f)) if !f.is_empty() => Ok(Reference::FragmentId(f.to_string())),
            (Some(u), Some(f)) if !u.is_empty() && !f.is_empty() => {
                Ok(Reference::UriWithFragmentId(u.to_string(), f.to_string()))
            }
            (_, _) => Err(()),
        }
    }
}
/// C entry point: frees a `Defs` previously handed out to C code.
///
/// NOTE(review): `defs` must have originally come from `Box::into_raw` on a
/// `Defs` for reconstructing the `Box` here to be sound — confirm with the
/// C-side allocator.
#[no_mangle]
pub extern "C" fn rsvg_defs_free(defs: *mut RsvgDefs) {
    assert!(!defs.is_null());
    unsafe {
        let defs = { &mut *(defs as *mut Defs) };
        // Reconstitute the Box; dropping it frees the Defs.
        Box::from_raw(defs);
    }
}
/// C entry point: looks up `name` (a URI reference) in the defs and returns
/// a pointer to the node, or NULL when the reference does not resolve.
///
/// The returned pointer borrows from `defs`; no new reference is created.
#[no_mangle]
pub extern "C" fn rsvg_defs_lookup(
    defs: *mut RsvgDefs,
    handle: *const RsvgHandle,
    name: *const libc::c_char,
) -> *const RsvgNode {
    assert!(!defs.is_null());
    assert!(!name.is_null());
    // Assumes `defs` points to a live Defs and `name` is a NUL-terminated
    // UTF-8 string — TODO confirm with the C callers.
    let defs = unsafe { &mut *(defs as *mut Defs) };
    let name = unsafe { utf8_cstr(name) };
    match defs.lookup(handle, name) {
        Some(n) => n as *const RsvgNode,
        None => ptr::null(),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Each of the three Reference variants parses from its textual form.
    #[test]
    fn reference_kinds() {
        assert_eq!(Reference::parse("uri"), Ok(Reference::PlainUri("uri".to_string())));
        assert_eq!(
            Reference::parse("#fragment"),
            Ok(Reference::FragmentId("fragment".to_string()))
        );
        assert_eq!(
            Reference::parse("uri#fragment"),
            Ok(Reference::UriWithFragmentId("uri".to_string(), "fragment".to_string()))
        );
    }

    // Empty URI and/or empty fragment parts are rejected.
    #[test]
    fn reference_errors() {
        assert!(Reference::parse("").is_err());
        assert!(Reference::parse("#").is_err());
        assert!(Reference::parse("uri#").is_err());
    }
}
|
use pango::{self, ContextExt, LayoutExt};
use std::cell::{Cell, RefCell};
use attributes::Attribute;
use defs::Fragment;
use drawing_ctx::DrawingCtx;
use error::{AttributeResultExt, RenderingError};
use font_props::FontWeightSpec;
use handle::RsvgHandle;
use length::*;
use node::{CascadedValues, NodeResult, NodeTrait, NodeType, RsvgNode};
use parsers::parse;
use property_bag::PropertyBag;
use space::{xml_space_normalize, NormalizeDefault, XmlSpaceNormalize};
use state::{
ComputedValues,
Direction,
FontStretch,
FontStyle,
FontVariant,
TextAnchor,
UnicodeBidi,
WritingMode,
XmlSpace,
};
/// In SVG text elements, we use `NodeChars` to store character data. For example,
/// an element like `<text>Foo Bar</text>` will be a `NodeText` with a single child,
/// and the child will be a `NodeChars` with "Foo Bar" for its contents.
///
/// Text elements can contain `<tspan>` sub-elements. In this case,
/// those `tspan` nodes will also contain `NodeChars` children.
///
/// A text or tspan element can contain more than one `NodeChars` child, for example,
/// if there is an XML comment that splits the character contents in two:
///
/// ```xml
/// <text>
///   This sentence will create a NodeChars.
///   <!-- this comment is ignored -->
///   This sentence will create another NodeChars.
/// </text>
/// ```
///
/// When rendering a text element, it will take care of concatenating the strings
/// in its `NodeChars` children as appropriate, depending on the
/// `xml:space="preserve"` attribute. A `NodeChars` stores the characters verbatim
/// as they come out of the XML parser, after ensuring that they are valid UTF-8.
pub struct NodeChars {
    // Raw character data as received from the XML parser.
    string: RefCell<String>,
    // Lazily-computed copy of `string` with whitespace normalized per
    // `xml:space`; reset to None whenever `string` changes.
    space_normalized: RefCell<Option<String>>,
}
impl NodeChars {
    pub fn new() -> NodeChars {
        NodeChars {
            string: RefCell::new(String::new()),
            space_normalized: RefCell::new(None),
        }
    }

    /// Returns a copy of the raw (un-normalized) character data.
    pub fn get_string(&self) -> String {
        self.string.borrow().clone()
    }

    /// Appends `s` to the character data and invalidates the cached
    /// space-normalized form.
    pub fn append(&self, s: &str) {
        self.string.borrow_mut().push_str(s);
        *self.space_normalized.borrow_mut() = None;
    }

    /// Lazily computes the whitespace-normalized form of the string,
    /// honoring `xml:space` and whether this node has element siblings.
    fn ensure_normalized_string(&self, node: &RsvgNode, values: &ComputedValues) {
        let mut normalized = self.space_normalized.borrow_mut();
        if (*normalized).is_none() {
            let mode = match values.xml_space {
                XmlSpace::Default => XmlSpaceNormalize::Default(NormalizeDefault {
                    has_element_before: node.has_previous_sibling(),
                    has_element_after: node.has_next_sibling(),
                }),
                XmlSpace::Preserve => XmlSpaceNormalize::Preserve,
            };
            *normalized = Some(xml_space_normalize(mode, &self.string.borrow()));
        }
    }

    /// Builds a Pango layout for the normalized text with the given styles.
    fn create_layout(
        &self,
        node: &RsvgNode,
        values: &ComputedValues,
        draw_ctx: &DrawingCtx,
    ) -> pango::Layout {
        self.ensure_normalized_string(node, values);
        let norm = self.space_normalized.borrow();
        let s = norm.as_ref().unwrap();
        create_pango_layout(draw_ctx, values, &s)
    }

    /// Adds this text's advance (layout width, converted from Pango's
    /// fixed-point units) to `length`.
    fn measure(
        &self,
        node: &RsvgNode,
        values: &ComputedValues,
        draw_ctx: &DrawingCtx,
        length: &mut f64,
    ) {
        let layout = self.create_layout(node, values, draw_ctx);
        let (width, _) = layout.get_size();
        *length += f64::from(width) / f64::from(pango::SCALE);
    }

    /// Draws the text at (`x`, `y`) and advances the position along the
    /// writing direction: `y` for vertical gravity, `x` otherwise.
    fn render(
        &self,
        node: &RsvgNode,
        values: &ComputedValues,
        draw_ctx: &mut DrawingCtx,
        x: &mut f64,
        y: &mut f64,
        clipping: bool,
    ) -> Result<(), RenderingError> {
        let layout = self.create_layout(node, values, draw_ctx);
        let (width, _) = layout.get_size();
        let baseline = f64::from(layout.get_baseline()) / f64::from(pango::SCALE);
        // Offset from the layout origin to the (baseline-shifted) baseline.
        let offset = baseline
            + values
                .baseline_shift
                .0
                .normalize(values, &draw_ctx.get_view_params());
        if values.text_gravity_is_vertical() {
            draw_ctx.draw_pango_layout(&layout, values, *x + offset, *y, clipping)?;
            *y += f64::from(width) / f64::from(pango::SCALE);
        } else {
            draw_ctx.draw_pango_layout(&layout, values, *x, *y - offset, clipping)?;
            *x += f64::from(width) / f64::from(pango::SCALE);
        }
        Ok(())
    }
}
impl NodeTrait for NodeChars {
    /// `NodeChars` has no XML attributes; character data arrives via `append()`.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, _: &PropertyBag<'_>) -> NodeResult {
        Ok(())
    }
}
/// The `<text>` element: an absolutely positioned text block.
pub struct NodeText {
    // Absolute position (x, y) and additional offset (dx, dy), parsed in
    // set_atts and resolved against the view params at draw time.
    x: Cell<Length>,
    y: Cell<Length>,
    dx: Cell<Length>,
    dy: Cell<Length>,
}
impl NodeText {
    /// Creates a `<text>` node with all positions and offsets at their
    /// default (zero-valued) lengths.
    pub fn new() -> NodeText {
        NodeText {
            x: Cell::new(Default::default()),
            y: Cell::new(Default::default()),
            dx: Cell::new(Default::default()),
            dy: Cell::new(Default::default()),
        }
    }
}
impl NodeTrait for NodeText {
    /// Parses the x/y (absolute position) and dx/dy (offset) attributes.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag<'_>) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            match attr {
                Attribute::X => self.x.set(parse("x", value, LengthDir::Horizontal)?),
                Attribute::Y => self.y.set(parse("y", value, LengthDir::Vertical)?),
                Attribute::Dx => self.dx.set(parse("dx", value, LengthDir::Horizontal)?),
                Attribute::Dy => self.dy.set(parse("dy", value, LengthDir::Vertical)?),
                _ => (),
            }
        }
        Ok(())
    }

    /// `<text>` elements may contain character data.
    fn accept_chars(&self) -> bool {
        true
    }

    /// Computes the anchored start position, then renders all children.
    fn draw(
        &self,
        node: &RsvgNode,
        cascaded: &CascadedValues<'_>,
        draw_ctx: &mut DrawingCtx,
        clipping: bool,
    ) -> Result<(), RenderingError> {
        let values = cascaded.get();
        let params = draw_ctx.get_view_params();
        let mut x = self.x.get().normalize(values, &params);
        let mut y = self.y.get().normalize(values, &params);
        let mut dx = self.dx.get().normalize(values, &params);
        let mut dy = self.dy.get().normalize(values, &params);
        let anchor = values.text_anchor;
        // Shift the start back by the anchor offset: half of the measured
        // advance for `middle`, the whole advance otherwise (see anchor_offset).
        let offset = anchor_offset(node, cascaded, draw_ctx, anchor, false);
        if values.text_gravity_is_vertical() {
            // Vertical gravity: the text advances along y, so the anchor
            // adjustment applies to y/dy instead of x/dx.
            y -= offset;
            dy = match anchor {
                TextAnchor::Start => dy,
                TextAnchor::Middle => dy / 2f64,
                _ => 0f64,
            }
        } else {
            x -= offset;
            dx = match anchor {
                TextAnchor::Start => dx,
                TextAnchor::Middle => dx / 2f64,
                _ => 0f64,
            }
        }
        x += dx;
        y += dy;
        render_children(node, cascaded, draw_ctx, &mut x, &mut y, false, clipping)
    }
}
/// The `<tref>` element: renders the character content of another node,
/// looked up through the fragment reference in `xlink:href`.
pub struct NodeTRef {
    // Fragment reference to the text source; None until set_atts sees xlink:href.
    link: RefCell<Option<Fragment>>,
}
impl NodeTRef {
    pub fn new() -> NodeTRef {
        NodeTRef {
            link: RefCell::new(Default::default()),
        }
    }

    /// Adds the advance of the referenced node's text to `length`.
    ///
    /// Returns `false` when there is no link or it does not resolve;
    /// otherwise propagates the "done" flag from measuring the referenced
    /// subtree (as text-only content).
    fn measure(
        &self,
        node: &RsvgNode,
        cascaded: &CascadedValues<'_>,
        draw_ctx: &mut DrawingCtx,
        length: &mut f64,
    ) -> bool {
        let link = self.link.borrow();
        if link.is_none() {
            return false;
        }
        let link = link.as_ref().unwrap();
        let done = if let Some(acquired) = draw_ctx.get_acquired_node(link) {
            let c = acquired.get();
            measure_children(&c, cascaded, draw_ctx, length, true)
        } else {
            // Non-fatal: a dangling reference just contributes nothing.
            rsvg_log!(
                "element {} references a nonexistent text source \"{}\"",
                node.get_human_readable_name(),
                link,
            );
            false
        };
        done
    }

    /// Renders the referenced node's character content at (`x`, `y`),
    /// advancing the position as text is drawn. A missing or unresolvable
    /// link is logged and otherwise ignored.
    fn render(
        &self,
        node: &RsvgNode,
        cascaded: &CascadedValues<'_>,
        draw_ctx: &mut DrawingCtx,
        x: &mut f64,
        y: &mut f64,
        clipping: bool,
    ) -> Result<(), RenderingError> {
        let link = self.link.borrow();
        if link.is_none() {
            return Ok(());
        }
        let link = link.as_ref().unwrap();
        if let Some(acquired) = draw_ctx.get_acquired_node(link) {
            let c = acquired.get();
            render_children(&c, cascaded, draw_ctx, x, y, true, clipping)?;
        } else {
            rsvg_log!(
                "element {} references a nonexistent text source \"{}\"",
                node.get_human_readable_name(),
                link,
            );
        }
        Ok(())
    }
}
impl NodeTrait for NodeTRef {
    /// Parses the `xlink:href` attribute into the fragment link; all other
    /// attributes are ignored.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag<'_>) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            if let Attribute::XlinkHref = attr {
                let fragment = Fragment::parse(value).attribute(Attribute::XlinkHref)?;
                self.link.replace(Some(fragment));
            }
        }
        Ok(())
    }
}
/// The `<tspan>` element: a text run with optional absolute position.
pub struct NodeTSpan {
    // x/y are optional: when present, the tspan restarts positioning at an
    // absolute point (see measure/render); dx/dy are always-applied offsets.
    x: Cell<Option<Length>>,
    y: Cell<Option<Length>>,
    dx: Cell<Length>,
    dy: Cell<Length>,
}
impl NodeTSpan {
    pub fn new() -> NodeTSpan {
        NodeTSpan {
            x: Cell::new(Default::default()),
            y: Cell::new(Default::default()),
            dx: Cell::new(Length::default()),
            dy: Cell::new(Length::default()),
        }
    }

    /// Adds this tspan's advance to `length`.
    ///
    /// Returns `true` ("done") as soon as the tspan has an absolute x or y,
    /// since an absolutely positioned tspan ends the run being measured.
    fn measure(
        &self,
        node: &RsvgNode,
        cascaded: &CascadedValues<'_>,
        draw_ctx: &mut DrawingCtx,
        length: &mut f64,
        usetextonly: bool,
    ) -> bool {
        let values = cascaded.get();
        if self.x.get().is_some() || self.y.get().is_some() {
            return true;
        }
        let params = draw_ctx.get_view_params();
        // Only the offset along the writing direction contributes.
        if values.text_gravity_is_vertical() {
            *length += self.dy.get().normalize(values, &params);
        } else {
            *length += self.dx.get().normalize(values, &params);
        }
        measure_children(node, cascaded, draw_ctx, length, usetextonly)
    }

    /// Renders the tspan's children, applying an absolute x/y restart (when
    /// present, with anchor adjustment) plus the dx/dy offsets.
    fn render(
        &self,
        node: &RsvgNode,
        cascaded: &CascadedValues<'_>,
        draw_ctx: &mut DrawingCtx,
        x: &mut f64,
        y: &mut f64,
        usetextonly: bool,
        clipping: bool,
    ) -> Result<(), RenderingError> {
        let values = cascaded.get();
        let params = draw_ctx.get_view_params();
        let mut dx = self.dx.get().normalize(values, &params);
        let mut dy = self.dy.get().normalize(values, &params);
        let vertical = values.text_gravity_is_vertical();
        let anchor = values.text_anchor;
        let offset = anchor_offset(node, cascaded, draw_ctx, anchor, usetextonly);
        if let Some(self_x) = self.x.get() {
            // Absolute x restarts horizontal positioning; anchor adjustment
            // only applies along the writing direction.
            *x = self_x.normalize(values, &params);
            if !vertical {
                *x -= offset;
                dx = match anchor {
                    TextAnchor::Start => dx,
                    TextAnchor::Middle => dx / 2f64,
                    _ => 0f64,
                }
            }
        }
        *x += dx;
        if let Some(self_y) = self.y.get() {
            *y = self_y.normalize(values, &params);
            if vertical {
                *y -= offset;
                dy = match anchor {
                    TextAnchor::Start => dy,
                    TextAnchor::Middle => dy / 2f64,
                    _ => 0f64,
                }
            }
        }
        *y += dy;
        render_children(node, cascaded, draw_ctx, x, y, usetextonly, clipping)
    }
}
impl NodeTrait for NodeTSpan {
    /// Parses optional absolute x/y and the dx/dy offset attributes.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag<'_>) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            match attr {
                Attribute::X => self
                    .x
                    .set(parse("x", value, LengthDir::Horizontal).map(Some)?),
                Attribute::Y => self
                    .y
                    .set(parse("y", value, LengthDir::Vertical).map(Some)?),
                Attribute::Dx => self.dx.set(parse("dx", value, LengthDir::Horizontal)?),
                Attribute::Dy => self.dy.set(parse("dy", value, LengthDir::Vertical)?),
                _ => (),
            }
        }
        Ok(())
    }

    /// `<tspan>` elements may contain character data.
    fn accept_chars(&self) -> bool {
        true
    }
}
/// Converts a user-space quantity to fixed-point Pango units, rounding the
/// same way as pango_units_from_double(): floor(v * PANGO_SCALE + 0.5).
fn to_pango_units(v: f64) -> i32 {
    // The previous `as i32` cast truncated toward zero, which mis-rounded
    // negative values (e.g. a negative letter-spacing of -0.7px): -716.3
    // truncated to -716 instead of flooring to -717. `.floor()` matches
    // Pango's own conversion for both signs.
    (v * f64::from(pango::SCALE) + 0.5).floor() as i32
}
// Conversions from our CSS property value types to their Pango equivalents.
// These are plain lookup tables; the only approximations are noted inline.

impl From<FontStyle> for pango::Style {
    fn from(s: FontStyle) -> pango::Style {
        match s {
            FontStyle::Normal => pango::Style::Normal,
            FontStyle::Italic => pango::Style::Italic,
            FontStyle::Oblique => pango::Style::Oblique,
        }
    }
}

impl From<FontVariant> for pango::Variant {
    fn from(v: FontVariant) -> pango::Variant {
        match v {
            FontVariant::Normal => pango::Variant::Normal,
            FontVariant::SmallCaps => pango::Variant::SmallCaps,
        }
    }
}

impl From<FontStretch> for pango::Stretch {
    fn from(s: FontStretch) -> pango::Stretch {
        match s {
            FontStretch::Normal => pango::Stretch::Normal,
            FontStretch::Wider => pango::Stretch::Expanded, // not quite correct
            FontStretch::Narrower => pango::Stretch::Condensed, // not quite correct
            FontStretch::UltraCondensed => pango::Stretch::UltraCondensed,
            FontStretch::ExtraCondensed => pango::Stretch::ExtraCondensed,
            FontStretch::Condensed => pango::Stretch::Condensed,
            FontStretch::SemiCondensed => pango::Stretch::SemiCondensed,
            FontStretch::SemiExpanded => pango::Stretch::SemiExpanded,
            FontStretch::Expanded => pango::Stretch::Expanded,
            FontStretch::ExtraExpanded => pango::Stretch::ExtraExpanded,
            FontStretch::UltraExpanded => pango::Stretch::UltraExpanded,
        }
    }
}

impl From<FontWeightSpec> for pango::Weight {
    fn from(w: FontWeightSpec) -> pango::Weight {
        match w {
            FontWeightSpec::Normal => pango::Weight::Normal,
            FontWeightSpec::Bold => pango::Weight::Bold,
            FontWeightSpec::Bolder => pango::Weight::Ultrabold,
            FontWeightSpec::Lighter => pango::Weight::Light,
            FontWeightSpec::W100 => pango::Weight::Thin,
            FontWeightSpec::W200 => pango::Weight::Ultralight,
            FontWeightSpec::W300 => pango::Weight::Semilight,
            FontWeightSpec::W400 => pango::Weight::Normal,
            FontWeightSpec::W500 => pango::Weight::Medium,
            FontWeightSpec::W600 => pango::Weight::Semibold,
            FontWeightSpec::W700 => pango::Weight::Bold,
            FontWeightSpec::W800 => pango::Weight::Ultrabold,
            FontWeightSpec::W900 => pango::Weight::Heavy,
        }
    }
}

impl From<Direction> for pango::Direction {
    fn from(d: Direction) -> pango::Direction {
        match d {
            Direction::Ltr => pango::Direction::Ltr,
            Direction::Rtl => pango::Direction::Rtl,
        }
    }
}

impl From<Direction> for pango::Alignment {
    fn from(d: Direction) -> pango::Alignment {
        match d {
            Direction::Ltr => pango::Alignment::Left,
            Direction::Rtl => pango::Alignment::Right,
        }
    }
}

impl From<WritingMode> for pango::Direction {
    fn from(m: WritingMode) -> pango::Direction {
        match m {
            WritingMode::LrTb | WritingMode::Lr | WritingMode::Tb | WritingMode::TbRl => {
                pango::Direction::Ltr
            }
            WritingMode::RlTb | WritingMode::Rl => pango::Direction::Rtl,
        }
    }
}

impl From<WritingMode> for pango::Gravity {
    fn from(m: WritingMode) -> pango::Gravity {
        match m {
            // Top-to-bottom writing modes get East gravity (vertical text).
            WritingMode::Tb | WritingMode::TbRl => pango::Gravity::East,
            WritingMode::LrTb | WritingMode::Lr | WritingMode::RlTb | WritingMode::Rl => {
                pango::Gravity::South
            }
        }
    }
}
/// Creates a Pango layout for `text`, configured from the computed values:
/// language, gravity, base direction, font description (family, style,
/// variant, weight, stretch, size), letter-spacing, underline/strikethrough,
/// and alignment.
fn create_pango_layout(
    draw_ctx: &DrawingCtx,
    values: &ComputedValues,
    text: &str,
) -> pango::Layout {
    let pango_context = draw_ctx.get_pango_context();
    // See the construction of the XmlLang property
    // We use "" there as the default value; this means that the language is not set.
    // If the language *is* set, we can use it here.
    if !values.xml_lang.0.is_empty() {
        let pango_lang = pango::Language::from_string(&values.xml_lang.0);
        pango_context.set_language(&pango_lang);
    }
    pango_context.set_base_gravity(pango::Gravity::from(values.writing_mode));
    // The base direction comes from `direction` when bidi is overridden or
    // embedded (or when direction is explicitly non-LTR); otherwise it is
    // derived from the writing mode.
    match (values.unicode_bidi, values.direction) {
        (UnicodeBidi::Override, _) | (UnicodeBidi::Embed, _) => {
            pango_context.set_base_dir(pango::Direction::from(values.direction));
        }
        (_, direction) if direction != Direction::Ltr => {
            pango_context.set_base_dir(pango::Direction::from(direction));
        }
        (_, _) => {
            pango_context.set_base_dir(pango::Direction::from(values.writing_mode));
        }
    }
    let mut font_desc = pango_context.get_font_description().unwrap();
    font_desc.set_family(&(values.font_family.0).0);
    font_desc.set_style(pango::Style::from(values.font_style));
    font_desc.set_variant(pango::Variant::from(values.font_variant));
    font_desc.set_weight(pango::Weight::from(values.font_weight.0));
    font_desc.set_stretch(pango::Stretch::from(values.font_stretch));
    let params = draw_ctx.get_view_params();
    font_desc.set_size(to_pango_units(
        values.font_size.0.normalize(values, &params),
    ));
    let layout = pango::Layout::new(&pango_context);
    layout.set_font_description(&font_desc);
    // Letter-spacing and text-decoration are carried via a Pango attr list.
    let attr_list = pango::AttrList::new();
    attr_list.insert(
        pango::Attribute::new_letter_spacing(to_pango_units(
            values.letter_spacing.0.normalize(values, &params),
        ))
        .unwrap(),
    );
    if values.text_decoration.underline {
        attr_list.insert(pango::Attribute::new_underline(pango::Underline::Single).unwrap());
    }
    if values.text_decoration.strike {
        attr_list.insert(pango::Attribute::new_strikethrough(true).unwrap());
    }
    layout.set_attributes(&attr_list);
    layout.set_alignment(pango::Alignment::from(values.direction));
    layout.set_text(text);
    layout
}
/// Returns the distance by which a text run's start position must be shifted
/// back so that it ends up start-, middle-, or end-anchored: zero for
/// `start`, half the measured advance for `middle`, the full advance
/// otherwise.
fn anchor_offset(
    node: &RsvgNode,
    cascaded: &CascadedValues<'_>,
    draw_ctx: &mut DrawingCtx,
    anchor: TextAnchor,
    textonly: bool,
) -> f64 {
    match anchor {
        TextAnchor::Start => 0f64,
        TextAnchor::Middle => {
            let mut length = 0f64;
            measure_children(node, cascaded, draw_ctx, &mut length, textonly);
            length / 2f64
        }
        _ => {
            let mut length = 0f64;
            measure_children(node, cascaded, draw_ctx, &mut length, textonly);
            length
        }
    }
}
/// Accumulates the advance of every child of `node` into `length`.
///
/// Stops and returns `true` as soon as any child reports that measuring is
/// done (e.g. an absolutely positioned tspan); returns `false` otherwise.
fn measure_children(
    node: &RsvgNode,
    cascaded: &CascadedValues<'_>,
    draw_ctx: &mut DrawingCtx,
    length: &mut f64,
    textonly: bool,
) -> bool {
    for child in node.children() {
        let child_done = measure_child(
            &child,
            &CascadedValues::new(cascaded, &child),
            draw_ctx,
            length,
            textonly,
        );
        if child_done {
            return true;
        }
    }
    false
}
/// Measures a single child node's text advance into `length`, under the
/// child's own transform.
///
/// Note the match-arm order matters: when `textonly` is true, any non-chars
/// node (including tspan/tref) is treated as a plain container of text and
/// its children are measured recursively.
fn measure_child(
    node: &RsvgNode,
    cascaded: &CascadedValues<'_>,
    draw_ctx: &mut DrawingCtx,
    length: &mut f64,
    textonly: bool,
) -> bool {
    let values = cascaded.get();
    let mut done = false;
    // Apply the child's transform for the duration of the measurement.
    let cr = draw_ctx.get_cairo_context();
    cr.save();
    cr.transform(node.get_transform());
    match (node.get_type(), textonly) {
        (NodeType::Chars, _) => {
            // here we use the values from the current element,
            // instead of child_values because NodeChars does not
            // represent a real SVG element - it is just our container
            // for character data.
            node.with_impl(|chars: &NodeChars| chars.measure(node, values, draw_ctx, length));
        }
        (_, true) => {
            done = measure_children(
                node,
                &CascadedValues::new(cascaded, node),
                draw_ctx,
                length,
                textonly,
            );
        }
        (NodeType::TSpan, _) => {
            node.with_impl(|tspan: &NodeTSpan| {
                done = tspan.measure(
                    node,
                    &CascadedValues::new(cascaded, node),
                    draw_ctx,
                    length,
                    textonly,
                );
            });
        }
        (NodeType::TRef, _) => {
            node.with_impl(|tref: &NodeTRef| {
                done = tref.measure(node, &CascadedValues::new(cascaded, node), draw_ctx, length);
            });
        }
        (_, _) => {}
    }
    cr.restore();
    done
}
/// Renders every child of `node` inside a single discrete layer, advancing
/// the text position `(x, y)` as each child is drawn.
fn render_children(
    node: &RsvgNode,
    cascaded: &CascadedValues<'_>,
    draw_ctx: &mut DrawingCtx,
    x: &mut f64,
    y: &mut f64,
    textonly: bool,
    clipping: bool,
) -> Result<(), RenderingError> {
    let values = cascaded.get();
    draw_ctx.with_discrete_layer(node, values, clipping, &mut |dc| {
        // Stops at the first child that fails to render.
        node.children()
            .try_for_each(|child| render_child(&child, cascaded, dc, x, y, textonly, clipping))
    })
}
/// Renders a single child of a text element, dispatching on node type.
///
/// `textonly` forces recursion into arbitrary children's character content
/// (used when rendering the target of a `<tref>`).
fn render_child(
    node: &RsvgNode,
    cascaded: &CascadedValues<'_>,
    draw_ctx: &mut DrawingCtx,
    x: &mut f64,
    y: &mut f64,
    textonly: bool,
    clipping: bool,
) -> Result<(), RenderingError> {
    let values = cascaded.get();
    // Apply the child's transform for the duration of the draw.
    let cr = draw_ctx.get_cairo_context();
    cr.save();
    cr.transform(node.get_transform());
    let res = match (node.get_type(), textonly) {
        (NodeType::Chars, _) => {
            node.with_impl(|chars: &NodeChars| {
                // here we use the values from the current element,
                // instead of child_values because NodeChars does not
                // represent a real SVG element - it is just our container
                // for character data.
                chars.render(node, values, draw_ctx, x, y, clipping)
            })
        }
        (_, true) => render_children(
            node,
            &CascadedValues::new(cascaded, node),
            draw_ctx,
            x,
            y,
            textonly,
            clipping,
        ),
        (NodeType::TSpan, _) => node.with_impl(|tspan: &NodeTSpan| {
            tspan.render(
                node,
                &CascadedValues::new(cascaded, node),
                draw_ctx,
                x,
                y,
                textonly,
                clipping,
            )
        }),
        (NodeType::TRef, _) => node.with_impl(|tref: &NodeTRef| {
            tref.render(
                node,
                &CascadedValues::new(cascaded, node),
                draw_ctx,
                x,
                y,
                clipping,
            )
        }),
        (_, _) => Ok(()),
    };
    cr.restore();
    res
}
NodeChars.measure(): Return a value; don't mutate an argument
Have the caller use the result to mutate its running length
use pango::{self, ContextExt, LayoutExt};
use std::cell::{Cell, RefCell};
use attributes::Attribute;
use defs::Fragment;
use drawing_ctx::DrawingCtx;
use error::{AttributeResultExt, RenderingError};
use font_props::FontWeightSpec;
use handle::RsvgHandle;
use length::*;
use node::{CascadedValues, NodeResult, NodeTrait, NodeType, RsvgNode};
use parsers::parse;
use property_bag::PropertyBag;
use space::{xml_space_normalize, NormalizeDefault, XmlSpaceNormalize};
use state::{
ComputedValues,
Direction,
FontStretch,
FontStyle,
FontVariant,
TextAnchor,
UnicodeBidi,
WritingMode,
XmlSpace,
};
/// In SVG text elements, we use `NodeChars` to store character data. For example,
/// an element like `<text>Foo Bar</text>` will be a `NodeText` with a single child,
/// and the child will be a `NodeChars` with "Foo Bar" for its contents.
///
/// Text elements can contain `<tspan>` sub-elements. In this case,
/// those `tspan` nodes will also contain `NodeChars` children.
///
/// A text or tspan element can contain more than one `NodeChars` child, for example,
/// if there is an XML comment that splits the character contents in two:
///
/// ```xml
/// <text>
/// This sentence will create a NodeChars.
/// <!-- this comment is ignored -->
/// This sentence will create another NodeChars.
/// </text>
/// ```
///
/// When rendering a text element, it will take care of concatenating the strings
/// in its `NodeChars` children as appropriate, depending on the
/// `xml:space="preserve"` attribute. A `NodeChars` stores the characters verbatim
/// as they come out of the XML parser, after ensuring that they are valid UTF-8.
pub struct NodeChars {
    // Raw character data as received from the XML parser.
    string: RefCell<String>,
    // Lazily-computed copy of `string` with `xml:space` normalization applied;
    // invalidated (set back to `None`) whenever more text is appended.
    space_normalized: RefCell<Option<String>>,
}
impl NodeChars {
    /// Creates an empty `NodeChars` with no cached normalized text.
    pub fn new() -> NodeChars {
        NodeChars {
            string: RefCell::new(String::new()),
            space_normalized: RefCell::new(None),
        }
    }

    /// Returns a copy of the raw (un-normalized) character data.
    pub fn get_string(&self) -> String {
        self.string.borrow().clone()
    }

    /// Appends parser-supplied text and invalidates the normalization cache.
    pub fn append(&self, s: &str) {
        self.string.borrow_mut().push_str(s);
        *self.space_normalized.borrow_mut() = None;
    }

    /// Lazily computes the `xml:space`-normalized version of the text.
    ///
    /// For `xml:space="default"`, whitespace collapsing depends on whether
    /// there are sibling elements before/after this character run.
    fn ensure_normalized_string(&self, node: &RsvgNode, values: &ComputedValues) {
        let mut normalized = self.space_normalized.borrow_mut();
        if (*normalized).is_none() {
            let mode = match values.xml_space {
                XmlSpace::Default => XmlSpaceNormalize::Default(NormalizeDefault {
                    has_element_before: node.has_previous_sibling(),
                    has_element_after: node.has_next_sibling(),
                }),
                XmlSpace::Preserve => XmlSpaceNormalize::Preserve,
            };
            *normalized = Some(xml_space_normalize(mode, &self.string.borrow()));
        }
    }

    /// Builds a Pango layout for the normalized text using `values`' font
    /// properties.
    fn create_layout(
        &self,
        node: &RsvgNode,
        values: &ComputedValues,
        draw_ctx: &DrawingCtx,
    ) -> pango::Layout {
        self.ensure_normalized_string(node, values);
        let norm = self.space_normalized.borrow();
        let s = norm.as_ref().unwrap();
        create_pango_layout(draw_ctx, values, &s)
    }

    /// Returns the text's advance (layout width) in user-space units.
    fn measure(&self, node: &RsvgNode, values: &ComputedValues, draw_ctx: &DrawingCtx) -> f64 {
        let layout = self.create_layout(node, values, draw_ctx);
        let (width, _) = layout.get_size();
        f64::from(width) / f64::from(pango::SCALE)
    }

    /// Draws the text at `(x, y)`, applying baseline-shift, and advances the
    /// coordinate along the writing direction (y when gravity is vertical,
    /// x otherwise).
    fn render(
        &self,
        node: &RsvgNode,
        values: &ComputedValues,
        draw_ctx: &mut DrawingCtx,
        x: &mut f64,
        y: &mut f64,
        clipping: bool,
    ) -> Result<(), RenderingError> {
        let layout = self.create_layout(node, values, draw_ctx);
        let (width, _) = layout.get_size();
        let baseline = f64::from(layout.get_baseline()) / f64::from(pango::SCALE);
        let offset = baseline
            + values
                .baseline_shift
                .0
                .normalize(values, &draw_ctx.get_view_params());
        if values.text_gravity_is_vertical() {
            draw_ctx.draw_pango_layout(&layout, values, *x + offset, *y, clipping)?;
            *y += f64::from(width) / f64::from(pango::SCALE);
        } else {
            draw_ctx.draw_pango_layout(&layout, values, *x, *y - offset, clipping)?;
            *x += f64::from(width) / f64::from(pango::SCALE);
        }
        Ok(())
    }
}
impl NodeTrait for NodeChars {
    /// `NodeChars` is not a real SVG element, so it has no attributes to set.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, _: &PropertyBag<'_>) -> NodeResult {
        Ok(())
    }
}
/// The `<text>` element: a top-level text block with an optional position
/// (`x`/`y`) and displacement (`dx`/`dy`).
pub struct NodeText {
    x: Cell<Length>,
    y: Cell<Length>,
    dx: Cell<Length>,
    dy: Cell<Length>,
}
impl NodeText {
    /// Constructs a `<text>` node whose position and displacement all start
    /// out as zero-valued lengths.
    pub fn new() -> NodeText {
        let zero = || Cell::new(Length::default());
        NodeText {
            x: zero(),
            y: zero(),
            dx: zero(),
            dy: zero(),
        }
    }
}
impl NodeTrait for NodeText {
    /// Parses the x/y/dx/dy positioning attributes; everything else is
    /// handled by the generic property machinery.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag<'_>) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            match attr {
                Attribute::X => self.x.set(parse("x", value, LengthDir::Horizontal)?),
                Attribute::Y => self.y.set(parse("y", value, LengthDir::Vertical)?),
                Attribute::Dx => self.dx.set(parse("dx", value, LengthDir::Horizontal)?),
                Attribute::Dy => self.dy.set(parse("dy", value, LengthDir::Vertical)?),
                _ => (),
            }
        }
        Ok(())
    }

    // `<text>` elements may contain character data directly.
    fn accept_chars(&self) -> bool {
        true
    }

    /// Renders the whole `<text>` element: shifts the starting point by the
    /// text-anchor offset, then renders all children.
    fn draw(
        &self,
        node: &RsvgNode,
        cascaded: &CascadedValues<'_>,
        draw_ctx: &mut DrawingCtx,
        clipping: bool,
    ) -> Result<(), RenderingError> {
        let values = cascaded.get();
        let params = draw_ctx.get_view_params();
        let mut x = self.x.get().normalize(values, &params);
        let mut y = self.y.get().normalize(values, &params);
        let mut dx = self.dx.get().normalize(values, &params);
        let mut dy = self.dy.get().normalize(values, &params);
        let anchor = values.text_anchor;
        let offset = anchor_offset(node, cascaded, draw_ctx, anchor, false);
        // The anchor offset applies along the writing direction only; the
        // displacement is scaled consistently with the anchor choice.
        if values.text_gravity_is_vertical() {
            y -= offset;
            dy = match anchor {
                TextAnchor::Start => dy,
                TextAnchor::Middle => dy / 2f64,
                _ => 0f64,
            }
        } else {
            x -= offset;
            dx = match anchor {
                TextAnchor::Start => dx,
                TextAnchor::Middle => dx / 2f64,
                _ => 0f64,
            }
        }
        x += dx;
        y += dy;
        render_children(node, cascaded, draw_ctx, &mut x, &mut y, false, clipping)
    }
}
/// The `<tref>` element: renders the character content of another node
/// referenced through `xlink:href`.
pub struct NodeTRef {
    link: RefCell<Option<Fragment>>,
}
impl NodeTRef {
    /// Creates a `NodeTRef` with no `xlink:href` target yet.
    pub fn new() -> NodeTRef {
        NodeTRef {
            link: RefCell::new(Default::default()),
        }
    }

    /// Measures the referenced node's character content (text-only mode).
    ///
    /// Returns `false` if the link is unset; logs and returns `false` if the
    /// referenced node cannot be acquired.
    fn measure(
        &self,
        node: &RsvgNode,
        cascaded: &CascadedValues<'_>,
        draw_ctx: &mut DrawingCtx,
        length: &mut f64,
    ) -> bool {
        let link = self.link.borrow();
        if link.is_none() {
            return false;
        }
        let link = link.as_ref().unwrap();
        let done = if let Some(acquired) = draw_ctx.get_acquired_node(link) {
            let c = acquired.get();
            measure_children(&c, cascaded, draw_ctx, length, true)
        } else {
            rsvg_log!(
                "element {} references a nonexistent text source \"{}\"",
                node.get_human_readable_name(),
                link,
            );
            false
        };
        done
    }

    /// Renders the referenced node's character content at `(x, y)`;
    /// a dangling reference is logged and silently skipped.
    fn render(
        &self,
        node: &RsvgNode,
        cascaded: &CascadedValues<'_>,
        draw_ctx: &mut DrawingCtx,
        x: &mut f64,
        y: &mut f64,
        clipping: bool,
    ) -> Result<(), RenderingError> {
        let link = self.link.borrow();
        if link.is_none() {
            return Ok(());
        }
        let link = link.as_ref().unwrap();
        if let Some(acquired) = draw_ctx.get_acquired_node(link) {
            let c = acquired.get();
            render_children(&c, cascaded, draw_ctx, x, y, true, clipping)?;
        } else {
            rsvg_log!(
                "element {} references a nonexistent text source \"{}\"",
                node.get_human_readable_name(),
                link,
            );
        }
        Ok(())
    }
}
impl NodeTrait for NodeTRef {
    /// Stores the parsed `xlink:href` fragment; other attributes are handled
    /// by the generic property machinery.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag<'_>) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            if let Attribute::XlinkHref = attr {
                let fragment = Fragment::parse(value).attribute(Attribute::XlinkHref)?;
                *self.link.borrow_mut() = Some(fragment);
            }
        }
        Ok(())
    }
}
/// The `<tspan>` element: optional absolute position (`x`/`y`) plus a
/// relative displacement (`dx`/`dy`).
pub struct NodeTSpan {
    x: Cell<Option<Length>>,
    y: Cell<Option<Length>>,
    dx: Cell<Length>,
    dy: Cell<Length>,
}
impl NodeTSpan {
    /// Creates a `NodeTSpan`; absolute x/y start absent, dx/dy start at zero.
    pub fn new() -> NodeTSpan {
        NodeTSpan {
            x: Cell::new(Default::default()),
            y: Cell::new(Default::default()),
            dx: Cell::new(Length::default()),
            dy: Cell::new(Length::default()),
        }
    }

    /// Accumulates this span's advance into `length`.
    ///
    /// An absolute `x` or `y` restarts positioning, so measuring stops
    /// (returns `true`) without descending into children.
    fn measure(
        &self,
        node: &RsvgNode,
        cascaded: &CascadedValues<'_>,
        draw_ctx: &mut DrawingCtx,
        length: &mut f64,
        usetextonly: bool,
    ) -> bool {
        let values = cascaded.get();
        if self.x.get().is_some() || self.y.get().is_some() {
            return true;
        }
        let params = draw_ctx.get_view_params();
        if values.text_gravity_is_vertical() {
            *length += self.dy.get().normalize(values, &params);
        } else {
            *length += self.dx.get().normalize(values, &params);
        }
        measure_children(node, cascaded, draw_ctx, length, usetextonly)
    }

    /// Renders the span: applies absolute/relative positioning plus the
    /// text-anchor offset, then renders its children.
    fn render(
        &self,
        node: &RsvgNode,
        cascaded: &CascadedValues<'_>,
        draw_ctx: &mut DrawingCtx,
        x: &mut f64,
        y: &mut f64,
        usetextonly: bool,
        clipping: bool,
    ) -> Result<(), RenderingError> {
        let values = cascaded.get();
        let params = draw_ctx.get_view_params();
        let mut dx = self.dx.get().normalize(values, &params);
        let mut dy = self.dy.get().normalize(values, &params);
        let vertical = values.text_gravity_is_vertical();
        let anchor = values.text_anchor;
        let offset = anchor_offset(node, cascaded, draw_ctx, anchor, usetextonly);
        // An absolute position resets the pen; the anchor offset applies only
        // along the writing direction.
        if let Some(self_x) = self.x.get() {
            *x = self_x.normalize(values, &params);
            if !vertical {
                *x -= offset;
                dx = match anchor {
                    TextAnchor::Start => dx,
                    TextAnchor::Middle => dx / 2f64,
                    _ => 0f64,
                }
            }
        }
        *x += dx;
        if let Some(self_y) = self.y.get() {
            *y = self_y.normalize(values, &params);
            if vertical {
                *y -= offset;
                dy = match anchor {
                    TextAnchor::Start => dy,
                    TextAnchor::Middle => dy / 2f64,
                    _ => 0f64,
                }
            }
        }
        *y += dy;
        render_children(node, cascaded, draw_ctx, x, y, usetextonly, clipping)
    }
}
impl NodeTrait for NodeTSpan {
    /// Parses x/y (optional, absolute) and dx/dy (relative) attributes.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag<'_>) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            match attr {
                Attribute::X => self
                    .x
                    .set(parse("x", value, LengthDir::Horizontal).map(Some)?),
                Attribute::Y => self
                    .y
                    .set(parse("y", value, LengthDir::Vertical).map(Some)?),
                Attribute::Dx => self.dx.set(parse("dx", value, LengthDir::Horizontal)?),
                Attribute::Dy => self.dy.set(parse("dy", value, LengthDir::Vertical)?),
                _ => (),
            }
        }
        Ok(())
    }

    // `<tspan>` elements may contain character data directly.
    fn accept_chars(&self) -> bool {
        true
    }
}
/// Converts a user-space quantity to Pango units (see `PANGO_SCALE`).
///
/// Uses `f64::round()` (like `pango_units_from_double`) so that negative
/// values — e.g. a negative `letter-spacing` — round to the nearest unit.
/// The previous `+ 0.5`-then-truncate scheme biased negatives toward zero
/// (e.g. a scaled value of -1.4 became 0 instead of -1).
fn to_pango_units(v: f64) -> i32 {
    (v * f64::from(pango::SCALE)).round() as i32
}
/// Maps the CSS `font-style` property onto Pango's style enum.
impl From<FontStyle> for pango::Style {
    fn from(s: FontStyle) -> pango::Style {
        match s {
            FontStyle::Normal => pango::Style::Normal,
            FontStyle::Italic => pango::Style::Italic,
            FontStyle::Oblique => pango::Style::Oblique,
        }
    }
}
/// Maps the CSS `font-variant` property onto Pango's variant enum.
impl From<FontVariant> for pango::Variant {
    fn from(v: FontVariant) -> pango::Variant {
        match v {
            FontVariant::Normal => pango::Variant::Normal,
            FontVariant::SmallCaps => pango::Variant::SmallCaps,
        }
    }
}
/// Maps the CSS `font-stretch` property onto Pango's stretch enum.
/// `wider`/`narrower` have no exact Pango equivalent and are approximated.
impl From<FontStretch> for pango::Stretch {
    fn from(s: FontStretch) -> pango::Stretch {
        match s {
            FontStretch::Normal => pango::Stretch::Normal,
            FontStretch::Wider => pango::Stretch::Expanded, // not quite correct
            FontStretch::Narrower => pango::Stretch::Condensed, // not quite correct
            FontStretch::UltraCondensed => pango::Stretch::UltraCondensed,
            FontStretch::ExtraCondensed => pango::Stretch::ExtraCondensed,
            FontStretch::Condensed => pango::Stretch::Condensed,
            FontStretch::SemiCondensed => pango::Stretch::SemiCondensed,
            FontStretch::SemiExpanded => pango::Stretch::SemiExpanded,
            FontStretch::Expanded => pango::Stretch::Expanded,
            FontStretch::ExtraExpanded => pango::Stretch::ExtraExpanded,
            FontStretch::UltraExpanded => pango::Stretch::UltraExpanded,
        }
    }
}
/// Maps the CSS `font-weight` property onto Pango's weight enum.
/// `bolder`/`lighter` are resolved as fixed steps rather than relative to
/// the inherited weight.
impl From<FontWeightSpec> for pango::Weight {
    fn from(w: FontWeightSpec) -> pango::Weight {
        match w {
            FontWeightSpec::Normal => pango::Weight::Normal,
            FontWeightSpec::Bold => pango::Weight::Bold,
            FontWeightSpec::Bolder => pango::Weight::Ultrabold,
            FontWeightSpec::Lighter => pango::Weight::Light,
            FontWeightSpec::W100 => pango::Weight::Thin,
            FontWeightSpec::W200 => pango::Weight::Ultralight,
            FontWeightSpec::W300 => pango::Weight::Semilight,
            FontWeightSpec::W400 => pango::Weight::Normal,
            FontWeightSpec::W500 => pango::Weight::Medium,
            FontWeightSpec::W600 => pango::Weight::Semibold,
            FontWeightSpec::W700 => pango::Weight::Bold,
            FontWeightSpec::W800 => pango::Weight::Ultrabold,
            FontWeightSpec::W900 => pango::Weight::Heavy,
        }
    }
}
/// Maps the CSS `direction` property onto Pango's base direction.
impl From<Direction> for pango::Direction {
    fn from(d: Direction) -> pango::Direction {
        match d {
            Direction::Ltr => pango::Direction::Ltr,
            Direction::Rtl => pango::Direction::Rtl,
        }
    }
}
/// Maps the CSS `direction` property onto a Pango layout alignment.
impl From<Direction> for pango::Alignment {
    fn from(d: Direction) -> pango::Alignment {
        match d {
            Direction::Ltr => pango::Alignment::Left,
            Direction::Rtl => pango::Alignment::Right,
        }
    }
}
/// Derives a Pango base direction from the SVG `writing-mode` property.
impl From<WritingMode> for pango::Direction {
    fn from(m: WritingMode) -> pango::Direction {
        match m {
            WritingMode::LrTb | WritingMode::Lr | WritingMode::Tb | WritingMode::TbRl => {
                pango::Direction::Ltr
            }
            WritingMode::RlTb | WritingMode::Rl => pango::Direction::Rtl,
        }
    }
}
/// Derives a Pango gravity from the SVG `writing-mode` property:
/// top-to-bottom modes render with East gravity (vertical text).
impl From<WritingMode> for pango::Gravity {
    fn from(m: WritingMode) -> pango::Gravity {
        match m {
            WritingMode::Tb | WritingMode::TbRl => pango::Gravity::East,
            WritingMode::LrTb | WritingMode::Lr | WritingMode::RlTb | WritingMode::Rl => {
                pango::Gravity::South
            }
        }
    }
}
/// Builds a fully-configured Pango layout for `text`, applying the current
/// language, base direction/gravity, font description, letter-spacing, and
/// text decorations from `values`.
fn create_pango_layout(
    draw_ctx: &DrawingCtx,
    values: &ComputedValues,
    text: &str,
) -> pango::Layout {
    let pango_context = draw_ctx.get_pango_context();
    // See the construction of the XmlLang property
    // We use "" there as the default value; this means that the language is not set.
    // If the language *is* set, we can use it here.
    if !values.xml_lang.0.is_empty() {
        let pango_lang = pango::Language::from_string(&values.xml_lang.0);
        pango_context.set_language(&pango_lang);
    }
    pango_context.set_base_gravity(pango::Gravity::from(values.writing_mode));
    // unicode-bidi override/embed wins; then an explicit non-LTR direction;
    // otherwise the writing mode decides the base direction.
    match (values.unicode_bidi, values.direction) {
        (UnicodeBidi::Override, _) | (UnicodeBidi::Embed, _) => {
            pango_context.set_base_dir(pango::Direction::from(values.direction));
        }
        (_, direction) if direction != Direction::Ltr => {
            pango_context.set_base_dir(pango::Direction::from(direction));
        }
        (_, _) => {
            pango_context.set_base_dir(pango::Direction::from(values.writing_mode));
        }
    }
    let mut font_desc = pango_context.get_font_description().unwrap();
    font_desc.set_family(&(values.font_family.0).0);
    font_desc.set_style(pango::Style::from(values.font_style));
    font_desc.set_variant(pango::Variant::from(values.font_variant));
    font_desc.set_weight(pango::Weight::from(values.font_weight.0));
    font_desc.set_stretch(pango::Stretch::from(values.font_stretch));
    let params = draw_ctx.get_view_params();
    font_desc.set_size(to_pango_units(
        values.font_size.0.normalize(values, &params),
    ));
    let layout = pango::Layout::new(&pango_context);
    layout.set_font_description(&font_desc);
    let attr_list = pango::AttrList::new();
    attr_list.insert(
        pango::Attribute::new_letter_spacing(to_pango_units(
            values.letter_spacing.0.normalize(values, &params),
        ))
        .unwrap(),
    );
    if values.text_decoration.underline {
        attr_list.insert(pango::Attribute::new_underline(pango::Underline::Single).unwrap());
    }
    if values.text_decoration.strike {
        attr_list.insert(pango::Attribute::new_strikethrough(true).unwrap());
    }
    layout.set_attributes(&attr_list);
    layout.set_alignment(pango::Alignment::from(values.direction));
    layout.set_text(text);
    layout
}
/// Computes the pen-shift implied by a `text-anchor` value.
///
/// `start` needs no measurement at all; `middle` and `end` measure the
/// children's advance and use half of it or all of it, respectively.
fn anchor_offset(
    node: &RsvgNode,
    cascaded: &CascadedValues<'_>,
    draw_ctx: &mut DrawingCtx,
    anchor: TextAnchor,
    textonly: bool,
) -> f64 {
    match anchor {
        TextAnchor::Start => 0f64,
        other => {
            let mut advance = 0f64;
            measure_children(node, cascaded, draw_ctx, &mut advance, textonly);
            if let TextAnchor::Middle = other {
                advance / 2f64
            } else {
                advance
            }
        }
    }
}
/// Measures all of `node`'s children, accumulating their advance into `length`.
///
/// Returns `true` as soon as some child reports that measuring must stop
/// (e.g. a descendant with an absolute position); later children are skipped.
fn measure_children(
    node: &RsvgNode,
    cascaded: &CascadedValues<'_>,
    draw_ctx: &mut DrawingCtx,
    length: &mut f64,
    textonly: bool,
) -> bool {
    // `any()` short-circuits exactly like the original early-`break` loop.
    node.children().any(|child| {
        measure_child(
            &child,
            &CascadedValues::new(cascaded, &child),
            draw_ctx,
            length,
            textonly,
        )
    })
}
/// Measures a single child during the text-measurement pass.
///
/// Accumulates the child's advance into `length`; returns `true` when the
/// measuring pass should stop early (propagated from tspan/tref measuring).
fn measure_child(
    node: &RsvgNode,
    cascaded: &CascadedValues<'_>,
    draw_ctx: &mut DrawingCtx,
    length: &mut f64,
    textonly: bool,
) -> bool {
    let values = cascaded.get();
    let mut done = false;
    // Apply the child's transform while measuring; restored before returning.
    let cr = draw_ctx.get_cairo_context();
    cr.save();
    cr.transform(node.get_transform());
    match (node.get_type(), textonly) {
        (NodeType::Chars, _) => {
            // here we use the values from the current element,
            // instead of child_values because NodeChars does not
            // represent a real SVG element - it is just our container
            // for character data.
            *length += node.with_impl(|chars: &NodeChars| chars.measure(node, values, draw_ctx));
        }
        (_, true) => {
            // In text-only mode, recurse through any element's children.
            done = measure_children(
                node,
                &CascadedValues::new(cascaded, node),
                draw_ctx,
                length,
                textonly,
            );
        }
        (NodeType::TSpan, _) => {
            node.with_impl(|tspan: &NodeTSpan| {
                done = tspan.measure(
                    node,
                    &CascadedValues::new(cascaded, node),
                    draw_ctx,
                    length,
                    textonly,
                );
            });
        }
        (NodeType::TRef, _) => {
            node.with_impl(|tref: &NodeTRef| {
                done = tref.measure(node, &CascadedValues::new(cascaded, node), draw_ctx, length);
            });
        }
        (_, _) => {}
    }
    cr.restore();
    done
}
/// Renders every child of `node` inside a single discrete layer, advancing
/// the text position `(x, y)` as each child is drawn.
fn render_children(
    node: &RsvgNode,
    cascaded: &CascadedValues<'_>,
    draw_ctx: &mut DrawingCtx,
    x: &mut f64,
    y: &mut f64,
    textonly: bool,
    clipping: bool,
) -> Result<(), RenderingError> {
    let values = cascaded.get();
    draw_ctx.with_discrete_layer(node, values, clipping, &mut |dc| {
        // Stops at the first child that fails to render.
        node.children()
            .try_for_each(|child| render_child(&child, cascaded, dc, x, y, textonly, clipping))
    })
}
/// Renders a single child of a text element, dispatching on node type.
///
/// `textonly` forces recursion into arbitrary children's character content
/// (used when rendering the target of a `<tref>`).
fn render_child(
    node: &RsvgNode,
    cascaded: &CascadedValues<'_>,
    draw_ctx: &mut DrawingCtx,
    x: &mut f64,
    y: &mut f64,
    textonly: bool,
    clipping: bool,
) -> Result<(), RenderingError> {
    let values = cascaded.get();
    // Apply the child's transform for the duration of the draw.
    let cr = draw_ctx.get_cairo_context();
    cr.save();
    cr.transform(node.get_transform());
    let res = match (node.get_type(), textonly) {
        (NodeType::Chars, _) => {
            node.with_impl(|chars: &NodeChars| {
                // here we use the values from the current element,
                // instead of child_values because NodeChars does not
                // represent a real SVG element - it is just our container
                // for character data.
                chars.render(node, values, draw_ctx, x, y, clipping)
            })
        }
        (_, true) => render_children(
            node,
            &CascadedValues::new(cascaded, node),
            draw_ctx,
            x,
            y,
            textonly,
            clipping,
        ),
        (NodeType::TSpan, _) => node.with_impl(|tspan: &NodeTSpan| {
            tspan.render(
                node,
                &CascadedValues::new(cascaded, node),
                draw_ctx,
                x,
                y,
                textonly,
                clipping,
            )
        }),
        (NodeType::TRef, _) => node.with_impl(|tref: &NodeTRef| {
            tref.render(
                node,
                &CascadedValues::new(cascaded, node),
                draw_ctx,
                x,
                y,
                clipping,
            )
        }),
        (_, _) => Ok(()),
    };
    cr.restore();
    res
}
|
use libc;
use pango::{self, ContextExt, LayoutExt};
use std;
use std::cell::{Cell, RefCell};
use std::str;
use attributes::Attribute;
use draw::draw_pango_layout;
use drawing_ctx::{self, RsvgDrawingCtx};
use handle::RsvgHandle;
use length::*;
use node::{
boxed_node_new,
rsvg_node_get_state,
NodeResult,
NodeTrait,
NodeType,
RsvgCNodeImpl,
RsvgNode,
};
use parsers::parse;
use property_bag::PropertyBag;
use space::xml_space_normalize;
use state::{
self,
ComputedValues,
Direction,
FontStretch,
FontStyle,
FontVariant,
FontWeight,
TextAnchor,
UnicodeBidi,
WritingMode,
};
/// In SVG text elements, we use `NodeChars` to store character data. For example,
/// an element like `<text>Foo Bar</text>` will be a `NodeText` with a single child,
/// and the child will be a `NodeChars` with "Foo Bar" for its contents.
///
/// Text elements can contain `<tspan>` sub-elements. In this case,
/// those `tspan` nodes will also contain `NodeChars` children.
///
/// A text or tspan element can contain more than one `NodeChars` child, for example,
/// if there is an XML comment that splits the character contents in two:
///
/// ```xml
/// <text>
/// This sentence will create a NodeChars.
/// <!-- this comment is ignored -->
/// This sentence will create another NodeChars.
/// </text>
/// ```
///
/// When rendering a text element, it will take care of concatenating the strings
/// in its `NodeChars` children as appropriate, depending on the
/// `xml:space="preserve"` attribute. A `NodeChars` stores the characters verbatim
/// as they come out of the XML parser, after ensuring that they are valid UTF-8.
struct NodeChars {
    // Raw character data as received from the XML parser.
    string: RefCell<String>,
}
impl NodeChars {
    /// Creates an empty character-data container.
    fn new() -> NodeChars {
        NodeChars {
            string: RefCell::new(String::new()),
        }
    }

    /// Appends parser-supplied text verbatim.
    fn append(&self, s: &str) {
        self.string.borrow_mut().push_str(s);
    }

    /// Stores the text's advance (layout width, user-space units) in `length`.
    ///
    /// NOTE(review): this *overwrites* `length` instead of accumulating into
    /// it — callers presumably pass a fresh accumulator; confirm at call sites.
    fn measure(&self, draw_ctx: *const RsvgDrawingCtx, values: &ComputedValues, length: &mut f64) {
        let s = self.string.borrow();
        let layout = create_pango_layout(draw_ctx, values, &s);
        let (width, _) = layout.get_size();
        *length = f64::from(width) / f64::from(pango::SCALE);
    }

    /// Draws the text at `(x, y)`, applying the accumulated baseline shift,
    /// and advances the coordinate along the writing direction (y when
    /// gravity is vertical, x otherwise).
    fn render(
        &self,
        draw_ctx: *mut RsvgDrawingCtx,
        values: &ComputedValues,
        x: &mut f64,
        y: &mut f64,
        clipping: bool,
    ) {
        let s = self.string.borrow();
        let layout = create_pango_layout(draw_ctx, values, &s);
        let (width, _) = layout.get_size();
        let baseline = f64::from(layout.get_baseline()) / f64::from(pango::SCALE);
        let offset = baseline + drawing_ctx::get_accumulated_baseline_shift(draw_ctx);
        if values.text_gravity_is_vertical() {
            draw_pango_layout(draw_ctx, values, &layout, *x + offset, *y, clipping);
            *y += f64::from(width) / f64::from(pango::SCALE);
        } else {
            draw_pango_layout(draw_ctx, values, &layout, *x, *y - offset, clipping);
            *x += f64::from(width) / f64::from(pango::SCALE);
        }
    }
}
impl NodeTrait for NodeChars {
    /// `NodeChars` is not a real SVG element, so it has no attributes to set.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, _: &PropertyBag) -> NodeResult {
        Ok(())
    }

    /// Drawing happens through the enclosing text element, not here.
    fn draw(&self, _: &RsvgNode, _: *mut RsvgDrawingCtx, _: i32, _: bool) {
        // nothing
    }

    fn get_c_impl(&self) -> *const RsvgCNodeImpl {
        unreachable!();
    }
}
/// The `<text>` element: a top-level text block with an optional position
/// (`x`/`y`) and displacement (`dx`/`dy`).
struct NodeText {
    x: Cell<RsvgLength>,
    y: Cell<RsvgLength>,
    dx: Cell<RsvgLength>,
    dy: Cell<RsvgLength>,
}
impl NodeText {
    /// Constructs a `<text>` node whose position and displacement all start
    /// out as zero-valued lengths.
    fn new() -> NodeText {
        let zero = || Cell::new(RsvgLength::default());
        NodeText {
            x: zero(),
            y: zero(),
            dx: zero(),
            dy: zero(),
        }
    }
}
impl NodeTrait for NodeText {
    /// Parses the x/y/dx/dy positioning attributes.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            match attr {
                Attribute::X => self.x.set(parse("x", value, LengthDir::Horizontal, None)?),
                Attribute::Y => self.y.set(parse("y", value, LengthDir::Vertical, None)?),
                Attribute::Dx => self
                    .dx
                    .set(parse("dx", value, LengthDir::Horizontal, None)?),
                Attribute::Dy => self.dy.set(parse("dy", value, LengthDir::Vertical, None)?),
                _ => (),
            }
        }
        Ok(())
    }

    /// Renders the whole `<text>` element: shifts the starting point by the
    /// text-anchor offset, then renders all children.
    fn draw(&self, node: &RsvgNode, draw_ctx: *mut RsvgDrawingCtx, _dominate: i32, clipping: bool) {
        let values = &node.get_computed_values();
        let mut x = self.x.get().normalize(draw_ctx);
        let mut y = self.y.get().normalize(draw_ctx);
        let mut dx = self.dx.get().normalize(draw_ctx);
        let mut dy = self.dy.get().normalize(draw_ctx);
        let anchor = values.text_anchor;
        let offset = anchor_offset(node, draw_ctx, values, anchor, false);
        // The anchor offset applies along the writing direction only.
        if values.text_gravity_is_vertical() {
            y -= offset;
            dy = match anchor {
                TextAnchor::Start => dy,
                TextAnchor::Middle => dy / 2f64,
                _ => 0f64,
            }
        } else {
            x -= offset;
            dx = match anchor {
                TextAnchor::Start => dx,
                TextAnchor::Middle => dx / 2f64,
                _ => 0f64,
            }
        }
        x += dx;
        y += dy;
        render_children(node, draw_ctx, values, &mut x, &mut y, false, clipping);
    }

    fn get_c_impl(&self) -> *const RsvgCNodeImpl {
        unreachable!();
    }
}
/// The `<tref>` element: renders the character content of another node
/// referenced through `xlink:href` (stored here as a raw string).
struct NodeTRef {
    link: RefCell<Option<String>>,
}
impl NodeTRef {
    /// Creates a `NodeTRef` with no `xlink:href` target yet.
    fn new() -> NodeTRef {
        NodeTRef {
            link: RefCell::new(Default::default()),
        }
    }

    /// Measures the referenced node's character content (text-only mode);
    /// returns `false` if the link is unset or cannot be acquired.
    fn measure(
        &self,
        draw_ctx: *mut RsvgDrawingCtx,
        values: &ComputedValues,
        length: &mut f64,
    ) -> bool {
        let l = self.link.borrow();
        if l.is_none() {
            return false;
        }
        let done =
            if let Some(acquired) = drawing_ctx::get_acquired_node(draw_ctx, l.as_ref().unwrap()) {
                let c = acquired.get();
                measure_children(&c, draw_ctx, values, length, true)
            } else {
                false
            };
        done
    }

    /// Renders the referenced node's character content at `(x, y)`;
    /// an unset or dangling reference is silently skipped.
    fn render(
        &self,
        draw_ctx: *mut RsvgDrawingCtx,
        values: &ComputedValues,
        x: &mut f64,
        y: &mut f64,
        clipping: bool,
    ) {
        let l = self.link.borrow();
        if l.is_none() {
            return;
        }
        if let Some(acquired) = drawing_ctx::get_acquired_node(draw_ctx, l.as_ref().unwrap()) {
            let c = acquired.get();
            render_children(&c, draw_ctx, values, x, y, true, clipping)
        }
    }
}
impl NodeTrait for NodeTRef {
    /// Stores the `xlink:href` target; other attributes are ignored here.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            if let Attribute::XlinkHref = attr {
                self.link.replace(Some(value.to_owned()));
            }
        }
        Ok(())
    }

    /// Rendering happens through the enclosing text element, not here.
    fn draw(&self, _: &RsvgNode, _: *mut RsvgDrawingCtx, _: i32, _: bool) {
        // nothing
    }

    fn get_c_impl(&self) -> *const RsvgCNodeImpl {
        unreachable!();
    }
}
/// The `<tspan>` element: optional absolute position (`x`/`y`) plus a
/// relative displacement (`dx`/`dy`).
struct NodeTSpan {
    x: Cell<Option<RsvgLength>>,
    y: Cell<Option<RsvgLength>>,
    dx: Cell<RsvgLength>,
    dy: Cell<RsvgLength>,
}
impl NodeTSpan {
    /// Creates a `NodeTSpan`; absolute x/y start absent, dx/dy start at zero.
    fn new() -> NodeTSpan {
        NodeTSpan {
            x: Cell::new(Default::default()),
            y: Cell::new(Default::default()),
            dx: Cell::new(RsvgLength::default()),
            dy: Cell::new(RsvgLength::default()),
        }
    }

    /// Accumulates this span's advance into `length`.
    ///
    /// An absolute `x` or `y` restarts positioning, so measuring stops
    /// (returns `true`) without descending into children.
    fn measure(
        &self,
        node: &RsvgNode,
        draw_ctx: *mut RsvgDrawingCtx,
        values: &ComputedValues,
        length: &mut f64,
        usetextonly: bool,
    ) -> bool {
        if self.x.get().is_some() || self.y.get().is_some() {
            return true;
        }
        if values.text_gravity_is_vertical() {
            *length += self.dy.get().normalize(draw_ctx);
        } else {
            *length += self.dx.get().normalize(draw_ctx);
        }
        measure_children(node, draw_ctx, values, length, usetextonly)
    }

    /// Renders the span with its own state pushed onto the drawing context's
    /// state stack; the state is popped again before returning.
    fn render(
        &self,
        node: &RsvgNode,
        draw_ctx: *mut RsvgDrawingCtx,
        _values: &ComputedValues,
        x: &mut f64,
        y: &mut f64,
        usetextonly: bool,
        clipping: bool,
    ) {
        drawing_ctx::state_push(draw_ctx);
        drawing_ctx::state_reinherit_top(draw_ctx, node.get_state(), 0);
        let state = drawing_ctx::get_current_state(draw_ctx).unwrap();
        let computed = state.get_computed_values();
        let mut dx = self.dx.get().normalize(draw_ctx);
        let mut dy = self.dy.get().normalize(draw_ctx);
        let vertical = computed.text_gravity_is_vertical();
        let anchor = computed.text_anchor;
        let offset = anchor_offset(node, draw_ctx, &computed, anchor, usetextonly);
        // An absolute position resets the pen; the anchor offset applies only
        // along the writing direction.
        if let Some(self_x) = self.x.get() {
            *x = self_x.normalize(draw_ctx);
            if !vertical {
                *x -= offset;
                dx = match anchor {
                    TextAnchor::Start => dx,
                    TextAnchor::Middle => dx / 2f64,
                    _ => 0f64,
                }
            }
        }
        *x += dx;
        if let Some(self_y) = self.y.get() {
            *y = self_y.normalize(draw_ctx);
            if vertical {
                *y -= offset;
                dy = match anchor {
                    TextAnchor::Start => dy,
                    TextAnchor::Middle => dy / 2f64,
                    _ => 0f64,
                }
            }
        }
        *y += dy;
        render_children(node, draw_ctx, &computed, x, y, usetextonly, clipping);
        drawing_ctx::state_pop(draw_ctx);
    }
}
impl NodeTrait for NodeTSpan {
    /// Parses x/y (optional, absolute) and dx/dy (relative) attributes.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            match attr {
                Attribute::X => self
                    .x
                    .set(parse("x", value, LengthDir::Horizontal, None).map(Some)?),
                Attribute::Y => self
                    .y
                    .set(parse("y", value, LengthDir::Vertical, None).map(Some)?),
                Attribute::Dx => self
                    .dx
                    .set(parse("dx", value, LengthDir::Horizontal, None)?),
                Attribute::Dy => self.dy.set(parse("dy", value, LengthDir::Vertical, None)?),
                _ => (),
            }
        }
        Ok(())
    }

    /// Rendering happens through the enclosing text element, not here.
    fn draw(&self, _: &RsvgNode, _: *mut RsvgDrawingCtx, _: i32, _: bool) {
        // nothing
    }

    fn get_c_impl(&self) -> *const RsvgCNodeImpl {
        unreachable!();
    }
}
/// Converts a user-space quantity to Pango units (see `PANGO_SCALE`).
///
/// Rounds to the nearest unit (like `pango_units_from_double`) instead of
/// truncating toward zero, which silently lost up to one whole Pango unit
/// for both positive and negative inputs.
fn to_pango_units(v: f64) -> i32 {
    (v * f64::from(pango::SCALE)).round() as i32
}
/// Maps the CSS `font-style` property onto Pango's style enum.
impl From<FontStyle> for pango::Style {
    fn from(s: FontStyle) -> pango::Style {
        match s {
            FontStyle::Normal => pango::Style::Normal,
            FontStyle::Italic => pango::Style::Italic,
            FontStyle::Oblique => pango::Style::Oblique,
        }
    }
}
/// Maps the CSS `font-variant` property onto Pango's variant enum.
impl From<FontVariant> for pango::Variant {
    fn from(v: FontVariant) -> pango::Variant {
        match v {
            FontVariant::Normal => pango::Variant::Normal,
            FontVariant::SmallCaps => pango::Variant::SmallCaps,
        }
    }
}
/// Maps the CSS `font-stretch` property onto Pango's stretch enum.
/// `wider`/`narrower` have no exact Pango equivalent and are approximated.
impl From<FontStretch> for pango::Stretch {
    fn from(s: FontStretch) -> pango::Stretch {
        match s {
            FontStretch::Normal => pango::Stretch::Normal,
            FontStretch::Wider => pango::Stretch::Expanded, // not quite correct
            FontStretch::Narrower => pango::Stretch::Condensed, // not quite correct
            FontStretch::UltraCondensed => pango::Stretch::UltraCondensed,
            FontStretch::ExtraCondensed => pango::Stretch::ExtraCondensed,
            FontStretch::Condensed => pango::Stretch::Condensed,
            FontStretch::SemiCondensed => pango::Stretch::SemiCondensed,
            FontStretch::SemiExpanded => pango::Stretch::SemiExpanded,
            FontStretch::Expanded => pango::Stretch::Expanded,
            FontStretch::ExtraExpanded => pango::Stretch::ExtraExpanded,
            FontStretch::UltraExpanded => pango::Stretch::UltraExpanded,
        }
    }
}
/// Maps the CSS `font-weight` property onto Pango's weight enum.
/// `bolder`/`lighter` are resolved as fixed steps rather than relative to
/// the inherited weight.
impl From<FontWeight> for pango::Weight {
    fn from(w: FontWeight) -> pango::Weight {
        match w {
            FontWeight::Normal => pango::Weight::Normal,
            FontWeight::Bold => pango::Weight::Bold,
            FontWeight::Bolder => pango::Weight::Ultrabold,
            FontWeight::Lighter => pango::Weight::Light,
            FontWeight::W100 => pango::Weight::Thin,
            FontWeight::W200 => pango::Weight::Ultralight,
            FontWeight::W300 => pango::Weight::Semilight,
            FontWeight::W400 => pango::Weight::Normal,
            FontWeight::W500 => pango::Weight::Medium,
            FontWeight::W600 => pango::Weight::Semibold,
            FontWeight::W700 => pango::Weight::Bold,
            FontWeight::W800 => pango::Weight::Ultrabold,
            FontWeight::W900 => pango::Weight::Heavy,
        }
    }
}
/// Maps the CSS `direction` property onto Pango's base direction.
impl From<Direction> for pango::Direction {
    fn from(d: Direction) -> pango::Direction {
        match d {
            Direction::Ltr => pango::Direction::Ltr,
            Direction::Rtl => pango::Direction::Rtl,
        }
    }
}
/// Maps the CSS `direction` property onto a Pango layout alignment.
impl From<Direction> for pango::Alignment {
    fn from(d: Direction) -> pango::Alignment {
        match d {
            Direction::Ltr => pango::Alignment::Left,
            Direction::Rtl => pango::Alignment::Right,
        }
    }
}
/// Derives a Pango base direction from the SVG `writing-mode` property.
impl From<WritingMode> for pango::Direction {
    fn from(m: WritingMode) -> pango::Direction {
        match m {
            WritingMode::LrTb | WritingMode::Lr | WritingMode::Tb | WritingMode::TbRl => {
                pango::Direction::Ltr
            }
            WritingMode::RlTb | WritingMode::Rl => pango::Direction::Rtl,
        }
    }
}
/// Derives a Pango gravity from the SVG `writing-mode` property:
/// top-to-bottom modes render with East gravity (vertical text).
impl From<WritingMode> for pango::Gravity {
    fn from(m: WritingMode) -> pango::Gravity {
        match m {
            WritingMode::Tb | WritingMode::TbRl => pango::Gravity::East,
            WritingMode::LrTb | WritingMode::Lr | WritingMode::RlTb | WritingMode::Rl => {
                pango::Gravity::South
            }
        }
    }
}
/// Builds a fully-configured Pango layout for `text`: language, base
/// direction/gravity, font description, letter-spacing, decorations,
/// alignment, and `xml:space` normalization.
fn create_pango_layout(
    draw_ctx: *const RsvgDrawingCtx,
    values: &ComputedValues,
    text: &str,
) -> pango::Layout {
    let pango_context = drawing_ctx::get_pango_context(draw_ctx);
    // See the construction of the XmlLang property
    // We use "" there as the default value; this means that the language is not set.
    // If the language *is* set, we can use it here.
    if !values.xml_lang.0.is_empty() {
        let pango_lang = pango::Language::from_string(&values.xml_lang.0);
        pango_context.set_language(&pango_lang);
    }
    pango_context.set_base_gravity(pango::Gravity::from(values.writing_mode));
    // unicode-bidi override/embed wins; then an explicit non-LTR direction;
    // otherwise the writing mode decides the base direction.
    match (values.unicode_bidi, values.direction) {
        (UnicodeBidi::Override, _) | (UnicodeBidi::Embed, _) => {
            pango_context.set_base_dir(pango::Direction::from(values.direction));
        }
        (_, direction) if direction != Direction::Ltr => {
            pango_context.set_base_dir(pango::Direction::from(direction));
        }
        (_, _) => {
            pango_context.set_base_dir(pango::Direction::from(values.writing_mode));
        }
    }
    let mut font_desc = pango_context.get_font_description().unwrap();
    font_desc.set_family(&values.font_family.0);
    font_desc.set_style(pango::Style::from(values.font_style));
    font_desc.set_variant(pango::Variant::from(values.font_variant));
    font_desc.set_weight(pango::Weight::from(values.font_weight));
    font_desc.set_stretch(pango::Stretch::from(values.font_stretch));
    // NOTE(review): the `/ dpi_y * 72.0` factor looks like a pixels-to-points
    // conversion for the font size — confirm against get_normalized_font_size.
    let (_, dpi_y) = drawing_ctx::get_dpi(draw_ctx);
    font_desc.set_size(to_pango_units(
        drawing_ctx::get_normalized_font_size(draw_ctx) / dpi_y * 72.0,
    ));
    let layout = pango::Layout::new(&pango_context);
    layout.set_font_description(&font_desc);
    let attr_list = pango::AttrList::new();
    attr_list.insert(
        pango::Attribute::new_letter_spacing(to_pango_units(
            values.letter_spacing.0.normalize(draw_ctx),
        )).unwrap(),
    );
    if values.text_decoration.underline {
        attr_list.insert(pango::Attribute::new_underline(pango::Underline::Single).unwrap());
    }
    if values.text_decoration.strike {
        attr_list.insert(pango::Attribute::new_strikethrough(true).unwrap());
    }
    layout.set_attributes(&attr_list);
    layout.set_alignment(pango::Alignment::from(values.direction));
    // Normalize whitespace per xml:space before handing the text to Pango.
    let t = xml_space_normalize(values.xml_space, text);
    layout.set_text(&t);
    layout
}
/// Returns the offset to subtract from the text's start position so the run
/// honors `text-anchor`: 0 for `start`, half the measured length for
/// `middle`, the full length otherwise (`end`).
fn anchor_offset(
    node: &RsvgNode,
    draw_ctx: *mut RsvgDrawingCtx,
    values: &ComputedValues,
    anchor: TextAnchor,
    textonly: bool,
) -> f64 {
    let mut offset = 0f64;
    match anchor {
        TextAnchor::Start => {}
        TextAnchor::Middle => {
            measure_children(node, draw_ctx, values, &mut offset, textonly);
            offset /= 2f64;
        }
        _ => {
            measure_children(node, draw_ctx, values, &mut offset, textonly);
        }
    }
    offset
}
/// Accumulates the advance of `node`'s children into `length`; stops as soon
/// as a child reports that measuring is done, and returns that flag.
fn measure_children(
    node: &RsvgNode,
    draw_ctx: *mut RsvgDrawingCtx,
    values: &ComputedValues,
    length: &mut f64,
    textonly: bool,
) -> bool {
    let mut done = false;
    for child in node.children() {
        done = measure_child(&child, draw_ctx, values, length, textonly);
        if done {
            break;
        }
    }
    done
}
/// Measures a single child node, dispatching on its type.  The drawing
/// context's state is pushed and reinherited so the child is measured with
/// its own computed values, and popped again before returning.
fn measure_child(
    node: &RsvgNode,
    draw_ctx: *mut RsvgDrawingCtx,
    _values: &ComputedValues,
    length: &mut f64,
    textonly: bool,
) -> bool {
    let mut done = false;
    drawing_ctx::state_push(draw_ctx);
    drawing_ctx::state_reinherit_top(draw_ctx, node.get_state(), 0);
    let state = drawing_ctx::get_current_state(draw_ctx).unwrap();
    let computed = state.get_computed_values();
    match (node.get_type(), textonly) {
        (NodeType::Chars, _) => {
            node.with_impl(|chars: &NodeChars| chars.measure(draw_ctx, &computed, length));
        }
        // In text-only mode (e.g. measuring through a <tref>) recurse into
        // any element's children.
        (_, true) => {
            done = measure_children(node, draw_ctx, &computed, length, textonly);
        }
        (NodeType::TSpan, _) => {
            node.with_impl(|tspan: &NodeTSpan| {
                done = tspan.measure(node, draw_ctx, &computed, length, textonly);
            });
        }
        (NodeType::TRef, _) => {
            node.with_impl(|tref: &NodeTRef| {
                done = tref.measure(draw_ctx, &computed, length);
            });
        }
        // Other node types contribute nothing to text measurement.
        (_, _) => {}
    }
    drawing_ctx::state_pop(draw_ctx);
    done
}
/// Renders `node`'s children inside a discrete layer, advancing the current
/// text position (`x`, `y`) as each child is drawn.
fn render_children(
    node: &RsvgNode,
    draw_ctx: *mut RsvgDrawingCtx,
    values: &ComputedValues,
    x: &mut f64,
    y: &mut f64,
    textonly: bool,
    clipping: bool,
) {
    drawing_ctx::push_discrete_layer(draw_ctx, values, clipping);
    for child in node.children() {
        render_child(&child, draw_ctx, values, x, y, textonly, clipping);
    }
    drawing_ctx::pop_discrete_layer(draw_ctx, values, clipping);
}
/// Renders a single child node, dispatching on its type; mirrors the
/// dispatch in `measure_child`.
fn render_child(
    node: &RsvgNode,
    draw_ctx: *mut RsvgDrawingCtx,
    values: &ComputedValues,
    x: &mut f64,
    y: &mut f64,
    textonly: bool,
    clipping: bool,
) {
    match (node.get_type(), textonly) {
        (NodeType::Chars, _) => {
            node.with_impl(|chars: &NodeChars| chars.render(draw_ctx, values, x, y, clipping));
        }
        // In text-only mode (e.g. rendering through a <tref>) recurse into
        // any element's children.
        (_, true) => {
            render_children(node, draw_ctx, values, x, y, textonly, clipping);
        }
        (NodeType::TSpan, _) => {
            node.with_impl(|tspan: &NodeTSpan| {
                tspan.render(node, draw_ctx, values, x, y, textonly, clipping);
            });
        }
        (NodeType::TRef, _) => {
            node.with_impl(|tref: &NodeTRef| {
                tref.render(draw_ctx, values, x, y, clipping);
            });
        }
        // Other node types are not drawn by the text machinery.
        (_, _) => {}
    }
}
// C entry point: creates a chars node (character data inside text elements).
// `cond` is cleared on its state.
#[no_mangle]
pub extern "C" fn rsvg_node_chars_new(raw_parent: *const RsvgNode) -> *const RsvgNode {
    let node = boxed_node_new(NodeType::Chars, raw_parent, Box::new(NodeChars::new()));
    let state = state::from_c_mut(rsvg_node_get_state(node));
    state.cond = false;
    node
}
// C entry point: appends `len` bytes of UTF-8 at `text` to the chars node.
#[no_mangle]
pub extern "C" fn rsvg_node_chars_append(
    raw_node: *const RsvgNode,
    text: *const libc::c_char,
    len: isize,
) {
    assert!(!raw_node.is_null());
    // SAFETY: asserted non-null above; the C caller passes a valid node pointer.
    let node: &RsvgNode = unsafe { &*raw_node };
    assert!(!text.is_null());
    assert!(len >= 0);
    // libxml2 already validated the incoming string as UTF-8. Note that
    // it is *not* nul-terminated; this is why we create a byte slice first.
    let bytes = unsafe { std::slice::from_raw_parts(text as *const u8, len as usize) };
    let utf8 = unsafe { str::from_utf8_unchecked(bytes) };
    node.with_impl(|chars: &NodeChars| {
        chars.append(utf8);
    });
}
// C entry point: creates a <text> element node (element-name argument unused).
#[no_mangle]
pub extern "C" fn rsvg_node_text_new(
    _: *const libc::c_char,
    raw_parent: *const RsvgNode,
) -> *const RsvgNode {
    boxed_node_new(NodeType::Text, raw_parent, Box::new(NodeText::new()))
}
// C entry point: creates a <tref> element node.
#[no_mangle]
pub extern "C" fn rsvg_node_tref_new(
    _: *const libc::c_char,
    raw_parent: *const RsvgNode,
) -> *const RsvgNode {
    boxed_node_new(NodeType::TRef, raw_parent, Box::new(NodeTRef::new()))
}
// C entry point: creates a <tspan> element node.
#[no_mangle]
pub extern "C" fn rsvg_node_tspan_new(
    _: *const libc::c_char,
    raw_parent: *const RsvgNode,
) -> *const RsvgNode {
    boxed_node_new(NodeType::TSpan, raw_parent, Box::new(NodeTSpan::new()))
}
text.rs: Obtain the computed values from the nodes; don't pass them down
use libc;
use pango::{self, ContextExt, LayoutExt};
use std;
use std::cell::{Cell, RefCell};
use std::str;
use attributes::Attribute;
use draw::draw_pango_layout;
use drawing_ctx::{self, RsvgDrawingCtx};
use handle::RsvgHandle;
use length::*;
use node::{
boxed_node_new,
rsvg_node_get_state,
NodeResult,
NodeTrait,
NodeType,
RsvgCNodeImpl,
RsvgNode,
};
use parsers::parse;
use property_bag::PropertyBag;
use space::xml_space_normalize;
use state::{
self,
ComputedValues,
Direction,
FontStretch,
FontStyle,
FontVariant,
FontWeight,
TextAnchor,
UnicodeBidi,
WritingMode,
};
/// In SVG text elements, we use `NodeChars` to store character data. For example,
/// an element like `<text>Foo Bar</text>` will be a `NodeText` with a single child,
/// and the child will be a `NodeChars` with "Foo Bar" for its contents.
///
/// Text elements can contain `<tspan>` sub-elements. In this case,
/// those `tspan` nodes will also contain `NodeChars` children.
///
/// A text or tspan element can contain more than one `NodeChars` child, for example,
/// if there is an XML comment that splits the character contents in two:
///
/// ```xml
/// <text>
/// This sentence will create a NodeChars.
/// <!-- this comment is ignored -->
/// This sentence will create another NodeChars.
/// </text>
/// ```
///
/// When rendering a text element, it will take care of concatenating the strings
/// in its `NodeChars` children as appropriate, depending on the
/// `xml:space="preserve"` attribute. A `NodeChars` stores the characters verbatim
/// as they come out of the XML parser, after ensuring that they are valid UTF-8.
struct NodeChars {
    string: RefCell<String>,
}
impl NodeChars {
    fn new() -> NodeChars {
        NodeChars {
            string: RefCell::new(String::new()),
        }
    }
    /// Appends more character data; a node may receive several chunks.
    fn append(&self, s: &str) {
        self.string.borrow_mut().push_str(s);
    }
    /// Stores the width of the laid-out string, in user-space units, into `length`.
    fn measure(&self, node: &RsvgNode, draw_ctx: *const RsvgDrawingCtx, length: &mut f64) {
        let values = &node.get_computed_values();
        let s = self.string.borrow();
        let layout = create_pango_layout(draw_ctx, values, &s);
        let (width, _) = layout.get_size();
        // Pango sizes are in Pango units; scale back to user-space.
        *length = f64::from(width) / f64::from(pango::SCALE);
    }
    /// Draws the string at (`x`, `y`) and advances the position by the rendered
    /// width — along `y` for vertical text gravity, along `x` otherwise.
    fn render(
        &self,
        node: &RsvgNode,
        draw_ctx: *mut RsvgDrawingCtx,
        x: &mut f64,
        y: &mut f64,
        clipping: bool,
    ) {
        let values = &node.get_computed_values();
        let s = self.string.borrow();
        let layout = create_pango_layout(draw_ctx, values, &s);
        let (width, _) = layout.get_size();
        let baseline = f64::from(layout.get_baseline()) / f64::from(pango::SCALE);
        // Position relative to the baseline, including any accumulated
        // baseline-shift from ancestors.
        let offset = baseline + drawing_ctx::get_accumulated_baseline_shift(draw_ctx);
        if values.text_gravity_is_vertical() {
            draw_pango_layout(draw_ctx, values, &layout, *x + offset, *y, clipping);
            *y += f64::from(width) / f64::from(pango::SCALE);
        } else {
            draw_pango_layout(draw_ctx, values, &layout, *x, *y - offset, clipping);
            *x += f64::from(width) / f64::from(pango::SCALE);
        }
    }
}
impl NodeTrait for NodeChars {
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, _: &PropertyBag) -> NodeResult {
        // Character nodes carry no attributes.
        Ok(())
    }
    fn draw(&self, _: &RsvgNode, _: *mut RsvgDrawingCtx, _: i32, _: bool) {
        // nothing — chars are drawn through the enclosing text machinery
    }
    fn get_c_impl(&self) -> *const RsvgCNodeImpl {
        unreachable!();
    }
}
/// The `<text>` element: an anchor position (`x`, `y`) plus shifts (`dx`, `dy`).
struct NodeText {
    x: Cell<RsvgLength>,
    y: Cell<RsvgLength>,
    dx: Cell<RsvgLength>,
    dy: Cell<RsvgLength>,
}
impl NodeText {
    fn new() -> NodeText {
        NodeText {
            x: Cell::new(RsvgLength::default()),
            y: Cell::new(RsvgLength::default()),
            dx: Cell::new(RsvgLength::default()),
            dy: Cell::new(RsvgLength::default()),
        }
    }
}
impl NodeTrait for NodeText {
    // Parses the x/y/dx/dy attributes; unknown attributes are ignored.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            match attr {
                Attribute::X => self.x.set(parse("x", value, LengthDir::Horizontal, None)?),
                Attribute::Y => self.y.set(parse("y", value, LengthDir::Vertical, None)?),
                Attribute::Dx => self
                    .dx
                    .set(parse("dx", value, LengthDir::Horizontal, None)?),
                Attribute::Dy => self.dy.set(parse("dy", value, LengthDir::Vertical, None)?),
                _ => (),
            }
        }
        Ok(())
    }
    // Computes the anchored start position and renders all children from it.
    fn draw(&self, node: &RsvgNode, draw_ctx: *mut RsvgDrawingCtx, _dominate: i32, clipping: bool) {
        let values = &node.get_computed_values();
        let mut x = self.x.get().normalize(draw_ctx);
        let mut y = self.y.get().normalize(draw_ctx);
        let mut dx = self.dx.get().normalize(draw_ctx);
        let mut dy = self.dy.get().normalize(draw_ctx);
        let anchor = values.text_anchor;
        let offset = anchor_offset(node, draw_ctx, anchor, false);
        // Shift the start position back by the anchor offset along the text
        // progression axis; the dx/dy shift is scaled accordingly.
        if values.text_gravity_is_vertical() {
            y -= offset;
            dy = match anchor {
                TextAnchor::Start => dy,
                TextAnchor::Middle => dy / 2f64,
                _ => 0f64,
            }
        } else {
            x -= offset;
            dx = match anchor {
                TextAnchor::Start => dx,
                TextAnchor::Middle => dx / 2f64,
                _ => 0f64,
            }
        }
        x += dx;
        y += dy;
        render_children(node, draw_ctx, &mut x, &mut y, false, clipping);
    }
    fn get_c_impl(&self) -> *const RsvgCNodeImpl {
        unreachable!();
    }
}
/// The `<tref>` element: pulls in the character content of the element
/// referenced by `xlink:href` (children are traversed in text-only mode).
struct NodeTRef {
    link: RefCell<Option<String>>,
}
impl NodeTRef {
    fn new() -> NodeTRef {
        NodeTRef {
            link: RefCell::new(Default::default()),
        }
    }
    /// Measures the referenced node's text; returns false if there is no link
    /// or the referenced node cannot be acquired (e.g. reference cycles).
    fn measure(&self, _node: &RsvgNode, draw_ctx: *mut RsvgDrawingCtx, length: &mut f64) -> bool {
        let l = self.link.borrow();
        if l.is_none() {
            return false;
        }
        let done =
            if let Some(acquired) = drawing_ctx::get_acquired_node(draw_ctx, l.as_ref().unwrap()) {
                let c = acquired.get();
                measure_children(&c, draw_ctx, length, true)
            } else {
                false
            };
        done
    }
    /// Renders the referenced node's text content; no-op when the link is
    /// missing or cannot be acquired.
    fn render(
        &self,
        _node: &RsvgNode,
        draw_ctx: *mut RsvgDrawingCtx,
        x: &mut f64,
        y: &mut f64,
        clipping: bool,
    ) {
        let l = self.link.borrow();
        if l.is_none() {
            return;
        }
        if let Some(acquired) = drawing_ctx::get_acquired_node(draw_ctx, l.as_ref().unwrap()) {
            let c = acquired.get();
            render_children(&c, draw_ctx, x, y, true, clipping)
        }
    }
}
impl NodeTrait for NodeTRef {
    // Only xlink:href is recognized; everything else is ignored.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            match attr {
                Attribute::XlinkHref => *self.link.borrow_mut() = Some(value.to_owned()),
                _ => (),
            }
        }
        Ok(())
    }
    fn draw(&self, _: &RsvgNode, _: *mut RsvgDrawingCtx, _: i32, _: bool) {
        // nothing — tref is drawn through the enclosing text machinery
    }
    fn get_c_impl(&self) -> *const RsvgCNodeImpl {
        unreachable!();
    }
}
/// The `<tspan>` element: optional absolute position (`x`, `y`) plus
/// relative shifts (`dx`, `dy`).
struct NodeTSpan {
    x: Cell<Option<RsvgLength>>,
    y: Cell<Option<RsvgLength>>,
    dx: Cell<RsvgLength>,
    dy: Cell<RsvgLength>,
}
impl NodeTSpan {
    fn new() -> NodeTSpan {
        NodeTSpan {
            x: Cell::new(Default::default()),
            y: Cell::new(Default::default()),
            dx: Cell::new(RsvgLength::default()),
            dy: Cell::new(RsvgLength::default()),
        }
    }
    /// Adds this span's advance to `length`.  A span with an absolute x or y
    /// restarts positioning, so it reports "done" without measuring children.
    fn measure(
        &self,
        node: &RsvgNode,
        draw_ctx: *mut RsvgDrawingCtx,
        length: &mut f64,
        usetextonly: bool,
    ) -> bool {
        let values = &node.get_computed_values();
        if self.x.get().is_some() || self.y.get().is_some() {
            return true;
        }
        // Only the shift along the text progression axis contributes.
        if values.text_gravity_is_vertical() {
            *length += self.dy.get().normalize(draw_ctx);
        } else {
            *length += self.dx.get().normalize(draw_ctx);
        }
        measure_children(node, draw_ctx, length, usetextonly)
    }
    /// Renders the span's children, re-anchoring at an absolute x/y when
    /// given, then applying the dx/dy shift.  Pushes/reinherits the drawing
    /// state around the traversal.
    fn render(
        &self,
        node: &RsvgNode,
        draw_ctx: *mut RsvgDrawingCtx,
        x: &mut f64,
        y: &mut f64,
        usetextonly: bool,
        clipping: bool,
    ) {
        let values = &node.get_computed_values();
        drawing_ctx::state_push(draw_ctx);
        drawing_ctx::state_reinherit_top(draw_ctx, node.get_state(), 0);
        let mut dx = self.dx.get().normalize(draw_ctx);
        let mut dy = self.dy.get().normalize(draw_ctx);
        let vertical = values.text_gravity_is_vertical();
        let anchor = values.text_anchor;
        let offset = anchor_offset(node, draw_ctx, anchor, usetextonly);
        if let Some(self_x) = self.x.get() {
            *x = self_x.normalize(draw_ctx);
            // An absolute x re-applies the text-anchor adjustment for
            // horizontal text; the dx shift is scaled accordingly.
            if !vertical {
                *x -= offset;
                dx = match anchor {
                    TextAnchor::Start => dx,
                    TextAnchor::Middle => dx / 2f64,
                    _ => 0f64,
                }
            }
        }
        *x += dx;
        if let Some(self_y) = self.y.get() {
            *y = self_y.normalize(draw_ctx);
            // Same adjustment along y for vertical text.
            if vertical {
                *y -= offset;
                dy = match anchor {
                    TextAnchor::Start => dy,
                    TextAnchor::Middle => dy / 2f64,
                    _ => 0f64,
                }
            }
        }
        *y += dy;
        render_children(node, draw_ctx, x, y, usetextonly, clipping);
        drawing_ctx::state_pop(draw_ctx);
    }
}
impl NodeTrait for NodeTSpan {
    // Parses x/y (optional) and dx/dy; unknown attributes are ignored.
    fn set_atts(&self, _: &RsvgNode, _: *const RsvgHandle, pbag: &PropertyBag) -> NodeResult {
        for (_key, attr, value) in pbag.iter() {
            match attr {
                Attribute::X => self
                    .x
                    .set(parse("x", value, LengthDir::Horizontal, None).map(Some)?),
                Attribute::Y => self
                    .y
                    .set(parse("y", value, LengthDir::Vertical, None).map(Some)?),
                Attribute::Dx => self
                    .dx
                    .set(parse("dx", value, LengthDir::Horizontal, None)?),
                Attribute::Dy => self.dy.set(parse("dy", value, LengthDir::Vertical, None)?),
                _ => (),
            }
        }
        Ok(())
    }
    fn draw(&self, _: &RsvgNode, _: *mut RsvgDrawingCtx, _: i32, _: bool) {
        // nothing — tspan is drawn through the enclosing text machinery
    }
    fn get_c_impl(&self) -> *const RsvgCNodeImpl {
        unreachable!();
    }
}
/// Converts a user-space quantity into Pango units (multiples of `pango::SCALE`).
fn to_pango_units(v: f64) -> i32 {
    let scaled = v * f64::from(pango::SCALE);
    scaled as i32
}
// Conversions from the CSS/SVG font and direction properties to their Pango
// counterparts.  These are straight lookup tables; a few arms are only
// approximations (noted inline).
impl From<FontStyle> for pango::Style {
    fn from(s: FontStyle) -> pango::Style {
        match s {
            FontStyle::Normal => pango::Style::Normal,
            FontStyle::Italic => pango::Style::Italic,
            FontStyle::Oblique => pango::Style::Oblique,
        }
    }
}
impl From<FontVariant> for pango::Variant {
    fn from(v: FontVariant) -> pango::Variant {
        match v {
            FontVariant::Normal => pango::Variant::Normal,
            FontVariant::SmallCaps => pango::Variant::SmallCaps,
        }
    }
}
impl From<FontStretch> for pango::Stretch {
    fn from(s: FontStretch) -> pango::Stretch {
        match s {
            FontStretch::Normal => pango::Stretch::Normal,
            // Wider/Narrower are relative keywords in CSS; mapped to fixed
            // stretches here.
            FontStretch::Wider => pango::Stretch::Expanded, // not quite correct
            FontStretch::Narrower => pango::Stretch::Condensed, // not quite correct
            FontStretch::UltraCondensed => pango::Stretch::UltraCondensed,
            FontStretch::ExtraCondensed => pango::Stretch::ExtraCondensed,
            FontStretch::Condensed => pango::Stretch::Condensed,
            FontStretch::SemiCondensed => pango::Stretch::SemiCondensed,
            FontStretch::SemiExpanded => pango::Stretch::SemiExpanded,
            FontStretch::Expanded => pango::Stretch::Expanded,
            FontStretch::ExtraExpanded => pango::Stretch::ExtraExpanded,
            FontStretch::UltraExpanded => pango::Stretch::UltraExpanded,
        }
    }
}
impl From<FontWeight> for pango::Weight {
    fn from(w: FontWeight) -> pango::Weight {
        match w {
            FontWeight::Normal => pango::Weight::Normal,
            FontWeight::Bold => pango::Weight::Bold,
            // NOTE(review): Bolder/Lighter are relative weights in CSS; mapping
            // them to fixed Pango weights is an approximation.
            FontWeight::Bolder => pango::Weight::Ultrabold,
            FontWeight::Lighter => pango::Weight::Light,
            FontWeight::W100 => pango::Weight::Thin,
            FontWeight::W200 => pango::Weight::Ultralight,
            FontWeight::W300 => pango::Weight::Semilight,
            FontWeight::W400 => pango::Weight::Normal,
            FontWeight::W500 => pango::Weight::Medium,
            FontWeight::W600 => pango::Weight::Semibold,
            FontWeight::W700 => pango::Weight::Bold,
            FontWeight::W800 => pango::Weight::Ultrabold,
            FontWeight::W900 => pango::Weight::Heavy,
        }
    }
}
impl From<Direction> for pango::Direction {
    fn from(d: Direction) -> pango::Direction {
        match d {
            Direction::Ltr => pango::Direction::Ltr,
            Direction::Rtl => pango::Direction::Rtl,
        }
    }
}
// The `direction` property also picks the paragraph alignment:
// left for LTR, right for RTL.
impl From<Direction> for pango::Alignment {
    fn from(d: Direction) -> pango::Alignment {
        match d {
            Direction::Ltr => pango::Alignment::Left,
            Direction::Rtl => pango::Alignment::Right,
        }
    }
}
impl From<WritingMode> for pango::Direction {
    fn from(m: WritingMode) -> pango::Direction {
        match m {
            // Vertical modes (Tb, TbRl) are treated as LTR for bidi purposes.
            WritingMode::LrTb | WritingMode::Lr | WritingMode::Tb | WritingMode::TbRl => {
                pango::Direction::Ltr
            }
            WritingMode::RlTb | WritingMode::Rl => pango::Direction::Rtl,
        }
    }
}
impl From<WritingMode> for pango::Gravity {
    fn from(m: WritingMode) -> pango::Gravity {
        match m {
            // Vertical writing modes get East gravity; horizontal modes use
            // the default South gravity.
            WritingMode::Tb | WritingMode::TbRl => pango::Gravity::East,
            WritingMode::LrTb | WritingMode::Lr | WritingMode::RlTb | WritingMode::Rl => {
                pango::Gravity::South
            }
        }
    }
}
/// Builds a Pango layout for `text`, configured from the computed style
/// `values`: language, gravity, base direction, font description, letter
/// spacing, text decorations, alignment, and `xml:space` normalization.
fn create_pango_layout(
    draw_ctx: *const RsvgDrawingCtx,
    values: &ComputedValues,
    text: &str,
) -> pango::Layout {
    let pango_context = drawing_ctx::get_pango_context(draw_ctx);
    // See the construction of the XmlLang property
    // We use "" there as the default value; this means that the language is not set.
    // If the language *is* set, we can use it here.
    if !values.xml_lang.0.is_empty() {
        let pango_lang = pango::Language::from_string(&values.xml_lang.0);
        pango_context.set_language(&pango_lang);
    }
    pango_context.set_base_gravity(pango::Gravity::from(values.writing_mode));
    // Base direction: unicode-bidi override/embed forces the `direction`
    // property; otherwise an explicit non-LTR direction wins, and the
    // writing mode decides as a last resort.
    match (values.unicode_bidi, values.direction) {
        (UnicodeBidi::Override, _) | (UnicodeBidi::Embed, _) => {
            pango_context.set_base_dir(pango::Direction::from(values.direction));
        }
        (_, direction) if direction != Direction::Ltr => {
            pango_context.set_base_dir(pango::Direction::from(direction));
        }
        (_, _) => {
            pango_context.set_base_dir(pango::Direction::from(values.writing_mode));
        }
    }
    let mut font_desc = pango_context.get_font_description().unwrap();
    font_desc.set_family(&values.font_family.0);
    font_desc.set_style(pango::Style::from(values.font_style));
    font_desc.set_variant(pango::Variant::from(values.font_variant));
    font_desc.set_weight(pango::Weight::from(values.font_weight));
    font_desc.set_stretch(pango::Stretch::from(values.font_stretch));
    let (_, dpi_y) = drawing_ctx::get_dpi(draw_ctx);
    // Convert the font size from pixels to points via the vertical DPI.
    font_desc.set_size(to_pango_units(
        drawing_ctx::get_normalized_font_size(draw_ctx) / dpi_y * 72.0,
    ));
    let layout = pango::Layout::new(&pango_context);
    layout.set_font_description(&font_desc);
    let attr_list = pango::AttrList::new();
    attr_list.insert(
        pango::Attribute::new_letter_spacing(to_pango_units(
            values.letter_spacing.0.normalize(draw_ctx),
        )).unwrap(),
    );
    if values.text_decoration.underline {
        attr_list.insert(pango::Attribute::new_underline(pango::Underline::Single).unwrap());
    }
    if values.text_decoration.strike {
        attr_list.insert(pango::Attribute::new_strikethrough(true).unwrap());
    }
    layout.set_attributes(&attr_list);
    layout.set_alignment(pango::Alignment::from(values.direction));
    // Collapse or preserve whitespace per xml:space before handing to Pango.
    let t = xml_space_normalize(values.xml_space, text);
    layout.set_text(&t);
    layout
}
/// Returns the offset to subtract from the text's start position so the run
/// honors `text-anchor`: 0 for `start`, half the measured length for
/// `middle`, the full length otherwise (`end`).
fn anchor_offset(
    node: &RsvgNode,
    draw_ctx: *mut RsvgDrawingCtx,
    anchor: TextAnchor,
    textonly: bool,
) -> f64 {
    // Accumulated advance of the node's text children.
    let mut width = 0f64;
    match anchor {
        TextAnchor::Start => 0f64,
        TextAnchor::Middle => {
            measure_children(node, draw_ctx, &mut width, textonly);
            width / 2f64
        }
        _ => {
            // TextAnchor::End
            measure_children(node, draw_ctx, &mut width, textonly);
            width
        }
    }
}
/// Accumulates the advance of `node`'s children into `length`.  Stops as soon
/// as a child reports that measuring is done, and returns that flag (false
/// when there are no children or none finishes early).
fn measure_children(
    node: &RsvgNode,
    draw_ctx: *mut RsvgDrawingCtx,
    length: &mut f64,
    textonly: bool,
) -> bool {
    // `any` short-circuits exactly like the explicit loop-with-break did.
    node.children()
        .any(|child| measure_child(&child, draw_ctx, length, textonly))
}
/// Measures a single child node, dispatching on its type.  The child reads
/// its own computed values off the node; the drawing context's state is
/// still pushed/popped around the dispatch.
/// NOTE(review): unlike the render path, no state_reinherit_top() here —
/// confirm the push/pop pair is still needed by drawing_ctx internals.
fn measure_child(
    node: &RsvgNode,
    draw_ctx: *mut RsvgDrawingCtx,
    length: &mut f64,
    textonly: bool,
) -> bool {
    let mut done = false;
    drawing_ctx::state_push(draw_ctx);
    match (node.get_type(), textonly) {
        (NodeType::Chars, _) => {
            node.with_impl(|chars: &NodeChars| chars.measure(node, draw_ctx, length));
        }
        // In text-only mode (e.g. measuring through a <tref>) recurse into
        // any element's children.
        (_, true) => {
            done = measure_children(node, draw_ctx, length, textonly);
        }
        (NodeType::TSpan, _) => {
            node.with_impl(|tspan: &NodeTSpan| {
                done = tspan.measure(node, draw_ctx, length, textonly);
            });
        }
        (NodeType::TRef, _) => {
            node.with_impl(|tref: &NodeTRef| {
                done = tref.measure(node, draw_ctx, length);
            });
        }
        // Other node types contribute nothing to text measurement.
        (_, _) => {}
    }
    drawing_ctx::state_pop(draw_ctx);
    done
}
/// Renders `node`'s children inside a discrete layer, advancing the current
/// text position (`x`, `y`) as each child is drawn.
fn render_children(
    node: &RsvgNode,
    draw_ctx: *mut RsvgDrawingCtx,
    x: &mut f64,
    y: &mut f64,
    textonly: bool,
    clipping: bool,
) {
    let values = &node.get_computed_values();
    drawing_ctx::push_discrete_layer(draw_ctx, values, clipping);
    for child in node.children() {
        render_child(&child, draw_ctx, x, y, textonly, clipping);
    }
    drawing_ctx::pop_discrete_layer(draw_ctx, values, clipping);
}
/// Renders a single child node, dispatching on its type; mirrors the
/// dispatch in `measure_child`.
fn render_child(
    node: &RsvgNode,
    draw_ctx: *mut RsvgDrawingCtx,
    x: &mut f64,
    y: &mut f64,
    textonly: bool,
    clipping: bool,
) {
    match (node.get_type(), textonly) {
        (NodeType::Chars, _) => {
            node.with_impl(|chars: &NodeChars| chars.render(node, draw_ctx, x, y, clipping));
        }
        // In text-only mode (e.g. rendering through a <tref>) recurse into
        // any element's children.
        (_, true) => {
            render_children(node, draw_ctx, x, y, textonly, clipping);
        }
        (NodeType::TSpan, _) => {
            node.with_impl(|tspan: &NodeTSpan| {
                tspan.render(node, draw_ctx, x, y, textonly, clipping);
            });
        }
        (NodeType::TRef, _) => {
            node.with_impl(|tref: &NodeTRef| {
                tref.render(node, draw_ctx, x, y, clipping);
            });
        }
        // Other node types are not drawn by the text machinery.
        (_, _) => {}
    }
}
// C entry point: creates a chars node (character data inside text elements).
// `cond` is cleared on its state.
#[no_mangle]
pub extern "C" fn rsvg_node_chars_new(raw_parent: *const RsvgNode) -> *const RsvgNode {
    let node = boxed_node_new(NodeType::Chars, raw_parent, Box::new(NodeChars::new()));
    let state = state::from_c_mut(rsvg_node_get_state(node));
    state.cond = false;
    node
}
// C entry point: appends `len` bytes of UTF-8 at `text` to the chars node.
#[no_mangle]
pub extern "C" fn rsvg_node_chars_append(
    raw_node: *const RsvgNode,
    text: *const libc::c_char,
    len: isize,
) {
    assert!(!raw_node.is_null());
    // SAFETY: asserted non-null above; the C caller passes a valid node pointer.
    let node: &RsvgNode = unsafe { &*raw_node };
    assert!(!text.is_null());
    assert!(len >= 0);
    // libxml2 already validated the incoming string as UTF-8. Note that
    // it is *not* nul-terminated; this is why we create a byte slice first.
    let bytes = unsafe { std::slice::from_raw_parts(text as *const u8, len as usize) };
    let utf8 = unsafe { str::from_utf8_unchecked(bytes) };
    node.with_impl(|chars: &NodeChars| {
        chars.append(utf8);
    });
}
// C entry point: creates a <text> element node (element-name argument unused).
#[no_mangle]
pub extern "C" fn rsvg_node_text_new(
    _: *const libc::c_char,
    raw_parent: *const RsvgNode,
) -> *const RsvgNode {
    boxed_node_new(NodeType::Text, raw_parent, Box::new(NodeText::new()))
}
// C entry point: creates a <tref> element node.
#[no_mangle]
pub extern "C" fn rsvg_node_tref_new(
    _: *const libc::c_char,
    raw_parent: *const RsvgNode,
) -> *const RsvgNode {
    boxed_node_new(NodeType::TRef, raw_parent, Box::new(NodeTRef::new()))
}
// C entry point: creates a <tspan> element node.
#[no_mangle]
pub extern "C" fn rsvg_node_tspan_new(
    _: *const libc::c_char,
    raw_parent: *const RsvgNode,
) -> *const RsvgNode {
    boxed_node_new(NodeType::TSpan, raw_parent, Box::new(NodeTSpan::new()))
}
|
use std::fmt::{Debug,Formatter,Result};
use value::{Value,ToValue,Tuple};
use self::EveFn::*;
use value::Value::Float;
// Enums...
// Expression Enum ------------------------------------------------------------
/// A node in the interpreter's expression tree.
#[derive(Clone)]
pub enum Expression {
    Constant(Constant),
    Variable(Variable),
    Call(Call),
    Match(Match),
    Value(Value),
}
impl Debug for Expression {
    fn fmt(&self, f: &mut Formatter) -> Result {
        match *self {
            Expression::Constant(ref x) => write!(f,"{:?}",*x),
            Expression::Call(ref x) => write!(f,"{:?}",*x),
            // NOTE(review): formatting a Variable, Match, or Value panics
            // via unimplemented!().
            _ => unimplemented!(),
        }
    }
}
// Conversions that let plain Rust values be used where an Expression is
// expected (see the exprvec! macro below).
pub trait ToExpression { fn to_expr(self) -> Expression; }
impl ToExpression for Expression { fn to_expr(self) -> Expression { self } }
impl ToExpression for Constant { fn to_expr(self) -> Expression { Expression::Constant(self) } }
impl ToExpression for Call { fn to_expr(self) -> Expression { Expression::Call(self) } }
impl ToExpression for i32 { fn to_expr(self) -> Expression { Expression::Value(self.to_value()) } }
impl ToExpression for f64 { fn to_expr(self) -> Expression { Expression::Value(self.to_value()) } }
impl<'a> ToExpression for &'a str { fn to_expr(self) -> Expression { Expression::Value(self.to_value()) } }
impl ToExpression for Value { fn to_expr(self) -> Expression { Expression::Value(self) } }
// End Expression Enum --------------------------------------------------------
/// The built-in functions the interpreter can dispatch on (see process_call).
#[derive(Clone,Debug)]
pub enum EveFn {
    // Basic ops
    Add,Subtract,Multiply,Divide,Exponentiate,
    // General math
    Sqrt,Log,Log10,Log2,Ln,Abs,Sign,Exp,
    //Trig
    Sin,Cos,Tan,ASin,ACos,ATan,ATan2,
    // Aggregates
    Sum,Prod,
    // Strings
    StrConcat,StrUpper,StrLower,StrLength,StrReplace,StrSplit,
}
/// A named variable reference.
/// NOTE(review): not yet handled by process_expression — evaluating one panics.
#[derive(Clone)]
pub enum Variable {
    Variable(String),
}
/// A pattern used in Match arms: either a constant or a tuple.
#[derive(Clone)]
pub enum Pattern {
    Constant(Constant),
    Tuple(Tuple),
}
/*
#[derive(Clone)]
pub enum Tuple {
    Patterns(PatternVec),
}
*/
// Constant Enum --------------------------------------------------------------
#[derive(Clone,PartialEq)]
pub enum Constant {
    StringConstant(String),
    Value(Value),
}
impl Debug for Constant {
    fn fmt(&self, f: &mut Formatter) -> Result {
        match *self {
            Constant::StringConstant(ref x) => write!(f,"{:?}",*x),
            // NOTE(review): formatting a Constant::Value panics via unimplemented!().
            _ => unimplemented!(),
        }
    }
}
// End Constant Enum ----------------------------------------------------------
// Structs...
/// A function application: `fun` applied to the evaluated `args`.
#[derive(Clone,Debug)]
pub struct Call {
    pub fun: EveFn,
    pub args: ExpressionVec,
}
/// A pattern match.
/// NOTE(review): presumably patterns pair with handlers by index — confirm
/// once Match evaluation is implemented.
#[derive(Clone)]
pub struct Match {
    pub patterns: PatternVec,
    pub handlers: ExpressionVec,
}
// Some type aliases
pub type PatternVec = Vec<Pattern>;
pub type ExpressionVec = Vec<Expression>;
// Macro for creating expression vectors
/// Builds an ExpressionVec from anything implementing ToExpression,
/// e.g. `exprvec![1, 2.5, "s"]`.
#[macro_export]
macro_rules! exprvec {
    ( $( $x:expr ),* ) => {
        {
            let mut temp_vec = ExpressionVec::new();
            $(
                temp_vec.push($x.to_expr());
            )*
            temp_vec
        }
    };
}
// This is the main interface to the interpreter. Pass in an expression, get a value back
pub fn calculate(e: & Expression) -> Value {
    process_expression(e)
}
/// Recursively evaluates an expression to a Value.  Only Call and Value are
/// handled so far; Constant, Variable, and Match panic via unimplemented!().
fn process_expression(e: & Expression) -> Value {
    match *e {
        //Expression::Constant(ref x) => x.clone(),
        Expression::Call(ref x) => process_call(x),
        //Expression::Constant(ref x) => process_constant(x),
        Expression::Value(ref x) => x.clone(),
        _ => unimplemented!(),
    }
}
/*
fn process_constant(c: & Constant) -> &Value {
match *c {
Constant::NumericConstant(ref x) => unwrap_numeric(x).to_value(),
Constant::StringConstant(ref x) => x.to_value(),
_ => unimplemented!(),
}
}
*/
/// Evaluates a `Call`: evaluates every argument first, then dispatches on
/// the function together with the argument types and arity.
///
/// Unmatched combinations (wrong arity/types or not-yet-implemented
/// functions) evaluate to an empty string instead of panicking, so an
/// incomplete expression stays harmless.
fn process_call(c: &Call) -> Value {
    let args: Vec<Value> = c.args.iter().map(process_expression).collect();
    match(&c.fun, &args[..]) {
        // Basic Math
        (&Add,[Float(x),Float(y)]) => Float(x+y),
        (&Subtract,[Float(x),Float(y)]) => Float(x-y),
        (&Multiply,[Float(x),Float(y)]) => Float(x*y),
        // Handle division by 0 explicitly instead of letting IEEE semantics
        // propagate an infinity/NaN silently through the result.
        (&Divide,[Float(x),Float(y)]) => {
            if y == 0f64 { panic!("Error: Division by 0"); }
            Float(x/y)
        },
        (&Exponentiate,[Float(x),Float(y)]) => Float(x.powf(y)),
        // Some general math functions
        (&Abs,[Float(x)]) => Float(x.abs()),
        (&Sqrt,[Float(x)]) => Float(x.sqrt()),
        (&Sign,[Float(x)]) => Float(x.signum()),
        (&Exp,[Float(x)]) => Float(x.exp()),
        (&Ln,[Float(x)]) => Float(x.ln()),
        (&Log10,[Float(x)]) => Float(x.log10()),
        (&Log2,[Float(x)]) => Float(x.log2()),
        // Trig functions
        (&Sin,[Float(x)]) => Float(x.sin()),
        (&Cos,[Float(x)]) => Float(x.cos()),
        (&Tan,[Float(x)]) => Float(x.tan()),
        (&ASin,[Float(x)]) => Float(x.asin()),
        (&ACos,[Float(x)]) => Float(x.acos()),
        (&ATan,[Float(x)]) => Float(x.atan()),
        (&ATan2,[Float(x),Float(y)]) => Float(x.atan2(y)),
        // String functions
        (&StrConcat,[Value::String(ref s1),Value::String(ref s2)]) => Value::String(s1.to_string()+&s2[..]),
        (&StrUpper,[Value::String(ref s)]) => Value::String(s.to_uppercase()),
        (&StrLower,[Value::String(ref s)]) => Value::String(s.to_lowercase()),
        // NOTE(review): len() counts bytes, not characters — confirm intent
        // for non-ASCII strings.
        (&StrLength,[Value::String(ref s)]) => Float(s.len() as f64),
        (&StrReplace,[Value::String(ref s),Value::String(ref q),Value::String(ref r)]) => Value::String(s.replace(&q[..],&r[..])),
        (&StrSplit,[Value::String(ref s)]) => {
            let w: Vec<Value> = s.words().map(|x| x.to_value()).collect();
            Value::Tuple(w)
        },
        // Aggregate functions
        //&Sum => general_agg(|x,y|{x+y},0f64,&c.args),
        //&Prod => general_agg(|x,y|{x*y},1f64,&c.args),
        // Returns an empty string for the purpose of handling incomplete function
        (_, _) => Value::String("".to_string()),
    }
}
/*
// Aggregate Functions --------------------------------------------------------
fn general_agg<F: Fn(f64,f64) -> f64>(f: F, base: f64, args: &ExpressionVec) -> Value {
// Some fold magic!
let acc = args.iter().fold(base,|acc,next_arg| f(acc,process_expression(next_arg).to_f64().unwrap()) );
acc.to_value()
}
// End Aggregate Functions ----------------------------------------------------
*/
Handle division by 0 explicitly. We will probably push through an error
to the UI instead of a panic
use std::fmt::{Debug,Formatter,Result};
use value::{Value,ToValue,Tuple};
use self::EveFn::*;
use value::Value::Float;
// Enums...
// Expression Enum ------------------------------------------------------------
/// A node in the interpreter's expression tree.
#[derive(Clone)]
pub enum Expression {
    Constant(Constant),
    Variable(Variable),
    Call(Call),
    Match(Match),
    Value(Value),
}
impl Debug for Expression {
    fn fmt(&self, f: &mut Formatter) -> Result {
        match *self {
            Expression::Constant(ref x) => write!(f,"{:?}",*x),
            Expression::Call(ref x) => write!(f,"{:?}",*x),
            // NOTE(review): formatting a Variable, Match, or Value panics
            // via unimplemented!().
            _ => unimplemented!(),
        }
    }
}
// Conversions that let plain Rust values be used where an Expression is
// expected (see the exprvec! macro below).
pub trait ToExpression { fn to_expr(self) -> Expression; }
impl ToExpression for Expression { fn to_expr(self) -> Expression { self } }
impl ToExpression for Constant { fn to_expr(self) -> Expression { Expression::Constant(self) } }
impl ToExpression for Call { fn to_expr(self) -> Expression { Expression::Call(self) } }
impl ToExpression for i32 { fn to_expr(self) -> Expression { Expression::Value(self.to_value()) } }
impl ToExpression for f64 { fn to_expr(self) -> Expression { Expression::Value(self.to_value()) } }
impl<'a> ToExpression for &'a str { fn to_expr(self) -> Expression { Expression::Value(self.to_value()) } }
impl ToExpression for Value { fn to_expr(self) -> Expression { Expression::Value(self) } }
// End Expression Enum --------------------------------------------------------
/// The built-in functions the interpreter can dispatch on (see process_call).
#[derive(Clone,Debug)]
pub enum EveFn {
    // Basic ops
    Add,Subtract,Multiply,Divide,Exponentiate,
    // General math
    Sqrt,Log,Log10,Log2,Ln,Abs,Sign,Exp,
    //Trig
    Sin,Cos,Tan,ASin,ACos,ATan,ATan2,
    // Aggregates
    Sum,Prod,
    // Strings
    StrConcat,StrUpper,StrLower,StrLength,StrReplace,StrSplit,
}
/// A named variable reference.
/// NOTE(review): not yet handled by process_expression — evaluating one panics.
#[derive(Clone)]
pub enum Variable {
    Variable(String),
}
/// A pattern used in Match arms: either a constant or a tuple.
#[derive(Clone)]
pub enum Pattern {
    Constant(Constant),
    Tuple(Tuple),
}
/*
#[derive(Clone)]
pub enum Tuple {
    Patterns(PatternVec),
}
*/
// Constant Enum --------------------------------------------------------------
#[derive(Clone,PartialEq)]
pub enum Constant {
    StringConstant(String),
    Value(Value),
}
impl Debug for Constant {
    fn fmt(&self, f: &mut Formatter) -> Result {
        match *self {
            Constant::StringConstant(ref x) => write!(f,"{:?}",*x),
            // NOTE(review): formatting a Constant::Value panics via unimplemented!().
            _ => unimplemented!(),
        }
    }
}
// End Constant Enum ----------------------------------------------------------
// Structs...
/// A function application: `fun` applied to the evaluated `args`.
#[derive(Clone,Debug)]
pub struct Call {
    pub fun: EveFn,
    pub args: ExpressionVec,
}
/// A pattern match.
/// NOTE(review): presumably patterns pair with handlers by index — confirm
/// once Match evaluation is implemented.
#[derive(Clone)]
pub struct Match {
    pub patterns: PatternVec,
    pub handlers: ExpressionVec,
}
// Some type aliases
pub type PatternVec = Vec<Pattern>;
pub type ExpressionVec = Vec<Expression>;
// Macro for creating expression vectors
/// Builds an ExpressionVec from anything implementing ToExpression,
/// e.g. `exprvec![1, 2.5, "s"]`.
#[macro_export]
macro_rules! exprvec {
    ( $( $x:expr ),* ) => {
        {
            let mut temp_vec = ExpressionVec::new();
            $(
                temp_vec.push($x.to_expr());
            )*
            temp_vec
        }
    };
}
// This is the main interface to the interpreter. Pass in an expression, get a value back
pub fn calculate(e: & Expression) -> Value {
    process_expression(e)
}
/// Recursively evaluates an expression to a Value.  Only Call and Value are
/// handled so far; Constant, Variable, and Match panic via unimplemented!().
fn process_expression(e: & Expression) -> Value {
    match *e {
        //Expression::Constant(ref x) => x.clone(),
        Expression::Call(ref x) => process_call(x),
        //Expression::Constant(ref x) => process_constant(x),
        Expression::Value(ref x) => x.clone(),
        _ => unimplemented!(),
    }
}
/*
fn process_constant(c: & Constant) -> &Value {
match *c {
Constant::NumericConstant(ref x) => unwrap_numeric(x).to_value(),
Constant::StringConstant(ref x) => x.to_value(),
_ => unimplemented!(),
}
}
*/
fn process_call(c: &Call) -> Value {
let args: Vec<Value> = c.args.iter().map(process_expression).collect();
match(&c.fun, &args[..]) {
// Basic Math
(&Add,[Float(x),Float(y)]) => Float(x+y),
(&Subtract,[Float(x),Float(y)]) => Float(x-y),
(&Multiply,[Float(x),Float(y)]) => Float(x*y),
(&Divide,[Float(x),Float(y)]) => {
if y == 0f64 { panic!("Error: Division by 0"); }
Float(x/y)
},
(&Exponentiate,[Float(x),Float(y)]) => Float(x.powf(y)),
// Some general math functions
(&Abs,[Float(x)]) => Float(x.abs()),
(&Sqrt,[Float(x)]) => Float(x.sqrt()),
(&Sign,[Float(x)]) => Float(x.signum()),
(&Exp,[Float(x)]) => Float(x.exp()),
(&Ln,[Float(x)]) => Float(x.ln()),
(&Log10,[Float(x)]) => Float(x.log10()),
(&Log2,[Float(x)]) => Float(x.log2()),
// Trig functions
(&Sin,[Float(x)]) => Float(x.sin()),
(&Cos,[Float(x)]) => Float(x.cos()),
(&Tan,[Float(x)]) => Float(x.tan()),
(&ASin,[Float(x)]) => Float(x.asin()),
(&ACos,[Float(x)]) => Float(x.acos()),
(&ATan,[Float(x)]) => Float(x.atan()),
(&ATan2,[Float(x),Float(y)]) => Float(x.atan2(y)),
// String functions
(&StrConcat,[Value::String(ref s1),Value::String(ref s2)]) => Value::String(s1.to_string()+&s2[..]),
(&StrUpper,[Value::String(ref s)]) => Value::String(s.to_uppercase()),
(&StrLower,[Value::String(ref s)]) => Value::String(s.to_lowercase()),
(&StrLength,[Value::String(ref s)]) => Float(s.len() as f64),
(&StrReplace,[Value::String(ref s),Value::String(ref q),Value::String(ref r)]) => Value::String(s.replace(&q[..],&r[..])),
(&StrSplit,[Value::String(ref s)]) => {
let w: Vec<Value> = s.words().map(|x| x.to_value()).collect();
Value::Tuple(w)
},
// Aggregate functions
//&Sum => general_agg(|x,y|{x+y},0f64,&c.args),
//&Prod => general_agg(|x,y|{x*y},1f64,&c.args),
// Returns an empty string for the purpose of handling incomplete function
(_, _) => Value::String("".to_string()),
}
}
/*
// Aggregate Functions --------------------------------------------------------
fn general_agg<F: Fn(f64,f64) -> f64>(f: F, base: f64, args: &ExpressionVec) -> Value {
// Some fold magic!
let acc = args.iter().fold(base,|acc,next_arg| f(acc,process_expression(next_arg).to_f64().unwrap()) );
acc.to_value()
}
// End Aggregate Functions ----------------------------------------------------
*/ |
fn main() {
    // `calculate_length` only borrows the string, so it remains usable here.
    let owned = String::from("hello");
    let size = calculate_length(&owned);
    println!("The length of '{}' is {}.", owned, size);
}
// ANCHOR: here
fn calculate_length(s: &String) -> usize { // s is a reference to a String
    s.len()
} // Here, s goes out of scope. But because it does not own what it refers
  // to, nothing happens.
// ANCHOR_END: here
Comment was not translated
Add translation
fn main() {
    let text = String::from("hello");
    // Pass a reference; ownership of `text` stays with `main`.
    let text_len = calculate_length(&text);
    println!("The length of '{}' is {}.", text, text_len);
}
// ANCHOR: here
fn calculate_length(s: &String) -> usize { // s is a reference to a String
    s.len()
} // Here, s goes out of scope. But because it does not own what it refers
  // to, nothing happens.
// ANCHOR_END: here
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Common Parquet errors and macros.
use std::{cell, convert, io, result};
use quick_error::quick_error;
use snap;
use thrift;
quick_error! {
    /// Set of errors that can be produced during different operations in Parquet.
    #[derive(Debug, PartialEq)]
    pub enum ParquetError {
        /// General Parquet error.
        /// Returned when code violates normal workflow of working with Parquet files.
        ///
        /// Also the target of `From` conversions for IO, Snappy, Thrift and
        /// `RefCell` borrow errors, so `?` works on those in Parquet code.
        General(message: String) {
            display("Parquet error: {}", message)
            description(message)
            from(e: io::Error) -> (format!("underlying IO error: {}", e))
            from(e: snap::Error) -> (format!("underlying snap error: {}", e))
            from(e: thrift::Error) -> (format!("underlying Thrift error: {}", e))
            from(e: cell::BorrowMutError) -> (format!("underlying borrow error: {}", e))
        }
        /// "Not yet implemented" Parquet error.
        /// Returned when functionality is not yet available.
        NYI(message: String) {
            display("NYI: {}", message)
            description(message)
        }
        /// "End of file" Parquet error.
        /// Returned when IO related failures occur, e.g. when there are not enough bytes to
        /// decode.
        EOF(message: String) {
            display("EOF: {}", message)
            description(message)
        }
        /// Arrow error.
        /// Returned when reading into arrow or writing from arrow.
        ArrowError(message: String) {
            display("Arrow: {}", message)
            description(message)
        }
    }
}
/// A specialized `Result` for Parquet errors.
pub type Result<T> = result::Result<T, ParquetError>;

// ----------------------------------------------------------------------
// Conversion from `ParquetError` to other types of `Error`s

impl convert::From<ParquetError> for io::Error {
    /// Wrap a `ParquetError` in an `io::Error` (kind `Other`) so Parquet
    /// failures can flow through IO-oriented APIs.
    fn from(e: ParquetError) -> Self {
        io::Error::new(io::ErrorKind::Other, e)
    }
}
// ----------------------------------------------------------------------
// Convenient macros for different errors

// Build a `ParquetError::General` from a plain string or a format string
// plus arguments.
macro_rules! general_err {
    ($fmt:expr) => (ParquetError::General($fmt.to_owned()));
    ($fmt:expr, $($args:expr),*) => (ParquetError::General(format!($fmt, $($args),*)));
    // NOTE(review): the two arms below pass a second argument to
    // `ParquetError::General`, which holds a single `String` (see the enum
    // above); they would fail to compile if ever invoked. Macro arms are
    // only type-checked on expansion, which is why this goes unnoticed.
    ($e:expr, $fmt:expr) => (ParquetError::General($fmt.to_owned(), $e));
    ($e:ident, $fmt:expr, $($args:tt),*) => (
        ParquetError::General(&format!($fmt, $($args),*), $e));
}

// Build a `ParquetError::NYI` ("not yet implemented").
macro_rules! nyi_err {
    ($fmt:expr) => (ParquetError::NYI($fmt.to_owned()));
    ($fmt:expr, $($args:expr),*) => (ParquetError::NYI(format!($fmt, $($args),*)));
}

// Build a `ParquetError::EOF`.
macro_rules! eof_err {
    ($fmt:expr) => (ParquetError::EOF($fmt.to_owned()));
    ($fmt:expr, $($args:expr),*) => (ParquetError::EOF(format!($fmt, $($args),*)));
}
ARROW-4525: [Rust] [Parquet] Enable conversion of ArrowError to ParquetError
This is useful when integrating arrow with parquet, e.g. when reading parquet data into arrow.
Author: Renjie Liu <liurenjie2008@gmail.com>
Closes #3603 from liurenjie1024/arrow-error-to-parquet-error and squashes the following commits:
d4c379f3 <Renjie Liu> Enable conversion of ArrowError to ParquetError
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Common Parquet errors and macros.
use std::{cell, convert, io, result};
use arrow::error::ArrowError;
use quick_error::quick_error;
use snap;
use thrift;
quick_error! {
    /// Set of errors that can be produced during different operations in Parquet.
    #[derive(Debug, PartialEq)]
    pub enum ParquetError {
        /// General Parquet error.
        /// Returned when code violates normal workflow of working with Parquet files.
        ///
        /// Also the target of `From` conversions for IO, Snappy, Thrift and
        /// `RefCell` borrow errors, so `?` works on those in Parquet code.
        General(message: String) {
            display("Parquet error: {}", message)
            description(message)
            from(e: io::Error) -> (format!("underlying IO error: {}", e))
            from(e: snap::Error) -> (format!("underlying snap error: {}", e))
            from(e: thrift::Error) -> (format!("underlying Thrift error: {}", e))
            from(e: cell::BorrowMutError) -> (format!("underlying borrow error: {}", e))
        }
        /// "Not yet implemented" Parquet error.
        /// Returned when functionality is not yet available.
        NYI(message: String) {
            display("NYI: {}", message)
            description(message)
        }
        /// "End of file" Parquet error.
        /// Returned when IO related failures occur, e.g. when there are not enough bytes to
        /// decode.
        EOF(message: String) {
            display("EOF: {}", message)
            description(message)
        }
        /// Arrow error.
        /// Returned when reading into arrow or writing from arrow; convertible
        /// from `ArrowError` so `?` works at the arrow/parquet boundary.
        ArrowError(message: String) {
            display("Arrow: {}", message)
            description(message)
            from(e: ArrowError) -> (format!("underlying Arrow error: {:?}", e))
        }
    }
}
/// A specialized `Result` for Parquet errors.
pub type Result<T> = result::Result<T, ParquetError>;

// ----------------------------------------------------------------------
// Conversion from `ParquetError` to other types of `Error`s

impl convert::From<ParquetError> for io::Error {
    /// Wrap a `ParquetError` in an `io::Error` (kind `Other`) so Parquet
    /// failures can flow through IO-oriented APIs.
    fn from(e: ParquetError) -> Self {
        io::Error::new(io::ErrorKind::Other, e)
    }
}
// ----------------------------------------------------------------------
// Convenient macros for different errors

// Build a `ParquetError::General` from a plain string or a format string
// plus arguments.
macro_rules! general_err {
    ($fmt:expr) => (ParquetError::General($fmt.to_owned()));
    ($fmt:expr, $($args:expr),*) => (ParquetError::General(format!($fmt, $($args),*)));
    // NOTE(review): the two arms below pass a second argument to
    // `ParquetError::General`, which holds a single `String` (see the enum
    // above); they would fail to compile if ever invoked. Macro arms are
    // only type-checked on expansion, which is why this goes unnoticed.
    ($e:expr, $fmt:expr) => (ParquetError::General($fmt.to_owned(), $e));
    ($e:ident, $fmt:expr, $($args:tt),*) => (
        ParquetError::General(&format!($fmt, $($args),*), $e));
}

// Build a `ParquetError::NYI` ("not yet implemented").
macro_rules! nyi_err {
    ($fmt:expr) => (ParquetError::NYI($fmt.to_owned()));
    ($fmt:expr, $($args:expr),*) => (ParquetError::NYI(format!($fmt, $($args),*)));
}

// Build a `ParquetError::EOF`.
macro_rules! eof_err {
    ($fmt:expr) => (ParquetError::EOF($fmt.to_owned()));
    ($fmt:expr, $($args:expr),*) => (ParquetError::EOF(format!($fmt, $($args),*)));
}
|
//! Utilities for universally unique identifiers.
//!
//! The identifiers generated by this module:
//!
//! - are URL safe
//! - contain information on the type of object that they
//!   are identifying
//! - have an extremely low probability of collision
//!
//! Generated identifiers have a fixed length of 23 characters made up
//! of two parts separated by a hyphen:
//!
//! - 2 characters in the range `[a-z]` that identify the "family" of
//!   identifiers, usually the type of object the identifier is for
//!   e.g. `fi` = file, `re` = request
//!
//! - 20 characters in the range `[0-9A-Za-z]` that are randomly generated
//!
//! For project identifiers (those starting with 'pr') only lowercase
//! letters are used for compatibility with Docker image naming rules.
//!
//! The total size of the generated ids is 23 bytes which allows them to fit
//! inside a [`SmartString`](https://lib.rs/crates/smartstring) for better
//! performance than a plain old `String`.
//!
//! See
//! - https://segment.com/blog/a-brief-history-of-the-uuid/
//! - https://zelark.github.io/nano-id-cc/
//! - https://gist.github.com/fnky/76f533366f75cf75802c8052b577e2a5
use eyre::{bail, Result};
use nanoid::nanoid;
use regex::Regex;
use smartstring::{Compact, SmartString};
/// A generated identifier; at 23 bytes it is stored inline by
/// `SmartString` rather than on the heap.
pub type Uuid = SmartString<Compact>;

/// The separator between the family and random parts of the identifier
///
/// A hyphen provides for better readability than a dot or colon when used
/// in pubsub topic strings and elsewhere.
const SEPARATOR: &str = "-";

/// The characters used in the random part of the identifier
///
/// Digits and lowercase letters come first so that `CHARACTERS[..36]`
/// selects the lowercase-only alphabet used by `generate_lower`.
const CHARACTERS: [char; 62] = [
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
    'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B',
    'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
    'V', 'W', 'X', 'Y', 'Z',
];
/// Create a family of UUIDs
///
/// ```
/// use uuid_utils::uuid_family;
///
/// uuid_family!(MyId, "my");
/// let id = MyId::new();
/// ```
#[macro_export]
macro_rules! uuid_family {
    ($name:ident, $family:literal) => {
        #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
        struct $name(uuid_utils::Uuid);

        impl $name {
            pub fn new() -> Self {
                Self(uuid_utils::generate($family))
            }
        }

        impl Default for $name {
            fn default() -> Self {
                Self::new()
            }
        }

        impl std::fmt::Display for $name {
            fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(formatter, "{}", self.0)
            }
        }

        impl std::cmp::PartialEq for $name {
            fn eq(&self, other: &$name) -> bool {
                self.0 == other.0
            }
        }

        // Use the fully-qualified `std::ops::Deref` path instead of emitting
        // `use std::ops::Deref;` at the expansion site: a `use` inside the
        // macro body collides (E0252) when the macro is invoked more than
        // once in the same module.
        impl std::ops::Deref for $name {
            type Target = uuid_utils::Uuid;

            fn deref(&self) -> &Self::Target {
                &self.0
            }
        }
    };
}
// Generate a universally unique identifier
//
// The result has the shape `<family>-<20 random chars>`.
pub fn generate(family: &str) -> Uuid {
    let random = nanoid!(20, &CHARACTERS);
    format!("{}{}{}", family, SEPARATOR, random).into()
}
// Generate a universally unique identifier with only lowercase letters and digits
//
// Restricts the alphabet to the first 36 characters (digits + lowercase).
pub fn generate_lower(family: &str) -> Uuid {
    let random = nanoid!(20, &CHARACTERS[0..36]);
    format!("{}{}{}", family, SEPARATOR, random).into()
}
// Test whether a string is an identifer for a particular family
//
// The pattern is anchored at both ends: previously `is_match` accepted any
// string merely *containing* a valid id (e.g. one with trailing garbage).
pub fn matches(family: &str, id: &str) -> bool {
    let re = ["^", family, SEPARATOR, "[0-9a-zA-Z]{20}$"].concat();
    let re = Regex::new(&re).expect("Should be a valid regex");
    re.is_match(id)
}
// Assert that a `Uuid` is an identifer for a particular family
//
// Returns the id unchanged on success so calls can be chained.
pub fn assert(family: &str, id: Uuid) -> Result<Uuid> {
    match matches(family, &id) {
        true => Ok(id),
        // The id fills the first placeholder and the family the second;
        // the original call had the two arguments swapped.
        false => bail!("Invalid UUID `{}`, family does not match `{}`", id, family),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Default alphabet: ids are 23 bytes and round-trip through `assert`.
    #[test]
    fn normal() {
        let id = generate("no");
        assert_eq!(id.len(), 23);
        assert!(matches("no", &id));
        assert("no", id).unwrap();
    }

    // Lowercase-only alphabet, used for project ids.
    #[test]
    fn lower() {
        let id = generate_lower("pr");
        assert_eq!(id.len(), 23);
        assert!(matches("pr", &id));
        assert("pr", id).unwrap();
    }
}
fix(UUIDs): Add `Display` and `PartialEq` implementations
//! Utilities for universally unique identifiers.
//!
//! The identifiers generated by this module:
//!
//! - are URL safe
//! - contain information on the type of object that they
//!   are identifying
//! - have an extremely low probability of collision
//!
//! Generated identifiers have a fixed length of 23 characters made up
//! of two parts separated by a hyphen:
//!
//! - 2 characters in the range `[a-z]` that identify the "family" of
//!   identifiers, usually the type of object the identifier is for
//!   e.g. `fi` = file, `re` = request
//!
//! - 20 characters in the range `[0-9A-Za-z]` that are randomly generated
//!
//! For project identifiers (those starting with 'pr') only lowercase
//! letters are used for compatibility with Docker image naming rules.
//!
//! The total size of the generated ids is 23 bytes which allows them to fit
//! inside a [`SmartString`](https://lib.rs/crates/smartstring) for better
//! performance than a plain old `String`.
//!
//! See
//! - https://segment.com/blog/a-brief-history-of-the-uuid/
//! - https://zelark.github.io/nano-id-cc/
//! - https://gist.github.com/fnky/76f533366f75cf75802c8052b577e2a5
use eyre::{bail, Result};
use nanoid::nanoid;
use regex::Regex;
use smartstring::{Compact, SmartString};
/// A generated identifier; at 23 bytes it is stored inline by
/// `SmartString` rather than on the heap.
pub type Uuid = SmartString<Compact>;

/// The separator between the family and random parts of the identifier
///
/// A hyphen provides for better readability than a dot or colon when used
/// in pubsub topic strings and elsewhere.
const SEPARATOR: &str = "-";

/// The characters used in the random part of the identifier
///
/// Digits and lowercase letters come first so that `CHARACTERS[..36]`
/// selects the lowercase-only alphabet used by `generate_lower`.
const CHARACTERS: [char; 62] = [
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
    'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B',
    'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
    'V', 'W', 'X', 'Y', 'Z',
];
/// Create a family of UUIDs
///
/// ```
/// use uuid_utils::uuid_family;
///
/// uuid_family!(MyId, "my");
/// let id = MyId::new();
/// ```
#[macro_export]
macro_rules! uuid_family {
    ($name:ident, $family:literal) => {
        // `PartialEq` is derived rather than hand-written: for a single-field
        // newtype the derived impl is exactly `self.0 == other.0`.
        #[derive(Debug, Clone, PartialEq, serde::Deserialize, serde::Serialize)]
        struct $name(uuid_utils::Uuid);

        impl $name {
            pub fn new() -> Self {
                Self(uuid_utils::generate($family))
            }
        }

        impl Default for $name {
            fn default() -> Self {
                Self::new()
            }
        }

        impl std::fmt::Display for $name {
            fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                // Format the inner value directly; the previous
                // `self.0.to_string()` allocated an intermediate `String`.
                write!(formatter, "{}", self.0)
            }
        }

        impl std::ops::Deref for $name {
            type Target = uuid_utils::Uuid;

            fn deref(&self) -> &Self::Target {
                &self.0
            }
        }
    };
}
// Generate a universally unique identifier
//
// The result has the shape `<family>-<20 random chars>`.
pub fn generate(family: &str) -> Uuid {
    let random = nanoid!(20, &CHARACTERS);
    format!("{}{}{}", family, SEPARATOR, random).into()
}
// Generate a universally unique identifier with only lowercase letters and digits
//
// Restricts the alphabet to the first 36 characters (digits + lowercase).
pub fn generate_lower(family: &str) -> Uuid {
    let random = nanoid!(20, &CHARACTERS[0..36]);
    format!("{}{}{}", family, SEPARATOR, random).into()
}
// Test whether a string is an identifer for a particular family
//
// The pattern is anchored at both ends: previously `is_match` accepted any
// string merely *containing* a valid id (e.g. one with trailing garbage).
pub fn matches(family: &str, id: &str) -> bool {
    let re = ["^", family, SEPARATOR, "[0-9a-zA-Z]{20}$"].concat();
    let re = Regex::new(&re).expect("Should be a valid regex");
    re.is_match(id)
}
// Assert that a `Uuid` is an identifer for a particular family
//
// Returns the id unchanged on success so calls can be chained.
pub fn assert(family: &str, id: Uuid) -> Result<Uuid> {
    match matches(family, &id) {
        true => Ok(id),
        // The id fills the first placeholder and the family the second;
        // the original call had the two arguments swapped.
        false => bail!("Invalid UUID `{}`, family does not match `{}`", id, family),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Default alphabet: ids are 23 bytes and round-trip through `assert`.
    #[test]
    fn normal() {
        let id = generate("no");
        assert_eq!(id.len(), 23);
        assert!(matches("no", &id));
        assert("no", id).unwrap();
    }

    // Lowercase-only alphabet, used for project ids.
    #[test]
    fn lower() {
        let id = generate_lower("pr");
        assert_eq!(id.len(), 23);
        assert!(matches("pr", &id));
        assert("pr", id).unwrap();
    }
}
|
/*!
The types module provides a way of associating globs on file names to file
types.
This can be used to match specific types of files. For example, among
the default file types provided, the Rust file type is defined to be `*.rs`
with name `rust`. Similarly, the C file type is defined to be `*.{c,h}` with
name `c`.
Note that the set of default types may change over time.
# Example
This shows how to create and use a simple file type matcher using the default
file types defined in this crate.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.select("rust");
let matcher = builder.build().unwrap();
assert!(matcher.matched("foo.rs", false).is_whitelist());
assert!(matcher.matched("foo.c", false).is_ignore());
```
# Example: negation
This is like the previous example, but shows how negating a file type works.
That is, this will let us match file paths that *don't* correspond to a
particular file type.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.negate("c");
let matcher = builder.build().unwrap();
assert!(matcher.matched("foo.rs", false).is_none());
assert!(matcher.matched("foo.c", false).is_ignore());
```
# Example: custom file type definitions
This shows how to extend this library default file type definitions with
your own.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.add("foo", "*.foo");
// Another way of adding a file type definition.
// This is useful when accepting input from an end user.
builder.add_def("bar:*.bar");
// Note: we only select `foo`, not `bar`.
builder.select("foo");
let matcher = builder.build().unwrap();
assert!(matcher.matched("x.foo", false).is_whitelist());
// This is ignored because we only selected the `foo` file type.
assert!(matcher.matched("x.bar", false).is_ignore());
```
*/
use std::cell::RefCell;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Arc;
use globset::{GlobBuilder, GlobSet, GlobSetBuilder};
use thread_local::ThreadLocal;
use pathutil::file_name;
use {Error, Match};
/// Default file type definitions as `(name, globs)` pairs.
///
/// Kept roughly in alphabetical order by name. Note that the set of default
/// types may change over time; `textile` is added here.
const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
    ("agda", &["*.agda", "*.lagda"]),
    ("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]),
    ("asm", &["*.asm", "*.s", "*.S"]),
    ("awk", &["*.awk"]),
    ("c", &["*.c", "*.h", "*.H"]),
    ("cbor", &["*.cbor"]),
    ("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]),
    ("cmake", &["*.cmake", "CMakeLists.txt"]),
    ("coffeescript", &["*.coffee"]),
    ("config", &["*.config"]),
    ("cpp", &[
        "*.C", "*.cc", "*.cpp", "*.cxx",
        "*.h", "*.H", "*.hh", "*.hpp",
    ]),
    ("csharp", &["*.cs"]),
    ("css", &["*.css"]),
    ("cython", &["*.pyx"]),
    ("dart", &["*.dart"]),
    ("d", &["*.d"]),
    ("elisp", &["*.el"]),
    ("erlang", &["*.erl", "*.hrl"]),
    ("fish", &["*.fish"]),
    ("fortran", &[
        "*.f", "*.F", "*.f77", "*.F77", "*.pfo",
        "*.f90", "*.F90", "*.f95", "*.F95",
    ]),
    ("fsharp", &["*.fs", "*.fsx", "*.fsi"]),
    ("go", &["*.go"]),
    ("groovy", &["*.groovy", "*.gradle"]),
    ("hbs", &["*.hbs"]),
    ("haskell", &["*.hs", "*.lhs"]),
    ("html", &["*.htm", "*.html"]),
    ("java", &["*.java"]),
    ("jinja", &["*.jinja", "*.jinja2"]),
    ("js", &[
        "*.js", "*.jsx", "*.vue",
    ]),
    ("json", &["*.json"]),
    ("jsonl", &["*.jsonl"]),
    ("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
    ("lua", &["*.lua"]),
    ("m4", &["*.ac", "*.m4"]),
    ("make", &["gnumakefile", "Gnumakefile", "makefile", "Makefile", "*.mk"]),
    ("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
    ("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
    ("matlab", &["*.m"]),
    ("mk", &["mkfile"]),
    ("ml", &["*.ml"]),
    ("nim", &["*.nim"]),
    ("objc", &["*.h", "*.m"]),
    ("objcpp", &["*.h", "*.mm"]),
    ("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]),
    ("perl", &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm"]),
    ("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]),
    ("py", &["*.py"]),
    ("readme", &["README*", "*README"]),
    ("r", &["*.R", "*.r", "*.Rmd", "*.Rnw"]),
    ("rst", &["*.rst"]),
    ("ruby", &["*.rb"]),
    ("rust", &["*.rs"]),
    ("scala", &["*.scala"]),
    ("sh", &["*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh"]),
    ("spark", &["*.spark"]),
    ("sql", &["*.sql"]),
    ("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]),
    ("swift", &["*.swift"]),
    ("taskpaper", &["*.taskpaper"]),
    ("tcl", &["*.tcl"]),
    ("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib"]),
    ("textile", &["*.textile"]),
    ("ts", &["*.ts", "*.tsx"]),
    ("txt", &["*.txt"]),
    ("toml", &["*.toml", "Cargo.lock"]),
    ("vala", &["*.vala"]),
    ("vb", &["*.vb"]),
    ("vimscript", &["*.vim"]),
    ("xml", &["*.xml"]),
    ("yacc", &["*.y"]),
    ("yaml", &["*.yaml", "*.yml"]),
];
/// Glob represents a single glob in a set of file type definitions.
///
/// There may be more than one glob for a particular file type.
///
/// This is used to report information about the highest precedent glob
/// that matched.
///
/// Note that not all matches necessarily correspond to a specific glob.
/// For example, if there are one or more selections and a file path doesn't
/// match any of those selections, then the file path is considered to be
/// ignored.
///
/// The lifetime `'a` refers to the lifetime of the underlying file type
/// definition, which corresponds to the lifetime of the file type matcher.
#[derive(Clone, Debug)]
pub struct Glob<'a>(GlobInner<'a>);

/// Private representation behind `Glob`, kept out of the public API.
#[derive(Clone, Debug)]
enum GlobInner<'a> {
    /// No glob matched, but the file path should still be ignored.
    UnmatchedIgnore,
    /// A glob matched.
    Matched {
        /// The file type definition which provided the glob.
        def: &'a FileTypeDef,
        /// The index of the glob that matched inside the file type definition.
        which: usize,
        /// Whether the selection was negated or not.
        negated: bool,
    }
}

impl<'a> Glob<'a> {
    /// A `Glob` standing in for "ignored without any specific matching glob".
    fn unmatched() -> Glob<'a> {
        Glob(GlobInner::UnmatchedIgnore)
    }
}
/// A single file type definition.
///
/// File type definitions can be retrieved in aggregate from a file type
/// matcher, and are also reported alongside the glob responsible for a
/// match.
#[derive(Clone, Debug)]
pub struct FileTypeDef {
    name: String,
    globs: Vec<String>,
}

impl FileTypeDef {
    /// The name of this file type (e.g. `rust`).
    pub fn name(&self) -> &str {
        self.name.as_str()
    }

    /// The globs used to recognize this file type.
    pub fn globs(&self) -> &[String] {
        self.globs.as_slice()
    }
}
/// Types is a file type matcher.
#[derive(Clone, Debug)]
pub struct Types {
    /// All of the file type definitions, sorted lexicographically by name.
    defs: Vec<FileTypeDef>,
    /// All of the selections made by the user.
    selections: Vec<Selection<FileTypeDef>>,
    /// Whether there is at least one Selection::Select in our selections.
    /// When this is true, a Match::None is converted to Match::Ignore.
    has_selected: bool,
    /// A mapping from glob index in the set to two indices. The first is an
    /// index into `selections` and the second is an index into the
    /// corresponding file type definition's list of globs.
    glob_to_selection: Vec<(usize, usize)>,
    /// The set of all glob selections, used for actual matching.
    set: GlobSet,
    /// Temporary storage for globs that match. One buffer per thread, so
    /// `matched` can work with only `&self`.
    matches: Arc<ThreadLocal<RefCell<Vec<usize>>>>,
}
/// Indicates the type of a selection for a particular file type.
///
/// `Select` whitelists a file type, `Negate` ignores it; both carry the
/// type's name together with an arbitrary payload `T`.
#[derive(Clone, Debug)]
enum Selection<T> {
    Select(String, T),
    Negate(String, T),
}

impl<T> Selection<T> {
    /// True when this selection ignores (rather than whitelists) its type.
    fn is_negated(&self) -> bool {
        match *self {
            Selection::Negate(..) => true,
            Selection::Select(..) => false,
        }
    }

    /// The file type name this selection refers to.
    fn name(&self) -> &str {
        match *self {
            Selection::Select(ref name, _)
            | Selection::Negate(ref name, _) => name,
        }
    }

    /// Transform the payload while keeping the variant and name intact.
    fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Selection<U> {
        match self {
            Selection::Select(name, inner) => Selection::Select(name, f(inner)),
            Selection::Negate(name, inner) => Selection::Negate(name, f(inner)),
        }
    }

    /// Borrow the payload.
    fn inner(&self) -> &T {
        match *self {
            Selection::Select(_, ref inner)
            | Selection::Negate(_, ref inner) => inner,
        }
    }
}
impl Types {
    /// Creates a new file type matcher that never matches any path and
    /// contains no file type definitions.
    pub fn empty() -> Types {
        Types {
            defs: vec![],
            selections: vec![],
            has_selected: false,
            glob_to_selection: vec![],
            set: GlobSetBuilder::new().build().unwrap(),
            matches: Arc::new(ThreadLocal::default()),
        }
    }
    /// Returns true if and only if this matcher has zero selections.
    pub fn is_empty(&self) -> bool {
        self.selections.is_empty()
    }
    /// Returns the number of selections used in this matcher.
    pub fn len(&self) -> usize {
        self.selections.len()
    }
    /// Return the set of current file type definitions.
    ///
    /// Definitions and globs are sorted.
    pub fn definitions(&self) -> &[FileTypeDef] {
        &self.defs
    }
    /// Returns a match for the given path against this file type matcher.
    ///
    /// The path is considered whitelisted if it matches a selected file type.
    /// The path is considered ignored if it matches a negated file type.
    /// If at least one file type is selected and `path` doesn't match, then
    /// the path is also considered ignored.
    pub fn matched<'a, P: AsRef<Path>>(
        &'a self,
        path: P,
        is_dir: bool,
    ) -> Match<Glob<'a>> {
        // File types don't apply to directories, and we can't do anything
        // if our glob set is empty.
        if is_dir || self.set.is_empty() {
            return Match::None;
        }
        // We only want to match against the file name, so extract it.
        // If one doesn't exist, then we can't match it.
        let name = match file_name(path.as_ref()) {
            Some(name) => name,
            None if self.has_selected => {
                return Match::Ignore(Glob::unmatched());
            }
            None => {
                return Match::None;
            }
        };
        // Reuse a per-thread scratch buffer to avoid allocating on each call.
        let mut matches = self.matches.get_default().borrow_mut();
        self.set.matches_into(name, &mut *matches);
        // The highest precedent match is the last one.
        if let Some(&i) = matches.last() {
            // Map the winning glob index back to its selection and to the
            // glob's position within that selection's definition.
            let (isel, iglob) = self.glob_to_selection[i];
            let sel = &self.selections[isel];
            let glob = Glob(GlobInner::Matched {
                def: sel.inner(),
                which: iglob,
                negated: sel.is_negated(),
            });
            return if sel.is_negated() {
                Match::Ignore(glob)
            } else {
                Match::Whitelist(glob)
            };
        }
        // Nothing matched: ignored if anything was selected, neutral otherwise.
        if self.has_selected {
            Match::Ignore(Glob::unmatched())
        } else {
            Match::None
        }
    }
}
/// TypesBuilder builds a type matcher from a set of file type definitions and
/// a set of file type selections.
pub struct TypesBuilder {
    /// All known file type definitions, keyed by type name.
    types: HashMap<String, FileTypeDef>,
    /// Selections in the order made; the `()` payload is replaced by the
    /// matching `FileTypeDef` when `build` runs.
    selections: Vec<Selection<()>>,
}

impl TypesBuilder {
    /// Create a new builder for a file type matcher.
    ///
    /// The builder contains *no* type definitions to start with. A set
    /// of default type definitions can be added with `add_defaults`, and
    /// additional type definitions can be added with `select` and `negate`.
    pub fn new() -> TypesBuilder {
        TypesBuilder {
            types: HashMap::new(),
            selections: vec![],
        }
    }
    /// Build the current set of file type definitions *and* selections into
    /// a file type matcher.
    pub fn build(&self) -> Result<Types, Error> {
        let defs = self.definitions();
        let has_selected = self.selections.iter().any(|s| !s.is_negated());
        let mut selections = vec![];
        let mut glob_to_selection = vec![];
        let mut build_set = GlobSetBuilder::new();
        for (isel, selection) in self.selections.iter().enumerate() {
            // Selecting a type that was never defined is a user error.
            let def = match self.types.get(selection.name()) {
                Some(def) => def.clone(),
                None => {
                    let name = selection.name().to_string();
                    return Err(Error::UnrecognizedFileType(name));
                }
            };
            // Every glob of the selected definition goes into one big glob
            // set; remember which (selection, glob) pair each entry came from
            // so matches can be attributed later.
            for (iglob, glob) in def.globs.iter().enumerate() {
                build_set.add(try!(
                    GlobBuilder::new(glob)
                        .literal_separator(true)
                        .build()
                        .map_err(|err| Error::Glob(err.to_string()))));
                glob_to_selection.push((isel, iglob));
            }
            selections.push(selection.clone().map(move |_| def));
        }
        let set = try!(build_set.build().map_err(|err| {
            Error::Glob(err.to_string())
        }));
        Ok(Types {
            defs: defs,
            selections: selections,
            has_selected: has_selected,
            glob_to_selection: glob_to_selection,
            set: set,
            matches: Arc::new(ThreadLocal::default()),
        })
    }
    /// Return the set of current file type definitions.
    ///
    /// Definitions and globs are sorted.
    pub fn definitions(&self) -> Vec<FileTypeDef> {
        let mut defs = vec![];
        for def in self.types.values() {
            let mut def = def.clone();
            def.globs.sort();
            defs.push(def);
        }
        defs.sort_by(|def1, def2| def1.name().cmp(def2.name()));
        defs
    }
    /// Select the file type given by `name`.
    ///
    /// If `name` is `all`, then all file types currently defined are selected.
    pub fn select(&mut self, name: &str) -> &mut TypesBuilder {
        if name == "all" {
            for name in self.types.keys() {
                self.selections.push(Selection::Select(name.to_string(), ()));
            }
        } else {
            self.selections.push(Selection::Select(name.to_string(), ()));
        }
        self
    }
    /// Ignore the file type given by `name`.
    ///
    /// If `name` is `all`, then all file types currently defined are negated.
    pub fn negate(&mut self, name: &str) -> &mut TypesBuilder {
        if name == "all" {
            for name in self.types.keys() {
                self.selections.push(Selection::Negate(name.to_string(), ()));
            }
        } else {
            self.selections.push(Selection::Negate(name.to_string(), ()));
        }
        self
    }
    /// Clear any file type definitions for the type name given.
    pub fn clear(&mut self, name: &str) -> &mut TypesBuilder {
        self.types.remove(name);
        self
    }
    /// Add a new file type definition. `name` can be arbitrary and `pat`
    /// should be a glob recognizing file paths belonging to the `name` type.
    ///
    /// If `name` is `all` or otherwise contains a `:`, then an error is
    /// returned.
    pub fn add(&mut self, name: &str, glob: &str) -> Result<(), Error> {
        // `all` and `:` are reserved by `select`/`negate` and `add_def`.
        if name == "all" || name.contains(':') {
            return Err(Error::InvalidDefinition);
        }
        let (key, glob) = (name.to_string(), glob.to_string());
        self.types.entry(key).or_insert_with(|| {
            FileTypeDef { name: name.to_string(), globs: vec![] }
        }).globs.push(glob);
        Ok(())
    }
    /// Add a new file type definition specified in string form. The format
    /// is `name:glob`. Names may not include a colon.
    pub fn add_def(&mut self, def: &str) -> Result<(), Error> {
        // Split on the first `:`; both halves must be non-empty.
        let name: String = def.chars().take_while(|&c| c != ':').collect();
        let pat: String = def.chars().skip(name.chars().count() + 1).collect();
        if name.is_empty() || pat.is_empty() {
            return Err(Error::InvalidDefinition);
        }
        self.add(&name, &pat)
    }
    /// Add a set of default file type definitions.
    pub fn add_defaults(&mut self) -> &mut TypesBuilder {
        static MSG: &'static str = "adding a default type should never fail";
        for &(name, exts) in DEFAULT_TYPES {
            for ext in exts {
                self.add(name, ext).expect(MSG);
            }
        }
        self
    }
}
#[cfg(test)]
mod tests {
    use super::TypesBuilder;

    // Builds a matcher from `$types` definitions with `$sel` selected and
    // `$selnot` negated, then checks whether `$path` is *not* ignored.
    // The `not`-prefixed form asserts the opposite outcome.
    macro_rules! matched {
        ($name:ident, $types:expr, $sel:expr, $selnot:expr,
         $path:expr) => {
            matched!($name, $types, $sel, $selnot, $path, true);
        };
        (not, $name:ident, $types:expr, $sel:expr, $selnot:expr,
         $path:expr) => {
            matched!($name, $types, $sel, $selnot, $path, false);
        };
        ($name:ident, $types:expr, $sel:expr, $selnot:expr,
         $path:expr, $matched:expr) => {
            #[test]
            fn $name() {
                let mut btypes = TypesBuilder::new();
                for tydef in $types {
                    btypes.add_def(tydef).unwrap();
                }
                for sel in $sel {
                    btypes.select(sel);
                }
                for selnot in $selnot {
                    btypes.negate(selnot);
                }
                let types = btypes.build().unwrap();
                let mat = types.matched($path, false);
                assert_eq!($matched, !mat.is_ignore());
            }
        };
    }

    // Simple definitions shared by all of the cases below.
    fn types() -> Vec<&'static str> {
        vec![
            "html:*.html",
            "html:*.htm",
            "rust:*.rs",
            "js:*.js",
            "foo:*.{rs,foo}",
        ]
    }

    matched!(match1, types(), vec!["rust"], vec![], "lib.rs");
    matched!(match2, types(), vec!["html"], vec![], "index.html");
    matched!(match3, types(), vec!["html"], vec![], "index.htm");
    matched!(match4, types(), vec!["html", "rust"], vec![], "main.rs");
    matched!(match5, types(), vec![], vec![], "index.html");
    matched!(match6, types(), vec![], vec!["rust"], "index.html");
    matched!(match7, types(), vec!["foo"], vec!["rust"], "main.foo");

    matched!(not, matchnot1, types(), vec!["rust"], vec![], "index.html");
    matched!(not, matchnot2, types(), vec![], vec!["rust"], "main.rs");
    matched!(not, matchnot3, types(), vec!["foo"], vec!["rust"], "main.rs");
    matched!(not, matchnot4, types(), vec!["rust"], vec!["foo"], "main.rs");
    matched!(not, matchnot5, types(), vec!["rust"], vec!["foo"], "main.foo");
}
Add textile filetype
/*!
The types module provides a way of associating globs on file names to file
types.
This can be used to match specific types of files. For example, among
the default file types provided, the Rust file type is defined to be `*.rs`
with name `rust`. Similarly, the C file type is defined to be `*.{c,h}` with
name `c`.
Note that the set of default types may change over time.
# Example
This shows how to create and use a simple file type matcher using the default
file types defined in this crate.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.select("rust");
let matcher = builder.build().unwrap();
assert!(matcher.matched("foo.rs", false).is_whitelist());
assert!(matcher.matched("foo.c", false).is_ignore());
```
# Example: negation
This is like the previous example, but shows how negating a file type works.
That is, this will let us match file paths that *don't* correspond to a
particular file type.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.negate("c");
let matcher = builder.build().unwrap();
assert!(matcher.matched("foo.rs", false).is_none());
assert!(matcher.matched("foo.c", false).is_ignore());
```
# Example: custom file type definitions
This shows how to extend this library's default file type definitions with
your own.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.add("foo", "*.foo");
// Another way of adding a file type definition.
// This is useful when accepting input from an end user.
builder.add_def("bar:*.bar");
// Note: we only select `foo`, not `bar`.
builder.select("foo");
let matcher = builder.build().unwrap();
assert!(matcher.matched("x.foo", false).is_whitelist());
// This is ignored because we only selected the `foo` file type.
assert!(matcher.matched("x.bar", false).is_ignore());
```
*/
use std::cell::RefCell;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Arc;
use globset::{GlobBuilder, GlobSet, GlobSetBuilder};
use thread_local::ThreadLocal;
use pathutil::file_name;
use {Error, Match};
// Built-in file type definitions as (type name, glob patterns) pairs.
// Used by `TypesBuilder::add_defaults`. Per the module docs, this set may
// change over time; names must never be `all` or contain `:`.
const DEFAULT_TYPES: &'static [(&'static str, &'static [&'static str])] = &[
    ("agda", &["*.agda", "*.lagda"]),
    ("asciidoc", &["*.adoc", "*.asc", "*.asciidoc"]),
    ("asm", &["*.asm", "*.s", "*.S"]),
    ("awk", &["*.awk"]),
    ("c", &["*.c", "*.h", "*.H"]),
    ("cbor", &["*.cbor"]),
    ("clojure", &["*.clj", "*.cljc", "*.cljs", "*.cljx"]),
    ("cmake", &["*.cmake", "CMakeLists.txt"]),
    ("coffeescript", &["*.coffee"]),
    ("config", &["*.config"]),
    ("cpp", &[
        "*.C", "*.cc", "*.cpp", "*.cxx",
        "*.h", "*.H", "*.hh", "*.hpp",
    ]),
    ("csharp", &["*.cs"]),
    ("css", &["*.css"]),
    ("cython", &["*.pyx"]),
    ("dart", &["*.dart"]),
    ("d", &["*.d"]),
    ("elisp", &["*.el"]),
    ("erlang", &["*.erl", "*.hrl"]),
    ("fish", &["*.fish"]),
    ("fortran", &[
        "*.f", "*.F", "*.f77", "*.F77", "*.pfo",
        "*.f90", "*.F90", "*.f95", "*.F95",
    ]),
    ("fsharp", &["*.fs", "*.fsx", "*.fsi"]),
    ("go", &["*.go"]),
    ("groovy", &["*.groovy", "*.gradle"]),
    ("hbs", &["*.hbs"]),
    ("haskell", &["*.hs", "*.lhs"]),
    ("html", &["*.htm", "*.html"]),
    ("java", &["*.java"]),
    ("jinja", &["*.jinja", "*.jinja2"]),
    ("js", &[
        "*.js", "*.jsx", "*.vue",
    ]),
    ("json", &["*.json"]),
    ("jsonl", &["*.jsonl"]),
    ("lisp", &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
    ("lua", &["*.lua"]),
    ("m4", &["*.ac", "*.m4"]),
    ("make", &["gnumakefile", "Gnumakefile", "makefile", "Makefile", "*.mk"]),
    // Note: "markdown" and "md" are deliberate aliases with identical globs.
    ("markdown", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
    ("md", &["*.markdown", "*.md", "*.mdown", "*.mkdn"]),
    ("matlab", &["*.m"]),
    ("mk", &["mkfile"]),
    ("ml", &["*.ml"]),
    ("nim", &["*.nim"]),
    ("objc", &["*.h", "*.m"]),
    ("objcpp", &["*.h", "*.mm"]),
    ("ocaml", &["*.ml", "*.mli", "*.mll", "*.mly"]),
    ("perl", &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm"]),
    ("php", &["*.php", "*.php3", "*.php4", "*.php5", "*.phtml"]),
    ("py", &["*.py"]),
    ("readme", &["README*", "*README"]),
    ("r", &["*.R", "*.r", "*.Rmd", "*.Rnw"]),
    ("rst", &["*.rst"]),
    ("ruby", &["*.rb"]),
    ("rust", &["*.rs"]),
    ("scala", &["*.scala"]),
    ("sh", &["*.bash", "*.csh", "*.ksh", "*.sh", "*.tcsh"]),
    ("spark", &["*.spark"]),
    ("sql", &["*.sql"]),
    ("sv", &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]),
    ("swift", &["*.swift"]),
    ("taskpaper", &["*.taskpaper"]),
    ("tcl", &["*.tcl"]),
    ("tex", &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib"]),
    ("textile", &["*.textile"]),
    ("ts", &["*.ts", "*.tsx"]),
    ("txt", &["*.txt"]),
    ("toml", &["*.toml", "Cargo.lock"]),
    ("vala", &["*.vala"]),
    ("vb", &["*.vb"]),
    ("vimscript", &["*.vim"]),
    ("xml", &["*.xml"]),
    ("yacc", &["*.y"]),
    ("yaml", &["*.yaml", "*.yml"]),
];
/// Glob represents a single glob in a set of file type definitions.
///
/// There may be more than one glob for a particular file type.
///
/// This is used to report information about the highest precedent glob
/// that matched.
///
/// Note that not all matches necessarily correspond to a specific glob.
/// For example, if there are one or more selections and a file path doesn't
/// match any of those selections, then the file path is considered to be
/// ignored.
///
/// The lifetime `'a` refers to the lifetime of the underlying file type
/// definition, which corresponds to the lifetime of the file type matcher.
#[derive(Clone, Debug)]
pub struct Glob<'a>(GlobInner<'a>);
// Private representation behind `Glob`, keeping the two match kinds out of
// the public API.
#[derive(Clone, Debug)]
enum GlobInner<'a> {
    /// No glob matched, but the file path should still be ignored.
    UnmatchedIgnore,
    /// A glob matched.
    Matched {
        /// The file type definition which provided the glob.
        def: &'a FileTypeDef,
        /// The index of the glob that matched inside the file type definition.
        which: usize,
        /// Whether the selection was negated or not.
        negated: bool,
    }
}
impl<'a> Glob<'a> {
    // Convenience constructor for the "ignored without a matching glob" case.
    fn unmatched() -> Glob<'a> {
        Glob(GlobInner::UnmatchedIgnore)
    }
}
/// A single file type definition.
///
/// File type definitions can be retrieved in aggregate from a file type
/// matcher. File type definitions are also reported when it's responsible
/// for a match.
#[derive(Clone, Debug)]
pub struct FileTypeDef {
    // The type name, e.g. "rust".
    name: String,
    // The glob patterns recognizing this type, e.g. ["*.rs"].
    globs: Vec<String>,
}
impl FileTypeDef {
    /// Return the name of this file type.
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Return the globs used to recognize this file type.
    pub fn globs(&self) -> &[String] {
        &self.globs
    }
}
/// Types is a file type matcher.
#[derive(Clone, Debug)]
pub struct Types {
    /// All of the file type definitions, sorted lexicographically by name.
    defs: Vec<FileTypeDef>,
    /// All of the selections made by the user.
    selections: Vec<Selection<FileTypeDef>>,
    /// Whether there is at least one Selection::Select in our selections.
    /// When this is true, a Match::None is converted to Match::Ignore.
    has_selected: bool,
    /// A mapping from glob index in the set to two indices. The first is an
    /// index into `selections` and the second is an index into the
    /// corresponding file type definition's list of globs.
    glob_to_selection: Vec<(usize, usize)>,
    /// The set of all glob selections, used for actual matching.
    set: GlobSet,
    /// Temporary storage for globs that match. Kept thread-local so that
    /// `matched` can reuse a buffer without requiring `&mut self`.
    matches: Arc<ThreadLocal<RefCell<Vec<usize>>>>,
}
/// Indicates the type of a selection for a particular file type.
#[derive(Clone, Debug)]
enum Selection<T> {
    Select(String, T),
    Negate(String, T),
}
impl<T> Selection<T> {
    /// Returns true if and only if this selection negates its file type.
    fn is_negated(&self) -> bool {
        match *self {
            Selection::Negate(..) => true,
            Selection::Select(..) => false,
        }
    }
    /// Returns the file type name this selection refers to.
    fn name(&self) -> &str {
        match *self {
            Selection::Select(ref name, _)
            | Selection::Negate(ref name, _) => name,
        }
    }
    /// Transforms the payload with `f` while preserving the selection kind
    /// and name.
    fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Selection<U> {
        match self {
            Selection::Select(name, inner) => Selection::Select(name, f(inner)),
            Selection::Negate(name, inner) => Selection::Negate(name, f(inner)),
        }
    }
    /// Borrows the payload of this selection.
    fn inner(&self) -> &T {
        match *self {
            Selection::Select(_, ref inner)
            | Selection::Negate(_, ref inner) => inner,
        }
    }
}
impl Types {
    /// Creates a new file type matcher that never matches any path and
    /// contains no file type definitions.
    pub fn empty() -> Types {
        Types {
            defs: vec![],
            selections: vec![],
            has_selected: false,
            glob_to_selection: vec![],
            // An empty glob set cannot fail to build.
            set: GlobSetBuilder::new().build().unwrap(),
            matches: Arc::new(ThreadLocal::default()),
        }
    }
    /// Returns true if and only if this matcher has zero selections.
    pub fn is_empty(&self) -> bool {
        self.selections.is_empty()
    }
    /// Returns the number of selections used in this matcher.
    pub fn len(&self) -> usize {
        self.selections.len()
    }
    /// Return the set of current file type definitions.
    ///
    /// Definitions and globs are sorted.
    pub fn definitions(&self) -> &[FileTypeDef] {
        &self.defs
    }
    /// Returns a match for the given path against this file type matcher.
    ///
    /// The path is considered whitelisted if it matches a selected file type.
    /// The path is considered ignored if it matches a negated file type.
    /// If at least one file type is selected and `path` doesn't match, then
    /// the path is also considered ignored.
    pub fn matched<'a, P: AsRef<Path>>(
        &'a self,
        path: P,
        is_dir: bool,
    ) -> Match<Glob<'a>> {
        // File types don't apply to directories, and we can't do anything
        // if our glob set is empty.
        if is_dir || self.set.is_empty() {
            return Match::None;
        }
        // We only want to match against the file name, so extract it.
        // If one doesn't exist, then we can't match it.
        let name = match file_name(path.as_ref()) {
            Some(name) => name,
            None if self.has_selected => {
                return Match::Ignore(Glob::unmatched());
            }
            None => {
                return Match::None;
            }
        };
        // Reuse this thread's scratch buffer to collect matching glob
        // indices without allocating on every call.
        let mut matches = self.matches.get_default().borrow_mut();
        self.set.matches_into(name, &mut *matches);
        // The highest precedent match is the last one.
        if let Some(&i) = matches.last() {
            // Map the glob index back to the selection and the specific
            // glob within its file type definition.
            let (isel, iglob) = self.glob_to_selection[i];
            let sel = &self.selections[isel];
            let glob = Glob(GlobInner::Matched {
                def: sel.inner(),
                which: iglob,
                negated: sel.is_negated(),
            });
            return if sel.is_negated() {
                Match::Ignore(glob)
            } else {
                Match::Whitelist(glob)
            };
        }
        // No glob matched: with at least one positive selection, anything
        // unmatched is ignored; otherwise there is nothing to say.
        if self.has_selected {
            Match::Ignore(Glob::unmatched())
        } else {
            Match::None
        }
    }
}
/// TypesBuilder builds a type matcher from a set of file type definitions and
/// a set of file type selections.
pub struct TypesBuilder {
    // Definitions keyed by type name; filled by `add`/`add_def`/`add_defaults`.
    types: HashMap<String, FileTypeDef>,
    // Select/Negate choices in the order the user made them.
    selections: Vec<Selection<()>>,
}
impl TypesBuilder {
    /// Create a new builder for a file type matcher.
    ///
    /// The builder contains *no* type definitions to start with. A set
    /// of default type definitions can be added with `add_defaults`, and
    /// additional type definitions can be added with `select` and `negate`.
    pub fn new() -> TypesBuilder {
        TypesBuilder {
            types: HashMap::new(),
            selections: vec![],
        }
    }
    /// Build the current set of file type definitions *and* selections into
    /// a file type matcher.
    ///
    /// Returns `Error::UnrecognizedFileType` if a selection names a type
    /// with no definition, or `Error::Glob` if any glob fails to compile.
    pub fn build(&self) -> Result<Types, Error> {
        let defs = self.definitions();
        let has_selected = self.selections.iter().any(|s| !s.is_negated());
        let mut selections = vec![];
        let mut glob_to_selection = vec![];
        let mut build_set = GlobSetBuilder::new();
        for (isel, selection) in self.selections.iter().enumerate() {
            // Every selection must refer to a defined type.
            let def = match self.types.get(selection.name()) {
                Some(def) => def.clone(),
                None => {
                    let name = selection.name().to_string();
                    return Err(Error::UnrecognizedFileType(name));
                }
            };
            for (iglob, glob) in def.globs.iter().enumerate() {
                // Record, per compiled glob, which selection and which of
                // its globs it came from so matches can be mapped back.
                build_set.add(try!(
                    GlobBuilder::new(glob)
                        .literal_separator(true)
                        .build()
                        .map_err(|err| Error::Glob(err.to_string()))));
                glob_to_selection.push((isel, iglob));
            }
            // Attach the resolved definition to the selection.
            selections.push(selection.clone().map(move |_| def));
        }
        let set = try!(build_set.build().map_err(|err| {
            Error::Glob(err.to_string())
        }));
        Ok(Types {
            defs: defs,
            selections: selections,
            has_selected: has_selected,
            glob_to_selection: glob_to_selection,
            set: set,
            matches: Arc::new(ThreadLocal::default()),
        })
    }
    /// Return the set of current file type definitions.
    ///
    /// Definitions and globs are sorted.
    pub fn definitions(&self) -> Vec<FileTypeDef> {
        let mut defs = vec![];
        for def in self.types.values() {
            let mut def = def.clone();
            def.globs.sort();
            defs.push(def);
        }
        defs.sort_by(|def1, def2| def1.name().cmp(def2.name()));
        defs
    }
    /// Select the file type given by `name`.
    ///
    /// If `name` is `all`, then all file types currently defined are selected.
    pub fn select(&mut self, name: &str) -> &mut TypesBuilder {
        if name == "all" {
            for name in self.types.keys() {
                self.selections.push(Selection::Select(name.to_string(), ()));
            }
        } else {
            self.selections.push(Selection::Select(name.to_string(), ()));
        }
        self
    }
    /// Ignore the file type given by `name`.
    ///
    /// If `name` is `all`, then all file types currently defined are negated.
    pub fn negate(&mut self, name: &str) -> &mut TypesBuilder {
        if name == "all" {
            for name in self.types.keys() {
                self.selections.push(Selection::Negate(name.to_string(), ()));
            }
        } else {
            self.selections.push(Selection::Negate(name.to_string(), ()));
        }
        self
    }
    /// Clear any file type definitions for the type name given.
    pub fn clear(&mut self, name: &str) -> &mut TypesBuilder {
        self.types.remove(name);
        self
    }
    /// Add a new file type definition. `name` can be arbitrary and `glob`
    /// should be a glob recognizing file paths belonging to the `name` type.
    ///
    /// If `name` is `all` or otherwise contains a `:`, then an error is
    /// returned.
    pub fn add(&mut self, name: &str, glob: &str) -> Result<(), Error> {
        if name == "all" || name.contains(':') {
            return Err(Error::InvalidDefinition);
        }
        let (key, glob) = (name.to_string(), glob.to_string());
        // Append to an existing definition or create one on first use.
        self.types.entry(key).or_insert_with(|| {
            FileTypeDef { name: name.to_string(), globs: vec![] }
        }).globs.push(glob);
        Ok(())
    }
    /// Add a new file type definition specified in string form. The format
    /// is `name:glob`. Names may not include a colon.
    pub fn add_def(&mut self, def: &str) -> Result<(), Error> {
        // Everything up to the first `:` is the name; everything after it
        // (which may itself contain colons) is the glob.
        let name: String = def.chars().take_while(|&c| c != ':').collect();
        let pat: String = def.chars().skip(name.chars().count() + 1).collect();
        if name.is_empty() || pat.is_empty() {
            return Err(Error::InvalidDefinition);
        }
        self.add(&name, &pat)
    }
    /// Add a set of default file type definitions.
    pub fn add_defaults(&mut self) -> &mut TypesBuilder {
        static MSG: &'static str = "adding a default type should never fail";
        for &(name, exts) in DEFAULT_TYPES {
            for ext in exts {
                self.add(name, ext).expect(MSG);
            }
        }
        self
    }
}
#[cfg(test)]
mod tests {
    use super::TypesBuilder;
    // Generates a #[test] that builds a matcher from `$types`, applies the
    // `$sel` selections and `$selnot` negations, then asserts whether
    // `$path` is matched (not ignored). The `not, ...` form asserts the
    // opposite.
    macro_rules! matched {
        ($name:ident, $types:expr, $sel:expr, $selnot:expr,
         $path:expr) => {
            matched!($name, $types, $sel, $selnot, $path, true);
        };
        (not, $name:ident, $types:expr, $sel:expr, $selnot:expr,
         $path:expr) => {
            matched!($name, $types, $sel, $selnot, $path, false);
        };
        ($name:ident, $types:expr, $sel:expr, $selnot:expr,
         $path:expr, $matched:expr) => {
            #[test]
            fn $name() {
                let mut btypes = TypesBuilder::new();
                for tydef in $types {
                    btypes.add_def(tydef).unwrap();
                }
                for sel in $sel {
                    btypes.select(sel);
                }
                for selnot in $selnot {
                    btypes.negate(selnot);
                }
                let types = btypes.build().unwrap();
                let mat = types.matched($path, false);
                assert_eq!($matched, !mat.is_ignore());
            }
        };
    }
    // Small fixed set of `name:glob` definitions shared by all cases below.
    fn types() -> Vec<&'static str> {
        vec![
            "html:*.html",
            "html:*.htm",
            "rust:*.rs",
            "js:*.js",
            "foo:*.{rs,foo}",
        ]
    }
    matched!(match1, types(), vec!["rust"], vec![], "lib.rs");
    matched!(match2, types(), vec!["html"], vec![], "index.html");
    matched!(match3, types(), vec!["html"], vec![], "index.htm");
    matched!(match4, types(), vec!["html", "rust"], vec![], "main.rs");
    matched!(match5, types(), vec![], vec![], "index.html");
    matched!(match6, types(), vec![], vec!["rust"], "index.html");
    matched!(match7, types(), vec!["foo"], vec!["rust"], "main.foo");
    matched!(not, matchnot1, types(), vec!["rust"], vec![], "index.html");
    matched!(not, matchnot2, types(), vec![], vec!["rust"], "main.rs");
    matched!(not, matchnot3, types(), vec!["foo"], vec!["rust"], "main.rs");
    matched!(not, matchnot4, types(), vec!["rust"], vec!["foo"], "main.rs");
    matched!(not, matchnot5, types(), vec!["rust"], vec!["foo"], "main.foo");
}
|
use std::borrow::Cow;
use std::ops::Range;
use crate::utils::{snippet_with_applicability, span_lint, span_lint_and_sugg, span_lint_and_then};
use rustc_ast::ast::{Expr, ExprKind, Item, ItemKind, MacCall, StrLit, StrStyle};
use rustc_ast::token;
use rustc_ast::tokenstream::TokenStream;
use rustc_errors::Applicability;
use rustc_lexer::unescape::{self, EscapeError};
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_parse::parser;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::{sym, BytePos, Span, Symbol};
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `println!("")` to
    /// print a newline.
    ///
    /// **Why is this bad?** You should use `println!()`, which is simpler.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// // Bad
    /// println!("");
    ///
    /// // Good
    /// println!();
    /// ```
    pub PRINTLN_EMPTY_STRING,
    style,
    "using `println!(\"\")` with an empty string"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `print!()` with a format
    /// string that ends in a newline.
    ///
    /// **Why is this bad?** You should use `println!()` instead, which appends the
    /// newline.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # let name = "World";
    /// print!("Hello {}!\n", name);
    /// ```
    /// Use `println!()` instead:
    /// ```rust
    /// # let name = "World";
    /// println!("Hello {}!", name);
    /// ```
    pub PRINT_WITH_NEWLINE,
    style,
    "using `print!()` with a format string that ends in a single newline"
}
declare_clippy_lint! {
    /// **What it does:** Checks for printing on *stdout*. The purpose of this lint
    /// is to catch debugging remnants.
    ///
    /// **Why is this bad?** People often print on *stdout* while debugging an
    /// application and might forget to remove those prints afterward.
    ///
    /// **Known problems:** Only catches `print!` and `println!` calls.
    ///
    /// **Example:**
    /// ```rust
    /// println!("Hello world!");
    /// ```
    pub PRINT_STDOUT,
    restriction,
    "printing on stdout"
}
declare_clippy_lint! {
    /// **What it does:** Checks for use of `Debug` formatting. The purpose of this
    /// lint is to catch debugging remnants.
    ///
    /// **Why is this bad?** The purpose of the `Debug` trait is to facilitate
    /// debugging Rust code. It should not be used in user-facing output.
    ///
    /// **Example:**
    /// ```rust
    /// # let foo = "bar";
    /// println!("{:?}", foo);
    /// ```
    pub USE_DEBUG,
    restriction,
    "use of `Debug`-based formatting"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns about the use of literals as `print!`/`println!` args.
    ///
    /// **Why is this bad?** Using literals as `println!` args is inefficient
    /// (c.f., https://github.com/matthiaskrgr/rust-str-bench) and unnecessary
    /// (i.e., just put the literal in the format string)
    ///
    /// **Known problems:** Will also warn with macro calls as arguments that expand to literals
    /// -- e.g., `println!("{}", env!("FOO"))`.
    ///
    /// **Example:**
    /// ```rust
    /// println!("{}", "foo");
    /// ```
    /// Use the literal without formatting:
    /// ```rust
    /// println!("foo");
    /// ```
    pub PRINT_LITERAL,
    style,
    "printing a literal with a format string"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `writeln!(buf, "")` to
    /// print a newline.
    ///
    /// **Why is this bad?** You should use `writeln!(buf)`, which is simpler.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    ///
    /// // Bad
    /// writeln!(buf, "");
    ///
    /// // Good
    /// writeln!(buf);
    /// ```
    pub WRITELN_EMPTY_STRING,
    style,
    "using `writeln!(buf, \"\")` with an empty string"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `write!()` with a format
    /// string that
    /// ends in a newline.
    ///
    /// **Why is this bad?** You should use `writeln!()` instead, which appends the
    /// newline.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    /// # let name = "World";
    ///
    /// // Bad
    /// write!(buf, "Hello {}!\n", name);
    ///
    /// // Good
    /// writeln!(buf, "Hello {}!", name);
    /// ```
    pub WRITE_WITH_NEWLINE,
    style,
    "using `write!()` with a format string that ends in a single newline"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns about the use of literals as `write!`/`writeln!` args.
    ///
    /// **Why is this bad?** Using literals as `writeln!` args is inefficient
    /// (c.f., https://github.com/matthiaskrgr/rust-str-bench) and unnecessary
    /// (i.e., just put the literal in the format string)
    ///
    /// **Known problems:** Will also warn with macro calls as arguments that expand to literals
    /// -- e.g., `writeln!(buf, "{}", env!("FOO"))`.
    ///
    /// **Example:**
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    ///
    /// // Bad
    /// writeln!(buf, "{}", "foo");
    ///
    /// // Good
    /// writeln!(buf, "foo");
    /// ```
    pub WRITE_LITERAL,
    style,
    "writing a literal with a format string"
}
// Lint pass state shared across the lints registered below.
#[derive(Default)]
pub struct Write {
    // True while visiting an `impl Debug for ...` item; used to suppress
    // USE_DEBUG inside `Debug` implementations.
    in_debug_impl: bool,
}
impl_lint_pass!(Write => [
    PRINT_WITH_NEWLINE,
    PRINTLN_EMPTY_STRING,
    PRINT_STDOUT,
    USE_DEBUG,
    PRINT_LITERAL,
    WRITE_WITH_NEWLINE,
    WRITELN_EMPTY_STRING,
    WRITE_LITERAL
]);
impl EarlyLintPass for Write {
    // Remember when we enter an `impl Debug for ...` so that `{:?}` inside
    // it does not trigger USE_DEBUG.
    fn check_item(&mut self, _: &EarlyContext<'_>, item: &Item) {
        if let ItemKind::Impl {
            of_trait: Some(trait_ref),
            ..
        } = &item.kind
        {
            let trait_name = trait_ref
                .path
                .segments
                .iter()
                .last()
                .expect("path has at least one segment")
                .ident
                .name;
            if trait_name == sym::Debug {
                self.in_debug_impl = true;
            }
        }
    }
    // NOTE(review): this resets the flag after *any* item, including items
    // nested inside a `Debug` impl — presumably acceptable in practice;
    // confirm if nested items ever matter here.
    fn check_item_post(&mut self, _: &EarlyContext<'_>, _: &Item) {
        self.in_debug_impl = false;
    }
    // Dispatch on the macro name and apply the relevant print/write lints.
    fn check_mac(&mut self, cx: &EarlyContext<'_>, mac: &MacCall) {
        fn is_build_script(cx: &EarlyContext<'_>) -> bool {
            // Cargo sets the crate name for build scripts to `build_script_build`
            cx.sess
                .opts
                .crate_name
                .as_ref()
                .map_or(false, |crate_name| crate_name == "build_script_build")
        }
        if mac.path == sym!(println) {
            // Build scripts legitimately print to stdout to talk to Cargo.
            if !is_build_script(cx) {
                span_lint(cx, PRINT_STDOUT, mac.span(), "use of `println!`");
            }
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), false) {
                if fmt_str.symbol == Symbol::intern("") {
                    span_lint_and_sugg(
                        cx,
                        PRINTLN_EMPTY_STRING,
                        mac.span(),
                        "using `println!(\"\")`",
                        "replace it with",
                        "println!()".to_string(),
                        Applicability::MachineApplicable,
                    );
                }
            }
        } else if mac.path == sym!(print) {
            if !is_build_script(cx) {
                span_lint(cx, PRINT_STDOUT, mac.span(), "use of `print!`");
            }
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), false) {
                if check_newlines(&fmt_str) {
                    span_lint_and_then(
                        cx,
                        PRINT_WITH_NEWLINE,
                        mac.span(),
                        "using `print!()` with a format string that ends in a single newline",
                        |err| {
                            // Suggest renaming the macro and removing the
                            // trailing newline in one applicable fix.
                            err.multipart_suggestion(
                                "use `println!` instead",
                                vec![
                                    (mac.path.span, String::from("println")),
                                    (newline_span(&fmt_str), String::new()),
                                ],
                                Applicability::MachineApplicable,
                            );
                        },
                    );
                }
            }
        } else if mac.path == sym!(write) {
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), true) {
                if check_newlines(&fmt_str) {
                    span_lint_and_then(
                        cx,
                        WRITE_WITH_NEWLINE,
                        mac.span(),
                        "using `write!()` with a format string that ends in a single newline",
                        |err| {
                            err.multipart_suggestion(
                                "use `writeln!()` instead",
                                vec![
                                    (mac.path.span, String::from("writeln")),
                                    (newline_span(&fmt_str), String::new()),
                                ],
                                Applicability::MachineApplicable,
                            );
                        },
                    )
                }
            }
        } else if mac.path == sym!(writeln) {
            if let (Some(fmt_str), expr) = self.check_tts(cx, mac.args.inner_tokens(), true) {
                if fmt_str.symbol == Symbol::intern("") {
                    let mut applicability = Applicability::MachineApplicable;
                    // FIXME: remove this `#[allow(...)]` once the issue #5822 gets fixed
                    #[allow(clippy::option_if_let_else)]
                    let suggestion = if let Some(e) = expr {
                        snippet_with_applicability(cx, e.span, "v", &mut applicability)
                    } else {
                        // Writer expression could not be parsed: fall back
                        // to a placeholder and mark the fix non-automatic.
                        applicability = Applicability::HasPlaceholders;
                        Cow::Borrowed("v")
                    };
                    span_lint_and_sugg(
                        cx,
                        WRITELN_EMPTY_STRING,
                        mac.span(),
                        format!("using `writeln!({}, \"\")`", suggestion).as_str(),
                        "replace it with",
                        format!("writeln!({})", suggestion),
                        applicability,
                    );
                }
            }
        }
    }
}
/// Given a format string that ends in a newline and its span, calculates the span of the
/// newline, or the format string itself if the format string consists solely of a newline.
fn newline_span(fmtstr: &StrLit) -> Span {
    let sp = fmtstr.span;
    let contents = &fmtstr.symbol.as_str();
    // A format string that is nothing but an escaped newline: return the
    // whole literal's span so the entire argument gets removed.
    if *contents == r"\n" {
        return sp;
    }
    // Byte position just before the closing delimiter: one byte for the
    // closing quote, plus the trailing `#` hashes for raw strings.
    let newline_sp_hi = sp.hi()
        - match fmtstr.style {
            StrStyle::Cooked => BytePos(1),
            StrStyle::Raw(hashes) => BytePos((1 + hashes).into()),
        };
    // The trailing newline occupies 1 source byte if literal (`\n` char) or
    // 2 bytes if written as the escape sequence backslash-n.
    let newline_sp_len = if contents.ends_with('\n') {
        BytePos(1)
    } else if contents.ends_with(r"\n") {
        BytePos(2)
    } else {
        panic!("expected format string to contain a newline");
    };
    sp.with_lo(newline_sp_hi - newline_sp_len).with_hi(newline_sp_hi)
}
impl Write {
    /// Checks the arguments of `print[ln]!` and `write[ln]!` calls. It will return a tuple of two
    /// `Option`s. The first `Option` of the tuple is the macro's format string. It includes
    /// the contents of the string, whether it's a raw string, and the span of the literal in the
    /// source. The second `Option` in the tuple is, in the `write[ln]!` case, the expression the
    /// `format_str` should be written to.
    ///
    /// Example:
    ///
    /// Calling this function on
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    /// # let something = "something";
    /// writeln!(buf, "string to write: {}", something);
    /// ```
    /// will return
    /// ```rust,ignore
    /// (Some("string to write: {}"), Some(buf))
    /// ```
    #[allow(clippy::too_many_lines)]
    fn check_tts<'a>(&self, cx: &EarlyContext<'a>, tts: TokenStream, is_write: bool) -> (Option<StrLit>, Option<Expr>) {
        use rustc_parse_format::{
            AlignUnknown, ArgumentImplicitlyIs, ArgumentIs, ArgumentNamed, CountImplied, FormatSpec, ParseMode, Parser,
            Piece,
        };
        let mut parser = parser::Parser::new(&cx.sess.parse_sess, tts, false, None);
        let mut expr: Option<Expr> = None;
        if is_write {
            // `write[ln]!` takes the writer expression before the format
            // string; parse and keep it for suggestions.
            expr = match parser.parse_expr().map_err(|mut err| err.cancel()) {
                Ok(p) => Some(p.into_inner()),
                Err(_) => return (None, None),
            };
            // might be `writeln!(foo)`
            if parser.expect(&token::Comma).map_err(|mut err| err.cancel()).is_err() {
                return (None, expr);
            }
        }
        let fmtstr = match parser.parse_str_lit() {
            Ok(fmtstr) => fmtstr,
            Err(_) => return (None, expr),
        };
        let tmp = fmtstr.symbol.as_str();
        let mut args = vec![];
        // Re-parse the format string itself to collect the `{...}` argument
        // pieces, flagging `{:?}` usage along the way.
        let mut fmt_parser = Parser::new(&tmp, None, None, false, ParseMode::Format);
        while let Some(piece) = fmt_parser.next() {
            if !fmt_parser.errors.is_empty() {
                return (None, expr);
            }
            if let Piece::NextArgument(arg) = piece {
                if !self.in_debug_impl && arg.format.ty == "?" {
                    // FIXME: modify rustc's fmt string parser to give us the current span
                    span_lint(cx, USE_DEBUG, parser.prev_token.span, "use of `Debug`-based formatting");
                }
                args.push(arg);
            }
        }
        let lint = if is_write { WRITE_LITERAL } else { PRINT_LITERAL };
        let mut idx = 0;
        // Walk the remaining macro arguments, pairing each with the format
        // pieces that reference it, and lint literal arguments that are
        // only formatted with the default (empty) spec.
        loop {
            const SIMPLE: FormatSpec<'_> = FormatSpec {
                fill: None,
                align: AlignUnknown,
                flags: 0,
                precision: CountImplied,
                precision_span: None,
                width: CountImplied,
                width_span: None,
                ty: "",
                ty_span: None,
            };
            if !parser.eat(&token::Comma) {
                // No more arguments: done.
                return (Some(fmtstr), expr);
            }
            let token_expr = if let Ok(expr) = parser.parse_expr().map_err(|mut err| err.cancel()) {
                expr
            } else {
                return (Some(fmtstr), None);
            };
            match &token_expr.kind {
                ExprKind::Lit(_) => {
                    let mut all_simple = true;
                    let mut seen = false;
                    for arg in &args {
                        match arg.position {
                            ArgumentImplicitlyIs(n) | ArgumentIs(n) => {
                                if n == idx {
                                    all_simple &= arg.format == SIMPLE;
                                    seen = true;
                                }
                            },
                            ArgumentNamed(_) => {},
                        }
                    }
                    if all_simple && seen {
                        span_lint(cx, lint, token_expr.span, "literal with an empty format string");
                    }
                    idx += 1;
                },
                ExprKind::Assign(lhs, rhs, _) => {
                    // `name = <literal>` style named argument.
                    if let ExprKind::Lit(_) = rhs.kind {
                        if let ExprKind::Path(_, p) = &lhs.kind {
                            let mut all_simple = true;
                            let mut seen = false;
                            for arg in &args {
                                match arg.position {
                                    ArgumentImplicitlyIs(_) | ArgumentIs(_) => {},
                                    ArgumentNamed(name) => {
                                        if *p == name {
                                            seen = true;
                                            all_simple &= arg.format == SIMPLE;
                                        }
                                    },
                                }
                            }
                            if all_simple && seen {
                                span_lint(cx, lint, rhs.span, "literal with an empty format string");
                            }
                        }
                    }
                },
                _ => idx += 1,
            }
        }
    }
}
/// Checks if the format string contains a single newline that terminates it.
///
/// Literal and escaped newlines are both checked (only literal for raw strings).
fn check_newlines(fmtstr: &StrLit) -> bool {
    let mut has_internal_newline = false;
    let mut last_was_cr = false;
    let mut should_lint = false;
    let contents = &fmtstr.symbol.as_str();
    // Walk the unescaped characters. Lint only when the very last character
    // is a bare `\n` — i.e. not the tail of a `\r\n` pair — and no newline
    // appeared earlier in the string.
    let mut cb = |r: Range<usize>, c: Result<char, EscapeError>| {
        let c = c.unwrap();
        if r.end == contents.len() && c == '\n' && !last_was_cr && !has_internal_newline {
            should_lint = true;
        } else {
            last_was_cr = c == '\r';
            if c == '\n' {
                has_internal_newline = true;
            }
        }
    };
    // Raw strings have no escapes, so only literal newlines can occur there.
    match fmtstr.style {
        StrStyle::Cooked => unescape::unescape_literal(contents, unescape::Mode::Str, &mut cb),
        StrStyle::Raw(_) => unescape::unescape_literal(contents, unescape::Mode::RawStr, &mut cb),
    }
    should_lint
}
Fix false positive in write_literal and print_literal due to numeric literals
use std::borrow::Cow;
use std::ops::Range;
use crate::utils::{snippet_with_applicability, span_lint, span_lint_and_sugg, span_lint_and_then};
use if_chain::if_chain;
use rustc_ast::ast::{Expr, ExprKind, Item, ItemKind, LitKind, MacCall, StrLit, StrStyle};
use rustc_ast::token;
use rustc_ast::tokenstream::TokenStream;
use rustc_errors::Applicability;
use rustc_lexer::unescape::{self, EscapeError};
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_parse::parser;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::{sym, BytePos, Span, Symbol};
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `println!("")` to
    /// print a newline.
    ///
    /// **Why is this bad?** You should use `println!()`, which is simpler.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// // Bad
    /// println!("");
    ///
    /// // Good
    /// println!();
    /// ```
    pub PRINTLN_EMPTY_STRING,
    style,
    "using `println!(\"\")` with an empty string"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `print!()` with a format
    /// string that ends in a newline.
    ///
    /// **Why is this bad?** You should use `println!()` instead, which appends the
    /// newline.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # let name = "World";
    /// print!("Hello {}!\n", name);
    /// ```
    /// Use `println!()` instead:
    /// ```rust
    /// # let name = "World";
    /// println!("Hello {}!", name);
    /// ```
    pub PRINT_WITH_NEWLINE,
    style,
    "using `print!()` with a format string that ends in a single newline"
}
declare_clippy_lint! {
    /// **What it does:** Checks for printing on *stdout*. The purpose of this lint
    /// is to catch debugging remnants.
    ///
    /// **Why is this bad?** People often print on *stdout* while debugging an
    /// application and might forget to remove those prints afterward.
    ///
    /// **Known problems:** Only catches `print!` and `println!` calls.
    ///
    /// **Example:**
    /// ```rust
    /// println!("Hello world!");
    /// ```
    pub PRINT_STDOUT,
    restriction,
    "printing on stdout"
}
declare_clippy_lint! {
    /// **What it does:** Checks for use of `Debug` formatting. The purpose of this
    /// lint is to catch debugging remnants.
    ///
    /// **Why is this bad?** The purpose of the `Debug` trait is to facilitate
    /// debugging Rust code. It should not be used in user-facing output.
    ///
    /// **Example:**
    /// ```rust
    /// # let foo = "bar";
    /// println!("{:?}", foo);
    /// ```
    pub USE_DEBUG,
    restriction,
    "use of `Debug`-based formatting"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns about the use of literals as `print!`/`println!` args.
    ///
    /// **Why is this bad?** Using literals as `println!` args is inefficient
    /// (c.f., https://github.com/matthiaskrgr/rust-str-bench) and unnecessary
    /// (i.e., just put the literal in the format string)
    ///
    /// **Known problems:** Will also warn with macro calls as arguments that expand to literals
    /// -- e.g., `println!("{}", env!("FOO"))`.
    ///
    /// **Example:**
    /// ```rust
    /// println!("{}", "foo");
    /// ```
    /// Use the literal without formatting:
    /// ```rust
    /// println!("foo");
    /// ```
    pub PRINT_LITERAL,
    style,
    "printing a literal with a format string"
}
declare_clippy_lint! {
/// **What it does:** This lint warns when you use `writeln!(buf, "")` to
/// print a newline.
///
/// **Why is this bad?** You should use `writeln!(buf)`, which is simpler.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// # use std::fmt::Write;
/// # let mut buf = String::new();
///
/// // Bad
/// writeln!(buf, "");
///
/// // Good
/// writeln!(buf);
/// ```
pub WRITELN_EMPTY_STRING,
style,
"using `writeln!(buf, \"\")` with an empty string"
}
declare_clippy_lint! {
/// **What it does:** This lint warns when you use `write!()` with a format
/// string that
/// ends in a newline.
///
/// **Why is this bad?** You should use `writeln!()` instead, which appends the
/// newline.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// # use std::fmt::Write;
/// # let mut buf = String::new();
/// # let name = "World";
///
/// // Bad
/// write!(buf, "Hello {}!\n", name);
///
/// // Good
/// writeln!(buf, "Hello {}!", name);
/// ```
pub WRITE_WITH_NEWLINE,
style,
"using `write!()` with a format string that ends in a single newline"
}
declare_clippy_lint! {
/// **What it does:** This lint warns about the use of literals as `write!`/`writeln!` args.
///
/// **Why is this bad?** Using literals as `writeln!` args is inefficient
/// (c.f., https://github.com/matthiaskrgr/rust-str-bench) and unnecessary
/// (i.e., just put the literal in the format string)
///
/// **Known problems:** Will also warn with macro calls as arguments that expand to literals
/// -- e.g., `writeln!(buf, "{}", env!("FOO"))`.
///
/// **Example:**
/// ```rust
/// # use std::fmt::Write;
/// # let mut buf = String::new();
///
/// // Bad
/// writeln!(buf, "{}", "foo");
///
/// // Good
/// writeln!(buf, "foo");
/// ```
pub WRITE_LITERAL,
style,
"writing a literal with a format string"
}
/// Lint pass state shared across the `check_*` callbacks below.
#[derive(Default)]
pub struct Write {
    // `true` while visiting an `impl … Debug for …` item; `{:?}` is expected
    // there, so `check_tts` suppresses `USE_DEBUG` when this is set.
    in_debug_impl: bool,
}
impl_lint_pass!(Write => [
    PRINT_WITH_NEWLINE,
    PRINTLN_EMPTY_STRING,
    PRINT_STDOUT,
    USE_DEBUG,
    PRINT_LITERAL,
    WRITE_WITH_NEWLINE,
    WRITELN_EMPTY_STRING,
    WRITE_LITERAL
]);
impl EarlyLintPass for Write {
    // Set `in_debug_impl` when entering a trait impl whose trait path ends in
    // `Debug`; `check_tts` reads the flag to suppress `USE_DEBUG` inside
    // `Debug` implementations.
    fn check_item(&mut self, _: &EarlyContext<'_>, item: &Item) {
        if let ItemKind::Impl {
            of_trait: Some(trait_ref),
            ..
        } = &item.kind
        {
            // Only the final path segment is compared, so `Debug`,
            // `fmt::Debug` and `std::fmt::Debug` are all recognized.
            let trait_name = trait_ref
                .path
                .segments
                .iter()
                .last()
                .expect("path has at least one segment")
                .ident
                .name;
            if trait_name == sym::Debug {
                self.in_debug_impl = true;
            }
        }
    }
    // Leaving any item clears the flag again.
    fn check_item_post(&mut self, _: &EarlyContext<'_>, _: &Item) {
        self.in_debug_impl = false;
    }
    // Dispatch on the macro name and run the relevant formatting lints.
    fn check_mac(&mut self, cx: &EarlyContext<'_>, mac: &MacCall) {
        fn is_build_script(cx: &EarlyContext<'_>) -> bool {
            // Cargo sets the crate name for build scripts to `build_script_build`
            cx.sess
                .opts
                .crate_name
                .as_ref()
                .map_or(false, |crate_name| crate_name == "build_script_build")
        }
        if mac.path == sym!(println) {
            // Build scripts legitimately print to stdout (cargo directives),
            // so `PRINT_STDOUT` is skipped for them.
            if !is_build_script(cx) {
                span_lint(cx, PRINT_STDOUT, mac.span(), "use of `println!`");
            }
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), false) {
                // `println!("")` -> `println!()`
                if fmt_str.symbol == Symbol::intern("") {
                    span_lint_and_sugg(
                        cx,
                        PRINTLN_EMPTY_STRING,
                        mac.span(),
                        "using `println!(\"\")`",
                        "replace it with",
                        "println!()".to_string(),
                        Applicability::MachineApplicable,
                    );
                }
            }
        } else if mac.path == sym!(print) {
            if !is_build_script(cx) {
                span_lint(cx, PRINT_STDOUT, mac.span(), "use of `print!`");
            }
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), false) {
                // `print!("…\n")` -> `println!("…")`; the suggestion renames the
                // macro and removes the trailing newline from the format string.
                if check_newlines(&fmt_str) {
                    span_lint_and_then(
                        cx,
                        PRINT_WITH_NEWLINE,
                        mac.span(),
                        "using `print!()` with a format string that ends in a single newline",
                        |err| {
                            err.multipart_suggestion(
                                "use `println!` instead",
                                vec![
                                    (mac.path.span, String::from("println")),
                                    (newline_span(&fmt_str), String::new()),
                                ],
                                Applicability::MachineApplicable,
                            );
                        },
                    );
                }
            }
        } else if mac.path == sym!(write) {
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), true) {
                // `write!(buf, "…\n")` -> `writeln!(buf, "…")`
                if check_newlines(&fmt_str) {
                    span_lint_and_then(
                        cx,
                        WRITE_WITH_NEWLINE,
                        mac.span(),
                        "using `write!()` with a format string that ends in a single newline",
                        |err| {
                            err.multipart_suggestion(
                                "use `writeln!()` instead",
                                vec![
                                    (mac.path.span, String::from("writeln")),
                                    (newline_span(&fmt_str), String::new()),
                                ],
                                Applicability::MachineApplicable,
                            );
                        },
                    )
                }
            }
        } else if mac.path == sym!(writeln) {
            if let (Some(fmt_str), expr) = self.check_tts(cx, mac.args.inner_tokens(), true) {
                // `writeln!(buf, "")` -> `writeln!(buf)`
                if fmt_str.symbol == Symbol::intern("") {
                    let mut applicability = Applicability::MachineApplicable;
                    // FIXME: remove this `#[allow(...)]` once the issue #5822 gets fixed
                    #[allow(clippy::option_if_let_else)]
                    let suggestion = if let Some(e) = expr {
                        snippet_with_applicability(cx, e.span, "v", &mut applicability)
                    } else {
                        // The receiver expression could not be recovered; fall
                        // back to a placeholder and mark the suggestion as such.
                        applicability = Applicability::HasPlaceholders;
                        Cow::Borrowed("v")
                    };
                    span_lint_and_sugg(
                        cx,
                        WRITELN_EMPTY_STRING,
                        mac.span(),
                        format!("using `writeln!({}, \"\")`", suggestion).as_str(),
                        "replace it with",
                        format!("writeln!({})", suggestion),
                        applicability,
                    );
                }
            }
        }
    }
}
/// Given a format string that ends in a newline and its span, calculates the span of the
/// newline, or the format string itself if the format string consists solely of a newline.
fn newline_span(fmtstr: &StrLit) -> Span {
    let sp = fmtstr.span;
    let contents = &fmtstr.symbol.as_str();
    // The symbol holds the *source* text, so a cooked `"\n"` literal is the two
    // characters `\` `n` here. A newline-only string has nothing to keep, so
    // return the whole literal span.
    if *contents == r"\n" {
        return sp;
    }
    // The newline ends just before the closing quote (plus the trailing `#`s
    // for raw strings).
    let newline_sp_hi = sp.hi()
        - match fmtstr.style {
            StrStyle::Cooked => BytePos(1),
            StrStyle::Raw(hashes) => BytePos((1 + hashes).into()),
        };
    // A literal newline is one byte in the source; the escape `\n` is two.
    let newline_sp_len = if contents.ends_with('\n') {
        BytePos(1)
    } else if contents.ends_with(r"\n") {
        BytePos(2)
    } else {
        panic!("expected format string to contain a newline");
    };
    sp.with_lo(newline_sp_hi - newline_sp_len).with_hi(newline_sp_hi)
}
impl Write {
    /// Checks the arguments of `print[ln]!` and `write[ln]!` calls. It will return a tuple of two
    /// `Option`s. The first `Option` of the tuple is the macro's format string. It includes
    /// the contents of the string, whether it's a raw string, and the span of the literal in the
    /// source. The second `Option` in the tuple is, in the `write[ln]!` case, the expression the
    /// `format_str` should be written to.
    ///
    /// Example:
    ///
    /// Calling this function on
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    /// # let something = "something";
    /// writeln!(buf, "string to write: {}", something);
    /// ```
    /// will return
    /// ```rust,ignore
    /// (Some("string to write: {}"), Some(buf))
    /// ```
    #[allow(clippy::too_many_lines)]
    fn check_tts<'a>(&self, cx: &EarlyContext<'a>, tts: TokenStream, is_write: bool) -> (Option<StrLit>, Option<Expr>) {
        use rustc_parse_format::{
            AlignUnknown, ArgumentImplicitlyIs, ArgumentIs, ArgumentNamed, CountImplied, FormatSpec, ParseMode, Parser,
            Piece,
        };
        let mut parser = parser::Parser::new(&cx.sess.parse_sess, tts, false, None);
        let mut expr: Option<Expr> = None;
        if is_write {
            // `write[ln]!` has a receiver expression before the format string.
            expr = match parser.parse_expr().map_err(|mut err| err.cancel()) {
                Ok(p) => Some(p.into_inner()),
                Err(_) => return (None, None),
            };
            // might be `writeln!(foo)`
            if parser.expect(&token::Comma).map_err(|mut err| err.cancel()).is_err() {
                return (None, expr);
            }
        }
        let fmtstr = match parser.parse_str_lit() {
            Ok(fmtstr) => fmtstr,
            Err(_) => return (None, expr),
        };
        let tmp = fmtstr.symbol.as_str();
        let mut args = vec![];
        // Collect every `{…}` argument of the format string, flagging
        // `{:?}` uses outside of `Debug` impls along the way.
        let mut fmt_parser = Parser::new(&tmp, None, None, false, ParseMode::Format);
        while let Some(piece) = fmt_parser.next() {
            if !fmt_parser.errors.is_empty() {
                return (None, expr);
            }
            if let Piece::NextArgument(arg) = piece {
                if !self.in_debug_impl && arg.format.ty == "?" {
                    // FIXME: modify rustc's fmt string parser to give us the current span
                    span_lint(cx, USE_DEBUG, parser.prev_token.span, "use of `Debug`-based formatting");
                }
                args.push(arg);
            }
        }
        let lint = if is_write { WRITE_LITERAL } else { PRINT_LITERAL };
        let mut idx = 0;
        loop {
            const SIMPLE: FormatSpec<'_> = FormatSpec {
                fill: None,
                align: AlignUnknown,
                flags: 0,
                precision: CountImplied,
                precision_span: None,
                width: CountImplied,
                width_span: None,
                ty: "",
                ty_span: None,
            };
            if !parser.eat(&token::Comma) {
                return (Some(fmtstr), expr);
            }
            let token_expr = if let Ok(expr) = parser.parse_expr().map_err(|mut err| err.cancel()) {
                expr
            } else {
                return (Some(fmtstr), None);
            };
            match &token_expr.kind {
                // Only lint non-numeric literals: moving e.g. `42` into the
                // format string would turn it into the *string* `"42"`.
                // (`!matches!` replaces the former `match … => false, _ => true`.)
                ExprKind::Lit(lit) if !matches!(lit.kind, LitKind::Int(..) | LitKind::Float(..)) => {
                    let mut all_simple = true;
                    let mut seen = false;
                    for arg in &args {
                        match arg.position {
                            ArgumentImplicitlyIs(n) | ArgumentIs(n) => {
                                if n == idx {
                                    // Only suggest inlining when the `{}` has no
                                    // width/precision/fill/type modifiers.
                                    all_simple &= arg.format == SIMPLE;
                                    seen = true;
                                }
                            },
                            ArgumentNamed(_) => {},
                        }
                    }
                    if all_simple && seen {
                        span_lint(cx, lint, token_expr.span, "literal with an empty format string");
                    }
                    idx += 1;
                },
                // Named argument: `writeln!(buf, "{foo}", foo = "bar")`.
                ExprKind::Assign(lhs, rhs, _) => {
                    if_chain! {
                        if let ExprKind::Lit(ref lit) = rhs.kind;
                        if !matches!(lit.kind, LitKind::Int(..) | LitKind::Float(..));
                        if let ExprKind::Path(_, p) = &lhs.kind;
                        then {
                            let mut all_simple = true;
                            let mut seen = false;
                            for arg in &args {
                                match arg.position {
                                    ArgumentImplicitlyIs(_) | ArgumentIs(_) => {},
                                    ArgumentNamed(name) => {
                                        if *p == name {
                                            seen = true;
                                            all_simple &= arg.format == SIMPLE;
                                        }
                                    },
                                }
                            }
                            if all_simple && seen {
                                span_lint(cx, lint, rhs.span, "literal with an empty format string");
                            }
                        }
                    }
                },
                _ => idx += 1,
            }
        }
    }
}
/// Checks if the format string contains a single newline that terminates it.
///
/// Literal and escaped newlines are both checked (only literal for raw strings).
fn check_newlines(fmtstr: &StrLit) -> bool {
    let mut has_internal_newline = false;
    let mut last_was_cr = false;
    let mut should_lint = false;
    let contents = &fmtstr.symbol.as_str();
    // Walk the unescaped characters; lint only when the final character is a
    // `\n` that is neither part of a `\r\n` pair nor preceded by an earlier
    // newline anywhere in the string.
    let mut cb = |r: Range<usize>, c: Result<char, EscapeError>| {
        // The literal was accepted by the parser before reaching this lint, so
        // escapes are presumed valid here — hence the `unwrap`.
        let c = c.unwrap();
        if r.end == contents.len() && c == '\n' && !last_was_cr && !has_internal_newline {
            should_lint = true;
        } else {
            last_was_cr = c == '\r';
            if c == '\n' {
                has_internal_newline = true;
            }
        }
    };
    match fmtstr.style {
        StrStyle::Cooked => unescape::unescape_literal(contents, unescape::Mode::Str, &mut cb),
        StrStyle::Raw(_) => unescape::unescape_literal(contents, unescape::Mode::RawStr, &mut cb),
    }
    should_lint
}
|
use std::borrow::Cow;
use std::ops::Range;
use crate::utils::{snippet_with_applicability, span_lint, span_lint_and_sugg, span_lint_and_then};
use if_chain::if_chain;
use rustc_ast::ast::{Expr, ExprKind, Item, ItemKind, LitKind, MacCall, StrLit, StrStyle};
use rustc_ast::token;
use rustc_ast::tokenstream::TokenStream;
use rustc_errors::Applicability;
use rustc_lexer::unescape::{self, EscapeError};
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_parse::parser;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::{sym, BytePos, Span, Symbol};
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `println!("")` to
    /// print a newline.
    ///
    /// **Why is this bad?** You should use `println!()`, which is simpler.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// // Bad
    /// println!("");
    ///
    /// // Good
    /// println!();
    /// ```
    pub PRINTLN_EMPTY_STRING,
    style,
    "using `println!(\"\")` with an empty string"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `print!()` with a format
    /// string that ends in a newline.
    ///
    /// **Why is this bad?** You should use `println!()` instead, which appends the
    /// newline.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # let name = "World";
    /// print!("Hello {}!\n", name);
    /// ```
    /// Use `println!()` instead:
    /// ```rust
    /// # let name = "World";
    /// println!("Hello {}!", name);
    /// ```
    pub PRINT_WITH_NEWLINE,
    style,
    "using `print!()` with a format string that ends in a single newline"
}
declare_clippy_lint! {
    /// **What it does:** Checks for printing on *stdout*. The purpose of this lint
    /// is to catch debugging remnants.
    ///
    /// **Why is this bad?** People often print on *stdout* while debugging an
    /// application and might forget to remove those prints afterward.
    ///
    /// **Known problems:** Only catches `print!` and `println!` calls.
    ///
    /// **Example:**
    /// ```rust
    /// println!("Hello world!");
    /// ```
    pub PRINT_STDOUT,
    restriction,
    "printing on stdout"
}
declare_clippy_lint! {
    /// **What it does:** Checks for use of `Debug` formatting. The purpose of this
    /// lint is to catch debugging remnants.
    ///
    /// **Why is this bad?** The purpose of the `Debug` trait is to facilitate
    /// debugging Rust code. It should not be used in user-facing output.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # let foo = "bar";
    /// println!("{:?}", foo);
    /// ```
    pub USE_DEBUG,
    restriction,
    "use of `Debug`-based formatting"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns about the use of literals as `print!`/`println!` args.
    ///
    /// **Why is this bad?** Using literals as `println!` args is inefficient
    /// (c.f., https://github.com/matthiaskrgr/rust-str-bench) and unnecessary
    /// (i.e., just put the literal in the format string)
    ///
    /// **Known problems:** Will also warn with macro calls as arguments that expand to literals
    /// -- e.g., `println!("{}", env!("FOO"))`.
    ///
    /// **Example:**
    /// ```rust
    /// println!("{}", "foo");
    /// ```
    /// Use the literal without formatting:
    /// ```rust
    /// println!("foo");
    /// ```
    pub PRINT_LITERAL,
    style,
    "printing a literal with a format string"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `writeln!(buf, "")` to
    /// print a newline.
    ///
    /// **Why is this bad?** You should use `writeln!(buf)`, which is simpler.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    ///
    /// // Bad
    /// writeln!(buf, "");
    ///
    /// // Good
    /// writeln!(buf);
    /// ```
    pub WRITELN_EMPTY_STRING,
    style,
    "using `writeln!(buf, \"\")` with an empty string"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `write!()` with a format
    /// string that ends in a newline.
    ///
    /// **Why is this bad?** You should use `writeln!()` instead, which appends the
    /// newline.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    /// # let name = "World";
    ///
    /// // Bad
    /// write!(buf, "Hello {}!\n", name);
    ///
    /// // Good
    /// writeln!(buf, "Hello {}!", name);
    /// ```
    pub WRITE_WITH_NEWLINE,
    style,
    "using `write!()` with a format string that ends in a single newline"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns about the use of literals as `write!`/`writeln!` args.
    ///
    /// **Why is this bad?** Using literals as `writeln!` args is inefficient
    /// (c.f., https://github.com/matthiaskrgr/rust-str-bench) and unnecessary
    /// (i.e., just put the literal in the format string)
    ///
    /// **Known problems:** Will also warn with macro calls as arguments that expand to literals
    /// -- e.g., `writeln!(buf, "{}", env!("FOO"))`.
    ///
    /// **Example:**
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    ///
    /// // Bad
    /// writeln!(buf, "{}", "foo");
    ///
    /// // Good
    /// writeln!(buf, "foo");
    /// ```
    pub WRITE_LITERAL,
    style,
    "writing a literal with a format string"
}
/// Lint pass state shared across the `check_*` callbacks below.
#[derive(Default)]
pub struct Write {
    // `true` while visiting an `impl … Debug for …` item; `{:?}` is expected
    // there, so `check_tts` suppresses `USE_DEBUG` when this is set.
    in_debug_impl: bool,
}
impl_lint_pass!(Write => [
    PRINT_WITH_NEWLINE,
    PRINTLN_EMPTY_STRING,
    PRINT_STDOUT,
    USE_DEBUG,
    PRINT_LITERAL,
    WRITE_WITH_NEWLINE,
    WRITELN_EMPTY_STRING,
    WRITE_LITERAL
]);
impl EarlyLintPass for Write {
    // Set `in_debug_impl` when entering a trait impl whose trait path ends in
    // `Debug`; `check_tts` reads the flag to suppress `USE_DEBUG` inside
    // `Debug` implementations.
    fn check_item(&mut self, _: &EarlyContext<'_>, item: &Item) {
        if let ItemKind::Impl {
            of_trait: Some(trait_ref),
            ..
        } = &item.kind
        {
            // Only the final path segment is compared, so `Debug`,
            // `fmt::Debug` and `std::fmt::Debug` are all recognized.
            let trait_name = trait_ref
                .path
                .segments
                .iter()
                .last()
                .expect("path has at least one segment")
                .ident
                .name;
            if trait_name == sym::Debug {
                self.in_debug_impl = true;
            }
        }
    }
    // Leaving any item clears the flag again.
    fn check_item_post(&mut self, _: &EarlyContext<'_>, _: &Item) {
        self.in_debug_impl = false;
    }
    // Dispatch on the macro name and run the relevant formatting lints.
    fn check_mac(&mut self, cx: &EarlyContext<'_>, mac: &MacCall) {
        fn is_build_script(cx: &EarlyContext<'_>) -> bool {
            // Cargo sets the crate name for build scripts to `build_script_build`
            cx.sess
                .opts
                .crate_name
                .as_ref()
                .map_or(false, |crate_name| crate_name == "build_script_build")
        }
        if mac.path == sym!(println) {
            // Build scripts legitimately print to stdout (cargo directives),
            // so `PRINT_STDOUT` is skipped for them.
            if !is_build_script(cx) {
                span_lint(cx, PRINT_STDOUT, mac.span(), "use of `println!`");
            }
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), false) {
                // `println!("")` -> `println!()`
                if fmt_str.symbol == Symbol::intern("") {
                    span_lint_and_sugg(
                        cx,
                        PRINTLN_EMPTY_STRING,
                        mac.span(),
                        "using `println!(\"\")`",
                        "replace it with",
                        "println!()".to_string(),
                        Applicability::MachineApplicable,
                    );
                }
            }
        } else if mac.path == sym!(print) {
            if !is_build_script(cx) {
                span_lint(cx, PRINT_STDOUT, mac.span(), "use of `print!`");
            }
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), false) {
                // `print!("…\n")` -> `println!("…")`; the suggestion renames the
                // macro and removes the trailing newline from the format string.
                if check_newlines(&fmt_str) {
                    span_lint_and_then(
                        cx,
                        PRINT_WITH_NEWLINE,
                        mac.span(),
                        "using `print!()` with a format string that ends in a single newline",
                        |err| {
                            err.multipart_suggestion(
                                "use `println!` instead",
                                vec![
                                    (mac.path.span, String::from("println")),
                                    (newline_span(&fmt_str), String::new()),
                                ],
                                Applicability::MachineApplicable,
                            );
                        },
                    );
                }
            }
        } else if mac.path == sym!(write) {
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), true) {
                // `write!(buf, "…\n")` -> `writeln!(buf, "…")`
                if check_newlines(&fmt_str) {
                    span_lint_and_then(
                        cx,
                        WRITE_WITH_NEWLINE,
                        mac.span(),
                        "using `write!()` with a format string that ends in a single newline",
                        |err| {
                            err.multipart_suggestion(
                                "use `writeln!()` instead",
                                vec![
                                    (mac.path.span, String::from("writeln")),
                                    (newline_span(&fmt_str), String::new()),
                                ],
                                Applicability::MachineApplicable,
                            );
                        },
                    )
                }
            }
        } else if mac.path == sym!(writeln) {
            if let (Some(fmt_str), expr) = self.check_tts(cx, mac.args.inner_tokens(), true) {
                // `writeln!(buf, "")` -> `writeln!(buf)`
                if fmt_str.symbol == Symbol::intern("") {
                    let mut applicability = Applicability::MachineApplicable;
                    // FIXME: remove this `#[allow(...)]` once the issue #5822 gets fixed
                    #[allow(clippy::option_if_let_else)]
                    let suggestion = if let Some(e) = expr {
                        snippet_with_applicability(cx, e.span, "v", &mut applicability)
                    } else {
                        // The receiver expression could not be recovered; fall
                        // back to a placeholder and mark the suggestion as such.
                        applicability = Applicability::HasPlaceholders;
                        Cow::Borrowed("v")
                    };
                    span_lint_and_sugg(
                        cx,
                        WRITELN_EMPTY_STRING,
                        mac.span(),
                        format!("using `writeln!({}, \"\")`", suggestion).as_str(),
                        "replace it with",
                        format!("writeln!({})", suggestion),
                        applicability,
                    );
                }
            }
        }
    }
}
/// Given a format string that ends in a newline and its span, calculates the span of the
/// newline, or the format string itself if the format string consists solely of a newline.
fn newline_span(fmtstr: &StrLit) -> Span {
    let sp = fmtstr.span;
    let contents = &fmtstr.symbol.as_str();
    // The symbol holds the *source* text, so a cooked `"\n"` literal is the two
    // characters `\` `n` here. A newline-only string has nothing to keep, so
    // return the whole literal span.
    if *contents == r"\n" {
        return sp;
    }
    // The newline ends just before the closing quote (plus the trailing `#`s
    // for raw strings).
    let newline_sp_hi = sp.hi()
        - match fmtstr.style {
            StrStyle::Cooked => BytePos(1),
            StrStyle::Raw(hashes) => BytePos((1 + hashes).into()),
        };
    // A literal newline is one byte in the source; the escape `\n` is two.
    let newline_sp_len = if contents.ends_with('\n') {
        BytePos(1)
    } else if contents.ends_with(r"\n") {
        BytePos(2)
    } else {
        panic!("expected format string to contain a newline");
    };
    sp.with_lo(newline_sp_hi - newline_sp_len).with_hi(newline_sp_hi)
}
impl Write {
    /// Checks the arguments of `print[ln]!` and `write[ln]!` calls. It will return a tuple of two
    /// `Option`s. The first `Option` of the tuple is the macro's format string. It includes
    /// the contents of the string, whether it's a raw string, and the span of the literal in the
    /// source. The second `Option` in the tuple is, in the `write[ln]!` case, the expression the
    /// `format_str` should be written to.
    ///
    /// Example:
    ///
    /// Calling this function on
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    /// # let something = "something";
    /// writeln!(buf, "string to write: {}", something);
    /// ```
    /// will return
    /// ```rust,ignore
    /// (Some("string to write: {}"), Some(buf))
    /// ```
    #[allow(clippy::too_many_lines)]
    fn check_tts<'a>(&self, cx: &EarlyContext<'a>, tts: TokenStream, is_write: bool) -> (Option<StrLit>, Option<Expr>) {
        use rustc_parse_format::{
            AlignUnknown, ArgumentImplicitlyIs, ArgumentIs, ArgumentNamed, CountImplied, FormatSpec, ParseMode, Parser,
            Piece,
        };
        let mut parser = parser::Parser::new(&cx.sess.parse_sess, tts, false, None);
        let mut expr: Option<Expr> = None;
        if is_write {
            // `write[ln]!` has a receiver expression before the format string.
            expr = match parser.parse_expr().map_err(|mut err| err.cancel()) {
                Ok(p) => Some(p.into_inner()),
                Err(_) => return (None, None),
            };
            // might be `writeln!(foo)`
            if parser.expect(&token::Comma).map_err(|mut err| err.cancel()).is_err() {
                return (None, expr);
            }
        }
        let fmtstr = match parser.parse_str_lit() {
            Ok(fmtstr) => fmtstr,
            Err(_) => return (None, expr),
        };
        let tmp = fmtstr.symbol.as_str();
        let mut args = vec![];
        // Collect every `{…}` argument of the format string, flagging
        // `{:?}` uses outside of `Debug` impls along the way.
        let mut fmt_parser = Parser::new(&tmp, None, None, false, ParseMode::Format);
        while let Some(piece) = fmt_parser.next() {
            if !fmt_parser.errors.is_empty() {
                return (None, expr);
            }
            if let Piece::NextArgument(arg) = piece {
                if !self.in_debug_impl && arg.format.ty == "?" {
                    // FIXME: modify rustc's fmt string parser to give us the current span
                    span_lint(cx, USE_DEBUG, parser.prev_token.span, "use of `Debug`-based formatting");
                }
                args.push(arg);
            }
        }
        let lint = if is_write { WRITE_LITERAL } else { PRINT_LITERAL };
        let mut idx = 0;
        loop {
            const SIMPLE: FormatSpec<'_> = FormatSpec {
                fill: None,
                align: AlignUnknown,
                flags: 0,
                precision: CountImplied,
                precision_span: None,
                width: CountImplied,
                width_span: None,
                ty: "",
                ty_span: None,
            };
            if !parser.eat(&token::Comma) {
                return (Some(fmtstr), expr);
            }
            let token_expr = if let Ok(expr) = parser.parse_expr().map_err(|mut err| err.cancel()) {
                expr
            } else {
                return (Some(fmtstr), None);
            };
            match &token_expr.kind {
                // Only lint NON-numeric literals: moving e.g. `42` into the
                // format string would turn it into the *string* `"42"`, so
                // `Int`/`Float` literals must be excluded — hence the negation,
                // which was dropped when the guard was rewritten with `matches!`.
                ExprKind::Lit(lit) if !matches!(lit.kind, LitKind::Int(..) | LitKind::Float(..)) => {
                    let mut all_simple = true;
                    let mut seen = false;
                    for arg in &args {
                        match arg.position {
                            ArgumentImplicitlyIs(n) | ArgumentIs(n) => {
                                if n == idx {
                                    // Only suggest inlining when the `{}` has no
                                    // width/precision/fill/type modifiers.
                                    all_simple &= arg.format == SIMPLE;
                                    seen = true;
                                }
                            },
                            ArgumentNamed(_) => {},
                        }
                    }
                    if all_simple && seen {
                        span_lint(cx, lint, token_expr.span, "literal with an empty format string");
                    }
                    idx += 1;
                },
                // Named argument: `writeln!(buf, "{foo}", foo = "bar")`.
                ExprKind::Assign(lhs, rhs, _) => {
                    if_chain! {
                        if let ExprKind::Lit(ref lit) = rhs.kind;
                        // Same negation fix as above: skip numeric literals.
                        if !matches!(lit.kind, LitKind::Int(..) | LitKind::Float(..));
                        if let ExprKind::Path(_, p) = &lhs.kind;
                        then {
                            let mut all_simple = true;
                            let mut seen = false;
                            for arg in &args {
                                match arg.position {
                                    ArgumentImplicitlyIs(_) | ArgumentIs(_) => {},
                                    ArgumentNamed(name) => {
                                        if *p == name {
                                            seen = true;
                                            all_simple &= arg.format == SIMPLE;
                                        }
                                    },
                                }
                            }
                            if all_simple && seen {
                                span_lint(cx, lint, rhs.span, "literal with an empty format string");
                            }
                        }
                    }
                },
                _ => idx += 1,
            }
        }
    }
}
/// Checks if the format string contains a single newline that terminates it.
///
/// Literal and escaped newlines are both checked (only literal for raw strings).
fn check_newlines(fmtstr: &StrLit) -> bool {
    let mut has_internal_newline = false;
    let mut last_was_cr = false;
    let mut should_lint = false;
    let contents = &fmtstr.symbol.as_str();
    // Walk the unescaped characters; lint only when the final character is a
    // `\n` that is neither part of a `\r\n` pair nor preceded by an earlier
    // newline anywhere in the string.
    let mut cb = |r: Range<usize>, c: Result<char, EscapeError>| {
        // The literal was accepted by the parser before reaching this lint, so
        // escapes are presumed valid here — hence the `unwrap`.
        let c = c.unwrap();
        if r.end == contents.len() && c == '\n' && !last_was_cr && !has_internal_newline {
            should_lint = true;
        } else {
            last_was_cr = c == '\r';
            if c == '\n' {
                has_internal_newline = true;
            }
        }
    };
    match fmtstr.style {
        StrStyle::Cooked => unescape::unescape_literal(contents, unescape::Mode::Str, &mut cb),
        StrStyle::Raw(_) => unescape::unescape_literal(contents, unescape::Mode::RawStr, &mut cb),
    }
    should_lint
}
Negate results of matches!
use std::borrow::Cow;
use std::ops::Range;
use crate::utils::{snippet_with_applicability, span_lint, span_lint_and_sugg, span_lint_and_then};
use if_chain::if_chain;
use rustc_ast::ast::{Expr, ExprKind, Item, ItemKind, LitKind, MacCall, StrLit, StrStyle};
use rustc_ast::token;
use rustc_ast::tokenstream::TokenStream;
use rustc_errors::Applicability;
use rustc_lexer::unescape::{self, EscapeError};
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_parse::parser;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::{sym, BytePos, Span, Symbol};
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `println!("")` to
    /// print a newline.
    ///
    /// **Why is this bad?** You should use `println!()`, which is simpler.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// // Bad
    /// println!("");
    ///
    /// // Good
    /// println!();
    /// ```
    pub PRINTLN_EMPTY_STRING,
    style,
    "using `println!(\"\")` with an empty string"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `print!()` with a format
    /// string that ends in a newline.
    ///
    /// **Why is this bad?** You should use `println!()` instead, which appends the
    /// newline.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # let name = "World";
    /// print!("Hello {}!\n", name);
    /// ```
    /// Use `println!()` instead:
    /// ```rust
    /// # let name = "World";
    /// println!("Hello {}!", name);
    /// ```
    pub PRINT_WITH_NEWLINE,
    style,
    "using `print!()` with a format string that ends in a single newline"
}
declare_clippy_lint! {
    /// **What it does:** Checks for printing on *stdout*. The purpose of this lint
    /// is to catch debugging remnants.
    ///
    /// **Why is this bad?** People often print on *stdout* while debugging an
    /// application and might forget to remove those prints afterward.
    ///
    /// **Known problems:** Only catches `print!` and `println!` calls.
    ///
    /// **Example:**
    /// ```rust
    /// println!("Hello world!");
    /// ```
    pub PRINT_STDOUT,
    restriction,
    "printing on stdout"
}
declare_clippy_lint! {
    /// **What it does:** Checks for use of `Debug` formatting. The purpose of this
    /// lint is to catch debugging remnants.
    ///
    /// **Why is this bad?** The purpose of the `Debug` trait is to facilitate
    /// debugging Rust code. It should not be used in user-facing output.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # let foo = "bar";
    /// println!("{:?}", foo);
    /// ```
    pub USE_DEBUG,
    restriction,
    "use of `Debug`-based formatting"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns about the use of literals as `print!`/`println!` args.
    ///
    /// **Why is this bad?** Using literals as `println!` args is inefficient
    /// (c.f., https://github.com/matthiaskrgr/rust-str-bench) and unnecessary
    /// (i.e., just put the literal in the format string)
    ///
    /// **Known problems:** Will also warn with macro calls as arguments that expand to literals
    /// -- e.g., `println!("{}", env!("FOO"))`.
    ///
    /// **Example:**
    /// ```rust
    /// println!("{}", "foo");
    /// ```
    /// Use the literal without formatting:
    /// ```rust
    /// println!("foo");
    /// ```
    pub PRINT_LITERAL,
    style,
    "printing a literal with a format string"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `writeln!(buf, "")` to
    /// print a newline.
    ///
    /// **Why is this bad?** You should use `writeln!(buf)`, which is simpler.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    ///
    /// // Bad
    /// writeln!(buf, "");
    ///
    /// // Good
    /// writeln!(buf);
    /// ```
    pub WRITELN_EMPTY_STRING,
    style,
    "using `writeln!(buf, \"\")` with an empty string"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns when you use `write!()` with a format
    /// string that ends in a newline.
    ///
    /// **Why is this bad?** You should use `writeln!()` instead, which appends the
    /// newline.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    /// # let name = "World";
    ///
    /// // Bad
    /// write!(buf, "Hello {}!\n", name);
    ///
    /// // Good
    /// writeln!(buf, "Hello {}!", name);
    /// ```
    pub WRITE_WITH_NEWLINE,
    style,
    "using `write!()` with a format string that ends in a single newline"
}
declare_clippy_lint! {
    /// **What it does:** This lint warns about the use of literals as `write!`/`writeln!` args.
    ///
    /// **Why is this bad?** Using literals as `writeln!` args is inefficient
    /// (c.f., https://github.com/matthiaskrgr/rust-str-bench) and unnecessary
    /// (i.e., just put the literal in the format string)
    ///
    /// **Known problems:** Will also warn with macro calls as arguments that expand to literals
    /// -- e.g., `writeln!(buf, "{}", env!("FOO"))`.
    ///
    /// **Example:**
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    ///
    /// // Bad
    /// writeln!(buf, "{}", "foo");
    ///
    /// // Good
    /// writeln!(buf, "foo");
    /// ```
    pub WRITE_LITERAL,
    style,
    "writing a literal with a format string"
}
/// Lint pass state shared across the `check_*` callbacks below.
#[derive(Default)]
pub struct Write {
    // `true` while visiting an `impl … Debug for …` item; `{:?}` is expected
    // there, so `check_tts` suppresses `USE_DEBUG` when this is set.
    in_debug_impl: bool,
}
impl_lint_pass!(Write => [
    PRINT_WITH_NEWLINE,
    PRINTLN_EMPTY_STRING,
    PRINT_STDOUT,
    USE_DEBUG,
    PRINT_LITERAL,
    WRITE_WITH_NEWLINE,
    WRITELN_EMPTY_STRING,
    WRITE_LITERAL
]);
impl EarlyLintPass for Write {
    // Set `in_debug_impl` when entering a trait impl whose trait path ends in
    // `Debug`; `check_tts` reads the flag to suppress `USE_DEBUG` inside
    // `Debug` implementations.
    fn check_item(&mut self, _: &EarlyContext<'_>, item: &Item) {
        if let ItemKind::Impl {
            of_trait: Some(trait_ref),
            ..
        } = &item.kind
        {
            // Only the final path segment is compared, so `Debug`,
            // `fmt::Debug` and `std::fmt::Debug` are all recognized.
            let trait_name = trait_ref
                .path
                .segments
                .iter()
                .last()
                .expect("path has at least one segment")
                .ident
                .name;
            if trait_name == sym::Debug {
                self.in_debug_impl = true;
            }
        }
    }
    // Leaving any item clears the flag again.
    fn check_item_post(&mut self, _: &EarlyContext<'_>, _: &Item) {
        self.in_debug_impl = false;
    }
    // Dispatch on the macro name and run the relevant formatting lints.
    fn check_mac(&mut self, cx: &EarlyContext<'_>, mac: &MacCall) {
        fn is_build_script(cx: &EarlyContext<'_>) -> bool {
            // Cargo sets the crate name for build scripts to `build_script_build`
            cx.sess
                .opts
                .crate_name
                .as_ref()
                .map_or(false, |crate_name| crate_name == "build_script_build")
        }
        if mac.path == sym!(println) {
            // Build scripts legitimately print to stdout (cargo directives),
            // so `PRINT_STDOUT` is skipped for them.
            if !is_build_script(cx) {
                span_lint(cx, PRINT_STDOUT, mac.span(), "use of `println!`");
            }
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), false) {
                // `println!("")` -> `println!()`
                if fmt_str.symbol == Symbol::intern("") {
                    span_lint_and_sugg(
                        cx,
                        PRINTLN_EMPTY_STRING,
                        mac.span(),
                        "using `println!(\"\")`",
                        "replace it with",
                        "println!()".to_string(),
                        Applicability::MachineApplicable,
                    );
                }
            }
        } else if mac.path == sym!(print) {
            if !is_build_script(cx) {
                span_lint(cx, PRINT_STDOUT, mac.span(), "use of `print!`");
            }
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), false) {
                // `print!("…\n")` -> `println!("…")`; the suggestion renames the
                // macro and removes the trailing newline from the format string.
                if check_newlines(&fmt_str) {
                    span_lint_and_then(
                        cx,
                        PRINT_WITH_NEWLINE,
                        mac.span(),
                        "using `print!()` with a format string that ends in a single newline",
                        |err| {
                            err.multipart_suggestion(
                                "use `println!` instead",
                                vec![
                                    (mac.path.span, String::from("println")),
                                    (newline_span(&fmt_str), String::new()),
                                ],
                                Applicability::MachineApplicable,
                            );
                        },
                    );
                }
            }
        } else if mac.path == sym!(write) {
            if let (Some(fmt_str), _) = self.check_tts(cx, mac.args.inner_tokens(), true) {
                // `write!(buf, "…\n")` -> `writeln!(buf, "…")`
                if check_newlines(&fmt_str) {
                    span_lint_and_then(
                        cx,
                        WRITE_WITH_NEWLINE,
                        mac.span(),
                        "using `write!()` with a format string that ends in a single newline",
                        |err| {
                            err.multipart_suggestion(
                                "use `writeln!()` instead",
                                vec![
                                    (mac.path.span, String::from("writeln")),
                                    (newline_span(&fmt_str), String::new()),
                                ],
                                Applicability::MachineApplicable,
                            );
                        },
                    )
                }
            }
        } else if mac.path == sym!(writeln) {
            if let (Some(fmt_str), expr) = self.check_tts(cx, mac.args.inner_tokens(), true) {
                // `writeln!(buf, "")` -> `writeln!(buf)`
                if fmt_str.symbol == Symbol::intern("") {
                    let mut applicability = Applicability::MachineApplicable;
                    // FIXME: remove this `#[allow(...)]` once the issue #5822 gets fixed
                    #[allow(clippy::option_if_let_else)]
                    let suggestion = if let Some(e) = expr {
                        snippet_with_applicability(cx, e.span, "v", &mut applicability)
                    } else {
                        // The receiver expression could not be recovered; fall
                        // back to a placeholder and mark the suggestion as such.
                        applicability = Applicability::HasPlaceholders;
                        Cow::Borrowed("v")
                    };
                    span_lint_and_sugg(
                        cx,
                        WRITELN_EMPTY_STRING,
                        mac.span(),
                        format!("using `writeln!({}, \"\")`", suggestion).as_str(),
                        "replace it with",
                        format!("writeln!({})", suggestion),
                        applicability,
                    );
                }
            }
        }
    }
}
/// Given a format string that ends in a newline and its span, calculates the span of the
/// newline, or the format string itself if the format string consists solely of a newline.
fn newline_span(fmtstr: &StrLit) -> Span {
    let sp = fmtstr.span;
    let contents = &fmtstr.symbol.as_str();
    // Format string is nothing but the (escaped) newline itself: the whole
    // literal is the newline span.
    if *contents == r"\n" {
        return sp;
    }
    // End of the newline = end of the literal minus the closing delimiter:
    // one byte for the closing `"`, plus the trailing `#`s for raw strings.
    let newline_sp_hi = sp.hi()
        - match fmtstr.style {
            StrStyle::Cooked => BytePos(1),
            StrStyle::Raw(hashes) => BytePos((1 + hashes).into()),
        };
    // A literal newline occupies one source byte; the escape sequence `\n`
    // occupies two.
    let newline_sp_len = if contents.ends_with('\n') {
        BytePos(1)
    } else if contents.ends_with(r"\n") {
        BytePos(2)
    } else {
        // Callers only invoke this after `check_newlines` returned true.
        panic!("expected format string to contain a newline");
    };
    sp.with_lo(newline_sp_hi - newline_sp_len).with_hi(newline_sp_hi)
}
impl Write {
    /// Checks the arguments of `print[ln]!` and `write[ln]!` calls. It will return a tuple of two
    /// `Option`s. The first `Option` of the tuple is the macro's format string. It includes
    /// the contents of the string, whether it's a raw string, and the span of the literal in the
    /// source. The second `Option` in the tuple is, in the `write[ln]!` case, the expression the
    /// `format_str` should be written to.
    ///
    /// Example:
    ///
    /// Calling this function on
    /// ```rust
    /// # use std::fmt::Write;
    /// # let mut buf = String::new();
    /// # let something = "something";
    /// writeln!(buf, "string to write: {}", something);
    /// ```
    /// will return
    /// ```rust,ignore
    /// (Some("string to write: {}"), Some(buf))
    /// ```
    #[allow(clippy::too_many_lines)]
    fn check_tts<'a>(&self, cx: &EarlyContext<'a>, tts: TokenStream, is_write: bool) -> (Option<StrLit>, Option<Expr>) {
        use rustc_parse_format::{
            AlignUnknown, ArgumentImplicitlyIs, ArgumentIs, ArgumentNamed, CountImplied, FormatSpec, ParseMode, Parser,
            Piece,
        };
        // Re-parse the macro's raw token stream with a fresh AST parser.
        let mut parser = parser::Parser::new(&cx.sess.parse_sess, tts, false, None);
        let mut expr: Option<Expr> = None;
        if is_write {
            // `write[ln]!` takes a destination expression before the format string.
            expr = match parser.parse_expr().map_err(|mut err| err.cancel()) {
                Ok(p) => Some(p.into_inner()),
                Err(_) => return (None, None),
            };
            // might be `writeln!(foo)`
            if parser.expect(&token::Comma).map_err(|mut err| err.cancel()).is_err() {
                return (None, expr);
            }
        }
        let fmtstr = match parser.parse_str_lit() {
            Ok(fmtstr) => fmtstr,
            Err(_) => return (None, expr),
        };
        let tmp = fmtstr.symbol.as_str();
        let mut args = vec![];
        // Walk the format string itself, collecting each `{...}` argument piece
        // and flagging `{:?}` usage outside `Debug` impls.
        let mut fmt_parser = Parser::new(&tmp, None, None, false, ParseMode::Format);
        while let Some(piece) = fmt_parser.next() {
            if !fmt_parser.errors.is_empty() {
                return (None, expr);
            }
            if let Piece::NextArgument(arg) = piece {
                if !self.in_debug_impl && arg.format.ty == "?" {
                    // FIXME: modify rustc's fmt string parser to give us the current span
                    span_lint(cx, USE_DEBUG, parser.prev_token.span, "use of `Debug`-based formatting");
                }
                args.push(arg);
            }
        }
        let lint = if is_write { WRITE_LITERAL } else { PRINT_LITERAL };
        // Walk the remaining comma-separated macro arguments, tracking the
        // positional index to match them against the collected format pieces.
        let mut idx = 0;
        loop {
            // A `{}`/`{name}` piece with no fill/align/width/precision/type:
            // only such pieces can safely inline a literal argument.
            const SIMPLE: FormatSpec<'_> = FormatSpec {
                fill: None,
                align: AlignUnknown,
                flags: 0,
                precision: CountImplied,
                precision_span: None,
                width: CountImplied,
                width_span: None,
                ty: "",
                ty_span: None,
            };
            if !parser.eat(&token::Comma) {
                // No more arguments: normal exit path.
                return (Some(fmtstr), expr);
            }
            let token_expr = if let Ok(expr) = parser.parse_expr().map_err(|mut err| err.cancel()) {
                expr
            } else {
                return (Some(fmtstr), None);
            };
            match &token_expr.kind {
                // Positional literal argument (strings/bools/chars — int and
                // float literals are excluded).
                ExprKind::Lit(lit) if !matches!(lit.kind, LitKind::Int(..) | LitKind::Float(..)) => {
                    let mut all_simple = true;
                    let mut seen = false;
                    for arg in &args {
                        match arg.position {
                            ArgumentImplicitlyIs(n) | ArgumentIs(n) => {
                                if n == idx {
                                    all_simple &= arg.format == SIMPLE;
                                    seen = true;
                                }
                            },
                            ArgumentNamed(_) => {},
                        }
                    }
                    if all_simple && seen {
                        span_lint(cx, lint, token_expr.span, "literal with an empty format string");
                    }
                    idx += 1
                }
                // Named literal argument: `name = <literal>`.
                ExprKind::Assign(lhs, rhs, _) => {
                    if_chain! {
                        if let ExprKind::Lit(ref lit) = rhs.kind;
                        if !matches!(lit.kind, LitKind::Int(..) | LitKind::Float(..));
                        if let ExprKind::Path(_, p) = &lhs.kind;
                        then {
                            let mut all_simple = true;
                            let mut seen = false;
                            for arg in &args {
                                match arg.position {
                                    ArgumentImplicitlyIs(_) | ArgumentIs(_) => {},
                                    ArgumentNamed(name) => {
                                        if *p == name {
                                            seen = true;
                                            all_simple &= arg.format == SIMPLE;
                                        }
                                    },
                                }
                            }
                            if all_simple && seen {
                                span_lint(cx, lint, rhs.span, "literal with an empty format string");
                            }
                        }
                    }
                },
                // Non-literal positional argument: just advance the index.
                _ => idx += 1,
            }
        }
    }
}
/// Checks if the format string contains a single newline that terminates it.
///
/// Literal and escaped newlines are both checked (only literal for raw strings).
fn check_newlines(fmtstr: &StrLit) -> bool {
    let mut has_internal_newline = false;
    let mut last_was_cr = false;
    let mut should_lint = false;
    let contents = &fmtstr.symbol.as_str();
    // Walk the unescaped characters: lint only when the FINAL character is a
    // lone `\n` — not the `\n` of a `\r\n` pair — and no newline occurred
    // earlier in the string.
    let mut cb = |r: Range<usize>, c: Result<char, EscapeError>| {
        // The literal already parsed, so unescaping cannot fail here.
        let c = c.unwrap();
        if r.end == contents.len() && c == '\n' && !last_was_cr && !has_internal_newline {
            should_lint = true;
        } else {
            last_was_cr = c == '\r';
            if c == '\n' {
                has_internal_newline = true;
            }
        }
    };
    // Raw strings have no escapes, so only literal newlines are seen there.
    match fmtstr.style {
        StrStyle::Cooked => unescape::unescape_literal(contents, unescape::Mode::Str, &mut cb),
        StrStyle::Raw(_) => unescape::unescape_literal(contents, unescape::Mode::RawStr, &mut cb),
    }
    should_lint
}
|
use neon::{
prelude::*,
types::buffer::{Binary, BorrowError, TypedArray},
};
/// Allocates and returns a fresh 16-byte JS `ArrayBuffer`.
pub fn return_array_buffer(mut cx: FunctionContext) -> JsResult<JsArrayBuffer> {
    // `array_buffer` already yields the `JsResult`; no intermediate binding needed.
    cx.array_buffer(16)
}
/// Builds a JS `ArrayBuffer` of the requested length whose bytes are
/// `0, 1, 2, …` (wrapping at 256 via the `as u8` cast, exactly as the old
/// push loop did).
///
/// Simplified to an iterator pipeline for consistency with the revised
/// version of this test elsewhere in the file.
pub fn return_array_buffer_from_slice(mut cx: FunctionContext) -> JsResult<JsArrayBuffer> {
    let len = cx.argument::<JsNumber>(0)?.value(&mut cx) as usize;
    let v = (0..len).map(|i| i as u8).collect::<Vec<_>>();
    JsArrayBuffer::from_slice(&mut cx, &v)
}
/// Reads element `i` (argument 1) of a `Uint32Array` (argument 0) using the
/// lock/`try_borrow` API, returning it as a JS number.
pub fn read_array_buffer_with_lock(mut cx: FunctionContext) -> JsResult<JsNumber> {
    let buf = cx.argument::<JsTypedArray<u32>>(0)?;
    let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
    let lock = cx.lock();
    // A conflicting mutable borrow makes `try_borrow` fail; `or_throw`
    // converts that failure into a thrown JS error instead of a panic.
    let n = buf.try_borrow(&lock).map(|buf| buf[i]).or_throw(&mut cx)?;
    Ok(cx.number(n))
}
/// Reads one byte from an `ArrayBuffer` via the infallible `as_slice` borrow
/// and returns it as a JS number.
pub fn read_array_buffer_with_borrow(mut cx: FunctionContext) -> JsResult<JsNumber> {
    let buf = cx.argument::<JsArrayBuffer>(0)?;
    let index = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
    let byte = buf.as_slice(&cx)[index];
    Ok(cx.number(f64::from(byte)))
}
/// Writes byte `x` (argument 2) at index `i` (argument 1) of the given
/// `ArrayBuffer`, using the lock/`try_borrow_mut` API.
pub fn write_array_buffer_with_lock(mut cx: FunctionContext) -> JsResult<JsUndefined> {
    let mut b: Handle<JsArrayBuffer> = cx.argument(0)?;
    let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as u32 as usize;
    let x = cx.argument::<JsNumber>(2)?.value(&mut cx) as u8;
    let lock = cx.lock();
    // A conflicting borrow becomes a thrown JS error rather than a panic.
    b.try_borrow_mut(&lock)
        .map(|mut slice| {
            slice[i] = x;
        })
        .or_throw(&mut cx)?;
    Ok(cx.undefined())
}
pub fn write_array_buffer_with_borrow_mut(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let mut buf = cx.argument::<JsArrayBuffer>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = cx.argument::<JsNumber>(2)?.value(&mut cx) as u8;
buf.as_mut_slice(&mut cx)[i] = n;
Ok(cx.undefined())
}
pub fn read_typed_array_with_borrow(mut cx: FunctionContext) -> JsResult<JsNumber> {
let buf = cx.argument::<JsTypedArray<i32>>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = buf.as_slice(&cx)[i];
Ok(cx.number(n as f64))
}
pub fn write_typed_array_with_borrow_mut(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let mut buf = cx.argument::<JsTypedArray<i32>>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = cx.argument::<JsNumber>(2)?.value(&mut cx) as i32;
buf.as_mut_slice(&mut cx)[i] = n;
Ok(cx.undefined())
}
pub fn read_u8_typed_array(mut cx: FunctionContext) -> JsResult<JsNumber> {
let buf = cx.argument::<JsTypedArray<u8>>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = buf.as_slice(&cx)[i];
Ok(cx.number(n as f64))
}
/// Copies the contents of one `Uint32Array` (argument 0) into another
/// (argument 1) under a single VM lock.
pub fn copy_typed_array(mut cx: FunctionContext) -> JsResult<JsUndefined> {
    let source = cx.argument::<JsTypedArray<u32>>(0)?;
    let mut dest = cx.argument::<JsTypedArray<u32>>(1)?;
    // The closure funnels both fallible borrows into one `BorrowError`,
    // which is rethrown as a JS exception below.
    let mut run = || -> Result<_, BorrowError> {
        let lock = cx.lock();
        let source = source.try_borrow(&lock)?;
        let mut dest = dest.try_borrow_mut(&lock)?;
        // NOTE(review): `copy_from_slice` panics if the arrays differ in
        // length — presumably the JS caller guarantees equal lengths; confirm.
        dest.copy_from_slice(&source);
        Ok(())
    };
    run().or_throw(&mut cx)?;
    Ok(cx.undefined())
}
/// Returns a 16-byte `Buffer` whose contents are deliberately left
/// uninitialized.
pub fn return_uninitialized_buffer(mut cx: FunctionContext) -> JsResult<JsBuffer> {
    // SAFETY: the bytes are garbage until written; callers must not rely on
    // their values — presumably the JS test only inspects the length; confirm.
    let b: Handle<JsBuffer> = unsafe { JsBuffer::uninitialized(&mut cx, 16)? };
    Ok(b)
}
pub fn return_buffer(mut cx: FunctionContext) -> JsResult<JsBuffer> {
let b: Handle<JsBuffer> = cx.buffer(16)?;
Ok(b)
}
pub fn return_external_buffer(mut cx: FunctionContext) -> JsResult<JsBuffer> {
let data = cx.argument::<JsString>(0)?.value(&mut cx);
let buf = JsBuffer::external(&mut cx, data.into_bytes());
Ok(buf)
}
pub fn return_external_array_buffer(mut cx: FunctionContext) -> JsResult<JsArrayBuffer> {
let data = cx.argument::<JsString>(0)?.value(&mut cx);
let buf = JsArrayBuffer::external(&mut cx, data.into_bytes());
Ok(buf)
}
pub fn return_int8array_from_arraybuffer(mut cx: FunctionContext) -> JsResult<JsInt8Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
JsInt8Array::from_buffer(&mut cx, buf)
}
pub fn return_int16array_from_arraybuffer(mut cx: FunctionContext) -> JsResult<JsInt16Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
JsInt16Array::from_buffer(&mut cx, buf)
}
pub fn return_uint32array_from_arraybuffer(mut cx: FunctionContext) -> JsResult<JsUint32Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
JsUint32Array::from_buffer(&mut cx, buf)
}
pub fn return_float64array_from_arraybuffer(mut cx: FunctionContext) -> JsResult<JsFloat64Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
JsFloat64Array::from_buffer(&mut cx, buf)
}
pub fn return_biguint64array_from_arraybuffer(
mut cx: FunctionContext,
) -> JsResult<JsBigUint64Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
JsBigUint64Array::from_buffer(&mut cx, buf)
}
pub fn return_new_int32array(mut cx: FunctionContext) -> JsResult<JsInt32Array> {
let len = cx.argument::<JsNumber>(0)?.value(&mut cx) as usize;
JsInt32Array::new(&mut cx, len)
}
/// Builds an `Int32Array` of the requested length containing `0, 1, 2, …`.
///
/// Simplified with an iterator pipeline — the same cleanup the
/// "Simplify test logic with an iterator" revision applied to
/// `return_array_buffer_from_slice` but left undone here.
pub fn return_int32array_from_slice(mut cx: FunctionContext) -> JsResult<JsInt32Array> {
    let len = cx.argument::<JsNumber>(0)?.value(&mut cx) as usize;
    // `as i32` wraps for very large lengths exactly as the old push loop did.
    let v = (0..len).map(|i| i as i32).collect::<Vec<_>>();
    JsInt32Array::from_slice(&mut cx, &v)
}
pub fn return_uint32array_from_arraybuffer_region(
mut cx: FunctionContext,
) -> JsResult<JsUint32Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
let offset = cx.argument::<JsNumber>(1)?;
let offset = offset.value(&mut cx);
let len = cx.argument::<JsNumber>(2)?;
let len = len.value(&mut cx);
JsUint32Array::from_region(&mut cx, &buf.region(offset as usize, len as usize))
}
pub fn get_arraybuffer_byte_length(mut cx: FunctionContext) -> JsResult<JsNumber> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
let size = buf.size(&mut cx);
let n = cx.number(size as u32);
Ok(n)
}
/// Collects a typed array's metadata into a JS object with fields
/// `byteOffset`, `length`, `byteLength`, and `buffer`.
fn typed_array_info<'cx, C, T: Binary + Copy>(
    cx: &mut C,
    a: Handle<'cx, JsTypedArray<T>>,
) -> JsResult<'cx, JsObject>
where
    C: Context<'cx>,
    JsTypedArray<T>: Value,
{
    // Each metric is fetched, then shadowed by its `JsNumber` conversion.
    let offset = a.offset(cx);
    let offset = cx.number(offset as u32);
    let len = a.len(cx);
    let len = cx.number(len as u32);
    let size = a.size(cx);
    let size = cx.number(size as u32);
    let buffer = a.buffer(cx);
    let obj = cx.empty_object();
    obj.set(cx, "byteOffset", offset)?;
    obj.set(cx, "length", len)?;
    obj.set(cx, "byteLength", size)?;
    obj.set(cx, "buffer", buffer)?;
    Ok(obj)
}
/// Test driver: records a `Uint32Array`'s metadata, invokes the JS `detach`
/// callback (argument 1) on it, lets `f` optionally swap in a replacement
/// handle, then records the metadata again.
///
/// Returns `{ before, after }` so the JS test can compare the two snapshots.
fn detach_and_then<'cx, F>(mut cx: FunctionContext<'cx>, f: F) -> JsResult<JsObject>
where
    F: FnOnce(
        &mut FunctionContext<'cx>,
        Handle<'cx, JsUint32Array>,
    ) -> NeonResult<Option<Handle<'cx, JsUint32Array>>>,
{
    let mut a = cx.argument::<JsUint32Array>(0)?;
    let detach = cx.argument::<JsFunction>(1)?;
    let before = typed_array_info(&mut cx, a)?;
    // NOTE(review): the callback is expected to detach the array's backing
    // buffer on the JS side — confirm against the JS half of the test.
    detach.call_with(&cx).arg(a).exec(&mut cx)?;
    if let Some(new_array) = f(&mut cx, a)? {
        a = new_array;
    }
    let after = typed_array_info(&mut cx, a)?;
    let result = cx.empty_object();
    result.set(&mut cx, "before", before)?;
    result.set(&mut cx, "after", after)?;
    Ok(result)
}
pub fn detach_same_handle(cx: FunctionContext) -> JsResult<JsObject> {
detach_and_then(cx, |_, _| Ok(None))
}
pub fn detach_and_escape(cx: FunctionContext) -> JsResult<JsObject> {
detach_and_then(cx, |cx, a| {
let a = cx.compute_scoped(|_| Ok(a))?;
Ok(Some(a))
})
}
pub fn detach_and_cast(cx: FunctionContext) -> JsResult<JsObject> {
detach_and_then(cx, |cx, a| {
let v = a.upcast::<JsValue>();
let a = v.downcast_or_throw::<JsUint32Array, _>(cx)?;
Ok(Some(a))
})
}
pub fn detach_and_unroot(cx: FunctionContext) -> JsResult<JsObject> {
detach_and_then(cx, |cx, a| {
let root = a.root(cx);
let a = root.into_inner(cx);
Ok(Some(a))
})
}
/// Returns the metadata object for any supported typed array, or throws a
/// `TypeError` if the argument is not a typed array.
pub fn get_typed_array_info(mut cx: FunctionContext) -> JsResult<JsObject> {
    let x = cx.argument::<JsValue>(0)?;
    // No runtime reflection over the element type: try each supported `T`
    // in turn until one downcast succeeds.
    if let Ok(a) = x.downcast::<JsTypedArray<u8>, _>(&mut cx) {
        typed_array_info(&mut cx, a)
    } else if let Ok(a) = x.downcast::<JsTypedArray<i8>, _>(&mut cx) {
        typed_array_info(&mut cx, a)
    } else if let Ok(a) = x.downcast::<JsTypedArray<u16>, _>(&mut cx) {
        typed_array_info(&mut cx, a)
    } else if let Ok(a) = x.downcast::<JsTypedArray<i16>, _>(&mut cx) {
        typed_array_info(&mut cx, a)
    } else if let Ok(a) = x.downcast::<JsTypedArray<u32>, _>(&mut cx) {
        typed_array_info(&mut cx, a)
    } else if let Ok(a) = x.downcast::<JsTypedArray<i32>, _>(&mut cx) {
        typed_array_info(&mut cx, a)
    } else if let Ok(a) = x.downcast::<JsTypedArray<u64>, _>(&mut cx) {
        typed_array_info(&mut cx, a)
    } else if let Ok(a) = x.downcast::<JsTypedArray<i64>, _>(&mut cx) {
        typed_array_info(&mut cx, a)
    } else if let Ok(a) = x.downcast::<JsTypedArray<f32>, _>(&mut cx) {
        typed_array_info(&mut cx, a)
    } else if let Ok(a) = x.downcast::<JsTypedArray<f64>, _>(&mut cx) {
        typed_array_info(&mut cx, a)
    } else {
        cx.throw_type_error("expected a typed array")
    }
}
/// Builds an `f32` region over the given `ArrayBuffer` and, when the fourth
/// argument is `true`, converts it into a `Float32Array`; otherwise returns
/// `undefined`.
pub fn build_f32_region(mut cx: FunctionContext) -> JsResult<JsValue> {
    let buf = cx.argument::<JsArrayBuffer>(0)?;
    let offset = cx.argument::<JsNumber>(1)?.value(&mut cx) as u32 as usize;
    let len = cx.argument::<JsNumber>(2)?.value(&mut cx) as u32 as usize;
    let convert = cx.argument::<JsBoolean>(3)?.value(&mut cx);
    let region = buf.region::<f32>(offset, len);
    // Guard clause: constructing the region alone is the whole test when no
    // conversion was requested.
    if !convert {
        return Ok(cx.undefined().upcast());
    }
    let arr = region.to_typed_array(&mut cx)?;
    Ok(arr.upcast())
}
pub fn build_f64_region(mut cx: FunctionContext) -> JsResult<JsValue> {
let buf: Handle<JsArrayBuffer> = cx.argument(0)?;
let offset: Handle<JsNumber> = cx.argument(1)?;
let offset: usize = offset.value(&mut cx) as u32 as usize;
let len: Handle<JsNumber> = cx.argument(2)?;
let len: usize = len.value(&mut cx) as u32 as usize;
let convert: Handle<JsBoolean> = cx.argument(3)?;
let convert: bool = convert.value(&mut cx);
let region = buf.region::<f64>(offset, len);
if convert {
let arr = region.to_typed_array(&mut cx)?;
Ok(arr.upcast())
} else {
Ok(cx.undefined().upcast())
}
}
pub fn read_buffer_with_lock(mut cx: FunctionContext) -> JsResult<JsNumber> {
let b: Handle<JsBuffer> = cx.argument(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let lock = cx.lock();
let x = b
.try_borrow(&lock)
.map(|slice| slice[i])
.or_throw(&mut cx)?;
Ok(cx.number(x))
}
pub fn read_buffer_with_borrow(mut cx: FunctionContext) -> JsResult<JsNumber> {
let buf = cx.argument::<JsBuffer>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = buf.as_slice(&cx)[i];
Ok(cx.number(n as f64))
}
pub fn write_buffer_with_lock(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let mut b: Handle<JsBuffer> = cx.argument(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let x = cx.argument::<JsNumber>(2)?.value(&mut cx) as u8;
let lock = cx.lock();
b.try_borrow_mut(&lock)
.map(|mut slice| slice[i] = x)
.or_throw(&mut cx)?;
Ok(cx.undefined())
}
pub fn write_buffer_with_borrow_mut(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let mut buf = cx.argument::<JsBuffer>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = cx.argument::<JsNumber>(2)?.value(&mut cx) as u8;
buf.as_mut_slice(&mut cx)[i] = n;
Ok(cx.undefined())
}
Simplify test logic with an iterator
Co-authored-by: K.J. Valencik <d1a42f97b9c113f196655acea89996f8f4d9161d@gmail.com>
use neon::{
prelude::*,
types::buffer::{Binary, BorrowError, TypedArray},
};
pub fn return_array_buffer(mut cx: FunctionContext) -> JsResult<JsArrayBuffer> {
let b: Handle<JsArrayBuffer> = cx.array_buffer(16)?;
Ok(b)
}
pub fn return_array_buffer_from_slice(mut cx: FunctionContext) -> JsResult<JsArrayBuffer> {
let len = cx.argument::<JsNumber>(0)?.value(&mut cx) as usize;
let v = (0..len).map(|i| i as u8).collect::<Vec<_>>();
JsArrayBuffer::from_slice(&mut cx, &v)
}
pub fn read_array_buffer_with_lock(mut cx: FunctionContext) -> JsResult<JsNumber> {
let buf = cx.argument::<JsTypedArray<u32>>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let lock = cx.lock();
let n = buf.try_borrow(&lock).map(|buf| buf[i]).or_throw(&mut cx)?;
Ok(cx.number(n))
}
pub fn read_array_buffer_with_borrow(mut cx: FunctionContext) -> JsResult<JsNumber> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = buf.as_slice(&cx)[i];
Ok(cx.number(n as f64))
}
pub fn write_array_buffer_with_lock(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let mut b: Handle<JsArrayBuffer> = cx.argument(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as u32 as usize;
let x = cx.argument::<JsNumber>(2)?.value(&mut cx) as u8;
let lock = cx.lock();
b.try_borrow_mut(&lock)
.map(|mut slice| {
slice[i] = x;
})
.or_throw(&mut cx)?;
Ok(cx.undefined())
}
pub fn write_array_buffer_with_borrow_mut(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let mut buf = cx.argument::<JsArrayBuffer>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = cx.argument::<JsNumber>(2)?.value(&mut cx) as u8;
buf.as_mut_slice(&mut cx)[i] = n;
Ok(cx.undefined())
}
pub fn read_typed_array_with_borrow(mut cx: FunctionContext) -> JsResult<JsNumber> {
let buf = cx.argument::<JsTypedArray<i32>>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = buf.as_slice(&cx)[i];
Ok(cx.number(n as f64))
}
pub fn write_typed_array_with_borrow_mut(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let mut buf = cx.argument::<JsTypedArray<i32>>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = cx.argument::<JsNumber>(2)?.value(&mut cx) as i32;
buf.as_mut_slice(&mut cx)[i] = n;
Ok(cx.undefined())
}
pub fn read_u8_typed_array(mut cx: FunctionContext) -> JsResult<JsNumber> {
let buf = cx.argument::<JsTypedArray<u8>>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = buf.as_slice(&cx)[i];
Ok(cx.number(n as f64))
}
pub fn copy_typed_array(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let source = cx.argument::<JsTypedArray<u32>>(0)?;
let mut dest = cx.argument::<JsTypedArray<u32>>(1)?;
let mut run = || -> Result<_, BorrowError> {
let lock = cx.lock();
let source = source.try_borrow(&lock)?;
let mut dest = dest.try_borrow_mut(&lock)?;
dest.copy_from_slice(&source);
Ok(())
};
run().or_throw(&mut cx)?;
Ok(cx.undefined())
}
pub fn return_uninitialized_buffer(mut cx: FunctionContext) -> JsResult<JsBuffer> {
let b: Handle<JsBuffer> = unsafe { JsBuffer::uninitialized(&mut cx, 16)? };
Ok(b)
}
pub fn return_buffer(mut cx: FunctionContext) -> JsResult<JsBuffer> {
let b: Handle<JsBuffer> = cx.buffer(16)?;
Ok(b)
}
pub fn return_external_buffer(mut cx: FunctionContext) -> JsResult<JsBuffer> {
let data = cx.argument::<JsString>(0)?.value(&mut cx);
let buf = JsBuffer::external(&mut cx, data.into_bytes());
Ok(buf)
}
pub fn return_external_array_buffer(mut cx: FunctionContext) -> JsResult<JsArrayBuffer> {
let data = cx.argument::<JsString>(0)?.value(&mut cx);
let buf = JsArrayBuffer::external(&mut cx, data.into_bytes());
Ok(buf)
}
pub fn return_int8array_from_arraybuffer(mut cx: FunctionContext) -> JsResult<JsInt8Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
JsInt8Array::from_buffer(&mut cx, buf)
}
pub fn return_int16array_from_arraybuffer(mut cx: FunctionContext) -> JsResult<JsInt16Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
JsInt16Array::from_buffer(&mut cx, buf)
}
pub fn return_uint32array_from_arraybuffer(mut cx: FunctionContext) -> JsResult<JsUint32Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
JsUint32Array::from_buffer(&mut cx, buf)
}
pub fn return_float64array_from_arraybuffer(mut cx: FunctionContext) -> JsResult<JsFloat64Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
JsFloat64Array::from_buffer(&mut cx, buf)
}
pub fn return_biguint64array_from_arraybuffer(
mut cx: FunctionContext,
) -> JsResult<JsBigUint64Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
JsBigUint64Array::from_buffer(&mut cx, buf)
}
pub fn return_new_int32array(mut cx: FunctionContext) -> JsResult<JsInt32Array> {
let len = cx.argument::<JsNumber>(0)?.value(&mut cx) as usize;
JsInt32Array::new(&mut cx, len)
}
/// Builds an `Int32Array` of the requested length containing `0, 1, 2, …`.
///
/// Simplified with an iterator pipeline, matching the style this revision
/// ("Simplify test logic with an iterator") already applied to
/// `return_array_buffer_from_slice` above.
pub fn return_int32array_from_slice(mut cx: FunctionContext) -> JsResult<JsInt32Array> {
    let len = cx.argument::<JsNumber>(0)?.value(&mut cx) as usize;
    // `as i32` wraps for very large lengths exactly as the old push loop did.
    let v = (0..len).map(|i| i as i32).collect::<Vec<_>>();
    JsInt32Array::from_slice(&mut cx, &v)
}
pub fn return_uint32array_from_arraybuffer_region(
mut cx: FunctionContext,
) -> JsResult<JsUint32Array> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
let offset = cx.argument::<JsNumber>(1)?;
let offset = offset.value(&mut cx);
let len = cx.argument::<JsNumber>(2)?;
let len = len.value(&mut cx);
JsUint32Array::from_region(&mut cx, &buf.region(offset as usize, len as usize))
}
pub fn get_arraybuffer_byte_length(mut cx: FunctionContext) -> JsResult<JsNumber> {
let buf = cx.argument::<JsArrayBuffer>(0)?;
let size = buf.size(&mut cx);
let n = cx.number(size as u32);
Ok(n)
}
fn typed_array_info<'cx, C, T: Binary + Copy>(
cx: &mut C,
a: Handle<'cx, JsTypedArray<T>>,
) -> JsResult<'cx, JsObject>
where
C: Context<'cx>,
JsTypedArray<T>: Value,
{
let offset = a.offset(cx);
let offset = cx.number(offset as u32);
let len = a.len(cx);
let len = cx.number(len as u32);
let size = a.size(cx);
let size = cx.number(size as u32);
let buffer = a.buffer(cx);
let obj = cx.empty_object();
obj.set(cx, "byteOffset", offset)?;
obj.set(cx, "length", len)?;
obj.set(cx, "byteLength", size)?;
obj.set(cx, "buffer", buffer)?;
Ok(obj)
}
fn detach_and_then<'cx, F>(mut cx: FunctionContext<'cx>, f: F) -> JsResult<JsObject>
where
F: FnOnce(
&mut FunctionContext<'cx>,
Handle<'cx, JsUint32Array>,
) -> NeonResult<Option<Handle<'cx, JsUint32Array>>>,
{
let mut a = cx.argument::<JsUint32Array>(0)?;
let detach = cx.argument::<JsFunction>(1)?;
let before = typed_array_info(&mut cx, a)?;
detach.call_with(&cx).arg(a).exec(&mut cx)?;
if let Some(new_array) = f(&mut cx, a)? {
a = new_array;
}
let after = typed_array_info(&mut cx, a)?;
let result = cx.empty_object();
result.set(&mut cx, "before", before)?;
result.set(&mut cx, "after", after)?;
Ok(result)
}
pub fn detach_same_handle(cx: FunctionContext) -> JsResult<JsObject> {
detach_and_then(cx, |_, _| Ok(None))
}
pub fn detach_and_escape(cx: FunctionContext) -> JsResult<JsObject> {
detach_and_then(cx, |cx, a| {
let a = cx.compute_scoped(|_| Ok(a))?;
Ok(Some(a))
})
}
pub fn detach_and_cast(cx: FunctionContext) -> JsResult<JsObject> {
detach_and_then(cx, |cx, a| {
let v = a.upcast::<JsValue>();
let a = v.downcast_or_throw::<JsUint32Array, _>(cx)?;
Ok(Some(a))
})
}
pub fn detach_and_unroot(cx: FunctionContext) -> JsResult<JsObject> {
detach_and_then(cx, |cx, a| {
let root = a.root(cx);
let a = root.into_inner(cx);
Ok(Some(a))
})
}
pub fn get_typed_array_info(mut cx: FunctionContext) -> JsResult<JsObject> {
let x = cx.argument::<JsValue>(0)?;
if let Ok(a) = x.downcast::<JsTypedArray<u8>, _>(&mut cx) {
typed_array_info(&mut cx, a)
} else if let Ok(a) = x.downcast::<JsTypedArray<i8>, _>(&mut cx) {
typed_array_info(&mut cx, a)
} else if let Ok(a) = x.downcast::<JsTypedArray<u16>, _>(&mut cx) {
typed_array_info(&mut cx, a)
} else if let Ok(a) = x.downcast::<JsTypedArray<i16>, _>(&mut cx) {
typed_array_info(&mut cx, a)
} else if let Ok(a) = x.downcast::<JsTypedArray<u32>, _>(&mut cx) {
typed_array_info(&mut cx, a)
} else if let Ok(a) = x.downcast::<JsTypedArray<i32>, _>(&mut cx) {
typed_array_info(&mut cx, a)
} else if let Ok(a) = x.downcast::<JsTypedArray<u64>, _>(&mut cx) {
typed_array_info(&mut cx, a)
} else if let Ok(a) = x.downcast::<JsTypedArray<i64>, _>(&mut cx) {
typed_array_info(&mut cx, a)
} else if let Ok(a) = x.downcast::<JsTypedArray<f32>, _>(&mut cx) {
typed_array_info(&mut cx, a)
} else if let Ok(a) = x.downcast::<JsTypedArray<f64>, _>(&mut cx) {
typed_array_info(&mut cx, a)
} else {
cx.throw_type_error("expected a typed array")
}
}
pub fn build_f32_region(mut cx: FunctionContext) -> JsResult<JsValue> {
let buf: Handle<JsArrayBuffer> = cx.argument(0)?;
let offset: Handle<JsNumber> = cx.argument(1)?;
let offset: usize = offset.value(&mut cx) as u32 as usize;
let len: Handle<JsNumber> = cx.argument(2)?;
let len: usize = len.value(&mut cx) as u32 as usize;
let convert: Handle<JsBoolean> = cx.argument(3)?;
let convert: bool = convert.value(&mut cx);
let region = buf.region::<f32>(offset, len);
if convert {
let arr = region.to_typed_array(&mut cx)?;
Ok(arr.upcast())
} else {
Ok(cx.undefined().upcast())
}
}
pub fn build_f64_region(mut cx: FunctionContext) -> JsResult<JsValue> {
let buf: Handle<JsArrayBuffer> = cx.argument(0)?;
let offset: Handle<JsNumber> = cx.argument(1)?;
let offset: usize = offset.value(&mut cx) as u32 as usize;
let len: Handle<JsNumber> = cx.argument(2)?;
let len: usize = len.value(&mut cx) as u32 as usize;
let convert: Handle<JsBoolean> = cx.argument(3)?;
let convert: bool = convert.value(&mut cx);
let region = buf.region::<f64>(offset, len);
if convert {
let arr = region.to_typed_array(&mut cx)?;
Ok(arr.upcast())
} else {
Ok(cx.undefined().upcast())
}
}
pub fn read_buffer_with_lock(mut cx: FunctionContext) -> JsResult<JsNumber> {
let b: Handle<JsBuffer> = cx.argument(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let lock = cx.lock();
let x = b
.try_borrow(&lock)
.map(|slice| slice[i])
.or_throw(&mut cx)?;
Ok(cx.number(x))
}
pub fn read_buffer_with_borrow(mut cx: FunctionContext) -> JsResult<JsNumber> {
let buf = cx.argument::<JsBuffer>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = buf.as_slice(&cx)[i];
Ok(cx.number(n as f64))
}
pub fn write_buffer_with_lock(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let mut b: Handle<JsBuffer> = cx.argument(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let x = cx.argument::<JsNumber>(2)?.value(&mut cx) as u8;
let lock = cx.lock();
b.try_borrow_mut(&lock)
.map(|mut slice| slice[i] = x)
.or_throw(&mut cx)?;
Ok(cx.undefined())
}
pub fn write_buffer_with_borrow_mut(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let mut buf = cx.argument::<JsBuffer>(0)?;
let i = cx.argument::<JsNumber>(1)?.value(&mut cx) as usize;
let n = cx.argument::<JsNumber>(2)?.value(&mut cx) as u8;
buf.as_mut_slice(&mut cx)[i] = n;
Ok(cx.undefined())
}
|
/// Prints two integers, increments the mutable one, and prints them again.
fn main() {
    let poopoo = 12;
    let mut paapaa = 11;
    println!("so the value of this integer I call poopoo is: {} and paapaa is: {}", poopoo, paapaa);
    // Fix: Rust has no `++` operator (`paapaa++` does not compile); use `+= 1`.
    paapaa += 1;
    println!("now the values are: {} and {}", poopoo, paapaa);
}
main.rs has been modified and now compiles. Next, I should move on to implementing the feature goal.
/// Prints two integers, increments the mutable one, and prints them again.
fn main() {
    // Destructure both initial values in one binding.
    let (ayy, mut lmao) = (12, 11);
    println!("so the value of this integer I call ayy is: {} and lmao is: {}", ayy, lmao);
    lmao = lmao + 1;
    println!("now the values are: {} and {}", ayy, lmao);
}
|
use std::collections::HashMap;
use std::fmt;
use descriptor::EnumDescriptorProto;
use descriptor::EnumValueDescriptorProto;
use descriptor::FileDescriptorProto;
use descriptorx::WithScope;
use descriptorx::FileScope;
use reflect::ProtobufValue;
use std::any::TypeId;
use std::hash::Hash;
use std::hash::Hasher;
use std::marker;
use std::mem;
use ProtobufEnum;
/// Dynamic (reflection) representation of a single enum variant.
///
/// All references are `'static`: descriptors are built from generated code
/// and live for the whole program.
#[derive(Clone)]
pub struct EnumValueDescriptor {
    // Variant name/number as parsed from the `.proto` file.
    proto: &'static EnumValueDescriptorProto,
    // The generated Rust enum value, type-erased behind a (pre-2018, bare)
    // trait object.
    protobuf_value: &'static ProtobufValue,
    // Indirection used to recover the owning `EnumDescriptor` lazily.
    get_descriptor: &'static GetEnumDescriptor,
}
impl PartialEq for EnumValueDescriptor {
fn eq(&self, other: &EnumValueDescriptor) -> bool {
self.enum_descriptor() == other.enum_descriptor() && self.value() == other.value()
}
}
impl Hash for EnumValueDescriptor {
fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.value(), state)
}
}
/// Compile-time assertion that `EnumValueDescriptor` is `Send + Sync`.
fn _assert_send_sync() {
    fn must_be_send_sync<T: Send + Sync>() {}
    must_be_send_sync::<EnumValueDescriptor>();
}
impl fmt::Debug for EnumValueDescriptor {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("EnumValueDescriptor")
.field("proto", self.proto)
.field("value", &"...")
.finish()
}
}
impl Copy for EnumValueDescriptor {}
impl EnumValueDescriptor {
    /// Name of enum variant as specified in proto file
    pub fn name(&self) -> &'static str {
        self.proto.get_name()
    }
    /// `i32` value of the enum variant
    pub fn value(&self) -> i32 {
        self.proto.get_number()
    }
    /// Convert to generic `ProtobufValue`
    pub fn protobuf_value(&self) -> &'static ProtobufValue {
        self.protobuf_value
    }
    /// Descriptor of the enum this variant belongs to.
    pub fn enum_descriptor(&self) -> &EnumDescriptor {
        self.get_descriptor.descriptor()
    }
    /// Convert this dynamic value into the concrete generated enum `E`;
    /// `None` when `E` is not the enum this descriptor was built for
    /// (delegates to `EnumDescriptor::cast`).
    pub fn cast<E: 'static>(&self) -> Option<E> {
        self.enum_descriptor().cast(self.value())
    }
}
/// Object-safe hook for recovering a concrete enum's descriptor and for
/// materializing a value of the type-erased enum.
trait GetEnumDescriptor: Send + Sync + 'static {
    fn descriptor(&self) -> &EnumDescriptor;
    /// Writes the enum value corresponding to `value` into `dest`.
    ///
    /// # Safety
    /// `dest` must point to valid, writable, properly aligned storage for
    /// exactly one value of the concrete enum type this implementation was
    /// created for. Panics if `value` is not a defined variant (see the
    /// `expect` in the implementation).
    unsafe fn copy_to(&self, value: i32, dest: *mut ());
}
struct GetDescriptorImpl<E: ProtobufEnum>(marker::PhantomData<E>);
impl<E: ProtobufEnum> GetEnumDescriptor for GetDescriptorImpl<E> {
    fn descriptor(&self) -> &EnumDescriptor {
        E::enum_descriptor_static()
    }
    // Materializes the `E` for `value` and copies it into `dest`; panics
    // (via `expect`) when `value` is not a defined variant.
    unsafe fn copy_to(&self, value: i32, dest: *mut ()) {
        let e = E::from_i32(value).expect("unknown value");
        // Caller's safety contract: `dest` is valid, aligned storage for one `E`.
        (&e as *const E).copy_to(dest as *mut E, 1);
    }
}
pub struct EnumDescriptor {
proto: &'static EnumDescriptorProto,
values: Vec<EnumValueDescriptor>,
type_id: TypeId,
get_descriptor: &'static GetEnumDescriptor,
index_by_name: HashMap<String, usize>,
index_by_number: HashMap<i32, usize>,
}
/// Identity comparison: message descriptor are equal if their addresses are equal
impl PartialEq for EnumDescriptor {
fn eq(&self, other: &EnumDescriptor) -> bool {
self as *const EnumDescriptor == other as *const EnumDescriptor
}
}
// find enum by rust type name
//
// Scans the enums of `fd` (via `FileScope::find_enums` — presumably including
// nested ones; confirm) for the one whose generated Rust name matches.
// Panics if none matches: callers pass names emitted by codegen, so a miss is
// a bug. The explicit lifetime is required: the return borrows from `fd`, not
// from `rust_name`, and elision cannot express that with two reference inputs.
fn find_enum_by_rust_name<'a>(
    fd: &'a FileDescriptorProto,
    rust_name: &str,
) -> &'a EnumDescriptorProto {
    FileScope {
        file_descriptor: fd,
    }.find_enums()
        .into_iter()
        .find(|e| e.rust_name() == rust_name)
        .unwrap()
        .en
}
impl EnumDescriptor {
    /// Enum name as given in the `.proto` file.
    pub fn name(&self) -> &'static str {
        self.proto.get_name()
    }
    /// Get the `EnumDescriptor` for a given generated enum type.
    pub fn for_type<E: ProtobufEnum>() -> &'static EnumDescriptor {
        E::enum_descriptor_static()
    }
    /// Build the descriptor for generated enum `E` named `rust_name` in `file`.
    /// Called from generated code.
    pub fn new<E>(rust_name: &'static str, file: &'static FileDescriptorProto) -> EnumDescriptor
    where
        E: ProtobufEnum,
    {
        let proto = find_enum_by_rust_name(file, rust_name);
        // Index variants by name and by number for O(1) lookups.
        let mut index_by_name = HashMap::new();
        let mut index_by_number = HashMap::new();
        for (i, v) in proto.value.iter().enumerate() {
            index_by_number.insert(v.get_number(), i);
            index_by_name.insert(v.get_name().to_string(), i);
        }
        let proto_values = &proto.value;
        let code_values = E::values();
        // Generated code and `.proto` metadata must agree on the variant count.
        assert_eq!(proto_values.len(), code_values.len());
        let get_descriptor = &GetDescriptorImpl(marker::PhantomData::<E>);
        // Pair each `.proto` variant with its generated reflection value.
        let values = proto_values
            .iter()
            .zip(code_values)
            .map(|(p, c)| EnumValueDescriptor {
                proto: p,
                protobuf_value: c,
                get_descriptor,
            }).collect();
        EnumDescriptor {
            proto,
            values,
            type_id: TypeId::of::<E>(),
            get_descriptor,
            index_by_name,
            index_by_number,
        }
    }
    /// All variants of this enum, in declaration order.
    pub fn values(&self) -> &[EnumValueDescriptor] {
        &self.values
    }
    /// Find enum variant by name.
    pub fn value_by_name<'a>(&'a self, name: &str) -> Option<&'a EnumValueDescriptor> {
        let &index = self.index_by_name.get(name)?;
        Some(&self.values[index])
    }
    /// Find enum variant by number.
    pub fn value_by_number<'a>(&'a self, number: i32) -> Option<&'a EnumValueDescriptor> {
        let &index = self.index_by_number.get(&number)?;
        Some(&self.values[index])
    }
    /// True when this descriptor describes the Rust type `E`.
    pub fn is<E: 'static>(&self) -> bool {
        TypeId::of::<E>() == self.type_id
    }
    /// Build a typed enum object from a numeric value; `None` when `E` is not
    /// the described type. Panics when `value` is not a known variant (see
    /// `GetEnumDescriptor::copy_to`).
    pub fn cast<E: 'static>(&self, value: i32) -> Option<E> {
        if self.is::<E>() {
            unsafe {
                // `copy_to` fully initializes `r` before it is read.
                // NOTE(review): `mem::uninitialized` is deprecated and UB-prone
                // for enum types — migrate to `MaybeUninit` when the toolchain allows.
                let mut r = mem::uninitialized();
                self.get_descriptor
                    .copy_to(value, &mut r as *mut E as *mut ());
                Some(r)
            }
        } else {
            None
        }
    }
}
Elide lifetimes
use std::collections::HashMap;
use std::fmt;
use descriptor::EnumDescriptorProto;
use descriptor::EnumValueDescriptorProto;
use descriptor::FileDescriptorProto;
use descriptorx::WithScope;
use descriptorx::FileScope;
use reflect::ProtobufValue;
use std::any::TypeId;
use std::hash::Hash;
use std::hash::Hasher;
use std::marker;
use std::mem;
use ProtobufEnum;
#[derive(Clone)]
pub struct EnumValueDescriptor {
    // Generated `.proto` metadata for this variant (name, number).
    proto: &'static EnumValueDescriptorProto,
    // The variant as a generic reflection value.
    protobuf_value: &'static ProtobufValue,
    // Indirection used to reach the owning `EnumDescriptor` without naming `E`.
    get_descriptor: &'static GetEnumDescriptor,
}
// Equal when owned by the same enum descriptor (identity comparison)
// and carrying the same numeric value.
impl PartialEq for EnumValueDescriptor {
    fn eq(&self, other: &EnumValueDescriptor) -> bool {
        self.enum_descriptor() == other.enum_descriptor() && self.value() == other.value()
    }
}
// Hash only the numeric value; values from different enums may collide,
// which is fine — `eq` still distinguishes them.
impl Hash for EnumValueDescriptor {
    fn hash<H: Hasher>(&self, state: &mut H) {
        Hash::hash(&self.value(), state)
    }
}
// Compile-time check that `EnumValueDescriptor` is `Send + Sync`.
fn _assert_send_sync() {
    fn _assert_send_sync<T: Send + Sync>() {}
    _assert_send_sync::<EnumValueDescriptor>();
}
// Debug output elides `protobuf_value` (no `Debug` bound on the trait object).
impl fmt::Debug for EnumValueDescriptor {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EnumValueDescriptor")
            .field("proto", self.proto)
            .field("value", &"...")
            .finish()
    }
}
impl Copy for EnumValueDescriptor {}
impl EnumValueDescriptor {
    /// Name of enum variant as specified in proto file
    pub fn name(&self) -> &'static str {
        self.proto.get_name()
    }
    /// `i32` value of the enum variant
    pub fn value(&self) -> i32 {
        self.proto.get_number()
    }
    /// Convert to generic `ProtobufValue`
    pub fn protobuf_value(&self) -> &'static ProtobufValue {
        self.protobuf_value
    }
    /// Descriptor of the enum this value belongs to.
    pub fn enum_descriptor(&self) -> &EnumDescriptor {
        self.get_descriptor.descriptor()
    }
    /// Convert this value descriptor into the typed enum `E`;
    /// `None` when `E` is not the enum this descriptor describes.
    pub fn cast<E: 'static>(&self) -> Option<E> {
        self.enum_descriptor().cast(self.value())
    }
}
// Object-safe hook letting a value descriptor reach its owning `EnumDescriptor`
// (and construct typed enum values) without naming the concrete enum type `E`.
trait GetEnumDescriptor: Send + Sync + 'static {
    fn descriptor(&self) -> &EnumDescriptor;
    // Writes the enum object corresponding to `value` into `dest`.
    // Safety: `dest` must point at valid, writable storage for an `E`.
    unsafe fn copy_to(&self, value: i32, dest: *mut ());
}
// Zero-sized carrier of the enum type parameter for the trait object above.
struct GetDescriptorImpl<E: ProtobufEnum>(marker::PhantomData<E>);
impl<E: ProtobufEnum> GetEnumDescriptor for GetDescriptorImpl<E> {
    fn descriptor(&self) -> &EnumDescriptor {
        E::enum_descriptor_static()
    }
    unsafe fn copy_to(&self, value: i32, dest: *mut ()) {
        // Panics when `value` is not a known variant of `E`.
        let e = E::from_i32(value).expect("unknown value");
        (&e as *const E).copy_to(dest as *mut E, 1);
    }
}
pub struct EnumDescriptor {
    // Generated `.proto` metadata for the enum.
    proto: &'static EnumDescriptorProto,
    // One descriptor per variant, in `.proto` declaration order.
    values: Vec<EnumValueDescriptor>,
    // `TypeId` of the Rust enum type this descriptor was built for.
    type_id: TypeId,
    // Object-safe constructor hook for typed values (see `GetEnumDescriptor`).
    get_descriptor: &'static GetEnumDescriptor,
    // Variant lookup tables: name -> index and number -> index into `values`.
    index_by_name: HashMap<String, usize>,
    index_by_number: HashMap<i32, usize>,
}
/// Identity comparison: message descriptor are equal if their addresses are equal
impl PartialEq for EnumDescriptor {
    fn eq(&self, other: &EnumDescriptor) -> bool {
        self as *const EnumDescriptor == other as *const EnumDescriptor
    }
}
// find enum by rust type name
// Scans every enum (including nested ones) declared in the file descriptor
// and returns the one whose generated Rust name matches; panics when absent
// (only called from generated code, where the name is known to exist).
fn find_enum_by_rust_name<'a>(
    fd: &'a FileDescriptorProto,
    rust_name: &str,
) -> &'a EnumDescriptorProto {
    FileScope {
        file_descriptor: fd,
    }.find_enums()
        .into_iter()
        .find(|e| e.rust_name() == rust_name)
        .unwrap()
        .en
}
impl EnumDescriptor {
    /// Enum name as given in the `.proto` file.
    pub fn name(&self) -> &'static str {
        self.proto.get_name()
    }
    /// Get the `EnumDescriptor` for a given generated enum type.
    pub fn for_type<E: ProtobufEnum>() -> &'static EnumDescriptor {
        E::enum_descriptor_static()
    }
    /// Build the descriptor for generated enum `E` named `rust_name` in `file`.
    /// Called from generated code.
    pub fn new<E>(rust_name: &'static str, file: &'static FileDescriptorProto) -> EnumDescriptor
    where
        E: ProtobufEnum,
    {
        let proto = find_enum_by_rust_name(file, rust_name);
        // Index variants by name and by number for O(1) lookups.
        let mut index_by_name = HashMap::new();
        let mut index_by_number = HashMap::new();
        for (i, v) in proto.value.iter().enumerate() {
            index_by_number.insert(v.get_number(), i);
            index_by_name.insert(v.get_name().to_string(), i);
        }
        let proto_values = &proto.value;
        let code_values = E::values();
        // Generated code and `.proto` metadata must agree on the variant count.
        assert_eq!(proto_values.len(), code_values.len());
        let get_descriptor = &GetDescriptorImpl(marker::PhantomData::<E>);
        // Pair each `.proto` variant with its generated reflection value.
        let values = proto_values
            .iter()
            .zip(code_values)
            .map(|(p, c)| EnumValueDescriptor {
                proto: p,
                protobuf_value: c,
                get_descriptor,
            }).collect();
        EnumDescriptor {
            proto,
            values,
            type_id: TypeId::of::<E>(),
            get_descriptor,
            index_by_name,
            index_by_number,
        }
    }
    /// All variants of this enum, in declaration order.
    pub fn values(&self) -> &[EnumValueDescriptor] {
        &self.values
    }
    /// Find enum variant by name.
    pub fn value_by_name(&self, name: &str) -> Option<&EnumValueDescriptor> {
        let &index = self.index_by_name.get(name)?;
        Some(&self.values[index])
    }
    /// Find enum variant by number.
    pub fn value_by_number(&self, number: i32) -> Option<&EnumValueDescriptor> {
        let &index = self.index_by_number.get(&number)?;
        Some(&self.values[index])
    }
    /// True when this descriptor describes the Rust type `E`.
    pub fn is<E: 'static>(&self) -> bool {
        TypeId::of::<E>() == self.type_id
    }
    /// Build a typed enum object from a numeric value; `None` when `E` is not
    /// the described type. Panics when `value` is not a known variant (see
    /// `GetEnumDescriptor::copy_to`).
    pub fn cast<E: 'static>(&self, value: i32) -> Option<E> {
        if self.is::<E>() {
            unsafe {
                // `copy_to` fully initializes `r` before it is read.
                // NOTE(review): `mem::uninitialized` is deprecated and UB-prone
                // for enum types — migrate to `MaybeUninit` when the toolchain allows.
                let mut r = mem::uninitialized();
                self.get_descriptor
                    .copy_to(value, &mut r as *mut E as *mut ());
                Some(r)
            }
        } else {
            None
        }
    }
}
|
use std::any::TypeId;
use std::collections::HashMap;
use std::fmt;
use std::hash::Hash;
use std::hash::Hasher;
use std::marker;
use crate::descriptor::EnumDescriptorProto;
use crate::descriptor::EnumValueDescriptorProto;
use crate::descriptor::FileDescriptorProto;
use crate::enums::ProtobufEnum;
use crate::enums::ProtobufEnumOrUnknown;
use crate::reflect::find_message_or_enum::find_message_or_enum;
use crate::reflect::find_message_or_enum::MessageOrEnum;
use crate::reflect::ProtobufValue;
/// Description for enum variant.
///
/// Used in reflection.
#[derive(Clone)]
pub struct EnumValueDescriptor {
    // Generated `.proto` metadata for this variant (name, number).
    proto: &'static EnumValueDescriptorProto,
    // The variant as a generic reflection value.
    protobuf_value: &'static dyn ProtobufValue,
    // Indirection used to reach the owning `EnumDescriptor` without naming `E`.
    get_descriptor: &'static dyn GetEnumDescriptor,
}
// Equal when owned by the same enum descriptor (identity comparison)
// and carrying the same numeric value.
impl PartialEq for EnumValueDescriptor {
    fn eq(&self, other: &EnumValueDescriptor) -> bool {
        self.enum_descriptor() == other.enum_descriptor() && self.value() == other.value()
    }
}
// Hash only the numeric value; values from different enums may collide,
// which is fine — `eq` still distinguishes them.
impl Hash for EnumValueDescriptor {
    fn hash<H: Hasher>(&self, state: &mut H) {
        Hash::hash(&self.value(), state)
    }
}
// Compile-time check that `EnumValueDescriptor` is `Send + Sync`.
fn _assert_send_sync() {
    fn _assert_send_sync<T: Send + Sync>() {}
    _assert_send_sync::<EnumValueDescriptor>();
}
// Debug output elides `protobuf_value` (no `Debug` bound on the trait object).
impl fmt::Debug for EnumValueDescriptor {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EnumValueDescriptor")
            .field("proto", self.proto)
            .field("value", &"...")
            .finish()
    }
}
impl Copy for EnumValueDescriptor {}
impl EnumValueDescriptor {
    /// Name of enum variant as specified in proto file
    pub fn name(&self) -> &'static str {
        self.proto.get_name()
    }
    /// `i32` value of the enum variant
    pub fn value(&self) -> i32 {
        self.proto.get_number()
    }
    /// Convert to generic `ProtobufValue`
    pub fn protobuf_value(&self) -> &'static dyn ProtobufValue {
        self.protobuf_value
    }
    /// Get descriptor of enum holding this value.
    pub fn enum_descriptor(&self) -> &EnumDescriptor {
        self.get_descriptor.descriptor()
    }
    /// Convert this value descriptor into proper enum object.
    ///
    /// ```
    /// # use protobuf::well_known_types::NullValue;
    /// # use protobuf::ProtobufEnum;
    /// # use protobuf::reflect::EnumValueDescriptor;
    ///
    /// let value: &EnumValueDescriptor = NullValue::NULL_VALUE.descriptor();
    /// let null: Option<NullValue> = value.cast();
    /// assert_eq!(Some(NullValue::NULL_VALUE), null);
    /// ```
    pub fn cast<E: ProtobufEnum>(&self) -> Option<E> {
        self.enum_descriptor().cast_to_protobuf_enum::<E>(self.value())
    }
}
// Object-safe hook letting a value descriptor reach its owning `EnumDescriptor`
// (and construct typed enum values) without naming the concrete enum type `E`.
trait GetEnumDescriptor: Send + Sync + 'static {
    fn descriptor(&self) -> &EnumDescriptor;
    // Writes the enum object corresponding to `value` into `dest`.
    // Safety: `dest` must point at valid, writable storage for an `E`.
    unsafe fn copy_to(&self, value: i32, dest: *mut ());
}
// Zero-sized carrier of the enum type parameter for the trait object above.
struct GetDescriptorImpl<E: ProtobufEnum>(marker::PhantomData<E>);
impl<E: ProtobufEnum> GetEnumDescriptor for GetDescriptorImpl<E> {
    fn descriptor(&self) -> &EnumDescriptor {
        E::enum_descriptor_static()
    }
    unsafe fn copy_to(&self, value: i32, dest: *mut ()) {
        // Panics when `value` is not a known variant of `E`.
        let e = E::from_i32(value).expect("unknown value");
        (&e as *const E).copy_to(dest as *mut E, 1);
    }
}
/// Dynamic representation of enum type.
///
/// Can be used in reflective operations.
pub struct EnumDescriptor {
    // Fully qualified protobuf name, e.g. `my.package.MyEnum`.
    full_name: String,
    // Generated `.proto` metadata for the enum.
    proto: &'static EnumDescriptorProto,
    // One descriptor per variant, in `.proto` declaration order.
    values: Vec<EnumValueDescriptor>,
    /// Type id of `<E>`
    type_id: TypeId,
    /// Type id of `<ProtobufEnumOrUnknown<E>>`
    enum_or_unknown_type_id: TypeId,
    // On stable, typed values are constructed through this trait object;
    // on nightly the same is done via specialization (see `cast_impl`).
    #[cfg(not(rustc_nightly))]
    get_descriptor: &'static dyn GetEnumDescriptor,
    // Variant lookup tables: name -> index and number -> index into `values`.
    index_by_name: HashMap<String, usize>,
    index_by_number: HashMap<i32, usize>,
}
// Compact Debug: print only the full name, eliding the bulky fields.
impl fmt::Debug for EnumDescriptor {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EnumDescriptor")
            .field("full_name", &self.full_name)
            .field("..", &"..")
            .finish()
    }
}
/// Identity comparison: two enum descriptors are equal only when they are
/// literally the same object (same address); contents are never compared.
impl PartialEq for EnumDescriptor {
    fn eq(&self, other: &EnumDescriptor) -> bool {
        std::ptr::eq(self, other)
    }
}
impl EnumDescriptor {
    /// Enum name as given in `.proto` file
    pub fn name(&self) -> &'static str {
        self.proto.get_name()
    }
    /// Fully qualified protobuf name of enum
    pub fn full_name(&self) -> &str {
        &self.full_name[..]
    }
    /// Get `EnumDescriptor` object for given enum type
    pub fn for_type<E: ProtobufEnum>() -> &'static EnumDescriptor {
        E::enum_descriptor_static()
    }
    /// Join package, path-to-package and the enum's own name with dots,
    /// skipping empty components so no leading or doubled dots appear.
    fn compute_full_name(
        package: &str,
        path_to_package: &str,
        proto: &EnumDescriptorProto,
    ) -> String {
        let mut full_name = package.to_owned();
        if path_to_package.len() != 0 {
            if full_name.len() != 0 {
                full_name.push('.');
            }
            full_name.push_str(path_to_package);
        }
        if full_name.len() != 0 {
            full_name.push('.');
        }
        full_name.push_str(proto.get_name());
        full_name
    }
    /// Separate function to reduce generated code size bloat.
    /// Builds the name -> index and number -> index lookup tables.
    fn make_indices(proto: &EnumDescriptorProto) -> (HashMap<String, usize>, HashMap<i32, usize>) {
        let mut index_by_name = HashMap::new();
        let mut index_by_number = HashMap::new();
        for (i, v) in proto.value.iter().enumerate() {
            index_by_number.insert(v.get_number(), i);
            index_by_name.insert(v.get_name().to_string(), i);
        }
        (index_by_name, index_by_number)
    }
    /// Construct `EnumDescriptor` given enum name and `FileDescriptorProto`.
    ///
    /// This function is called from generated code, and should rarely be called directly.
    ///
    /// This function is not a part of public API.
    #[doc(hidden)]
    pub fn new<E>(name_in_file: &'static str, file: &'static FileDescriptorProto) -> EnumDescriptor
    where
        E: ProtobufEnum,
    {
        let (path_to_package, proto) = match find_message_or_enum(file, name_in_file) {
            (path_to_package, MessageOrEnum::Enum(e)) => (path_to_package, e),
            (_, MessageOrEnum::Message(_)) => panic!("not an enum"),
        };
        let (index_by_name, index_by_number) = EnumDescriptor::make_indices(proto);
        let proto_values = &proto.value;
        let code_values = E::values();
        // Generated code and `.proto` metadata must agree on the variant count.
        assert_eq!(proto_values.len(), code_values.len());
        let get_descriptor = &GetDescriptorImpl(marker::PhantomData::<E>);
        // Pair each `.proto` variant with its generated reflection value.
        let values = proto_values
            .iter()
            .zip(code_values)
            .map(|(p, c)| EnumValueDescriptor {
                proto: p,
                protobuf_value: c,
                get_descriptor,
            })
            .collect();
        EnumDescriptor {
            full_name: EnumDescriptor::compute_full_name(
                file.get_package(),
                &path_to_package,
                &proto,
            ),
            proto,
            values,
            type_id: TypeId::of::<E>(),
            enum_or_unknown_type_id: TypeId::of::<ProtobufEnumOrUnknown<E>>(),
            #[cfg(not(rustc_nightly))]
            get_descriptor,
            index_by_name,
            index_by_number,
        }
    }
    /// This enum values
    pub fn values(&self) -> &[EnumValueDescriptor] {
        &self.values
    }
    /// Find enum variant by name
    pub fn value_by_name<'a>(&'a self, name: &str) -> Option<&'a EnumValueDescriptor> {
        let &index = self.index_by_name.get(name)?;
        Some(&self.values[index])
    }
    /// Find enum variant by number
    pub fn value_by_number(&self, number: i32) -> Option<&EnumValueDescriptor> {
        let &index = self.index_by_number.get(&number)?;
        Some(&self.values[index])
    }
    /// Variant for `number`, falling back to the first declared variant
    /// (the proto3 default) when the number is unknown.
    pub(crate) fn value_by_number_or_default(&self, number: i32) -> &EnumValueDescriptor {
        match self.value_by_number(number) {
            Some(v) => v,
            None => &self.values()[0],
        }
    }
    /// Check if this enum descriptor corresponds given enum type
    ///
    /// ```
    /// # use protobuf::ProtobufEnum;
    /// # use protobuf::descriptor::field_descriptor_proto::Label;
    /// # use protobuf::reflect::EnumDescriptor;
    ///
    /// let descriptor: &EnumDescriptor = Label::enum_descriptor_static();
    ///
    /// assert!(descriptor.is::<Label>())
    /// ```
    pub fn is<E: ProtobufEnum>(&self) -> bool {
        TypeId::of::<E>() == self.type_id
    }
    /// Create enum object from given value.
    ///
    /// Type parameter `E` can be either [`ProtobufEnum`](crate::ProtobufEnum)
    /// or [`ProtobufEnumOrUnknown`](crate::ProtobufEnumOrUnknown).
    ///
    /// # Panics
    ///
    /// This operation panics of `E` is `ProtobufEnum` and `value` is unknown.
    pub(crate) fn cast<E: 'static>(&self, value: i32) -> Option<E> {
        if let Some(e) = self.cast_to_protobuf_enum(value) {
            return Some(e);
        }
        if let Some(e) = self.cast_to_protobuf_enum_or_unknown(value) {
            return Some(e);
        }
        None
    }
    /// Nightly: construct `E` via specialization, avoiding the trait object.
    #[cfg(rustc_nightly)]
    fn cast_to_protobuf_enum<E: 'static>(&self, value: i32) -> Option<E> {
        if TypeId::of::<E>() != self.type_id {
            return None;
        }
        Some(<E as cast_impl::CastValueToProtobufEnum>::cast(value))
    }
    /// Stable: construct `E` through the `GetEnumDescriptor` trait object.
    #[cfg(not(rustc_nightly))]
    fn cast_to_protobuf_enum<E: 'static>(&self, value: i32) -> Option<E> {
        if TypeId::of::<E>() != self.type_id {
            return None;
        }
        use std::mem;
        unsafe {
            // `copy_to` fully initializes `r` before it is read.
            // NOTE(review): `mem::uninitialized` is deprecated; migrate to
            // `MaybeUninit` when the toolchain baseline allows.
            let mut r = mem::uninitialized();
            self.get_descriptor
                .copy_to(value, &mut r as *mut E as *mut ());
            Some(r)
        }
    }
    #[cfg(rustc_nightly)]
    fn cast_to_protobuf_enum_or_unknown<E: 'static>(&self, value: i32) -> Option<E> {
        if TypeId::of::<E>() != self.enum_or_unknown_type_id {
            return None;
        }
        Some(<E as cast_impl::CastValueToProtobufEnumOrUnknown>::cast(value))
    }
    #[cfg(not(rustc_nightly))]
    fn cast_to_protobuf_enum_or_unknown<E: 'static>(&self, value: i32) -> Option<E> {
        // BUG FIX: this guard previously used `==`, which rejected exactly the
        // matching `ProtobufEnumOrUnknown<E>` type and let every *other* type
        // fall through to the unsafe i32 copy below. The nightly counterpart
        // above (and the stable-fix revision of this file) uses `!=`.
        if TypeId::of::<E>() != self.enum_or_unknown_type_id {
            return None;
        }
        use std::mem;
        use std::ptr;
        debug_assert_eq!(mem::size_of::<E>(), mem::size_of::<i32>());
        unsafe {
            // This works because `ProtobufEnumOrUnknown<E>` is `#[repr(transparent)]`
            let mut r = mem::uninitialized();
            ptr::copy(&value, &mut r as *mut E as *mut i32, 1);
            Some(r)
        }
    }
}
// Nightly-only: use specialization (`default fn`) to construct typed enum
// values directly from an `i32`, avoiding the stable trait-object path.
#[cfg(rustc_nightly)]
mod cast_impl {
    use super::*;
    // Cast a raw `i32` to `ProtobufEnumOrUnknown<E>`; resolved by specialization.
    pub(crate) trait CastValueToProtobufEnumOrUnknown: Sized {
        fn cast(value: i32) -> Self;
    }
    impl<T> CastValueToProtobufEnumOrUnknown for T {
        // Fallback impl: only reachable if the caller's TypeId check is wrong.
        default fn cast(_value: i32) -> T {
            unreachable!();
        }
    }
    impl<E: ProtobufEnum> CastValueToProtobufEnumOrUnknown for ProtobufEnumOrUnknown<E> {
        fn cast(value: i32) -> ProtobufEnumOrUnknown<E> {
            ProtobufEnumOrUnknown::from_i32(value)
        }
    }
    // Cast a raw `i32` to a plain `ProtobufEnum` value; panics on unknown values.
    pub(crate) trait CastValueToProtobufEnum: Sized {
        fn cast(value: i32) -> Self;
    }
    impl<T> CastValueToProtobufEnum for T {
        // Fallback impl: only reachable if the caller's TypeId check is wrong.
        default fn cast(_value: i32) -> T {
            unreachable!();
        }
    }
    impl<E: ProtobufEnum> CastValueToProtobufEnum for E {
        fn cast(value: i32) -> E {
            E::from_i32(value).expect(&format!("unknown enum value: {}", value))
        }
    }
}
Fix tests on stable
use std::any::TypeId;
use std::collections::HashMap;
use std::fmt;
use std::hash::Hash;
use std::hash::Hasher;
use std::marker;
use crate::descriptor::EnumDescriptorProto;
use crate::descriptor::EnumValueDescriptorProto;
use crate::descriptor::FileDescriptorProto;
use crate::enums::ProtobufEnum;
use crate::enums::ProtobufEnumOrUnknown;
use crate::reflect::find_message_or_enum::find_message_or_enum;
use crate::reflect::find_message_or_enum::MessageOrEnum;
use crate::reflect::ProtobufValue;
/// Description for enum variant.
///
/// Used in reflection.
#[derive(Clone)]
pub struct EnumValueDescriptor {
    // Generated `.proto` metadata for this variant (name, number).
    proto: &'static EnumValueDescriptorProto,
    // The variant as a generic reflection value.
    protobuf_value: &'static dyn ProtobufValue,
    // Indirection used to reach the owning `EnumDescriptor` without naming `E`.
    get_descriptor: &'static dyn GetEnumDescriptor,
}
// Equal when owned by the same enum descriptor (identity comparison)
// and carrying the same numeric value.
impl PartialEq for EnumValueDescriptor {
    fn eq(&self, other: &EnumValueDescriptor) -> bool {
        self.enum_descriptor() == other.enum_descriptor() && self.value() == other.value()
    }
}
// Hash only the numeric value; values from different enums may collide,
// which is fine — `eq` still distinguishes them.
impl Hash for EnumValueDescriptor {
    fn hash<H: Hasher>(&self, state: &mut H) {
        Hash::hash(&self.value(), state)
    }
}
// Compile-time check that `EnumValueDescriptor` is `Send + Sync`.
fn _assert_send_sync() {
    fn _assert_send_sync<T: Send + Sync>() {}
    _assert_send_sync::<EnumValueDescriptor>();
}
// Debug output elides `protobuf_value` (no `Debug` bound on the trait object).
impl fmt::Debug for EnumValueDescriptor {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EnumValueDescriptor")
            .field("proto", self.proto)
            .field("value", &"...")
            .finish()
    }
}
impl Copy for EnumValueDescriptor {}
impl EnumValueDescriptor {
    /// Name of enum variant as specified in proto file
    pub fn name(&self) -> &'static str {
        self.proto.get_name()
    }
    /// `i32` value of the enum variant
    pub fn value(&self) -> i32 {
        self.proto.get_number()
    }
    /// Convert to generic `ProtobufValue`
    pub fn protobuf_value(&self) -> &'static dyn ProtobufValue {
        self.protobuf_value
    }
    /// Get descriptor of enum holding this value.
    pub fn enum_descriptor(&self) -> &EnumDescriptor {
        self.get_descriptor.descriptor()
    }
    /// Convert this value descriptor into proper enum object.
    ///
    /// ```
    /// # use protobuf::well_known_types::NullValue;
    /// # use protobuf::ProtobufEnum;
    /// # use protobuf::reflect::EnumValueDescriptor;
    ///
    /// let value: &EnumValueDescriptor = NullValue::NULL_VALUE.descriptor();
    /// let null: Option<NullValue> = value.cast();
    /// assert_eq!(Some(NullValue::NULL_VALUE), null);
    /// ```
    pub fn cast<E: ProtobufEnum>(&self) -> Option<E> {
        self.enum_descriptor().cast_to_protobuf_enum::<E>(self.value())
    }
}
// Object-safe hook letting a value descriptor reach its owning `EnumDescriptor`
// (and construct typed enum values) without naming the concrete enum type `E`.
trait GetEnumDescriptor: Send + Sync + 'static {
    fn descriptor(&self) -> &EnumDescriptor;
    // Writes the enum object corresponding to `value` into `dest`.
    // Safety: `dest` must point at valid, writable storage for an `E`.
    unsafe fn copy_to(&self, value: i32, dest: *mut ());
}
// Zero-sized carrier of the enum type parameter for the trait object above.
struct GetDescriptorImpl<E: ProtobufEnum>(marker::PhantomData<E>);
impl<E: ProtobufEnum> GetEnumDescriptor for GetDescriptorImpl<E> {
    fn descriptor(&self) -> &EnumDescriptor {
        E::enum_descriptor_static()
    }
    unsafe fn copy_to(&self, value: i32, dest: *mut ()) {
        // Panics when `value` is not a known variant of `E`.
        let e = E::from_i32(value).expect("unknown value");
        (&e as *const E).copy_to(dest as *mut E, 1);
    }
}
/// Dynamic representation of enum type.
///
/// Can be used in reflective operations.
pub struct EnumDescriptor {
    // Fully qualified protobuf name, e.g. `my.package.MyEnum`.
    full_name: String,
    // Generated `.proto` metadata for the enum.
    proto: &'static EnumDescriptorProto,
    // One descriptor per variant, in `.proto` declaration order.
    values: Vec<EnumValueDescriptor>,
    /// Type id of `<E>`
    type_id: TypeId,
    /// Type id of `<ProtobufEnumOrUnknown<E>>`
    enum_or_unknown_type_id: TypeId,
    // On stable, typed values are constructed through this trait object;
    // on nightly the same is done via specialization (see `cast_impl`).
    #[cfg(not(rustc_nightly))]
    get_descriptor: &'static dyn GetEnumDescriptor,
    // Variant lookup tables: name -> index and number -> index into `values`.
    index_by_name: HashMap<String, usize>,
    index_by_number: HashMap<i32, usize>,
}
// Compact Debug: print only the full name, eliding the bulky fields.
impl fmt::Debug for EnumDescriptor {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EnumDescriptor")
            .field("full_name", &self.full_name)
            .field("..", &"..")
            .finish()
    }
}
/// Identity comparison: message descriptor are equal if their addresses are equal
impl PartialEq for EnumDescriptor {
    fn eq(&self, other: &EnumDescriptor) -> bool {
        self as *const EnumDescriptor == other as *const EnumDescriptor
    }
}
impl EnumDescriptor {
    /// Enum name as given in `.proto` file
    pub fn name(&self) -> &'static str {
        self.proto.get_name()
    }
    /// Fully qualified protobuf name of enum
    pub fn full_name(&self) -> &str {
        &self.full_name[..]
    }
    /// Get `EnumDescriptor` object for given enum type
    pub fn for_type<E: ProtobufEnum>() -> &'static EnumDescriptor {
        E::enum_descriptor_static()
    }
    // Join package, path-to-package and the enum's own name with dots,
    // skipping empty components so no leading or doubled dots appear.
    fn compute_full_name(
        package: &str,
        path_to_package: &str,
        proto: &EnumDescriptorProto,
    ) -> String {
        let mut full_name = package.to_owned();
        if path_to_package.len() != 0 {
            if full_name.len() != 0 {
                full_name.push('.');
            }
            full_name.push_str(path_to_package);
        }
        if full_name.len() != 0 {
            full_name.push('.');
        }
        full_name.push_str(proto.get_name());
        full_name
    }
    /// Separate function to reduce generated code size bloat.
    // Builds the name -> index and number -> index lookup tables.
    fn make_indices(proto: &EnumDescriptorProto) -> (HashMap<String, usize>, HashMap<i32, usize>) {
        let mut index_by_name = HashMap::new();
        let mut index_by_number = HashMap::new();
        for (i, v) in proto.value.iter().enumerate() {
            index_by_number.insert(v.get_number(), i);
            index_by_name.insert(v.get_name().to_string(), i);
        }
        (index_by_name, index_by_number)
    }
    /// Construct `EnumDescriptor` given enum name and `FileDescriptorProto`.
    ///
    /// This function is called from generated code, and should rarely be called directly.
    ///
    /// This function is not a part of public API.
    #[doc(hidden)]
    pub fn new<E>(name_in_file: &'static str, file: &'static FileDescriptorProto) -> EnumDescriptor
    where
        E: ProtobufEnum,
    {
        let (path_to_package, proto) = match find_message_or_enum(file, name_in_file) {
            (path_to_package, MessageOrEnum::Enum(e)) => (path_to_package, e),
            (_, MessageOrEnum::Message(_)) => panic!("not an enum"),
        };
        let (index_by_name, index_by_number) = EnumDescriptor::make_indices(proto);
        let proto_values = &proto.value;
        let code_values = E::values();
        // Generated code and `.proto` metadata must agree on the variant count.
        assert_eq!(proto_values.len(), code_values.len());
        let get_descriptor = &GetDescriptorImpl(marker::PhantomData::<E>);
        // Pair each `.proto` variant with its generated reflection value.
        let values = proto_values
            .iter()
            .zip(code_values)
            .map(|(p, c)| EnumValueDescriptor {
                proto: p,
                protobuf_value: c,
                get_descriptor,
            })
            .collect();
        EnumDescriptor {
            full_name: EnumDescriptor::compute_full_name(
                file.get_package(),
                &path_to_package,
                &proto,
            ),
            proto,
            values,
            type_id: TypeId::of::<E>(),
            enum_or_unknown_type_id: TypeId::of::<ProtobufEnumOrUnknown<E>>(),
            #[cfg(not(rustc_nightly))]
            get_descriptor,
            index_by_name,
            index_by_number,
        }
    }
    /// This enum values
    pub fn values(&self) -> &[EnumValueDescriptor] {
        &self.values
    }
    /// Find enum variant by name
    pub fn value_by_name<'a>(&'a self, name: &str) -> Option<&'a EnumValueDescriptor> {
        let &index = self.index_by_name.get(name)?;
        Some(&self.values[index])
    }
    /// Find enum variant by number
    pub fn value_by_number(&self, number: i32) -> Option<&EnumValueDescriptor> {
        let &index = self.index_by_number.get(&number)?;
        Some(&self.values[index])
    }
    // Variant for `number`, falling back to the first declared variant
    // when the number is unknown.
    pub(crate) fn value_by_number_or_default(&self, number: i32) -> &EnumValueDescriptor {
        match self.value_by_number(number) {
            Some(v) => v,
            None => &self.values()[0],
        }
    }
    /// Check if this enum descriptor corresponds given enum type
    ///
    /// ```
    /// # use protobuf::ProtobufEnum;
    /// # use protobuf::descriptor::field_descriptor_proto::Label;
    /// # use protobuf::reflect::EnumDescriptor;
    ///
    /// let descriptor: &EnumDescriptor = Label::enum_descriptor_static();
    ///
    /// assert!(descriptor.is::<Label>())
    /// ```
    pub fn is<E: ProtobufEnum>(&self) -> bool {
        TypeId::of::<E>() == self.type_id
    }
    /// Create enum object from given value.
    ///
    /// Type parameter `E` can be either [`ProtobufEnum`](crate::ProtobufEnum)
    /// or [`ProtobufEnumOrUnknown`](crate::ProtobufEnumOrUnknown).
    ///
    /// # Panics
    ///
    /// This operation panics of `E` is `ProtobufEnum` and `value` is unknown.
    pub(crate) fn cast<E: 'static>(&self, value: i32) -> Option<E> {
        if let Some(e) = self.cast_to_protobuf_enum(value) {
            return Some(e);
        }
        if let Some(e) = self.cast_to_protobuf_enum_or_unknown(value) {
            return Some(e);
        }
        None
    }
    // Nightly: construct `E` via specialization, avoiding the trait object.
    #[cfg(rustc_nightly)]
    fn cast_to_protobuf_enum<E: 'static>(&self, value: i32) -> Option<E> {
        if TypeId::of::<E>() != self.type_id {
            return None;
        }
        Some(<E as cast_impl::CastValueToProtobufEnum>::cast(value))
    }
    // Stable: construct `E` through the `GetEnumDescriptor` trait object.
    #[cfg(not(rustc_nightly))]
    fn cast_to_protobuf_enum<E: 'static>(&self, value: i32) -> Option<E> {
        if TypeId::of::<E>() != self.type_id {
            return None;
        }
        use std::mem;
        unsafe {
            // `copy_to` fully initializes `r` before it is read.
            // NOTE(review): `mem::uninitialized` is deprecated; migrate to
            // `MaybeUninit` when the toolchain baseline allows.
            let mut r = mem::uninitialized();
            self.get_descriptor
                .copy_to(value, &mut r as *mut E as *mut ());
            Some(r)
        }
    }
    #[cfg(rustc_nightly)]
    fn cast_to_protobuf_enum_or_unknown<E: 'static>(&self, value: i32) -> Option<E> {
        if TypeId::of::<E>() != self.enum_or_unknown_type_id {
            return None;
        }
        Some(<E as cast_impl::CastValueToProtobufEnumOrUnknown>::cast(value))
    }
    #[cfg(not(rustc_nightly))]
    fn cast_to_protobuf_enum_or_unknown<E: 'static>(&self, value: i32) -> Option<E> {
        if TypeId::of::<E>() != self.enum_or_unknown_type_id {
            return None;
        }
        use std::mem;
        use std::ptr;
        debug_assert_eq!(mem::size_of::<E>(), mem::size_of::<i32>());
        unsafe {
            // This works because `ProtobufEnumOrUnknown<E>` is `#[repr(transparent)]`
            let mut r = mem::uninitialized();
            ptr::copy(&value, &mut r as *mut E as *mut i32, 1);
            Some(r)
        }
    }
}
// Nightly-only: use specialization (`default fn`) to construct typed enum
// values directly from an `i32`, avoiding the stable trait-object path.
#[cfg(rustc_nightly)]
mod cast_impl {
    use super::*;
    // Cast a raw `i32` to `ProtobufEnumOrUnknown<E>`; resolved by specialization.
    pub(crate) trait CastValueToProtobufEnumOrUnknown: Sized {
        fn cast(value: i32) -> Self;
    }
    impl<T> CastValueToProtobufEnumOrUnknown for T {
        // Fallback impl: only reachable if the caller's TypeId check is wrong.
        default fn cast(_value: i32) -> T {
            unreachable!();
        }
    }
    impl<E: ProtobufEnum> CastValueToProtobufEnumOrUnknown for ProtobufEnumOrUnknown<E> {
        fn cast(value: i32) -> ProtobufEnumOrUnknown<E> {
            ProtobufEnumOrUnknown::from_i32(value)
        }
    }
    // Cast a raw `i32` to a plain `ProtobufEnum` value; panics on unknown values.
    pub(crate) trait CastValueToProtobufEnum: Sized {
        fn cast(value: i32) -> Self;
    }
    impl<T> CastValueToProtobufEnum for T {
        // Fallback impl: only reachable if the caller's TypeId check is wrong.
        default fn cast(_value: i32) -> T {
            unreachable!();
        }
    }
    impl<E: ProtobufEnum> CastValueToProtobufEnum for E {
        fn cast(value: i32) -> E {
            // `unwrap_or_else` + `panic!` keeps the message formatting off the
            // success path; `expect(&format!(...))` built the string eagerly
            // on every call (clippy::expect_fun_call). Panic message unchanged.
            E::from_i32(value).unwrap_or_else(|| panic!("unknown enum value: {}", value))
        }
    }
}
|
#[crate_id="inifile#1.0"];
#[crate_type = "lib"];
#[license = "MIT"];
#[desc = "Library for simple INI file management" ];
#[comment = "Example of library: INI file management"];
//! INI file management, partial implementation of Python API.
//!
//! Tested with rust-0.10-pre
//!
//! @author Eliovir <http://github.com/~eliovir>
//!
//! @license MIT license <http://www.opensource.org/licenses/mit-license.php>
//!
//! @see http://docs.python.org/2/library/configparser.html
//!
//! @since 2013-12-18
//!
//! @todo eddyb: you may want that to be Option<&'a str> so you can return None when the option isn't present. Option<T> can be either Some(T) or None. Option<~T> and Option<&T> are nullable pointers semantically (and optimized as such)
extern crate collections;
extern crate test;
use collections::hashmap::HashMap;
use std::from_str::FromStr;
use std::io::BufferedReader;
use std::io::fs::File;
use std::path::Path;
use std::fmt;
#[cfg(test)]
use test::BenchHarness;
/// In-memory representation of an INI file: sections of key=value options,
/// with declaration order and comments preserved alongside the hash maps.
pub struct IniFile {
    /// Comments on sections and options
    comments: HashMap<~str, HashMap<~str, ~str>>,
    /// Option names, used to keep order (as HashMap doesn't).
    options: ~[~[~str]],
    /// INI structure: sections contain options (name=>value)
    opts: HashMap<~str, HashMap<~str, ~str>>,
    /// File path
    path: Path,
    /// Section names, used to keep order (as HashMap doesn't).
    sections: ~[~str]
}
/**
* IniFile implements a basic configuration which provides a structure similar to what's found in Microsoft Windows INI files.
* You can use this to write programs which can be customized by end users easily.
*/
impl IniFile {
/**
* Add a section named section to the instance.
* If a section by the given name already exists, fail!()
*/
pub fn add_section(&mut self, section: &str) {
if !self.has_section(section.to_owned()) {
self.comments.insert(section.to_owned(), HashMap::new());
self.opts.insert(section.to_owned(), HashMap::new());
self.sections.push(section.to_owned());
self.options.push(~[]);
} else {
fail!("The section {:?} already exists!", section);
}
}
/**
* Getter on filepath.
*/
pub fn filepath(&self) -> ~str {
format!("{}", self.path.display())
}
/**
* Get an option value for the named section.
*/
pub fn get(&self, section: &str, option: &str) -> ~str {
//pub fn get<'a>(&'a self, section: &str, option: &str) -> &'a str {
if !self.has_option(section, option) {
()
}
self.opts.get(§ion.to_owned()).get(&option.to_owned()).to_owned()
}
/**
* A convenience method which coerces the option in the specified section to a boolean.
* Note that the accepted values for the option are '1', 'yes', 'true', and 'on', which cause this method to return True, and '0', 'no', 'false', and 'off', which cause it to return False.
* @todo These string values are checked in a case-insensitive manner.
*/
pub fn get_bool(&self, section: &str, option: &str) -> bool {
let value = self.get(section, option);
match value {
~"1" | ~"yes" | ~"true" | ~"T" | ~"on" => true,
~"0" | ~"no" | ~"false" | ~"F" | ~"off" => false,
_ => fail!("{} is not a boolean.", value)
}
}
/**
* A convenience method which coerces the option in the specified section to a float f64.
*/
pub fn get_f64(&self, section: &str, option: &str) -> f64 {
let value = self.get(section, option);
let x: Option<f64> = FromStr::from_str(value);
match x {
None => fail!("{} is not a float.", value),
_ => x.unwrap()
}
}
/**
* A convenience method which coerces the option in the specified section to an integer.
*/
pub fn get_int(&self, section: &str, option: &str) -> int {
let value = self.get(section, option);
// https://github.com/mozilla/rust/wiki/Doc-FAQ-Cheatsheet#string-to-int
let x: Option<int> = FromStr::from_str(value);
match x {
None => fail!("{} is not an integer.", value),
_ => x.unwrap()
}
}
/**
* Indicates whether the given section exists and contains the given option.
*/
pub fn has_option(&self, section: &str, option: &str) -> bool {
self.has_section(section) &&
self.opts.get(§ion.to_owned()).contains_key(&option.to_owned())
}
/**
* Indicates whether the named section is present in the configuration.
*/
pub fn has_section(&self, section: &str) -> bool {
self.opts.contains_key(§ion.to_owned())
}
pub fn new() -> IniFile {
IniFile { comments: HashMap::new(), options: ~[~[]], path: Path::new(""), opts: HashMap::new(), sections: ~[]}
}
/**
* Return a list of options available in the specified section.
*/
pub fn options(&self, section: ~str) -> ~[~str] {
let section_index = self.sections.position_elem(§ion).unwrap();
self.options[section_index].clone()
}
/**
* Read and parse configuration data from filepath.
*/
pub fn read(&mut self, filepath: &str) {
self.path = Path::new(filepath);
let file = File::open(&self.path);
match file {
Err(e) => fail!("open of {:?} failed: {}", self.path, e),
_ => debug!("open of {:?} succeeded", self.path)
}
let mut reader = BufferedReader::new(file);
let mut lines: ~[~str] = ~[];
for line in reader.lines() {
lines.push(line);
}
self.read_string(lines);
}
/**
* Parse configuration data from a vector of strings (file lines).
*/
pub fn read_string(&mut self, lines: ~[~str]) {
let mut section: ~str = ~"Default";
let mut comment_lines: ~str = ~"";
for line in lines.iter() {
let mut line_len = line.len();
if line_len > 0 && line.slice_chars(line_len - 1, line_len) == "\n" {
line_len = line_len - 1;
}
if line_len == 0 {
comment_lines.push_str(line.clone());
continue;
}
if line.slice_chars(0, 1) == "#" ||
line.slice_chars(0, 1) == ";" {
comment_lines.push_str(line.clone());
continue;
}
if line.slice_chars(0, 1) == "[" {
section = line.slice_chars(1, line_len - 1).to_owned();
if !self.opts.contains_key(§ion) {
self.add_section(section.clone());
self.comments.get_mut(§ion).insert(~"__section_comment__", comment_lines);
comment_lines = ~"";
}
continue;
}
let index: uint = line.find_str("=").unwrap();
let optkey: ~str = line.slice_chars(0, index).to_owned();
let optval: ~str = line.slice_chars(index + 1, line_len).to_owned();
self.comments.get_mut(§ion).insert(optkey.clone(), comment_lines);
comment_lines = ~"";
self.opts.get_mut(§ion).insert(optkey.clone(), optval);
let section_index = self.sections.position_elem(§ion).unwrap();
self.options[section_index].push(optkey.clone());
}
}
/**
* Remove the specified option from the specified section. If the section does not exist, fails.
* If the option existed to be removed, return True; otherwise return False.
*/
pub fn remove_option(&mut self, section: ~str, option: ~str) -> bool {
if !self.has_section(section.clone()) {
fail!("Section [{:?}] does not exist!");
}
/*
if !self.has_option(section.to_owned(), option.to_owned()) {
false
}
*/
let section_index = self.sections.position_elem(§ion).unwrap();
self.options[section_index].remove(section_index);
self.comments.get_mut(§ion).pop(&option);
self.opts.get_mut(§ion).pop(&option);
true
}
/**
 * Remove the specified section from the configuration.
 * If the section in fact existed, return True; otherwise return False.
 */
pub fn remove_section(&mut self, section: ~str) -> bool {
/*
if (!self.has_section(section.clone())) {
false
}
*/
// Drop the section's option map and comment map (pop = remove-by-key here).
self.opts.pop(§ion);
self.comments.pop(§ion);
// http://static.rust-lang.org/doc/0.8/std/vec.html
// NOTE(review): unwrap() fails the task if the section is unknown, since the
// existence guard above is commented out — so the "return False" branch is dead.
let index = self.sections.position_elem(§ion).unwrap();
self.sections.remove(index);
self.options.remove(index);
true
}
/**
 * Save the current configuration into the original file.
 */
// Convenience wrapper: serializes via write() to the path this instance was
// read from (or the path assigned via set_path()).
pub fn save(&self) {
self.write(self.filepath());
}
/**
 * Return a list of the available sections.
 */
// Returns the ordered section-name vector (not the HashMap keys) so that
// insertion order is preserved — see the retired HashMap-based draft below.
pub fn sections(&self) -> ~[~str] {
/*
let mut sections: ~[~str] = ~[];
self.opts.iter().advance(|(k, _)| {sections.push(k.to_owned()); true});
sections
*/
self.sections.clone()
}
/**
 * If the given section exists, set the given option to the specified value; otherwise fail!().
 */
pub fn set(&mut self, section: ~str, option: ~str, value: ~str) {
if !self.has_section(section.to_owned()) {
//self.add_section(section.to_owned());
// NOTE(review): "{:?}" has no matching argument — the message never shows the section.
fail!("Section [{:?}] does not exist!");
}
if !self.has_option(section.to_owned(), option.to_owned()) {
// New option: insert the value and record the key for ordered output.
self.opts.get_mut(§ion).insert(option.clone(), value);
let section_index = self.sections.position_elem(§ion).unwrap();
self.options[section_index].push(option.clone());
} else {
// Existing option: swap replaces the stored value in place.
self.opts.get_mut(§ion).swap(option, value);
}
}
/**
 * Redefine file path.
 */
// Only changes where save() will write; it does not re-read the new file.
pub fn set_path(&mut self, filepath: Path) {
self.path = filepath;
}
/**
 * Write a representation of the configuration to the specified file path.
 * This representation can be parsed by a future read() call.
 */
// Serialization goes through the fmt::Show impl (to_str), which re-emits
// comments, section headers and key=value lines in original order.
// NOTE(review): a write failure is only printed, not propagated to the caller.
pub fn write(&self, filepath: &str) {
// http://static.rust-lang.org/doc/master/std/io/index.html
let mut file = File::create(&Path::new(filepath));
match file.write(self.to_str().into_bytes()) {
Ok(()) => debug!("INI file {:?} written", self.path),
Err(e) => println!("failed to write to {:?}: {}", self.path, e),
}
}
}
/**
* Operator overloading
* @see http://maniagnosis.crsr.net/2013/04/operator-overloading-in-rust.html
*/
impl fmt::Show for IniFile {
// Renders the whole configuration back to INI text: for each section (in
// recorded order) emit its stored leading comment block, the "[name]" header,
// then each option's comment block and "key=value" line. This is what
// to_str()/write() rely on for round-tripping a parsed file.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut lines = ~"";
let sections = self.sections().clone();
for section in sections.iter() {
// "__section_comment__" is the reserved key holding comments that preceded the header.
if self.comments.contains_key(section) && self.comments.get(section).contains_key(&~"__section_comment__") {
lines.push_str(self.comments.get(section).get(&~"__section_comment__").clone());
}
lines.push_str(format!("[{}]\n", section.to_owned()));
let options = self.options(section.to_owned()).clone();
for key in options.iter() {
if self.comments.contains_key(section) && self.comments.get(section).contains_key(key) {
lines.push_str(self.comments.get(section).get(key).clone());
}
lines.push_str(format!("{}={}\n", key.to_owned(), self.get(section.to_owned(), key.to_owned())));
}
}
write!(f.buf, "{}", lines)
}
}
#[cfg(test)]
// Integration-style test suite: most cases parse the on-disk fixture
// data/config.ini, so they require that file to be present relative to the
// working directory. write()/save() additionally create and then delete
// data/write_test.ini and data/save_test.ini.
mod tests {
use collections::hashmap::HashMap;
use std::io::BufferedReader;
use std::io::fs;
use std::io::fs::File;
use std::path::Path;
#[test]
// NOTE(review): despite the name/message, the expected default path is "."
// (how Path::new("") displays), not the empty string.
fn defaultFilepathIsEmpty() {
let ini = super::IniFile::new();
let expected = ".";
let found = ini.filepath();
assert!(expected == found, format!("Default file path must be \"\", not \"{}\".", found));
}
#[test]
fn filepath() {
let mut ini = super::IniFile::new();
let filepath = "data/config.ini";
ini.read(filepath);
let expected = "data/config.ini";
let found=ini.filepath();
assert!(expected == found, format!("Default file path must be \"{}\", not \"{}\".", expected, found));
}
#[test]
fn sections_length() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let expected = 5;
let sections = ini.sections();
let found = sections.len();
assert!(expected == found, format!("{:u} sections are expected, not {:u}.", expected, found));
}
#[test]
fn sections_names() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let expected = ~[~"section1", ~"section2", ~"Booleans", ~"Integers", ~"Floats"];
let found = ini.sections();
assert!(expected == found, format!("Sections must be \"{:?}\", not {:?}.", expected, found));
}
#[test]
fn has_option_true() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let found = ini.has_option("section1", "value11");
assert!(found, "Option \"value11\" in section [section1] must be found!");
}
#[test]
fn has_option_false() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let found = ini.has_option("section1", "unknown key");
assert!(!found, "Option \"unknown key\" in section [section1] must not be found!");
}
#[test]
fn has_section_true() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let found = ini.has_section("section1");
assert!(found, "Section section1 must be found!");
}
#[test]
fn has_section_false() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let found = ini.has_section("unknown section");
assert!(!found, "Section \"unknown section\" must not be found!");
}
#[test]
fn get() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let expected = "string 11";
let found = ini.get("section1", "value11");
assert!(expected == found, format!("[section1] value11 must be \"{}\", not \"{}\".", expected, found));
}
#[test]
fn get_bool_true() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let options = ["true1", "true2", "true3"];
for key in options.iter() {
let found = ini.get_bool("Booleans", key.to_owned());
assert!(found, format!("[Booleans] {:?} must be true.", key));
}
}
#[test]
fn get_bool_false() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let options = ["false1", "false2", "false3"];
for key in options.iter() {
let found = ini.get_bool("Booleans", key.to_owned());
assert!(!found, format!("[Booleans] {:?} must be false.", key));
}
}
#[test]
#[should_fail]
fn get_bool_fail() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.get_bool("section1", "value11");
}
#[test]
fn get_int() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let mut test: HashMap<~str, int> = HashMap::new();
test.insert(~"integer0", 0i);
test.insert(~"integer1", 1i);
test.insert(~"integer2", 2i);
test.insert(~"integer3", 03i);
for (key, expected) in test.iter() {
let found = ini.get_int("Integers", key.to_owned());
assert!((expected*1) == found,
format!("[Integers] {:?} must be \"{:?}\", not \"{:?}\".", key, expected, found));
}
}
#[test]
#[should_fail]
fn get_int_fail() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.get_int("section1", "value11");
}
#[test]
fn get_f64() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let mut test: HashMap<~str, f64> = HashMap::new();
test.insert(~"float01", 0.1f64);
test.insert(~"float11", 1.1f64);
test.insert(~"float20", 2.0f64);
test.insert(~"float30", 3.0f64);
for (key, expected) in test.iter() {
let found = ini.get_f64("Floats", key.to_owned());
assert!((expected*1.0f64) == found,
format!("[Floats] {:?} must be \"{:?}\", not \"{:?}\".", key, expected, found));
}
}
#[test]
fn add_section() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let expected = ~[~"section1", ~"section2", ~"Booleans", ~"Integers", ~"Floats"];
let found = ini.sections();
assert!(expected == found, format!("Sections must be \"{:?}\", not {:?}.", expected, found));
ini.add_section("New section");
let expected2 = ~[~"section1", ~"section2", ~"Booleans", ~"Integers", ~"Floats", ~"New section"];
let found2 = ini.sections();
assert!(expected2 == found2, format!("Sections must be \"{:?}\", not {:?}.", expected2, found2));
}
#[test]
#[should_fail]
fn add_section_twice() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.add_section("New section");
ini.add_section("New section");
}
#[test]
fn remove_section() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.remove_section(~"section1");
let expected = ~[~"section2", ~"Booleans", ~"Integers", ~"Floats"];
let found = ini.sections();
assert!(expected == found, format!("Sections must be \"{:?}\", not {:?}.", expected, found));
}
#[test]
fn set() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.set(~"section1", ~"value2", ~"string 2");
let expected = "string 2";
let found = ini.get("section1", "value2");
assert!(expected == found, format!("[section1] value2 must be \"{}\", not \"{}\".", expected, found));
}
#[test]
fn options() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let expected = ~[~"value11", ~"value"];
let found = ini.options(~"section1");
assert!(expected == found, format!("Items of [section1] must be \"{:?}\", not {:?}.", expected, found));
}
#[test]
// Round-trip check: to_str() must reproduce the fixture file byte for byte.
fn to_str() {
let filepath = "data/config.ini";
let path = Path::new(filepath);
let file = File::open(&path);
match file {
Err(e) => fail!("open of {:?} failed: {}", path, e),
_ => debug!("open of {:?} succeeded", path)
}
let mut reader = BufferedReader::new(file);
let mut lines: ~[~str] = ~[];
for line in reader.lines() {
lines.push(line);
}
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let found = ini.to_str();
let expected = lines.concat();
assert_eq!(expected, found);
}
#[test]
fn write() {
use std::task;
use std::any::Any;
// Copy config.ini to write_test.ini using `write()`.
let writepath = "data/write_test.ini";
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.write(writepath);
// Check that the new file exists
let path = Path::new(writepath);
assert!(path.exists(), format!("{} should exist after writing inifile!", writepath));
// Check the contents
let mut ini2 = super::IniFile::new();
ini2.read(writepath);
let found = ini2.to_str();
let expected = ini.to_str();
assert_eq!(expected, found);
// Clean
assert!(path.exists(), format!("{} should exist after reading the new inifile!", writepath));
let result: Result<(), ~Any> = task::try(proc() {
match fs::unlink(&path) {
Err(e) => fail!("open of {:?} failed: {}", path, e),
_ => debug!("open of {:?} succeeded", path)
}
});
assert!(!result.is_err(), format!("Unlinking {} should not fail!", writepath));
}
#[test]
// Builds a config in memory, saves it to data/save_test.ini, re-reads the raw
// bytes to check serialization, then deletes the file.
fn save() {
let filepath = ~"data/save_test.ini";
let path = Path::new(filepath);
if path.exists() {
println!("The file {:?} should not exist before test::save() is executed!", path);
}
let mut ini = super::IniFile::new();
ini.add_section("section1");
ini.set(~"section1", ~"key1", ~"value1");
ini.set_path(path.clone());
ini.save();
let file = File::open(&path);
match file {
Err(e) => fail!("open of {:?} failed: {}", path, e),
_ => debug!("open of {:?} succeeded", path)
}
let mut reader = BufferedReader::new(file);
let mut lines: ~[~str] = ~[];
for line in reader.lines() {
lines.push(line);
}
let found = lines.concat();
let expected = ~"[section1]\nkey1=value1\n";
assert_eq!(expected, found);
match fs::unlink(&path) {
Err(e) => fail!("open of {:?} failed: {}", path, e),
_ => debug!("open of {:?} succeeded", path)
}
}
}
#[bench]
// Benchmarks a full parse of the fixture file per iteration; dominated by
// file I/O, so it measures read()+read_string() together, not parsing alone.
fn bench_inifile(b: &mut BenchHarness) {
b.iter(|| {
let mut ini = IniFile::new();
ini.read("data/config.ini");
});
}
Changes to version 0.10-pre (6cbba7c 2014-03-03 07:51:40)
#[crate_id="inifile#1.0"];
#[crate_type = "lib"];
#[license = "MIT"];
#[desc = "Library for simple INI file management" ];
#[comment = "Example of library: INI file management"];
//! INI file management, partial implementation of Python API.
//!
//! Tested with rust-0.10-pre
//!
//! @author Eliovir <http://github.com/~eliovir>
//!
//! @license MIT license <http://www.opensource.org/licenses/mit-license.php>
//!
//! @see http://docs.python.org/2/library/configparser.html
//!
//! @since 2013-12-18
//!
//! @todo eddyb: you may want that to be Option<&'a str> so you can return None when the option isn't present. Option<T> can be either Some(T) or None. Option<~T> and Option<&T> are nullable pointers semantically (and optimized as such)
extern crate collections;
extern crate test;
use collections::hashmap::HashMap;
use std::from_str::FromStr;
use std::io::BufferedReader;
use std::io::fs::File;
use std::path::Path;
use std::fmt;
#[cfg(test)]
use test::BenchHarness;
// In-memory model of one INI file. The two HashMaps hold the data; the two
// vectors (`sections`, `options`) exist solely to remember insertion order,
// which HashMap does not. Invariant: sections[i] names the section whose
// ordered option keys live in options[i].
pub struct IniFile {
/// Comments on sections and options
comments: HashMap<~str, HashMap<~str, ~str>>,
/// Option names, used to keep order (as HashMap doesn't).
options: ~[~[~str]],
/// INI structure: sections contain options (name=>value)
opts: HashMap<~str, HashMap<~str, ~str>>,
/// File path
path: Path,
/// Section names, used to keep order (as HashMap doesn't).
sections: ~[~str]
}
/**
* IniFile implements a basic configuration which provides a structure similar to what's found in Microsoft Windows INI files.
* You can use this to write programs which can be customized by end users easily.
*/
impl IniFile {
// NOTE(review): throughout this impl the token "§ion" appears where
// "&section" was clearly intended — &sect; HTML-entity corruption. The file
// needs that textual repair before it can compile under its original toolchain.
/**
 * Add a section named section to the instance.
 * If a section by the given name already exists, fail!()
 */
pub fn add_section(&mut self, section: &str) {
if !self.has_section(section.to_owned()) {
self.comments.insert(section.to_owned(), HashMap::new());
self.opts.insert(section.to_owned(), HashMap::new());
self.sections.push(section.to_owned());
self.options.push(~[]);
} else {
fail!("The section {:?} already exists!", section);
}
}
/**
 * Getter on filepath.
 */
pub fn filepath(&self) -> ~str {
format!("{}", self.path.display())
}
/**
 * Get an option value for the named section.
 */
pub fn get(&self, section: &str, option: &str) -> ~str {
//pub fn get<'a>(&'a self, section: &str, option: &str) -> &'a str {
// NOTE(review): this guard is a no-op — `()` is just the unit value, not a
// return — so a missing option falls through to the map lookups below, which
// fail the task. Probably intended to return early or fail with a message.
if !self.has_option(section, option) {
()
}
self.opts.get(§ion.to_owned()).get(&option.to_owned()).to_owned()
}
/**
 * A convenience method which coerces the option in the specified section to a boolean.
 * Note that the accepted values for the option are '1', 'yes', 'true', and 'on', which cause this method to return True, and '0', 'no', 'false', and 'off', which cause it to return False.
 * @todo These string values are checked in a case-insensitive manner.
 */
pub fn get_bool(&self, section: &str, option: &str) -> bool {
let value = self.get(section, option);
// NOTE(review): matching is case-sensitive despite the doc; "T"/"F" are also
// accepted beyond the documented set.
match value.as_slice() {
"1" | "yes" | "true" | "T" | "on" => true,
"0" | "no" | "false" | "F" | "off" => false,
_ => fail!("{} is not a boolean.", value)
}
}
/**
 * A convenience method which coerces the option in the specified section to a float f64.
 */
pub fn get_f64(&self, section: &str, option: &str) -> f64 {
let value = self.get(section, option);
let x: Option<f64> = FromStr::from_str(value);
match x {
None => fail!("{} is not a float.", value),
_ => x.unwrap()
}
}
/**
 * A convenience method which coerces the option in the specified section to an integer.
 */
pub fn get_int(&self, section: &str, option: &str) -> int {
let value = self.get(section, option);
// https://github.com/mozilla/rust/wiki/Doc-FAQ-Cheatsheet#string-to-int
let x: Option<int> = FromStr::from_str(value);
match x {
None => fail!("{} is not an integer.", value),
_ => x.unwrap()
}
}
/**
 * Indicates whether the given section exists and contains the given option.
 */
pub fn has_option(&self, section: &str, option: &str) -> bool {
self.has_section(section) &&
self.opts.get(§ion.to_owned()).contains_key(&option.to_owned())
}
/**
 * Indicates whether the named section is present in the configuration.
 */
pub fn has_section(&self, section: &str) -> bool {
self.opts.contains_key(§ion.to_owned())
}
// Fresh, empty configuration. Path::new("") displays as ".", which is what
// filepath() returns before read()/set_path() is called (see the tests).
pub fn new() -> IniFile {
IniFile { comments: HashMap::new(), options: ~[~[]], path: Path::new(""), opts: HashMap::new(), sections: ~[]}
}
/**
 * Return a list of options available in the specified section.
 */
pub fn options(&self, section: ~str) -> ~[~str] {
let section_index = self.sections.position_elem(§ion).unwrap();
self.options[section_index].clone()
}
/**
 * Read and parse configuration data from filepath.
 */
pub fn read(&mut self, filepath: &str) {
self.path = Path::new(filepath);
let file = File::open(&self.path);
match file {
Err(e) => fail!("open of {:?} failed: {}", self.path, e),
_ => debug!("open of {:?} succeeded", self.path)
}
let mut reader = BufferedReader::new(file);
let mut lines: ~[~str] = ~[];
for line in reader.lines() {
lines.push(line);
}
self.read_string(lines);
}
/**
 * Parse configuration data from a vector of strings (file lines).
 */
// Line-oriented parser. Blank and #/; lines accumulate into `comment_lines`,
// which is attached to the next section header or option encountered.
// NOTE(review): indices mix byte length (line.len()) with slice_chars' char
// indices — assumes ASCII input; TODO confirm non-ASCII files are out of scope.
pub fn read_string(&mut self, lines: ~[~str]) {
let mut section: ~str = ~"Default";
let mut comment_lines: ~str = ~"";
for line in lines.iter() {
let mut line_len = line.len();
if line_len > 0 && line.slice_chars(line_len - 1, line_len) == "\n" {
line_len = line_len - 1;
}
if line_len == 0 {
comment_lines.push_str(line.clone());
continue;
}
if line.slice_chars(0, 1) == "#" ||
line.slice_chars(0, 1) == ";" {
comment_lines.push_str(line.clone());
continue;
}
// "[name]" header: assumes the closing bracket is the last character.
if line.slice_chars(0, 1) == "[" {
section = line.slice_chars(1, line_len - 1).to_owned();
if !self.opts.contains_key(§ion) {
self.add_section(section.clone());
self.comments.get_mut(§ion).insert(~"__section_comment__", comment_lines);
comment_lines = ~"";
}
continue;
}
// Otherwise it must be "key=value"; unwrap() fails the task on a line
// without '='. Options before any header land in section "Default".
let index: uint = line.find_str("=").unwrap();
let optkey: ~str = line.slice_chars(0, index).to_owned();
let optval: ~str = line.slice_chars(index + 1, line_len).to_owned();
self.comments.get_mut(§ion).insert(optkey.clone(), comment_lines);
comment_lines = ~"";
self.opts.get_mut(§ion).insert(optkey.clone(), optval);
let section_index = self.sections.position_elem(§ion).unwrap();
self.options[section_index].push(optkey.clone());
}
}
/**
 * Remove the specified option from the specified section. If the section does not exist, fails.
 * If the option existed to be removed, return True; otherwise return False.
 */
pub fn remove_option(&mut self, section: ~str, option: ~str) -> bool {
if !self.has_section(section.clone()) {
// NOTE(review): "{:?}" has no matching argument — the message never shows the section.
fail!("Section [{:?}] does not exist!");
}
/*
if !self.has_option(section.to_owned(), option.to_owned()) {
false
}
*/
let section_index = self.sections.position_elem(§ion).unwrap();
// NOTE(review): removes by `section_index` instead of the option's own index
// within the list — likely a bug that desynchronizes `options` from `opts`.
self.options[section_index].remove(section_index);
self.comments.get_mut(§ion).pop(&option);
self.opts.get_mut(§ion).pop(&option);
true
}
/**
 * Remove the specified section from the configuration.
 * If the section in fact existed, return True; otherwise return False.
 */
pub fn remove_section(&mut self, section: ~str) -> bool {
/*
if (!self.has_section(section.clone())) {
false
}
*/
self.opts.pop(§ion);
self.comments.pop(§ion);
// http://static.rust-lang.org/doc/0.8/std/vec.html
// NOTE(review): unwrap() fails on an unknown section since the guard above is
// commented out, so this can never actually return false.
let index = self.sections.position_elem(§ion).unwrap();
self.sections.remove(index);
self.options.remove(index);
true
}
/**
 * Save the current configuration into the original file.
 */
pub fn save(&self) {
self.write(self.filepath());
}
/**
 * Return a list of the available sections.
 */
// Ordered section names; the HashMap-based draft below was retired because
// HashMap iteration order is unspecified.
pub fn sections(&self) -> ~[~str] {
/*
let mut sections: ~[~str] = ~[];
self.opts.iter().advance(|(k, _)| {sections.push(k.to_owned()); true});
sections
*/
self.sections.clone()
}
/**
 * If the given section exists, set the given option to the specified value; otherwise fail!().
 */
pub fn set(&mut self, section: ~str, option: ~str, value: ~str) {
if !self.has_section(section.to_owned()) {
//self.add_section(section.to_owned());
// NOTE(review): "{:?}" has no matching argument — the message never shows the section.
fail!("Section [{:?}] does not exist!");
}
if !self.has_option(section.to_owned(), option.to_owned()) {
self.opts.get_mut(§ion).insert(option.clone(), value);
let section_index = self.sections.position_elem(§ion).unwrap();
self.options[section_index].push(option.clone());
} else {
self.opts.get_mut(§ion).swap(option, value);
}
}
/**
 * Redefine file path.
 */
pub fn set_path(&mut self, filepath: Path) {
self.path = filepath;
}
/**
 * Write a representation of the configuration to the specified file path.
 * This representation can be parsed by a future read() call.
 */
// NOTE(review): a write failure is only printed, not propagated to the caller.
pub fn write(&self, filepath: &str) {
// http://static.rust-lang.org/doc/master/std/io/index.html
let mut file = File::create(&Path::new(filepath));
match file.write(self.to_str().into_bytes()) {
Ok(()) => debug!("INI file {:?} written", self.path),
Err(e) => println!("failed to write to {:?}: {}", self.path, e),
}
}
}
/**
* Operator overloading
* @see http://maniagnosis.crsr.net/2013/04/operator-overloading-in-rust.html
*/
impl fmt::Show for IniFile {
// Serializes the configuration back to INI text in recorded order: stored
// pre-section comments, "[name]" header, then per-option comments and
// "key=value" lines. Backs to_str() and therefore write()/save().
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut lines = ~"";
let sections = self.sections().clone();
for section in sections.iter() {
// "__section_comment__" is the reserved key for comments preceding the header.
if self.comments.contains_key(section) && self.comments.get(section).contains_key(&~"__section_comment__") {
lines.push_str(self.comments.get(section).get(&~"__section_comment__").clone());
}
lines.push_str(format!("[{}]\n", section.to_owned()));
let options = self.options(section.to_owned()).clone();
for key in options.iter() {
if self.comments.contains_key(section) && self.comments.get(section).contains_key(key) {
lines.push_str(self.comments.get(section).get(key).clone());
}
lines.push_str(format!("{}={}\n", key.to_owned(), self.get(section.to_owned(), key.to_owned())));
}
}
write!(f.buf, "{}", lines)
}
}
#[cfg(test)]
// Integration-style test suite: most cases parse the on-disk fixture
// data/config.ini, so they require that file to be present relative to the
// working directory. write()/save() additionally create and then delete
// data/write_test.ini and data/save_test.ini.
mod tests {
use collections::hashmap::HashMap;
use std::io::BufferedReader;
use std::io::fs;
use std::io::fs::File;
use std::path::Path;
#[test]
// NOTE(review): despite the name/message, the expected default path is "."
// (how Path::new("") displays), not the empty string.
fn defaultFilepathIsEmpty() {
let ini = super::IniFile::new();
let expected = ".";
let found = ini.filepath();
assert!(expected == found, format!("Default file path must be \"\", not \"{}\".", found));
}
#[test]
fn filepath() {
let mut ini = super::IniFile::new();
let filepath = "data/config.ini";
ini.read(filepath);
let expected = "data/config.ini";
let found=ini.filepath();
assert!(expected == found, format!("Default file path must be \"{}\", not \"{}\".", expected, found));
}
#[test]
fn sections_length() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let expected = 5;
let sections = ini.sections();
let found = sections.len();
assert!(expected == found, format!("{:u} sections are expected, not {:u}.", expected, found));
}
#[test]
fn sections_names() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let expected = ~[~"section1", ~"section2", ~"Booleans", ~"Integers", ~"Floats"];
let found = ini.sections();
assert!(expected == found, format!("Sections must be \"{:?}\", not {:?}.", expected, found));
}
#[test]
fn has_option_true() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let found = ini.has_option("section1", "value11");
assert!(found, "Option \"value11\" in section [section1] must be found!");
}
#[test]
fn has_option_false() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let found = ini.has_option("section1", "unknown key");
assert!(!found, "Option \"unknown key\" in section [section1] must not be found!");
}
#[test]
fn has_section_true() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let found = ini.has_section("section1");
assert!(found, "Section section1 must be found!");
}
#[test]
fn has_section_false() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let found = ini.has_section("unknown section");
assert!(!found, "Section \"unknown section\" must not be found!");
}
#[test]
fn get() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let expected = "string 11";
let found = ini.get("section1", "value11");
assert!(expected == found, format!("[section1] value11 must be \"{}\", not \"{}\".", expected, found));
}
#[test]
fn get_bool_true() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let options = ["true1", "true2", "true3"];
for key in options.iter() {
let found = ini.get_bool("Booleans", key.to_owned());
assert!(found, format!("[Booleans] {:?} must be true.", key));
}
}
#[test]
fn get_bool_false() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let options = ["false1", "false2", "false3"];
for key in options.iter() {
let found = ini.get_bool("Booleans", key.to_owned());
assert!(!found, format!("[Booleans] {:?} must be false.", key));
}
}
#[test]
#[should_fail]
fn get_bool_fail() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.get_bool("section1", "value11");
}
#[test]
fn get_int() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let mut test: HashMap<~str, int> = HashMap::new();
test.insert(~"integer0", 0i);
test.insert(~"integer1", 1i);
test.insert(~"integer2", 2i);
test.insert(~"integer3", 03i);
for (key, expected) in test.iter() {
let found = ini.get_int("Integers", key.to_owned());
assert!((expected*1) == found,
format!("[Integers] {:?} must be \"{:?}\", not \"{:?}\".", key, expected, found));
}
}
#[test]
#[should_fail]
fn get_int_fail() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.get_int("section1", "value11");
}
#[test]
fn get_f64() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let mut test: HashMap<~str, f64> = HashMap::new();
test.insert(~"float01", 0.1f64);
test.insert(~"float11", 1.1f64);
test.insert(~"float20", 2.0f64);
test.insert(~"float30", 3.0f64);
for (key, expected) in test.iter() {
let found = ini.get_f64("Floats", key.to_owned());
assert!((expected*1.0f64) == found,
format!("[Floats] {:?} must be \"{:?}\", not \"{:?}\".", key, expected, found));
}
}
#[test]
fn add_section() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let expected = ~[~"section1", ~"section2", ~"Booleans", ~"Integers", ~"Floats"];
let found = ini.sections();
assert!(expected == found, format!("Sections must be \"{:?}\", not {:?}.", expected, found));
ini.add_section("New section");
let expected2 = ~[~"section1", ~"section2", ~"Booleans", ~"Integers", ~"Floats", ~"New section"];
let found2 = ini.sections();
assert!(expected2 == found2, format!("Sections must be \"{:?}\", not {:?}.", expected2, found2));
}
#[test]
#[should_fail]
fn add_section_twice() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.add_section("New section");
ini.add_section("New section");
}
#[test]
fn remove_section() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.remove_section(~"section1");
let expected = ~[~"section2", ~"Booleans", ~"Integers", ~"Floats"];
let found = ini.sections();
assert!(expected == found, format!("Sections must be \"{:?}\", not {:?}.", expected, found));
}
#[test]
fn set() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.set(~"section1", ~"value2", ~"string 2");
let expected = "string 2";
let found = ini.get("section1", "value2");
assert!(expected == found, format!("[section1] value2 must be \"{}\", not \"{}\".", expected, found));
}
#[test]
fn options() {
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let expected = ~[~"value11", ~"value"];
let found = ini.options(~"section1");
assert!(expected == found, format!("Items of [section1] must be \"{:?}\", not {:?}.", expected, found));
}
#[test]
// Round-trip check: to_str() must reproduce the fixture file byte for byte.
fn to_str() {
let filepath = "data/config.ini";
let path = Path::new(filepath);
let file = File::open(&path);
match file {
Err(e) => fail!("open of {:?} failed: {}", path, e),
_ => debug!("open of {:?} succeeded", path)
}
let mut reader = BufferedReader::new(file);
let mut lines: ~[~str] = ~[];
for line in reader.lines() {
lines.push(line);
}
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
let found = ini.to_str();
let expected = lines.concat();
assert_eq!(expected, found);
}
#[test]
fn write() {
use std::task;
use std::any::Any;
// Copy config.ini to write_test.ini using `write()`.
let writepath = "data/write_test.ini";
let mut ini = super::IniFile::new();
ini.read("data/config.ini");
ini.write(writepath);
// Check that the new file exists
let path = Path::new(writepath);
assert!(path.exists(), format!("{} should exist after writing inifile!", writepath));
// Check the contents
let mut ini2 = super::IniFile::new();
ini2.read(writepath);
let found = ini2.to_str();
let expected = ini.to_str();
assert_eq!(expected, found);
// Clean
assert!(path.exists(), format!("{} should exist after reading the new inifile!", writepath));
let result: Result<(), ~Any> = task::try(proc() {
match fs::unlink(&path) {
Err(e) => fail!("open of {:?} failed: {}", path, e),
_ => debug!("open of {:?} succeeded", path)
}
});
assert!(!result.is_err(), format!("Unlinking {} should not fail!", writepath));
}
#[test]
// Builds a config in memory, saves it to data/save_test.ini, re-reads the raw
// bytes to check serialization, then deletes the file.
fn save() {
let filepath = ~"data/save_test.ini";
let path = Path::new(filepath);
if path.exists() {
println!("The file {:?} should not exist before test::save() is executed!", path);
}
let mut ini = super::IniFile::new();
ini.add_section("section1");
ini.set(~"section1", ~"key1", ~"value1");
ini.set_path(path.clone());
ini.save();
let file = File::open(&path);
match file {
Err(e) => fail!("open of {:?} failed: {}", path, e),
_ => debug!("open of {:?} succeeded", path)
}
let mut reader = BufferedReader::new(file);
let mut lines: ~[~str] = ~[];
for line in reader.lines() {
lines.push(line);
}
let found = lines.concat();
let expected = ~"[section1]\nkey1=value1\n";
assert_eq!(expected, found);
match fs::unlink(&path) {
Err(e) => fail!("open of {:?} failed: {}", path, e),
_ => debug!("open of {:?} succeeded", path)
}
}
}
#[bench]
// Benchmarks a full parse of the fixture file per iteration; dominated by
// file I/O, so it measures read()+read_string() together, not parsing alone.
fn bench_inifile(b: &mut BenchHarness) {
b.iter(|| {
let mut ini = IniFile::new();
ini.read("data/config.ini");
});
}
|
#[macro_use] extern crate nom;
#[macro_use] extern crate clap;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate lazy_static;
#[macro_use] extern crate sozu_lib as sozu;
#[macro_use] extern crate sozu_command_lib as sozu_command;
#[cfg(target_os = "linux")]
extern crate num_cpus;
#[cfg(target_os="linux")]
use regex::Regex;
#[cfg(feature = "jemallocator")]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
#[macro_use]
mod logging;
mod command;
mod worker;
mod upgrade;
mod cli;
mod util;
use std::panic;
use sozu_command::config::Config;
use clap::ArgMatches;
#[cfg(target_os = "linux")]
use libc::{cpu_set_t,pid_t};
use crate::command::Worker;
use crate::worker::{start_workers,get_executable_path};
use sozu::metrics::METRICS;
/// Errors that can abort startup before the command loop runs. `main` maps
/// each variant to a logged error message and a non-zero process exit.
enum StartupError {
// No -c flag and no SOZU_CONFIG compile-time env var.
ConfigurationFileNotSpecified,
// Config file present but failed to load/parse.
ConfigurationFileLoadError(std::io::Error),
#[allow(dead_code)]
TooManyAllowedConnections(String),
#[allow(dead_code)]
TooManyAllowedConnectionsForWorker(String),
// Forwarded from start_workers (fork/exec layer).
WorkersSpawnFail(nix::Error),
// Carries the human-readable reason from util::write_pid_file.
PIDFileNotWritable(String)
}
// Entry point: either performs a worker/main upgrade (and exits through that
// path) or runs the normal startup pipeline. The pipeline threads `config`
// through a Result chain so any step can abort with a StartupError that is
// translated into a message + exit(1) below.
fn main() {
register_panic_hook();
// Init parsing of arguments
let matches = cli::init();
// Check if we are upgrading workers or main
let upgrade = cli::upgrade_worker(&matches).or_else(|| cli::upgrade_main(&matches));
// If we are not, then we want to start sozu
// NOTE(review): `upgrade == None` could be `upgrade.is_none()`.
if upgrade == None {
// Startup pipeline: locate config -> load it -> write PID file ->
// set up logging/metrics -> raise process limits -> spawn workers ->
// optionally pin processes to CPUs -> run the command loop (blocking).
let start = get_config_file_path(&matches)
.and_then(|config_file| load_configuration(config_file))
.and_then(|config| {
util::write_pid_file(&config)
.map(|()| config)
.map_err(|err| StartupError::PIDFileNotWritable(err))
})
.map(|config| {
util::setup_logging(&config);
info!("Starting up");
util::setup_metrics(&config);
config
})
.and_then(|config| update_process_limits(&config).map(|()| config))
.and_then(|config| init_workers(&config).map(|workers| (config, workers)))
.map(|(config, workers)| {
if config.handle_process_affinity {
set_workers_affinity(&workers);
}
let command_socket_path = config.command_socket_path();
// command::start blocks until the proxy shuts down.
if let Err(e) = command::start(config, command_socket_path, workers) {
error!("could not start worker: {:?}", e);
}
});
// Map each startup failure to a diagnostic and a non-zero exit code.
match start {
Ok(_) => info!("main process stopped"), // Ok() is only called when the proxy exits
Err(StartupError::ConfigurationFileNotSpecified) => {
error!("Configuration file hasn't been specified. Either use -c with the start command \
or use the SOZU_CONFIG environment variable when building sozu.");
std::process::exit(1);
},
Err(StartupError::ConfigurationFileLoadError(err)) => {
error!("Invalid configuration file. Error: {:?}", err);
std::process::exit(1);
},
Err(StartupError::TooManyAllowedConnections(err)) | Err(StartupError::TooManyAllowedConnectionsForWorker(err)) => {
error!("{}", err);
std::process::exit(1);
},
Err(StartupError::WorkersSpawnFail(err)) => {
error!("At least one worker failed to spawn. Error: {:?}", err);
std::process::exit(1);
},
Err(StartupError::PIDFileNotWritable(err)) => {
error!("{}", err);
std::process::exit(1);
}
}
}
}
/// Spawn the worker processes described by `config`.
///
/// Returns the spawned `Worker` handles on success; a spawn failure from the
/// fork/exec layer is wrapped into `StartupError::WorkersSpawnFail`.
fn init_workers(config: &Config) -> Result<Vec<Worker>, StartupError> {
    let path = unsafe { get_executable_path() };
    start_workers(path, &config)
        .map(|workers| {
            info!("created workers: {:?}", workers);
            workers
        })
        .map_err(StartupError::WorkersSpawnFail)
}
/// Resolve the configuration file path for the `start` subcommand.
///
/// Precedence: the `-c/--config` CLI argument, then the `SOZU_CONFIG`
/// environment variable captured at compile time. If neither is available,
/// returns `StartupError::ConfigurationFileNotSpecified`.
fn get_config_file_path<'a>(matches: &'a ArgMatches<'a>) -> Result<&'a str, StartupError> {
    let start_matches = matches.subcommand_matches("start").expect("unknown subcommand");
    start_matches
        .value_of("config")
        .or(option_env!("SOZU_CONFIG"))
        .ok_or(StartupError::ConfigurationFileNotSpecified)
}
/// Load and parse the configuration file at `config_file`.
///
/// # Errors
/// Returns `StartupError::ConfigurationFileLoadError` wrapping the
/// underlying I/O/parse error when the file cannot be loaded.
fn load_configuration(config_file: &str) -> Result<Config, StartupError> {
    // `map_err` instead of a `match` whose arms only re-wrap both sides.
    Config::load_from_path(config_file).map_err(StartupError::ConfigurationFileLoadError)
}
/// Set workers process affinity, see man sched_setaffinity
/// Bind each worker (including the main) process to a CPU core.
/// Can bind multiple processes to a CPU core if there are more processes
/// than CPU cores. Only works on Linux.
#[cfg(target_os = "linux")]
fn set_workers_affinity(workers: &Vec<Worker>) {
    let mut cpu_count = 0;
    let max_cpu = num_cpus::get();

    // +1 for the main process that will also be bound to its CPU core
    if (workers.len() + 1) > max_cpu {
        warn!("There are more workers than available CPU cores, \
            multiple workers will be bound to the same CPU core. \
            This may impact performances");
    }

    // The main process is pinned to the first core.
    let main_pid = unsafe { libc::getpid() };
    set_process_affinity(main_pid, cpu_count);
    cpu_count += 1;

    // Every worker then takes the next core, wrapping around once all
    // cores have been used. (`for worker` — the previous `for ref worker`
    // binding was a redundant `ref` pattern flagged by clippy.)
    for worker in workers {
        if cpu_count >= max_cpu {
            cpu_count = 0;
        }
        set_process_affinity(worker.pid, cpu_count);
        cpu_count += 1;
    }
}
/// Set workers process affinity, see man sched_setaffinity
/// Bind each worker (including the main) process to a CPU core.
/// Can bind multiple processes to a CPU core if there are more processes
/// than CPU cores. Only works on Linux.
#[cfg(not(target_os = "linux"))]
fn set_workers_affinity(_: &Vec<Worker>) {
    // No-op stub: CPU affinity is only handled on Linux.
}
/// Set a specific process to run onto a specific CPU core
#[cfg(target_os = "linux")]
use std::mem;
#[cfg(target_os = "linux")]
fn set_process_affinity(pid: pid_t, cpu: usize) {
unsafe {
let mut cpu_set: cpu_set_t = mem::zeroed();
let size_cpu_set = mem::size_of::<cpu_set_t>();
libc::CPU_SET(cpu, &mut cpu_set);
libc::sched_setaffinity(pid, size_cpu_set, &mut cpu_set);
debug!("Worker {} bound to CPU core {}", pid, cpu);
};
}
#[cfg(target_os="linux")]
// We check the hard_limit. The soft_limit can be changed at runtime
// by the process or any user. hard_limit can only be changed by root
/// Verify and, if possible, raise the open-file-descriptor limits so that
/// `config.max_connections` connections can be served (each client
/// connection needs two file descriptors).
///
/// # Errors
/// - `StartupError::TooManyAllowedConnections` when the configuration
///   exceeds the system-wide `fs.file-max` limit.
/// - `StartupError::TooManyAllowedConnectionsForWorker` when it exceeds
///   the process hard limit, or the soft limit could not be raised enough.
fn update_process_limits(config: &Config) -> Result<(), StartupError> {
    let wanted_opened_files = (config.max_connections as u64) * 2;

    // Ensure we don't exceed the system maximum capacity
    let f = sozu_command::config::Config::load_file("/proc/sys/fs/file-max")
        .expect("Couldn't read /proc/sys/fs/file-max");
    // `\d+` instead of `\d*`: with `*` the capture can match an empty
    // string (the regex matches at offset 0 even before a non-digit),
    // which would then make the parse below fail spuriously.
    let re_max = Regex::new(r"(\d+)").unwrap();
    let system_max_fd = re_max.captures(&f).and_then(|c| c.get(1))
        .and_then(|m| m.as_str().parse::<usize>().ok())
        .expect("Couldn't parse /proc/sys/fs/file-max");
    if config.max_connections > system_max_fd {
        let error = format!("Proxies total max_connections can't be higher than system's file-max limit. \
            Current limit: {}, current value: {}", system_max_fd, config.max_connections);
        return Err(StartupError::TooManyAllowedConnections(error))
    }

    // Get the soft and hard limits for the current process
    let mut limits = libc::rlimit {
        rlim_cur: 0,
        rlim_max: 0,
    };
    unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut limits) };

    // Ensure we don't exceed the hard limit
    if limits.rlim_max < wanted_opened_files {
        let error = format!("at least one worker can't have that many connections. \
            current max file descriptor hard limit is: {}, \
            configured max_connections is {} (the worker needs two file descriptors \
            per client connection)", limits.rlim_max, config.max_connections);
        return Err(StartupError::TooManyAllowedConnectionsForWorker(error));
    }

    if limits.rlim_cur < wanted_opened_files && limits.rlim_cur != limits.rlim_max {
        // Try to get twice what we need to be safe, or rlim_max if we exceed that
        limits.rlim_cur = limits.rlim_max.min(wanted_opened_files * 2);
        unsafe {
            // A failing setrlimit is caught by the soft-limit re-check
            // below, so its return value needs no separate handling here.
            libc::setrlimit(libc::RLIMIT_NOFILE, &limits);
            // Refresh the data we have
            libc::getrlimit(libc::RLIMIT_NOFILE, &mut limits);
        }
    }

    // Ensure we don't exceed the new soft limit
    if limits.rlim_cur < wanted_opened_files {
        let error = format!("at least one worker can't have that many connections. \
            current max file descriptor soft limit is: {}, \
            configured max_connections is {} (the worker needs two file descriptors \
            per client connection)", limits.rlim_cur, config.max_connections);
        return Err(StartupError::TooManyAllowedConnectionsForWorker(error));
    }
    Ok(())
}
#[cfg(not(target_os = "linux"))]
/// Non-Linux stub: file-descriptor limits are only checked on Linux.
fn update_process_limits(_: &Config) -> Result<(), StartupError> {
    Ok(())
}
/// Install a panic hook that flushes metrics before delegating to the
/// previously-installed hook.
fn register_panic_hook() {
    // Keep the previous hook so the default behavior (message, backtrace)
    // is preserved after our metrics work is done.
    let previous_hook = panic::take_hook();

    panic::set_hook(Box::new(move |panic_info| {
        // Count the panic and push pending metrics out before the process
        // potentially aborts.
        incr!("panic");
        METRICS.with(|metrics| {
            metrics.borrow_mut().send_data();
        });

        (*previous_hook)(panic_info)
    }));
}
Initialize the logger before writing the PID file, so that any error encountered while writing it can actually be logged.
#[macro_use] extern crate nom;
#[macro_use] extern crate clap;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate lazy_static;
#[macro_use] extern crate sozu_lib as sozu;
#[macro_use] extern crate sozu_command_lib as sozu_command;
#[cfg(target_os = "linux")]
extern crate num_cpus;
#[cfg(target_os="linux")]
use regex::Regex;
#[cfg(feature = "jemallocator")]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
#[macro_use]
mod logging;
mod command;
mod worker;
mod upgrade;
mod cli;
mod util;
use std::panic;
use sozu_command::config::Config;
use clap::ArgMatches;
#[cfg(target_os = "linux")]
use libc::{cpu_set_t,pid_t};
use crate::command::Worker;
use crate::worker::{start_workers,get_executable_path};
use sozu::metrics::METRICS;
/// Fatal conditions that can abort the main-process startup sequence.
enum StartupError {
    // No `-c` CLI argument and no compile-time SOZU_CONFIG path.
    ConfigurationFileNotSpecified,
    // The configuration file could not be read or parsed.
    ConfigurationFileLoadError(std::io::Error),
    // Only constructed on Linux, where process limits are checked.
    #[allow(dead_code)]
    TooManyAllowedConnections(String),
    #[allow(dead_code)]
    TooManyAllowedConnectionsForWorker(String),
    // At least one worker process failed to spawn.
    WorkersSpawnFail(nix::Error),
    // The PID file could not be written.
    PIDFileNotWritable(String)
}
fn main() {
register_panic_hook();
// Init parsing of arguments
let matches = cli::init();
// Check if we are upgrading workers or main
let upgrade = cli::upgrade_worker(&matches).or_else(|| cli::upgrade_main(&matches));
// If we are not, then we want to start sozu
if upgrade == None {
let start = get_config_file_path(&matches)
.and_then(|config_file| load_configuration(config_file))
.map(|config| {
util::setup_logging(&config);
info!("Starting up");
util::setup_metrics(&config);
config
})
.and_then(|config| {
util::write_pid_file(&config)
.map(|()| config)
.map_err(|err| StartupError::PIDFileNotWritable(err))
})
.and_then(|config| update_process_limits(&config).map(|()| config))
.and_then(|config| init_workers(&config).map(|workers| (config, workers)))
.map(|(config, workers)| {
if config.handle_process_affinity {
set_workers_affinity(&workers);
}
let command_socket_path = config.command_socket_path();
if let Err(e) = command::start(config, command_socket_path, workers) {
error!("could not start worker: {:?}", e);
}
});
match start {
Ok(_) => info!("main process stopped"), // Ok() is only called when the proxy exits
Err(StartupError::ConfigurationFileNotSpecified) => {
error!("Configuration file hasn't been specified. Either use -c with the start command \
or use the SOZU_CONFIG environment variable when building sozu.");
std::process::exit(1);
},
Err(StartupError::ConfigurationFileLoadError(err)) => {
error!("Invalid configuration file. Error: {:?}", err);
std::process::exit(1);
},
Err(StartupError::TooManyAllowedConnections(err)) | Err(StartupError::TooManyAllowedConnectionsForWorker(err)) => {
error!("{}", err);
std::process::exit(1);
},
Err(StartupError::WorkersSpawnFail(err)) => {
error!("At least one worker failed to spawn. Error: {:?}", err);
std::process::exit(1);
},
Err(StartupError::PIDFileNotWritable(err)) => {
error!("{}", err);
std::process::exit(1);
}
}
}
}
/// Spawn the worker processes described by `config`.
///
/// Returns the spawned workers on success, or
/// `StartupError::WorkersSpawnFail` when at least one worker could not
/// be started.
fn init_workers(config: &Config) -> Result<Vec<Worker>, StartupError> {
    let executable = unsafe { get_executable_path() };
    start_workers(executable, &config)
        .map(|workers| {
            info!("created workers: {:?}", workers);
            workers
        })
        .map_err(StartupError::WorkersSpawnFail)
}
/// Resolve the configuration file path for the `start` subcommand.
///
/// The `-c/--config` CLI argument takes precedence; otherwise the path
/// baked in at build time through the `SOZU_CONFIG` environment variable
/// is used. Fails with `ConfigurationFileNotSpecified` when neither is set.
fn get_config_file_path<'a>(matches: &'a ArgMatches<'a>) -> Result<&'a str, StartupError> {
    let start_matches = matches.subcommand_matches("start").expect("unknown subcommand");
    start_matches
        .value_of("config")
        .or(option_env!("SOZU_CONFIG"))
        .ok_or(StartupError::ConfigurationFileNotSpecified)
}
/// Load and parse the configuration file at `config_file`.
///
/// # Errors
/// Returns `StartupError::ConfigurationFileLoadError` wrapping the
/// underlying I/O/parse error when the file cannot be loaded.
fn load_configuration(config_file: &str) -> Result<Config, StartupError> {
    // `map_err` instead of a `match` whose arms only re-wrap both sides.
    Config::load_from_path(config_file).map_err(StartupError::ConfigurationFileLoadError)
}
/// Set workers process affinity, see man sched_setaffinity
/// Bind each worker (including the main) process to a CPU core.
/// Can bind multiple processes to a CPU core if there are more processes
/// than CPU cores. Only works on Linux.
#[cfg(target_os = "linux")]
fn set_workers_affinity(workers: &Vec<Worker>) {
    let mut cpu_count = 0;
    let max_cpu = num_cpus::get();

    // +1 for the main process that will also be bound to its CPU core
    if (workers.len() + 1) > max_cpu {
        warn!("There are more workers than available CPU cores, \
            multiple workers will be bound to the same CPU core. \
            This may impact performances");
    }

    // The main process is pinned to the first core.
    let main_pid = unsafe { libc::getpid() };
    set_process_affinity(main_pid, cpu_count);
    cpu_count += 1;

    // Every worker then takes the next core, wrapping around once all
    // cores have been used. (`for worker` — the previous `for ref worker`
    // binding was a redundant `ref` pattern flagged by clippy.)
    for worker in workers {
        if cpu_count >= max_cpu {
            cpu_count = 0;
        }
        set_process_affinity(worker.pid, cpu_count);
        cpu_count += 1;
    }
}
/// Set workers process affinity, see man sched_setaffinity
/// Bind each worker (including the main) process to a CPU core.
/// Can bind multiple processes to a CPU core if there are more processes
/// than CPU cores. Only works on Linux.
#[cfg(not(target_os = "linux"))]
fn set_workers_affinity(_: &Vec<Worker>) {
    // No-op stub: CPU affinity is only handled on Linux.
}
/// Set a specific process to run onto a specific CPU core
#[cfg(target_os = "linux")]
use std::mem;
#[cfg(target_os = "linux")]
fn set_process_affinity(pid: pid_t, cpu: usize) {
unsafe {
let mut cpu_set: cpu_set_t = mem::zeroed();
let size_cpu_set = mem::size_of::<cpu_set_t>();
libc::CPU_SET(cpu, &mut cpu_set);
libc::sched_setaffinity(pid, size_cpu_set, &mut cpu_set);
debug!("Worker {} bound to CPU core {}", pid, cpu);
};
}
#[cfg(target_os="linux")]
// We check the hard_limit. The soft_limit can be changed at runtime
// by the process or any user. hard_limit can only be changed by root
/// Verify and, if possible, raise the open-file-descriptor limits so that
/// `config.max_connections` connections can be served (each client
/// connection needs two file descriptors).
///
/// # Errors
/// - `StartupError::TooManyAllowedConnections` when the configuration
///   exceeds the system-wide `fs.file-max` limit.
/// - `StartupError::TooManyAllowedConnectionsForWorker` when it exceeds
///   the process hard limit, or the soft limit could not be raised enough.
fn update_process_limits(config: &Config) -> Result<(), StartupError> {
    let wanted_opened_files = (config.max_connections as u64) * 2;

    // Ensure we don't exceed the system maximum capacity
    let f = sozu_command::config::Config::load_file("/proc/sys/fs/file-max")
        .expect("Couldn't read /proc/sys/fs/file-max");
    // `\d+` instead of `\d*`: with `*` the capture can match an empty
    // string (the regex matches at offset 0 even before a non-digit),
    // which would then make the parse below fail spuriously.
    let re_max = Regex::new(r"(\d+)").unwrap();
    let system_max_fd = re_max.captures(&f).and_then(|c| c.get(1))
        .and_then(|m| m.as_str().parse::<usize>().ok())
        .expect("Couldn't parse /proc/sys/fs/file-max");
    if config.max_connections > system_max_fd {
        let error = format!("Proxies total max_connections can't be higher than system's file-max limit. \
            Current limit: {}, current value: {}", system_max_fd, config.max_connections);
        return Err(StartupError::TooManyAllowedConnections(error))
    }

    // Get the soft and hard limits for the current process
    let mut limits = libc::rlimit {
        rlim_cur: 0,
        rlim_max: 0,
    };
    unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut limits) };

    // Ensure we don't exceed the hard limit
    if limits.rlim_max < wanted_opened_files {
        let error = format!("at least one worker can't have that many connections. \
            current max file descriptor hard limit is: {}, \
            configured max_connections is {} (the worker needs two file descriptors \
            per client connection)", limits.rlim_max, config.max_connections);
        return Err(StartupError::TooManyAllowedConnectionsForWorker(error));
    }

    if limits.rlim_cur < wanted_opened_files && limits.rlim_cur != limits.rlim_max {
        // Try to get twice what we need to be safe, or rlim_max if we exceed that
        limits.rlim_cur = limits.rlim_max.min(wanted_opened_files * 2);
        unsafe {
            // A failing setrlimit is caught by the soft-limit re-check
            // below, so its return value needs no separate handling here.
            libc::setrlimit(libc::RLIMIT_NOFILE, &limits);
            // Refresh the data we have
            libc::getrlimit(libc::RLIMIT_NOFILE, &mut limits);
        }
    }

    // Ensure we don't exceed the new soft limit
    if limits.rlim_cur < wanted_opened_files {
        let error = format!("at least one worker can't have that many connections. \
            current max file descriptor soft limit is: {}, \
            configured max_connections is {} (the worker needs two file descriptors \
            per client connection)", limits.rlim_cur, config.max_connections);
        return Err(StartupError::TooManyAllowedConnectionsForWorker(error));
    }
    Ok(())
}
#[cfg(not(target_os = "linux"))]
/// Non-Linux stub: file-descriptor limits are only checked on Linux.
fn update_process_limits(_: &Config) -> Result<(), StartupError> {
    Ok(())
}
/// Install a panic hook that flushes metrics before delegating to the
/// previously-installed hook.
fn register_panic_hook() {
    // Keep the previous hook so the default behavior (message, backtrace)
    // is preserved after our metrics work is done.
    let previous_hook = panic::take_hook();

    panic::set_hook(Box::new(move |panic_info| {
        // Count the panic and push pending metrics out before the process
        // potentially aborts.
        incr!("panic");
        METRICS.with(|metrics| {
            metrics.borrow_mut().send_data();
        });

        (*previous_hook)(panic_info)
    }));
}
|
#[macro_use] extern crate nom;
#[macro_use] extern crate clap;
#[macro_use] extern crate serde_derive;
extern crate mio;
extern crate mio_uds;
extern crate serde;
extern crate serde_json;
extern crate time;
extern crate libc;
extern crate slab;
extern crate rand;
extern crate nix;
#[macro_use] extern crate sozu_lib as sozu;
extern crate sozu_command_lib as sozu_command;
#[cfg(target_os = "linux")]
extern crate num_cpus;
extern crate procinfo;
mod command;
mod worker;
mod logging;
mod upgrade;
use std::net::{UdpSocket,ToSocketAddrs};
use std::{mem,env};
use sozu::network::metrics::{METRICS,ProxyMetrics};
use sozu_command::config::Config;
use clap::{App,Arg,SubCommand};
#[cfg(target_os = "linux")]
use libc::{cpu_set_t,pid_t};
use command::Worker;
use worker::{begin_worker_process,start_workers};
use upgrade::begin_new_master_process;
/// Entry point of the single-binary layout: dispatches between the hidden
/// `worker`/`upgrade` subcommands (used when the master re-executes
/// itself) and the public `start` command.
fn main() {
    // Declarative CLI definition via clap's builder API.
    let matches = App::new("sozu")
        .version(crate_version!())
        .about("hot reconfigurable proxy")
        .subcommand(SubCommand::with_name("start")
            .about("launch the master process")
            .arg(Arg::with_name("config")
                .short("c")
                .long("config")
                .value_name("FILE")
                .help("Sets a custom config file")
                .takes_value(true)
                .required(true)))
        .subcommand(SubCommand::with_name("worker")
            .about("start a worker (internal command, should not be used directly)")
            .arg(Arg::with_name("id").long("id")
                .takes_value(true).required(true).help("worker identifier"))
            .arg(Arg::with_name("fd").long("fd")
                .takes_value(true).required(true).help("IPC file descriptor"))
            .arg(Arg::with_name("channel-buffer-size").long("channel-buffer-size")
                .takes_value(true).required(false).help("Worker's channel buffer size")))
        .subcommand(SubCommand::with_name("upgrade")
            .about("start a new master process (internal command, should not be used directly)")
            .arg(Arg::with_name("fd").long("fd")
                .takes_value(true).required(true).help("IPC file descriptor"))
            .arg(Arg::with_name("channel-buffer-size").long("channel-buffer-size")
                .takes_value(true).required(false).help("Worker's channel buffer size")))
        .get_matches();

    // Internal path: this process was launched as a worker; never returns
    // to the master flow below.
    if let Some(matches) = matches.subcommand_matches("worker") {
        let fd = matches.value_of("fd").expect("needs a file descriptor")
            .parse::<i32>().expect("the file descriptor must be a number");
        let id = matches.value_of("id").expect("needs a worker id");
        // Default channel buffer size when the flag is absent or invalid.
        let buffer_size = matches.value_of("channel-buffer-size")
            .and_then(|size| size.parse::<usize>().ok())
            .unwrap_or(10000);
        begin_worker_process(fd, id, buffer_size);
        return;
    }

    // Internal path: this process was launched as the replacement master
    // during a hot upgrade.
    if let Some(matches) = matches.subcommand_matches("upgrade") {
        let fd = matches.value_of("fd").expect("needs a file descriptor")
            .parse::<i32>().expect("the file descriptor must be a number");
        let buffer_size = matches.value_of("channel-buffer-size")
            .and_then(|size| size.parse::<usize>().ok())
            .unwrap_or(10000);
        begin_new_master_process(fd, buffer_size);
        return;
    }

    // Public path: `start` with a required config file.
    let submatches = matches.subcommand_matches("start").expect("unknown subcommand");
    let config_file = submatches.value_of("config").expect("required config file");

    if let Ok(config) = Config::load_from_path(config_file) {
        //FIXME: should have an id for the master too
        logging::setup("MASTER".to_string(), &config.log_level, &config.log_target);
        info!("starting up");

        // Metrics are pushed over UDP to the host configured in the file.
        let metrics_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let metrics_host = (&config.metrics.address[..], config.metrics.port).to_socket_addrs().unwrap().next().unwrap();
        METRICS.lock().unwrap().set_up_remote(metrics_socket, metrics_host);
        // Keep the guard alive for the process lifetime; presumably it
        // drives periodic metric flushes — TODO confirm in ProxyMetrics.
        let metrics_guard = ProxyMetrics::run();

        // NOTE(review): `cfg!` only selects at value level — on non-Linux
        // targets `check_process_limits` must still compile, which
        // conflicts with its `#[cfg(target_os = "linux")]` gating. A
        // `#[cfg]`-gated stub would be safer here.
        let process_limits_ok = if cfg!(target_os = "linux") { check_process_limits(config.clone()) } else { true };

        if process_limits_ok {
            match start_workers(&config) {
                Ok(workers) => {
                    info!("created workers: {:?}", workers);
                    // Affinity handling is opt-in and defaults to off.
                    let handle_process_affinity = match config.handle_process_affinity {
                        Some(val) => val,
                        None => false
                    };
                    if cfg!(target_os = "linux") && handle_process_affinity {
                        set_workers_affinity(&workers);
                    }
                    // Blocks until the proxy shuts down.
                    command::start(config, workers);
                },
                Err(e) => error!("Error while creating workers: {}", e)
            }
        }
        info!("master process stopped");
    } else {
        error!("could not load configuration file at '{}', stopping", config_file);
    }
}
/// Set workers process affinity, see man sched_setaffinity
/// Bind each worker (including the master) process to a CPU core.
/// Can bind multiple processes to a CPU core if there are more processes
/// than CPU cores. Only works on Linux.
#[cfg(target_os = "linux")]
fn set_workers_affinity(workers: &Vec<Worker>) {
    let mut cpu_count = 0;
    let max_cpu = num_cpus::get();

    // +1 for the master process that will also be bound to its CPU core
    if (workers.len() + 1) > max_cpu {
        warn!("There are more workers than available CPU cores, \
            multiple workers will be bound to the same CPU core. \
            This may impact performances");
    }

    // The master process is pinned to the first core.
    let master_pid = unsafe { libc::getpid() };
    set_process_affinity(master_pid, cpu_count);
    cpu_count += 1;

    // Every worker then takes the next core, wrapping around once all
    // cores have been used. (`for worker` — the previous `for ref worker`
    // binding was a redundant `ref` pattern flagged by clippy.)
    for worker in workers {
        if cpu_count >= max_cpu {
            cpu_count = 0;
        }
        set_process_affinity(worker.pid, cpu_count);
        cpu_count += 1;
    }
}
/// Set workers process affinity, see man sched_setaffinity
/// Bind each worker (including the master) process to a CPU core.
/// Can bind multiple processes to a CPU core if there are more processes
/// than CPU cores. Only works on Linux.
#[cfg(not(target_os = "linux"))]
fn set_workers_affinity(_: &Vec<Worker>) {
    // No-op stub: CPU affinity is only handled on Linux.
}
/// Set a specific process to run onto a specific CPU core
#[cfg(target_os = "linux")]
fn set_process_affinity(pid: pid_t, cpu: usize) {
unsafe {
let mut cpu_set: cpu_set_t = mem::zeroed();
let size_cpu_set = mem::size_of::<cpu_set_t>();
libc::CPU_SET(cpu, &mut cpu_set);
libc::sched_setaffinity(pid, size_cpu_set, &mut cpu_set);
debug!("Worker {} bound to CPU core {}", pid, cpu);
};
}
#[cfg(target_os="linux")]
// We check the hard_limit. The soft_limit can be changed at runtime
// by the process or any user. hard_limit can only be changed by root
/// Returns `true` when the configured proxy connection counts fit within
/// both the per-process hard limit and the system-wide file-max limit.
fn check_process_limits(config: Config) -> bool {
    let process_limits = procinfo::pid::limits_self()
        .expect("Couldn't read /proc/self/limits to determine max open file descriptors limit");

    // If limit is "unlimited"
    if process_limits.max_open_files.hard.is_none() {
        return true;
    }

    let hard_limit = process_limits.max_open_files.hard.unwrap();
    // `map` instead of `and_then(|x| Some(..))` (clippy: bind_instead_of_map).
    let http_max_cons = config.http.map(|proxy| proxy.max_connections).unwrap_or(0);
    let https_max_cons = config.https.map(|proxy| proxy.max_connections).unwrap_or(0);

    // check if all proxies are under the hard limit
    if http_max_cons > hard_limit || https_max_cons > hard_limit {
        error!("At least one proxy can't have that much of connections. \
            Current max file descriptor hard limit is: {}", hard_limit);
        return false;
    }

    let total_proxies_connections = http_max_cons + https_max_cons;
    let system_max_fd = procinfo::sys::fs::file_max::file_max()
        .expect("Couldn't read /proc/sys/fs/file-max") as usize;
    if total_proxies_connections > system_max_fd {
        error!("Proxies total max_connections can't be higher than system's file-max limit. \
            Current limit: {}, current value: {}", system_max_fd, total_proxies_connections);
        return false;
    }
    true
}
Fix compilation on non-Linux targets: gate the `procinfo` dependency behind `#[cfg(target_os = "linux")]` and add a stub `check_process_limits` for other platforms.
#[macro_use] extern crate nom;
#[macro_use] extern crate clap;
#[macro_use] extern crate serde_derive;
extern crate mio;
extern crate mio_uds;
extern crate serde;
extern crate serde_json;
extern crate time;
extern crate libc;
extern crate slab;
extern crate rand;
extern crate nix;
#[macro_use] extern crate sozu_lib as sozu;
extern crate sozu_command_lib as sozu_command;
#[cfg(target_os = "linux")]
extern crate num_cpus;
#[cfg(target_os = "linux")]
extern crate procinfo;
mod command;
mod worker;
mod logging;
mod upgrade;
use std::net::{UdpSocket,ToSocketAddrs};
use std::{mem,env};
use sozu::network::metrics::{METRICS,ProxyMetrics};
use sozu_command::config::Config;
use clap::{App,Arg,SubCommand};
#[cfg(target_os = "linux")]
use libc::{cpu_set_t,pid_t};
use command::Worker;
use worker::{begin_worker_process,start_workers};
use upgrade::begin_new_master_process;
/// Entry point of the single-binary layout: dispatches between the hidden
/// `worker`/`upgrade` subcommands (used when the master re-executes
/// itself) and the public `start` command.
fn main() {
    // Declarative CLI definition via clap's builder API.
    let matches = App::new("sozu")
        .version(crate_version!())
        .about("hot reconfigurable proxy")
        .subcommand(SubCommand::with_name("start")
            .about("launch the master process")
            .arg(Arg::with_name("config")
                .short("c")
                .long("config")
                .value_name("FILE")
                .help("Sets a custom config file")
                .takes_value(true)
                .required(true)))
        .subcommand(SubCommand::with_name("worker")
            .about("start a worker (internal command, should not be used directly)")
            .arg(Arg::with_name("id").long("id")
                .takes_value(true).required(true).help("worker identifier"))
            .arg(Arg::with_name("fd").long("fd")
                .takes_value(true).required(true).help("IPC file descriptor"))
            .arg(Arg::with_name("channel-buffer-size").long("channel-buffer-size")
                .takes_value(true).required(false).help("Worker's channel buffer size")))
        .subcommand(SubCommand::with_name("upgrade")
            .about("start a new master process (internal command, should not be used directly)")
            .arg(Arg::with_name("fd").long("fd")
                .takes_value(true).required(true).help("IPC file descriptor"))
            .arg(Arg::with_name("channel-buffer-size").long("channel-buffer-size")
                .takes_value(true).required(false).help("Worker's channel buffer size")))
        .get_matches();

    // Internal path: this process was launched as a worker; never returns
    // to the master flow below.
    if let Some(matches) = matches.subcommand_matches("worker") {
        let fd = matches.value_of("fd").expect("needs a file descriptor")
            .parse::<i32>().expect("the file descriptor must be a number");
        let id = matches.value_of("id").expect("needs a worker id");
        // Default channel buffer size when the flag is absent or invalid.
        let buffer_size = matches.value_of("channel-buffer-size")
            .and_then(|size| size.parse::<usize>().ok())
            .unwrap_or(10000);
        begin_worker_process(fd, id, buffer_size);
        return;
    }

    // Internal path: this process was launched as the replacement master
    // during a hot upgrade.
    if let Some(matches) = matches.subcommand_matches("upgrade") {
        let fd = matches.value_of("fd").expect("needs a file descriptor")
            .parse::<i32>().expect("the file descriptor must be a number");
        let buffer_size = matches.value_of("channel-buffer-size")
            .and_then(|size| size.parse::<usize>().ok())
            .unwrap_or(10000);
        begin_new_master_process(fd, buffer_size);
        return;
    }

    // Public path: `start` with a required config file.
    let submatches = matches.subcommand_matches("start").expect("unknown subcommand");
    let config_file = submatches.value_of("config").expect("required config file");

    if let Ok(config) = Config::load_from_path(config_file) {
        //FIXME: should have an id for the master too
        logging::setup("MASTER".to_string(), &config.log_level, &config.log_target);
        info!("starting up");

        // Metrics are pushed over UDP to the host configured in the file.
        let metrics_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let metrics_host = (&config.metrics.address[..], config.metrics.port).to_socket_addrs().unwrap().next().unwrap();
        METRICS.lock().unwrap().set_up_remote(metrics_socket, metrics_host);
        // Keep the guard alive for the process lifetime; presumably it
        // drives periodic metric flushes — TODO confirm in ProxyMetrics.
        let metrics_guard = ProxyMetrics::run();

        // `check_process_limits` is #[cfg]-gated with a non-Linux stub, so
        // this compiles and works on every target.
        if check_process_limits(config.clone()) {
            match start_workers(&config) {
                Ok(workers) => {
                    info!("created workers: {:?}", workers);
                    // Affinity handling is opt-in and defaults to off.
                    let handle_process_affinity = match config.handle_process_affinity {
                        Some(val) => val,
                        None => false
                    };
                    if cfg!(target_os = "linux") && handle_process_affinity {
                        set_workers_affinity(&workers);
                    }
                    // Blocks until the proxy shuts down.
                    command::start(config, workers);
                },
                Err(e) => error!("Error while creating workers: {}", e)
            }
        }
        info!("master process stopped");
    } else {
        error!("could not load configuration file at '{}', stopping", config_file);
    }
}
/// Set workers process affinity, see man sched_setaffinity
/// Bind each worker (including the master) process to a CPU core.
/// Can bind multiple processes to a CPU core if there are more processes
/// than CPU cores. Only works on Linux.
#[cfg(target_os = "linux")]
fn set_workers_affinity(workers: &Vec<Worker>) {
    let mut cpu_count = 0;
    let max_cpu = num_cpus::get();

    // +1 for the master process that will also be bound to its CPU core
    if (workers.len() + 1) > max_cpu {
        warn!("There are more workers than available CPU cores, \
            multiple workers will be bound to the same CPU core. \
            This may impact performances");
    }

    // The master process is pinned to the first core.
    let master_pid = unsafe { libc::getpid() };
    set_process_affinity(master_pid, cpu_count);
    cpu_count += 1;

    // Every worker then takes the next core, wrapping around once all
    // cores have been used. (`for worker` — the previous `for ref worker`
    // binding was a redundant `ref` pattern flagged by clippy.)
    for worker in workers {
        if cpu_count >= max_cpu {
            cpu_count = 0;
        }
        set_process_affinity(worker.pid, cpu_count);
        cpu_count += 1;
    }
}
/// Set workers process affinity, see man sched_setaffinity
/// Bind each worker (including the master) process to a CPU core.
/// Can bind multiple processes to a CPU core if there are more processes
/// than CPU cores. Only works on Linux.
#[cfg(not(target_os = "linux"))]
fn set_workers_affinity(_: &Vec<Worker>) {
    // No-op stub: CPU affinity is only handled on Linux.
}
/// Set a specific process to run onto a specific CPU core
#[cfg(target_os = "linux")]
fn set_process_affinity(pid: pid_t, cpu: usize) {
unsafe {
let mut cpu_set: cpu_set_t = mem::zeroed();
let size_cpu_set = mem::size_of::<cpu_set_t>();
libc::CPU_SET(cpu, &mut cpu_set);
libc::sched_setaffinity(pid, size_cpu_set, &mut cpu_set);
debug!("Worker {} bound to CPU core {}", pid, cpu);
};
}
#[cfg(target_os="linux")]
// We check the hard_limit. The soft_limit can be changed at runtime
// by the process or any user. hard_limit can only be changed by root
/// Returns `true` when the configured proxy connection counts fit within
/// both the per-process hard limit and the system-wide file-max limit.
fn check_process_limits(config: Config) -> bool {
    let process_limits = procinfo::pid::limits_self()
        .expect("Couldn't read /proc/self/limits to determine max open file descriptors limit");

    // If limit is "unlimited"
    if process_limits.max_open_files.hard.is_none() {
        return true;
    }

    let hard_limit = process_limits.max_open_files.hard.unwrap();
    // `map` instead of `and_then(|x| Some(..))` (clippy: bind_instead_of_map).
    let http_max_cons = config.http.map(|proxy| proxy.max_connections).unwrap_or(0);
    let https_max_cons = config.https.map(|proxy| proxy.max_connections).unwrap_or(0);

    // check if all proxies are under the hard limit
    if http_max_cons > hard_limit || https_max_cons > hard_limit {
        error!("At least one proxy can't have that much of connections. \
            Current max file descriptor hard limit is: {}", hard_limit);
        return false;
    }

    let total_proxies_connections = http_max_cons + https_max_cons;
    let system_max_fd = procinfo::sys::fs::file_max::file_max()
        .expect("Couldn't read /proc/sys/fs/file-max") as usize;
    if total_proxies_connections > system_max_fd {
        error!("Proxies total max_connections can't be higher than system's file-max limit. \
            Current limit: {}, current value: {}", system_max_fd, total_proxies_connections);
        return false;
    }
    true
}
#[cfg(not(target_os = "linux"))]
/// Non-Linux stub: process limits are only checked on Linux.
fn check_process_limits(_: Config) -> bool {
    true
}
|
use crate::utils::paths;
use crate::utils::{
is_copy, is_type_diagnostic_item, match_trait_method, remove_blocks, snippet_with_applicability, span_lint_and_sugg,
};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::mir::Mutability;
use rustc_middle::ty;
use rustc_middle::ty::adjustment::Adjust;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::symbol::Ident;
use rustc_span::{sym, Span};
declare_clippy_lint! {
    /// **What it does:** Checks for usage of `map(|x| x.clone())` or
    /// dereferencing closures for `Copy` types, on `Iterator` or `Option`,
    /// and suggests `cloned()` or `copied()` instead
    ///
    /// **Why is this bad?** Readability, this can be written more concisely
    ///
    /// **Known problems:** None
    ///
    /// **Example:**
    ///
    /// ```rust
    /// let x = vec![42, 43];
    /// let y = x.iter();
    /// let z = y.map(|i| *i);
    /// ```
    ///
    /// The correct use would be:
    ///
    /// ```rust
    /// let x = vec![42, 43];
    /// let y = x.iter();
    /// let z = y.cloned();
    /// ```
    pub MAP_CLONE,
    style,
    "using `iterator.map(|x| x.clone())`, or dereferencing closures for `Copy` types"
}
// Generates the `MapClone` pass type registering the lint above.
declare_lint_pass!(MapClone => [MAP_CLONE]);
impl<'tcx> LateLintPass<'tcx> for MapClone {
    // Detects `receiver.map(closure)` where the closure only clones,
    // copies, or dereferences its argument, and emits the lint.
    fn check_expr(&mut self, cx: &LateContext<'_>, e: &hir::Expr<'_>) {
        // Don't lint code produced by macro expansion.
        if e.span.from_expansion() {
            return;
        }

        if_chain! {
            // A two-argument `map` method call on an `Option` or on a
            // type whose `map` comes from the `Iterator` trait.
            if let hir::ExprKind::MethodCall(ref method, _, ref args, _) = e.kind;
            if args.len() == 2;
            if method.ident.as_str() == "map";
            let ty = cx.typeck_results().expr_ty(&args[0]);
            if is_type_diagnostic_item(cx, ty, sym::option_type) || match_trait_method(cx, e, &paths::ITERATOR);
            if let hir::ExprKind::Closure(_, _, body_id, _, _) = args[1].kind;
            let closure_body = cx.tcx.hir().body(body_id);
            let closure_expr = remove_blocks(&closure_body.value);
            then {
                match closure_body.params[0].pat.kind {
                    // `.map(|&x| x)`: the pattern itself dereferences, so
                    // this is a copy of the pointee.
                    hir::PatKind::Ref(ref inner, hir::Mutability::Not) => if let hir::PatKind::Binding(
                        hir::BindingAnnotation::Unannotated, .., name, None
                    ) = inner.kind {
                        if ident_eq(name, closure_expr) {
                            lint(cx, e.span, args[0].span, true);
                        }
                    },
                    hir::PatKind::Binding(hir::BindingAnnotation::Unannotated, .., name, None) => {
                        match closure_expr.kind {
                            // `.map(|x| *x)`: explicit deref of a shared reference.
                            hir::ExprKind::Unary(hir::UnOp::UnDeref, ref inner) => {
                                if ident_eq(name, inner) {
                                    if let ty::Ref(.., Mutability::Not) = cx.typeck_results().expr_ty(inner).kind() {
                                        lint(cx, e.span, args[0].span, true);
                                    }
                                }
                            },
                            // `.map(|x| x.clone())`: must really resolve to
                            // `Clone::clone`, with no autoderef in between
                            // (an adjusted receiver would change semantics).
                            hir::ExprKind::MethodCall(ref method, _, [obj], _) => if_chain! {
                                if ident_eq(name, obj) && method.ident.name == sym::clone;
                                if match_trait_method(cx, closure_expr, &paths::CLONE_TRAIT);
                                // no autoderefs
                                if !cx.typeck_results().expr_adjustments(obj).iter()
                                    .any(|a| matches!(a.kind, Adjust::Deref(Some(..))));
                                then {
                                    let obj_ty = cx.typeck_results().expr_ty(obj);
                                    if let ty::Ref(_, ty, mutability) = obj_ty.kind() {
                                        if matches!(mutability, Mutability::Not) {
                                            // Suggest `copied()` for Copy types,
                                            // `cloned()` otherwise.
                                            let copy = is_copy(cx, ty);
                                            lint(cx, e.span, args[0].span, copy);
                                        }
                                    } else {
                                        // Cloning an already-owned value in
                                        // `map` is needless cloning.
                                        lint_needless_cloning(cx, e.span, args[0].span);
                                    }
                                }
                            },
                            _ => {},
                        }
                    },
                    _ => {},
                }
            }
        }
    }
}
/// Check whether `path` is a bare, single-segment path expression that
/// refers to exactly the identifier `name`.
fn ident_eq(name: Ident, path: &hir::Expr<'_>) -> bool {
    match path.kind {
        hir::ExprKind::Path(hir::QPath::Resolved(None, ref resolved)) => {
            resolved.segments.len() == 1 && resolved.segments[0].ident == name
        }
        _ => false,
    }
}
/// Emit the lint for `.map(|x| x.clone())` on owned elements, where the
/// whole `map` call can simply be removed.
fn lint_needless_cloning(cx: &LateContext<'_>, root: Span, receiver: Span) {
    span_lint_and_sugg(
        cx,
        MAP_CLONE,
        // Only the `.map(...)` part after the receiver is replaced.
        root.trim_start(receiver).unwrap(),
        "you are needlessly cloning iterator elements",
        "remove the `map` call",
        String::new(),
        Applicability::MachineApplicable,
    )
}
/// Emit the `map_clone` suggestion: `copied()` when the element type is
/// `Copy`, `cloned()` otherwise.
fn lint(cx: &LateContext<'_>, replace: Span, root: Span, copied: bool) {
    let mut applicability = Applicability::MachineApplicable;
    // The receiver snippet is shared by both suggestions; build it once.
    let receiver = snippet_with_applicability(cx, root, "..", &mut applicability);

    if copied {
        span_lint_and_sugg(
            cx,
            MAP_CLONE,
            replace,
            "you are using an explicit closure for copying elements",
            "consider calling the dedicated `copied` method",
            format!("{}.copied()", receiver),
            applicability,
        )
    } else {
        span_lint_and_sugg(
            cx,
            MAP_CLONE,
            replace,
            "you are using an explicit closure for cloning elements",
            "consider calling the dedicated `cloned` method",
            format!("{}.cloned()", receiver),
            applicability,
        )
    }
}
Auto merge of #6340 - giraffate:improve_doc_for_map_clone, r=Manishearth
Improve doc about `map_clone`
A follow up of https://github.com/rust-lang/rust-clippy/issues/6239#issuecomment-719100677.
`map_clone` works not only with `Iterator` but also with `Option`, although this is not mentioned in the [doc](https://rust-lang.github.io/rust-clippy/master/#map_clone). Also, the example in the doc shows a usage of dereferencing, but this isn't mentioned either.
changelog: Improve doc about `map_clone`
use crate::utils::paths;
use crate::utils::{
is_copy, is_type_diagnostic_item, match_trait_method, remove_blocks, snippet_with_applicability, span_lint_and_sugg,
};
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::mir::Mutability;
use rustc_middle::ty;
use rustc_middle::ty::adjustment::Adjust;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::symbol::Ident;
use rustc_span::{sym, Span};
declare_clippy_lint! {
    /// **What it does:** Checks for usage of `map(|x| x.clone())` or
    /// dereferencing closures for `Copy` types, on `Iterator` or `Option`,
    /// and suggests `cloned()` or `copied()` instead
    ///
    /// **Why is this bad?** Readability; the dedicated `cloned()`/`copied()`
    /// adaptors express the intent more concisely.
    ///
    /// **Known problems:** None
    ///
    /// **Example:**
    ///
    /// ```rust
    /// let x = vec![42, 43];
    /// let y = x.iter();
    /// let z = y.map(|i| *i);
    /// ```
    ///
    /// The correct use would be:
    ///
    /// ```rust
    /// let x = vec![42, 43];
    /// let y = x.iter();
    /// let z = y.cloned();
    /// ```
    pub MAP_CLONE,
    style,
    "using `iterator.map(|x| x.clone())`, or dereferencing closures for `Copy` types"
}
declare_lint_pass!(MapClone => [MAP_CLONE]);
impl<'tcx> LateLintPass<'tcx> for MapClone {
    fn check_expr(&mut self, cx: &LateContext<'_>, e: &hir::Expr<'_>) {
        // Never lint inside macro expansions; the suggestion spans would be wrong.
        if e.span.from_expansion() {
            return;
        }
        if_chain! {
            // Look for a `.map(<closure>)` call (receiver + closure = 2 HIR args).
            if let hir::ExprKind::MethodCall(ref method, _, ref args, _) = e.kind;
            if args.len() == 2;
            if method.ident.as_str() == "map";
            let ty = cx.typeck_results().expr_ty(&args[0]);
            // Only `Option::map` and `Iterator::map` are of interest.
            if is_type_diagnostic_item(cx, ty, sym::option_type) || match_trait_method(cx, e, &paths::ITERATOR);
            if let hir::ExprKind::Closure(_, _, body_id, _, _) = args[1].kind;
            let closure_body = cx.tcx.hir().body(body_id);
            let closure_expr = remove_blocks(&closure_body.value);
            then {
                match closure_body.params[0].pat.kind {
                    // Case `.map(|&x| x)`: a by-value copy via a ref pattern.
                    hir::PatKind::Ref(ref inner, hir::Mutability::Not) => if let hir::PatKind::Binding(
                        hir::BindingAnnotation::Unannotated, .., name, None
                    ) = inner.kind {
                        if ident_eq(name, closure_expr) {
                            lint(cx, e.span, args[0].span, true);
                        }
                    },
                    hir::PatKind::Binding(hir::BindingAnnotation::Unannotated, .., name, None) => {
                        match closure_expr.kind {
                            // Case `.map(|x| *x)`: explicit deref of a shared reference.
                            hir::ExprKind::Unary(hir::UnOp::UnDeref, ref inner) => {
                                if ident_eq(name, inner) {
                                    if let ty::Ref(.., Mutability::Not) = cx.typeck_results().expr_ty(inner).kind() {
                                        lint(cx, e.span, args[0].span, true);
                                    }
                                }
                            },
                            // Case `.map(|x| x.clone())`: explicit `Clone::clone` call.
                            hir::ExprKind::MethodCall(ref method, _, [obj], _) => if_chain! {
                                if ident_eq(name, obj) && method.ident.name == sym::clone;
                                if match_trait_method(cx, closure_expr, &paths::CLONE_TRAIT);
                                // no autoderefs
                                if !cx.typeck_results().expr_adjustments(obj).iter()
                                    .any(|a| matches!(a.kind, Adjust::Deref(Some(..))));
                                then {
                                    let obj_ty = cx.typeck_results().expr_ty(obj);
                                    if let ty::Ref(_, ty, mutability) = obj_ty.kind() {
                                        if matches!(mutability, Mutability::Not) {
                                            // `copied()` for `Copy` element types, `cloned()` otherwise.
                                            let copy = is_copy(cx, ty);
                                            lint(cx, e.span, args[0].span, copy);
                                        }
                                    } else {
                                        // Cloning an owned value that is immediately consumed.
                                        lint_needless_cloning(cx, e.span, args[0].span);
                                    }
                                }
                            },
                            _ => {},
                        }
                    },
                    _ => {},
                }
            }
        }
    }
}
/// Returns `true` when `path` is a plain single-segment path whose identifier
/// equals `name`.
fn ident_eq(name: Ident, path: &hir::Expr<'_>) -> bool {
    match path.kind {
        hir::ExprKind::Path(hir::QPath::Resolved(None, ref path)) => {
            matches!(path.segments, [seg] if seg.ident == name)
        },
        _ => false,
    }
}
/// Emits the "needless cloning" suggestion: the whole `.map(...)` call can be
/// removed because the closure clones an owned value that could be consumed directly.
fn lint_needless_cloning(cx: &LateContext<'_>, root: Span, receiver: Span) {
    span_lint_and_sugg(
        cx,
        MAP_CLONE,
        // `trim_start` yields the `.map(...)` suffix of the whole expression.
        // NOTE(review): panics if `receiver` is not a prefix sub-span of `root`;
        // holds for the current callers.
        root.trim_start(receiver).unwrap(),
        "you are needlessly cloning iterator elements",
        "remove the `map` call",
        String::new(),
        Applicability::MachineApplicable,
    )
}
/// Emits the `map_clone` suggestion, recommending `.copied()` when `copied`
/// is `true` (the element type is `Copy`) and `.cloned()` otherwise.
///
/// `replace` is the span of the whole `recv.map(..)` expression; `root` is
/// the span of the receiver, used to build the replacement snippet.
fn lint(cx: &LateContext<'_>, replace: Span, root: Span, copied: bool) {
    let mut applicability = Applicability::MachineApplicable;
    // The two diagnostics differ only in the verb and the suggested method;
    // pick those once instead of duplicating the whole `span_lint_and_sugg` call.
    let (message, method) = if copied {
        ("you are using an explicit closure for copying elements", "copied")
    } else {
        ("you are using an explicit closure for cloning elements", "cloned")
    };
    span_lint_and_sugg(
        cx,
        MAP_CLONE,
        replace,
        message,
        &format!("consider calling the dedicated `{}` method", method),
        format!(
            "{}.{}()",
            snippet_with_applicability(cx, root, "..", &mut applicability),
            method
        ),
        applicability,
    )
}
|
#[macro_use]
pub mod sym;
#[allow(clippy::module_name_repetitions)]
pub mod ast_utils;
pub mod attrs;
pub mod author;
pub mod camel_case;
pub mod comparisons;
pub mod conf;
pub mod constants;
mod diagnostics;
pub mod eager_or_lazy;
pub mod higher;
mod hir_utils;
pub mod inspector;
#[cfg(feature = "internal-lints")]
pub mod internal_lints;
pub mod numeric_literal;
pub mod paths;
pub mod ptr;
pub mod qualify_min_const_fn;
pub mod sugg;
pub mod usage;
pub mod visitors;
pub use self::attrs::*;
pub use self::diagnostics::*;
pub use self::hir_utils::{both, eq_expr_value, over, SpanlessEq, SpanlessHash};
use std::borrow::Cow;
use std::collections::hash_map::Entry;
use std::hash::BuildHasherDefault;
use std::mem;
use if_chain::if_chain;
use rustc_ast::ast::{self, Attribute, LitKind};
use rustc_attr as attr;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::intravisit::{NestedVisitorMap, Visitor};
use rustc_hir::Node;
use rustc_hir::{
def, Arm, Block, Body, Constness, Crate, Expr, ExprKind, FnDecl, HirId, ImplItem, ImplItemKind, Item, ItemKind,
MatchSource, Param, Pat, PatKind, Path, PathSegment, QPath, TraitItem, TraitItemKind, TraitRef, TyKind, Unsafety,
};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::{LateContext, Level, Lint, LintContext};
use rustc_middle::hir::map::Map;
use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
use rustc_middle::ty::{self, layout::IntegerExt, Ty, TyCtxt, TypeFoldable};
use rustc_semver::RustcVersion;
use rustc_session::Session;
use rustc_span::hygiene::{ExpnKind, MacroKind};
use rustc_span::source_map::original_sp;
use rustc_span::sym as rustc_sym;
use rustc_span::symbol::{self, kw, Symbol};
use rustc_span::{BytePos, Pos, Span, DUMMY_SP};
use rustc_target::abi::Integer;
use rustc_trait_selection::traits::query::normalize::AtExt;
use smallvec::SmallVec;
use crate::consts::{constant, Constant};
/// Parses `msrv` as a Rust version string (e.g. `"1.45.0"`).
///
/// On a parse failure this returns `None`, additionally emitting a compiler
/// error at `span` when both a session and a span are supplied.
pub fn parse_msrv(msrv: &str, sess: Option<&Session>, span: Option<Span>) -> Option<RustcVersion> {
    match RustcVersion::parse(msrv) {
        Ok(version) => Some(version),
        Err(_) => {
            if let (Some(sess), Some(span)) = (sess, span) {
                sess.span_err(span, &format!("`{}` is not a valid Rust version", msrv));
            }
            None
        },
    }
}
/// Checks whether the configured MSRV (if any) is at least `lint_msrv`.
/// With no MSRV configured, every lint is considered applicable.
pub fn meets_msrv(msrv: Option<&RustcVersion>, lint_msrv: &RustcVersion) -> bool {
    match msrv {
        Some(msrv) => msrv.meets(*lint_msrv),
        None => true,
    }
}
/// Generates an `enter_lint_attrs` implementation that reads a crate-level
/// `msrv` attribute into `self.msrv`.
///
/// Invoke as `extract_msrv_attr!(LateContext)` or `extract_msrv_attr!(EarlyContext)`.
/// Both arms forward to the internal `@` arm; `LateContext` additionally passes
/// `()` because there `cx.sess` is a method call rather than a field access,
/// and `$($call)?` expands to the call parentheses.
macro_rules! extract_msrv_attr {
    (LateContext) => {
        extract_msrv_attr!(@LateContext, ());
    };
    (EarlyContext) => {
        extract_msrv_attr!(@EarlyContext);
    };
    (@$context:ident$(, $call:tt)?) => {
        fn enter_lint_attrs(&mut self, cx: &rustc_lint::$context<'tcx>, attrs: &'tcx [rustc_ast::ast::Attribute]) {
            use $crate::utils::get_unique_inner_attr;
            match get_unique_inner_attr(cx.sess$($call)?, attrs, "msrv") {
                Some(msrv_attr) => {
                    if let Some(msrv) = msrv_attr.value_str() {
                        self.msrv = $crate::utils::parse_msrv(
                            &msrv.to_string(),
                            Some(cx.sess$($call)?),
                            Some(msrv_attr.span),
                        );
                    } else {
                        // Attribute present but not of the form `msrv = "<string>"`.
                        cx.sess$($call)?.span_err(msrv_attr.span, "bad clippy attribute");
                    }
                },
                _ => (),
            }
        }
    };
}
/// Returns `true` if the two spans come from differing expansions (i.e., one is
/// from a macro and one isn't).
#[must_use]
pub fn differing_macro_contexts(lhs: Span, rhs: Span) -> bool {
    // Spans from the same expansion share a syntax context.
    lhs.ctxt() != rhs.ctxt()
}
/// Returns `true` if the given `NodeId` is inside a constant context
///
/// # Example
///
/// ```rust,ignore
/// if in_constant(cx, expr.hir_id) {
///     // Do something
/// }
/// ```
pub fn in_constant(cx: &LateContext<'_>, id: HirId) -> bool {
    let parent_id = cx.tcx.hir().get_parent_item(id);
    match cx.tcx.hir().get(parent_id) {
        // `const`/`static` items and anonymous constants are always const contexts.
        Node::Item(&Item {
            kind: ItemKind::Const(..) | ItemKind::Static(..),
            ..
        })
        | Node::TraitItem(&TraitItem {
            kind: TraitItemKind::Const(..),
            ..
        })
        | Node::ImplItem(&ImplItem {
            kind: ImplItemKind::Const(..),
            ..
        })
        | Node::AnonConst(_) => true,
        // Functions count only when declared `const fn`.
        Node::Item(&Item {
            kind: ItemKind::Fn(ref sig, ..),
            ..
        })
        | Node::ImplItem(&ImplItem {
            kind: ImplItemKind::Fn(ref sig, _),
            ..
        }) => sig.header.constness == Constness::Const,
        _ => false,
    }
}
/// Returns `true` if this `span` was expanded by any macro.
///
/// Compiler desugarings (e.g. `?`, `for` loops) are expansions too, but are
/// deliberately not counted as macros here.
#[must_use]
pub fn in_macro(span: Span) -> bool {
    span.from_expansion() && !matches!(span.ctxt().outer_expn_data().kind, ExpnKind::Desugaring(..))
}
// If the snippet is empty, it's an attribute that was inserted during macro
// expansion and we want to ignore those, because they could come from external
// sources that the user has no control over.
// For some reason these attributes don't have any expansion info on them, so
// we have to check it this way until there is a better way.
pub fn is_present_in_source<T: LintContext>(cx: &T, span: Span) -> bool {
    // An unavailable snippet is treated as "present": only a provably empty
    // snippet marks the span as macro-inserted.
    snippet_opt(cx, span).map_or(true, |snippet| !snippet.is_empty())
}
/// Checks if given pattern is a wildcard (`_`)
// The `Deref` bound lets this accept both plain references and smart-pointer
// wrappers around `Pat`.
pub fn is_wild<'tcx>(pat: &impl std::ops::Deref<Target = Pat<'tcx>>) -> bool {
    matches!(pat.kind, PatKind::Wild)
}
/// Checks if type is struct, enum or union type with the given def path.
///
/// If the type is a diagnostic item, use `is_type_diagnostic_item` instead.
/// If you change the signature, remember to update the internal lint `MatchTypeOnDiagItem`
pub fn match_type(cx: &LateContext<'_>, ty: Ty<'_>, path: &[&str]) -> bool {
    if let ty::Adt(adt, _) = ty.kind() {
        match_def_path(cx, adt.did, path)
    } else {
        false
    }
}
/// Checks if the type is equal to a diagnostic item
///
/// If you change the signature, remember to update the internal lint `MatchTypeOnDiagItem`
pub fn is_type_diagnostic_item(cx: &LateContext<'_>, ty: Ty<'_>, diag_item: Symbol) -> bool {
    if let ty::Adt(adt, _) = ty.kind() {
        cx.tcx.is_diagnostic_item(diag_item, adt.did)
    } else {
        false
    }
}
/// Checks if the type is equal to a lang item
pub fn is_type_lang_item(cx: &LateContext<'_>, ty: Ty<'_>, lang_item: hir::LangItem) -> bool {
    match ty.kind() {
        // `require` returns `Err` when the lang item is not defined in the
        // current crate graph (e.g. unusual `no_core` setups). Treat that as
        // "not this lang item" instead of panicking like the previous
        // `unwrap()` did.
        ty::Adt(adt, _) => cx.tcx.lang_items().require(lang_item).map_or(false, |did| did == adt.did),
        _ => false,
    }
}
/// Checks if the method call given in `expr` belongs to the given trait.
pub fn match_trait_method(cx: &LateContext<'_>, expr: &Expr<'_>, path: &[&str]) -> bool {
    cx.typeck_results()
        .type_dependent_def_id(expr.hir_id)
        // A genuine method call always has a type-dependent def-id, but be
        // defensive: returning `false` beats the previous `unwrap()` panic
        // when a caller passes a non-method expression.
        .and_then(|did| cx.tcx.trait_of_item(did))
        .map_or(false, |trt_id| match_def_path(cx, trt_id, path))
}
/// Checks if an expression references a variable of the given name.
pub fn match_var(expr: &Expr<'_>, var: Symbol) -> bool {
    match expr.kind {
        ExprKind::Path(QPath::Resolved(None, ref path)) => {
            // Only a bare, single-segment path can name a local variable.
            matches!(path.segments, [seg] if seg.ident.name == var)
        },
        _ => false,
    }
}
/// Returns the final segment of a `QPath`.
///
/// # Panics
/// Panics on a resolved path with no segments or on a `QPath::LangItem`,
/// which carries no path segments at all.
pub fn last_path_segment<'tcx>(path: &QPath<'tcx>) -> &'tcx PathSegment<'tcx> {
    match *path {
        QPath::Resolved(_, ref path) => path.segments.last().expect("A path must have at least one segment"),
        QPath::TypeRelative(_, ref seg) => seg,
        QPath::LangItem(..) => panic!("last_path_segment: lang item has no path segments"),
    }
}
/// Returns the sole segment of `path`, or `None` when the concept does not
/// apply (`LangItem` paths have no segments; an empty resolved path yields
/// `None` as well).
pub fn single_segment_path<'tcx>(path: &QPath<'tcx>) -> Option<&'tcx PathSegment<'tcx>> {
    match *path {
        QPath::Resolved(_, ref path) => path.segments.first(),
        QPath::TypeRelative(_, ref seg) => Some(seg),
        QPath::LangItem(..) => None,
    }
}
/// Matches a `QPath` against a slice of segment string literals.
///
/// There is also `match_path` if you are dealing with a `rustc_hir::Path` instead of a
/// `rustc_hir::QPath`.
///
/// # Examples
/// ```rust,ignore
/// match_qpath(path, &["std", "rt", "begin_unwind"])
/// ```
pub fn match_qpath(path: &QPath<'_>, segments: &[&str]) -> bool {
    match *path {
        QPath::Resolved(_, ref path) => match_path(path, segments),
        // For `<Ty>::seg`, match the type path against all but the last
        // segment, then compare `seg` against the last one.
        QPath::TypeRelative(ref ty, ref segment) => match ty.kind {
            TyKind::Path(ref inner_path) => {
                if let [prefix @ .., end] = segments {
                    if match_qpath(inner_path, prefix) {
                        return segment.ident.name.as_str() == *end;
                    }
                }
                false
            },
            _ => false,
        },
        QPath::LangItem(..) => false,
    }
}
/// Matches a `Path` against a slice of segment string literals.
///
/// Note: because the two sequences are zipped back-to-front, this compares
/// only the common *suffix*; a shorter `segments` slice matches any path
/// ending in those segments.
///
/// There is also `match_qpath` if you are dealing with a `rustc_hir::QPath` instead of a
/// `rustc_hir::Path`.
///
/// # Examples
///
/// ```rust,ignore
/// if match_path(&trait_ref.path, &paths::HASH) {
///     // This is the `std::hash::Hash` trait.
/// }
///
/// if match_path(ty_path, &["rustc", "lint", "Lint"]) {
///     // This is a `rustc_middle::lint::Lint`.
/// }
/// ```
pub fn match_path(path: &Path<'_>, segments: &[&str]) -> bool {
    path.segments
        .iter()
        .rev()
        .zip(segments.iter().rev())
        .all(|(a, b)| a.ident.name.as_str() == *b)
}
/// Matches a `Path` against a slice of segment string literals, e.g.
///
/// Like `match_path`, this compares only the common suffix of the two
/// sequences.
///
/// # Examples
/// ```rust,ignore
/// match_path_ast(path, &["std", "rt", "begin_unwind"])
/// ```
pub fn match_path_ast(path: &ast::Path, segments: &[&str]) -> bool {
    path.segments
        .iter()
        .rev()
        .zip(segments.iter().rev())
        .all(|(a, b)| a.ident.name.as_str() == *b)
}
/// Gets the definition associated to a path.
///
/// `path[0]` is interpreted as a crate name; the remaining segments are
/// resolved by walking that crate's item tree. Returns `None` when any
/// segment cannot be resolved.
pub fn path_to_res(cx: &LateContext<'_>, path: &[&str]) -> Option<def::Res> {
    let crates = cx.tcx.crates();
    let krate = crates
        .iter()
        .find(|&&krate| cx.tcx.crate_name(krate).as_str() == path[0]);
    if let Some(krate) = krate {
        let krate = DefId {
            krate: *krate,
            index: CRATE_DEF_INDEX,
        };
        let mut current_item = None;
        let mut items = cx.tcx.item_children(krate);
        let mut path_it = path.iter().skip(1).peekable();
        loop {
            let segment = match path_it.next() {
                Some(segment) => segment,
                None => return None,
            };
            // `get_def_path` seems to generate these empty segments for extern blocks.
            // We can just ignore them.
            if segment.is_empty() {
                continue;
            }
            // Empty placeholder used only to take `items` out via `mem::replace`
            // so it can be reassigned inside the loop body below.
            let result = SmallVec::<[_; 8]>::new();
            for item in mem::replace(&mut items, cx.tcx.arena.alloc_slice(&result)).iter() {
                if item.ident.name.as_str() == *segment {
                    if path_it.peek().is_none() {
                        return Some(item.res);
                    }
                    current_item = Some(item);
                    items = cx.tcx.item_children(item.res.def_id());
                    break;
                }
            }
            // The segment isn't a child_item.
            // Try to find it under an inherent impl.
            if_chain! {
                if path_it.peek().is_none();
                if let Some(current_item) = current_item;
                let item_def_id = current_item.res.def_id();
                if cx.tcx.def_kind(item_def_id) == DefKind::Struct;
                then {
                    // Bad `find_map` suggestion. See #4193.
                    #[allow(clippy::find_map)]
                    return cx.tcx.inherent_impls(item_def_id).iter()
                        .flat_map(|&impl_def_id| cx.tcx.item_children(impl_def_id))
                        .find(|item| item.ident.name.as_str() == *segment)
                        .map(|item| item.res);
                }
            }
        }
    } else {
        None
    }
}
/// Resolves a `QPath` to a `Res`, consulting typeck results for
/// type-relative and lang-item paths.
///
/// Returns `Res::Err` when the owner has no typeck results to consult.
pub fn qpath_res(cx: &LateContext<'_>, qpath: &hir::QPath<'_>, id: hir::HirId) -> Res {
    match qpath {
        hir::QPath::Resolved(_, path) => path.res,
        hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => {
            if cx.tcx.has_typeck_results(id.owner.to_def_id()) {
                cx.tcx.typeck(id.owner).qpath_res(qpath, id)
            } else {
                Res::Err
            }
        },
    }
}
/// Convenience function to get the `DefId` of a trait by path.
/// It could be a trait or trait alias.
pub fn get_trait_def_id(cx: &LateContext<'_>, path: &[&str]) -> Option<DefId> {
    match path_to_res(cx, path)? {
        Res::Def(DefKind::Trait | DefKind::TraitAlias, trait_id) => Some(trait_id),
        // `path_to_res` never produces `Res::Err`; resolution failure is `None`.
        Res::Err => unreachable!("this trait resolution is impossible: {:?}", &path),
        _ => None,
    }
}
/// Checks whether a type implements a trait.
/// See also `get_trait_def_id`.
pub fn implements_trait<'tcx>(
    cx: &LateContext<'tcx>,
    ty: Ty<'tcx>,
    trait_id: DefId,
    ty_params: &[GenericArg<'tcx>],
) -> bool {
    // Do not check on infer_types to avoid panic in evaluate_obligation.
    if ty.has_infer_types() {
        return false;
    }
    let ty = cx.tcx.erase_regions(ty);
    // Types with escaping bound vars cannot be queried for trait impls.
    if ty.has_escaping_bound_vars() {
        return false;
    }
    let ty_params = cx.tcx.mk_substs(ty_params.iter());
    cx.tcx.type_implements_trait((trait_id, ty, ty_params, cx.param_env))
}
/// Gets the `hir::TraitRef` of the trait the given method is implemented for.
///
/// Use this if you want to find the `TraitRef` of the `Add` trait in this example:
///
/// ```rust
/// struct Point(isize, isize);
///
/// impl std::ops::Add for Point {
///     type Output = Self;
///
///     fn add(self, other: Self) -> Self {
///         Point(0, 0)
///     }
/// }
/// ```
pub fn trait_ref_of_method<'tcx>(cx: &LateContext<'tcx>, hir_id: HirId) -> Option<&'tcx TraitRef<'tcx>> {
    // Get the implemented trait for the current function
    let parent_impl = cx.tcx.hir().get_parent_item(hir_id);
    if_chain! {
        // A crate-root parent means the item is not inside an `impl` at all.
        if parent_impl != hir::CRATE_HIR_ID;
        if let hir::Node::Item(item) = cx.tcx.hir().get(parent_impl);
        // `of_trait` is `None` for inherent impls, yielding `None` below.
        if let hir::ItemKind::Impl{ of_trait: trait_ref, .. } = &item.kind;
        then { return trait_ref.as_ref(); }
    }
    None
}
/// Checks whether this type implements `Drop`.
///
/// Non-ADT types (primitives, references, ...) never have a destructor of
/// their own and yield `false`.
pub fn has_drop<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
    ty.ty_adt_def().map_or(false, |def| def.has_dtor(cx.tcx))
}
/// Returns the method names and argument list of nested method call
/// expressions that make up `expr`, walking at most `max_depth` receivers
/// deep. method/span lists are sorted with the most recent call first.
pub fn method_calls<'tcx>(
    expr: &'tcx Expr<'tcx>,
    max_depth: usize,
) -> (Vec<Symbol>, Vec<&'tcx [Expr<'tcx>]>, Vec<Span>) {
    let mut method_names = Vec::with_capacity(max_depth);
    let mut arg_lists = Vec::with_capacity(max_depth);
    let mut spans = Vec::with_capacity(max_depth);
    let mut current = expr;
    for _ in 0..max_depth {
        // NOTE: the original text contained `¤t.kind` — an HTML-entity
        // corruption (`&curren;` + `t`) of `&current.kind`; restored here.
        if let ExprKind::MethodCall(path, span, args, _) = &current.kind {
            // Stop at macro-generated arguments; their spans can't be used reliably.
            if args.iter().any(|e| e.span.from_expansion()) {
                break;
            }
            method_names.push(path.ident.name);
            arg_lists.push(&**args);
            spans.push(*span);
            // In HIR, the receiver is the first "argument" of the call.
            current = &args[0];
        } else {
            break;
        }
    }
    (method_names, arg_lists, spans)
}
/// Matches an `Expr` against a chain of methods, and return the matched `Expr`s.
///
/// For example, if `expr` represents the `.baz()` in `foo.bar().baz()`,
/// `method_chain_args(expr, &["bar", "baz"])` will return a `Vec`
/// containing the `Expr`s for
/// `.bar()` and `.baz()`
pub fn method_chain_args<'a>(expr: &'a Expr<'_>, methods: &[&str]) -> Option<Vec<&'a [Expr<'a>]>> {
    let mut current = expr;
    let mut matched = Vec::with_capacity(methods.len());
    for method_name in methods.iter().rev() {
        // method chains are stored last -> first
        if let ExprKind::MethodCall(ref path, _, ref args, _) = current.kind {
            if path.ident.name.as_str() == *method_name {
                // Bail out on macro-generated arguments; spans there are unreliable.
                if args.iter().any(|e| e.span.from_expansion()) {
                    return None;
                }
                matched.push(&**args); // build up `matched` backwards
                current = &args[0] // go to parent expression
            } else {
                return None;
            }
        } else {
            return None;
        }
    }
    // Reverse `matched` so that it is in the same order as `methods`.
    matched.reverse();
    Some(matched)
}
/// Returns `true` if the provided `def_id` is an entrypoint to a program.
// Compares against the local crate's `fn main` (if any).
pub fn is_entrypoint_fn(cx: &LateContext<'_>, def_id: DefId) -> bool {
    cx.tcx
        .entry_fn(LOCAL_CRATE)
        .map_or(false, |(entry_fn_def_id, _)| def_id == entry_fn_def_id.to_def_id())
}
/// Returns `true` if the expression is in the program's `#[panic_handler]`.
pub fn is_in_panic_handler(cx: &LateContext<'_>, e: &Expr<'_>) -> bool {
    // Compare the expression's enclosing item against the registered panic_impl lang item.
    let parent = cx.tcx.hir().get_parent_item(e.hir_id);
    let def_id = cx.tcx.hir().local_def_id(parent).to_def_id();
    Some(def_id) == cx.tcx.lang_items().panic_impl()
}
/// Gets the name of the item the expression is in, if available.
pub fn get_item_name(cx: &LateContext<'_>, expr: &Expr<'_>) -> Option<Symbol> {
    let parent_id = cx.tcx.hir().get_parent_item(expr.hir_id);
    match cx.tcx.hir().find(parent_id)? {
        // Free items, trait items and impl items all carry an `ident`.
        Node::Item(Item { ident, .. })
        | Node::TraitItem(TraitItem { ident, .. })
        | Node::ImplItem(ImplItem { ident, .. }) => Some(ident.name),
        _ => None,
    }
}
/// Gets the name of a `Pat`, if any.
pub fn get_pat_name(pat: &Pat<'_>) -> Option<Symbol> {
    match pat.kind {
        PatKind::Binding(.., ref spname, _) => Some(spname.name),
        PatKind::Path(ref qpath) => single_segment_path(qpath).map(|ps| ps.ident.name),
        // Look through one level of `box`/`&` and recurse on the inner pattern.
        PatKind::Box(ref p) | PatKind::Ref(ref p, _) => get_pat_name(&*p),
        _ => None,
    }
}
/// HIR visitor that records whether a given name occurs anywhere in the
/// visited subtree; used by `contains_name`.
struct ContainsName {
    // Name being searched for.
    name: Symbol,
    // Set to `true` once `name` has been seen.
    result: bool,
}
impl<'tcx> Visitor<'tcx> for ContainsName {
    type Map = Map<'tcx>;

    // Flag a hit whenever any visited name matches the needle.
    fn visit_name(&mut self, _: Span, name: Symbol) {
        if self.name == name {
            self.result = true;
        }
    }
    // Do not descend into nested bodies.
    fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
        NestedVisitorMap::None
    }
}
/// Checks if an `Expr` contains a certain name.
pub fn contains_name(name: Symbol, expr: &Expr<'_>) -> bool {
    let mut visitor = ContainsName { name, result: false };
    visitor.visit_expr(expr);
    visitor.result
}
/// Returns `true` if `expr` contains a return expression
pub fn contains_return(expr: &hir::Expr<'_>) -> bool {
    // Local visitor: walks the expression tree and stops early once a
    // `return` has been found.
    struct RetCallFinder {
        found: bool,
    }

    impl<'tcx> hir::intravisit::Visitor<'tcx> for RetCallFinder {
        type Map = Map<'tcx>;

        fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
            // Short-circuit: nothing more to learn after the first hit.
            if self.found {
                return;
            }
            if let hir::ExprKind::Ret(..) = &expr.kind {
                self.found = true;
            } else {
                hir::intravisit::walk_expr(self, expr);
            }
        }

        // Nested bodies (closures etc.) are not entered.
        fn nested_visit_map(&mut self) -> hir::intravisit::NestedVisitorMap<Self::Map> {
            hir::intravisit::NestedVisitorMap::None
        }
    }

    let mut visitor = RetCallFinder { found: false };
    visitor.visit_expr(expr);
    visitor.found
}
/// Converts a span to a code snippet if available, otherwise use default.
///
/// This is useful if you want to provide suggestions for your lint or more generally, if you want
/// to convert a given `Span` to a `str`.
///
/// # Example
/// ```rust,ignore
/// snippet(cx, expr.span, "..")
/// ```
pub fn snippet<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
    match snippet_opt(cx, span) {
        Some(snip) => Cow::Owned(snip),
        None => Cow::Borrowed(default),
    }
}
/// Same as `snippet`, but it adapts the applicability level by following rules:
///
/// - Applicability level `Unspecified` will never be changed.
/// - If the span is inside a macro, change the applicability level to `MaybeIncorrect`.
/// - If the default value is used and the applicability level is `MachineApplicable`, change it to
/// `HasPlaceholders`
pub fn snippet_with_applicability<'a, T: LintContext>(
    cx: &T,
    span: Span,
    default: &'a str,
    applicability: &mut Applicability,
) -> Cow<'a, str> {
    // Macro-expanded spans make a mechanical fix unreliable.
    if *applicability != Applicability::Unspecified && span.from_expansion() {
        *applicability = Applicability::MaybeIncorrect;
    }
    snippet_opt(cx, span).map_or_else(
        || {
            // Falling back to the placeholder text: downgrade the fix.
            if *applicability == Applicability::MachineApplicable {
                *applicability = Applicability::HasPlaceholders;
            }
            Cow::Borrowed(default)
        },
        From::from,
    )
}
/// Same as `snippet`, but should only be used when it's clear that the input span is
/// not a macro argument.
// Resolves the span to its macro call site before taking the snippet.
pub fn snippet_with_macro_callsite<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
    snippet(cx, span.source_callsite(), default)
}
/// Converts a span to a code snippet. Returns `None` if not available.
pub fn snippet_opt<T: LintContext>(cx: &T, span: Span) -> Option<String> {
    cx.sess().source_map().span_to_snippet(span).ok()
}
/// Converts a span (from a block) to a code snippet if available, otherwise use default.
///
/// This trims the code of indentation, except for the first line. Use it for blocks or block-like
/// things which need to be printed as such.
///
/// The `indent_relative_to` arg can be used to provide a span whose line
/// indentation is applied to the resulting snippet.
///
/// # Example
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", None)
/// // where, `block` is the block of the if expr
///     if x {
///         y;
///     }
/// // will return the snippet
/// {
///     y;
/// }
/// ```
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", Some(if_expr.span))
/// // where, `block` is the block of the if expr
///     if x {
///         y;
///     }
/// // will return the snippet
/// {
///         y;
///     } // aligned with `if`
/// ```
/// Note that the first line of the snippet always has 0 indentation.
pub fn snippet_block<'a, T: LintContext>(
    cx: &T,
    span: Span,
    default: &'a str,
    indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
    let snip = snippet(cx, span, default);
    let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
    reindent_multiline(snip, true, indent)
}
/// Same as `snippet_block`, but adapts the applicability level by the rules of
/// `snippet_with_applicability`.
pub fn snippet_block_with_applicability<'a, T: LintContext>(
    cx: &T,
    span: Span,
    default: &'a str,
    indent_relative_to: Option<Span>,
    applicability: &mut Applicability,
) -> Cow<'a, str> {
    let snip = snippet_with_applicability(cx, span, default, applicability);
    let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
    reindent_multiline(snip, true, indent)
}
/// Returns a new Span that extends the original Span to the first non-whitespace char of the first
/// line.
///
/// ```rust,ignore
///     let x = ();
/// //          ^^
/// // will be converted to
///     let x = ();
/// //  ^^^^^^^^^^
/// ```
// Falls back to the original span when the line's first char can't be located.
pub fn first_line_of_span<T: LintContext>(cx: &T, span: Span) -> Span {
    first_char_in_first_line(cx, span).map_or(span, |first_char_pos| span.with_lo(first_char_pos))
}
/// Byte position of the first non-whitespace character on the line containing
/// `span`, or `None` if the line snippet is unavailable or all whitespace.
fn first_char_in_first_line<T: LintContext>(cx: &T, span: Span) -> Option<BytePos> {
    let line_span = line_span(cx, span);
    snippet_opt(cx, line_span).and_then(|snip| {
        snip.find(|c: char| !c.is_whitespace())
            .map(|pos| line_span.lo() + BytePos::from_usize(pos))
    })
}
/// Returns the indentation of the line of a span
///
/// ```rust,ignore
/// let x = ();
/// //      ^^ -- will return 0
///     let x = ();
/// //          ^^ -- will return 4
/// ```
// `None` when the line snippet is unavailable or contains only whitespace.
pub fn indent_of<T: LintContext>(cx: &T, span: Span) -> Option<usize> {
    snippet_opt(cx, line_span(cx, span)).and_then(|snip| snip.find(|c: char| !c.is_whitespace()))
}
/// Returns the position just before rarrow
///
/// ```rust,ignore
/// fn into(self) -> () {}
///              ^
/// // in case of unformatted code
/// fn into2(self)-> () {}
///               ^
/// fn into3(self)   ->   () {}
///               ^
/// ```
#[allow(clippy::needless_pass_by_value)]
pub fn position_before_rarrow(s: String) -> Option<usize> {
    let arrow_pos = s.rfind("->")?;
    // NOTE(review): `rfind` yields a byte index while `chars` is indexed by
    // char position; these agree only for ASCII input — seems fine for the
    // signature snippets this is applied to, but worth confirming.
    let chars: Vec<char> = s.chars().collect();
    let mut pos = arrow_pos;
    // Walk left over any whitespace directly preceding the `->`.
    while pos > 1 && chars.get(pos - 1).map_or(false, |c| c.is_whitespace()) {
        pos -= 1;
    }
    Some(pos)
}
/// Extends the span to the beginning of the spans line, incl. whitespaces.
///
/// ```rust,ignore
///        let x = ();
/// //             ^^
/// // will be converted to
///        let x = ();
/// // ^^^^^^^^^^^^^^
/// ```
// NOTE(review): both the `lookup_line` unwrap and the `lines[line_no]` index
// can panic for spans outside any source file; callers pass real code spans.
fn line_span<T: LintContext>(cx: &T, span: Span) -> Span {
    let span = original_sp(span, DUMMY_SP);
    let source_map_and_line = cx.sess().source_map().lookup_line(span.lo()).unwrap();
    let line_no = source_map_and_line.line;
    let line_start = source_map_and_line.sf.lines[line_no];
    Span::new(line_start, span.hi(), span.ctxt())
}
/// Like `snippet_block`, but add braces if the expr is not an `ExprKind::Block`.
/// Also takes an `Option<String>` which can be put inside the braces.
pub fn expr_block<'a, T: LintContext>(
    cx: &T,
    expr: &Expr<'_>,
    option: Option<String>,
    default: &'a str,
    indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
    let code = snippet_block(cx, expr.span, default, indent_relative_to);
    let string = option.unwrap_or_default();
    if expr.span.from_expansion() {
        // NOTE(review): in this branch `option`/`string` is silently dropped —
        // looks intentional for macro-expanded code, but worth confirming.
        Cow::Owned(format!("{{ {} }}", snippet_with_macro_callsite(cx, expr.span, default)))
    } else if let ExprKind::Block(_, _) = expr.kind {
        // Already a block: just append the extra content.
        Cow::Owned(format!("{}{}", code, string))
    } else if string.is_empty() {
        Cow::Owned(format!("{{ {} }}", code))
    } else {
        Cow::Owned(format!("{{\n{};\n{}\n}}", code, string))
    }
}
/// Reindent a multiline string with possibility of ignoring the first line.
#[allow(clippy::needless_pass_by_value)]
pub fn reindent_multiline(s: Cow<'_, str>, ignore_first: bool, indent: Option<usize>) -> Cow<'_, str> {
    // Normalise space indentation, then tab indentation, then spaces once more
    // (padding in the middle pass is always done with spaces).
    let pass_spaces = reindent_multiline_inner(&s, ignore_first, indent, ' ');
    let pass_tabs = reindent_multiline_inner(&pass_spaces, ignore_first, indent, '\t');
    Cow::Owned(reindent_multiline_inner(&pass_tabs, ignore_first, indent, ' '))
}
/// Re-indents `s` so that the smallest existing indentation (counted in `ch`
/// characters) becomes `indent` spaces; with `ignore_first` the first line is
/// left untouched and excluded from the measurement.
fn reindent_multiline_inner(s: &str, ignore_first: bool, indent: Option<usize>, ch: char) -> String {
    // Smallest indentation found over the considered, non-empty lines.
    let min_indent = s
        .lines()
        .skip(usize::from(ignore_first))
        .filter(|l| !l.is_empty()) // empty lines carry no indentation information
        .map(|l| l.char_indices().find(|&(_, c)| c != ch).map_or(l.len(), |(i, _)| i))
        .min()
        .unwrap_or(0);
    let target = indent.unwrap_or(0);
    let reindented: Vec<String> = s
        .lines()
        .enumerate()
        .map(|(i, l)| {
            if (ignore_first && i == 0) || l.is_empty() {
                l.to_owned()
            } else if min_indent > target {
                // Strip the surplus indentation.
                l.split_at(min_indent - target).1.to_owned()
            } else {
                // Pad (with spaces) up to the requested indentation.
                " ".repeat(target - min_indent) + l
            }
        })
        .collect();
    reindented.join("\n")
}
/// Gets the parent expression, if any — this is useful to constrain a lint.
pub fn get_parent_expr<'tcx>(cx: &LateContext<'tcx>, e: &Expr<'_>) -> Option<&'tcx Expr<'tcx>> {
    let map = &cx.tcx.hir();
    let parent_id = map.get_parent_node(e.hir_id);
    // A node that is its own parent is the crate root: no parent expression.
    if parent_id == e.hir_id {
        return None;
    }
    match map.find(parent_id) {
        Some(Node::Expr(parent)) => Some(parent),
        _ => None,
    }
}
/// Returns the innermost enclosing `Block` of the node with the given `HirId`,
/// looking through function and method items to their body block.
pub fn get_enclosing_block<'tcx>(cx: &LateContext<'tcx>, hir_id: HirId) -> Option<&'tcx Block<'tcx>> {
    let map = &cx.tcx.hir();
    let enclosing_node = map
        .get_enclosing_scope(hir_id)
        .and_then(|enclosing_id| map.find(enclosing_id));
    enclosing_node.and_then(|node| match node {
        Node::Block(block) => Some(block),
        // For fn-like scopes, use the body expression's block (if it is one).
        Node::Item(&Item {
            kind: ItemKind::Fn(_, _, eid),
            ..
        })
        | Node::ImplItem(&ImplItem {
            kind: ImplItemKind::Fn(_, eid),
            ..
        }) => match cx.tcx.hir().body(eid).value.kind {
            ExprKind::Block(ref block, _) => Some(block),
            _ => None,
        },
        _ => None,
    })
}
/// Returns the base type for HIR references and pointers.
// Recurses through any number of `*const`/`*mut`/`&` layers.
pub fn walk_ptrs_hir_ty<'tcx>(ty: &'tcx hir::Ty<'tcx>) -> &'tcx hir::Ty<'tcx> {
    match ty.kind {
        TyKind::Ptr(ref mut_ty) | TyKind::Rptr(_, ref mut_ty) => walk_ptrs_hir_ty(&mut_ty.ty),
        _ => ty,
    }
}
/// Returns the base type for references and raw pointers, and count reference
/// depth.
// Note: despite the doc, only `ty::Ref` layers are peeled here; raw pointers
// (`ty::RawPtr`) are returned as-is.
pub fn walk_ptrs_ty_depth(ty: Ty<'_>) -> (Ty<'_>, usize) {
    // Tail-recursive helper carrying the number of `&` layers peeled so far.
    fn inner(ty: Ty<'_>, depth: usize) -> (Ty<'_>, usize) {
        match ty.kind() {
            ty::Ref(_, ty, _) => inner(ty, depth + 1),
            _ => (ty, depth),
        }
    }
    inner(ty, 0)
}
/// Checks whether the given expression is a constant integer of the given value.
/// unlike `is_integer_literal`, this version does const folding
pub fn is_integer_const(cx: &LateContext<'_>, e: &Expr<'_>, value: u128) -> bool {
    // Fast path: a plain literal needs no const evaluation.
    if is_integer_literal(e, value) {
        return true;
    }
    let map = cx.tcx.hir();
    let parent_item = map.get_parent_item(e.hir_id);
    // Const-fold the expression within its owning body, if any.
    if let Some((Constant::Int(v), _)) = map
        .maybe_body_owned_by(parent_item)
        .and_then(|body_id| constant(cx, cx.tcx.typeck_body(body_id), e))
    {
        value == v
    } else {
        false
    }
}
/// Checks whether the given expression is a constant literal of the given value.
pub fn is_integer_literal(expr: &Expr<'_>, value: u128) -> bool {
    // FIXME: use constant folding
    match expr.kind {
        ExprKind::Lit(ref lit) => matches!(lit.node, LitKind::Int(v, _) if v == value),
        _ => false,
    }
}
/// Returns `true` if the given `Expr` has been coerced before.
///
/// Examples of coercions can be found in the Nomicon at
/// <https://doc.rust-lang.org/nomicon/coercions.html>.
///
/// See `rustc_middle::ty::adjustment::Adjustment` and `rustc_typeck::check::coercion` for more
/// information on adjustments and coercions.
pub fn is_adjusted(cx: &LateContext<'_>, e: &Expr<'_>) -> bool {
    // Any recorded adjustment entry means a coercion was applied.
    cx.typeck_results().adjustments().get(e.hir_id).is_some()
}
/// Returns the pre-expansion span if is this comes from an expansion of the
/// macro `name`.
/// See also `is_direct_expn_of`.
#[must_use]
pub fn is_expn_of(mut span: Span, name: &str) -> Option<Span> {
    // Walk outward through nested expansions until `name` is found or the
    // span no longer comes from an expansion.
    loop {
        if span.from_expansion() {
            let data = span.ctxt().outer_expn_data();
            let new_span = data.call_site;
            if let ExpnKind::Macro(MacroKind::Bang, mac_name) = data.kind {
                if mac_name.as_str() == name {
                    return Some(new_span);
                }
            }
            // Not the wanted macro: continue from the call site one level up.
            span = new_span;
        } else {
            return None;
        }
    }
}
/// Returns the pre-expansion span if the span directly comes from an expansion
/// of the macro `name`.
/// The difference with `is_expn_of` is that in
/// ```rust,ignore
/// foo!(bar!(42));
/// ```
/// `42` is considered expanded from `foo!` and `bar!` by `is_expn_of` but only
/// `bar!` by
/// `is_direct_expn_of`.
#[must_use]
pub fn is_direct_expn_of(span: Span, name: &str) -> Option<Span> {
    // Only the outermost expansion level is inspected — no loop here.
    if span.from_expansion() {
        let data = span.ctxt().outer_expn_data();
        let new_span = data.call_site;
        if let ExpnKind::Macro(MacroKind::Bang, mac_name) = data.kind {
            if mac_name.as_str() == name {
                return Some(new_span);
            }
        }
    }
    None
}
/// Convenience function to get the return type of a function.
pub fn return_ty<'tcx>(cx: &LateContext<'tcx>, fn_item: hir::HirId) -> Ty<'tcx> {
    let fn_def_id = cx.tcx.hir().local_def_id(fn_item);
    let ret_ty = cx.tcx.fn_sig(fn_def_id).output();
    // Strip the late-bound regions so the caller gets a plain `Ty`.
    cx.tcx.erase_late_bound_regions(ret_ty)
}
/// Walks into `ty` and returns `true` if any inner type is the same as `other_ty`
pub fn contains_ty(ty: Ty<'_>, other_ty: Ty<'_>) -> bool {
    // `walk()` visits `ty` itself and all nested generic arguments.
    ty.walk().any(|inner| match inner.unpack() {
        GenericArgKind::Type(inner_ty) => ty::TyS::same_type(other_ty, inner_ty),
        GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => false,
    })
}
/// Returns `true` if the given type is an `unsafe` function.
pub fn type_is_unsafe_function<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
    match ty.kind() {
        // Only fn items and fn pointers carry a callable signature here.
        ty::FnDef(..) | ty::FnPtr(_) => ty.fn_sig(cx.tcx).unsafety() == Unsafety::Unsafe,
        _ => false,
    }
}
/// Returns `true` if `ty` is `Copy` (ignoring region differences) in the current param env.
pub fn is_copy<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
    ty.is_copy_modulo_regions(cx.tcx.at(DUMMY_SP), cx.param_env)
}
/// Checks if an expression is constructing a tuple-like enum variant or struct
pub fn is_ctor_or_promotable_const_function(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
    if let ExprKind::Call(ref fun, _) = expr.kind {
        if let ExprKind::Path(ref qp) = fun.kind {
            let res = cx.qpath_res(qp, fun.hir_id);
            return match res {
                // Tuple-struct / tuple-variant constructors always qualify.
                def::Res::Def(DefKind::Variant | DefKind::Ctor(..), ..) => true,
                // Otherwise accept functions rustc considers promotable const fns.
                def::Res::Def(_, def_id) => cx.tcx.is_promotable_const_fn(def_id),
                _ => false,
            };
        }
    }
    false
}
/// Returns `true` if a pattern is refutable.
// TODO: should be implemented using rustc/mir_build/thir machinery
pub fn is_refutable(cx: &LateContext<'_>, pat: &Pat<'_>) -> bool {
    // A path/ctor that resolves to an enum variant can fail to match
    // (unless the enum has one variant, which this over-approximates as refutable).
    fn is_enum_variant(cx: &LateContext<'_>, qpath: &QPath<'_>, id: HirId) -> bool {
        matches!(
            cx.qpath_res(qpath, id),
            def::Res::Def(DefKind::Variant, ..) | Res::Def(DefKind::Ctor(def::CtorOf::Variant, _), _)
        )
    }
    // A compound pattern is refutable if any sub-pattern is.
    fn are_refutable<'a, I: Iterator<Item = &'a Pat<'a>>>(cx: &LateContext<'_>, mut i: I) -> bool {
        i.any(|pat| is_refutable(cx, pat))
    }
    match pat.kind {
        PatKind::Wild => false,
        // `x @ subpat`: refutable iff the sub-pattern is.
        PatKind::Binding(_, _, _, pat) => pat.map_or(false, |pat| is_refutable(cx, pat)),
        PatKind::Box(ref pat) | PatKind::Ref(ref pat, _) => is_refutable(cx, pat),
        // Literals and ranges never cover the whole type here.
        PatKind::Lit(..) | PatKind::Range(..) => true,
        PatKind::Path(ref qpath) => is_enum_variant(cx, qpath, pat.hir_id),
        PatKind::Or(ref pats) => {
            // TODO: should be the honest check, that pats is exhaustive set
            are_refutable(cx, pats.iter().map(|pat| &**pat))
        },
        PatKind::Tuple(ref pats, _) => are_refutable(cx, pats.iter().map(|pat| &**pat)),
        PatKind::Struct(ref qpath, ref fields, _) => {
            is_enum_variant(cx, qpath, pat.hir_id) || are_refutable(cx, fields.iter().map(|field| &*field.pat))
        },
        PatKind::TupleStruct(ref qpath, ref pats, _) => {
            is_enum_variant(cx, qpath, pat.hir_id) || are_refutable(cx, pats.iter().map(|pat| &**pat))
        },
        PatKind::Slice(ref head, ref middle, ref tail) => {
            match &cx.typeck_results().node_type(pat.hir_id).kind() {
                ty::Slice(..) => {
                    // [..] is the only irrefutable slice pattern.
                    !head.is_empty() || middle.is_none() || !tail.is_empty()
                },
                // Arrays have a known length, so element refutability decides.
                ty::Array(..) => are_refutable(cx, head.iter().chain(middle).chain(tail.iter()).map(|pat| &**pat)),
                _ => {
                    // unreachable!()
                    true
                },
            }
        },
    }
}
/// Checks for the `#[automatically_derived]` attribute all `#[derive]`d
/// implementations have.
pub fn is_automatically_derived(attrs: &[ast::Attribute]) -> bool {
    attrs.iter().any(|attr| attr.has_name(rustc_sym::automatically_derived))
}
/// Remove blocks around an expression.
///
/// I.e. `x`, `{ x }` and `{{{{ x }}}}` all give `x`. `{ x; y }` and `{}` return
/// themselves.
pub fn remove_blocks<'tcx>(mut expr: &'tcx Expr<'tcx>) -> &'tcx Expr<'tcx> {
    // Peel blocks that consist of nothing but a tail expression; stop at the
    // first block with statements or without a tail expression.
    while let ExprKind::Block(ref block, ..) = expr.kind {
        if !block.stmts.is_empty() {
            break;
        }
        match block.expr.as_ref() {
            Some(inner) => expr = inner,
            None => break,
        }
    }
    expr
}
/// Returns `true` if the parameter's pattern is the `self` binding.
pub fn is_self(slf: &Param<'_>) -> bool {
    if let PatKind::Binding(.., name, _) = slf.pat.kind {
        name.name == kw::SelfLower
    } else {
        false
    }
}
/// Returns `true` if the HIR type is a bare path resolving to `Self`.
pub fn is_self_ty(slf: &hir::Ty<'_>) -> bool {
    if_chain! {
        if let TyKind::Path(ref qp) = slf.kind;
        if let QPath::Resolved(None, ref path) = *qp;
        if let Res::SelfTy(..) = path.res;
        then {
            return true
        }
    }
    false
}
/// Iterates over the body parameters corresponding to the declared inputs.
// NOTE(review): indexes `body.params` by `decl.inputs.len()` — assumes the two
// lists always have matching lengths; verify for variadic/closure cases.
pub fn iter_input_pats<'tcx>(decl: &FnDecl<'_>, body: &'tcx Body<'_>) -> impl Iterator<Item = &'tcx Param<'tcx>> {
    (0..decl.inputs.len()).map(move |i| &body.params[i])
}
/// Checks if a given expression is a match expression expanded from the `?`
/// operator or the `try` macro.
pub fn is_try<'tcx>(expr: &'tcx Expr<'tcx>) -> Option<&'tcx Expr<'tcx>> {
    // Matches the `Ok(x) => x` arm: the pattern binds exactly one variable and
    // the arm body is a bare path resolving back to that same binding.
    fn is_ok(arm: &Arm<'_>) -> bool {
        if_chain! {
            if let PatKind::TupleStruct(ref path, ref pat, None) = arm.pat.kind;
            if match_qpath(path, &paths::RESULT_OK[1..]);
            if let PatKind::Binding(_, hir_id, _, None) = pat[0].kind;
            if let ExprKind::Path(QPath::Resolved(None, ref path)) = arm.body.kind;
            if let Res::Local(lid) = path.res;
            if lid == hir_id;
            then {
                return true;
            }
        }
        false
    }
    // Matches any `Err(..) => ...` arm, regardless of its body.
    fn is_err(arm: &Arm<'_>) -> bool {
        if let PatKind::TupleStruct(ref path, _, _) = arm.pat.kind {
            match_qpath(path, &paths::RESULT_ERR[1..])
        } else {
            false
        }
    }
    if let ExprKind::Match(_, ref arms, ref source) = expr.kind {
        // desugared from a `?` operator
        if let MatchSource::TryDesugar = *source {
            return Some(expr);
        }
        // Otherwise accept a hand-written (or `try!`-expanded) two-arm match
        // with one `Ok` arm and one `Err` arm, neither guarded.
        if_chain! {
            if arms.len() == 2;
            if arms[0].guard.is_none();
            if arms[1].guard.is_none();
            if (is_ok(&arms[0]) && is_err(&arms[1])) ||
                (is_ok(&arms[1]) && is_err(&arms[0]));
            then {
                return Some(expr);
            }
        }
    }
    None
}
/// Returns `true` if the lint is allowed in the current context
///
/// Useful for skipping long running code when it's unnecessary
pub fn is_allowed(cx: &LateContext<'_>, lint: &'static Lint, id: HirId) -> bool {
    cx.tcx.lint_level_at_node(lint, id).0 == Level::Allow
}
/// Returns the bound name of a binding pattern, looking through references.
pub fn get_arg_name(pat: &Pat<'_>) -> Option<Symbol> {
    match pat.kind {
        PatKind::Binding(.., ident, None) => Some(ident.name),
        // `&pat` / `&mut pat`: recurse into the inner pattern.
        PatKind::Ref(ref subpat, _) => get_arg_name(subpat),
        _ => None,
    }
}
/// Returns the bit width of the given signed integer type for the current target.
pub fn int_bits(tcx: TyCtxt<'_>, ity: ast::IntTy) -> u64 {
    Integer::from_attr(&tcx, attr::IntType::SignedInt(ity)).size().bits()
}
#[allow(clippy::cast_possible_wrap)]
/// Turn a constant int byte representation into an i128
pub fn sext(tcx: TyCtxt<'_>, u: u128, ity: ast::IntTy) -> i128 {
    // Shift the value's sign bit up to bit 127, then arithmetic-shift back down
    // so the sign is extended through the upper bits.
    let amt = 128 - int_bits(tcx, ity);
    ((u as i128) << amt) >> amt
}
#[allow(clippy::cast_sign_loss)]
/// clip unused bytes
pub fn unsext(tcx: TyCtxt<'_>, u: i128, ity: ast::IntTy) -> u128 {
    // Same shift trick as `sext`, but on u128 the right shift is logical,
    // zeroing the bits above the type's width.
    let amt = 128 - int_bits(tcx, ity);
    ((u as u128) << amt) >> amt
}
/// clip unused bytes
pub fn clip(tcx: TyCtxt<'_>, u: u128, ity: ast::UintTy) -> u128 {
    // Mask the value down to the unsigned type's width via shift-up/shift-down.
    let bits = Integer::from_attr(&tcx, attr::IntType::UnsignedInt(ity)).size().bits();
    let amt = 128 - bits;
    (u << amt) >> amt
}
/// Removes block comments from the given `Vec` of lines.
///
/// Any line containing a `/*` or `*/` marker is dropped, and lines between an
/// opening and its matching closing marker are dropped as well (nesting is
/// tracked with a depth counter).
///
/// # Examples
///
/// ```rust,ignore
/// without_block_comments(vec!["/*", "foo", "*/"]);
/// // => vec![]
///
/// without_block_comments(vec!["bar", "/*", "foo", "*/"]);
/// // => vec!["bar"]
/// ```
pub fn without_block_comments(lines: Vec<&str>) -> Vec<&str> {
    let mut depth = 0_i32;
    let mut kept = Vec::with_capacity(lines.len());
    for line in lines {
        if line.contains("/*") {
            // Opening marker: deepen nesting; the marker line itself is dropped.
            depth += 1;
        } else if line.contains("*/") {
            // Closing marker: shallow nesting; the marker line itself is dropped.
            depth -= 1;
        } else if depth == 0 {
            // Plain line outside any block comment: keep it.
            kept.push(line);
        }
    }
    kept
}
/// Returns `true` if the node or any enclosing item carries
/// `#[automatically_derived]` (i.e. it lives inside `#[derive]`d code).
pub fn any_parent_is_automatically_derived(tcx: TyCtxt<'_>, node: HirId) -> bool {
    let map = &tcx.hir();
    let mut prev_enclosing_node = None;
    let mut enclosing_node = node;
    // Walk up the item chain; `get_parent_item` eventually returns its own
    // input at the crate root, which terminates the loop.
    while Some(enclosing_node) != prev_enclosing_node {
        if is_automatically_derived(map.attrs(enclosing_node)) {
            return true;
        }
        prev_enclosing_node = Some(enclosing_node);
        enclosing_node = map.get_parent_item(enclosing_node);
    }
    false
}
/// Returns true if ty has `iter` or `iter_mut` methods
///
/// On success, returns the short name of the matched collection (the last
/// path segment), or `"array"`/`"slice"` for the built-in sequence types.
pub fn has_iter_method(cx: &LateContext<'_>, probably_ref_ty: Ty<'_>) -> Option<&'static str> {
    // FIXME: instead of this hard-coded list, we should check if `<adt>::iter`
    // exists and has the desired signature. Unfortunately FnCtxt is not exported
    // so we can't use its `lookup_method` method.
    let into_iter_collections: [&[&str]; 13] = [
        &paths::VEC,
        &paths::OPTION,
        &paths::RESULT,
        &paths::BTREESET,
        &paths::BTREEMAP,
        &paths::VEC_DEQUE,
        &paths::LINKED_LIST,
        &paths::BINARY_HEAP,
        &paths::HASHSET,
        &paths::HASHMAP,
        &paths::PATH_BUF,
        &paths::PATH,
        &paths::RECEIVER,
    ];
    // Look through one level of reference, since `iter` is typically called
    // through autoref anyway.
    let ty_to_check = match probably_ref_ty.kind() {
        ty::Ref(_, ty_to_check, _) => ty_to_check,
        _ => probably_ref_ty,
    };
    let def_id = match ty_to_check.kind() {
        ty::Array(..) => return Some("array"),
        ty::Slice(..) => return Some("slice"),
        ty::Adt(adt, _) => adt.did,
        _ => return None,
    };
    for path in &into_iter_collections {
        if match_def_path(cx, def_id, path) {
            return Some(*path.last().unwrap());
        }
    }
    None
}
/// Matches a function call with the given path and returns the arguments.
///
/// Usage:
///
/// ```rust,ignore
/// if let Some(args) = match_function_call(cx, cmp_max_call, &paths::CMP_MAX);
/// ```
pub fn match_function_call<'tcx>(
    cx: &LateContext<'tcx>,
    expr: &'tcx Expr<'_>,
    path: &[&str],
) -> Option<&'tcx [Expr<'tcx>]> {
    if_chain! {
        // Only direct calls through a path are considered, not method calls.
        if let ExprKind::Call(ref fun, ref args) = expr.kind;
        if let ExprKind::Path(ref qpath) = fun.kind;
        if let Some(fun_def_id) = cx.qpath_res(qpath, fun.hir_id).opt_def_id();
        if match_def_path(cx, fun_def_id, path);
        then {
            return Some(&args)
        }
    };
    None
}
/// Checks if `Ty` is normalizable. This function is useful
/// to avoid crashes on `layout_of`.
pub fn is_normalizable<'tcx>(cx: &LateContext<'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> bool {
    cx.tcx.infer_ctxt().enter(|infcx| {
        let cause = rustc_middle::traits::ObligationCause::dummy();
        infcx.at(&cause, param_env).normalize(ty).is_ok()
    })
}
/// Checks whether `did`'s path matches the given string segments.
pub fn match_def_path<'tcx>(cx: &LateContext<'tcx>, did: DefId, syms: &[&str]) -> bool {
    // We have to convert `syms` to `&[Symbol]` here because rustc's `match_def_path`
    // accepts only that. We should probably move to Symbols in Clippy as well.
    let syms = syms.iter().map(|p| Symbol::intern(p)).collect::<Vec<Symbol>>();
    cx.match_def_path(did, &syms)
}
/// Returns the arguments if `expr` is a call to any known panic entry point.
// NOTE(review): the path list here must stay in sync with `match_panic_def_id` below.
pub fn match_panic_call<'tcx>(cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) -> Option<&'tcx [Expr<'tcx>]> {
    match_function_call(cx, expr, &paths::BEGIN_PANIC)
        .or_else(|| match_function_call(cx, expr, &paths::BEGIN_PANIC_FMT))
        .or_else(|| match_function_call(cx, expr, &paths::PANIC_ANY))
        .or_else(|| match_function_call(cx, expr, &paths::PANICKING_PANIC))
        .or_else(|| match_function_call(cx, expr, &paths::PANICKING_PANIC_FMT))
        .or_else(|| match_function_call(cx, expr, &paths::PANICKING_PANIC_STR))
}
/// Returns `true` if `did` resolves to any known panic entry point.
// NOTE(review): keep in sync with the path list in `match_panic_call` above.
pub fn match_panic_def_id(cx: &LateContext<'_>, did: DefId) -> bool {
    match_def_path(cx, did, &paths::BEGIN_PANIC)
        || match_def_path(cx, did, &paths::BEGIN_PANIC_FMT)
        || match_def_path(cx, did, &paths::PANIC_ANY)
        || match_def_path(cx, did, &paths::PANICKING_PANIC)
        || match_def_path(cx, did, &paths::PANICKING_PANIC_FMT)
        || match_def_path(cx, did, &paths::PANICKING_PANIC_STR)
}
/// Returns the list of condition expressions and the list of blocks in a
/// sequence of `if/else`.
/// E.g., this returns `([a, b], [c, d, e])` for the expression
/// `if a { c } else if b { d } else { e }`.
pub fn if_sequence<'tcx>(
    mut expr: &'tcx Expr<'tcx>,
) -> (SmallVec<[&'tcx Expr<'tcx>; 1]>, SmallVec<[&'tcx Block<'tcx>; 1]>) {
    let mut conds = SmallVec::new();
    let mut blocks: SmallVec<[&Block<'_>; 1]> = SmallVec::new();
    // Follow the `else if` chain, collecting each condition and `then` block.
    while let Some((ref cond, ref then_expr, ref else_expr)) = higher::if_block(&expr) {
        conds.push(&**cond);
        if let ExprKind::Block(ref block, _) = then_expr.kind {
            blocks.push(block);
        } else {
            panic!("ExprKind::If node is not an ExprKind::Block");
        }
        if let Some(ref else_expr) = *else_expr {
            expr = else_expr;
        } else {
            break;
        }
    }
    // final `else {..}`
    if !blocks.is_empty() {
        if let ExprKind::Block(ref block, _) = expr.kind {
            blocks.push(&**block);
        }
    }
    (conds, blocks)
}
/// Returns `true` if the direct parent HIR node of `expr` is an `if` expression
/// (either as an expression or as a match-arm body).
pub fn parent_node_is_if_expr(expr: &Expr<'_>, cx: &LateContext<'_>) -> bool {
    let map = cx.tcx.hir();
    let parent_id = map.get_parent_node(expr.hir_id);
    let parent_node = map.get(parent_id);
    match parent_node {
        Node::Expr(e) => higher::if_block(&e).is_some(),
        Node::Arm(e) => higher::if_block(&e.body).is_some(),
        _ => false,
    }
}
// Finds the attribute with the given name, if any
pub fn attr_by_name<'a>(attrs: &'a [Attribute], name: &'_ str) -> Option<&'a Attribute> {
    attrs
        .iter()
        // Attributes without a single-segment ident (e.g. `#[foo::bar]`) never match.
        .find(|attr| attr.ident().map_or(false, |ident| ident.as_str() == name))
}
// Finds the `#[must_use]` attribute, if any
pub fn must_use_attr(attrs: &[Attribute]) -> Option<&Attribute> {
    attr_by_name(attrs, "must_use")
}
// Returns whether the type has #[must_use] attribute
pub fn is_must_use_ty<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
    match ty.kind() {
        ty::Adt(ref adt, _) => must_use_attr(&cx.tcx.get_attrs(adt.did)).is_some(),
        ty::Foreign(ref did) => must_use_attr(&cx.tcx.get_attrs(*did)).is_some(),
        // Containers are `#[must_use]` if their element type is.
        ty::Slice(ref ty)
        | ty::Array(ref ty, _)
        | ty::RawPtr(ty::TypeAndMut { ref ty, .. })
        | ty::Ref(_, ref ty, _) => {
            // for the Array case we don't need to care for the len == 0 case
            // because we don't want to lint functions returning empty arrays
            is_must_use_ty(cx, *ty)
        },
        ty::Tuple(ref substs) => substs.types().any(|ty| is_must_use_ty(cx, ty)),
        // `impl Trait`: check each bound's trait definition for `#[must_use]`.
        ty::Opaque(ref def_id, _) => {
            for (predicate, _) in cx.tcx.explicit_item_bounds(*def_id) {
                if let ty::PredicateAtom::Trait(trait_predicate, _) = predicate.skip_binders() {
                    if must_use_attr(&cx.tcx.get_attrs(trait_predicate.trait_ref.def_id)).is_some() {
                        return true;
                    }
                }
            }
            false
        },
        // `dyn Trait`: same check on each existential trait predicate.
        ty::Dynamic(binder, _) => {
            for predicate in binder.skip_binder().iter() {
                if let ty::ExistentialPredicate::Trait(ref trait_ref) = predicate {
                    if must_use_attr(&cx.tcx.get_attrs(trait_ref.def_id)).is_some() {
                        return true;
                    }
                }
            }
            false
        },
        _ => false,
    }
}
// check if expr is calling method or function with #[must_use] attribute
pub fn is_must_use_func_call(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
    // Resolve the callee's DefId for both free-function calls and method calls.
    let did = match expr.kind {
        ExprKind::Call(ref path, _) => if_chain! {
            if let ExprKind::Path(ref qpath) = path.kind;
            if let def::Res::Def(_, did) = cx.qpath_res(qpath, path.hir_id);
            then {
                Some(did)
            } else {
                None
            }
        },
        ExprKind::MethodCall(_, _, _, _) => cx.typeck_results().type_dependent_def_id(expr.hir_id),
        _ => None,
    };
    did.map_or(false, |did| must_use_attr(&cx.tcx.get_attrs(did)).is_some())
}
/// Returns `true` if the crate carries a `#![no_std]` attribute.
pub fn is_no_std_crate(krate: &Crate<'_>) -> bool {
    krate.item.attrs.iter().any(|attr| {
        if let ast::AttrKind::Normal(ref attr, _) = attr.kind {
            attr.path == symbol::sym::no_std
        } else {
            false
        }
    })
}
/// Check if parent of a hir node is a trait implementation block.
/// For example, `f` in
/// ```rust,ignore
/// impl Trait for S {
///     fn f() {}
/// }
/// ```
pub fn is_trait_impl_item(cx: &LateContext<'_>, hir_id: HirId) -> bool {
    if let Some(Node::Item(item)) = cx.tcx.hir().find(cx.tcx.hir().get_parent_node(hir_id)) {
        // `of_trait: Some(_)` distinguishes trait impls from inherent impls.
        matches!(item.kind, ItemKind::Impl{ of_trait: Some(_), .. })
    } else {
        false
    }
}
/// Check if it's even possible to satisfy the `where` clause for the item.
///
/// `trivial_bounds` feature allows functions with unsatisfiable bounds, for example:
///
/// ```ignore
/// fn foo() where i32: Iterator {
///     for _ in 2i32 {}
/// }
/// ```
pub fn fn_has_unsatisfiable_preds(cx: &LateContext<'_>, did: DefId) -> bool {
    use rustc_trait_selection::traits;
    // Only global (parameter-independent) predicates can be trivially unsatisfiable.
    let predicates =
        cx.tcx
            .predicates_of(did)
            .predicates
            .iter()
            .filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None });
    traits::impossible_predicates(
        cx.tcx,
        traits::elaborate_predicates(cx.tcx, predicates)
            .map(|o| o.predicate)
            .collect::<Vec<_>>(),
    )
}
/// Returns the `DefId` of the callee if the given expression is a function or method call.
pub fn fn_def_id(cx: &LateContext<'_>, expr: &Expr<'_>) -> Option<DefId> {
    match &expr.kind {
        ExprKind::MethodCall(..) => cx.typeck_results().type_dependent_def_id(expr.hir_id),
        ExprKind::Call(
            Expr {
                kind: ExprKind::Path(qpath),
                ..
            },
            ..,
        ) => cx.typeck_results().qpath_res(qpath, expr.hir_id).opt_def_id(),
        _ => None,
    }
}
/// Returns `true` if any of the lints is enabled (warn, deny or forbid) at `id`.
pub fn run_lints(cx: &LateContext<'_>, lints: &[&'static Lint], id: HirId) -> bool {
    lints.iter().any(|lint| {
        matches!(
            cx.tcx.lint_level_at_node(lint, id),
            (Level::Forbid | Level::Deny | Level::Warn, _)
        )
    })
}
/// Returns true iff the given type is a primitive (a bool or char, any integer or floating-point
/// number type, a str, or an array, slice, or tuple of those types).
pub fn is_recursively_primitive_type(ty: Ty<'_>) -> bool {
    match ty.kind() {
        ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str => true,
        // `&str` counts as primitive too.
        ty::Ref(_, inner, _) if *inner.kind() == ty::Str => true,
        ty::Array(inner_type, _) | ty::Slice(inner_type) => is_recursively_primitive_type(inner_type),
        ty::Tuple(inner_types) => inner_types.types().all(is_recursively_primitive_type),
        _ => false,
    }
}
/// Returns Option<String> where String is a textual representation of the type encapsulated in the
/// slice iff the given expression is a slice of primitives (as defined in the
/// `is_recursively_primitive_type` function) and None otherwise.
pub fn is_slice_of_primitives(cx: &LateContext<'_>, expr: &Expr<'_>) -> Option<String> {
    let expr_type = cx.typeck_results().expr_ty_adjusted(expr);
    let expr_kind = expr_type.kind();
    let is_primitive = match expr_kind {
        ty::Slice(element_type) => is_recursively_primitive_type(element_type),
        // A reference to a slice: check the element type directly instead of
        // re-matching with an `unreachable!()` fallback, as the original did.
        ty::Ref(_, inner_ty, _) => {
            matches!(inner_ty.kind(), ty::Slice(element_type) if is_recursively_primitive_type(element_type))
        },
        _ => false,
    };
    if is_primitive {
        // if we have wrappers like Array, Slice or Tuple, print these
        // and get the type enclosed in the slice ref
        match expr_type.peel_refs().walk().nth(1).unwrap().expect_ty().kind() {
            ty::Slice(..) => return Some("slice".into()),
            ty::Array(..) => return Some("array".into()),
            ty::Tuple(..) => return Some("tuple".into()),
            _ => {
                // is_recursively_primitive_type() should have taken care
                // of the rest and we can rely on the type that is found
                let refs_peeled = expr_type.peel_refs();
                return Some(refs_peeled.walk().last().unwrap().to_string());
            },
        }
    }
    None
}
/// returns list of all pairs (a, b) from `exprs` such that `eq(a, b)`
/// `hash` must be comformed with `eq`
pub fn search_same<T, Hash, Eq>(exprs: &[T], hash: Hash, eq: Eq) -> Vec<(&T, &T)>
where
    Hash: Fn(&T) -> u64,
    Eq: Fn(&T, &T) -> bool,
{
    // Fast path for the common two-element case: no hashing needed.
    if exprs.len() == 2 && eq(&exprs[0], &exprs[1]) {
        return vec![(&exprs[0], &exprs[1])];
    }
    let mut match_expr_list: Vec<(&T, &T)> = Vec::new();
    // Bucket expressions by hash so only same-hash candidates are compared with `eq`.
    let mut map: FxHashMap<_, Vec<&_>> =
        FxHashMap::with_capacity_and_hasher(exprs.len(), BuildHasherDefault::default());
    for expr in exprs {
        match map.entry(hash(expr)) {
            Entry::Occupied(mut o) => {
                for o in o.get() {
                    if eq(o, expr) {
                        match_expr_list.push((o, expr));
                    }
                }
                o.get_mut().push(expr);
            },
            Entry::Vacant(v) => {
                v.insert(vec![expr]);
            },
        }
    }
    match_expr_list
}
/// Runs `cargo metadata`, returning the parsed metadata on success.
/// On failure it emits a lint at `DUMMY_SP` and `return`s from the enclosing
/// function (hence a macro rather than a function).
#[macro_export]
macro_rules! unwrap_cargo_metadata {
    ($cx: ident, $lint: ident, $deps: expr) => {{
        let mut command = cargo_metadata::MetadataCommand::new();
        if !$deps {
            command.no_deps();
        }
        match command.exec() {
            Ok(metadata) => metadata,
            Err(err) => {
                span_lint($cx, $lint, DUMMY_SP, &format!("could not read cargo metadata: {}", err));
                return;
            },
        }
    }};
}
#[cfg(test)]
mod test {
    use super::{reindent_multiline, without_block_comments};

    // Single-line inputs: leading whitespace is stripped entirely.
    #[test]
    fn test_reindent_multiline_single_line() {
        assert_eq!("", reindent_multiline("".into(), false, None));
        assert_eq!("...", reindent_multiline("...".into(), false, None));
        assert_eq!("...", reindent_multiline("   ...".into(), false, None));
        assert_eq!("...", reindent_multiline("\t...".into(), false, None));
        assert_eq!("...", reindent_multiline("\t\t...".into(), false, None));
    }

    // Multi-line blocks keep their relative indentation (spaces and tabs).
    #[test]
    #[rustfmt::skip]
    fn test_reindent_multiline_block() {
        assert_eq!("\
    if x {
        y
    } else {
        z
    }", reindent_multiline("    if x {
            y
        } else {
            z
        }".into(), false, None));
        assert_eq!("\
    if x {
    \ty
    } else {
    \tz
    }", reindent_multiline("    if x {
        \ty
        } else {
        \tz
        }".into(), false, None));
    }

    // Blank lines inside the block must not affect the computed indent.
    #[test]
    #[rustfmt::skip]
    fn test_reindent_multiline_empty_line() {
        assert_eq!("\
    if x {
        y

    } else {
        z
    }", reindent_multiline("    if x {
            y

        } else {
            z
        }".into(), false, None));
    }

    // With `ignore_first` and an explicit indent, later lines are re-anchored.
    #[test]
    #[rustfmt::skip]
    fn test_reindent_multiline_lines_deeper() {
        assert_eq!("\
        if x {
            y
        } else {
            z
        }", reindent_multiline("\
    if x {
        y
    } else {
        z
    }".into(), true, Some(8)));
    }

    #[test]
    fn test_without_block_comments_lines_without_block_comments() {
        let result = without_block_comments(vec!["/*", "", "*/"]);
        println!("result: {:?}", result);
        assert!(result.is_empty());

        let result = without_block_comments(vec!["", "/*", "", "*/", "#[crate_type = \"lib\"]", "/*", "", "*/", ""]);
        assert_eq!(result, vec!["", "#[crate_type = \"lib\"]", ""]);

        let result = without_block_comments(vec!["/* rust", "", "*/"]);
        assert!(result.is_empty());

        let result = without_block_comments(vec!["/* one-line comment */"]);
        assert!(result.is_empty());

        let result = without_block_comments(vec!["/* nested", "/* multi-line", "comment", "*/", "test", "*/"]);
        assert!(result.is_empty());

        let result = without_block_comments(vec!["/* nested /* inline /* comment */ test */ */"]);
        assert!(result.is_empty());

        let result = without_block_comments(vec!["foo", "bar", "baz"]);
        assert_eq!(result, vec!["foo", "bar", "baz"]);
    }
}
// Move binder for dyn to each list item
#[macro_use]
pub mod sym;
#[allow(clippy::module_name_repetitions)]
pub mod ast_utils;
pub mod attrs;
pub mod author;
pub mod camel_case;
pub mod comparisons;
pub mod conf;
pub mod constants;
mod diagnostics;
pub mod eager_or_lazy;
pub mod higher;
mod hir_utils;
pub mod inspector;
#[cfg(feature = "internal-lints")]
pub mod internal_lints;
pub mod numeric_literal;
pub mod paths;
pub mod ptr;
pub mod qualify_min_const_fn;
pub mod sugg;
pub mod usage;
pub mod visitors;
pub use self::attrs::*;
pub use self::diagnostics::*;
pub use self::hir_utils::{both, eq_expr_value, over, SpanlessEq, SpanlessHash};
use std::borrow::Cow;
use std::collections::hash_map::Entry;
use std::hash::BuildHasherDefault;
use std::mem;
use if_chain::if_chain;
use rustc_ast::ast::{self, Attribute, LitKind};
use rustc_attr as attr;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::intravisit::{NestedVisitorMap, Visitor};
use rustc_hir::Node;
use rustc_hir::{
def, Arm, Block, Body, Constness, Crate, Expr, ExprKind, FnDecl, HirId, ImplItem, ImplItemKind, Item, ItemKind,
MatchSource, Param, Pat, PatKind, Path, PathSegment, QPath, TraitItem, TraitItemKind, TraitRef, TyKind, Unsafety,
};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::{LateContext, Level, Lint, LintContext};
use rustc_middle::hir::map::Map;
use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
use rustc_middle::ty::{self, layout::IntegerExt, Ty, TyCtxt, TypeFoldable};
use rustc_semver::RustcVersion;
use rustc_session::Session;
use rustc_span::hygiene::{ExpnKind, MacroKind};
use rustc_span::source_map::original_sp;
use rustc_span::sym as rustc_sym;
use rustc_span::symbol::{self, kw, Symbol};
use rustc_span::{BytePos, Pos, Span, DUMMY_SP};
use rustc_target::abi::Integer;
use rustc_trait_selection::traits::query::normalize::AtExt;
use smallvec::SmallVec;
use crate::consts::{constant, Constant};
/// Parses an MSRV string (e.g. `"1.45.0"`) into a `RustcVersion`.
///
/// On a parse failure, emits a session error at `span` when both `sess` and
/// `span` are provided, and returns `None`.
pub fn parse_msrv(msrv: &str, sess: Option<&Session>, span: Option<Span>) -> Option<RustcVersion> {
    match RustcVersion::parse(msrv) {
        Ok(version) => Some(version),
        Err(_) => {
            // Report only when we have both a session and a span to attach to.
            if let Some(sess) = sess {
                if let Some(span) = span {
                    sess.span_err(span, &format!("`{}` is not a valid Rust version", msrv));
                }
            }
            None
        },
    }
}
/// Returns `true` if the configured MSRV (if any) is at least `lint_msrv`.
/// A missing MSRV configuration means every lint applies.
pub fn meets_msrv(msrv: Option<&RustcVersion>, lint_msrv: &RustcVersion) -> bool {
    msrv.map_or(true, |msrv| msrv.meets(*lint_msrv))
}
/// Generates an `enter_lint_attrs` impl that reads a `#[clippy::msrv = "..."]`
/// inner attribute and stores the parsed version in `self.msrv`.
/// The `$call` token (`()`) adapts to `LateContext`, where `sess` is a method.
macro_rules! extract_msrv_attr {
    (LateContext) => {
        extract_msrv_attr!(@LateContext, ());
    };
    (EarlyContext) => {
        extract_msrv_attr!(@EarlyContext);
    };
    (@$context:ident$(, $call:tt)?) => {
        fn enter_lint_attrs(&mut self, cx: &rustc_lint::$context<'tcx>, attrs: &'tcx [rustc_ast::ast::Attribute]) {
            use $crate::utils::get_unique_inner_attr;
            match get_unique_inner_attr(cx.sess$($call)?, attrs, "msrv") {
                Some(msrv_attr) => {
                    if let Some(msrv) = msrv_attr.value_str() {
                        self.msrv = $crate::utils::parse_msrv(
                            &msrv.to_string(),
                            Some(cx.sess$($call)?),
                            Some(msrv_attr.span),
                        );
                    } else {
                        cx.sess$($call)?.span_err(msrv_attr.span, "bad clippy attribute");
                    }
                },
                _ => (),
            }
        }
    };
}
/// Returns `true` if the two spans come from differing expansions (i.e., one is
/// from a macro and one isn't).
#[must_use]
pub fn differing_macro_contexts(lhs: Span, rhs: Span) -> bool {
    rhs.ctxt() != lhs.ctxt()
}
/// Returns `true` if the given `NodeId` is inside a constant context
///
/// # Example
///
/// ```rust,ignore
/// if in_constant(cx, expr.hir_id) {
///     // Do something
/// }
/// ```
pub fn in_constant(cx: &LateContext<'_>, id: HirId) -> bool {
    let parent_id = cx.tcx.hir().get_parent_item(id);
    match cx.tcx.hir().get(parent_id) {
        // `const`/`static` items, trait/impl consts, and anonymous consts
        // (array lengths, enum discriminants, ...) are always const contexts.
        Node::Item(&Item {
            kind: ItemKind::Const(..) | ItemKind::Static(..),
            ..
        })
        | Node::TraitItem(&TraitItem {
            kind: TraitItemKind::Const(..),
            ..
        })
        | Node::ImplItem(&ImplItem {
            kind: ImplItemKind::Const(..),
            ..
        })
        | Node::AnonConst(_) => true,
        // Functions are const contexts only when declared `const fn`.
        Node::Item(&Item {
            kind: ItemKind::Fn(ref sig, ..),
            ..
        })
        | Node::ImplItem(&ImplItem {
            kind: ImplItemKind::Fn(ref sig, _),
            ..
        }) => sig.header.constness == Constness::Const,
        _ => false,
    }
}
/// Returns `true` if this `span` was expanded by any macro.
///
/// Compiler desugarings (`?`, `for` loops, ...) are expansions too, but are
/// deliberately excluded here.
#[must_use]
pub fn in_macro(span: Span) -> bool {
    span.from_expansion() && !matches!(span.ctxt().outer_expn_data().kind, ExpnKind::Desugaring(..))
}
// If the snippet is empty, it's an attribute that was inserted during macro
// expansion and we want to ignore those, because they could come from external
// sources that the user has no control over.
// For some reason these attributes don't have any expansion info on them, so
// we have to check it this way until there is a better way.
pub fn is_present_in_source<T: LintContext>(cx: &T, span: Span) -> bool {
    if let Some(snippet) = snippet_opt(cx, span) {
        if snippet.is_empty() {
            return false;
        }
    }
    // An unavailable snippet is conservatively treated as present.
    true
}
/// Checks if given pattern is a wildcard (`_`)
pub fn is_wild<'tcx>(pat: &impl std::ops::Deref<Target = Pat<'tcx>>) -> bool {
    matches!(pat.kind, PatKind::Wild)
}
/// Checks if type is struct, enum or union type with the given def path.
///
/// If the type is a diagnostic item, use `is_type_diagnostic_item` instead.
/// If you change the signature, remember to update the internal lint `MatchTypeOnDiagItem`
pub fn match_type(cx: &LateContext<'_>, ty: Ty<'_>, path: &[&str]) -> bool {
    match ty.kind() {
        ty::Adt(adt, _) => match_def_path(cx, adt.did, path),
        _ => false,
    }
}
/// Checks if the type is equal to a diagnostic item
///
/// If you change the signature, remember to update the internal lint `MatchTypeOnDiagItem`
pub fn is_type_diagnostic_item(cx: &LateContext<'_>, ty: Ty<'_>, diag_item: Symbol) -> bool {
    match ty.kind() {
        ty::Adt(adt, _) => cx.tcx.is_diagnostic_item(diag_item, adt.did),
        _ => false,
    }
}
/// Checks if the type is equal to a lang item
pub fn is_type_lang_item(cx: &LateContext<'_>, ty: Ty<'_>, lang_item: hir::LangItem) -> bool {
    match ty.kind() {
        // `require` panics via unwrap if the lang item is undefined for this crate.
        ty::Adt(adt, _) => cx.tcx.lang_items().require(lang_item).unwrap() == adt.did,
        _ => false,
    }
}
/// Checks if the method call given in `expr` belongs to the given trait.
pub fn match_trait_method(cx: &LateContext<'_>, expr: &Expr<'_>, path: &[&str]) -> bool {
    // NOTE(review): unwrap panics when `expr` is not a method call — callers
    // must only pass method-call expressions.
    let def_id = cx.typeck_results().type_dependent_def_id(expr.hir_id).unwrap();
    let trt_id = cx.tcx.trait_of_item(def_id);
    trt_id.map_or(false, |trt_id| match_def_path(cx, trt_id, path))
}
/// Checks if an expression references a variable of the given name.
pub fn match_var(expr: &Expr<'_>, var: Symbol) -> bool {
if let ExprKind::Path(QPath::Resolved(None, ref path)) = expr.kind {
if let [p] = path.segments {
return p.ident.name == var;
}
}
false
}
/// Returns the final segment of a `QPath`.
///
/// # Panics
/// Panics on `QPath::LangItem`, which carries no path segments.
pub fn last_path_segment<'tcx>(path: &QPath<'tcx>) -> &'tcx PathSegment<'tcx> {
    match *path {
        QPath::Resolved(_, ref path) => path.segments.last().expect("A path must have at least one segment"),
        QPath::TypeRelative(_, ref seg) => seg,
        QPath::LangItem(..) => panic!("last_path_segment: lang item has no path segments"),
    }
}
/// Returns the only segment of a `QPath`, or `None` for lang items.
// NOTE(review): for `Resolved` paths this returns the FIRST segment, not a
// check that there is exactly one — confirm callers rely on that.
pub fn single_segment_path<'tcx>(path: &QPath<'tcx>) -> Option<&'tcx PathSegment<'tcx>> {
    match *path {
        QPath::Resolved(_, ref path) => path.segments.get(0),
        QPath::TypeRelative(_, ref seg) => Some(seg),
        QPath::LangItem(..) => None,
    }
}
/// Matches a `QPath` against a slice of segment string literals.
///
/// There is also `match_path` if you are dealing with a `rustc_hir::Path` instead of a
/// `rustc_hir::QPath`.
///
/// # Examples
/// ```rust,ignore
/// match_qpath(path, &["std", "rt", "begin_unwind"])
/// ```
pub fn match_qpath(path: &QPath<'_>, segments: &[&str]) -> bool {
    match *path {
        QPath::Resolved(_, ref path) => match_path(path, segments),
        // `<Ty>::segment`: match the prefix against the inner type's path and
        // the last expected segment against the trailing segment.
        QPath::TypeRelative(ref ty, ref segment) => match ty.kind {
            TyKind::Path(ref inner_path) => {
                if let [prefix @ .., end] = segments {
                    if match_qpath(inner_path, prefix) {
                        return segment.ident.name.as_str() == *end;
                    }
                }
                false
            },
            _ => false,
        },
        QPath::LangItem(..) => false,
    }
}
/// Matches a `Path` against a slice of segment string literals.
///
/// There is also `match_qpath` if you are dealing with a `rustc_hir::QPath` instead of a
/// `rustc_hir::Path`.
///
/// # Examples
///
/// ```rust,ignore
/// if match_path(&trait_ref.path, &paths::HASH) {
///     // This is the `std::hash::Hash` trait.
/// }
///
/// if match_path(ty_path, &["rustc", "lint", "Lint"]) {
///     // This is a `rustc_middle::lint::Lint`.
/// }
/// ```
pub fn match_path(path: &Path<'_>, segments: &[&str]) -> bool {
    // Compare from the end, so a longer actual path still matches a suffix.
    path.segments
        .iter()
        .rev()
        .zip(segments.iter().rev())
        .all(|(a, b)| a.ident.name.as_str() == *b)
}
/// Matches a `Path` against a slice of segment string literals, e.g.
///
/// # Examples
/// ```rust,ignore
/// match_path_ast(path, &["std", "rt", "begin_unwind"])
/// ```
pub fn match_path_ast(path: &ast::Path, segments: &[&str]) -> bool {
    // Same suffix comparison as `match_path`, but on AST paths.
    path.segments
        .iter()
        .rev()
        .zip(segments.iter().rev())
        .all(|(a, b)| a.ident.name.as_str() == *b)
}
/// Gets the definition associated to a path.
///
/// Resolves `path` segment by segment, starting from the crate named by the
/// first segment, by walking `item_children`; falls back to searching inherent
/// impls for the final segment.
pub fn path_to_res(cx: &LateContext<'_>, path: &[&str]) -> Option<def::Res> {
    let crates = cx.tcx.crates();
    let krate = crates
        .iter()
        .find(|&&krate| cx.tcx.crate_name(krate).as_str() == path[0]);
    if let Some(krate) = krate {
        let krate = DefId {
            krate: *krate,
            index: CRATE_DEF_INDEX,
        };
        let mut current_item = None;
        let mut items = cx.tcx.item_children(krate);
        let mut path_it = path.iter().skip(1).peekable();
        loop {
            let segment = match path_it.next() {
                Some(segment) => segment,
                None => return None,
            };
            // `get_def_path` seems to generate these empty segments for extern blocks.
            // We can just ignore them.
            if segment.is_empty() {
                continue;
            }
            // Swap `items` out for an empty slice while iterating over the
            // previous value, so a child's children can be assigned inside the loop.
            let result = SmallVec::<[_; 8]>::new();
            for item in mem::replace(&mut items, cx.tcx.arena.alloc_slice(&result)).iter() {
                if item.ident.name.as_str() == *segment {
                    if path_it.peek().is_none() {
                        return Some(item.res);
                    }
                    current_item = Some(item);
                    items = cx.tcx.item_children(item.res.def_id());
                    break;
                }
            }
            // The segment isn't a child_item.
            // Try to find it under an inherent impl.
            if_chain! {
                if path_it.peek().is_none();
                if let Some(current_item) = current_item;
                let item_def_id = current_item.res.def_id();
                if cx.tcx.def_kind(item_def_id) == DefKind::Struct;
                then {
                    // Bad `find_map` suggestion. See #4193.
                    #[allow(clippy::find_map)]
                    return cx.tcx.inherent_impls(item_def_id).iter()
                        .flat_map(|&impl_def_id| cx.tcx.item_children(impl_def_id))
                        .find(|item| item.ident.name.as_str() == *segment)
                        .map(|item| item.res);
                }
            }
        }
    } else {
        None
    }
}
/// Resolves a `QPath` to its `Res`, using typeck results for type-relative
/// and lang-item paths; returns `Res::Err` when no typeck results exist.
pub fn qpath_res(cx: &LateContext<'_>, qpath: &hir::QPath<'_>, id: hir::HirId) -> Res {
    match qpath {
        hir::QPath::Resolved(_, path) => path.res,
        hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => {
            // Type-dependent resolution requires typeck to have run on the owner.
            if cx.tcx.has_typeck_results(id.owner.to_def_id()) {
                cx.tcx.typeck(id.owner).qpath_res(qpath, id)
            } else {
                Res::Err
            }
        },
    }
}
/// Convenience function to get the `DefId` of a trait by path.
/// It could be a trait or trait alias.
///
/// Returns `None` if the path does not resolve, or resolves to something
/// that is not a trait (alias).
pub fn get_trait_def_id(cx: &LateContext<'_>, path: &[&str]) -> Option<DefId> {
    // `?` propagates the `None` from an unresolvable path — the manual
    // `match`/`return None` it replaces is exactly what Clippy's own
    // `question_mark` lint flags.
    let res = path_to_res(cx, path)?;
    match res {
        Res::Def(DefKind::Trait | DefKind::TraitAlias, trait_id) => Some(trait_id),
        // A path that resolves at all should never resolve to `Res::Err`.
        Res::Err => unreachable!("this trait resolution is impossible: {:?}", &path),
        _ => None,
    }
}
/// Checks whether a type implements a trait.
/// See also `get_trait_def_id`.
pub fn implements_trait<'tcx>(
    cx: &LateContext<'tcx>,
    ty: Ty<'tcx>,
    trait_id: DefId,
    ty_params: &[GenericArg<'tcx>],
) -> bool {
    // Do not check on infer_types to avoid panic in evaluate_obligation.
    if ty.has_infer_types() {
        return false;
    }
    let ty = cx.tcx.erase_regions(ty);
    // Types with escaping bound vars cannot be fed to the trait query.
    if ty.has_escaping_bound_vars() {
        return false;
    }
    let ty_params = cx.tcx.mk_substs(ty_params.iter());
    cx.tcx.type_implements_trait((trait_id, ty, ty_params, cx.param_env))
}
/// Gets the `hir::TraitRef` of the trait the given method is implemented for.
///
/// Use this if you want to find the `TraitRef` of the `Add` trait in this example:
///
/// ```rust
/// struct Point(isize, isize);
///
/// impl std::ops::Add for Point {
///     type Output = Self;
///
///     fn add(self, other: Self) -> Self {
///         Point(0, 0)
///     }
/// }
/// ```
///
/// Returns `None` when the parent item is not a trait `impl` (e.g. an
/// inherent impl, or `hir_id` is not inside an impl at all).
pub fn trait_ref_of_method<'tcx>(cx: &LateContext<'tcx>, hir_id: HirId) -> Option<&'tcx TraitRef<'tcx>> {
    // Get the implemented trait for the current function
    let parent_impl = cx.tcx.hir().get_parent_item(hir_id);
    if_chain! {
        if parent_impl != hir::CRATE_HIR_ID;
        if let hir::Node::Item(item) = cx.tcx.hir().get(parent_impl);
        if let hir::ItemKind::Impl{ of_trait: trait_ref, .. } = &item.kind;
        then { return trait_ref.as_ref(); }
    }
    None
}
/// Checks whether this type implements `Drop`.
pub fn has_drop<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
    // Only ADTs can carry a destructor of their own.
    ty.ty_adt_def().map_or(false, |adt| adt.has_dtor(cx.tcx))
}
/// Returns the method names and argument list of nested method call expressions that make up
/// `expr`. method/span lists are sorted with the most recent call first.
///
/// Collection stops at `max_depth`, at the first non-method-call receiver, or
/// at the first call with a macro-expanded argument.
pub fn method_calls<'tcx>(
    expr: &'tcx Expr<'tcx>,
    max_depth: usize,
) -> (Vec<Symbol>, Vec<&'tcx [Expr<'tcx>]>, Vec<Span>) {
    let mut method_names = Vec::with_capacity(max_depth);
    let mut arg_lists = Vec::with_capacity(max_depth);
    let mut spans = Vec::with_capacity(max_depth);
    let mut current = expr;
    for _ in 0..max_depth {
        if let ExprKind::MethodCall(path, span, args, _) = &current.kind {
            // Bail out on macro-expanded arguments: their spans are unusable
            // for suggestions.
            if args.iter().any(|e| e.span.from_expansion()) {
                break;
            }
            method_names.push(path.ident.name);
            arg_lists.push(&**args);
            spans.push(*span);
            // `args[0]` is the receiver: walk down the chain.
            current = &args[0];
        } else {
            break;
        }
    }
    (method_names, arg_lists, spans)
}
/// Matches an `Expr` against a chain of methods, and return the matched `Expr`s.
///
/// For example, if `expr` represents the `.baz()` in `foo.bar().baz()`,
/// `method_chain_args(expr, &["bar", "baz"])` will return a `Vec`
/// containing the `Expr`s for
/// `.bar()` and `.baz()`
///
/// Returns `None` on the first mismatch, non-method-call receiver, or
/// macro-expanded argument.
pub fn method_chain_args<'a>(expr: &'a Expr<'_>, methods: &[&str]) -> Option<Vec<&'a [Expr<'a>]>> {
    let mut current = expr;
    let mut matched = Vec::with_capacity(methods.len());
    for method_name in methods.iter().rev() {
        // method chains are stored last -> first
        if let ExprKind::MethodCall(ref path, _, ref args, _) = current.kind {
            if path.ident.name.as_str() == *method_name {
                if args.iter().any(|e| e.span.from_expansion()) {
                    return None;
                }
                matched.push(&**args); // build up `matched` backwards
                current = &args[0] // go to parent expression
            } else {
                return None;
            }
        } else {
            return None;
        }
    }
    // Reverse `matched` so that it is in the same order as `methods`.
    matched.reverse();
    Some(matched)
}
/// Returns `true` if the provided `def_id` is an entrypoint to a program.
pub fn is_entrypoint_fn(cx: &LateContext<'_>, def_id: DefId) -> bool {
    // `entry_fn` is `None` for non-binary crates (no entry point exists).
    cx.tcx
        .entry_fn(LOCAL_CRATE)
        .map_or(false, |(entry_fn_def_id, _)| def_id == entry_fn_def_id.to_def_id())
}
/// Returns `true` if the expression is in the program's `#[panic_handler]`.
pub fn is_in_panic_handler(cx: &LateContext<'_>, e: &Expr<'_>) -> bool {
    // Compare the enclosing item against the registered `panic_impl` lang item.
    let parent = cx.tcx.hir().get_parent_item(e.hir_id);
    let def_id = cx.tcx.hir().local_def_id(parent).to_def_id();
    Some(def_id) == cx.tcx.lang_items().panic_impl()
}
/// Gets the name of the item the expression is in, if available.
///
/// Works for free items, trait items, and impl items; returns `None` for any
/// other parent node (e.g. a closure body).
pub fn get_item_name(cx: &LateContext<'_>, expr: &Expr<'_>) -> Option<Symbol> {
    let parent_id = cx.tcx.hir().get_parent_item(expr.hir_id);
    match cx.tcx.hir().find(parent_id) {
        Some(
            Node::Item(Item { ident, .. })
            | Node::TraitItem(TraitItem { ident, .. })
            | Node::ImplItem(ImplItem { ident, .. }),
        ) => Some(ident.name),
        _ => None,
    }
}
/// Gets the name of a `Pat`, if any.
///
/// Looks through `box`/`&` patterns; for path patterns only single-segment
/// paths yield a name.
pub fn get_pat_name(pat: &Pat<'_>) -> Option<Symbol> {
    match pat.kind {
        PatKind::Binding(.., ref spname, _) => Some(spname.name),
        PatKind::Path(ref qpath) => single_segment_path(qpath).map(|ps| ps.ident.name),
        // Recurse through one level of box/ref indirection.
        PatKind::Box(ref p) | PatKind::Ref(ref p, _) => get_pat_name(&*p),
        _ => None,
    }
}
/// HIR visitor that records whether a given name occurs anywhere in the
/// visited tree. Used by [`contains_name`].
struct ContainsName {
    // Name to search for.
    name: Symbol,
    // Set to `true` once the name has been seen.
    result: bool,
}
impl<'tcx> Visitor<'tcx> for ContainsName {
    type Map = Map<'tcx>;
    fn visit_name(&mut self, _: Span, name: Symbol) {
        if self.name == name {
            self.result = true;
        }
    }
    fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
        // Do not descend into nested bodies.
        NestedVisitorMap::None
    }
}
/// Checks if an `Expr` contains a certain name.
pub fn contains_name(name: Symbol, expr: &Expr<'_>) -> bool {
    let mut cn = ContainsName { name, result: false };
    cn.visit_expr(expr);
    cn.result
}
/// Returns `true` if `expr` contains a return expression
pub fn contains_return(expr: &hir::Expr<'_>) -> bool {
    // Visitor that stops walking as soon as a `return` is found.
    struct RetCallFinder {
        found: bool,
    }
    impl<'tcx> hir::intravisit::Visitor<'tcx> for RetCallFinder {
        type Map = Map<'tcx>;
        fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
            if self.found {
                // Already found one; skip the rest of the tree.
                return;
            }
            if let hir::ExprKind::Ret(..) = &expr.kind {
                self.found = true;
            } else {
                hir::intravisit::walk_expr(self, expr);
            }
        }
        fn nested_visit_map(&mut self) -> hir::intravisit::NestedVisitorMap<Self::Map> {
            hir::intravisit::NestedVisitorMap::None
        }
    }
    let mut visitor = RetCallFinder { found: false };
    visitor.visit_expr(expr);
    visitor.found
}
/// Converts a span to a code snippet if available, otherwise use default.
///
/// This is useful if you want to provide suggestions for your lint or more generally, if you want
/// to convert a given `Span` to a `str`.
///
/// # Example
/// ```rust,ignore
/// snippet(cx, expr.span, "..")
/// ```
pub fn snippet<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
    match snippet_opt(cx, span) {
        Some(src) => Cow::Owned(src),
        None => Cow::Borrowed(default),
    }
}
/// Same as `snippet`, but it adapts the applicability level by following rules:
///
/// - Applicability level `Unspecified` will never be changed.
/// - If the span is inside a macro, change the applicability level to `MaybeIncorrect`.
/// - If the default value is used and the applicability level is `MachineApplicable`, change it to
/// `HasPlaceholders`
pub fn snippet_with_applicability<'a, T: LintContext>(
    cx: &T,
    span: Span,
    default: &'a str,
    applicability: &mut Applicability,
) -> Cow<'a, str> {
    // A macro span makes any machine-applicable suggestion suspect.
    if *applicability != Applicability::Unspecified && span.from_expansion() {
        *applicability = Applicability::MaybeIncorrect;
    }
    snippet_opt(cx, span).map_or_else(
        || {
            // Falling back to the placeholder text: downgrade accordingly.
            if *applicability == Applicability::MachineApplicable {
                *applicability = Applicability::HasPlaceholders;
            }
            Cow::Borrowed(default)
        },
        From::from,
    )
}
/// Same as `snippet`, but should only be used when it's clear that the input span is
/// not a macro argument.
pub fn snippet_with_macro_callsite<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
    // Walk the span out of its expansion to the macro call site.
    snippet(cx, span.source_callsite(), default)
}
/// Converts a span to a code snippet. Returns `None` if not available.
pub fn snippet_opt<T: LintContext>(cx: &T, span: Span) -> Option<String> {
    cx.sess().source_map().span_to_snippet(span).ok()
}
/// Converts a span (from a block) to a code snippet if available, otherwise use default.
///
/// This trims the code of indentation, except for the first line. Use it for blocks or block-like
/// things which need to be printed as such.
///
/// The `indent_relative_to` arg can be used, to provide a span, where the indentation of the
/// resulting snippet of the given span.
///
/// # Example
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", None)
/// // where, `block` is the block of the if expr
///     if x {
///         y;
///     }
/// // will return the snippet
/// {
///     y;
/// }
/// ```
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", Some(if_expr.span))
/// // where, `block` is the block of the if expr
///     if x {
///         y;
///     }
/// // will return the snippet
/// {
///         y;
///     } // aligned with `if`
/// ```
/// Note that the first line of the snippet always has 0 indentation.
pub fn snippet_block<'a, T: LintContext>(
    cx: &T,
    span: Span,
    default: &'a str,
    indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
    let snip = snippet(cx, span, default);
    // Re-align all but the first line to the indentation of `indent_relative_to`.
    let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
    reindent_multiline(snip, true, indent)
}
/// Same as `snippet_block`, but adapts the applicability level by the rules of
/// `snippet_with_applicability`.
pub fn snippet_block_with_applicability<'a, T: LintContext>(
    cx: &T,
    span: Span,
    default: &'a str,
    indent_relative_to: Option<Span>,
    applicability: &mut Applicability,
) -> Cow<'a, str> {
    let snip = snippet_with_applicability(cx, span, default, applicability);
    let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
    reindent_multiline(snip, true, indent)
}
/// Returns a new Span that extends the original Span to the first non-whitespace char of the first
/// line.
///
/// ```rust,ignore
///        let x = ();
/// //             ^^
/// // will be converted to
///        let x = ();
/// //     ^^^^^^^^^^
/// ```
pub fn first_line_of_span<T: LintContext>(cx: &T, span: Span) -> Span {
    // If the line is all whitespace (no first char), fall back to `span` unchanged.
    first_char_in_first_line(cx, span).map_or(span, |first_char_pos| span.with_lo(first_char_pos))
}
/// Returns the position of the first non-whitespace character on the line
/// containing `span`'s start, or `None` if the line is blank or the snippet
/// is unavailable.
fn first_char_in_first_line<T: LintContext>(cx: &T, span: Span) -> Option<BytePos> {
    let line_span = line_span(cx, span);
    snippet_opt(cx, line_span).and_then(|snip| {
        snip.find(|c: char| !c.is_whitespace())
            .map(|pos| line_span.lo() + BytePos::from_usize(pos))
    })
}
/// Returns the indentation of the line of a span
///
/// ```rust,ignore
/// let x = ();
/// //      ^^ -- will return 0
///     let x = ();
/// //          ^^ -- will return 4
/// ```
pub fn indent_of<T: LintContext>(cx: &T, span: Span) -> Option<usize> {
    snippet_opt(cx, line_span(cx, span)).and_then(|snip| snip.find(|c: char| !c.is_whitespace()))
}
/// Returns the position just before the `->` (skipping any whitespace that
/// directly precedes it).
///
/// ```rust,ignore
/// fn into(self) -> () {}
///              ^
/// // in case of unformatted code
/// fn into2(self)-> () {}
///               ^
/// fn into3(self)   -> () {}
///               ^
/// ```
#[allow(clippy::needless_pass_by_value)]
pub fn position_before_rarrow(s: String) -> Option<usize> {
    s.rfind("->").map(|rpos| {
        // `rfind` returns a *byte* index. The previous implementation used it
        // to index a `Vec<char>` (a *char* index), which desynchronizes as
        // soon as any multi-byte character appears before the arrow. Walk
        // back over trailing whitespace by inspecting chars at byte
        // boundaries instead.
        let mut rpos = rpos;
        while rpos > 1 {
            match s[..rpos].chars().next_back() {
                Some(c) if c.is_whitespace() => rpos -= c.len_utf8(),
                _ => break,
            }
        }
        rpos
    })
}
/// Extends the span to the beginning of the spans line, incl. whitespaces.
///
/// ```rust,ignore
///        let x = ();
/// //             ^^
/// // will be converted to
///        let x = ();
/// // ^^^^^^^^^^^^^^
/// ```
fn line_span<T: LintContext>(cx: &T, span: Span) -> Span {
    // Strip macro-expansion context first so we index the real source file.
    let span = original_sp(span, DUMMY_SP);
    // NOTE: panics if the span's start is not inside any source file.
    let source_map_and_line = cx.sess().source_map().lookup_line(span.lo()).unwrap();
    let line_no = source_map_and_line.line;
    let line_start = source_map_and_line.sf.lines[line_no];
    Span::new(line_start, span.hi(), span.ctxt())
}
/// Like `snippet_block`, but add braces if the expr is not an `ExprKind::Block`.
/// Also takes an `Option<String>` which can be put inside the braces.
pub fn expr_block<'a, T: LintContext>(
    cx: &T,
    expr: &Expr<'_>,
    option: Option<String>,
    default: &'a str,
    indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
    let code = snippet_block(cx, expr.span, default, indent_relative_to);
    let string = option.unwrap_or_default();
    if expr.span.from_expansion() {
        // Macro-expanded: show the call-site text, wrapped in braces.
        Cow::Owned(format!("{{ {} }}", snippet_with_macro_callsite(cx, expr.span, default)))
    } else if let ExprKind::Block(_, _) = expr.kind {
        // Already a block; just append the extra text.
        Cow::Owned(format!("{}{}", code, string))
    } else if string.is_empty() {
        Cow::Owned(format!("{{ {} }}", code))
    } else {
        Cow::Owned(format!("{{\n{};\n{}\n}}", code, string))
    }
}
/// Reindent a multiline string with possibility of ignoring the first line.
#[allow(clippy::needless_pass_by_value)]
pub fn reindent_multiline(s: Cow<'_, str>, ignore_first: bool, indent: Option<usize>) -> Cow<'_, str> {
    // Normalize space indentation, then tab indentation, then spaces again,
    // so mixed space/tab indents converge on the requested level.
    let pass_spaces = reindent_multiline_inner(&s, ignore_first, indent, ' ');
    let pass_tabs = reindent_multiline_inner(&pass_spaces, ignore_first, indent, '\t');
    Cow::Owned(reindent_multiline_inner(&pass_tabs, ignore_first, indent, ' '))
}
/// Re-indents `s` relative to the smallest leading run of `ch` found on its
/// (non-empty) lines, targeting `indent` columns (default 0). When
/// `ignore_first` is set, the first line is left untouched and excluded from
/// the minimum-indent computation.
fn reindent_multiline_inner(s: &str, ignore_first: bool, indent: Option<usize>, ch: char) -> String {
    // Smallest existing indentation (in `ch` characters) across the
    // considered lines; empty lines are ignored so they don't force 0.
    let min_prefix = s
        .lines()
        .skip(ignore_first as usize)
        .filter(|line| !line.is_empty())
        .map(|line| {
            line.char_indices()
                .find(|&(_, c)| c != ch)
                .map_or(line.len(), |(pos, _)| pos)
        })
        .min()
        .unwrap_or(0);
    let target = indent.unwrap_or(0);
    let reindented: Vec<String> = s
        .lines()
        .enumerate()
        .map(|(i, line)| {
            if (ignore_first && i == 0) || line.is_empty() {
                line.to_owned()
            } else if min_prefix > target {
                // Too deep: strip the surplus prefix.
                line.split_at(min_prefix - target).1.to_owned()
            } else {
                // Too shallow: pad with spaces up to the target.
                " ".repeat(target - min_prefix) + line
            }
        })
        .collect();
    reindented.join("\n")
}
/// Gets the parent expression, if any –- this is useful to constrain a lint.
pub fn get_parent_expr<'tcx>(cx: &LateContext<'tcx>, e: &Expr<'_>) -> Option<&'tcx Expr<'tcx>> {
    let map = &cx.tcx.hir();
    let hir_id = e.hir_id;
    let parent_id = map.get_parent_node(hir_id);
    // A node that is its own parent is the root; there is nothing above it.
    if hir_id == parent_id {
        return None;
    }
    match map.find(parent_id) {
        Some(Node::Expr(parent)) => Some(parent),
        _ => None,
    }
}
/// Returns the innermost enclosing `Block` of the node, if any.
///
/// For a function or method, this is its body block; otherwise the nearest
/// block in the enclosing scope.
pub fn get_enclosing_block<'tcx>(cx: &LateContext<'tcx>, hir_id: HirId) -> Option<&'tcx Block<'tcx>> {
    let map = &cx.tcx.hir();
    let enclosing_node = map
        .get_enclosing_scope(hir_id)
        .and_then(|enclosing_id| map.find(enclosing_id));
    enclosing_node.and_then(|node| match node {
        Node::Block(block) => Some(block),
        // For fn items/impl methods, descend into the body expression.
        Node::Item(&Item {
            kind: ItemKind::Fn(_, _, eid),
            ..
        })
        | Node::ImplItem(&ImplItem {
            kind: ImplItemKind::Fn(_, eid),
            ..
        }) => match cx.tcx.hir().body(eid).value.kind {
            ExprKind::Block(ref block, _) => Some(block),
            _ => None,
        },
        _ => None,
    })
}
/// Returns the base type for HIR references and pointers.
pub fn walk_ptrs_hir_ty<'tcx>(ty: &'tcx hir::Ty<'tcx>) -> &'tcx hir::Ty<'tcx> {
    match ty.kind {
        // Strip one level of `*`/`&` and recurse.
        TyKind::Ptr(ref mut_ty) | TyKind::Rptr(_, ref mut_ty) => walk_ptrs_hir_ty(&mut_ty.ty),
        _ => ty,
    }
}
/// Returns the base type for references and raw pointers, and count reference
/// depth.
pub fn walk_ptrs_ty_depth(ty: Ty<'_>) -> (Ty<'_>, usize) {
    // Note: only `ty::Ref` is peeled here; raw pointers are left as-is.
    fn inner(ty: Ty<'_>, depth: usize) -> (Ty<'_>, usize) {
        match ty.kind() {
            ty::Ref(_, ty, _) => inner(ty, depth + 1),
            _ => (ty, depth),
        }
    }
    inner(ty, 0)
}
/// Checks whether the given expression is a constant integer of the given value.
/// unlike `is_integer_literal`, this version does const folding
pub fn is_integer_const(cx: &LateContext<'_>, e: &Expr<'_>, value: u128) -> bool {
    // Fast path: a plain literal needs no const evaluation.
    if is_integer_literal(e, value) {
        return true;
    }
    let map = cx.tcx.hir();
    let parent_item = map.get_parent_item(e.hir_id);
    // Const-fold within the enclosing body, if there is one.
    if let Some((Constant::Int(v), _)) = map
        .maybe_body_owned_by(parent_item)
        .and_then(|body_id| constant(cx, cx.tcx.typeck_body(body_id), e))
    {
        value == v
    } else {
        false
    }
}
/// Checks whether the given expression is a constant literal of the given value.
pub fn is_integer_literal(expr: &Expr<'_>, value: u128) -> bool {
    // FIXME: use constant folding
    if let ExprKind::Lit(ref spanned) = expr.kind {
        if let LitKind::Int(v, _) = spanned.node {
            return v == value;
        }
    }
    false
}
/// Returns `true` if the given `Expr` has been coerced before.
///
/// Examples of coercions can be found in the Nomicon at
/// <https://doc.rust-lang.org/nomicon/coercions.html>.
///
/// See `rustc_middle::ty::adjustment::Adjustment` and `rustc_typeck::check::coercion` for more
/// information on adjustments and coercions.
pub fn is_adjusted(cx: &LateContext<'_>, e: &Expr<'_>) -> bool {
    cx.typeck_results().adjustments().get(e.hir_id).is_some()
}
/// Returns the pre-expansion span if is this comes from an expansion of the
/// macro `name`.
/// See also `is_direct_expn_of`.
#[must_use]
pub fn is_expn_of(mut span: Span, name: &str) -> Option<Span> {
    // Walk up the expansion chain until the named bang-macro is found or
    // we reach un-expanded code.
    loop {
        if span.from_expansion() {
            let data = span.ctxt().outer_expn_data();
            let new_span = data.call_site;
            if let ExpnKind::Macro(MacroKind::Bang, mac_name) = data.kind {
                if mac_name.as_str() == name {
                    return Some(new_span);
                }
            }
            span = new_span;
        } else {
            return None;
        }
    }
}
/// Returns the pre-expansion span if the span directly comes from an expansion
/// of the macro `name`.
/// The difference with `is_expn_of` is that in
/// ```rust,ignore
/// foo!(bar!(42));
/// ```
/// `42` is considered expanded from `foo!` and `bar!` by `is_expn_of` but only
/// `bar!` by
/// `is_direct_expn_of`.
#[must_use]
pub fn is_direct_expn_of(span: Span, name: &str) -> Option<Span> {
    // Same check as `is_expn_of`, but only one level deep (no loop).
    if span.from_expansion() {
        let data = span.ctxt().outer_expn_data();
        let new_span = data.call_site;
        if let ExpnKind::Macro(MacroKind::Bang, mac_name) = data.kind {
            if mac_name.as_str() == name {
                return Some(new_span);
            }
        }
    }
    None
}
/// Convenience function to get the return type of a function.
pub fn return_ty<'tcx>(cx: &LateContext<'tcx>, fn_item: hir::HirId) -> Ty<'tcx> {
    let fn_def_id = cx.tcx.hir().local_def_id(fn_item);
    let ret_ty = cx.tcx.fn_sig(fn_def_id).output();
    // `fn_sig` output is a binder; erase the late-bound regions to get a `Ty`.
    cx.tcx.erase_late_bound_regions(ret_ty)
}
/// Walks into `ty` and returns `true` if any inner type is the same as `other_ty`
pub fn contains_ty(ty: Ty<'_>, other_ty: Ty<'_>) -> bool {
    ty.walk().any(|inner| match inner.unpack() {
        GenericArgKind::Type(inner_ty) => ty::TyS::same_type(other_ty, inner_ty),
        // Lifetimes and const generics can never equal a type.
        GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => false,
    })
}
/// Returns `true` if the given type is an `unsafe` function.
pub fn type_is_unsafe_function<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
    match ty.kind() {
        ty::FnDef(..) | ty::FnPtr(_) => ty.fn_sig(cx.tcx).unsafety() == Unsafety::Unsafe,
        _ => false,
    }
}
/// Checks whether `ty` is `Copy` (ignoring region differences).
pub fn is_copy<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
    ty.is_copy_modulo_regions(cx.tcx.at(DUMMY_SP), cx.param_env)
}
/// Checks if an expression is constructing a tuple-like enum variant or struct
pub fn is_ctor_or_promotable_const_function(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
    if let ExprKind::Call(ref fun, _) = expr.kind {
        if let ExprKind::Path(ref qp) = fun.kind {
            let res = cx.qpath_res(qp, fun.hir_id);
            return match res {
                // Tuple-struct / tuple-variant constructor call.
                def::Res::Def(DefKind::Variant | DefKind::Ctor(..), ..) => true,
                // Otherwise accept calls to promotable const fns.
                def::Res::Def(_, def_id) => cx.tcx.is_promotable_const_fn(def_id),
                _ => false,
            };
        }
    }
    false
}
/// Returns `true` if a pattern is refutable.
// TODO: should be implemented using rustc/mir_build/thir machinery
pub fn is_refutable(cx: &LateContext<'_>, pat: &Pat<'_>) -> bool {
    // A path/struct/tuple-struct pattern is refutable iff it names an enum
    // variant (a struct pattern of a struct type always matches).
    fn is_enum_variant(cx: &LateContext<'_>, qpath: &QPath<'_>, id: HirId) -> bool {
        matches!(
            cx.qpath_res(qpath, id),
            def::Res::Def(DefKind::Variant, ..) | Res::Def(DefKind::Ctor(def::CtorOf::Variant, _), _)
        )
    }
    // A compound pattern is refutable if any sub-pattern is.
    fn are_refutable<'a, I: Iterator<Item = &'a Pat<'a>>>(cx: &LateContext<'_>, mut i: I) -> bool {
        i.any(|pat| is_refutable(cx, pat))
    }
    match pat.kind {
        PatKind::Wild => false,
        PatKind::Binding(_, _, _, pat) => pat.map_or(false, |pat| is_refutable(cx, pat)),
        PatKind::Box(ref pat) | PatKind::Ref(ref pat, _) => is_refutable(cx, pat),
        PatKind::Lit(..) | PatKind::Range(..) => true,
        PatKind::Path(ref qpath) => is_enum_variant(cx, qpath, pat.hir_id),
        PatKind::Or(ref pats) => {
            // TODO: should be the honest check, that pats is exhaustive set
            are_refutable(cx, pats.iter().map(|pat| &**pat))
        },
        PatKind::Tuple(ref pats, _) => are_refutable(cx, pats.iter().map(|pat| &**pat)),
        PatKind::Struct(ref qpath, ref fields, _) => {
            is_enum_variant(cx, qpath, pat.hir_id) || are_refutable(cx, fields.iter().map(|field| &*field.pat))
        },
        PatKind::TupleStruct(ref qpath, ref pats, _) => {
            is_enum_variant(cx, qpath, pat.hir_id) || are_refutable(cx, pats.iter().map(|pat| &**pat))
        },
        PatKind::Slice(ref head, ref middle, ref tail) => {
            match &cx.typeck_results().node_type(pat.hir_id).kind() {
                ty::Slice(..) => {
                    // [..] is the only irrefutable slice pattern.
                    !head.is_empty() || middle.is_none() || !tail.is_empty()
                },
                ty::Array(..) => are_refutable(cx, head.iter().chain(middle).chain(tail.iter()).map(|pat| &**pat)),
                _ => {
                    // unreachable!()
                    true
                },
            }
        },
    }
}
/// Checks for the `#[automatically_derived]` attribute all `#[derive]`d
/// implementations have.
pub fn is_automatically_derived(attrs: &[ast::Attribute]) -> bool {
    attrs.iter().any(|attr| attr.has_name(rustc_sym::automatically_derived))
}
/// Remove blocks around an expression.
///
/// Ie. `x`, `{ x }` and `{{{{ x }}}}` all give `x`. `{ x; y }` and `{}` return
/// themselves.
pub fn remove_blocks<'tcx>(mut expr: &'tcx Expr<'tcx>) -> &'tcx Expr<'tcx> {
    // Only unwrap blocks that consist of exactly a tail expression
    // (no statements); anything else is returned as-is.
    while let ExprKind::Block(ref block, ..) = expr.kind {
        match (block.stmts.is_empty(), block.expr.as_ref()) {
            (true, Some(e)) => expr = e,
            _ => break,
        }
    }
    expr
}
/// Checks whether the given parameter is a `self` parameter.
pub fn is_self(slf: &Param<'_>) -> bool {
    // A `self` parameter lowers to a binding pattern named `self`.
    matches!(slf.pat.kind, PatKind::Binding(.., name, _) if name.name == kw::SelfLower)
}
/// Checks whether the given HIR type is the `Self` type.
pub fn is_self_ty(slf: &hir::Ty<'_>) -> bool {
    if_chain! {
        if let TyKind::Path(ref qp) = slf.kind;
        if let QPath::Resolved(None, ref path) = *qp;
        if let Res::SelfTy(..) = path.res;
        then {
            return true
        }
    }
    false
}
/// Iterates over the parameters of `body` that correspond to `decl`'s inputs.
pub fn iter_input_pats<'tcx>(decl: &FnDecl<'_>, body: &'tcx Body<'_>) -> impl Iterator<Item = &'tcx Param<'tcx>> {
    (0..decl.inputs.len()).map(move |i| &body.params[i])
}
/// Checks if a given expression is a match expression expanded from the `?`
/// operator or the `try` macro.
pub fn is_try<'tcx>(expr: &'tcx Expr<'tcx>) -> Option<&'tcx Expr<'tcx>> {
    // Matches an arm of the form `Ok(x) => x`.
    fn is_ok(arm: &Arm<'_>) -> bool {
        if_chain! {
            if let PatKind::TupleStruct(ref path, ref pat, None) = arm.pat.kind;
            if match_qpath(path, &paths::RESULT_OK[1..]);
            if let PatKind::Binding(_, hir_id, _, None) = pat[0].kind;
            if let ExprKind::Path(QPath::Resolved(None, ref path)) = arm.body.kind;
            if let Res::Local(lid) = path.res;
            // The bound variable must be the one returned by the body.
            if lid == hir_id;
            then {
                return true;
            }
        }
        false
    }
    // Matches an arm whose pattern is `Err(..)`.
    fn is_err(arm: &Arm<'_>) -> bool {
        if let PatKind::TupleStruct(ref path, _, _) = arm.pat.kind {
            match_qpath(path, &paths::RESULT_ERR[1..])
        } else {
            false
        }
    }
    if let ExprKind::Match(_, ref arms, ref source) = expr.kind {
        // desugared from a `?` operator
        if let MatchSource::TryDesugar = *source {
            return Some(expr);
        }
        // Otherwise require exactly the `Ok`/`Err` pair (in either order),
        // with no guards.
        if_chain! {
            if arms.len() == 2;
            if arms[0].guard.is_none();
            if arms[1].guard.is_none();
            if (is_ok(&arms[0]) && is_err(&arms[1])) ||
                (is_ok(&arms[1]) && is_err(&arms[0]));
            then {
                return Some(expr);
            }
        }
    }
    None
}
/// Returns `true` if the lint is allowed in the current context
///
/// Useful for skipping long running code when it's unnecessary
pub fn is_allowed(cx: &LateContext<'_>, lint: &'static Lint, id: HirId) -> bool {
    cx.tcx.lint_level_at_node(lint, id).0 == Level::Allow
}
/// Returns the name a pattern binds to, looking through `&` patterns.
pub fn get_arg_name(pat: &Pat<'_>) -> Option<Symbol> {
    match pat.kind {
        PatKind::Binding(.., ident, None) => Some(ident.name),
        PatKind::Ref(ref subpat, _) => get_arg_name(subpat),
        _ => None,
    }
}
/// Returns the bit width of the given signed integer type on the target.
pub fn int_bits(tcx: TyCtxt<'_>, ity: ast::IntTy) -> u64 {
    Integer::from_attr(&tcx, attr::IntType::SignedInt(ity)).size().bits()
}
#[allow(clippy::cast_possible_wrap)]
/// Turn a constant int byte representation into an i128
pub fn sext(tcx: TyCtxt<'_>, u: u128, ity: ast::IntTy) -> i128 {
    // Shift left then arithmetic-shift right to sign-extend the value.
    let amt = 128 - int_bits(tcx, ity);
    ((u as i128) << amt) >> amt
}
#[allow(clippy::cast_sign_loss)]
/// clip unused bytes
pub fn unsext(tcx: TyCtxt<'_>, u: i128, ity: ast::IntTy) -> u128 {
    // Shift left then logical-shift right to zero the unused high bits.
    let amt = 128 - int_bits(tcx, ity);
    ((u as u128) << amt) >> amt
}
/// clip unused bytes
pub fn clip(tcx: TyCtxt<'_>, u: u128, ity: ast::UintTy) -> u128 {
    let bits = Integer::from_attr(&tcx, attr::IntType::UnsignedInt(ity)).size().bits();
    let amt = 128 - bits;
    (u << amt) >> amt
}
/// Removes block comments from the given `Vec` of lines.
///
/// Lines containing a block-comment delimiter are always dropped.
///
/// # Examples
///
/// ```rust,ignore
/// without_block_comments(vec!["/*", "foo", "*/"]);
/// // => vec![]
///
/// without_block_comments(vec!["bar", "/*", "foo", "*/"]);
/// // => vec!["bar"]
/// ```
pub fn without_block_comments(lines: Vec<&str>) -> Vec<&str> {
    let mut without = vec![];
    // Comment-nesting depth. Count *both* delimiters on each line: the old
    // `if contains("/*") { +1 } else if contains("*/") { -1 }` logic left a
    // line like `/* one-liner */` permanently open, swallowing every
    // following line.
    let mut nest_level = 0_usize;
    for line in lines {
        let opens = line.matches("/*").count();
        let closes = line.matches("*/").count();
        if opens > 0 || closes > 0 {
            // `saturating_sub` keeps a stray `*/` from underflowing the level.
            nest_level = (nest_level + opens).saturating_sub(closes);
            continue;
        }
        if nest_level == 0 {
            without.push(line);
        }
    }
    without
}
/// Returns `true` if any enclosing item of `node` carries the
/// `#[automatically_derived]` attribute (i.e. the node is inside `#[derive]`d
/// code).
pub fn any_parent_is_automatically_derived(tcx: TyCtxt<'_>, node: HirId) -> bool {
    let map = &tcx.hir();
    let mut prev_enclosing_node = None;
    let mut enclosing_node = node;
    // The crate root is its own parent, which terminates the walk.
    while Some(enclosing_node) != prev_enclosing_node {
        if is_automatically_derived(map.attrs(enclosing_node)) {
            return true;
        }
        prev_enclosing_node = Some(enclosing_node);
        enclosing_node = map.get_parent_item(enclosing_node);
    }
    false
}
/// Returns true if ty has `iter` or `iter_mut` methods
///
/// On success, returns the short name of the matching collection (the last
/// path segment), or `"array"`/`"slice"` for the built-in sequence types.
pub fn has_iter_method(cx: &LateContext<'_>, probably_ref_ty: Ty<'_>) -> Option<&'static str> {
    // FIXME: instead of this hard-coded list, we should check if `<adt>::iter`
    // exists and has the desired signature. Unfortunately FnCtxt is not exported
    // so we can't use its `lookup_method` method.
    let into_iter_collections: [&[&str]; 13] = [
        &paths::VEC,
        &paths::OPTION,
        &paths::RESULT,
        &paths::BTREESET,
        &paths::BTREEMAP,
        &paths::VEC_DEQUE,
        &paths::LINKED_LIST,
        &paths::BINARY_HEAP,
        &paths::HASHSET,
        &paths::HASHMAP,
        &paths::PATH_BUF,
        &paths::PATH,
        &paths::RECEIVER,
    ];
    // Peel one reference level, since `iter` is commonly called through `&T`.
    let ty_to_check = match probably_ref_ty.kind() {
        ty::Ref(_, ty_to_check, _) => ty_to_check,
        _ => probably_ref_ty,
    };
    let def_id = match ty_to_check.kind() {
        ty::Array(..) => return Some("array"),
        ty::Slice(..) => return Some("slice"),
        ty::Adt(adt, _) => adt.did,
        _ => return None,
    };
    for path in &into_iter_collections {
        if match_def_path(cx, def_id, path) {
            return Some(*path.last().unwrap());
        }
    }
    None
}
/// Matches a function call with the given path and returns the arguments.
///
/// Usage:
///
/// ```rust,ignore
/// if let Some(args) = match_function_call(cx, cmp_max_call, &paths::CMP_MAX);
/// ```
pub fn match_function_call<'tcx>(
    cx: &LateContext<'tcx>,
    expr: &'tcx Expr<'_>,
    path: &[&str],
) -> Option<&'tcx [Expr<'tcx>]> {
    if_chain! {
        if let ExprKind::Call(ref fun, ref args) = expr.kind;
        if let ExprKind::Path(ref qpath) = fun.kind;
        if let Some(fun_def_id) = cx.qpath_res(qpath, fun.hir_id).opt_def_id();
        if match_def_path(cx, fun_def_id, path);
        then {
            return Some(&args)
        }
    };
    None
}
/// Checks if `Ty` is normalizable. This function is useful
/// to avoid crashes on `layout_of`.
pub fn is_normalizable<'tcx>(cx: &LateContext<'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> bool {
    cx.tcx.infer_ctxt().enter(|infcx| {
        let cause = rustc_middle::traits::ObligationCause::dummy();
        infcx.at(&cause, param_env).normalize(ty).is_ok()
    })
}
/// Checks whether `did`'s definition path matches the given string segments.
pub fn match_def_path<'tcx>(cx: &LateContext<'tcx>, did: DefId, syms: &[&str]) -> bool {
    // We have to convert `syms` to `&[Symbol]` here because rustc's `match_def_path`
    // accepts only that. We should probably move to Symbols in Clippy as well.
    let syms = syms.iter().map(|p| Symbol::intern(p)).collect::<Vec<Symbol>>();
    cx.match_def_path(did, &syms)
}
/// Matches a call to any of the known panic entry points and returns the
/// arguments, if it is one.
pub fn match_panic_call<'tcx>(cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) -> Option<&'tcx [Expr<'tcx>]> {
    match_function_call(cx, expr, &paths::BEGIN_PANIC)
        .or_else(|| match_function_call(cx, expr, &paths::BEGIN_PANIC_FMT))
        .or_else(|| match_function_call(cx, expr, &paths::PANIC_ANY))
        .or_else(|| match_function_call(cx, expr, &paths::PANICKING_PANIC))
        .or_else(|| match_function_call(cx, expr, &paths::PANICKING_PANIC_FMT))
        .or_else(|| match_function_call(cx, expr, &paths::PANICKING_PANIC_STR))
}
/// Checks whether `did` is one of the known panic entry points.
/// Keep this list in sync with `match_panic_call` above.
pub fn match_panic_def_id(cx: &LateContext<'_>, did: DefId) -> bool {
    match_def_path(cx, did, &paths::BEGIN_PANIC)
        || match_def_path(cx, did, &paths::BEGIN_PANIC_FMT)
        || match_def_path(cx, did, &paths::PANIC_ANY)
        || match_def_path(cx, did, &paths::PANICKING_PANIC)
        || match_def_path(cx, did, &paths::PANICKING_PANIC_FMT)
        || match_def_path(cx, did, &paths::PANICKING_PANIC_STR)
}
/// Returns the list of condition expressions and the list of blocks in a
/// sequence of `if/else`.
/// E.g., this returns `([a, b], [c, d, e])` for the expression
/// `if a { c } else if b { d } else { e }`.
pub fn if_sequence<'tcx>(
    mut expr: &'tcx Expr<'tcx>,
) -> (SmallVec<[&'tcx Expr<'tcx>; 1]>, SmallVec<[&'tcx Block<'tcx>; 1]>) {
    let mut conds = SmallVec::new();
    let mut blocks: SmallVec<[&Block<'_>; 1]> = SmallVec::new();
    // Walk down the `else if` chain.
    while let Some((ref cond, ref then_expr, ref else_expr)) = higher::if_block(&expr) {
        conds.push(&**cond);
        if let ExprKind::Block(ref block, _) = then_expr.kind {
            blocks.push(block);
        } else {
            panic!("ExprKind::If node is not an ExprKind::Block");
        }
        if let Some(ref else_expr) = *else_expr {
            expr = else_expr;
        } else {
            break;
        }
    }
    // final `else {..}`
    if !blocks.is_empty() {
        if let ExprKind::Block(ref block, _) = expr.kind {
            blocks.push(&**block);
        }
    }
    (conds, blocks)
}
/// Returns `true` if the direct parent of `expr` is an `if` expression
/// (including a match arm whose body is an `if`).
pub fn parent_node_is_if_expr(expr: &Expr<'_>, cx: &LateContext<'_>) -> bool {
    let map = cx.tcx.hir();
    let parent_id = map.get_parent_node(expr.hir_id);
    let parent_node = map.get(parent_id);
    match parent_node {
        Node::Expr(e) => higher::if_block(&e).is_some(),
        Node::Arm(e) => higher::if_block(&e.body).is_some(),
        _ => false,
    }
}
/// Finds the attribute with the given name, if any
pub fn attr_by_name<'a>(attrs: &'a [Attribute], name: &'_ str) -> Option<&'a Attribute> {
    attrs
        .iter()
        .find(|attr| attr.ident().map_or(false, |ident| ident.as_str() == name))
}
/// Finds the `#[must_use]` attribute, if any
pub fn must_use_attr(attrs: &[Attribute]) -> Option<&Attribute> {
    attr_by_name(attrs, "must_use")
}
/// Returns whether the type has a `#[must_use]` attribute, looking through
/// references, pointers, arrays/slices, tuples, `impl Trait`, and trait
/// objects.
pub fn is_must_use_ty<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
    match ty.kind() {
        ty::Adt(ref adt, _) => must_use_attr(&cx.tcx.get_attrs(adt.did)).is_some(),
        ty::Foreign(ref did) => must_use_attr(&cx.tcx.get_attrs(*did)).is_some(),
        ty::Slice(ref ty)
        | ty::Array(ref ty, _)
        | ty::RawPtr(ty::TypeAndMut { ref ty, .. })
        | ty::Ref(_, ref ty, _) => {
            // for the Array case we don't need to care for the len == 0 case
            // because we don't want to lint functions returning empty arrays
            is_must_use_ty(cx, *ty)
        },
        ty::Tuple(ref substs) => substs.types().any(|ty| is_must_use_ty(cx, ty)),
        ty::Opaque(ref def_id, _) => {
            // `impl Trait`: check whether any bound trait is `#[must_use]`.
            for (predicate, _) in cx.tcx.explicit_item_bounds(*def_id) {
                if let ty::PredicateAtom::Trait(trait_predicate, _) = predicate.skip_binders() {
                    if must_use_attr(&cx.tcx.get_attrs(trait_predicate.trait_ref.def_id)).is_some() {
                        return true;
                    }
                }
            }
            false
        },
        ty::Dynamic(binder, _) => {
            // `dyn Trait`: check each trait in the object type.
            for predicate in binder.iter() {
                if let ty::ExistentialPredicate::Trait(ref trait_ref) = predicate.skip_binder() {
                    if must_use_attr(&cx.tcx.get_attrs(trait_ref.def_id)).is_some() {
                        return true;
                    }
                }
            }
            false
        },
        _ => false,
    }
}
/// Checks if expr is calling a method or function with a `#[must_use]`
/// attribute.
pub fn is_must_use_func_call(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
    let did = match expr.kind {
        ExprKind::Call(ref path, _) => if_chain! {
            if let ExprKind::Path(ref qpath) = path.kind;
            if let def::Res::Def(_, did) = cx.qpath_res(qpath, path.hir_id);
            then {
                Some(did)
            } else {
                None
            }
        },
        // Method calls resolve through typeck results.
        ExprKind::MethodCall(_, _, _, _) => cx.typeck_results().type_dependent_def_id(expr.hir_id),
        _ => None,
    };
    did.map_or(false, |did| must_use_attr(&cx.tcx.get_attrs(did)).is_some())
}
/// Returns `true` if the crate has a `#![no_std]` attribute.
pub fn is_no_std_crate(krate: &Crate<'_>) -> bool {
    krate.item.attrs.iter().any(|attr| {
        if let ast::AttrKind::Normal(ref attr, _) = attr.kind {
            attr.path == symbol::sym::no_std
        } else {
            false
        }
    })
}
/// Check if parent of a hir node is a trait implementation block.
/// For example, `f` in
/// ```rust,ignore
/// impl Trait for S {
///     fn f() {}
/// }
/// ```
pub fn is_trait_impl_item(cx: &LateContext<'_>, hir_id: HirId) -> bool {
    if let Some(Node::Item(item)) = cx.tcx.hir().find(cx.tcx.hir().get_parent_node(hir_id)) {
        // `of_trait: Some(_)` distinguishes trait impls from inherent impls.
        matches!(item.kind, ItemKind::Impl{ of_trait: Some(_), .. })
    } else {
        false
    }
}
/// Check if it's even possible to satisfy the `where` clause for the item.
///
/// `trivial_bounds` feature allows functions with unsatisfiable bounds, for example:
///
/// ```ignore
/// fn foo() where i32: Iterator {
///     for _ in 2i32 {}
/// }
/// ```
pub fn fn_has_unsatisfiable_preds(cx: &LateContext<'_>, did: DefId) -> bool {
    use rustc_trait_selection::traits;
    // Only global (non-generic-dependent) predicates can be decided here.
    let global_preds = cx
        .tcx
        .predicates_of(did)
        .predicates
        .iter()
        .filter(|(p, _)| p.is_global())
        .map(|(p, _)| *p);
    traits::impossible_predicates(
        cx.tcx,
        traits::elaborate_predicates(cx.tcx, global_preds)
            .map(|o| o.predicate)
            .collect::<Vec<_>>(),
    )
}
/// Returns the `DefId` of the callee if the given expression is a function or method call.
pub fn fn_def_id(cx: &LateContext<'_>, expr: &Expr<'_>) -> Option<DefId> {
    match &expr.kind {
        ExprKind::MethodCall(..) => cx.typeck_results().type_dependent_def_id(expr.hir_id),
        ExprKind::Call(callee, _) => {
            // Only path callees (plain functions, tuple constructors, …)
            // resolve to a single `DefId`; closures etc. do not.
            if let ExprKind::Path(qpath) = &callee.kind {
                cx.typeck_results().qpath_res(qpath, expr.hir_id).opt_def_id()
            } else {
                None
            }
        },
        _ => None,
    }
}
/// Returns `true` if any of `lints` is enabled (warn or stronger) at the
/// given HIR node.
pub fn run_lints(cx: &LateContext<'_>, lints: &[&'static Lint], id: HirId) -> bool {
    lints.iter().any(|lint| {
        let (level, _) = cx.tcx.lint_level_at_node(lint, id);
        matches!(level, Level::Forbid | Level::Deny | Level::Warn)
    })
}
/// Returns true iff the given type is a primitive (a bool or char, any integer or floating-point
/// number type, a str, or an array, slice, or tuple of those types).
pub fn is_recursively_primitive_type(ty: Ty<'_>) -> bool {
    match ty.kind() {
        ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str => true,
        // Among references, only `&str` counts as primitive.
        ty::Ref(_, inner, _) => matches!(inner.kind(), ty::Str),
        ty::Array(inner_type, _) | ty::Slice(inner_type) => is_recursively_primitive_type(inner_type),
        ty::Tuple(inner_types) => inner_types.types().all(is_recursively_primitive_type),
        _ => false,
    }
}
/// Returns Option<String> where String is a textual representation of the type encapsulated in the
/// slice iff the given expression is a slice of primitives (as defined in the
/// `is_recursively_primitive_type` function) and None otherwise.
pub fn is_slice_of_primitives(cx: &LateContext<'_>, expr: &Expr<'_>) -> Option<String> {
    let expr_type = cx.typeck_results().expr_ty_adjusted(expr);
    let expr_kind = expr_type.kind();
    let is_primitive = match expr_kind {
        ty::Slice(element_type) => is_recursively_primitive_type(element_type),
        // Folding the slice check into the pattern removes the previous
        // `if let … else { unreachable!() }` double match: a reference to a
        // non-slice type is simply not a slice of primitives.
        ty::Ref(_, inner_ty, _) => match inner_ty.kind() {
            ty::Slice(element_type) => is_recursively_primitive_type(element_type),
            _ => false,
        },
        _ => false,
    };
    if is_primitive {
        // if we have wrappers like Array, Slice or Tuple, print these
        // and get the type enclosed in the slice ref
        match expr_type.peel_refs().walk().nth(1).unwrap().expect_ty().kind() {
            ty::Slice(..) => return Some("slice".into()),
            ty::Array(..) => return Some("array".into()),
            ty::Tuple(..) => return Some("tuple".into()),
            _ => {
                // is_recursively_primitive_type() should have taken care
                // of the rest and we can rely on the type that is found
                let refs_peeled = expr_type.peel_refs();
                return Some(refs_peeled.walk().last().unwrap().to_string());
            },
        }
    }
    None
}
/// Returns the list of all pairs `(a, b)` from `exprs` such that `eq(a, b)`.
/// `hash` must be consistent with `eq`: values that compare equal must hash
/// to the same `u64`.
pub fn search_same<T, Hash, Eq>(exprs: &[T], hash: Hash, eq: Eq) -> Vec<(&T, &T)>
where
    Hash: Fn(&T) -> u64,
    Eq: Fn(&T, &T) -> bool,
{
    // Fast path: with exactly two candidates there is only one pair to test.
    if let [a, b] = exprs {
        if eq(a, b) {
            return vec![(a, b)];
        }
    }
    let mut found: Vec<(&T, &T)> = Vec::new();
    // Bucket expressions by hash so `eq` is only run within a bucket.
    let mut buckets: FxHashMap<u64, Vec<&T>> =
        FxHashMap::with_capacity_and_hasher(exprs.len(), BuildHasherDefault::default());
    for expr in exprs {
        let bucket = buckets.entry(hash(expr)).or_insert_with(Vec::new);
        for &earlier in bucket.iter() {
            if eq(earlier, expr) {
                found.push((earlier, expr));
            }
        }
        bucket.push(expr);
    }
    found
}
/// Runs `cargo metadata` (optionally restricted with `--no-deps`) and
/// evaluates to the parsed metadata; on failure it emits `$lint` via
/// `span_lint` and `return`s from the *enclosing* function — which is why
/// this must be a macro rather than a function.
#[macro_export]
macro_rules! unwrap_cargo_metadata {
    ($cx: ident, $lint: ident, $deps: expr) => {{
        let mut command = cargo_metadata::MetadataCommand::new();
        if !$deps {
            // Caller asked only for workspace metadata, skip dependency resolution.
            command.no_deps();
        }
        match command.exec() {
            Ok(metadata) => metadata,
            Err(err) => {
                span_lint($cx, $lint, DUMMY_SP, &format!("could not read cargo metadata: {}", err));
                // Exits the function that invoked the macro.
                return;
            },
        }
    }};
}
#[cfg(test)]
mod test {
    //! Unit tests for the snippet-reindenting and block-comment-stripping
    //! helpers. NOTE: the multi-line string literals below are whitespace
    //! sensitive — do not reformat them.
    use super::{reindent_multiline, without_block_comments};
    // Single-line input: leading spaces/tabs are stripped entirely.
    #[test]
    fn test_reindent_multiline_single_line() {
        assert_eq!("", reindent_multiline("".into(), false, None));
        assert_eq!("...", reindent_multiline("...".into(), false, None));
        assert_eq!("...", reindent_multiline("    ...".into(), false, None));
        assert_eq!("...", reindent_multiline("\t...".into(), false, None));
        assert_eq!("...", reindent_multiline("\t\t...".into(), false, None));
    }
    // Multi-line block: common leading indentation is removed, relative
    // indentation (spaces or tabs) is preserved.
    #[test]
    #[rustfmt::skip]
    fn test_reindent_multiline_block() {
        assert_eq!("\
    if x {
        y
    } else {
        z
    }", reindent_multiline("    if x {
        y
    } else {
        z
    }".into(), false, None));
        assert_eq!("\
    if x {
    \ty
    } else {
    \tz
    }", reindent_multiline("    if x {
    \ty
    } else {
    \tz
    }".into(), false, None));
    }
    // Empty lines inside the block must not break indent detection.
    #[test]
    #[rustfmt::skip]
    fn test_reindent_multiline_empty_line() {
        assert_eq!("\
    if x {
        y
    } else {
        z
    }", reindent_multiline("    if x {
        y
    } else {
        z
    }".into(), false, None));
    }
    // `trim_first = true` with an explicit indent re-indents deeper lines.
    #[test]
    #[rustfmt::skip]
    fn test_reindent_multiline_lines_deeper() {
        assert_eq!("\
        if x {
            y
        } else {
            z
        }", reindent_multiline("\
    if x {
        y
    } else {
        z
    }".into(), true, Some(8)));
    }
    // Block comments (including nested ones) are removed; everything
    // outside them is kept in order.
    #[test]
    fn test_without_block_comments_lines_without_block_comments() {
        let result = without_block_comments(vec!["/*", "", "*/"]);
        println!("result: {:?}", result);
        assert!(result.is_empty());
        let result = without_block_comments(vec!["", "/*", "", "*/", "#[crate_type = \"lib\"]", "/*", "", "*/", ""]);
        assert_eq!(result, vec!["", "#[crate_type = \"lib\"]", ""]);
        let result = without_block_comments(vec!["/* rust", "", "*/"]);
        assert!(result.is_empty());
        let result = without_block_comments(vec!["/* one-line comment */"]);
        assert!(result.is_empty());
        let result = without_block_comments(vec!["/* nested", "/* multi-line", "comment", "*/", "test", "*/"]);
        assert!(result.is_empty());
        let result = without_block_comments(vec!["/* nested /* inline /* comment */ test */ */"]);
        assert!(result.is_empty());
        let result = without_block_comments(vec!["foo", "bar", "baz"]);
        assert_eq!(result, vec!["foo", "bar", "baz"]);
    }
}
|
#![cfg(feature = "compiletest")]
use compiletest_rs as compiletest;
#[test]
fn ui() {
    // Run every case under tests/ui through compiletest in UI mode,
    // comparing compiler output against the checked-in expectations.
    let config = compiletest::Config {
        mode: compiletest::common::Mode::Ui,
        src_base: std::path::PathBuf::from("tests/ui"),
        // Flags passed to rustc for each test; `-L` exposes pre-built
        // dependency artifacts so `--extern serde_derive` resolves.
        // (The `\`-continuations strip the leading whitespace of each line.)
        target_rustcflags: Some(String::from(
            "\
            --edition=2018 \
            -L deps/target/debug/deps \
            -Z unstable-options \
            --extern serde_derive \
            ",
        )),
        ..Default::default()
    };
    compiletest::run_tests(&config);
}
Make compiletest setup consistent with serde_json
#![cfg(feature = "compiletest")]
use compiletest_rs as compiletest;
#[test]
fn ui() {
    // UI-mode compiletest over tests/ui; rustc flags make the pre-built
    // dependency artifacts available via `--extern`.
    let config = compiletest::Config {
        mode: compiletest::common::Mode::Ui,
        src_base: std::path::PathBuf::from("tests/ui"),
        target_rustcflags: Some(String::from(
            "\
            --edition=2018 \
            -L deps/target/debug/deps \
            -Z unstable-options \
            --extern serde_derive \
            ",
        )),
        ..Default::default()
    };
    compiletest::run_tests(&config);
}
|
pub use crate::passes::BoxedResolver;
use crate::util;
use rustc_ast::token;
use rustc_ast::{self as ast, MetaItemKind};
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::OnDrop;
use rustc_errors::registry::Registry;
use rustc_errors::{ErrorReported, Handler};
use rustc_lint::LintStore;
use rustc_middle::ty;
use rustc_parse::new_parser_from_source_str;
use rustc_session::config::{self, ErrorOutputType, Input, OutputFilenames};
use rustc_session::early_error;
use rustc_session::lint;
use rustc_session::parse::{CrateConfig, ParseSess};
use rustc_session::{DiagnosticOutput, Session};
use rustc_span::source_map::{FileLoader, FileName};
use std::path::PathBuf;
use std::result;
use std::sync::{Arc, Mutex};
/// Crate-wide result type; `ErrorReported` marks that a diagnostic was
/// already emitted.
pub type Result<T> = result::Result<T, ErrorReported>;
/// Represents a compiler session.
///
/// Can be used to run `rustc_interface` queries.
/// Created by passing [`Config`] to [`run_compiler`].
pub struct Compiler {
    pub(crate) sess: Lrc<Session>,
    codegen_backend: Lrc<Box<dyn CodegenBackend>>,
    pub(crate) input: Input,
    /// On-disk path of the input, if it came from a file.
    pub(crate) input_path: Option<PathBuf>,
    /// Overrides for where outputs are written (`--out-dir` / `-o`).
    pub(crate) output_dir: Option<PathBuf>,
    pub(crate) output_file: Option<PathBuf>,
    /// Driver callback for registering additional lints, if provided.
    pub(crate) register_lints: Option<Box<dyn Fn(&Session, &mut LintStore) + Send + Sync>>,
    /// Driver hook to override local/extern query providers, if provided.
    pub(crate) override_queries:
        Option<fn(&Session, &mut ty::query::Providers, &mut ty::query::Providers)>,
}
impl Compiler {
    /// The [`Session`] this compiler was built with.
    pub fn session(&self) -> &Lrc<Session> {
        &self.sess
    }
    /// The codegen backend selected for this session.
    pub fn codegen_backend(&self) -> &Lrc<Box<dyn CodegenBackend>> {
        &self.codegen_backend
    }
    /// The input (file or inline string) being compiled.
    pub fn input(&self) -> &Input {
        &self.input
    }
    /// Output directory override, if any.
    pub fn output_dir(&self) -> &Option<PathBuf> {
        &self.output_dir
    }
    /// Explicit output file override, if any.
    pub fn output_file(&self) -> &Option<PathBuf> {
        &self.output_file
    }
    /// The driver's lint-registration callback, if any.
    pub fn register_lints(&self) -> &Option<Box<dyn Fn(&Session, &mut LintStore) + Send + Sync>> {
        &self.register_lints
    }
    /// Computes the output file names from this compiler's input/output
    /// settings and the crate-level attributes `attrs`.
    pub fn build_output_filenames(
        &self,
        sess: &Session,
        attrs: &[ast::Attribute],
    ) -> OutputFilenames {
        util::build_output_filenames(
            &self.input,
            &self.output_dir,
            &self.output_file,
            &attrs,
            &sess,
        )
    }
}
/// Converts strings provided as `--cfg [cfgspec]` into a `crate_cfg`.
pub fn parse_cfgspecs(cfgspecs: Vec<String>) -> FxHashSet<(String, Option<String>)> {
    rustc_span::with_default_session_globals(move || {
        let cfg = cfgspecs
            .into_iter()
            .map(|s| {
                // Parse each spec with a silent emitter: we report our own
                // error message through `early_error` below instead.
                let sess = ParseSess::with_silent_emitter();
                let filename = FileName::cfg_spec_source_code(&s);
                let mut parser = new_parser_from_source_str(&sess, filename, s.to_string());
                // NOTE(review): `early_error` appears to abort compilation,
                // which is why arms calling `error!` need not produce a
                // value — confirm against rustc_session docs.
                macro_rules! error {
                    ($reason: expr) => {
                        early_error(
                            ErrorOutputType::default(),
                            &format!(concat!("invalid `--cfg` argument: `{}` (", $reason, ")"), s),
                        );
                    };
                }
                match &mut parser.parse_meta_item() {
                    // Require the whole spec to be consumed (next token is EOF).
                    Ok(meta_item) if parser.token == token::Eof => {
                        if meta_item.path.segments.len() != 1 {
                            error!("argument key must be an identifier");
                        }
                        match &meta_item.kind {
                            MetaItemKind::List(..) => {
                                error!(r#"expected `key` or `key="value"`"#);
                            }
                            MetaItemKind::NameValue(lit) if !lit.kind.is_str() => {
                                error!("argument value must be a string");
                            }
                            MetaItemKind::NameValue(..) | MetaItemKind::Word => {
                                // Well-formed `key` or `key="value"`: this is
                                // the closure's success exit.
                                let ident = meta_item.ident().expect("multi-segment cfg key");
                                return (ident.name, meta_item.value_str());
                            }
                        }
                    }
                    Ok(..) => {}
                    Err(err) => err.cancel(),
                }
                error!(r#"expected `key` or `key="value"`"#);
            })
            .collect::<CrateConfig>();
        // Convert interned symbols back to plain strings for the caller.
        cfg.into_iter().map(|(a, b)| (a.to_string(), b.map(|b| b.to_string()))).collect()
    })
}
/// The compiler configuration
pub struct Config {
    /// Command line options
    pub opts: config::Options,
    /// cfg! configuration in addition to the default ones
    pub crate_cfg: FxHashSet<(String, Option<String>)>,
    /// The input (source file or inline string) to compile.
    pub input: Input,
    /// On-disk path of the input, if it came from a file.
    pub input_path: Option<PathBuf>,
    /// Output directory override, if any.
    pub output_dir: Option<PathBuf>,
    /// Explicit output file override, if any.
    pub output_file: Option<PathBuf>,
    /// Custom source-file loader (e.g. for virtual file systems), if any.
    pub file_loader: Option<Box<dyn FileLoader + Send + Sync>>,
    /// Where diagnostics should be written.
    pub diagnostic_output: DiagnosticOutput,
    /// Set to capture stderr output during compiler execution
    pub stderr: Option<Arc<Mutex<Vec<u8>>>>,
    /// Per-lint level caps imposed by the driver.
    pub lint_caps: FxHashMap<lint::LintId, lint::Level>,
    /// This is a callback from the driver that is called when [`ParseSess`] is created.
    pub parse_sess_created: Option<Box<dyn FnOnce(&mut ParseSess) + Send>>,
    /// This is a callback from the driver that is called when we're registering lints;
    /// it is called during plugin registration when we have the LintStore in a non-shared state.
    ///
    /// Note that if you find a Some here you probably want to call that function in the new
    /// function being registered.
    pub register_lints: Option<Box<dyn Fn(&Session, &mut LintStore) + Send + Sync>>,
    /// This is a callback from the driver that is called just after we have populated
    /// the list of queries.
    ///
    /// The second parameter is local providers and the third parameter is external providers.
    pub override_queries:
        Option<fn(&Session, &mut ty::query::Providers, &mut ty::query::Providers)>,
    /// This is a callback from the driver that is called to create a codegen backend.
    pub make_codegen_backend:
        Option<Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>>,
    /// Registry of diagnostics codes.
    pub registry: Registry,
}
/// Builds a [`Compiler`] from `config`, runs `f` with it, and tears the
/// session down afterwards (diagnostics are finished even if `f` unwinds).
pub fn create_compiler_and_run<R>(config: Config, f: impl FnOnce(&Compiler) -> R) -> R {
    let registry = &config.registry;
    let (mut sess, codegen_backend) = util::create_session(
        config.opts,
        config.crate_cfg,
        config.diagnostic_output,
        config.file_loader,
        config.input_path.clone(),
        config.lint_caps,
        config.make_codegen_backend,
        registry.clone(),
    );
    if let Some(parse_sess_created) = config.parse_sess_created {
        // `create_session` just returned this `Lrc`, so this is the only
        // reference; `expect` documents that invariant instead of a bare
        // `unwrap` panicking without context.
        parse_sess_created(
            &mut Lrc::get_mut(&mut sess)
                .expect("create_session() should never share the returned session")
                .parse_sess,
        );
    }
    let compiler = Compiler {
        sess,
        codegen_backend,
        input: config.input,
        input_path: config.input_path,
        output_dir: config.output_dir,
        output_file: config.output_file,
        register_lints: config.register_lints,
        override_queries: config.override_queries,
    };
    rustc_span::with_source_map(compiler.sess.parse_sess.clone_source_map(), move || {
        let r = {
            // Finish diagnostics on scope exit, even if `f` panics.
            let _sess_abort_error = OnDrop(|| {
                compiler.sess.finish_diagnostics(registry);
            });
            f(&compiler)
        };
        // Time the (potentially expensive) teardown of the compiler.
        let prof = compiler.sess.prof.clone();
        prof.generic_activity("drop_compiler").run(move || drop(compiler));
        r
    })
}
/// Sets up session globals and a thread pool, then creates the compiler and
/// runs `f` with it. This is the main entry point for driving `rustc_interface`.
pub fn run_compiler<R: Send>(mut config: Config, f: impl FnOnce(&Compiler) -> R + Send) -> R {
    tracing::trace!("run_compiler");
    // Take the stderr-capture buffer out of `config` so it can be borrowed
    // by the thread-pool setup while `config` itself moves into the closure.
    let stderr = config.stderr.take();
    util::setup_callbacks_and_run_in_thread_pool_with_globals(
        config.opts.edition,
        config.opts.debugging_opts.threads,
        &stderr,
        || create_compiler_and_run(config, f),
    )
}
/// Prints the active query stack to stderr from inside a panic hook.
///
/// `num_frames` caps how many frames are shown; `None` means show them all.
pub fn try_print_query_stack(handler: &Handler, num_frames: Option<usize>) {
    eprintln!("query stack during panic:");
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let i = ty::tls::with_context_opt(|icx| {
        if let Some(icx) = icx {
            icx.tcx.queries.try_print_query_stack(icx.tcx, icx.query, handler, num_frames)
        } else {
            0
        }
    });
    // `is_none()` instead of `== None` (idiomatic; avoids a PartialEq bound
    // on the comparison and reads clearer).
    if num_frames.is_none() || num_frames >= Some(i) {
        eprintln!("end of query stack");
    } else {
        eprintln!("we're just showing a limited slice of the query stack");
    }
}
Change the `.unwrap` to `.expect` with a helpful message
pub use crate::passes::BoxedResolver;
use crate::util;
use rustc_ast::token;
use rustc_ast::{self as ast, MetaItemKind};
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::OnDrop;
use rustc_errors::registry::Registry;
use rustc_errors::{ErrorReported, Handler};
use rustc_lint::LintStore;
use rustc_middle::ty;
use rustc_parse::new_parser_from_source_str;
use rustc_session::config::{self, ErrorOutputType, Input, OutputFilenames};
use rustc_session::early_error;
use rustc_session::lint;
use rustc_session::parse::{CrateConfig, ParseSess};
use rustc_session::{DiagnosticOutput, Session};
use rustc_span::source_map::{FileLoader, FileName};
use std::path::PathBuf;
use std::result;
use std::sync::{Arc, Mutex};
/// Crate-wide result type; `ErrorReported` marks that a diagnostic was
/// already emitted.
pub type Result<T> = result::Result<T, ErrorReported>;
/// Represents a compiler session.
///
/// Can be used to run `rustc_interface` queries.
/// Created by passing [`Config`] to [`run_compiler`].
pub struct Compiler {
    pub(crate) sess: Lrc<Session>,
    codegen_backend: Lrc<Box<dyn CodegenBackend>>,
    pub(crate) input: Input,
    /// On-disk path of the input, if it came from a file.
    pub(crate) input_path: Option<PathBuf>,
    /// Overrides for where outputs are written (`--out-dir` / `-o`).
    pub(crate) output_dir: Option<PathBuf>,
    pub(crate) output_file: Option<PathBuf>,
    /// Driver callback for registering additional lints, if provided.
    pub(crate) register_lints: Option<Box<dyn Fn(&Session, &mut LintStore) + Send + Sync>>,
    /// Driver hook to override local/extern query providers, if provided.
    pub(crate) override_queries:
        Option<fn(&Session, &mut ty::query::Providers, &mut ty::query::Providers)>,
}
impl Compiler {
    /// The [`Session`] this compiler was built with.
    pub fn session(&self) -> &Lrc<Session> {
        &self.sess
    }
    /// The codegen backend selected for this session.
    pub fn codegen_backend(&self) -> &Lrc<Box<dyn CodegenBackend>> {
        &self.codegen_backend
    }
    /// The input (file or inline string) being compiled.
    pub fn input(&self) -> &Input {
        &self.input
    }
    /// Output directory override, if any.
    pub fn output_dir(&self) -> &Option<PathBuf> {
        &self.output_dir
    }
    /// Explicit output file override, if any.
    pub fn output_file(&self) -> &Option<PathBuf> {
        &self.output_file
    }
    /// The driver's lint-registration callback, if any.
    pub fn register_lints(&self) -> &Option<Box<dyn Fn(&Session, &mut LintStore) + Send + Sync>> {
        &self.register_lints
    }
    /// Computes the output file names from this compiler's input/output
    /// settings and the crate-level attributes `attrs`.
    pub fn build_output_filenames(
        &self,
        sess: &Session,
        attrs: &[ast::Attribute],
    ) -> OutputFilenames {
        util::build_output_filenames(
            &self.input,
            &self.output_dir,
            &self.output_file,
            &attrs,
            &sess,
        )
    }
}
/// Converts strings provided as `--cfg [cfgspec]` into a `crate_cfg`.
pub fn parse_cfgspecs(cfgspecs: Vec<String>) -> FxHashSet<(String, Option<String>)> {
    rustc_span::with_default_session_globals(move || {
        let cfg = cfgspecs
            .into_iter()
            .map(|s| {
                // Parse each spec with a silent emitter: we report our own
                // error message through `early_error` below instead.
                let sess = ParseSess::with_silent_emitter();
                let filename = FileName::cfg_spec_source_code(&s);
                let mut parser = new_parser_from_source_str(&sess, filename, s.to_string());
                // NOTE(review): `early_error` appears to abort compilation,
                // which is why arms calling `error!` need not produce a
                // value — confirm against rustc_session docs.
                macro_rules! error {
                    ($reason: expr) => {
                        early_error(
                            ErrorOutputType::default(),
                            &format!(concat!("invalid `--cfg` argument: `{}` (", $reason, ")"), s),
                        );
                    };
                }
                match &mut parser.parse_meta_item() {
                    // Require the whole spec to be consumed (next token is EOF).
                    Ok(meta_item) if parser.token == token::Eof => {
                        if meta_item.path.segments.len() != 1 {
                            error!("argument key must be an identifier");
                        }
                        match &meta_item.kind {
                            MetaItemKind::List(..) => {
                                error!(r#"expected `key` or `key="value"`"#);
                            }
                            MetaItemKind::NameValue(lit) if !lit.kind.is_str() => {
                                error!("argument value must be a string");
                            }
                            MetaItemKind::NameValue(..) | MetaItemKind::Word => {
                                // Well-formed `key` or `key="value"`: this is
                                // the closure's success exit.
                                let ident = meta_item.ident().expect("multi-segment cfg key");
                                return (ident.name, meta_item.value_str());
                            }
                        }
                    }
                    Ok(..) => {}
                    Err(err) => err.cancel(),
                }
                error!(r#"expected `key` or `key="value"`"#);
            })
            .collect::<CrateConfig>();
        // Convert interned symbols back to plain strings for the caller.
        cfg.into_iter().map(|(a, b)| (a.to_string(), b.map(|b| b.to_string()))).collect()
    })
}
/// The compiler configuration
pub struct Config {
    /// Command line options
    pub opts: config::Options,
    /// cfg! configuration in addition to the default ones
    pub crate_cfg: FxHashSet<(String, Option<String>)>,
    /// The input (source file or inline string) to compile.
    pub input: Input,
    /// On-disk path of the input, if it came from a file.
    pub input_path: Option<PathBuf>,
    /// Output directory override, if any.
    pub output_dir: Option<PathBuf>,
    /// Explicit output file override, if any.
    pub output_file: Option<PathBuf>,
    /// Custom source-file loader (e.g. for virtual file systems), if any.
    pub file_loader: Option<Box<dyn FileLoader + Send + Sync>>,
    /// Where diagnostics should be written.
    pub diagnostic_output: DiagnosticOutput,
    /// Set to capture stderr output during compiler execution
    pub stderr: Option<Arc<Mutex<Vec<u8>>>>,
    /// Per-lint level caps imposed by the driver.
    pub lint_caps: FxHashMap<lint::LintId, lint::Level>,
    /// This is a callback from the driver that is called when [`ParseSess`] is created.
    pub parse_sess_created: Option<Box<dyn FnOnce(&mut ParseSess) + Send>>,
    /// This is a callback from the driver that is called when we're registering lints;
    /// it is called during plugin registration when we have the LintStore in a non-shared state.
    ///
    /// Note that if you find a Some here you probably want to call that function in the new
    /// function being registered.
    pub register_lints: Option<Box<dyn Fn(&Session, &mut LintStore) + Send + Sync>>,
    /// This is a callback from the driver that is called just after we have populated
    /// the list of queries.
    ///
    /// The second parameter is local providers and the third parameter is external providers.
    pub override_queries:
        Option<fn(&Session, &mut ty::query::Providers, &mut ty::query::Providers)>,
    /// This is a callback from the driver that is called to create a codegen backend.
    pub make_codegen_backend:
        Option<Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>>,
    /// Registry of diagnostics codes.
    pub registry: Registry,
}
/// Builds a [`Compiler`] from `config`, runs `f` with it, and tears the
/// session down afterwards (diagnostics are finished even if `f` unwinds).
pub fn create_compiler_and_run<R>(config: Config, f: impl FnOnce(&Compiler) -> R) -> R {
    let registry = &config.registry;
    let (mut sess, codegen_backend) = util::create_session(
        config.opts,
        config.crate_cfg,
        config.diagnostic_output,
        config.file_loader,
        config.input_path.clone(),
        config.lint_caps,
        config.make_codegen_backend,
        registry.clone(),
    );
    if let Some(parse_sess_created) = config.parse_sess_created {
        // `create_session` just returned this `Lrc`, so we hold the only
        // reference and `get_mut` must succeed; the `expect` message states
        // that invariant.
        parse_sess_created(
            &mut Lrc::get_mut(&mut sess)
                .expect("create_session() should never share the returned session")
                .parse_sess,
        );
    }
    let compiler = Compiler {
        sess,
        codegen_backend,
        input: config.input,
        input_path: config.input_path,
        output_dir: config.output_dir,
        output_file: config.output_file,
        register_lints: config.register_lints,
        override_queries: config.override_queries,
    };
    rustc_span::with_source_map(compiler.sess.parse_sess.clone_source_map(), move || {
        let r = {
            // Finish diagnostics on scope exit, even if `f` panics.
            let _sess_abort_error = OnDrop(|| {
                compiler.sess.finish_diagnostics(registry);
            });
            f(&compiler)
        };
        // Time the (potentially expensive) teardown of the compiler.
        let prof = compiler.sess.prof.clone();
        prof.generic_activity("drop_compiler").run(move || drop(compiler));
        r
    })
}
/// Sets up session globals and a thread pool, then creates the compiler and
/// runs `f` with it. This is the main entry point for driving `rustc_interface`.
pub fn run_compiler<R: Send>(mut config: Config, f: impl FnOnce(&Compiler) -> R + Send) -> R {
    tracing::trace!("run_compiler");
    // Take the stderr-capture buffer out of `config` so it can be borrowed
    // by the thread-pool setup while `config` itself moves into the closure.
    let stderr = config.stderr.take();
    util::setup_callbacks_and_run_in_thread_pool_with_globals(
        config.opts.edition,
        config.opts.debugging_opts.threads,
        &stderr,
        || create_compiler_and_run(config, f),
    )
}
/// Prints the active query stack to stderr from inside a panic hook.
///
/// `num_frames` caps how many frames are shown; `None` means show them all.
pub fn try_print_query_stack(handler: &Handler, num_frames: Option<usize>) {
    eprintln!("query stack during panic:");
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let i = ty::tls::with_context_opt(|icx| {
        if let Some(icx) = icx {
            icx.tcx.queries.try_print_query_stack(icx.tcx, icx.query, handler, num_frames)
        } else {
            0
        }
    });
    // `is_none()` instead of `== None` (idiomatic; avoids a PartialEq bound
    // on the comparison and reads clearer).
    if num_frames.is_none() || num_frames >= Some(i) {
        eprintln!("end of query stack");
    } else {
        eprintln!("we're just showing a limited slice of the query stack");
    }
}
|
use crate::command::studio;
use clap::{App,
AppSettings,
Arg,
ArgMatches};
use habitat_common::{cli::{file_into_idents,
is_toml_file,
BINLINK_DIR_ENVVAR,
DEFAULT_BINLINK_DIR,
PACKAGE_TARGET_ENVVAR,
RING_ENVVAR,
RING_KEY_ENVVAR},
types::{AutomateAuthToken,
EventStreamConnectMethod,
EventStreamMetadata,
EventStreamServerCertificate,
GossipListenAddr,
HttpListenAddr,
ListenCtlAddr},
FeatureFlag};
use habitat_core::{crypto::{keys::PairType,
CACHE_KEY_PATH_ENV_VAR},
env::Config,
fs::CACHE_KEY_PATH,
os::process::ShutdownTimeout,
package::{ident,
Identifiable,
PackageIdent,
PackageTarget},
service::{HealthCheckInterval,
ServiceGroup},
ChannelIdent};
use habitat_sup_protocol;
use rants::Address as NatsAddress;
use std::{net::{Ipv4Addr,
SocketAddr},
path::Path,
result,
str::FromStr};
use url::Url;
/// Assembles the complete top-level `hab` CLI definition, including every
/// subcommand tree and the hidden top-level aliases (`apply`, `install`,
/// `setup`, `start`, `stop`, plus `run`/`term` added below).
///
/// `feature_flags` is forwarded to `sub_pkg_install` and `sup_commands`,
/// which build feature-dependent portions of the CLI.
pub fn get(feature_flags: FeatureFlag) -> App<'static, 'static> {
    // Hidden aliases reuse the real subcommand definitions so their args
    // and help text never drift apart.
    let alias_apply = sub_config_apply().about("Alias for 'config apply'")
                                        .aliases(&["ap", "app", "appl"])
                                        .setting(AppSettings::Hidden);
    let alias_install =
        sub_pkg_install(feature_flags).about("Alias for 'pkg install'")
                                      .aliases(&["i", "in", "ins", "inst", "insta", "instal"])
                                      .setting(AppSettings::Hidden);
    let alias_setup = sub_cli_setup().about("Alias for 'cli setup'")
                                     .aliases(&["set", "setu"])
                                     .setting(AppSettings::Hidden);
    let alias_start = sub_svc_start().about("Alias for 'svc start'")
                                     .aliases(&["sta", "star"])
                                     .setting(AppSettings::Hidden);
    let alias_stop = sub_svc_stop().about("Alias for 'svc stop'")
                                   .aliases(&["sto"])
                                   .setting(AppSettings::Hidden);
    clap_app!(hab =>
        (about: "\"A Habitat is the natural environment for your services\" - Alan Turing")
        (version: super::VERSION)
        (author: "\nAuthors: The Habitat Maintainers <humans@habitat.sh>\n")
        (@setting GlobalVersion)
        (@setting ArgRequiredElseHelp)
        (@subcommand license =>
            (about: "Commands relating to Habitat license agreements")
            (@setting ArgRequiredElseHelp)
            (@subcommand accept =>
                (about: "Accept the Chef Binary Distribution Agreement without prompting"))
        )
        (@subcommand cli =>
            (about: "Commands relating to Habitat runtime config")
            (aliases: &["cl"])
            (@setting ArgRequiredElseHelp)
            (subcommand: sub_cli_setup().aliases(&["s", "se", "set", "setu"]))
            (subcommand: sub_cli_completers().aliases(&["c", "co", "com", "comp"]))
        )
        (@subcommand config =>
            (about: "Commands relating to a Service's runtime config")
            (aliases: &["co", "con", "conf", "confi"])
            (@setting ArgRequiredElseHelp)
            (subcommand: sub_config_apply().aliases(&["ap", "app", "appl"]))
            (@subcommand show =>
                (about: "Displays the default configuration options for a service")
                (aliases: &["sh", "sho"])
                (@arg PKG_IDENT: +required +takes_value {valid_ident}
                    "A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
                (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
                    "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
            )
        )
        (@subcommand file =>
            (about: "Commands relating to Habitat files")
            (aliases: &["f", "fi", "fil"])
            (@setting ArgRequiredElseHelp)
            (@subcommand upload =>
                (about: "Uploads a file to be shared between members of a Service Group")
                (aliases: &["u", "up", "upl", "uplo", "uploa"])
                (@arg SERVICE_GROUP: +required +takes_value {valid_service_group}
                    "Target service group service.group[@organization] (ex: redis.default or foo.default@bazcorp)")
                (@arg VERSION_NUMBER: +required
                    "A version number (positive integer) for this configuration (ex: 42)")
                (@arg FILE: +required {file_exists} "Path to local file on disk")
                (@arg USER: -u --user +takes_value "Name of the user key")
                (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
                    "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
                (arg: arg_cache_key_path("Path to search for encryption keys. \
                    Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                    directory otherwise."))
            )
        )
        (@subcommand bldr =>
            (about: "Commands relating to Habitat Builder")
            (aliases: &["b", "bl", "bld"])
            (@setting ArgRequiredElseHelp)
            (@subcommand job =>
                (about: "Commands relating to Habitat Builder jobs")
                (aliases: &["j", "jo"])
                (@setting ArgRequiredElseHelp)
                (@subcommand start =>
                    (about: "Schedule a build job or group of jobs")
                    (aliases: &["s", "st", "sta", "star"])
                    (@arg PKG_IDENT: +required +takes_value {valid_ident}
                        "The origin and name of the package to schedule a job for (eg: core/redis)")
                    (arg: arg_target())
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the cli.toml or HAB_BLDR_URL environment variable if defined. \
                        (default: https://bldr.habitat.sh)")
                    (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                    (@arg GROUP: -g --group "Schedule jobs for this package and all of its reverse \
                        dependencies")
                )
                (@subcommand cancel =>
                    (about: "Cancel a build job group and any in-progress builds")
                    (aliases: &["c", "ca", "can", "cance", "cancel"])
                    (@arg GROUP_ID: +required +takes_value
                        "The job group id that was returned from \"hab bldr job start\" \
                        (ex: 771100000000000000)")
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg FORCE: -f --force
                        "Don't prompt for confirmation")
                    (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                )
                (@subcommand promote =>
                    (about: "Promote packages from a completed build job to a specified channel")
                    (aliases: &["p", "pr", "pro", "prom", "promo", "promot"])
                    (@arg GROUP_ID: +required +takes_value
                        "The job id that was returned from \"hab bldr job start\" \
                        (ex: 771100000000000000)")
                    (@arg CHANNEL: +takes_value +required "The target channel name")
                    (@arg ORIGIN: -o --origin +takes_value {valid_origin}
                        "Limit the promotable packages to the specified origin")
                    (@arg INTERACTIVE: -i --interactive
                        "Allow editing the list of promotable packages")
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                )
                (@subcommand demote =>
                    (about: "Demote packages from a completed build job from a specified channel")
                    (aliases: &["d", "de", "dem", "demo", "demot"])
                    (@arg GROUP_ID: +required +takes_value
                        "The job id that was returned from \"hab bldr start\" \
                        (ex: 771100000000000000)")
                    (@arg CHANNEL: +takes_value +required "The name of the channel to demote from")
                    (@arg ORIGIN: -o --origin +takes_value {valid_origin}
                        "Limit the demotable packages to the specified origin")
                    (@arg INTERACTIVE: -i --interactive
                        "Allow editing the list of demotable packages")
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                )
                (@subcommand status =>
                    (about: "Get the status of one or more job groups")
                    (aliases: &["stat", "statu"])
                    (@group status =>
                        (@attributes +required)
                        (@arg GROUP_ID: +takes_value
                            "The group id that was returned from \"hab bldr job start\" \
                            (ex: 771100000000000000)")
                        (@arg ORIGIN: -o --origin +takes_value {valid_origin}
                            "Show the status of recent job groups created in this origin \
                            (default: 10 most recent)")
                    )
                    (@arg LIMIT: -l --limit +takes_value {valid_numeric::<usize>}
                        "Limit how many job groups to retrieve, ordered by most recent \
                        (default: 10)")
                    (@arg SHOW_JOBS: -s --showjobs
                        "Show the status of all build jobs for a retrieved job group")
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                )
            )
            (@subcommand channel =>
                (about: "Commands relating to Habitat Builder channels")
                (aliases: &["c", "ch", "cha", "chan", "chann", "channe"])
                (@setting ArgRequiredElseHelp)
                (@subcommand promote =>
                    (about: "Atomically promotes all packages in channel")
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg ORIGIN: -o --origin +required +takes_value {valid_origin}
                        "The origin for the channels. Default is from \
                        'HAB_ORIGIN' or cli.toml")
                    (@arg SOURCE_CHANNEL: +required +takes_value
                        "The channel from which all packages will be selected for promotion")
                    (@arg TARGET_CHANNEL: +required +takes_value
                        "The channel to which packages will be promoted")
                    (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                )
                (@subcommand demote =>
                    (about: "Atomically demotes selected packages in a target channel")
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg ORIGIN: -o --origin +required +takes_value {valid_origin}
                        "The origin for the channels. Default is from \
                        'HAB_ORIGIN' or cli.toml")
                    (@arg SOURCE_CHANNEL: +required +takes_value
                        "The channel from which all packages will be selected for demotion")
                    (@arg TARGET_CHANNEL: +required +takes_value
                        "The channel selected packages will be removed from")
                    (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                )
                (@subcommand create =>
                    (about: "Creates a new channel")
                    (aliases: &["c", "cr", "cre", "crea", "creat"])
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg CHANNEL: +required + takes_value "The channel name")
                    (@arg ORIGIN: -o --origin +takes_value {valid_origin}
                        "Sets the origin to which the channel will belong. Default is from \
                        'HAB_ORIGIN' or cli.toml")
                )
                (@subcommand destroy =>
                    (about: "Destroys a channel")
                    (aliases: &["d", "de", "des", "dest", "destr", "destro"])
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg CHANNEL: +required + takes_value "The channel name")
                    (@arg ORIGIN: -o --origin +takes_value {valid_origin}
                        "Sets the origin to which the channel belongs. Default is from 'HAB_ORIGIN'\
                        or cli.toml")
                )
                (@subcommand list =>
                    (about: "Lists origin channels")
                    (aliases: &["l", "li", "lis"])
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg ORIGIN: +takes_value {valid_origin}
                        "The origin for which channels will be listed. Default is from 'HAB_ORIGIN'\
                        or cli.toml")
                )
            )
        )
        (@subcommand origin =>
            (about: "Commands relating to Habitat Builder origins")
            (aliases: &["o", "or", "ori", "orig", "origi"])
            (@setting ArgRequiredElseHelp)
            (@subcommand create =>
                (about: "Creates a new Builder origin")
                (aliases: &["cre", "crea"])
                (@arg ORIGIN: +required {valid_origin} "The origin to be created")
                (@arg BLDR_URL: -u --url +takes_value {valid_url}
                    "Specify an alternate Builder endpoint. If not specified, the value will \
                    be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                    https://bldr.habitat.sh)")
                (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
            )
            (@subcommand delete =>
                (about: "Removes an unused/empty origin")
                (aliases: &["del", "dele"])
                (@arg ORIGIN: +required {valid_origin} "The origin name")
                (@arg BLDR_URL: -u --url +takes_value {valid_url}
                    "Specify an alternate Builder endpoint. If not specified, the value will \
                    be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                    https://bldr.habitat.sh)")
                (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
            )
            (@subcommand transfer =>
                (about: "Transfers ownership of an origin to another member of that origin")
                (@arg ORIGIN: +required {valid_origin} "The origin name")
                (@arg BLDR_URL: -u --url +takes_value {valid_url}
                    "Specify an alternate Builder endpoint. If not specified, the value will \
                    be taken from the `HAB_BLDR_URL` environment variable if defined. (default: \
                    https://bldr.habitat.sh)")
                (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                (@arg NEW_OWNER_ACCOUNT: +required +takes_value {non_empty} "The account name of the new origin owner")
            )
            (@subcommand key =>
                (about: "Commands relating to Habitat origin key maintenance")
                (aliases: &["k", "ke"])
                (@setting ArgRequiredElseHelp)
                (@subcommand download =>
                    (about: "Download origin key(s)")
                    (aliases: &["d", "do", "dow", "down", "downl", "downlo", "downloa"])
                    (arg: arg_cache_key_path("Path to download origin keys to. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
                    (@arg ORIGIN: +required {valid_origin} "The origin name" )
                    (@arg REVISION: "The origin key revision")
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg WITH_SECRET: -s --secret
                        "Download origin private key instead of origin public key")
                    (@arg WITH_ENCRYPTION: -e --encryption
                        "Download public encryption key instead of origin public key")
                    (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder \
                        (required for downloading origin private keys)")
                )
                (@subcommand export =>
                    (about: "Outputs the latest origin key contents to stdout")
                    (aliases: &["e", "ex", "exp", "expo", "expor"])
                    (@arg ORIGIN: +required +takes_value {valid_origin})
                    (@arg PAIR_TYPE: -t --type +takes_value {valid_pair_type}
                        "Export either the 'public' or 'secret' key. The 'secret' key is the origin private key")
                    (arg: arg_cache_key_path("Path to export origin keys from. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
                )
                (@subcommand generate =>
                    (about: "Generates a Habitat origin key pair")
                    (aliases: &["g", "ge", "gen", "gene", "gener", "genera", "generat"])
                    (@arg ORIGIN: {valid_origin} "The origin name")
                    (arg: arg_cache_key_path("Path to store generated keys. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
                )
                (@subcommand import =>
                    (about: "Reads a stdin stream containing a public or private origin key \
                        contents and writes the key to disk")
                    (aliases: &["i", "im", "imp", "impo", "impor"])
                    (arg: arg_cache_key_path("Path to import origin keys to. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
                )
                (@subcommand upload =>
                    (@group upload =>
                        (@attributes +required)
                        (@arg ORIGIN: {valid_origin} "The origin name")
                        (@arg PUBLIC_FILE: --pubfile +takes_value {file_exists}
                            "Path to a local public origin key file on disk")
                    )
                    (about: "Upload origin keys to Builder")
                    (aliases: &["u", "up", "upl", "uplo", "uploa"])
                    (arg: arg_cache_key_path("Path to upload origin keys from. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
                    (@arg WITH_SECRET: -s --secret conflicts_with[PUBLIC_FILE]
                        "Upload origin private key in addition to the public key")
                    (@arg SECRET_FILE: --secfile +takes_value {file_exists} conflicts_with[ORIGIN]
                        "Path to a local origin private key file on disk")
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                )
            )
            (@subcommand secret =>
                (about: "Commands related to secret management")
                (@setting ArgRequiredElseHelp)
                (@subcommand upload =>
                    (about: "Create and upload a secret for your origin.")
                    (@arg KEY_NAME: +required +takes_value
                        "The name of the variable key to be injected into the studio. \
                        Ex: KEY=\"some_value\"")
                    (@arg SECRET: +required +takes_value
                        "The contents of the variable to be injected into the studio.")
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                    (@arg ORIGIN: -o --origin +takes_value {valid_origin}
                        "The origin for which the secret will be uploaded. Default is from \
                        'HAB_ORIGIN' or cli.toml")
                    (arg: arg_cache_key_path("Path to public encryption key. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
                )
                (@subcommand delete =>
                    (about: "Delete a secret for your origin")
                    (@arg KEY_NAME: +required +takes_value
                        "The name of the variable key to be injected into the studio.")
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                    (@arg ORIGIN: -o --origin +takes_value {valid_origin}
                        "The origin for which the secret will be deleted. Default is from \
                        'HAB_ORIGIN' or cli.toml")
                )
                (@subcommand list =>
                    (about: "List all secrets for your origin")
                    (@arg BLDR_URL: -u --url +takes_value {valid_url}
                        "Specify an alternate Builder endpoint. If not specified, the value will \
                        be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                        https://bldr.habitat.sh)")
                    (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                    (@arg ORIGIN: -o --origin +takes_value {valid_origin}
                        "The origin for which secrets will be listed. Default is from 'HAB_ORIGIN' \
                        or cli.toml")
                )
            )
        )
        (@subcommand pkg =>
            (about: "Commands relating to Habitat packages")
            (aliases: &["p", "pk", "package"])
            (@setting ArgRequiredElseHelp)
            (@subcommand binds =>
                (about: "Displays the binds for a service")
                (@arg PKG_IDENT: +required +takes_value {valid_ident}
                    "A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
            )
            (@subcommand binlink =>
                (about: "Creates a binlink for a package binary in a common 'PATH' location")
                (aliases: &["bi", "bin", "binl", "binli", "binlin"])
                (@arg PKG_IDENT: +required +takes_value {valid_ident}
                    "A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
                (@arg BINARY: +takes_value
                    "The command to binlink (ex: bash)")
                (@arg DEST_DIR: -d --dest +takes_value {non_empty} env(BINLINK_DIR_ENVVAR) default_value(DEFAULT_BINLINK_DIR)
                    "Sets the destination directory")
                (@arg FORCE: -f --force "Overwrite existing binlinks")
            )
            (subcommand: sub_pkg_build())
            (@subcommand config =>
                (about: "Displays the default configuration options for a service")
                (aliases: &["conf", "cfg"])
                (@arg PKG_IDENT: +required +takes_value {valid_ident}
                    "A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
            )
            (subcommand: sub_pkg_download())
            (@subcommand env =>
                (about: "Prints the runtime environment of a specific installed package")
                (@arg PKG_IDENT: +required +takes_value {valid_ident}
                    "A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
            )
            (@subcommand exec =>
                (about: "Executes a command using the 'PATH' context of an installed package")
                (aliases: &["exe"])
                (@arg PKG_IDENT: +required +takes_value {valid_ident}
                    "A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
                (@arg CMD: +required +takes_value
                    "The command to execute (ex: ls)")
                (@arg ARGS: +takes_value +multiple
                    "Arguments to the command (ex: -l /tmp)")
            )
            (@subcommand export =>
                (about: "Exports the package to the specified format")
                (aliases: &["exp"])
                (@arg FORMAT: +required +takes_value
                    "The export format (ex: aci, cf, docker, kubernetes, mesos, or tar)")
                (@arg PKG_IDENT: +required +takes_value {valid_ident}
                    "A package identifier (ex: core/redis, core/busybox-static/1.42.2) or \
                    filepath to a Habitat Artifact \
                    (ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
                (@arg BLDR_URL: -u --url +takes_value {valid_url}
                    "Specify an alternate Builder endpoint. If not specified, the value will \
                    be taken from the HAB_BLDR_URL environment variable if defined. (default: \
                    https://bldr.habitat.sh)")
                (@arg CHANNEL: --channel -c +takes_value default_value[stable] env(ChannelIdent::ENVVAR)
                    "Retrieve the container's package from the specified release channel")
            )
            (@subcommand hash =>
                (about: "Generates a blake2b hashsum from a target at any given filepath")
                (aliases: &["ha", "has"])
                (@arg SOURCE: +takes_value {file_exists} "A filepath of the target")
            )
            (subcommand: sub_pkg_install(feature_flags).aliases(
                &["i", "in", "ins", "inst", "insta", "instal"]))
            (@subcommand path =>
                (about: "Prints the path to a specific installed release of a package")
                (aliases: &["p", "pa", "pat"])
                (@arg PKG_IDENT: +required +takes_value {valid_ident}
                    "A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
            )
            (@subcommand list =>
                (about: "List all versions of installed packages")
                (aliases: &["li"])
                (@group prefix =>
                    (@attributes +required)
                    (@arg ALL: -a --all
                        "List all installed packages")
                    (@arg ORIGIN: -o --origin +takes_value {valid_origin}
                        "An origin to list")
                    (@arg PKG_IDENT: +takes_value {valid_ident}
                        "A package identifier (ex: core/redis, core/busybox-static/1.42.2).")
                )
            )
            (@subcommand provides =>
                (about: "Search installed Habitat packages for a given file")
                (@arg FILE: +required +takes_value
                    "File name to find")
                (@arg FULL_RELEASES: -r
                    "Show fully qualified package names \
                    (ex: core/busybox-static/1.24.2/20160708162350)")
                (@arg FULL_PATHS: -p "Show full path to file")
            )
            (@subcommand search =>
                (about: "Search for a package in Builder")
                (@arg SEARCH_TERM: +required +takes_value "Search term")
                (@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
                    endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
                    environment variable if defined. (default: https://bldr.habitat.sh)")
                (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                (@arg LIMIT: -l --limit +takes_value default_value("50") {valid_numeric::<usize>}
                    "Limit how many packages to retrieve")
            )
            (@subcommand sign =>
                (about: "Signs an archive with an origin key, generating a Habitat Artifact")
                (aliases: &["s", "si", "sig"])
                (@arg ORIGIN: --origin +takes_value {valid_origin} "Origin key used to create signature")
                (@arg SOURCE: +required {file_exists}
                    "A path to a source archive file \
                    (ex: /home/acme-redis-3.0.7-21120102031201.tar.xz)")
                (@arg DEST: +required
                    "The destination path to the signed Habitat Artifact \
                    (ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
                (arg: arg_cache_key_path("Path to search for origin keys. \
                    Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                    directory otherwise."))
            )
            (@subcommand uninstall =>
                (about: "Safely uninstall a package and dependencies from the local filesystem")
                (aliases: &["un", "unin"])
                (@arg PKG_IDENT: +required +takes_value {valid_ident}
                    "A package identifier (ex: core/redis, core/busybox-static/1.42.2/21120102031201)")
                (@arg DRYRUN: -d --dryrun "Just show what would be uninstalled, don't actually do it")
                (@arg EXCLUDE: --exclude +takes_value +multiple {valid_ident}
                    "Identifier of one or more packages that should not be uninstalled. \
                    (ex: core/redis, core/busybox-static/1.42.2/21120102031201)")
                (@arg NO_DEPS: --("no-deps") "Don't uninstall dependencies")
            )
            // alas no hyphens in subcommand names..
            // https://github.com/clap-rs/clap/issues/1297
            (@subcommand bulkupload =>
                (about: "Bulk Uploads Habitat Artifacts to a Depot from a local directory.")
                (aliases: &["bul", "bulk"])
                (@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Depot \
                    endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
                    environment variable if defined. (default: https://bldr.habitat.sh)")
                (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                (@arg CHANNEL: --channel -c +takes_value
                    "Optional additional release channel to upload package to. \
                    Packages are always uploaded to `unstable`, regardless \
                    of the value of this option.")
                (@arg FORCE: --force "Skip checking availability of package and \
                    force uploads, potentially overwriting a stored copy of a package.")
                (@arg AUTO_BUILD: --("auto-build") "Enable auto-build for all packages in this upload. \
                    Only applicable to SaaS Builder.")
                (@arg AUTO_CREATE_ORIGINS: --("auto-create-origins") "Skip the confirmation prompt and \
                    automatically create origins that do not exist in the target Builder.")
                (@arg UPLOAD_DIRECTORY: +required {dir_exists}
                    "Directory Path from which artifacts will be uploaded.")
            )
            (@subcommand upload =>
                (about: "Uploads a local Habitat Artifact to Builder")
                (aliases: &["u", "up", "upl", "uplo", "uploa"])
                (@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
                    endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
                    environment variable if defined. (default: https://bldr.habitat.sh)")
                (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
                (@arg CHANNEL: --channel -c +takes_value
                    "Optional additional release channel to upload package to. \
                    Packages are always uploaded to `unstable`, regardless \
                    of the value of this option.")
                (@arg FORCE: --force "Skips checking availability of package and \
                    force uploads, potentially overwriting a stored copy of a package. \
                    (default: false)")
                (@arg NO_BUILD: --("no-build") "Disable auto-build for all packages in this upload.")
                (@arg HART_FILE: +required +multiple {file_exists}
                    "One or more filepaths to a Habitat Artifact \
                    (ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
                (arg: arg_cache_key_path("Path to search for public origin keys to upload. \
                    Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                    directory otherwise."))
            )
            (@subcommand delete =>
                (about: "Removes a package from Builder")
                (aliases: &["del", "dele"])
                (@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
                    endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
                    environment variable if defined. (default: https://bldr.habitat.sh)")
                (@arg PKG_IDENT: +required +takes_value {valid_fully_qualified_ident} "A fully qualified package identifier \
                    (ex: core/busybox-static/1.42.2/20170513215502)")
                (arg: arg_target())
                (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
            )
            (@subcommand promote =>
                (about: "Promote a package to a specified channel")
                (aliases: &["pr", "pro", "promo", "promot"])
                (@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
                    endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
                    environment variable if defined. (default: https://bldr.habitat.sh)")
                (@arg PKG_IDENT: +required +takes_value {valid_fully_qualified_ident} "A fully qualified package identifier \
                    (ex: core/busybox-static/1.42.2/20170513215502)")
                (@arg CHANNEL: +required +takes_value "Promote to the specified release channel")
                (arg: arg_target())
                (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
            )
            (@subcommand demote =>
                (about: "Demote a package from a specified channel")
                (aliases: &["de", "dem", "demo", "demot"])
                (@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
                    endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
                    environment variable if defined. (default: https://bldr.habitat.sh)")
                (@arg PKG_IDENT: +required +takes_value {valid_fully_qualified_ident} "A fully qualified package identifier \
                    (ex: core/busybox-static/1.42.2/20170513215502)")
                (@arg CHANNEL: +required +takes_value "Demote from the specified release channel")
                (arg: arg_target())
                (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
            )
            (@subcommand channels =>
                (about: "Find out what channels a package belongs to")
                (aliases: &["ch", "cha", "chan", "chann", "channe", "channel"])
                (@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
                    endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
                    environment variable if defined. (default: https://bldr.habitat.sh)")
                (@arg PKG_IDENT: +required +takes_value {valid_fully_qualified_ident} "A fully qualified package identifier \
                    (ex: core/busybox-static/1.42.2/20170513215502)")
                (arg: arg_target())
                (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
            )
            (@subcommand verify =>
                (about: "Verifies a Habitat Artifact with an origin key")
                (aliases: &["v", "ve", "ver", "veri", "verif"])
                (@arg SOURCE: +required {file_exists} "A path to a Habitat Artifact \
                    (ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
                (arg: arg_cache_key_path("Path to search for public origin keys for verification. \
                    Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                    directory otherwise."))
            )
            (@subcommand header =>
                (about: "Returns the Habitat Artifact header")
                (aliases: &["hea", "head", "heade", "header"])
                (@setting Hidden)
                (@arg SOURCE: +required {file_exists} "A path to a Habitat Artifact \
                    (ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
            )
            (@subcommand info =>
                (about: "Returns the Habitat Artifact information")
                (aliases: &["inf", "info"])
                (@arg TO_JSON: -j --json "Output will be rendered in json")
                (@arg SOURCE: +required {file_exists} "A path to a Habitat Artifact \
                    (ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
            )
            (@subcommand dependencies =>
                (about: "Returns the Habitat Artifact dependencies. By default it will return \
                    the direct dependencies of the package")
                (aliases: &["dep", "deps"])
                (@arg TRANSITIVE: -t --transitive "Show transitive dependencies")
                (@arg REVERSE: -r --reverse "Show packages which are dependant on this one")
                (@arg PKG_IDENT: +required +takes_value {valid_ident}
                    "A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
            )
        )
        (@subcommand plan =>
            (about: "Commands relating to plans and other app-specific configuration.")
            (aliases: &["pl", "pla"])
            (@setting ArgRequiredElseHelp)
            (@subcommand init =>
                (about: "Generates common package specific configuration files. Executing without \
                    argument will create a `habitat` directory in your current folder for the \
                    plan. If `PKG_NAME` is specified it will create a folder with that name. \
                    Environment variables (those starting with 'pkg_') that are set will be used \
                    in the generated plan")
                (aliases: &["i", "in", "ini"])
                (@arg PKG_NAME: +takes_value "Name for the new app")
                (@arg ORIGIN: --origin -o +takes_value {valid_origin} "Origin for the new app")
                (@arg MIN: --min -m "Create a minimal plan file")
                (@arg SCAFFOLDING: --scaffolding -s +takes_value
                    "Specify explicit Scaffolding for your app (ex: node, ruby)")
            )
            (@subcommand render =>
                (about: "Renders plan config files")
                (aliases: &["r", "re", "ren", "rend", "rende"])
                (@arg TEMPLATE_PATH: +required {file_exists} "Path to config to render")
                (@arg DEFAULT_TOML: -d --("default-toml") +takes_value default_value("./default.toml") "Path to default.toml")
                (@arg USER_TOML: -u --("user-toml") +takes_value "Path to user.toml, defaults to none")
                (@arg MOCK_DATA: -m --("mock-data") +takes_value "Path to json file with mock data for template, defaults to none")
                (@arg PRINT: -p --("print") "Prints config to STDOUT")
                (@arg RENDER_DIR: -r --("render-dir") +takes_value default_value("./results") "Path to render templates")
                (@arg NO_RENDER: -n --("no-render") "Don't write anything to disk, ignores --render-dir")
                (@arg QUIET: -q --("no-verbose") --quiet
                    "Don't print any helper messages. When used with `--print` will only print config file")
            )
        )
        (@subcommand ring =>
            (about: "Commands relating to Habitat rings")
            (aliases: &["r", "ri", "rin"])
            (@setting ArgRequiredElseHelp)
            (@subcommand key =>
                (about: "Commands relating to Habitat ring keys")
                (aliases: &["k", "ke"])
                (@setting ArgRequiredElseHelp)
                (@subcommand export =>
                    (about: "Outputs the latest ring key contents to stdout")
                    (aliases: &["e", "ex", "exp", "expo", "expor"])
                    (@arg RING: +required +takes_value "Ring key name")
                    (arg: arg_cache_key_path("Path to search for keys. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
                )
                (@subcommand import =>
                    (about: "Reads a stdin stream containing ring key contents and writes \
                        the key to disk")
                    (aliases: &["i", "im", "imp", "impo", "impor"])
                    (arg: arg_cache_key_path("Path to store imported keys. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
                )
                (@subcommand generate =>
                    (about: "Generates a Habitat ring key")
                    (aliases: &["g", "ge", "gen", "gene", "gener", "genera", "generat"])
                    (@arg RING: +required +takes_value "Ring key name")
                    (arg: arg_cache_key_path("Path to store generated keys. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
                )
            )
        )
        (subcommand: sup_commands(feature_flags))
        (@subcommand svc =>
            (about: "Commands relating to Habitat services")
            (aliases: &["sv", "ser", "serv", "service"])
            (@setting ArgRequiredElseHelp)
            (@subcommand key =>
                (about: "Commands relating to Habitat service keys")
                (aliases: &["k", "ke"])
                (@setting ArgRequiredElseHelp)
                (@subcommand generate =>
                    (about: "Generates a Habitat service key")
                    (aliases: &["g", "ge", "gen", "gene", "gener", "genera", "generat"])
                    (@arg SERVICE_GROUP: +required +takes_value {valid_service_group}
                        "Target service group service.group[@organization] (ex: redis.default or foo.default@bazcorp)")
                    (@arg ORG: "The service organization")
                    (arg: arg_cache_key_path("Path to store generated keys. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
                )
            )
            (subcommand: sub_svc_load().aliases(&["l", "lo", "loa"]))
            (subcommand: sub_svc_start().aliases(&["star"]))
            (subcommand: sub_svc_status().aliases(&["stat", "statu"]))
            (subcommand: sub_svc_stop().aliases(&["sto"]))
            (subcommand: sub_svc_unload().aliases(&["u", "un", "unl", "unlo", "unloa"]))
        )
        (@subcommand studio =>
            (about: "Commands relating to Habitat Studios")
            (aliases: &["stu", "stud", "studi"])
        )
        (@subcommand supportbundle =>
            (about: "Create a tarball of Habitat Supervisor data to send to support")
            (aliases: &["supp", "suppo", "suppor", "support-bundle"])
        )
        (@subcommand user =>
            (about: "Commands relating to Habitat users")
            (aliases: &["u", "us", "use"])
            (@setting ArgRequiredElseHelp)
            (@subcommand key =>
                (about: "Commands relating to Habitat user keys")
                (aliases: &["k", "ke"])
                (@setting ArgRequiredElseHelp)
                (@subcommand generate =>
                    (about: "Generates a Habitat user key")
                    (aliases: &["g", "ge", "gen", "gene", "gener", "genera", "generat"])
                    (@arg USER: +required +takes_value "Name of the user key")
                    (arg: arg_cache_key_path("Path to store generated keys. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
                )
            )
        )
        // Hidden top-level aliases, registered last so they never shadow
        // the real subcommands above.
        (subcommand: alias_apply)
        (subcommand: alias_install)
        (subcommand: alias_run())
        (subcommand: alias_setup)
        (subcommand: alias_start)
        (subcommand: alias_stop)
        (subcommand: alias_term())
        (after_help: "\nALIASES:\
            \n    apply      Alias for: 'config apply'\
            \n    install    Alias for: 'pkg install'\
            \n    run        Alias for: 'sup run'\
            \n    setup      Alias for: 'cli setup'\
            \n    start      Alias for: 'svc start'\
            \n    stop       Alias for: 'svc stop'\
            \n    term       Alias for: 'sup term'\
            \n"
        )
    )
}
/// Top-level `run` alias for `hab sup run`.
fn alias_run() -> App<'static, 'static> {
    clap_app!(@subcommand run =>
        (about: "Run the Habitat Supervisor")
        // Hidden so the alias does not clutter the top-level `hab` help output;
        // the canonical location is documented in the ALIASES section instead.
        (@setting Hidden)
    )
}
/// Top-level `term` alias for `hab sup term`.
fn alias_term() -> App<'static, 'static> {
    clap_app!(@subcommand term =>
        (about: "Gracefully terminate the Habitat Supervisor and all of its running services")
        // Hidden so the alias does not clutter the top-level `hab` help output;
        // the canonical location is documented in the ALIASES section instead.
        (@setting Hidden)
    )
}
/// Builds the `hab cli setup` subcommand.
fn sub_cli_setup() -> App<'static, 'static> {
    clap_app!(@subcommand setup =>
        (about: "Sets up the CLI with reasonable defaults.")
        (arg: arg_cache_key_path("Path to search for or create keys in. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
    )
}
/// Defines all of the `hab sup *` subcommands in one place.
///
/// This removes the need to duplicate this in `hab-sup`. Public because
/// `hab-sup` builds its CLI from this same definition.
pub fn sup_commands(feature_flags: FeatureFlag) -> App<'static, 'static> {
    // The 'sup' App name here is significant for the `hab` binary as it
    // is inserted as a named subcommand. For the `hab-sup` binary, it is
    // the top-level App name (not a named subcommand) and therefore is not
    // significant since we override `usage` below.
    clap_app!(("sup") =>
        (about: "The Habitat Supervisor")
        (version: super::VERSION)
        (author: "\nAuthors: The Habitat Maintainers <humans@habitat.sh>\n")
        // set custom usage string, otherwise the binary
        // is displayed as the clap_app name, which may or may not be different.
        // see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
        (usage: "hab sup <SUBCOMMAND>")
        (@setting VersionlessSubcommands)
        (@setting SubcommandRequiredElseHelp)
        (subcommand: sub_sup_bash().aliases(&["b", "ba", "bas"]))
        (subcommand: sub_sup_depart().aliases(&["d", "de", "dep", "depa", "depart"]))
        (subcommand: sub_sup_run(feature_flags).aliases(&["r", "ru"]))
        (subcommand: sub_sup_secret().aliases(&["sec", "secr"]))
        // NOTE: a previous `.aliases(&[])` call here was a no-op and was removed.
        (subcommand: sub_sup_sh())
        (subcommand: sub_svc_status().aliases(&["stat", "statu"]))
        (subcommand: sub_sup_term().aliases(&["ter"]))
    )
}
/// Builds the `hab cli completers` subcommand.
///
/// The SHELL argument is attached with the builder API rather than the
/// `clap_app!` macro because the macro cannot express a restricted set of
/// possible values, and we want clap to reject unsupported shells up front.
fn sub_cli_completers() -> App<'static, 'static> {
    let sub = clap_app!(@subcommand completers =>
        (about: "Creates command-line completers for your shell."));
    let supported_shells = ["bash", "fish", "zsh", "powershell"];
    let shell_arg = Arg::with_name("SHELL").help("The name of the shell you want to generate the \
                                                  command-completion. Supported Shells: bash, fish, zsh, \
                                                  powershell")
                                           .short("s")
                                           .long("shell")
                                           .required(true)
                                           .takes_value(true)
                                           .possible_values(&supported_shells);
    sub.arg(shell_arg)
}
// We need a default_value so that the argument can be required and validated. We hide the
// default because it's a special value that will be internally mapped according to the
// user type. This is to allow us to apply consistent validation to the env var override.
/// Returns the shared `CACHE_KEY_PATH` argument, with a caller-supplied help
/// string describing the context-specific default location.
///
/// We need a default_value so that the argument can be required and validated.
/// We hide the default because it's a special value that will be internally
/// mapped according to the user type. This is to allow us to apply consistent
/// validation to the env var override.
fn arg_cache_key_path(help_text: &'static str) -> Arg<'static, 'static> {
    Arg::with_name("CACHE_KEY_PATH").long("cache-key-path")
                                    .required(true)
                                    .validator(non_empty)
                                    .env(CACHE_KEY_PATH_ENV_VAR)
                                    .default_value(CACHE_KEY_PATH)
                                    .hide_default_value(true)
                                    // `help_text` is already `&'static str`; the previous
                                    // `&help_text` passed `&&str` and relied on deref coercion
                                    // (clippy::needless_borrow).
                                    .help(help_text)
}
/// Returns the shared `PKG_TARGET` argument used by package-related
/// subcommands.
///
/// The value is validated as a known package target and may also be supplied
/// via the environment variable named by `PACKAGE_TARGET_ENVVAR`.
fn arg_target() -> Arg<'static, 'static> {
    Arg::with_name("PKG_TARGET").takes_value(true)
                                .validator(valid_target)
                                .env(PACKAGE_TARGET_ENVVAR)
                                // Fixed: the help text previously ended with an
                                // unclosed parenthesis ("...system appropriate target").
                                .help("A package target (ex: x86_64-windows) (default: system \
                                       appropriate target)")
}
/// Builds the `hab pkg build` subcommand.
///
/// The REUSE and DOCKER flags are only added when a native/local Studio is
/// supported on this platform.
fn sub_pkg_build() -> App<'static, 'static> {
    let mut sub = clap_app!(@subcommand build =>
        (about: "Builds a Plan using a Studio")
        (@arg HAB_ORIGIN_KEYS: -k --keys +takes_value
            "Installs secret origin keys (ex: \"unicorn\", \"acme,other,acme-ops\")")
        (@arg HAB_STUDIO_ROOT: -r --root +takes_value
            "Sets the Studio root (default: /hab/studios/<DIR_NAME>)")
        (@arg SRC_PATH: -s --src +takes_value
            "Sets the source path (default: $PWD)")
        (@arg PLAN_CONTEXT: +required +takes_value
            "A directory containing a plan file \
            or a `habitat/` directory which contains the plan file")
        (arg: arg_cache_key_path("Path to search for origin keys. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
    );
    // Only a truly native/local Studio can be reused--the Docker implementation will always be
    // ephemeral
    if studio::native_studio_support() {
        sub = sub.arg(Arg::with_name("REUSE").help("Reuses a previous Studio for the build \
                                                    (default: clean up before building)")
                                             .short("R")
                                             .long("reuse"))
                 .arg(Arg::with_name("DOCKER").help("Uses a Dockerized Studio for the build")
                                              .short("D")
                                              .long("docker"));
    }
    sub
}
/// Builds the `hab pkg download` subcommand.
///
/// Package identifiers may be given directly, via newline-separated files, or
/// via a TOML file; CHANNEL and PKG_TARGET are overridden by values in a TOML
/// file when one is supplied.
fn sub_pkg_download() -> App<'static, 'static> {
    // Return the macro result directly instead of binding it to a local and
    // immediately returning it (clippy::let_and_return).
    clap_app!(@subcommand download =>
        (about: "Download Habitat artifacts (including dependencies and keys) from Builder")
        (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
        (@arg BLDR_URL: --url -u +takes_value {valid_url}
            "Specify an alternate Builder endpoint. If not specified, the value will \
             be taken from the HAB_BLDR_URL environment variable if defined.")
        (@arg CHANNEL: --channel -c +takes_value default_value[stable] env(ChannelIdent::ENVVAR)
            "Download from the specified release channel. Overridden if channel is specified in toml file.")
        (@arg DOWNLOAD_DIRECTORY: --("download-directory") +takes_value "The path to store downloaded artifacts")
        (@arg PKG_IDENT_FILE: --file +takes_value +multiple {valid_ident_or_toml_file}
            "File with newline separated package identifiers, or TOML file (ending with .toml extension)")
        (@arg PKG_IDENT: +multiple {valid_ident}
            "One or more Habitat package identifiers (ex: acme/redis)")
        (@arg PKG_TARGET: --target -t +takes_value {valid_target}
            "Target architecture to fetch. E.g. x86_64-linux. Overridden if architecture is specified in toml file.")
        (@arg VERIFY: --verify
            "Verify package integrity after download (Warning: this can be slow)")
    )
}
/// Builds the `hab pkg install` subcommand.
///
/// The OFFLINE and IGNORE_LOCAL flags are feature-gated and only added when
/// the corresponding `FeatureFlag` bits are set.
fn sub_pkg_install(feature_flags: FeatureFlag) -> App<'static, 'static> {
    let mut sub = clap_app!(@subcommand install =>
        (about: "Installs a Habitat package from Builder or locally from a Habitat Artifact")
        (@arg BLDR_URL: --url -u +takes_value {valid_url}
            "Specify an alternate Builder endpoint. If not specified, the value will \
             be taken from the HAB_BLDR_URL environment variable if defined. (default: \
             https://bldr.habitat.sh)")
        (@arg CHANNEL: --channel -c +takes_value default_value[stable] env(ChannelIdent::ENVVAR)
            "Install from the specified release channel")
        (@arg PKG_IDENT_OR_ARTIFACT: +required +multiple
            "One or more Habitat package identifiers (ex: acme/redis) and/or filepaths \
             to a Habitat Artifact (ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
        (@arg BINLINK: -b --binlink
            "Binlink all binaries from installed package(s) into BINLINK_DIR")
        (@arg BINLINK_DIR: --("binlink-dir") +takes_value {non_empty} env(BINLINK_DIR_ENVVAR)
            default_value(DEFAULT_BINLINK_DIR) "Binlink all binaries from installed package(s) into BINLINK_DIR")
        (@arg FORCE: -f --force "Overwrite existing binlinks")
        (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
        (@arg IGNORE_INSTALL_HOOK: --("ignore-install-hook") "Do not run any install hooks")
    );
    if feature_flags.contains(FeatureFlag::OFFLINE_INSTALL) {
        sub = sub.arg(Arg::with_name("OFFLINE").help("Install packages in offline mode")
                                               .long("offline"));
    };
    if feature_flags.contains(FeatureFlag::IGNORE_LOCAL) {
        sub = sub.arg(Arg::with_name("IGNORE_LOCAL").help("Do not use locally-installed \
                                                           packages when a corresponding \
                                                           package cannot be installed from \
                                                           Builder")
                                                    .long("ignore-local"));
    };
    sub
}
/// Builds the `hab config apply` subcommand.
///
/// FILE defaults to reading from stdin when omitted or given as "-".
fn sub_config_apply() -> App<'static, 'static> {
    clap_app!(@subcommand apply =>
        (about: "Sets a configuration to be shared by members of a Service Group")
        (@arg SERVICE_GROUP: +required {valid_service_group}
            "Target service group service.group[@organization] (ex: redis.default or foo.default@bazcorp)")
        (@arg VERSION_NUMBER: +required
            "A version number (positive integer) for this configuration (ex: 42)")
        (@arg FILE: {file_exists_or_stdin}
            "Path to local file on disk (ex: /tmp/config.toml, default: <stdin>)")
        (@arg USER: -u --user +takes_value "Name of a user key to use for encryption")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
        (arg: arg_cache_key_path("Path to search for encryption keys. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
    )
}
// the following sup related functions are
// public due to their utilization in `hab-sup`
// for consistency, all supervisor related clap subcommands are defined in this module
/// Builds the `hab sup depart` subcommand.
///
/// Public due to its utilization in `hab-sup`; for consistency, all
/// Supervisor-related clap subcommands are defined in this module.
pub fn sub_sup_depart() -> App<'static, 'static> {
    clap_app!(@subcommand depart =>
        (about: "Depart a Supervisor from the gossip ring; kicking and banning the target \
            from joining again with the same member-id")
        (@arg MEMBER_ID: +required +takes_value "The member-id of the Supervisor to depart")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
    )
}
/// Builds the `hab sup secret` subcommand group (currently only `generate`).
///
/// Public due to its utilization in `hab-sup`.
pub fn sub_sup_secret() -> App<'static, 'static> {
    clap_app!(@subcommand secret =>
        (about: "Commands relating to a Habitat Supervisor's Control Gateway secret")
        (@setting ArgRequiredElseHelp)
        (@subcommand generate =>
            (about: "Generate a secret key to use as a Supervisor's Control Gateway secret")
        )
    )
}
/// Builds the `hab sup bash` subcommand.
///
/// Public due to its utilization in `hab-sup`.
pub fn sub_sup_bash() -> App<'static, 'static> {
    clap_app!(@subcommand bash =>
        (about: "Start an interactive Bash-like shell")
        // set custom usage string, otherwise the binary
        // is displayed confusingly as `hab-sup`
        // see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
        (usage: "hab sup bash")
    )
}
/// Builds the `hab sup run` subcommand — the main entry point for running the
/// Supervisor, optionally loading an initial service at startup.
///
/// Event-stream and shutdown-timeout options are appended by the shared
/// helpers at the bottom. Public due to its utilization in `hab-sup`.
pub fn sub_sup_run(_feature_flags: FeatureFlag) -> App<'static, 'static> {
    let sub = clap_app!(@subcommand run =>
        (about: "Run the Habitat Supervisor")
        // set custom usage string, otherwise the binary
        // is displayed confusingly as `hab-sup`
        // see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
        (usage: "hab sup run [FLAGS] [OPTIONS] [--] [PKG_IDENT_OR_ARTIFACT]")
        (@arg LISTEN_GOSSIP: --("listen-gossip") env(GossipListenAddr::ENVVAR) default_value(GossipListenAddr::default_as_str()) {valid_socket_addr}
            "The listen address for the Gossip System Gateway.")
        (@arg LOCAL_GOSSIP_MODE: --("local-gossip-mode") conflicts_with("LISTEN_GOSSIP") conflicts_with("PEER") conflicts_with("PEER_WATCH_FILE")
            "Start the supervisor in local mode.")
        (@arg LISTEN_HTTP: --("listen-http") env(HttpListenAddr::ENVVAR) default_value(HttpListenAddr::default_as_str()) {valid_socket_addr}
            "The listen address for the HTTP Gateway.")
        (@arg HTTP_DISABLE: --("http-disable") -D
            "Disable the HTTP Gateway completely [default: false]")
        (@arg LISTEN_CTL: --("listen-ctl") env(ListenCtlAddr::ENVVAR) default_value(ListenCtlAddr::default_as_str()) {valid_socket_addr}
            "The listen address for the Control Gateway. If not specified, the value will \
             be taken from the HAB_LISTEN_CTL environment variable if defined. [default: 127.0.0.1:9632]")
        (@arg ORGANIZATION: --org +takes_value
            "The organization that the Supervisor and its subsequent services are part of.")
        (@arg PEER: --peer +takes_value +multiple
            "The listen address of one or more initial peers (IP[:PORT])")
        (@arg PERMANENT_PEER: --("permanent-peer") -I "If this Supervisor is a permanent peer")
        (@arg PEER_WATCH_FILE: --("peer-watch-file") +takes_value conflicts_with("PEER")
            "Watch this file for connecting to the ring"
        )
        (arg: arg_cache_key_path("Path to search for encryption keys. \
                        Default value is hab/cache/keys if root and .hab/cache/keys under the home \
                        directory otherwise."))
        (@arg RING: --ring -r env(RING_ENVVAR) conflicts_with("RING_KEY") {non_empty}
            "The name of the ring used by the Supervisor when running with wire encryption. \
             (ex: hab sup run --ring myring)")
        (@arg RING_KEY: --("ring-key") env(RING_KEY_ENVVAR) conflicts_with("RING") +hidden {non_empty}
            "The contents of the ring key when running with wire encryption. \
             (Note: This option is explicitly undocumented and for testing purposes only. Do not use it in a production system. Use the corresponding environment variable instead.)
             (ex: hab sup run --ring-key 'SYM-SEC-1 \
             foo-20181113185935 \
             GCrBOW6CCN75LMl0j2V5QqQ6nNzWm6and9hkKBSUFPI=')")
        (@arg CHANNEL: --channel +takes_value default_value[stable]
            "Receive Supervisor updates from the specified release channel")
        (@arg BLDR_URL: -u --url +takes_value {valid_url}
            "Specify an alternate Builder endpoint. If not specified, the value will \
             be taken from the HAB_BLDR_URL environment variable if defined. (default: \
             https://bldr.habitat.sh)")
        (@arg CONFIG_DIR: --("config-from") +takes_value {dir_exists}
            "Use package config from this path, rather than the package itself")
        (@arg AUTO_UPDATE: --("auto-update") -A "Enable automatic updates for the Supervisor \
            itself")
        (@arg KEY_FILE: --key +takes_value {file_exists} requires[CERT_FILE]
            "Used for enabling TLS for the HTTP gateway. Read private key from KEY_FILE. \
             This should be a RSA private key or PKCS8-encoded private key, in PEM format.")
        (@arg CERT_FILE: --certs +takes_value {file_exists} requires[KEY_FILE]
            "Used for enabling TLS for the HTTP gateway. Read server certificates from CERT_FILE. \
             This should contain PEM-format certificates in the right order (the first certificate \
             should certify KEY_FILE, the last should be a root CA).")
        (@arg CA_CERT_FILE: --("ca-certs") +takes_value {file_exists} requires[CERT_FILE] requires[KEY_FILE]
            "Used for enabling client-authentication with TLS for the HTTP gateway. Read CA certificate from CA_CERT_FILE. \
             This should contain PEM-format certificate that can be used to validate client requests.")
        // === Optional arguments to additionally load an initial service for the Supervisor
        (@arg PKG_IDENT_OR_ARTIFACT: +takes_value "Load the given Habitat package as part of \
            the Supervisor startup specified by a package identifier \
            (ex: core/redis) or filepath to a Habitat Artifact \
            (ex: /home/core-redis-3.0.7-21120102031201-x86_64-linux.hart).")
        (@arg GROUP: --group +takes_value
            "The service group; shared config and topology [default: default].")
        (@arg TOPOLOGY: --topology -t +takes_value possible_value[standalone leader]
            "Service topology; [default: none]")
        (@arg STRATEGY: --strategy -s +takes_value {valid_update_strategy}
            "The update strategy; [default: none] [values: none, at-once, rolling]")
        (@arg BIND: --bind +takes_value +multiple
            "One or more service groups to bind to a configuration")
        (@arg BINDING_MODE: --("binding-mode") +takes_value {valid_binding_mode}
            "Governs how the presence or absence of binds affects service startup. `strict` blocks \
             startup until all binds are present. [default: strict] [values: relaxed, strict]")
        (@arg VERBOSE: -v "Verbose output; shows file and line/column numbers")
        (@arg NO_COLOR: --("no-color") "Turn ANSI color off")
        (@arg JSON: --("json-logging") "Use structured JSON logging for the Supervisor. \
            Implies NO_COLOR")
        (@arg HEALTH_CHECK_INTERVAL: --("health-check-interval") -i +takes_value {valid_health_check_interval}
            "The interval (seconds) on which to run health checks [default: 30]")
        (@arg SYS_IP_ADDRESS: --("sys-ip-address") +takes_value {valid_ipv4_address}
            "The IPv4 address to use as the `sys.ip` template variable. If this \
             argument is not set, the supervisor tries to dynamically determine \
             an IP address. If that fails, the supervisor defaults to using \
             `127.0.0.1`.")
    );
    // Shared option groups appended last so all `run` variants stay consistent.
    let sub = add_event_stream_options(sub);
    add_shutdown_timeout_option(sub)
}
/// Builds the `hab sup sh` subcommand.
///
/// Public due to its utilization in `hab-sup`.
pub fn sub_sup_sh() -> App<'static, 'static> {
    clap_app!(@subcommand sh =>
        (about: "Start an interactive Bourne-like shell")
        // set custom usage string, otherwise the binary
        // is displayed confusingly as `hab-sup`
        // see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
        (usage: "hab sup sh")
    )
}
/// Builds the `hab sup term` subcommand.
///
/// Public due to its utilization in `hab-sup`.
pub fn sub_sup_term() -> App<'static, 'static> {
    clap_app!(@subcommand term =>
        (about: "Gracefully terminate the Habitat Supervisor and all of its running services")
        // set custom usage string, otherwise the binary
        // is displayed confusingly as `hab-sup`
        // see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
        (usage: "hab sup term [OPTIONS]")
    )
}
/// Builds the `hab svc start` subcommand.
fn sub_svc_start() -> App<'static, 'static> {
    clap_app!(@subcommand start =>
        (about: "Start a loaded, but stopped, Habitat service.")
        (@arg PKG_IDENT: +required +takes_value {valid_ident}
            "A Habitat package identifier (ex: core/redis)")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
    )
}
// `hab svc status` is the canonical location for this command, but we
// have historically used `hab sup status` as an alias.
/// Builds the `hab svc status` subcommand.
///
/// `hab svc status` is the canonical location for this command, but we have
/// historically used `hab sup status` as an alias (hence `pub`).
pub fn sub_svc_status() -> App<'static, 'static> {
    clap_app!(@subcommand status =>
        (about: "Query the status of Habitat services.")
        (@arg PKG_IDENT: +takes_value {valid_ident} "A Habitat package identifier (ex: core/redis)")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
    )
}
/// Parses an optional CLAP argument into `T`, returning `None` when the
/// argument was not supplied.
///
/// Panics if the value does not parse; callers rely on a CLAP validator
/// having already vetted the raw string.
pub fn parse_optional_arg<T: FromStr>(name: &str, m: &ArgMatches) -> Option<T>
    where <T as std::str::FromStr>::Err: std::fmt::Debug
{
    match m.value_of(name) {
        Some(raw) => Some(raw.parse().expect("Valid argument")),
        None => None,
    }
}
/// Builds the `hab svc stop` subcommand, including the shared
/// `--shutdown-timeout` option.
fn sub_svc_stop() -> App<'static, 'static> {
    let sub = clap_app!(@subcommand stop =>
        (about: "Stop a running Habitat service.")
        (@arg PKG_IDENT: +required +takes_value {valid_ident}
            "A Habitat package identifier (ex: core/redis)")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
    );
    add_shutdown_timeout_option(sub)
}
/// Builds the `hab svc load` subcommand.
///
/// On Windows an additional PASSWORD option is added for the service user;
/// the shared `--shutdown-timeout` option is appended for all platforms.
fn sub_svc_load() -> App<'static, 'static> {
    let mut sub = clap_app!(@subcommand load =>
        (about: "Load a service to be started and supervised by Habitat from a package \
            identifier. If an installed package doesn't satisfy the given package \
            identifier, a suitable package will be installed from Builder.")
        (@arg PKG_IDENT: +required +takes_value {valid_ident}
            "A Habitat package identifier (ex: core/redis)")
        (@arg CHANNEL: --channel +takes_value default_value[stable]
            "Receive package updates from the specified release channel")
        (@arg GROUP: --group +takes_value
            "The service group; shared config and topology [default: default].")
        (@arg BLDR_URL: -u --url +takes_value {valid_url}
            "Specify an alternate Builder endpoint. If not specified, the value will \
             be taken from the HAB_BLDR_URL environment variable if defined. (default: \
             https://bldr.habitat.sh)")
        (@arg TOPOLOGY: --topology -t +takes_value possible_value[standalone leader]
            "Service topology; [default: none]")
        (@arg STRATEGY: --strategy -s +takes_value {valid_update_strategy}
            "The update strategy; [default: none] [values: none, at-once, rolling]")
        (@arg BIND: --bind +takes_value +multiple
            "One or more service groups to bind to a configuration")
        (@arg BINDING_MODE: --("binding-mode") +takes_value {valid_binding_mode}
            "Governs how the presence or absence of binds affects service startup. `strict` blocks \
             startup until all binds are present. [default: strict] [values: relaxed, strict]")
        (@arg FORCE: --force -f "Load or reload an already loaded service. If the service \
            was previously loaded and running this operation will also restart the service")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
        (@arg HEALTH_CHECK_INTERVAL: --("health-check-interval") -i +takes_value {valid_health_check_interval}
            "The interval (seconds) on which to run health checks [default: 30]")
    );
    if cfg!(windows) {
        sub = sub.arg(Arg::with_name("PASSWORD").long("password")
                                                .takes_value(true)
                                                .help("Password of the service user"));
    }
    add_shutdown_timeout_option(sub)
}
/// Builds the `hab svc unload` subcommand, including the shared
/// `--shutdown-timeout` option.
fn sub_svc_unload() -> App<'static, 'static> {
    let sub = clap_app!(@subcommand unload =>
        (about: "Unload a service loaded by the Habitat Supervisor. If the service is \
            running it will additionally be stopped.")
        (@arg PKG_IDENT: +required +takes_value {valid_ident}
            "A Habitat package identifier (ex: core/redis)")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
    );
    add_shutdown_timeout_option(sub)
}
/// Appends the event-stream options (application, environment, URL, site,
/// auth token, metadata, connect timeout, server certificate) to `app`.
///
/// Specifying `--event-stream-url` requires the application, environment,
/// and auth-token options as well.
fn add_event_stream_options(app: App<'static, 'static>) -> App<'static, 'static> {
    // Create shorter alias so formatting works correctly
    type ConnectMethod = EventStreamConnectMethod;
    app.arg(Arg::with_name("EVENT_STREAM_APPLICATION").help("The name of the application for \
                                                             event stream purposes. This \
                                                             will be attached to all events \
                                                             generated by this Supervisor.")
                                                      .long("event-stream-application")
                                                      .required(false)
                                                      .takes_value(true)
                                                      .validator(non_empty))
       .arg(Arg::with_name("EVENT_STREAM_ENVIRONMENT").help("The name of the environment for \
                                                             event stream purposes. This \
                                                             will be attached to all events \
                                                             generated by this Supervisor.")
                                                      .long("event-stream-environment")
                                                      .required(false)
                                                      .takes_value(true)
                                                      .validator(non_empty))
       .arg(Arg::with_name(ConnectMethod::ARG_NAME).help("How long in seconds to wait for an \
                                                          event stream connection before exiting \
                                                          the supervisor. Set to '0' to \
                                                          immediately start the supervisor and \
                                                          continue running regardless of the \
                                                          event stream status.")
                                                   .long("event-stream-connect-timeout")
                                                   .required(false)
                                                   .takes_value(true)
                                                   .env(ConnectMethod::ENVVAR)
                                                   .default_value("0")
                                                   .validator(valid_numeric::<u64>))
       .arg(Arg::with_name("EVENT_STREAM_URL").help("The event stream connection string \
                                                     (host:port) used by this Supervisor to send \
                                                     events to a messaging server.")
                                              .long("event-stream-url")
                                              .required(false)
                                              // A URL is only meaningful together with the
                                              // identifying options and an auth token.
                                              .requires_all(&[
                                                  "EVENT_STREAM_APPLICATION",
                                                  "EVENT_STREAM_ENVIRONMENT",
                                                  AutomateAuthToken::ARG_NAME
                                              ])
                                              .takes_value(true)
                                              .validator(nats_address))
       .arg(Arg::with_name("EVENT_STREAM_SITE").help("The name of the site where this Supervisor \
                                                      is running. It is used for event stream \
                                                      purposes.")
                                               .long("event-stream-site")
                                               .required(false)
                                               .takes_value(true)
                                               .validator(non_empty))
       .arg(Arg::with_name(AutomateAuthToken::ARG_NAME).help("An authentication token for \
                                                              streaming events to an messaging \
                                                              server.")
                                                       .long("event-stream-token")
                                                       .required(false)
                                                       .takes_value(true)
                                                       .validator(AutomateAuthToken::validate)
                                                       .env(AutomateAuthToken::ENVVAR))
       .arg(Arg::with_name(EventStreamMetadata::ARG_NAME).help("An arbitrary key-value pair to \
                                                                add to each event generated by \
                                                                this Supervisor")
                                                         .long("event-meta")
                                                         .takes_value(true)
                                                         .multiple(true)
                                                         .validator(EventStreamMetadata::validate))
       .arg(Arg::with_name("EVENT_STREAM_SERVER_CERTIFICATE").help("The path to the event stream \
                                                                    server's certificate in PEM \
                                                                    format used to establish a \
                                                                    TLS connection")
                                                             .long("event-stream-server-certificate")
                                                             .required(false)
                                                             .takes_value(true)
                                                             .validator(EventStreamServerCertificate::validate))
}
// CLAP Validation Functions
////////////////////////////////////////////////////////////////////////
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: accepts only recognized service binding modes.
fn valid_binding_mode(val: String) -> result::Result<(), String> {
    habitat_sup_protocol::types::BindingMode::from_str(&val)
        .map(|_| ())
        .map_err(|_| format!("Binding mode: '{}' is not valid", &val))
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: accepts only recognized key pair types (public/secret).
fn valid_pair_type(val: String) -> result::Result<(), String> {
    if PairType::from_str(&val).is_ok() {
        return Ok(());
    }
    Err(format!("PAIR_TYPE: {} is invalid, must be one of \
                 (public, secret)",
                &val))
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: delegates to `ServiceGroup::validate`, stringifying errors.
fn valid_service_group(val: String) -> result::Result<(), String> {
    match ServiceGroup::validate(&val) {
        Ok(_) => Ok(()),
        Err(e) => Err(e.to_string()),
    }
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must name an existing directory.
fn dir_exists(val: String) -> result::Result<(), String> {
    let candidate = Path::new(&val);
    if !candidate.is_dir() {
        return Err(format!("Directory: '{}' cannot be found", &val));
    }
    Ok(())
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must name an existing regular file.
fn file_exists(val: String) -> result::Result<(), String> {
    let candidate = Path::new(&val);
    if !candidate.is_file() {
        return Err(format!("File: '{}' cannot be found", &val));
    }
    Ok(())
}
/// CLAP validator: like `file_exists`, but also accepts the conventional
/// stdin sentinel `-`.
fn file_exists_or_stdin(val: String) -> result::Result<(), String> {
    const STDIN_SENTINEL: &str = "-";
    if val == STDIN_SENTINEL {
        // "-" means "read from stdin", so no file check applies.
        return Ok(());
    }
    file_exists(val)
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must parse as a dotted-quad IPv4 address.
fn valid_ipv4_address(val: String) -> result::Result<(), String> {
    if Ipv4Addr::from_str(&val).is_ok() {
        Ok(())
    } else {
        Err(format!("'{}' is not a valid IPv4 address, eg: \
                     '192.168.1.105'",
                    val))
    }
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must parse as a full socket address (IP:PORT).
fn valid_socket_addr(val: String) -> result::Result<(), String> {
    SocketAddr::from_str(&val)
        .map(|_| ())
        .map_err(|_| "Socket address should include both IP and port, eg: '0.0.0.0:9700'".to_string())
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must parse as an absolute URL.
fn valid_url(val: String) -> result::Result<(), String> {
    if Url::parse(&val).is_ok() {
        Ok(())
    } else {
        Err(format!("URL: '{}' is not valid", &val))
    }
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must parse as `T` (typically an integer type).
fn valid_numeric<T: FromStr>(val: String) -> result::Result<(), String> {
    val.parse::<T>()
       .map(|_| ())
       .map_err(|_| format!("'{}' is not a valid number", &val))
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must parse as a `HealthCheckInterval`; the parse
/// error is surfaced to the user.
fn valid_health_check_interval(val: String) -> result::Result<(), String> {
    HealthCheckInterval::from_str(&val)
        .map(|_| ())
        .map_err(|e| format!("'{}' is not a valid value for health check interval: {}", val, e))
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: accepts only recognized update strategies.
fn valid_update_strategy(val: String) -> result::Result<(), String> {
    habitat_sup_protocol::types::UpdateStrategy::from_str(&val)
        .map(|_| ())
        .map_err(|_| format!("Update strategy: '{}' is not valid", &val))
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must parse as a (possibly partial) package
/// identifier.
fn valid_ident(val: String) -> result::Result<(), String> {
    PackageIdent::from_str(&val).map(|_| ()).map_err(|_| {
        format!("'{}' is not valid. Package identifiers have the \
                 form origin/name[/version[/release]]",
                &val)
    })
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: accepts a TOML file path as-is, otherwise requires a file
/// of package identifiers.
fn valid_ident_or_toml_file(val: String) -> result::Result<(), String> {
    if !is_toml_file(&val) {
        return valid_ident_file(val);
    }
    // We could do some more validation (parse the whole toml file and check it)
    // but that seems excessive.
    Ok(())
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the file must contain parseable package identifiers.
fn valid_ident_file(val: String) -> result::Result<(), String> {
    match file_into_idents(&val) {
        Ok(_) => Ok(()),
        Err(e) => Err(e.to_string()),
    }
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must be a known package target; the error lists
/// all currently supported targets.
fn valid_target(val: String) -> result::Result<(), String> {
    if PackageTarget::from_str(&val).is_ok() {
        return Ok(());
    }
    let targets: Vec<_> = PackageTarget::targets().map(std::convert::AsRef::as_ref)
                                                  .collect();
    Err(format!("'{}' is not valid. Valid targets are in the form \
                 architecture-platform (currently Habitat allows \
                 the following: {})",
                &val,
                targets.join(", ")))
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must be a fully qualified package identifier
/// (origin/name/version/release, all four parts present).
fn valid_fully_qualified_ident(val: String) -> result::Result<(), String> {
    if let Ok(ref ident) = PackageIdent::from_str(&val) {
        if ident.fully_qualified() {
            return Ok(());
        }
    }
    Err(format!("'{}' is not valid. Fully qualified package \
                 identifiers have the form \
                 origin/name/version/release",
                &val))
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must be a syntactically valid origin name.
fn valid_origin(val: String) -> result::Result<(), String> {
    if !ident::is_valid_origin_name(&val) {
        return Err(format!("'{}' is not valid. A valid origin contains a-z, \
                            0-9, and _ or - after the first character",
                           &val));
    }
    Ok(())
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must parse as a `ShutdownTimeout`; the parse
/// error is surfaced to the user.
fn valid_shutdown_timeout(val: String) -> result::Result<(), String> {
    ShutdownTimeout::from_str(&val)
        .map(|_| ())
        .map_err(|e| format!("'{}' is not a valid value for shutdown timeout: {}", val, e))
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: the value must parse as a NATS event stream address.
fn nats_address(val: String) -> result::Result<(), String> {
    NatsAddress::from_str(&val)
        .map(|_| ())
        .map_err(|_| format!("'{}' is not a valid event stream address", val))
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
/// CLAP validator: rejects empty values (which can sneak in through env-var
/// overrides of defaulted arguments).
fn non_empty(val: String) -> result::Result<(), String> {
    if !val.is_empty() {
        Ok(())
    } else {
        Err("must not be empty (check env overrides)".to_string())
    }
}
/// Adds extra configuration option for shutting down a service with a customized timeout.
fn add_shutdown_timeout_option(app: App<'static, 'static>) -> App<'static, 'static> {
    let shutdown_timeout =
        Arg::with_name("SHUTDOWN_TIMEOUT").long("shutdown-timeout")
                                          .takes_value(true)
                                          .validator(valid_shutdown_timeout)
                                          .help("The number of seconds after sending a \
                                                 shutdown signal to wait before killing a \
                                                 service process (default: set in plan)");
    app.arg(shutdown_timeout)
}
////////////////////////////////////////////////////////////////////////
#[cfg(test)]
mod tests {
fn no_feature_flags() -> FeatureFlag { FeatureFlag::empty() }
use super::*;
mod sup_commands {
use super::*;
use clap::ErrorKind;
#[test]
fn sup_subcommand_short_help() {
let r = get(no_feature_flags()).get_matches_from_safe(vec!["hab", "sup", "-h"]);
assert!(r.is_err());
// not `ErrorKind::InvalidSubcommand`
assert_eq!(r.unwrap_err().kind, ErrorKind::HelpDisplayed);
}
#[test]
fn sup_subcommand_run_with_peer() {
let r = get(no_feature_flags()).get_matches_from_safe(vec!["hab", "sup", "run",
"--peer", "1.1.1.1"]);
assert!(r.is_ok());
let matches = r.expect("Error while getting matches");
// validate `sup` subcommand
assert_eq!(matches.subcommand_name(), Some("sup"));
let (_, sup_matches) = matches.subcommand();
let sup_matches = sup_matches.expect("Error while getting sup matches");
assert_eq!(sup_matches.subcommand_name(), Some("run"));
let (_, run_matches) = sup_matches.subcommand();
let run_matches = run_matches.expect("Error while getting run matches");
assert_eq!(run_matches.value_of("PEER"), Some("1.1.1.1"));
}
}
// Tests for the `--event-stream-*` options of `hab sup run`.
//
// Covers: the four jointly-required event-stream options (application,
// environment, token, url); feature-flag gating (a parser built with
// `no_feature_flags()` rejects the options as unknown arguments, while
// `FeatureFlag::empty()` accepts them — per the assertions below); non-empty
// value validation; and the repeatable `--event-meta KEY=VALUE` option.
//
// NOTE(review): many assertions pin the *specific* clap `ErrorKind` produced,
// which depends on the exact ordering of the argv vectors (e.g. a flag given
// last with no value yields `EmptyValue` rather than
// `MissingRequiredArgument`) — keep argument order intact when editing.
mod event_stream_feature {
use super::*;
// Omitting all event-stream options must fail; supplying all four
// (application, environment, token, url) must parse successfully.
#[test]
fn run_requires_app_and_env_and_token_and_url() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec!["run"]);
assert!(matches.is_err());
assert_eq!(matches.unwrap_err().kind,
clap::ErrorKind::MissingRequiredArgument);
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_ok());
}
// Without the event-stream feature flag the options are not registered,
// so clap reports the first one seen as an unknown argument.
#[test]
fn app_and_env_and_token_and_url_options_require_event_stream_feature() {
let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::UnknownArgument);
assert_eq!(error.info,
Some(vec!["--event-stream-application".to_string()]));
}
// `--event-stream-application` immediately followed by another flag (no
// value) must yield `EmptyValue` naming the EVENT_STREAM_APPLICATION arg.
#[test]
fn app_option_must_take_a_value() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::EmptyValue);
assert_eq!(error.info,
Some(vec!["EVENT_STREAM_APPLICATION".to_string()]));
}
// An explicit empty-string value is rejected by the arg's validator
// (`ValueValidation`), distinct from the missing-value case above.
#[test]
fn app_option_cannot_be_empty() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::ValueValidation);
}
// Same missing-value check as the application option, for
// `--event-stream-environment`.
#[test]
fn env_option_must_take_a_value() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::EmptyValue);
assert_eq!(error.info,
Some(vec!["EVENT_STREAM_ENVIRONMENT".to_string()]));
}
// Same empty-string validator check, for `--event-stream-environment`.
#[test]
fn env_option_cannot_be_empty() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::ValueValidation);
}
// `--event-meta` is also gated behind the event-stream feature flag.
#[test]
fn event_meta_flag_requires_event_stream_feature() {
let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
"run",
"--event-meta",
"foo=bar",
"--event-stream-application",
"MY_APP",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-environment",
"MY_ENV",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::UnknownArgument);
assert_eq!(error.info, Some(vec!["--event-meta".to_string()]));
}
// Multiple `--event-meta KEY=VALUE` occurrences are all collected, in the
// order given, under `EventStreamMetadata::ARG_NAME`.
#[test]
fn event_meta_can_be_repeated() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-meta",
"foo=bar",
"--event-meta",
"blah=boo",
"--event-meta",
"monkey=pants",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_ok());
let matches = matches.unwrap();
let meta = matches.values_of(EventStreamMetadata::ARG_NAME)
.expect("didn't have metadata")
.collect::<Vec<_>>();
assert_eq!(meta, ["foo=bar", "blah=boo", "monkey=pants"]);
}
// `--event-meta` followed directly by another flag (no value) -> EmptyValue.
#[test]
fn event_meta_cannot_be_empty() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-meta",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_err());
assert_eq!(matches.unwrap_err().kind, clap::ErrorKind::EmptyValue);
}
// Metadata must be of the form KEY=VALUE; a bare token fails validation.
#[test]
fn event_meta_must_have_an_equal_sign() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-meta",
"foobar",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_err());
assert_eq!(matches.unwrap_err().kind, clap::ErrorKind::ValueValidation);
}
// "=bar" (empty key) must fail the metadata validator.
#[test]
fn event_meta_key_cannot_be_empty() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-meta",
"=bar",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_err());
assert_eq!(matches.unwrap_err().kind, clap::ErrorKind::ValueValidation);
}
// "foo=" (empty value) must fail the metadata validator.
#[test]
fn event_meta_value_cannot_be_empty() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-meta",
"foo=",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_err());
assert_eq!(matches.unwrap_err().kind, clap::ErrorKind::ValueValidation);
}
// Trailing `--event-stream-token` with no value -> EmptyValue; the error
// info carries the arg name via `AutomateAuthToken::ARG_NAME`.
#[test]
fn token_option_must_take_a_value() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-url",
"127.0.0.1:4222",
"--event-stream-token",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::EmptyValue);
assert_eq!(error.info,
Some(vec![AutomateAuthToken::ARG_NAME.to_string()]));
}
// Empty-string token is rejected by the token validator.
#[test]
fn token_option_cannot_be_empty() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"",
"--event-stream-url",
"127.0.0.1:4222",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::ValueValidation);
}
// `--event-stream-site` is not in the required set (the other four options
// suffice above), but when given it must carry a value.
#[test]
fn site_option_must_take_a_value() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
"--event-stream-site",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::EmptyValue);
assert_eq!(error.info, Some(vec!["EVENT_STREAM_SITE".to_string()]));
}
// Empty-string site is rejected by the site validator.
#[test]
fn site_option_cannot_be_empty() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"127.0.0.1:4222",
"--event-stream-site",
"",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::ValueValidation);
}
// Trailing `--event-stream-url` with no value -> EmptyValue for
// EVENT_STREAM_URL.
#[test]
fn url_option_must_take_a_value() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::EmptyValue);
assert_eq!(error.info, Some(vec!["EVENT_STREAM_URL".to_string()]));
}
// Empty-string URL is rejected by the URL validator.
#[test]
fn url_option_cannot_be_empty() {
let matches = sub_sup_run(FeatureFlag::empty()).get_matches_from_safe(vec![
"run",
"--event-stream-application",
"MY_APP",
"--event-stream-environment",
"MY_ENV",
"--event-stream-token",
"MY_TOKEN",
"--event-stream-url",
"",
]);
assert!(matches.is_err());
let error = matches.unwrap_err();
assert_eq!(error.kind, clap::ErrorKind::ValueValidation);
}
}
}
Fix tests
Signed-off-by: David McNeil <e57c3c663692ef9cceb57f3c25154b8f67832195@gmail.com>
use crate::command::studio;
use clap::{App,
AppSettings,
Arg,
ArgMatches};
use habitat_common::{cli::{file_into_idents,
is_toml_file,
BINLINK_DIR_ENVVAR,
DEFAULT_BINLINK_DIR,
PACKAGE_TARGET_ENVVAR,
RING_ENVVAR,
RING_KEY_ENVVAR},
types::{AutomateAuthToken,
EventStreamConnectMethod,
EventStreamMetadata,
EventStreamServerCertificate,
GossipListenAddr,
HttpListenAddr,
ListenCtlAddr},
FeatureFlag};
use habitat_core::{crypto::{keys::PairType,
CACHE_KEY_PATH_ENV_VAR},
env::Config,
fs::CACHE_KEY_PATH,
os::process::ShutdownTimeout,
package::{ident,
Identifiable,
PackageIdent,
PackageTarget},
service::{HealthCheckInterval,
ServiceGroup},
ChannelIdent};
use habitat_sup_protocol;
use rants::Address as NatsAddress;
use std::{net::{Ipv4Addr,
SocketAddr},
path::Path,
result,
str::FromStr};
use url::Url;
pub fn get(feature_flags: FeatureFlag) -> App<'static, 'static> {
let alias_apply = sub_config_apply().about("Alias for 'config apply'")
.aliases(&["ap", "app", "appl"])
.setting(AppSettings::Hidden);
let alias_install =
sub_pkg_install(feature_flags).about("Alias for 'pkg install'")
.aliases(&["i", "in", "ins", "inst", "insta", "instal"])
.setting(AppSettings::Hidden);
let alias_setup = sub_cli_setup().about("Alias for 'cli setup'")
.aliases(&["set", "setu"])
.setting(AppSettings::Hidden);
let alias_start = sub_svc_start().about("Alias for 'svc start'")
.aliases(&["sta", "star"])
.setting(AppSettings::Hidden);
let alias_stop = sub_svc_stop().about("Alias for 'svc stop'")
.aliases(&["sto"])
.setting(AppSettings::Hidden);
clap_app!(hab =>
(about: "\"A Habitat is the natural environment for your services\" - Alan Turing")
(version: super::VERSION)
(author: "\nAuthors: The Habitat Maintainers <humans@habitat.sh>\n")
(@setting GlobalVersion)
(@setting ArgRequiredElseHelp)
(@subcommand license =>
(about: "Commands relating to Habitat license agreements")
(@setting ArgRequiredElseHelp)
(@subcommand accept =>
(about: "Accept the Chef Binary Distribution Agreement without prompting"))
)
(@subcommand cli =>
(about: "Commands relating to Habitat runtime config")
(aliases: &["cl"])
(@setting ArgRequiredElseHelp)
(subcommand: sub_cli_setup().aliases(&["s", "se", "set", "setu"]))
(subcommand: sub_cli_completers().aliases(&["c", "co", "com", "comp"]))
)
(@subcommand config =>
(about: "Commands relating to a Service's runtime config")
(aliases: &["co", "con", "conf", "confi"])
(@setting ArgRequiredElseHelp)
(subcommand: sub_config_apply().aliases(&["ap", "app", "appl"]))
(@subcommand show =>
(about: "Displays the default configuration options for a service")
(aliases: &["sh", "sho"])
(@arg PKG_IDENT: +required +takes_value {valid_ident}
"A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
(@arg REMOTE_SUP: --("remote-sup") -r +takes_value
"Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
)
)
(@subcommand file =>
(about: "Commands relating to Habitat files")
(aliases: &["f", "fi", "fil"])
(@setting ArgRequiredElseHelp)
(@subcommand upload =>
(about: "Uploads a file to be shared between members of a Service Group")
(aliases: &["u", "up", "upl", "uplo", "uploa"])
(@arg SERVICE_GROUP: +required +takes_value {valid_service_group}
"Target service group service.group[@organization] (ex: redis.default or foo.default@bazcorp)")
(@arg VERSION_NUMBER: +required
"A version number (positive integer) for this configuration (ex: 42)")
(@arg FILE: +required {file_exists} "Path to local file on disk")
(@arg USER: -u --user +takes_value "Name of the user key")
(@arg REMOTE_SUP: --("remote-sup") -r +takes_value
"Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
(arg: arg_cache_key_path("Path to search for encryption keys. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
)
(@subcommand bldr =>
(about: "Commands relating to Habitat Builder")
(aliases: &["b", "bl", "bld"])
(@setting ArgRequiredElseHelp)
(@subcommand job =>
(about: "Commands relating to Habitat Builder jobs")
(aliases: &["j", "jo"])
(@setting ArgRequiredElseHelp)
(@subcommand start =>
(about: "Schedule a build job or group of jobs")
(aliases: &["s", "st", "sta", "star"])
(@arg PKG_IDENT: +required +takes_value {valid_ident}
"The origin and name of the package to schedule a job for (eg: core/redis)")
(arg: arg_target())
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the cli.toml or HAB_BLDR_URL environment variable if defined. \
(default: https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
(@arg GROUP: -g --group "Schedule jobs for this package and all of its reverse \
dependencies")
)
(@subcommand cancel =>
(about: "Cancel a build job group and any in-progress builds")
(aliases: &["c", "ca", "can", "cance", "cancel"])
(@arg GROUP_ID: +required +takes_value
"The job group id that was returned from \"hab bldr job start\" \
(ex: 771100000000000000)")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg FORCE: -f --force
"Don't prompt for confirmation")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
(@subcommand promote =>
(about: "Promote packages from a completed build job to a specified channel")
(aliases: &["p", "pr", "pro", "prom", "promo", "promot"])
(@arg GROUP_ID: +required +takes_value
"The job id that was returned from \"hab bldr job start\" \
(ex: 771100000000000000)")
(@arg CHANNEL: +takes_value +required "The target channel name")
(@arg ORIGIN: -o --origin +takes_value {valid_origin}
"Limit the promotable packages to the specified origin")
(@arg INTERACTIVE: -i --interactive
"Allow editing the list of promotable packages")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
(@subcommand demote =>
(about: "Demote packages from a completed build job from a specified channel")
(aliases: &["d", "de", "dem", "demo", "demot"])
(@arg GROUP_ID: +required +takes_value
"The job id that was returned from \"hab bldr start\" \
(ex: 771100000000000000)")
(@arg CHANNEL: +takes_value +required "The name of the channel to demote from")
(@arg ORIGIN: -o --origin +takes_value {valid_origin}
"Limit the demotable packages to the specified origin")
(@arg INTERACTIVE: -i --interactive
"Allow editing the list of demotable packages")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
(@subcommand status =>
(about: "Get the status of one or more job groups")
(aliases: &["stat", "statu"])
(@group status =>
(@attributes +required)
(@arg GROUP_ID: +takes_value
"The group id that was returned from \"hab bldr job start\" \
(ex: 771100000000000000)")
(@arg ORIGIN: -o --origin +takes_value {valid_origin}
"Show the status of recent job groups created in this origin \
(default: 10 most recent)")
)
(@arg LIMIT: -l --limit +takes_value {valid_numeric::<usize>}
"Limit how many job groups to retrieve, ordered by most recent \
(default: 10)")
(@arg SHOW_JOBS: -s --showjobs
"Show the status of all build jobs for a retrieved job group")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
)
)
(@subcommand channel =>
(about: "Commands relating to Habitat Builder channels")
(aliases: &["c", "ch", "cha", "chan", "chann", "channe"])
(@setting ArgRequiredElseHelp)
(@subcommand promote =>
(about: "Atomically promotes all packages in channel")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg ORIGIN: -o --origin +required +takes_value {valid_origin}
"The origin for the channels. Default is from \
'HAB_ORIGIN' or cli.toml")
(@arg SOURCE_CHANNEL: +required +takes_value
"The channel from which all packages will be selected for promotion")
(@arg TARGET_CHANNEL: +required +takes_value
"The channel to which packages will be promoted")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
(@subcommand demote =>
(about: "Atomically demotes selected packages in a target channel")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg ORIGIN: -o --origin +required +takes_value {valid_origin}
"The origin for the channels. Default is from \
'HAB_ORIGIN' or cli.toml")
(@arg SOURCE_CHANNEL: +required +takes_value
"The channel from which all packages will be selected for demotion")
(@arg TARGET_CHANNEL: +required +takes_value
"The channel selected packages will be removed from")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
(@subcommand create =>
(about: "Creates a new channel")
(aliases: &["c", "cr", "cre", "crea", "creat"])
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg CHANNEL: +required + takes_value "The channel name")
(@arg ORIGIN: -o --origin +takes_value {valid_origin}
"Sets the origin to which the channel will belong. Default is from \
'HAB_ORIGIN' or cli.toml")
)
(@subcommand destroy =>
(about: "Destroys a channel")
(aliases: &["d", "de", "des", "dest", "destr", "destro"])
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg CHANNEL: +required + takes_value "The channel name")
(@arg ORIGIN: -o --origin +takes_value {valid_origin}
"Sets the origin to which the channel belongs. Default is from 'HAB_ORIGIN'\
or cli.toml")
)
(@subcommand list =>
(about: "Lists origin channels")
(aliases: &["l", "li", "lis"])
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg ORIGIN: +takes_value {valid_origin}
"The origin for which channels will be listed. Default is from 'HAB_ORIGIN'\
or cli.toml")
)
)
)
(@subcommand origin =>
(about: "Commands relating to Habitat Builder origins")
(aliases: &["o", "or", "ori", "orig", "origi"])
(@setting ArgRequiredElseHelp)
(@subcommand create =>
(about: "Creates a new Builder origin")
(aliases: &["cre", "crea"])
(@arg ORIGIN: +required {valid_origin} "The origin to be created")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the `HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
(@subcommand delete =>
(about: "Removes an unused/empty origin")
(aliases: &["del", "dele"])
(@arg ORIGIN: +required {valid_origin} "The origin name")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the `HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
(@subcommand transfer =>
(about: "Transfers ownership of an origin to another member of that origin")
(@arg ORIGIN: +required {valid_origin} "The origin name")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the `HAB_BLDR_URL` environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
(@arg NEW_OWNER_ACCOUNT: +required +takes_value {non_empty} "The account name of the new origin owner")
)
(@subcommand key =>
(about: "Commands relating to Habitat origin key maintenance")
(aliases: &["k", "ke"])
(@setting ArgRequiredElseHelp)
(@subcommand download =>
(about: "Download origin key(s)")
(aliases: &["d", "do", "dow", "down", "downl", "downlo", "downloa"])
(arg: arg_cache_key_path("Path to download origin keys to. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
(@arg ORIGIN: +required {valid_origin} "The origin name" )
(@arg REVISION: "The origin key revision")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg WITH_SECRET: -s --secret
"Download origin private key instead of origin public key")
(@arg WITH_ENCRYPTION: -e --encryption
"Download public encryption key instead of origin public key")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder \
(required for downloading origin private keys)")
)
(@subcommand export =>
(about: "Outputs the latest origin key contents to stdout")
(aliases: &["e", "ex", "exp", "expo", "expor"])
(@arg ORIGIN: +required +takes_value {valid_origin})
(@arg PAIR_TYPE: -t --type +takes_value {valid_pair_type}
"Export either the 'public' or 'secret' key. The 'secret' key is the origin private key")
(arg: arg_cache_key_path("Path to export origin keys from. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
(@subcommand generate =>
(about: "Generates a Habitat origin key pair")
(aliases: &["g", "ge", "gen", "gene", "gener", "genera", "generat"])
(@arg ORIGIN: {valid_origin} "The origin name")
(arg: arg_cache_key_path("Path to store generated keys. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
(@subcommand import =>
(about: "Reads a stdin stream containing a public or private origin key \
contents and writes the key to disk")
(aliases: &["i", "im", "imp", "impo", "impor"])
(arg: arg_cache_key_path("Path to import origin keys to. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
(@subcommand upload =>
(@group upload =>
(@attributes +required)
(@arg ORIGIN: {valid_origin} "The origin name")
(@arg PUBLIC_FILE: --pubfile +takes_value {file_exists}
"Path to a local public origin key file on disk")
)
(about: "Upload origin keys to Builder")
(aliases: &["u", "up", "upl", "uplo", "uploa"])
(arg: arg_cache_key_path("Path to upload origin keys from. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
(@arg WITH_SECRET: -s --secret conflicts_with[PUBLIC_FILE]
"Upload origin private key in addition to the public key")
(@arg SECRET_FILE: --secfile +takes_value {file_exists} conflicts_with[ORIGIN]
"Path to a local origin private key file on disk")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
)
(@subcommand secret =>
(about: "Commands related to secret management")
(@setting ArgRequiredElseHelp)
(@subcommand upload =>
(about: "Create and upload a secret for your origin.")
(@arg KEY_NAME: +required +takes_value
"The name of the variable key to be injected into the studio. \
Ex: KEY=\"some_value\"")
(@arg SECRET: +required +takes_value
"The contents of the variable to be injected into the studio.")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
(@arg ORIGIN: -o --origin +takes_value {valid_origin}
"The origin for which the secret will be uploaded. Default is from \
'HAB_ORIGIN' or cli.toml")
(arg: arg_cache_key_path("Path to public encryption key. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
(@subcommand delete =>
(about: "Delete a secret for your origin")
(@arg KEY_NAME: +required +takes_value
"The name of the variable key to be injected into the studio.")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
(@arg ORIGIN: -o --origin +takes_value {valid_origin}
"The origin for which the secret will be deleted. Default is from \
'HAB_ORIGIN' or cli.toml")
)
(@subcommand list =>
(about: "List all secrets for your origin")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
(@arg ORIGIN: -o --origin +takes_value {valid_origin}
"The origin for which secrets will be listed. Default is from 'HAB_ORIGIN' \
or cli.toml")
)
)
)
(@subcommand pkg =>
(about: "Commands relating to Habitat packages")
(aliases: &["p", "pk", "package"])
(@setting ArgRequiredElseHelp)
(@subcommand binds =>
(about: "Displays the binds for a service")
(@arg PKG_IDENT: +required +takes_value {valid_ident}
"A package identifier (ex: core/redis, core/busybox-statis/1.42.2)")
)
(@subcommand binlink =>
(about: "Creates a binlink for a package binary in a common 'PATH' location")
(aliases: &["bi", "bin", "binl", "binli", "binlin"])
(@arg PKG_IDENT: +required +takes_value {valid_ident}
"A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
(@arg BINARY: +takes_value
"The command to binlink (ex: bash)")
(@arg DEST_DIR: -d --dest +takes_value {non_empty} env(BINLINK_DIR_ENVVAR) default_value(DEFAULT_BINLINK_DIR)
"Sets the destination directory")
(@arg FORCE: -f --force "Overwrite existing binlinks")
)
(subcommand: sub_pkg_build())
(@subcommand config =>
(about: "Displays the default configuration options for a service")
(aliases: &["conf", "cfg"])
(@arg PKG_IDENT: +required +takes_value {valid_ident}
"A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
)
(subcommand: sub_pkg_download())
(@subcommand env =>
(about: "Prints the runtime environment of a specific installed package")
(@arg PKG_IDENT: +required +takes_value {valid_ident}
"A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
)
(@subcommand exec =>
(about: "Executes a command using the 'PATH' context of an installed package")
(aliases: &["exe"])
(@arg PKG_IDENT: +required +takes_value {valid_ident}
"A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
(@arg CMD: +required +takes_value
"The command to execute (ex: ls)")
(@arg ARGS: +takes_value +multiple
"Arguments to the command (ex: -l /tmp)")
)
(@subcommand export =>
(about: "Exports the package to the specified format")
(aliases: &["exp"])
(@arg FORMAT: +required +takes_value
"The export format (ex: aci, cf, docker, kubernetes, mesos, or tar)")
(@arg PKG_IDENT: +required +takes_value {valid_ident}
"A package identifier (ex: core/redis, core/busybox-static/1.42.2) or \
filepath to a Habitat Artifact \
(ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
(@arg BLDR_URL: -u --url +takes_value {valid_url}
"Specify an alternate Builder endpoint. If not specified, the value will \
be taken from the HAB_BLDR_URL environment variable if defined. (default: \
https://bldr.habitat.sh)")
(@arg CHANNEL: --channel -c +takes_value default_value[stable] env(ChannelIdent::ENVVAR)
"Retrieve the container's package from the specified release channel")
)
(@subcommand hash =>
(about: "Generates a blake2b hashsum from a target at any given filepath")
(aliases: &["ha", "has"])
(@arg SOURCE: +takes_value {file_exists} "A filepath of the target")
)
(subcommand: sub_pkg_install(feature_flags).aliases(
&["i", "in", "ins", "inst", "insta", "instal"]))
(@subcommand path =>
(about: "Prints the path to a specific installed release of a package")
(aliases: &["p", "pa", "pat"])
(@arg PKG_IDENT: +required +takes_value {valid_ident}
"A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
)
(@subcommand list =>
(about: "List all versions of installed packages")
(aliases: &["li"])
(@group prefix =>
(@attributes +required)
(@arg ALL: -a --all
"List all installed packages")
(@arg ORIGIN: -o --origin +takes_value {valid_origin}
"An origin to list")
(@arg PKG_IDENT: +takes_value {valid_ident}
"A package identifier (ex: core/redis, core/busybox-static/1.42.2).")
)
)
(@subcommand provides =>
(about: "Search installed Habitat packages for a given file")
(@arg FILE: +required +takes_value
"File name to find")
(@arg FULL_RELEASES: -r
"Show fully qualified package names \
(ex: core/busybox-static/1.24.2/20160708162350)")
(@arg FULL_PATHS: -p "Show full path to file")
)
(@subcommand search =>
(about: "Search for a package in Builder")
(@arg SEARCH_TERM: +required +takes_value "Search term")
(@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
environment variable if defined. (default: https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
(@arg LIMIT: -l --limit +takes_value default_value("50") {valid_numeric::<usize>}
"Limit how many packages to retrieve")
)
(@subcommand sign =>
(about: "Signs an archive with an origin key, generating a Habitat Artifact")
(aliases: &["s", "si", "sig"])
(@arg ORIGIN: --origin +takes_value {valid_origin} "Origin key used to create signature")
(@arg SOURCE: +required {file_exists}
"A path to a source archive file \
(ex: /home/acme-redis-3.0.7-21120102031201.tar.xz)")
(@arg DEST: +required
"The destination path to the signed Habitat Artifact \
(ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
(arg: arg_cache_key_path("Path to search for origin keys. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
(@subcommand uninstall =>
(about: "Safely uninstall a package and dependencies from the local filesystem")
(aliases: &["un", "unin"])
(@arg PKG_IDENT: +required +takes_value {valid_ident}
"A package identifier (ex: core/redis, core/busybox-static/1.42.2/21120102031201)")
(@arg DRYRUN: -d --dryrun "Just show what would be uninstalled, don't actually do it")
(@arg EXCLUDE: --exclude +takes_value +multiple {valid_ident}
"Identifier of one or more packages that should not be uninstalled. \
(ex: core/redis, core/busybox-static/1.42.2/21120102031201)")
(@arg NO_DEPS: --("no-deps") "Don't uninstall dependencies")
)
// alas no hyphens in subcommand names..
// https://github.com/clap-rs/clap/issues/1297
(@subcommand bulkupload =>
(about: "Bulk Uploads Habitat Artifacts to a Depot from a local directory.")
(aliases: &["bul", "bulk"])
(@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Depot \
endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
environment variable if defined. (default: https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
(@arg CHANNEL: --channel -c +takes_value
"Optional additional release channel to upload package to. \
Packages are always uploaded to `unstable`, regardless \
of the value of this option.")
(@arg FORCE: --force "Skip checking availability of package and \
force uploads, potentially overwriting a stored copy of a package.")
(@arg AUTO_BUILD: --("auto-build") "Enable auto-build for all packages in this upload. \
Only applicable to SaaS Builder.")
(@arg AUTO_CREATE_ORIGINS: --("auto-create-origins") "Skip the confirmation prompt and \
automatically create origins that do not exist in the target Builder.")
(@arg UPLOAD_DIRECTORY: +required {dir_exists}
"Directory Path from which artifacts will be uploaded.")
)
(@subcommand upload =>
(about: "Uploads a local Habitat Artifact to Builder")
(aliases: &["u", "up", "upl", "uplo", "uploa"])
(@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
environment variable if defined. (default: https://bldr.habitat.sh)")
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
(@arg CHANNEL: --channel -c +takes_value
"Optional additional release channel to upload package to. \
Packages are always uploaded to `unstable`, regardless \
of the value of this option.")
(@arg FORCE: --force "Skips checking availability of package and \
force uploads, potentially overwriting a stored copy of a package. \
(default: false)")
(@arg NO_BUILD: --("no-build") "Disable auto-build for all packages in this upload.")
(@arg HART_FILE: +required +multiple {file_exists}
"One or more filepaths to a Habitat Artifact \
(ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
(arg: arg_cache_key_path("Path to search for public origin keys to upload. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
(@subcommand delete =>
(about: "Removes a package from Builder")
(aliases: &["del", "dele"])
(@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
environment variable if defined. (default: https://bldr.habitat.sh)")
(@arg PKG_IDENT: +required +takes_value {valid_fully_qualified_ident} "A fully qualified package identifier \
(ex: core/busybox-static/1.42.2/20170513215502)")
(arg: arg_target())
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
(@subcommand promote =>
(about: "Promote a package to a specified channel")
(aliases: &["pr", "pro", "promo", "promot"])
(@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
environment variable if defined. (default: https://bldr.habitat.sh)")
(@arg PKG_IDENT: +required +takes_value {valid_fully_qualified_ident} "A fully qualified package identifier \
(ex: core/busybox-static/1.42.2/20170513215502)")
(@arg CHANNEL: +required +takes_value "Promote to the specified release channel")
(arg: arg_target())
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
(@subcommand demote =>
(about: "Demote a package from a specified channel")
(aliases: &["de", "dem", "demo", "demot"])
(@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
environment variable if defined. (default: https://bldr.habitat.sh)")
(@arg PKG_IDENT: +required +takes_value {valid_fully_qualified_ident} "A fully qualified package identifier \
(ex: core/busybox-static/1.42.2/20170513215502)")
(@arg CHANNEL: +required +takes_value "Demote from the specified release channel")
(arg: arg_target())
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
(@subcommand channels =>
(about: "Find out what channels a package belongs to")
(aliases: &["ch", "cha", "chan", "chann", "channe", "channel"])
(@arg BLDR_URL: -u --url +takes_value {valid_url} "Specify an alternate Builder \
endpoint. If not specified, the value will be taken from the HAB_BLDR_URL \
environment variable if defined. (default: https://bldr.habitat.sh)")
(@arg PKG_IDENT: +required +takes_value {valid_fully_qualified_ident} "A fully qualified package identifier \
(ex: core/busybox-static/1.42.2/20170513215502)")
(arg: arg_target())
(@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
)
(@subcommand verify =>
(about: "Verifies a Habitat Artifact with an origin key")
(aliases: &["v", "ve", "ver", "veri", "verif"])
(@arg SOURCE: +required {file_exists} "A path to a Habitat Artifact \
(ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
(arg: arg_cache_key_path("Path to search for public origin keys for verification. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
(@subcommand header =>
(about: "Returns the Habitat Artifact header")
(aliases: &["hea", "head", "heade", "header"])
(@setting Hidden)
(@arg SOURCE: +required {file_exists} "A path to a Habitat Artifact \
(ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
)
(@subcommand info =>
(about: "Returns the Habitat Artifact information")
(aliases: &["inf", "info"])
(@arg TO_JSON: -j --json "Output will be rendered in json")
(@arg SOURCE: +required {file_exists} "A path to a Habitat Artifact \
(ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
)
(@subcommand dependencies =>
(about: "Returns the Habitat Artifact dependencies. By default it will return \
the direct dependencies of the package")
(aliases: &["dep", "deps"])
(@arg TRANSITIVE: -t --transitive "Show transitive dependencies")
(@arg REVERSE: -r --reverse "Show packages which are dependant on this one")
(@arg PKG_IDENT: +required +takes_value {valid_ident}
"A package identifier (ex: core/redis, core/busybox-static/1.42.2)")
)
)
(@subcommand plan =>
(about: "Commands relating to plans and other app-specific configuration.")
(aliases: &["pl", "pla"])
(@setting ArgRequiredElseHelp)
(@subcommand init =>
(about: "Generates common package specific configuration files. Executing without \
argument will create a `habitat` directory in your current folder for the \
plan. If `PKG_NAME` is specified it will create a folder with that name. \
Environment variables (those starting with 'pkg_') that are set will be used \
in the generated plan")
(aliases: &["i", "in", "ini"])
(@arg PKG_NAME: +takes_value "Name for the new app")
(@arg ORIGIN: --origin -o +takes_value {valid_origin} "Origin for the new app")
(@arg MIN: --min -m "Create a minimal plan file")
(@arg SCAFFOLDING: --scaffolding -s +takes_value
"Specify explicit Scaffolding for your app (ex: node, ruby)")
)
(@subcommand render =>
(about: "Renders plan config files")
(aliases: &["r", "re", "ren", "rend", "rende"])
(@arg TEMPLATE_PATH: +required {file_exists} "Path to config to render")
(@arg DEFAULT_TOML: -d --("default-toml") +takes_value default_value("./default.toml") "Path to default.toml")
(@arg USER_TOML: -u --("user-toml") +takes_value "Path to user.toml, defaults to none")
(@arg MOCK_DATA: -m --("mock-data") +takes_value "Path to json file with mock data for template, defaults to none")
(@arg PRINT: -p --("print") "Prints config to STDOUT")
(@arg RENDER_DIR: -r --("render-dir") +takes_value default_value("./results") "Path to render templates")
(@arg NO_RENDER: -n --("no-render") "Don't write anything to disk, ignores --render-dir")
(@arg QUIET: -q --("no-verbose") --quiet
"Don't print any helper messages. When used with `--print` will only print config file")
)
)
(@subcommand ring =>
(about: "Commands relating to Habitat rings")
(aliases: &["r", "ri", "rin"])
(@setting ArgRequiredElseHelp)
(@subcommand key =>
(about: "Commands relating to Habitat ring keys")
(aliases: &["k", "ke"])
(@setting ArgRequiredElseHelp)
(@subcommand export =>
(about: "Outputs the latest ring key contents to stdout")
(aliases: &["e", "ex", "exp", "expo", "expor"])
(@arg RING: +required +takes_value "Ring key name")
(arg: arg_cache_key_path("Path to search for keys. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
(@subcommand import =>
(about: "Reads a stdin stream containing ring key contents and writes \
the key to disk")
(aliases: &["i", "im", "imp", "impo", "impor"])
(arg: arg_cache_key_path("Path to store imported keys. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
(@subcommand generate =>
(about: "Generates a Habitat ring key")
(aliases: &["g", "ge", "gen", "gene", "gener", "genera", "generat"])
(@arg RING: +required +takes_value "Ring key name")
(arg: arg_cache_key_path("Path to store generated keys. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
)
)
(subcommand: sup_commands(feature_flags))
(@subcommand svc =>
(about: "Commands relating to Habitat services")
(aliases: &["sv", "ser", "serv", "service"])
(@setting ArgRequiredElseHelp)
(@subcommand key =>
(about: "Commands relating to Habitat service keys")
(aliases: &["k", "ke"])
(@setting ArgRequiredElseHelp)
(@subcommand generate =>
(about: "Generates a Habitat service key")
(aliases: &["g", "ge", "gen", "gene", "gener", "genera", "generat"])
(@arg SERVICE_GROUP: +required +takes_value {valid_service_group}
"Target service group service.group[@organization] (ex: redis.default or foo.default@bazcorp)")
(@arg ORG: "The service organization")
(arg: arg_cache_key_path("Path to store generated keys. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
)
(subcommand: sub_svc_load().aliases(&["l", "lo", "loa"]))
(subcommand: sub_svc_start().aliases(&["star"]))
(subcommand: sub_svc_status().aliases(&["stat", "statu"]))
(subcommand: sub_svc_stop().aliases(&["sto"]))
(subcommand: sub_svc_unload().aliases(&["u", "un", "unl", "unlo", "unloa"]))
)
(@subcommand studio =>
(about: "Commands relating to Habitat Studios")
(aliases: &["stu", "stud", "studi"])
)
(@subcommand supportbundle =>
(about: "Create a tarball of Habitat Supervisor data to send to support")
(aliases: &["supp", "suppo", "suppor", "support-bundle"])
)
(@subcommand user =>
(about: "Commands relating to Habitat users")
(aliases: &["u", "us", "use"])
(@setting ArgRequiredElseHelp)
(@subcommand key =>
(about: "Commands relating to Habitat user keys")
(aliases: &["k", "ke"])
(@setting ArgRequiredElseHelp)
(@subcommand generate =>
(about: "Generates a Habitat user key")
(aliases: &["g", "ge", "gen", "gene", "gener", "genera", "generat"])
(@arg USER: +required +takes_value "Name of the user key")
(arg: arg_cache_key_path("Path to store generated keys. \
Default value is hab/cache/keys if root and .hab/cache/keys under the home \
directory otherwise."))
)
)
)
(subcommand: alias_apply)
(subcommand: alias_install)
(subcommand: alias_run())
(subcommand: alias_setup)
(subcommand: alias_start)
(subcommand: alias_stop)
(subcommand: alias_term())
(after_help: "\nALIASES:\
\n apply Alias for: 'config apply'\
\n install Alias for: 'pkg install'\
\n run Alias for: 'sup run'\
\n setup Alias for: 'cli setup'\
\n start Alias for: 'svc start'\
\n stop Alias for: 'svc stop'\
\n term Alias for: 'sup term'\
\n"
)
)
}
/// Top-level `run` alias for `hab sup run` (listed under ALIASES in the root
/// help text rather than as a visible subcommand).
fn alias_run() -> App<'static, 'static> {
    clap_app!(@subcommand run =>
        (about: "Run the Habitat Supervisor")
        // Hidden so it doesn't clutter the subcommand list; documented via after_help.
        (@setting Hidden)
    )
}
/// Top-level `term` alias for `hab sup term` (listed under ALIASES in the root
/// help text rather than as a visible subcommand).
fn alias_term() -> App<'static, 'static> {
    clap_app!(@subcommand term =>
        (about: "Gracefully terminate the Habitat Supervisor and all of its running services")
        // Hidden so it doesn't clutter the subcommand list; documented via after_help.
        (@setting Hidden)
    )
}
/// `hab cli setup`: first-run configuration of the CLI with sensible defaults.
fn sub_cli_setup() -> App<'static, 'static> {
    clap_app!(@subcommand setup =>
        (about: "Sets up the CLI with reasonable defaults.")
        (arg: arg_cache_key_path("Path to search for or create keys in. \
            Default value is hab/cache/keys if root and .hab/cache/keys under the home \
            directory otherwise."))
    )
}
/// Builds the `sup` subcommand tree shared by the `hab` and `hab-sup`
/// binaries; `feature_flags` is forwarded to `sub_sup_run` to gate
/// experimental options.
pub fn sup_commands(feature_flags: FeatureFlag) -> App<'static, 'static> {
    // Define all of the `hab sup *` subcommands in one place.
    // This removes the need to duplicate this in `hab-sup`.
    // The 'sup' App name here is significant for the `hab` binary as it
    // is inserted as a named subcommand. For the `hab-sup` binary, it is
    // the top-level App name (not a named subcommand) and therefore is not
    // significant since we override `usage` below.
    clap_app!(("sup") =>
        (about: "The Habitat Supervisor")
        (version: super::VERSION)
        (author: "\nAuthors: The Habitat Maintainers <humans@habitat.sh>\n")
        // set custom usage string, otherwise the binary
        // is displayed as the clap_app name, which may or may not be different.
        // see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
        (usage: "hab sup <SUBCOMMAND>")
        (@setting VersionlessSubcommands)
        (@setting SubcommandRequiredElseHelp)
        (subcommand: sub_sup_bash().aliases(&["b", "ba", "bas"]))
        (subcommand: sub_sup_depart().aliases(&["d", "de", "dep", "depa", "depart"]))
        (subcommand: sub_sup_run(feature_flags).aliases(&["r", "ru"]))
        (subcommand: sub_sup_secret().aliases(&["sec", "secr"]))
        (subcommand: sub_sup_sh().aliases(&[]))
        // `hab svc status` is canonical; `hab sup status` is kept for history.
        (subcommand: sub_svc_status().aliases(&["stat", "statu"]))
        (subcommand: sub_sup_term().aliases(&["ter"]))
    )
}
/// `hab cli completers`: generate shell command-completion scripts.
fn sub_cli_completers() -> App<'static, 'static> {
    let sub = clap_app!(@subcommand completers =>
        (about: "Creates command-line completers for your shell."));
    let supported_shells = ["bash", "fish", "zsh", "powershell"];
    // The clap_app! macro above is great but does not support the ability to specify a range of
    // possible values. We wanted to fail here with an unsupported shell instead of pushing off a
    // bad value to clap.
    sub.arg(Arg::with_name("SHELL").help("The name of the shell you want to generate the \
                                          command-completion. Supported Shells: bash, fish, zsh, \
                                          powershell")
                                   .short("s")
                                   .long("shell")
                                   .required(true)
                                   .takes_value(true)
                                   .possible_values(&supported_shells))
}
// We need a default_value so that the argument can be required and validated. We hide the
// default because it's a special value that will be internally mapped according to the
// user type. This is to allow us to apply consistent validation to the env var override.
/// Builds the shared `--cache-key-path` argument with caller-specific help
/// text. The env var override and the hidden default both pass through the
/// `non_empty` validator.
fn arg_cache_key_path(help_text: &'static str) -> Arg<'static, 'static> {
    Arg::with_name("CACHE_KEY_PATH").long("cache-key-path")
                                    .required(true)
                                    .validator(non_empty)
                                    .env(CACHE_KEY_PATH_ENV_VAR)
                                    .default_value(CACHE_KEY_PATH)
                                    .hide_default_value(true)
                                    // `help_text` is already `&'static str`; no extra borrow needed.
                                    .help(help_text)
}
/// Builds the shared optional `PKG_TARGET` positional argument, overridable
/// via the package-target environment variable.
fn arg_target() -> Arg<'static, 'static> {
    Arg::with_name("PKG_TARGET").takes_value(true)
                                .validator(valid_target)
                                .env(PACKAGE_TARGET_ENVVAR)
                                // Fixed: the "(default: ...)" clause was missing
                                // its closing parenthesis.
                                .help("A package target (ex: x86_64-windows) (default: system \
                                       appropriate target)")
}
/// `hab pkg build`: build a plan inside a Studio. The `--reuse`/`--docker`
/// flags are only added when a native Studio is supported on this platform.
fn sub_pkg_build() -> App<'static, 'static> {
    let mut sub = clap_app!(@subcommand build =>
        (about: "Builds a Plan using a Studio")
        (@arg HAB_ORIGIN_KEYS: -k --keys +takes_value
            "Installs secret origin keys (ex: \"unicorn\", \"acme,other,acme-ops\")")
        (@arg HAB_STUDIO_ROOT: -r --root +takes_value
            "Sets the Studio root (default: /hab/studios/<DIR_NAME>)")
        (@arg SRC_PATH: -s --src +takes_value
            "Sets the source path (default: $PWD)")
        (@arg PLAN_CONTEXT: +required +takes_value
            "A directory containing a plan file \
            or a `habitat/` directory which contains the plan file")
        (arg: arg_cache_key_path("Path to search for origin keys. \
            Default value is hab/cache/keys if root and .hab/cache/keys under the home \
            directory otherwise."))
    );
    // Only a truly native/local Studio can be reused--the Docker implementation will always be
    // ephemeral
    if studio::native_studio_support() {
        sub = sub.arg(Arg::with_name("REUSE").help("Reuses a previous Studio for the build \
                                                    (default: clean up before building)")
                                             .short("R")
                                             .long("reuse"))
                 .arg(Arg::with_name("DOCKER").help("Uses a Dockerized Studio for the build")
                                              .short("D")
                                              .long("docker"));
    }
    sub
}
/// `hab pkg download`: fetch artifacts (including dependencies and signing
/// keys) from Builder without installing them.
fn sub_pkg_download() -> App<'static, 'static> {
    // Return the expression directly; the previous `let sub = ...; sub`
    // binding added nothing (clippy: let_and_return).
    clap_app!(@subcommand download =>
        (about: "Download Habitat artifacts (including dependencies and keys) from Builder")
        (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
        (@arg BLDR_URL: --url -u +takes_value {valid_url}
            "Specify an alternate Builder endpoint. If not specified, the value will \
            be taken from the HAB_BLDR_URL environment variable if defined.")
        (@arg CHANNEL: --channel -c +takes_value default_value[stable] env(ChannelIdent::ENVVAR)
            "Download from the specified release channel. Overridden if channel is specified in toml file.")
        (@arg DOWNLOAD_DIRECTORY: --("download-directory") +takes_value "The path to store downloaded artifacts")
        (@arg PKG_IDENT_FILE: --file +takes_value +multiple {valid_ident_or_toml_file}
            "File with newline separated package identifiers, or TOML file (ending with .toml extension)")
        (@arg PKG_IDENT: +multiple {valid_ident}
            "One or more Habitat package identifiers (ex: acme/redis)")
        (@arg PKG_TARGET: --target -t +takes_value {valid_target}
            "Target architecture to fetch. E.g. x86_64-linux. Overridden if architecture is specified in toml file.")
        (@arg VERIFY: --verify
            "Verify package integrity after download (Warning: this can be slow)")
    )
}
/// `hab pkg install`: install a package from Builder or from a local `.hart`
/// artifact, optionally binlinking its binaries.
///
/// The `--offline` and `--ignore-local` flags are only exposed when the
/// corresponding feature flags are enabled.
fn sub_pkg_install(feature_flags: FeatureFlag) -> App<'static, 'static> {
    let mut sub = clap_app!(@subcommand install =>
        (about: "Installs a Habitat package from Builder or locally from a Habitat Artifact")
        (@arg BLDR_URL: --url -u +takes_value {valid_url}
            "Specify an alternate Builder endpoint. If not specified, the value will \
            be taken from the HAB_BLDR_URL environment variable if defined. (default: \
            https://bldr.habitat.sh)")
        (@arg CHANNEL: --channel -c +takes_value default_value[stable] env(ChannelIdent::ENVVAR)
            "Install from the specified release channel")
        (@arg PKG_IDENT_OR_ARTIFACT: +required +multiple
            "One or more Habitat package identifiers (ex: acme/redis) and/or filepaths \
            to a Habitat Artifact (ex: /home/acme-redis-3.0.7-21120102031201-x86_64-linux.hart)")
        (@arg BINLINK: -b --binlink
            "Binlink all binaries from installed package(s) into BINLINK_DIR")
        (@arg BINLINK_DIR: --("binlink-dir") +takes_value {non_empty} env(BINLINK_DIR_ENVVAR)
            default_value(DEFAULT_BINLINK_DIR) "Binlink all binaries from installed package(s) into BINLINK_DIR")
        (@arg FORCE: -f --force "Overwrite existing binlinks")
        (@arg AUTH_TOKEN: -z --auth +takes_value "Authentication token for Builder")
        (@arg IGNORE_INSTALL_HOOK: --("ignore-install-hook") "Do not run any install hooks")
    );
    // Removed the stray trailing semicolons after the `if` blocks
    // (clippy: redundant_semicolons).
    if feature_flags.contains(FeatureFlag::OFFLINE_INSTALL) {
        sub = sub.arg(Arg::with_name("OFFLINE").help("Install packages in offline mode")
                                               .long("offline"));
    }
    if feature_flags.contains(FeatureFlag::IGNORE_LOCAL) {
        sub = sub.arg(Arg::with_name("IGNORE_LOCAL").help("Do not use locally-installed \
                                                           packages when a corresponding \
                                                           package cannot be installed from \
                                                           Builder")
                                                    .long("ignore-local"));
    }
    sub
}
/// `hab config apply`: push a versioned configuration (from a file or stdin)
/// to a service group, optionally encrypted for a named user key.
fn sub_config_apply() -> App<'static, 'static> {
    clap_app!(@subcommand apply =>
        (about: "Sets a configuration to be shared by members of a Service Group")
        (@arg SERVICE_GROUP: +required {valid_service_group}
            "Target service group service.group[@organization] (ex: redis.default or foo.default@bazcorp)")
        (@arg VERSION_NUMBER: +required
            "A version number (positive integer) for this configuration (ex: 42)")
        (@arg FILE: {file_exists_or_stdin}
            "Path to local file on disk (ex: /tmp/config.toml, default: <stdin>)")
        (@arg USER: -u --user +takes_value "Name of a user key to use for encryption")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
        (arg: arg_cache_key_path("Path to search for encryption keys. \
            Default value is hab/cache/keys if root and .hab/cache/keys under the home \
            directory otherwise."))
    )
}
// the following sup related functions are
// public due to their utilization in `hab-sup`
// for consistency, all supervisor related clap subcommands are defined in this module
/// `hab sup depart`: permanently evict a Supervisor from the gossip ring by
/// member-id.
pub fn sub_sup_depart() -> App<'static, 'static> {
    clap_app!(@subcommand depart =>
        (about: "Depart a Supervisor from the gossip ring; kicking and banning the target \
            from joining again with the same member-id")
        (@arg MEMBER_ID: +required +takes_value "The member-id of the Supervisor to depart")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
    )
}
/// `hab sup secret`: manage the Supervisor's Control Gateway secret
/// (currently only `generate`).
pub fn sub_sup_secret() -> App<'static, 'static> {
    clap_app!(@subcommand secret =>
        (about: "Commands relating to a Habitat Supervisor's Control Gateway secret")
        (@setting ArgRequiredElseHelp)
        (@subcommand generate =>
            (about: "Generate a secret key to use as a Supervisor's Control Gateway secret")
        )
    )
}
/// `hab sup bash`: drop into an interactive Bash-like shell.
pub fn sub_sup_bash() -> App<'static, 'static> {
    clap_app!(@subcommand bash =>
        (about: "Start an interactive Bash-like shell")
        // set custom usage string, otherwise the binary
        // is displayed confusingly as `hab-sup`
        // see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
        (usage: "hab sup bash")
    )
}
/// `hab sup run`: the main Supervisor entry point. Defines gateway listen
/// addresses, gossip/ring options, TLS settings, and the optional initial
/// service to load; event-stream and shutdown-timeout options are appended by
/// shared helpers at the end.
///
/// `_feature_flags` is currently unused here but kept so the signature
/// matches the other feature-gated subcommand builders.
pub fn sub_sup_run(_feature_flags: FeatureFlag) -> App<'static, 'static> {
    let sub = clap_app!(@subcommand run =>
        (about: "Run the Habitat Supervisor")
        // set custom usage string, otherwise the binary
        // is displayed confusingly as `hab-sup`
        // see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
        (usage: "hab sup run [FLAGS] [OPTIONS] [--] [PKG_IDENT_OR_ARTIFACT]")
        (@arg LISTEN_GOSSIP: --("listen-gossip") env(GossipListenAddr::ENVVAR) default_value(GossipListenAddr::default_as_str()) {valid_socket_addr}
            "The listen address for the Gossip System Gateway.")
        (@arg LOCAL_GOSSIP_MODE: --("local-gossip-mode") conflicts_with("LISTEN_GOSSIP") conflicts_with("PEER") conflicts_with("PEER_WATCH_FILE")
            "Start the supervisor in local mode.")
        (@arg LISTEN_HTTP: --("listen-http") env(HttpListenAddr::ENVVAR) default_value(HttpListenAddr::default_as_str()) {valid_socket_addr}
            "The listen address for the HTTP Gateway.")
        (@arg HTTP_DISABLE: --("http-disable") -D
            "Disable the HTTP Gateway completely [default: false]")
        (@arg LISTEN_CTL: --("listen-ctl") env(ListenCtlAddr::ENVVAR) default_value(ListenCtlAddr::default_as_str()) {valid_socket_addr}
            "The listen address for the Control Gateway. If not specified, the value will \
            be taken from the HAB_LISTEN_CTL environment variable if defined. [default: 127.0.0.1:9632]")
        (@arg ORGANIZATION: --org +takes_value
            "The organization that the Supervisor and its subsequent services are part of.")
        (@arg PEER: --peer +takes_value +multiple
            "The listen address of one or more initial peers (IP[:PORT])")
        (@arg PERMANENT_PEER: --("permanent-peer") -I "If this Supervisor is a permanent peer")
        (@arg PEER_WATCH_FILE: --("peer-watch-file") +takes_value conflicts_with("PEER")
            "Watch this file for connecting to the ring"
        )
        (arg: arg_cache_key_path("Path to search for encryption keys. \
            Default value is hab/cache/keys if root and .hab/cache/keys under the home \
            directory otherwise."))
        (@arg RING: --ring -r env(RING_ENVVAR) conflicts_with("RING_KEY") {non_empty}
            "The name of the ring used by the Supervisor when running with wire encryption. \
            (ex: hab sup run --ring myring)")
        (@arg RING_KEY: --("ring-key") env(RING_KEY_ENVVAR) conflicts_with("RING") +hidden {non_empty}
            "The contents of the ring key when running with wire encryption. \
            (Note: This option is explicitly undocumented and for testing purposes only. Do not use it in a production system. Use the corresponding environment variable instead.)
            (ex: hab sup run --ring-key 'SYM-SEC-1 \
            foo-20181113185935 \
            GCrBOW6CCN75LMl0j2V5QqQ6nNzWm6and9hkKBSUFPI=')")
        (@arg CHANNEL: --channel +takes_value default_value[stable]
            "Receive Supervisor updates from the specified release channel")
        (@arg BLDR_URL: -u --url +takes_value {valid_url}
            "Specify an alternate Builder endpoint. If not specified, the value will \
            be taken from the HAB_BLDR_URL environment variable if defined. (default: \
            https://bldr.habitat.sh)")
        (@arg CONFIG_DIR: --("config-from") +takes_value {dir_exists}
            "Use package config from this path, rather than the package itself")
        (@arg AUTO_UPDATE: --("auto-update") -A "Enable automatic updates for the Supervisor \
            itself")
        (@arg KEY_FILE: --key +takes_value {file_exists} requires[CERT_FILE]
            "Used for enabling TLS for the HTTP gateway. Read private key from KEY_FILE. \
            This should be a RSA private key or PKCS8-encoded private key, in PEM format.")
        (@arg CERT_FILE: --certs +takes_value {file_exists} requires[KEY_FILE]
            "Used for enabling TLS for the HTTP gateway. Read server certificates from CERT_FILE. \
            This should contain PEM-format certificates in the right order (the first certificate \
            should certify KEY_FILE, the last should be a root CA).")
        (@arg CA_CERT_FILE: --("ca-certs") +takes_value {file_exists} requires[CERT_FILE] requires[KEY_FILE]
            "Used for enabling client-authentication with TLS for the HTTP gateway. Read CA certificate from CA_CERT_FILE. \
            This should contain PEM-format certificate that can be used to validate client requests.")
        // === Optional arguments to additionally load an initial service for the Supervisor
        (@arg PKG_IDENT_OR_ARTIFACT: +takes_value "Load the given Habitat package as part of \
            the Supervisor startup specified by a package identifier \
            (ex: core/redis) or filepath to a Habitat Artifact \
            (ex: /home/core-redis-3.0.7-21120102031201-x86_64-linux.hart).")
        (@arg GROUP: --group +takes_value
            "The service group; shared config and topology [default: default].")
        (@arg TOPOLOGY: --topology -t +takes_value possible_value[standalone leader]
            "Service topology; [default: none]")
        (@arg STRATEGY: --strategy -s +takes_value {valid_update_strategy}
            "The update strategy; [default: none] [values: none, at-once, rolling]")
        (@arg BIND: --bind +takes_value +multiple
            "One or more service groups to bind to a configuration")
        (@arg BINDING_MODE: --("binding-mode") +takes_value {valid_binding_mode}
            "Governs how the presence or absence of binds affects service startup. `strict` blocks \
            startup until all binds are present. [default: strict] [values: relaxed, strict]")
        (@arg VERBOSE: -v "Verbose output; shows file and line/column numbers")
        (@arg NO_COLOR: --("no-color") "Turn ANSI color off")
        (@arg JSON: --("json-logging") "Use structured JSON logging for the Supervisor. \
            Implies NO_COLOR")
        (@arg HEALTH_CHECK_INTERVAL: --("health-check-interval") -i +takes_value {valid_health_check_interval}
            "The interval (seconds) on which to run health checks [default: 30]")
        (@arg SYS_IP_ADDRESS: --("sys-ip-address") +takes_value {valid_ipv4_address}
            "The IPv4 address to use as the `sys.ip` template variable. If this \
            argument is not set, the supervisor tries to dynamically determine \
            an IP address. If that fails, the supervisor defaults to using \
            `127.0.0.1`.")
    );
    // Event-stream and shutdown-timeout flags are shared with other commands,
    // so they are appended via helpers rather than duplicated in the macro.
    let sub = add_event_stream_options(sub);
    add_shutdown_timeout_option(sub)
}
/// `hab sup sh`: drop into an interactive Bourne-like shell.
pub fn sub_sup_sh() -> App<'static, 'static> {
    clap_app!(@subcommand sh =>
        (about: "Start an interactive Bourne-like shell")
        // set custom usage string, otherwise the binary
        // is displayed confusingly as `hab-sup`
        // see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
        (usage: "hab sup sh")
    )
}
/// `hab sup term`: gracefully shut down the Supervisor and its services.
pub fn sub_sup_term() -> App<'static, 'static> {
    clap_app!(@subcommand term =>
        (about: "Gracefully terminate the Habitat Supervisor and all of its running services")
        // set custom usage string, otherwise the binary
        // is displayed confusingly as `hab-sup`
        // see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
        (usage: "hab sup term [OPTIONS]")
    )
}
/// `hab svc start`: start a service that is loaded but currently stopped.
fn sub_svc_start() -> App<'static, 'static> {
    clap_app!(@subcommand start =>
        (about: "Start a loaded, but stopped, Habitat service.")
        (@arg PKG_IDENT: +required +takes_value {valid_ident}
            "A Habitat package identifier (ex: core/redis)")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
    )
}
// `hab svc status` is the canonical location for this command, but we
// have historically used `hab sup status` as an alias.
/// `hab svc status`: query the status of one service (by identifier) or of
/// all supervised services when PKG_IDENT is omitted.
pub fn sub_svc_status() -> App<'static, 'static> {
    clap_app!(@subcommand status =>
        (about: "Query the status of Habitat services.")
        (@arg PKG_IDENT: +takes_value {valid_ident} "A Habitat package identifier (ex: core/redis)")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
    )
}
/// Looks up `name` in the parsed matches and, when present, parses it as `T`.
///
/// Returns `None` when the argument was not supplied. Panics if the value
/// fails to parse — callers rely on clap validators having already rejected
/// malformed input.
pub fn parse_optional_arg<T: FromStr>(name: &str, m: &ArgMatches) -> Option<T>
    where <T as std::str::FromStr>::Err: std::fmt::Debug
{
    match m.value_of(name) {
        Some(raw) => Some(raw.parse::<T>().expect("Valid argument")),
        None => None,
    }
}
/// `hab svc stop`: stop a running service; the shared shutdown-timeout flag
/// is appended by `add_shutdown_timeout_option`.
fn sub_svc_stop() -> App<'static, 'static> {
    let sub = clap_app!(@subcommand stop =>
        (about: "Stop a running Habitat service.")
        (@arg PKG_IDENT: +required +takes_value {valid_ident}
            "A Habitat package identifier (ex: core/redis)")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
    );
    add_shutdown_timeout_option(sub)
}
/// `hab svc load`: register a service spec with the Supervisor, installing
/// the package from Builder if needed. On Windows an extra `--password`
/// option is added for the service user; the shared shutdown-timeout flag is
/// appended by `add_shutdown_timeout_option`.
fn sub_svc_load() -> App<'static, 'static> {
    let mut sub = clap_app!(@subcommand load =>
        (about: "Load a service to be started and supervised by Habitat from a package \
            identifier. If an installed package doesn't satisfy the given package \
            identifier, a suitable package will be installed from Builder.")
        (@arg PKG_IDENT: +required +takes_value {valid_ident}
            "A Habitat package identifier (ex: core/redis)")
        (@arg CHANNEL: --channel +takes_value default_value[stable]
            "Receive package updates from the specified release channel")
        (@arg GROUP: --group +takes_value
            "The service group; shared config and topology [default: default].")
        (@arg BLDR_URL: -u --url +takes_value {valid_url}
            "Specify an alternate Builder endpoint. If not specified, the value will \
            be taken from the HAB_BLDR_URL environment variable if defined. (default: \
            https://bldr.habitat.sh)")
        (@arg TOPOLOGY: --topology -t +takes_value possible_value[standalone leader]
            "Service topology; [default: none]")
        (@arg STRATEGY: --strategy -s +takes_value {valid_update_strategy}
            "The update strategy; [default: none] [values: none, at-once, rolling]")
        (@arg BIND: --bind +takes_value +multiple
            "One or more service groups to bind to a configuration")
        (@arg BINDING_MODE: --("binding-mode") +takes_value {valid_binding_mode}
            "Governs how the presence or absence of binds affects service startup. `strict` blocks \
            startup until all binds are present. [default: strict] [values: relaxed, strict]")
        (@arg FORCE: --force -f "Load or reload an already loaded service. If the service \
            was previously loaded and running this operation will also restart the service")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
        (@arg HEALTH_CHECK_INTERVAL: --("health-check-interval") -i +takes_value {valid_health_check_interval}
            "The interval (seconds) on which to run health checks [default: 30]")
    );
    // Service-user passwords only exist on Windows.
    if cfg!(windows) {
        sub = sub.arg(Arg::with_name("PASSWORD").long("password")
                                                .takes_value(true)
                                                .help("Password of the service user"));
    }
    add_shutdown_timeout_option(sub)
}
/// `hab svc unload`: remove a service spec from the Supervisor, stopping the
/// service first if it is currently running.
fn sub_svc_unload() -> App<'static, 'static> {
    // The shared shutdown-timeout flag is appended by the helper so all
    // stop-like commands stay consistent.
    add_shutdown_timeout_option(clap_app!(@subcommand unload =>
        (about: "Unload a service loaded by the Habitat Supervisor. If the service is \
            running it will additionally be stopped.")
        (@arg PKG_IDENT: +required +takes_value {valid_ident}
            "A Habitat package identifier (ex: core/redis)")
        (@arg REMOTE_SUP: --("remote-sup") -r +takes_value
            "Address to a remote Supervisor's Control Gateway [default: 127.0.0.1:9632]")
    ))
}
/// Adds all event-stream-related options to the given clap `App`.
///
/// Covers: application/environment names attached to every event, the
/// connection-timeout behavior, the messaging-server URL and site, the
/// Automate auth token, arbitrary key-value metadata, and the event
/// stream server's TLS certificate.
fn add_event_stream_options(app: App<'static, 'static>) -> App<'static, 'static> {
    // Create shorter alias so formatting works correctly
    type ConnectMethod = EventStreamConnectMethod;
    app.arg(Arg::with_name("EVENT_STREAM_APPLICATION").help("The name of the application for \
                                                             event stream purposes. This \
                                                             will be attached to all events \
                                                             generated by this Supervisor.")
                                                      .long("event-stream-application")
                                                      .required(false)
                                                      .takes_value(true)
                                                      // reject empty strings (also guards env overrides)
                                                      .validator(non_empty))
       .arg(Arg::with_name("EVENT_STREAM_ENVIRONMENT").help("The name of the environment for \
                                                             event stream purposes. This \
                                                             will be attached to all events \
                                                             generated by this Supervisor.")
                                                      .long("event-stream-environment")
                                                      .required(false)
                                                      .takes_value(true)
                                                      .validator(non_empty))
       // Despite the `ConnectMethod` name, this option is a numeric timeout;
       // '0' means "start immediately, don't wait for the stream".
       .arg(Arg::with_name(ConnectMethod::ARG_NAME).help("How long in seconds to wait for an \
                                                          event stream connection before exiting \
                                                          the supervisor. Set to '0' to \
                                                          immediately start the supervisor and \
                                                          continue running regardless of the \
                                                          event stream status.")
                                                   .long("event-stream-connect-timeout")
                                                   .required(false)
                                                   .takes_value(true)
                                                   .env(ConnectMethod::ENVVAR)
                                                   .default_value("0")
                                                   .validator(valid_numeric::<u64>))
       // Supplying a URL only makes sense with app, env and token, hence
       // `requires_all`; the tests below pin this behavior.
       .arg(Arg::with_name("EVENT_STREAM_URL").help("The event stream connection string \
                                                     (host:port) used by this Supervisor to send \
                                                     events to a messaging server.")
                                              .long("event-stream-url")
                                              .required(false)
                                              .requires_all(&[
                                                  "EVENT_STREAM_APPLICATION",
                                                  "EVENT_STREAM_ENVIRONMENT",
                                                  AutomateAuthToken::ARG_NAME
                                              ])
                                              .takes_value(true)
                                              .validator(nats_address))
       .arg(Arg::with_name("EVENT_STREAM_SITE").help("The name of the site where this Supervisor \
                                                      is running. It is used for event stream \
                                                      purposes.")
                                               .long("event-stream-site")
                                               .required(false)
                                               .takes_value(true)
                                               .validator(non_empty))
       .arg(Arg::with_name(AutomateAuthToken::ARG_NAME).help("An authentication token for \
                                                              streaming events to an messaging \
                                                              server.")
                                                       .long("event-stream-token")
                                                       .required(false)
                                                       .takes_value(true)
                                                       .validator(AutomateAuthToken::validate)
                                                       .env(AutomateAuthToken::ENVVAR))
       // May be given multiple times; each value must be a `key=value` pair.
       .arg(Arg::with_name(EventStreamMetadata::ARG_NAME).help("An arbitrary key-value pair to \
                                                                add to each event generated by \
                                                                this Supervisor")
                                                         .long("event-meta")
                                                         .takes_value(true)
                                                         .multiple(true)
                                                         .validator(EventStreamMetadata::validate))
       .arg(Arg::with_name("EVENT_STREAM_SERVER_CERTIFICATE").help("The path to the event stream \
                                                                    server's certificate in PEM \
                                                                    format used to establish a \
                                                                    TLS connection")
                                                             .long("event-stream-server-certificate")
                                                             .required(false)
                                                             .takes_value(true)
                                                             .validator(EventStreamServerCertificate::validate))
}
// CLAP Validation Functions
////////////////////////////////////////////////////////////////////////
/// CLAP validator: the value must parse as a `BindingMode`.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_binding_mode(val: String) -> result::Result<(), String> {
    habitat_sup_protocol::types::BindingMode::from_str(&val)
        .map(|_| ())
        .map_err(|_| format!("Binding mode: '{}' is not valid", &val))
}
/// CLAP validator: the value must parse as a `PairType` (public or secret).
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_pair_type(val: String) -> result::Result<(), String> {
    if PairType::from_str(&val).is_ok() {
        Ok(())
    } else {
        Err(format!("PAIR_TYPE: {} is invalid, must be one of \
                     (public, secret)",
                    &val))
    }
}
/// CLAP validator: the value must be a well-formed service group name.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_service_group(val: String) -> result::Result<(), String> {
    match ServiceGroup::validate(&val) {
        Ok(_) => Ok(()),
        Err(e) => Err(e.to_string()),
    }
}
/// CLAP validator: the value must name an existing directory.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn dir_exists(val: String) -> result::Result<(), String> {
    if !Path::new(&val).is_dir() {
        return Err(format!("Directory: '{}' cannot be found", &val));
    }
    Ok(())
}
/// CLAP validator: the value must name an existing regular file.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn file_exists(val: String) -> result::Result<(), String> {
    if !Path::new(&val).is_file() {
        return Err(format!("File: '{}' cannot be found", &val));
    }
    Ok(())
}
/// CLAP validator: accepts `-` (the conventional CLI marker for "read
/// from standard input") or the path of an existing file.
///
/// Consistency fix: every other validator in this file carries the
/// clippy allowance for the by-value `String` signature that CLAP
/// requires; this one was missing it.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn file_exists_or_stdin(val: String) -> result::Result<(), String> {
    if val == "-" {
        // stdin sentinel: nothing on disk to check.
        Ok(())
    } else {
        file_exists(val)
    }
}
/// CLAP validator: the value must parse as an IPv4 address.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_ipv4_address(val: String) -> result::Result<(), String> {
    Ipv4Addr::from_str(&val).map(|_| ()).map_err(|_| {
        format!("'{}' is not a valid IPv4 address, eg: \
                 '192.168.1.105'",
                val)
    })
}
/// CLAP validator: the value must be a full socket address (IP and port).
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_socket_addr(val: String) -> result::Result<(), String> {
    if SocketAddr::from_str(&val).is_ok() {
        Ok(())
    } else {
        Err("Socket address should include both IP and port, eg: '0.0.0.0:9700'".to_string())
    }
}
/// CLAP validator: the value must be a parseable absolute URL.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_url(val: String) -> result::Result<(), String> {
    Url::parse(&val).map(|_| ())
                    .map_err(|_| format!("URL: '{}' is not valid", &val))
}
/// CLAP validator: the value must parse as the numeric type `T`.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_numeric<T: FromStr>(val: String) -> result::Result<(), String> {
    if val.parse::<T>().is_err() {
        return Err(format!("'{}' is not a valid number", &val));
    }
    Ok(())
}
/// CLAP validator: the value must parse as a `HealthCheckInterval`.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_health_check_interval(val: String) -> result::Result<(), String> {
    HealthCheckInterval::from_str(&val).map(|_| ()).map_err(|e| {
        format!("'{}' is not a valid value for health check \
                 interval: {}",
                val, e)
    })
}
/// CLAP validator: the value must parse as an `UpdateStrategy`.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_update_strategy(val: String) -> result::Result<(), String> {
    if habitat_sup_protocol::types::UpdateStrategy::from_str(&val).is_err() {
        return Err(format!("Update strategy: '{}' is not valid", &val));
    }
    Ok(())
}
/// CLAP validator: the value must be a (possibly partial) package identifier.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_ident(val: String) -> result::Result<(), String> {
    PackageIdent::from_str(&val).map(|_| ()).map_err(|_| {
        format!("'{}' is not valid. Package identifiers have the \
                 form origin/name[/version[/release]]",
                &val)
    })
}
/// CLAP validator: accepts either a TOML file path or a file of package
/// identifiers.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_ident_or_toml_file(val: String) -> result::Result<(), String> {
    // TOML files are accepted without inspection; fully parsing and
    // checking the file here would be excessive for CLI validation.
    if is_toml_file(&val) { Ok(()) } else { valid_ident_file(val) }
}
/// CLAP validator: the value must be a file whose contents parse as
/// package identifiers.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_ident_file(val: String) -> result::Result<(), String> {
    match file_into_idents(&val) {
        Ok(_) => Ok(()),
        Err(e) => Err(e.to_string()),
    }
}
/// CLAP validator: the value must be a supported package target
/// (architecture-platform pair).
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_target(val: String) -> result::Result<(), String> {
    if PackageTarget::from_str(&val).is_ok() {
        return Ok(());
    }
    // Only compute the list of supported targets on the error path.
    let targets: Vec<_> = PackageTarget::targets().map(std::convert::AsRef::as_ref)
                                                  .collect();
    Err(format!("'{}' is not valid. Valid targets are in the form \
                 architecture-platform (currently Habitat allows \
                 the following: {})",
                &val,
                targets.join(", ")))
}
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_fully_qualified_ident(val: String) -> result::Result<(), String> {
match PackageIdent::from_str(&val) {
Ok(ref ident) if ident.fully_qualified() => Ok(()),
_ => {
Err(format!("'{}' is not valid. Fully qualified package \
identifiers have the form \
origin/name/version/release",
&val))
}
}
}
/// CLAP validator: the value must be a well-formed origin name.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_origin(val: String) -> result::Result<(), String> {
    if !ident::is_valid_origin_name(&val) {
        return Err(format!("'{}' is not valid. A valid origin contains a-z, \
                            0-9, and _ or - after the first character",
                           &val));
    }
    Ok(())
}
/// CLAP validator: the value must parse as a `ShutdownTimeout`.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn valid_shutdown_timeout(val: String) -> result::Result<(), String> {
    ShutdownTimeout::from_str(&val).map(|_| ()).map_err(|e| {
        format!("'{}' is not a valid value for shutdown timeout: \
                 {}",
                val, e)
    })
}
/// CLAP validator: the value must parse as a NATS event stream address.
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn nats_address(val: String) -> result::Result<(), String> {
    if NatsAddress::from_str(&val).is_err() {
        return Err(format!("'{}' is not a valid event stream address", val));
    }
    Ok(())
}
/// CLAP validator: rejects empty values (which can sneak in via
/// environment-variable overrides).
#[allow(clippy::needless_pass_by_value)] // Signature required by CLAP
fn non_empty(val: String) -> result::Result<(), String> {
    if val.is_empty() {
        return Err("must not be empty (check env overrides)".to_string());
    }
    Ok(())
}
/// Adds extra configuration option for shutting down a service with a customized timeout.
fn add_shutdown_timeout_option(app: App<'static, 'static>) -> App<'static, 'static> {
    let timeout = Arg::with_name("SHUTDOWN_TIMEOUT")
        .long("shutdown-timeout")
        .takes_value(true)
        .validator(valid_shutdown_timeout)
        .help("The number of seconds after sending a \
               shutdown signal to wait before killing a \
               service process (default: set in plan)");
    app.arg(timeout)
}
////////////////////////////////////////////////////////////////////////
// Unit tests for the CLI definitions above. These only exercise clap
// parsing and validation; no Supervisor is started.
#[cfg(test)]
mod tests {
    // Empty feature-flag set so tests parse the default CLI surface.
    fn no_feature_flags() -> FeatureFlag { FeatureFlag::empty() }

    use super::*;

    // Basic `hab sup` subcommand dispatch.
    mod sup_commands {
        use super::*;
        use clap::ErrorKind;

        #[test]
        fn sup_subcommand_short_help() {
            let r = get(no_feature_flags()).get_matches_from_safe(vec!["hab", "sup", "-h"]);
            assert!(r.is_err());
            // not `ErrorKind::InvalidSubcommand`
            assert_eq!(r.unwrap_err().kind, ErrorKind::HelpDisplayed);
        }

        #[test]
        fn sup_subcommand_run_with_peer() {
            let r = get(no_feature_flags()).get_matches_from_safe(vec!["hab", "sup", "run",
                                                                       "--peer", "1.1.1.1"]);
            assert!(r.is_ok());

            let matches = r.expect("Error while getting matches");
            // validate `sup` subcommand
            assert_eq!(matches.subcommand_name(), Some("sup"));
            let (_, sup_matches) = matches.subcommand();
            let sup_matches = sup_matches.expect("Error while getting sup matches");
            assert_eq!(sup_matches.subcommand_name(), Some("run"));
            let (_, run_matches) = sup_matches.subcommand();
            let run_matches = run_matches.expect("Error while getting run matches");
            assert_eq!(run_matches.value_of("PEER"), Some("1.1.1.1"));
        }
    }

    // Behavior of the options added by `add_event_stream_options`.
    mod event_stream_feature {
        use super::*;

        // `--event-stream-url` requires the application, environment and
        // token options (see `requires_all` on EVENT_STREAM_URL).
        #[test]
        fn app_and_env_and_token_options_required_if_url_option() {
            let matches =
                sub_sup_run(no_feature_flags()).get_matches_from_safe(vec!["run",
                                                                           "--event-stream-url",
                                                                           "127.0.0.1:4222",]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::MissingRequiredArgument);

            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::MissingRequiredArgument);

            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::MissingRequiredArgument);

            // All four options together parse successfully.
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_ok());
        }

        #[test]
        fn app_option_must_take_a_value() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::EmptyValue);
            assert_eq!(error.info,
                       Some(vec!["EVENT_STREAM_APPLICATION".to_string()]));
        }

        // The `non_empty` validator rejects explicit "" values.
        #[test]
        fn app_option_cannot_be_empty() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::ValueValidation);
        }

        #[test]
        fn env_option_must_take_a_value() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::EmptyValue);
            assert_eq!(error.info,
                       Some(vec!["EVENT_STREAM_ENVIRONMENT".to_string()]));
        }

        #[test]
        fn env_option_cannot_be_empty() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::ValueValidation);
        }

        // `--event-meta` is declared `.multiple(true)`; values accumulate
        // in order.
        #[test]
        fn event_meta_can_be_repeated() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-meta",
                "foo=bar",
                "--event-meta",
                "blah=boo",
                "--event-meta",
                "monkey=pants",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_ok());
            let matches = matches.unwrap();
            let meta = matches.values_of(EventStreamMetadata::ARG_NAME)
                              .expect("didn't have metadata")
                              .collect::<Vec<_>>();
            assert_eq!(meta, ["foo=bar", "blah=boo", "monkey=pants"]);
        }

        #[test]
        fn event_meta_cannot_be_empty() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-meta",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_err());
            assert_eq!(matches.unwrap_err().kind, clap::ErrorKind::EmptyValue);
        }

        // The next three tests pin `EventStreamMetadata::validate`:
        // values must be `key=value` with non-empty key and value.
        #[test]
        fn event_meta_must_have_an_equal_sign() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-meta",
                "foobar",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_err());
            assert_eq!(matches.unwrap_err().kind, clap::ErrorKind::ValueValidation);
        }

        #[test]
        fn event_meta_key_cannot_be_empty() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-meta",
                "=bar",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_err());
            assert_eq!(matches.unwrap_err().kind, clap::ErrorKind::ValueValidation);
        }

        #[test]
        fn event_meta_value_cannot_be_empty() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-meta",
                "foo=",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_err());
            assert_eq!(matches.unwrap_err().kind, clap::ErrorKind::ValueValidation);
        }

        #[test]
        fn token_option_must_take_a_value() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-url",
                "127.0.0.1:4222",
                "--event-stream-token",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::EmptyValue);
            assert_eq!(error.info,
                       Some(vec![AutomateAuthToken::ARG_NAME.to_string()]));
        }

        #[test]
        fn token_option_cannot_be_empty() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "",
                "--event-stream-url",
                "127.0.0.1:4222",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::ValueValidation);
        }

        #[test]
        fn site_option_must_take_a_value() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
                "--event-stream-site",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::EmptyValue);
            assert_eq!(error.info, Some(vec!["EVENT_STREAM_SITE".to_string()]));
        }

        #[test]
        fn site_option_cannot_be_empty() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "127.0.0.1:4222",
                "--event-stream-site",
                "",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::ValueValidation);
        }

        #[test]
        fn url_option_must_take_a_value() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::EmptyValue);
            assert_eq!(error.info, Some(vec!["EVENT_STREAM_URL".to_string()]));
        }

        // The `nats_address` validator rejects "".
        #[test]
        fn url_option_cannot_be_empty() {
            let matches = sub_sup_run(no_feature_flags()).get_matches_from_safe(vec![
                "run",
                "--event-stream-application",
                "MY_APP",
                "--event-stream-environment",
                "MY_ENV",
                "--event-stream-token",
                "MY_TOKEN",
                "--event-stream-url",
                "",
            ]);
            assert!(matches.is_err());
            let error = matches.unwrap_err();
            assert_eq!(error.kind, clap::ErrorKind::ValueValidation);
        }
    }
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The `Constellation`, Servo's Grand Central Station
//!
//! The constellation tracks all information kept globally by the
//! browser engine, which includes:
//!
//! * The set of all `EventLoop` objects. Each event loop is
//! the constellation's view of a script thread. The constellation
//! interacts with a script thread by message-passing.
//!
//! * The set of all `Pipeline` objects. Each pipeline gives the
//! constellation's view of a `Window`, with its script thread and
//! layout threads. Pipelines may share script threads, but not
//! layout threads.
//!
//! * The set of all `Frame` objects. Each frame gives the constellation's
//! view of a browsing context. Each browsing context stores an independent
//! session history, created by navigation of that frame. The session
//! history can be traversed, for example by the back and forwards UI,
//! so each session history maintains a list of past and future pipelines,
//! as well as the current active pipeline.
//!
//! There are two kinds of frames: top-level frames (for example tabs
//! in a browser UI), and nested frames (typically caused by `iframe`
//! elements). Frames have a hierarchy (typically caused by `iframe`s
//! containing `iframe`s), giving rise to a frame tree with a root frame.
//! The logical relationship between these types is:
//!
//! ```
//! +---------+ +------------+ +-------------+
//! | Frame | --parent?--> | Pipeline | --event_loop--> | EventLoop |
//! | | --current--> | | | |
//! | | --prev*----> | | <---pipeline*-- | |
//! | | --next*----> | | +-------------+
//! | | | |
//! | | <----frame-- | |
//! +---------+ +------------+
//! ```
//!
//! Complicating matters, there are also mozbrowser iframes, which are top-level
//! frames with a parent.
//!
//! The constellation also maintains channels to threads, including:
//!
//! * The script and layout threads.
//! * The graphics compositor.
//! * The font cache, image cache, and resource manager, which load
//! and cache shared fonts, images, or other resources.
//! * The service worker manager.
//! * The devtools, debugger and webdriver servers.
//!
//! The constellation passes messages between the threads, and updates its state
//! to track the evolving state of the frame tree.
//!
//! The constellation acts as a logger, tracking any `warn!` messages from threads,
//! and converting any `error!` or `panic!` into a crash report, which is filed
//! using an appropriate `mozbrowsererror` event.
//!
//! Since there is only one constellation, and its responsibilities include crash reporting,
//! it is very important that it does not panic.
use backtrace::Backtrace;
use bluetooth_traits::BluetoothRequest;
use canvas::canvas_paint_thread::CanvasPaintThread;
use canvas::webgl_paint_thread::WebGLPaintThread;
use canvas_traits::CanvasMsg;
use compositing::SendableFrameTree;
use compositing::compositor_thread::CompositorProxy;
use compositing::compositor_thread::Msg as ToCompositorMsg;
use debugger;
use devtools_traits::{ChromeToDevtoolsControlMsg, DevtoolsControlMsg};
use euclid::scale_factor::ScaleFactor;
use euclid::size::{Size2D, TypedSize2D};
use event_loop::EventLoop;
use frame::{Frame, FrameChange, FrameTreeIterator, FullFrameTreeIterator};
use gfx::font_cache_thread::FontCacheThread;
use gfx_traits::Epoch;
use ipc_channel::ipc::{self, IpcSender};
use ipc_channel::router::ROUTER;
use layout_traits::LayoutThreadFactory;
use log::{Log, LogLevel, LogLevelFilter, LogMetadata, LogRecord};
use msg::constellation_msg::{FrameId, FrameType, PipelineId};
use msg::constellation_msg::{Key, KeyModifiers, KeyState};
use msg::constellation_msg::{PipelineNamespace, PipelineNamespaceId, TraversalDirection};
use net_traits::{self, IpcSend, ResourceThreads};
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::pub_domains::reg_host;
use net_traits::storage_thread::{StorageThreadMsg, StorageType};
use offscreen_gl_context::{GLContextAttributes, GLLimits};
use pipeline::{InitialPipelineState, Pipeline};
use profile_traits::mem;
use profile_traits::time;
use rand::{Rng, SeedableRng, StdRng, random};
use script_traits::{AnimationState, AnimationTickType, CompositorEvent};
use script_traits::{ConstellationControlMsg, ConstellationMsg as FromCompositorMsg};
use script_traits::{DocumentState, LayoutControlMsg, LoadData};
use script_traits::{IFrameLoadInfo, IFrameLoadInfoWithData, IFrameSandboxState, TimerEventRequest};
use script_traits::{LayoutMsg as FromLayoutMsg, ScriptMsg as FromScriptMsg, ScriptThreadFactory};
use script_traits::{LogEntry, ServiceWorkerMsg, webdriver_msg};
use script_traits::{MozBrowserErrorType, MozBrowserEvent, WebDriverCommandMsg, WindowSizeData};
use script_traits::{SWManagerMsg, ScopeThings, WindowSizeType};
use servo_config::opts;
use servo_config::prefs::PREFS;
use servo_remutex::ReentrantMutex;
use servo_url::ServoUrl;
use std::borrow::ToOwned;
use std::collections::{HashMap, VecDeque};
use std::io::Error as IOError;
use std::iter::once;
use std::marker::PhantomData;
use std::process;
use std::rc::{Rc, Weak};
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender, channel};
use std::thread;
use std::time::Instant;
use style_traits::PagePx;
use style_traits::cursor::Cursor;
use style_traits::viewport::ViewportConstraints;
use timer_scheduler::TimerScheduler;
use webrender_traits;
/// The `Constellation` itself. In the servo browser, there is one
/// constellation, which maintains all of the browser global data.
/// In embedded applications, there may be more than one constellation,
/// which are independent of each other.
///
/// The constellation may be in a different process from the pipelines,
/// and communicates using IPC.
///
/// It is parameterized over a `LayoutThreadFactory` (`LTF`) and a
/// `ScriptThreadFactory` (`STF`) (which in practice are implemented by
/// `LayoutThread` in the `layout` crate, and `ScriptThread` in
/// the `script` crate). Script and layout communicate using a `Message`
/// type.
pub struct Constellation<Message, LTF, STF> {
    /// An IPC channel for script threads to send messages to the constellation.
    /// This is the script threads' view of `script_receiver`.
    script_sender: IpcSender<FromScriptMsg>,

    /// A channel for the constellation to receive messages from script threads.
    /// This is the constellation's view of `script_sender`.
    script_receiver: Receiver<FromScriptMsg>,

    /// An IPC channel for layout threads to send messages to the constellation.
    /// This is the layout threads' view of `layout_receiver`.
    layout_sender: IpcSender<FromLayoutMsg>,

    /// A channel for the constellation to receive messages from layout threads.
    /// This is the constellation's view of `layout_sender`.
    layout_receiver: Receiver<FromLayoutMsg>,

    /// A channel for the constellation to receive messages from the compositor thread.
    compositor_receiver: Receiver<FromCompositorMsg>,

    /// A channel (the implementation of which is port-specific) for the
    /// constellation to send messages to the compositor thread.
    compositor_proxy: Box<CompositorProxy>,

    /// Channels for the constellation to send messages to the public
    /// resource-related threads. There are two groups of resource
    /// threads: one for public browsing, and one for private
    /// browsing.
    public_resource_threads: ResourceThreads,

    /// Channels for the constellation to send messages to the private
    /// resource-related threads. There are two groups of resource
    /// threads: one for public browsing, and one for private
    /// browsing.
    private_resource_threads: ResourceThreads,

    /// A channel for the constellation to send messages to the image
    /// cache thread.
    image_cache_thread: ImageCacheThread,

    /// A channel for the constellation to send messages to the font
    /// cache thread.
    font_cache_thread: FontCacheThread,

    /// A channel for the constellation to send messages to the
    /// debugger thread.
    debugger_chan: Option<debugger::Sender>,

    /// A channel for the constellation to send messages to the
    /// devtools thread.
    devtools_chan: Option<Sender<DevtoolsControlMsg>>,

    /// An IPC channel for the constellation to send messages to the
    /// bluetooth thread.
    bluetooth_thread: IpcSender<BluetoothRequest>,

    /// An IPC channel for the constellation to send messages to the
    /// Service Worker Manager thread.
    swmanager_chan: Option<IpcSender<ServiceWorkerMsg>>,

    /// An IPC channel for Service Worker Manager threads to send
    /// messages to the constellation. This is the SW Manager thread's
    /// view of `swmanager_receiver`.
    swmanager_sender: IpcSender<SWManagerMsg>,

    /// A channel for the constellation to receive messages from the
    /// Service Worker Manager thread. This is the constellation's view of
    /// `swmanager_sender`.
    swmanager_receiver: Receiver<SWManagerMsg>,

    /// A channel for the constellation to send messages to the
    /// time profiler thread.
    time_profiler_chan: time::ProfilerChan,

    /// A channel for the constellation to send messages to the
    /// memory profiler thread.
    mem_profiler_chan: mem::ProfilerChan,

    /// A channel for the constellation to send messages to the
    /// timer thread.
    scheduler_chan: IpcSender<TimerEventRequest>,

    /// A channel for the constellation to send messages to the
    /// Webrender thread.
    webrender_api_sender: webrender_traits::RenderApiSender,

    /// The set of all event loops in the browser. We generate a new
    /// event loop for each registered domain name (aka eTLD+1) in
    /// each top-level frame. We store the event loops in a map
    /// indexed by top-level frame id (as a `FrameId`) and registered
    /// domain name (as a `String`) to event loops. This double
    /// indirection ensures that separate tabs do not share event
    /// loops, even if the same domain is loaded in each.
    /// It is important that scripts with the same eTLD+1
    /// share an event loop, since they can use `document.domain`
    /// to become same-origin, at which point they can share DOM objects.
    event_loops: HashMap<FrameId, HashMap<String, Weak<EventLoop>>>,

    /// The set of all the pipelines in the browser.
    /// (See the `pipeline` module for more details.)
    pipelines: HashMap<PipelineId, Pipeline>,

    /// The set of all the frames in the browser.
    frames: HashMap<FrameId, Frame>,

    /// When a navigation is performed, we do not immediately update
    /// the frame tree, instead we ask the event loop to begin loading
    /// the new document, and do not update the frame tree until the
    /// document is active. Between starting the load and it activating,
    /// we store a `FrameChange` object for the navigation in progress.
    pending_frames: Vec<FrameChange>,

    /// The root frame.
    root_frame_id: FrameId,

    /// The currently focused pipeline for key events.
    focus_pipeline_id: Option<PipelineId>,

    /// Pipeline IDs are namespaced in order to avoid name collisions,
    /// and the namespaces are allocated by the constellation.
    next_pipeline_namespace_id: PipelineNamespaceId,

    /// The size of the top-level window.
    window_size: WindowSizeData,

    /// Bits of state used to interact with the webdriver implementation
    webdriver: WebDriverData,

    /// Document states for loaded pipelines (used only when writing screenshots).
    document_states: HashMap<PipelineId, DocumentState>,

    /// Are we shutting down?
    shutting_down: bool,

    /// Have we seen any warnings? Hopefully always empty!
    /// The buffer contains `(thread_name, reason)` entries.
    handled_warnings: VecDeque<(Option<String>, String)>,

    /// The random number generator and probability for closing pipelines.
    /// This is for testing the hardening of the constellation.
    random_pipeline_closure: Option<(StdRng, f32)>,

    /// Phantom data that keeps the Rust type system happy.
    phantom: PhantomData<(Message, LTF, STF)>,
}
/// State needed to construct a constellation.
pub struct InitialConstellationState {
    /// A channel through which messages can be sent to the compositor.
    pub compositor_proxy: Box<CompositorProxy + Send>,

    /// A channel to the debugger, if applicable.
    pub debugger_chan: Option<debugger::Sender>,

    /// A channel to the developer tools, if applicable.
    pub devtools_chan: Option<Sender<DevtoolsControlMsg>>,

    /// A channel to the bluetooth thread.
    pub bluetooth_thread: IpcSender<BluetoothRequest>,

    /// A channel to the image cache thread.
    pub image_cache_thread: ImageCacheThread,

    /// A channel to the font cache thread.
    pub font_cache_thread: FontCacheThread,

    /// A channel to the resource threads used for public browsing.
    pub public_resource_threads: ResourceThreads,

    /// A channel to the resource threads used for private browsing.
    pub private_resource_threads: ResourceThreads,

    /// A channel to the time profiler thread.
    pub time_profiler_chan: time::ProfilerChan,

    /// A channel to the memory profiler thread.
    pub mem_profiler_chan: mem::ProfilerChan,

    /// Webrender API.
    pub webrender_api_sender: webrender_traits::RenderApiSender,

    /// Whether the constellation supports the clipboard.
    /// TODO: this field is not used, remove it?
    pub supports_clipboard: bool,
}
/// Data needed for webdriver
struct WebDriverData {
    // Pipeline whose load webdriver is waiting on, with the channel to
    // notify when the load completes.
    load_channel: Option<(PipelineId, IpcSender<webdriver_msg::LoadStatus>)>,
    // Channel to notify webdriver once a requested window resize has happened.
    resize_channel: Option<IpcSender<WindowSizeData>>,
}
impl WebDriverData {
    /// Creates a `WebDriverData` with no pending load or resize requests.
    fn new() -> WebDriverData {
        WebDriverData { resize_channel: None,
                        load_channel: None }
    }
}
/// When we are running reftests, we save an image to compare against a reference.
/// This enum gives the possible states of preparing such an image.
#[derive(Debug, PartialEq)]
enum ReadyToSave {
    /// There is no root frame yet.
    NoRootFrame,
    /// Navigations are still pending.
    PendingFrames,
    /// A web font has not yet loaded.
    WebFontNotLoaded,
    /// A document is still loading.
    DocumentLoading,
    /// An epoch mismatch was detected (presumably between script and
    /// layout state — confirm against the usage site).
    EpochMismatch,
    /// A pipeline was not found.
    PipelineUnknown,
    /// The image is ready to be saved.
    Ready,
}
/// When we are exiting a pipeline, we can either force exiting or not.
/// A normal exit waits for the compositor to update its state before
/// exiting, and delegates layout exit to script. A forced exit does
/// not notify the compositor, and exits layout without involving script.
#[derive(Clone, Copy)]
enum ExitPipelineMode {
    /// Wait for the compositor; let script shut layout down.
    Normal,
    /// Skip the compositor; shut layout down without script.
    Force,
}
/// The constellation uses logging to perform crash reporting.
/// The constellation receives all `warn!`, `error!` and `panic!` messages,
/// and generates a crash report when it receives a panic.
/// A logger directed at the constellation from content processes
#[derive(Clone)]
pub struct FromScriptLogger {
/// A channel to the constellation
// Wrapped in a ReentrantMutex so the logger can be cloned across threads
// and safely used even if logging re-enters on the same thread.
pub constellation_chan: Arc<ReentrantMutex<IpcSender<FromScriptMsg>>>,
}
impl FromScriptLogger {
    /// Create a new constellation logger.
    pub fn new(constellation_chan: IpcSender<FromScriptMsg>) -> FromScriptLogger {
        let chan = Arc::new(ReentrantMutex::new(constellation_chan));
        FromScriptLogger { constellation_chan: chan }
    }

    /// The maximum log level the constellation logger is interested in.
    pub fn filter(&self) -> LogLevelFilter {
        LogLevelFilter::Warn
    }
}
impl Log for FromScriptLogger {
    fn enabled(&self, metadata: &LogMetadata) -> bool {
        metadata.level() <= LogLevel::Warn
    }

    fn log(&self, record: &LogRecord) {
        // Only warnings, errors and panics are forwarded; everything else
        // is dropped by log_entry.
        let entry = match log_entry(record) {
            Some(entry) => entry,
            None => return,
        };
        debug!("Sending log entry {:?}.", entry);
        let frame_id = FrameId::installed();
        let name = thread::current().name().map(ToOwned::to_owned);
        // Recover the sender even from a poisoned lock: losing a crash
        // report would be worse than using a possibly-inconsistent guard.
        let sender = self.constellation_chan.lock().unwrap_or_else(|err| err.into_inner());
        let _ = sender.send(FromScriptMsg::LogEntry(frame_id, name, entry));
    }
}
/// A logger directed at the constellation from the compositor
#[derive(Clone)]
pub struct FromCompositorLogger {
/// A channel to the constellation
// In-process Sender (not IPC) since the compositor shares the process;
// ReentrantMutex guards against same-thread re-entry while logging.
pub constellation_chan: Arc<ReentrantMutex<Sender<FromCompositorMsg>>>,
}
impl FromCompositorLogger {
    /// Create a new constellation logger.
    pub fn new(constellation_chan: Sender<FromCompositorMsg>) -> FromCompositorLogger {
        let chan = Arc::new(ReentrantMutex::new(constellation_chan));
        FromCompositorLogger { constellation_chan: chan }
    }

    /// The maximum log level the constellation logger is interested in.
    pub fn filter(&self) -> LogLevelFilter {
        LogLevelFilter::Warn
    }
}
impl Log for FromCompositorLogger {
    fn enabled(&self, metadata: &LogMetadata) -> bool {
        metadata.level() <= LogLevel::Warn
    }

    fn log(&self, record: &LogRecord) {
        // Only warnings, errors and panics are forwarded; everything else
        // is dropped by log_entry.
        let entry = match log_entry(record) {
            Some(entry) => entry,
            None => return,
        };
        debug!("Sending log entry {:?}.", entry);
        let frame_id = FrameId::installed();
        let name = thread::current().name().map(ToOwned::to_owned);
        // Recover the sender even from a poisoned lock: losing a crash
        // report would be worse than using a possibly-inconsistent guard.
        let sender = self.constellation_chan.lock().unwrap_or_else(|err| err.into_inner());
        let _ = sender.send(FromCompositorMsg::LogEntry(frame_id, name, entry));
    }
}
/// Rust uses `LogRecord` for storing logging, but servo converts that to
/// a `LogEntry`. We do this so that we can record panics as well as log
/// messages, and because `LogRecord` does not implement serde (de)serialization,
/// so cannot be used over an IPC channel.
fn log_entry(record: &LogRecord) -> Option<LogEntry> {
match record.level() {
LogLevel::Error if thread::panicking() => Some(LogEntry::Panic(
format!("{}", record.args()),
format!("{:?}", Backtrace::new())
)),
LogLevel::Error => Some(LogEntry::Error(
format!("{}", record.args())
)),
LogLevel::Warn => Some(LogEntry::Warn(
format!("{}", record.args())
)),
_ => None,
}
}
/// The number of warnings to include in each crash report.
// Oldest warnings are evicted first once the buffer is full
// (see handle_log_entry, which pops from the front).
const WARNINGS_BUFFER_SIZE: usize = 32;
impl<Message, LTF, STF> Constellation<Message, LTF, STF>
where LTF: LayoutThreadFactory<Message=Message>,
STF: ScriptThreadFactory<Message=Message>
{
/// Create a new constellation thread.
///
/// Spawns the "Constellation" thread, wires up all incoming channels,
/// builds the `Constellation` state and enters its event loop. Returns
/// the in-process sender the compositor uses to reach the constellation,
/// together with the IPC sender handed to the service worker manager.
pub fn start(state: InitialConstellationState) -> (Sender<FromCompositorMsg>, IpcSender<SWManagerMsg>) {
// In-process channel over which the compositor talks to us.
let (compositor_sender, compositor_receiver) = channel();
// service worker manager to communicate with constellation
let (swmanager_sender, swmanager_receiver) = ipc::channel().expect("ipc channel failure");
let sw_mgr_clone = swmanager_sender.clone();
thread::Builder::new().name("Constellation".to_owned()).spawn(move || {
// IPC receivers from script, layout and the SW manager are routed onto
// mpsc receivers so they can all take part in handle_request's select!.
let (ipc_script_sender, ipc_script_receiver) = ipc::channel().expect("ipc channel failure");
let script_receiver = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(ipc_script_receiver);
let (ipc_layout_sender, ipc_layout_receiver) = ipc::channel().expect("ipc channel failure");
let layout_receiver = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(ipc_layout_receiver);
let swmanager_receiver = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(swmanager_receiver);
// Pipeline namespace 0 belongs to the constellation itself.
PipelineNamespace::install(PipelineNamespaceId(0));
let mut constellation: Constellation<Message, LTF, STF> = Constellation {
script_sender: ipc_script_sender,
layout_sender: ipc_layout_sender,
script_receiver: script_receiver,
compositor_receiver: compositor_receiver,
layout_receiver: layout_receiver,
compositor_proxy: state.compositor_proxy,
debugger_chan: state.debugger_chan,
devtools_chan: state.devtools_chan,
bluetooth_thread: state.bluetooth_thread,
public_resource_threads: state.public_resource_threads,
private_resource_threads: state.private_resource_threads,
image_cache_thread: state.image_cache_thread,
font_cache_thread: state.font_cache_thread,
swmanager_chan: None,
swmanager_receiver: swmanager_receiver,
swmanager_sender: sw_mgr_clone,
event_loops: HashMap::new(),
pipelines: HashMap::new(),
frames: HashMap::new(),
pending_frames: vec!(),
// We initialize the namespace at 1, since we reserved namespace 0 for the constellation
next_pipeline_namespace_id: PipelineNamespaceId(1),
root_frame_id: FrameId::new(),
focus_pipeline_id: None,
time_profiler_chan: state.time_profiler_chan,
mem_profiler_chan: state.mem_profiler_chan,
window_size: WindowSizeData {
visible_viewport: opts::get().initial_window_size.to_f32() *
ScaleFactor::new(1.0),
initial_viewport: opts::get().initial_window_size.to_f32() *
ScaleFactor::new(1.0),
device_pixel_ratio:
ScaleFactor::new(opts::get().device_pixels_per_px.unwrap_or(1.0)),
},
phantom: PhantomData,
webdriver: WebDriverData::new(),
scheduler_chan: TimerScheduler::start(),
document_states: HashMap::new(),
webrender_api_sender: state.webrender_api_sender,
shutting_down: false,
handled_warnings: VecDeque::new(),
// Optional chaos-testing mode: close random pipelines with the
// given probability, using a (possibly user-supplied) RNG seed.
random_pipeline_closure: opts::get().random_pipeline_closure_probability.map(|prob| {
let seed = opts::get().random_pipeline_closure_seed.unwrap_or_else(random);
let rng = StdRng::from_seed(&[seed]);
warn!("Randomly closing pipelines.");
info!("Using seed {} for random pipeline closure.", seed);
(rng, prob)
}),
};
// Blocks this thread in the event loop until shutdown completes.
constellation.run();
}).expect("Thread spawning failed");
(compositor_sender, swmanager_sender)
}
/// The main event loop for the constellation.
///
/// Processes one incoming request at a time, and keeps going until a
/// shutdown has been requested *and* every pipeline has exited. Only then
/// does it run the final blocking shutdown sequence.
fn run(&mut self) {
    loop {
        if self.shutting_down && self.pipelines.is_empty() {
            break;
        }
        // Randomly close a pipeline if --random-pipeline-closure-probability is set
        // This is for testing the hardening of the constellation.
        self.maybe_close_random_pipeline();
        self.handle_request();
    }
    self.handle_shutdown();
}
/// Generate a new pipeline id namespace.
fn next_pipeline_namespace_id(&mut self) -> PipelineNamespaceId {
let namespace_id = self.next_pipeline_namespace_id;
let PipelineNamespaceId(ref mut i) = self.next_pipeline_namespace_id;
*i += 1;
namespace_id
}
/// Helper function for creating a pipeline.
///
/// Spawns a new pipeline in `frame_id` to load `load_data`. An unsandboxed
/// pipeline may share an existing same-host event loop registered under the
/// same top-level frame; a sandboxed pipeline always gets a fresh one.
/// Does nothing once shutdown has begun. On spawn failure, routes the error
/// through the panic handler via handle_send_error.
fn new_pipeline(&mut self,
                pipeline_id: PipelineId,
                frame_id: FrameId,
                parent_info: Option<(PipelineId, FrameType)>,
                initial_window_size: Option<TypedSize2D<f32, PagePx>>,
                load_data: LoadData,
                sandbox: IFrameSandboxState,
                is_private: bool) {
    if self.shutting_down { return; }

    // TODO: can we get a case where the child pipeline is created
    // before the parent is part of the frame tree?
    // NOTE: this value was previously recomputed a second time further down
    // with a byte-identical expression (and nothing in between mutates the
    // frame/pipeline maps); compute it once and reuse it.
    let top_level_frame_id = match parent_info {
        Some((_, FrameType::MozBrowserIFrame)) => frame_id,
        Some((parent_id, _)) => self.get_top_level_frame_for_pipeline(parent_id),
        None => self.root_frame_id,
    };

    // Decide whether an existing event loop for this host can be reused.
    // `host` is Some only when a new loop must be registered afterwards.
    let (event_loop, host) = match sandbox {
        IFrameSandboxState::IFrameSandboxed => (None, None),
        IFrameSandboxState::IFrameUnsandboxed => match reg_host(&load_data.url) {
            None => (None, None),
            Some(host) => {
                let event_loop = self.event_loops.get(&top_level_frame_id)
                    .and_then(|map| map.get(host))
                    .and_then(|weak| weak.upgrade());
                match event_loop {
                    None => (None, Some(String::from(host))),
                    Some(event_loop) => (Some(event_loop.clone()), None),
                }
            },
        },
    };

    // Private browsing content uses its own resource threads.
    let resource_threads = if is_private {
        self.private_resource_threads.clone()
    } else {
        self.public_resource_threads.clone()
    };

    // Inherit visibility from the frame's current pipeline if there is one,
    // otherwise fall back to the parent pipeline's visibility.
    let parent_visibility = parent_info
        .and_then(|(parent_pipeline_id, _)| self.pipelines.get(&parent_pipeline_id))
        .map(|pipeline| pipeline.visible);
    let prev_visibility = self.frames.get(&frame_id)
        .and_then(|frame| self.pipelines.get(&frame.current.pipeline_id))
        .map(|pipeline| pipeline.visible)
        .or(parent_visibility);

    let result = Pipeline::spawn::<Message, LTF, STF>(InitialPipelineState {
        id: pipeline_id,
        frame_id: frame_id,
        top_level_frame_id: top_level_frame_id,
        parent_info: parent_info,
        constellation_chan: self.script_sender.clone(),
        layout_to_constellation_chan: self.layout_sender.clone(),
        scheduler_chan: self.scheduler_chan.clone(),
        compositor_proxy: self.compositor_proxy.clone_compositor_proxy(),
        devtools_chan: self.devtools_chan.clone(),
        bluetooth_thread: self.bluetooth_thread.clone(),
        swmanager_thread: self.swmanager_sender.clone(),
        image_cache_thread: self.image_cache_thread.clone(),
        font_cache_thread: self.font_cache_thread.clone(),
        resource_threads: resource_threads,
        time_profiler_chan: self.time_profiler_chan.clone(),
        mem_profiler_chan: self.mem_profiler_chan.clone(),
        window_size: initial_window_size,
        event_loop: event_loop,
        load_data: load_data,
        device_pixel_ratio: self.window_size.device_pixel_ratio,
        pipeline_namespace_id: self.next_pipeline_namespace_id(),
        prev_visibility: prev_visibility,
        webrender_api_sender: self.webrender_api_sender.clone(),
        is_private: is_private,
    });

    let pipeline = match result {
        Ok(result) => result,
        Err(e) => return self.handle_send_error(pipeline_id, e),
    };

    // Register a freshly created event loop under its host, so later
    // same-host pipelines in this top-level frame can share it.
    if let Some(host) = host {
        self.event_loops.entry(top_level_frame_id)
            .or_insert_with(HashMap::new)
            .insert(host, Rc::downgrade(&pipeline.event_loop));
    }

    assert!(!self.pipelines.contains_key(&pipeline_id));
    self.pipelines.insert(pipeline_id, pipeline);
}
/// Get an iterator for the current frame tree. Specify self.root_frame_id to
/// iterate the entire tree, or a specific frame id to iterate only that sub-tree.
/// Iterates over the fully active frames in the tree.
fn current_frame_tree_iter(&self, frame_id_root: FrameId) -> FrameTreeIterator {
    FrameTreeIterator {
        stack: vec![frame_id_root],
        frames: &self.frames,
        pipelines: &self.pipelines,
    }
}
/// Get an iterator for the current frame tree. Specify self.root_frame_id to
/// iterate the entire tree, or a specific frame id to iterate only that sub-tree.
/// Iterates over all frames in the tree.
fn full_frame_tree_iter(&self, frame_id_root: FrameId) -> FullFrameTreeIterator {
    FullFrameTreeIterator {
        stack: vec![frame_id_root],
        frames: &self.frames,
        pipelines: &self.pipelines,
    }
}
/// The joint session future is the merge of the session future of every
/// frame in the frame tree, sorted reverse chronologically.
fn joint_session_future(&self, frame_id_root: FrameId) -> Vec<(Instant, FrameId, PipelineId)> {
let mut future = vec!();
for frame in self.full_frame_tree_iter(frame_id_root) {
future.extend(frame.next.iter().map(|entry| (entry.instant, entry.frame_id, entry.pipeline_id)));
}
// reverse sorting
future.sort_by(|a, b| b.cmp(a));
future
}
/// Is the joint session future empty?
fn joint_session_future_is_empty(&self, frame_id_root: FrameId) -> bool {
    // Empty iff no frame in the (sub)tree has any future entries.
    !self.full_frame_tree_iter(frame_id_root)
        .any(|frame| !frame.next.is_empty())
}
/// The joint session past is the merge of the session past of every
/// frame in the frame tree, sorted chronologically.
fn joint_session_past(&self, frame_id_root: FrameId) -> Vec<(Instant, FrameId, PipelineId)> {
let mut past = vec!();
for frame in self.full_frame_tree_iter(frame_id_root) {
// NOTE(review): each past entry is pushed with the instant of the entry
// that came *after* it (walking prev newest-to-oldest), i.e. apparently
// the time at which the entry stopped being current — confirm against
// the FrameState definition.
let mut prev_instant = frame.current.instant;
for entry in frame.prev.iter().rev() {
past.push((prev_instant, entry.frame_id, entry.pipeline_id));
prev_instant = entry.instant;
}
}
// Oldest first.
past.sort();
past
}
/// Is the joint session past empty?
fn joint_session_past_is_empty(&self, frame_id_root: FrameId) -> bool {
    // Empty iff no frame in the (sub)tree has any past entries.
    !self.full_frame_tree_iter(frame_id_root)
        .any(|frame| !frame.prev.is_empty())
}
/// Create a new frame and update the internal bookkeeping.
fn new_frame(&mut self, frame_id: FrameId, pipeline_id: PipelineId) {
    self.frames.insert(frame_id, Frame::new(frame_id, pipeline_id));
    // If this is a child frame, register it with the parent pipeline.
    let parent_info = self.pipelines.get(&pipeline_id)
        .and_then(|pipeline| pipeline.parent_info);
    if let Some((parent_id, _)) = parent_info {
        if let Some(parent) = self.pipelines.get_mut(&parent_id) {
            parent.add_child(frame_id);
        }
    }
}
/// Handles loading pages, navigation, and granting access to the compositor
///
/// Blocks until one message arrives on any of the four incoming channels
/// (script, compositor, layout, service worker manager), then dispatches
/// it to the matching handler.
#[allow(unsafe_code)]
fn handle_request(&mut self) {
// Tags each received message with the channel it arrived on, so the
// select! result can be dispatched outside the macro.
enum Request {
Script(FromScriptMsg),
Compositor(FromCompositorMsg),
Layout(FromLayoutMsg),
FromSWManager(SWManagerMsg),
}
// Get one incoming request.
// This is one of the few places where the compositor is
// allowed to panic. If one of the receiver.recv() calls
// fails, it is because the matching sender has been
// reclaimed, but this can't happen in normal execution
// because the constellation keeps a pointer to the sender,
// so it should never be reclaimed. A possible scenario in
// which receiver.recv() fails is if some unsafe code
// produces undefined behaviour, resulting in the destructor
// being called. If this happens, there's not much we can do
// other than panic.
let request = {
let receiver_from_script = &self.script_receiver;
let receiver_from_compositor = &self.compositor_receiver;
let receiver_from_layout = &self.layout_receiver;
let receiver_from_swmanager = &self.swmanager_receiver;
select! {
msg = receiver_from_script.recv() =>
Request::Script(msg.expect("Unexpected script channel panic in constellation")),
msg = receiver_from_compositor.recv() =>
Request::Compositor(msg.expect("Unexpected compositor channel panic in constellation")),
msg = receiver_from_layout.recv() =>
Request::Layout(msg.expect("Unexpected layout channel panic in constellation")),
msg = receiver_from_swmanager.recv() =>
Request::FromSWManager(msg.expect("Unexpected panic channel panic in constellation"))
}
};
match request {
Request::Compositor(message) => {
self.handle_request_from_compositor(message)
},
Request::Script(message) => {
self.handle_request_from_script(message);
},
Request::Layout(message) => {
self.handle_request_from_layout(message);
},
Request::FromSWManager(message) => {
self.handle_request_from_swmanager(message);
}
}
}
/// Handle a message from the service worker manager.
fn handle_request_from_swmanager(&mut self, message: SWManagerMsg) {
match message {
SWManagerMsg::OwnSender(sw_sender) => {
// store service worker manager for communicating with it.
self.swmanager_chan = Some(sw_sender);
}
}
}
/// Dispatch a single message received from the compositor to the
/// appropriate handler.
fn handle_request_from_compositor(&mut self, message: FromCompositorMsg) {
match message {
FromCompositorMsg::Exit => {
debug!("constellation exiting");
self.handle_exit();
}
// The compositor discovered the size of a subframe. This needs to be reflected by all
// frame trees in the navigation context containing the subframe.
FromCompositorMsg::FrameSize(pipeline_id, size) => {
debug!("constellation got frame size message");
self.handle_frame_size_msg(pipeline_id, &TypedSize2D::from_untyped(&size));
}
FromCompositorMsg::GetFrame(pipeline_id, resp_chan) => {
debug!("constellation got get root pipeline message");
self.handle_get_frame(pipeline_id, resp_chan);
}
FromCompositorMsg::GetPipeline(frame_id, resp_chan) => {
debug!("constellation got get root pipeline message");
self.handle_get_pipeline(frame_id, resp_chan);
}
FromCompositorMsg::GetPipelineTitle(pipeline_id) => {
debug!("constellation got get-pipeline-title message");
self.handle_get_pipeline_title_msg(pipeline_id);
}
FromCompositorMsg::KeyEvent(ch, key, state, modifiers) => {
debug!("constellation got key event message");
self.handle_key_msg(ch, key, state, modifiers);
}
// Load a new page from a typed url
// If there is already a pending page (self.pending_frames), it will not be overridden;
// However, if the id is not encompassed by another change, it will be.
FromCompositorMsg::LoadUrl(source_id, load_data) => {
debug!("constellation got URL load message from compositor");
self.handle_load_url_msg(source_id, load_data, false);
}
// Reftest support: reply whether all pipelines are stable enough for
// the compositor to capture the output image.
FromCompositorMsg::IsReadyToSaveImage(pipeline_states) => {
let is_ready = self.handle_is_ready_to_save_image(pipeline_states);
debug!("Ready to save image {:?}.", is_ready);
if opts::get().is_running_problem_test {
println!("got ready to save image query, result is {:?}", is_ready);
}
let is_ready = is_ready == ReadyToSave::Ready;
self.compositor_proxy.send(ToCompositorMsg::IsReadyToSaveImageReply(is_ready));
if opts::get().is_running_problem_test {
println!("sent response");
}
}
// This should only be called once per constellation, and only by the browser
FromCompositorMsg::InitLoadUrl(url) => {
debug!("constellation got init load URL message");
self.handle_init_load(url);
}
// Handle a forward or back request
FromCompositorMsg::TraverseHistory(pipeline_id, direction) => {
debug!("constellation got traverse history message from compositor");
self.handle_traverse_history_msg(pipeline_id, direction);
}
FromCompositorMsg::WindowSize(new_size, size_type) => {
debug!("constellation got window resize message");
self.handle_window_size_msg(new_size, size_type);
}
FromCompositorMsg::TickAnimation(pipeline_id, tick_type) => {
self.handle_tick_animation(pipeline_id, tick_type)
}
FromCompositorMsg::WebDriverCommand(command) => {
debug!("constellation got webdriver command message");
self.handle_webdriver_msg(command);
}
FromCompositorMsg::Reload => {
debug!("constellation got reload message");
self.handle_reload_msg();
}
// Log entries forwarded for crash reporting.
FromCompositorMsg::LogEntry(top_level_frame_id, thread_name, entry) => {
self.handle_log_entry(top_level_frame_id, thread_name, entry);
}
}
}
/// Dispatch a single message received from a script thread to the
/// appropriate handler. Several arms merely forward to the compositor.
fn handle_request_from_script(&mut self, message: FromScriptMsg) {
match message {
FromScriptMsg::PipelineExited(pipeline_id) => {
self.handle_pipeline_exited(pipeline_id);
}
FromScriptMsg::ScriptLoadedURLInIFrame(load_info) => {
debug!("constellation got iframe URL load message {:?} {:?} {:?}",
load_info.info.parent_pipeline_id,
load_info.old_pipeline_id,
load_info.info.new_pipeline_id);
self.handle_script_loaded_url_in_iframe_msg(load_info);
}
FromScriptMsg::ScriptLoadedAboutBlankInIFrame(load_info, lc) => {
debug!("constellation got loaded `about:blank` in iframe message {:?} {:?}",
load_info.parent_pipeline_id,
load_info.new_pipeline_id);
self.handle_script_loaded_about_blank_in_iframe_msg(load_info, lc);
}
FromScriptMsg::ChangeRunningAnimationsState(pipeline_id, animation_state) => {
self.handle_change_running_animations_state(pipeline_id, animation_state)
}
// Load a new page from a mouse click
// If there is already a pending page (self.pending_frames), it will not be overridden;
// However, if the id is not encompassed by another change, it will be.
FromScriptMsg::LoadUrl(source_id, load_data, replace) => {
debug!("constellation got URL load message from script");
self.handle_load_url_msg(source_id, load_data, replace);
}
// A page loaded has completed all parsing, script, and reflow messages have been sent.
FromScriptMsg::LoadComplete(pipeline_id) => {
debug!("constellation got load complete message");
self.handle_load_complete_msg(pipeline_id)
}
// Handle a forward or back request
FromScriptMsg::TraverseHistory(pipeline_id, direction) => {
debug!("constellation got traverse history message from script");
self.handle_traverse_history_msg(pipeline_id, direction);
}
// Handle a joint session history length request.
FromScriptMsg::JointSessionHistoryLength(pipeline_id, sender) => {
debug!("constellation got joint session history length message from script");
self.handle_joint_session_history_length(pipeline_id, sender);
}
// Notification that the new document is ready to become active
FromScriptMsg::ActivateDocument(pipeline_id) => {
debug!("constellation got activate document message");
self.handle_activate_document_msg(pipeline_id);
}
// Update pipeline url after redirections
FromScriptMsg::SetFinalUrl(pipeline_id, final_url) => {
// The script may have finished loading after we already started shutting down.
if let Some(ref mut pipeline) = self.pipelines.get_mut(&pipeline_id) {
debug!("constellation got set final url message");
pipeline.url = final_url;
} else {
warn!("constellation got set final url message for dead pipeline");
}
}
FromScriptMsg::MozBrowserEvent(parent_pipeline_id, pipeline_id, event) => {
debug!("constellation got mozbrowser event message");
self.handle_mozbrowser_event_msg(parent_pipeline_id,
pipeline_id,
event);
}
FromScriptMsg::Focus(pipeline_id) => {
debug!("constellation got focus message");
self.handle_focus_msg(pipeline_id);
}
// Forward an input event to the pipeline's event loop; events for
// already-closed pipelines are dropped with a debug message.
FromScriptMsg::ForwardEvent(pipeline_id, event) => {
let msg = ConstellationControlMsg::SendEvent(pipeline_id, event);
let result = match self.pipelines.get(&pipeline_id) {
None => { debug!("Pipeline {:?} got event after closure.", pipeline_id); return; }
Some(pipeline) => pipeline.event_loop.send(msg),
};
if let Err(e) = result {
self.handle_send_error(pipeline_id, e);
}
}
// Clipboard support is not wired up: reads reply with an empty
// string, writes are ignored.
FromScriptMsg::GetClipboardContents(sender) => {
if let Err(e) = sender.send("".to_owned()) {
warn!("Failed to send clipboard ({})", e);
}
}
FromScriptMsg::SetClipboardContents(_) => {
}
FromScriptMsg::SetVisible(pipeline_id, visible) => {
debug!("constellation got set visible messsage");
self.handle_set_visible_msg(pipeline_id, visible);
}
FromScriptMsg::VisibilityChangeComplete(pipeline_id, visible) => {
debug!("constellation got set visibility change complete message");
self.handle_visibility_change_complete(pipeline_id, visible);
}
FromScriptMsg::RemoveIFrame(pipeline_id, sender) => {
debug!("constellation got remove iframe message");
self.handle_remove_iframe_msg(pipeline_id);
if let Some(sender) = sender {
if let Err(e) = sender.send(()) {
warn!("Error replying to remove iframe ({})", e);
}
}
}
FromScriptMsg::NewFavicon(url) => {
debug!("constellation got new favicon message");
self.compositor_proxy.send(ToCompositorMsg::NewFavicon(url));
}
FromScriptMsg::HeadParsed => {
debug!("constellation got head parsed message");
self.compositor_proxy.send(ToCompositorMsg::HeadParsed);
}
FromScriptMsg::CreateCanvasPaintThread(size, sender) => {
debug!("constellation got create-canvas-paint-thread message");
self.handle_create_canvas_paint_thread_msg(&size, sender)
}
FromScriptMsg::CreateWebGLPaintThread(size, attributes, sender) => {
debug!("constellation got create-WebGL-paint-thread message");
self.handle_create_webgl_paint_thread_msg(&size, attributes, sender)
}
FromScriptMsg::NodeStatus(message) => {
debug!("constellation got NodeStatus message");
self.compositor_proxy.send(ToCompositorMsg::Status(message));
}
FromScriptMsg::SetDocumentState(pipeline_id, state) => {
debug!("constellation got SetDocumentState message");
self.document_states.insert(pipeline_id, state);
}
FromScriptMsg::Alert(pipeline_id, message, sender) => {
debug!("constellation got Alert message");
self.handle_alert(pipeline_id, message, sender);
}
FromScriptMsg::ScrollFragmentPoint(pipeline_id, scroll_root_id, point, smooth) => {
self.compositor_proxy.send(ToCompositorMsg::ScrollFragmentPoint(pipeline_id,
scroll_root_id,
point,
smooth));
}
FromScriptMsg::GetClientWindow(send) => {
self.compositor_proxy.send(ToCompositorMsg::GetClientWindow(send));
}
FromScriptMsg::MoveTo(point) => {
self.compositor_proxy.send(ToCompositorMsg::MoveTo(point));
}
FromScriptMsg::ResizeTo(size) => {
self.compositor_proxy.send(ToCompositorMsg::ResizeTo(size));
}
FromScriptMsg::Exit => {
self.compositor_proxy.send(ToCompositorMsg::Exit);
}
// Log entries forwarded for crash reporting.
FromScriptMsg::LogEntry(top_level_frame_id, thread_name, entry) => {
self.handle_log_entry(top_level_frame_id, thread_name, entry);
}
FromScriptMsg::SetTitle(pipeline_id, title) => {
self.compositor_proxy.send(ToCompositorMsg::ChangePageTitle(pipeline_id, title))
}
FromScriptMsg::SendKeyEvent(ch, key, key_state, key_modifiers) => {
self.compositor_proxy.send(ToCompositorMsg::KeyEvent(ch, key, key_state, key_modifiers))
}
FromScriptMsg::TouchEventProcessed(result) => {
self.compositor_proxy.send(ToCompositorMsg::TouchEventProcessed(result))
}
FromScriptMsg::RegisterServiceWorker(scope_things, scope) => {
debug!("constellation got store registration scope message");
self.handle_register_serviceworker(scope_things, scope);
}
// Forward a postMessage to the service worker manager, if present.
FromScriptMsg::ForwardDOMMessage(msg_vec, scope_url) => {
if let Some(ref mgr) = self.swmanager_chan {
let _ = mgr.send(ServiceWorkerMsg::ForwardDOMMessage(msg_vec, scope_url));
} else {
warn!("Unable to forward DOMMessage for postMessage call");
}
}
FromScriptMsg::BroadcastStorageEvent(pipeline_id, storage, url, key, old_value, new_value) => {
self.handle_broadcast_storage_event(pipeline_id, storage, url, key, old_value, new_value);
}
FromScriptMsg::SetFullscreenState(state) => {
self.compositor_proxy.send(ToCompositorMsg::SetFullscreenState(state));
}
}
}
/// Dispatch a single message received from a layout thread to the
/// appropriate handler.
fn handle_request_from_layout(&mut self, message: FromLayoutMsg) {
match message {
FromLayoutMsg::ChangeRunningAnimationsState(pipeline_id, animation_state) => {
self.handle_change_running_animations_state(pipeline_id, animation_state)
}
FromLayoutMsg::SetCursor(cursor) => {
self.handle_set_cursor_msg(cursor)
}
FromLayoutMsg::ViewportConstrained(pipeline_id, constraints) => {
debug!("constellation got viewport-constrained event message");
self.handle_viewport_constrained_msg(pipeline_id, constraints);
}
}
}
/// Forward a service worker registration to the service worker manager,
/// if one has announced itself; otherwise just warn.
fn handle_register_serviceworker(&self, scope_things: ScopeThings, scope: ServoUrl) {
    match self.swmanager_chan {
        Some(ref mgr) => {
            let _ = mgr.send(ServiceWorkerMsg::RegisterServiceWorker(scope_things, scope));
        },
        None => warn!("sending scope info to service worker manager failed"),
    }
}
/// Broadcast a storage event to every same-origin pipeline other than the
/// one that originated the change.
fn handle_broadcast_storage_event(&self, pipeline_id: PipelineId, storage: StorageType, url: ServoUrl,
                                  key: Option<String>, old_value: Option<String>, new_value: Option<String>) {
    let origin = url.origin();
    // Skip the originating pipeline and any pipeline from another origin.
    let recipients = self.pipelines.values()
        .filter(|pipeline| pipeline.id != pipeline_id && pipeline.url.origin() == origin);
    for pipeline in recipients {
        let msg = ConstellationControlMsg::DispatchStorageEvent(
            pipeline.id, storage, url.clone(), key.clone(), old_value.clone(), new_value.clone()
        );
        if let Err(err) = pipeline.event_loop.send(msg) {
            warn!("Failed to broadcast storage event to pipeline {} ({:?}).", pipeline.id, err);
        }
    }
}
/// Begin an orderly shutdown: mark the constellation as shutting down and
/// start closing every frame and pipeline. Actual thread teardown happens
/// later in handle_shutdown, once run() observes all pipelines are gone.
fn handle_exit(&mut self) {
// TODO: add a timer, which forces shutdown if threads aren't responsive.
// Idempotent: a second Exit message is ignored.
if self.shutting_down { return; }
self.shutting_down = true;
self.mem_profiler_chan.send(mem::ProfilerMsg::Exit);
// TODO: exit before the root frame is initialized?
debug!("Removing root frame.");
let root_frame_id = self.root_frame_id;
self.close_frame(root_frame_id, ExitPipelineMode::Normal);
// Close any pending frames and pipelines
while let Some(pending) = self.pending_frames.pop() {
debug!("Removing pending frame {}.", pending.frame_id);
self.close_frame(pending.frame_id, ExitPipelineMode::Normal);
debug!("Removing pending pipeline {}.", pending.new_pipeline_id);
self.close_pipeline(pending.new_pipeline_id, ExitPipelineMode::Normal);
}
// In case there are frames which weren't attached to the frame tree, we close them.
let frame_ids: Vec<FrameId> = self.frames.keys().cloned().collect();
for frame_id in frame_ids {
debug!("Removing detached frame {}.", frame_id);
self.close_frame(frame_id, ExitPipelineMode::Normal);
}
// In case there are pipelines which weren't attached to the pipeline tree, we close them.
let pipeline_ids: Vec<PipelineId> = self.pipelines.keys().cloned().collect();
for pipeline_id in pipeline_ids {
debug!("Removing detached pipeline {}.", pipeline_id);
self.close_pipeline(pipeline_id, ExitPipelineMode::Normal);
}
}
/// Final, blocking phase of shutdown: ask every auxiliary thread to exit,
/// wait for the resource and storage threads to confirm, then tell the
/// compositor that shutdown is complete.
fn handle_shutdown(&mut self) {
// At this point, there are no active pipelines,
// so we can safely block on other threads, without worrying about deadlock.
// Channels to receive signals when threads are done exiting.
let (core_sender, core_receiver) = ipc::channel().expect("Failed to create IPC channel!");
let (storage_sender, storage_receiver) = ipc::channel().expect("Failed to create IPC channel!");
debug!("Exiting image cache.");
self.image_cache_thread.exit();
debug!("Exiting core resource threads.");
if let Err(e) = self.public_resource_threads.send(net_traits::CoreResourceMsg::Exit(core_sender)) {
warn!("Exit resource thread failed ({})", e);
}
if let Some(ref chan) = self.debugger_chan {
debugger::shutdown_server(chan);
}
if let Some(ref chan) = self.devtools_chan {
debug!("Exiting devtools.");
let msg = DevtoolsControlMsg::FromChrome(ChromeToDevtoolsControlMsg::ServerExitMsg);
if let Err(e) = chan.send(msg) {
warn!("Exit devtools failed ({})", e);
}
}
debug!("Exiting storage resource threads.");
if let Err(e) = self.public_resource_threads.send(StorageThreadMsg::Exit(storage_sender)) {
warn!("Exit storage thread failed ({})", e);
}
debug!("Exiting bluetooth thread.");
if let Err(e) = self.bluetooth_thread.send(BluetoothRequest::Exit) {
warn!("Exit bluetooth thread failed ({})", e);
}
debug!("Exiting service worker manager thread.");
if let Some(mgr) = self.swmanager_chan.as_ref() {
if let Err(e) = mgr.send(ServiceWorkerMsg::Exit) {
warn!("Exit service worker manager failed ({})", e);
}
}
debug!("Exiting font cache thread.");
self.font_cache_thread.exit();
// Receive exit signals from threads.
// Block until the resource and storage threads acknowledge; failures
// are logged but do not abort the shutdown.
if let Err(e) = core_receiver.recv() {
warn!("Exit resource thread failed ({})", e);
}
if let Err(e) = storage_receiver.recv() {
warn!("Exit storage thread failed ({})", e);
}
debug!("Asking compositor to complete shutdown.");
self.compositor_proxy.send(ToCompositorMsg::ShutdownComplete);
}
/// A pipeline has told us it has exited: drop our record of it.
/// Removing a pipeline we no longer know about is a no-op.
fn handle_pipeline_exited(&mut self, pipeline_id: PipelineId) {
debug!("Pipeline {:?} exited.", pipeline_id);
self.pipelines.remove(&pipeline_id);
}
/// A send to a pipeline failed, meaning its event loop is gone.
/// Treat this exactly like a panic report from that pipeline.
fn handle_send_error(&mut self, pipeline_id: PipelineId, err: IOError) {
    debug!("Pipeline {:?} send error ({}).", pipeline_id, err);
    let frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
    self.handle_panic(frame_id, format!("Send failed ({})", err), None);
}
/// Recover from a panic in some pipeline of a top-level frame: notify the
/// browser chrome, force-close the frame's children, and replace the
/// failed content with a new about:failure pipeline. In --hard-fail mode
/// the whole process exits instead.
fn handle_panic(&mut self, top_level_frame_id: FrameId, reason: String, backtrace: Option<String>) {
if opts::get().hard_fail {
// It's quite difficult to make Servo exit cleanly if some threads have failed.
// Hard fail exists for test runners so we crash and that's good enough.
println!("Pipeline failed in hard-fail mode. Crashing!");
process::exit(1);
}
debug!("Panic handler for top-level frame {}: {}.", top_level_frame_id, reason);
// Notify the browser chrome that the pipeline has failed
self.trigger_mozbrowsererror(top_level_frame_id, reason, backtrace);
// Capture what we need from the failed pipeline before closing anything.
let pipeline_id = self.frames.get(&top_level_frame_id).map(|frame| frame.current.pipeline_id);
let pipeline_url = pipeline_id.and_then(|id| self.pipelines.get(&id).map(|pipeline| pipeline.url.clone()));
let parent_info = pipeline_id.and_then(|id| self.pipelines.get(&id).and_then(|pipeline| pipeline.parent_info));
let window_size = pipeline_id.and_then(|id| self.pipelines.get(&id).and_then(|pipeline| pipeline.size));
self.close_frame_children(top_level_frame_id, ExitPipelineMode::Force);
let failure_url = ServoUrl::parse("about:failure").expect("infallible");
// Guard against an infinite loop: if about:failure itself fails,
// give up rather than spawning replacements forever.
if let Some(pipeline_url) = pipeline_url {
if pipeline_url == failure_url {
return error!("about:failure failed");
}
}
warn!("creating replacement pipeline for about:failure");
// Sandboxed so the error page gets a fresh event loop.
let new_pipeline_id = PipelineId::new();
let load_data = LoadData::new(failure_url, None, None);
let sandbox = IFrameSandboxState::IFrameSandboxed;
self.new_pipeline(new_pipeline_id, top_level_frame_id, parent_info, window_size, load_data, sandbox, false);
self.pending_frames.push(FrameChange {
frame_id: top_level_frame_id,
old_pipeline_id: pipeline_id,
new_pipeline_id: new_pipeline_id,
replace: false,
});
}
/// Record a log entry forwarded from another thread. Panics are escalated to
/// the panic handler; errors and warnings are kept in a bounded buffer.
fn handle_log_entry(&mut self, top_level_frame_id: Option<FrameId>, thread_name: Option<String>, entry: LogEntry) {
    debug!("Received log entry {:?}.", entry);
    match entry {
        LogEntry::Error(reason) | LogEntry::Warn(reason) => {
            // Evict the oldest entry once the buffer is full.
            // VecDeque::truncate is unstable
            if WARNINGS_BUFFER_SIZE <= self.handled_warnings.len() {
                self.handled_warnings.pop_front();
            }
            self.handled_warnings.push_back((thread_name, reason));
        },
        LogEntry::Panic(reason, backtrace) => {
            // Without an explicit frame, attribute the panic to the root frame.
            let frame_id = top_level_frame_id.unwrap_or(self.root_frame_id);
            self.handle_panic(frame_id, reason, Some(backtrace));
        },
    }
}
/// Start the initial page load: create the root pipeline for `url`, queue it
/// as a pending change for the root frame, and tell the compositor the new URL.
fn handle_init_load(&mut self, url: ServoUrl) {
let window_size = self.window_size.visible_viewport;
let root_pipeline_id = PipelineId::new();
let root_frame_id = self.root_frame_id;
let load_data = LoadData::new(url.clone(), None, None);
let sandbox = IFrameSandboxState::IFrameUnsandboxed;
self.new_pipeline(root_pipeline_id, root_frame_id, None, Some(window_size), load_data, sandbox, false);
self.handle_load_start_msg(root_pipeline_id);
// The pending change becomes current once the document activates.
self.pending_frames.push(FrameChange {
frame_id: self.root_frame_id,
old_pipeline_id: None,
new_pipeline_id: root_pipeline_id,
replace: false,
});
self.compositor_proxy.send(ToCompositorMsg::ChangePageUrl(root_pipeline_id, url));
}
/// Record the initial size of a pipeline's frame and send the pipeline a
/// Resize message tagged WindowSizeType::Initial.
fn handle_frame_size_msg(&mut self,
pipeline_id: PipelineId,
size: &TypedSize2D<f32, PagePx>) {
let msg = ConstellationControlMsg::Resize(pipeline_id, WindowSizeData {
visible_viewport: *size,
initial_viewport: *size * ScaleFactor::new(1.0),
device_pixel_ratio: self.window_size.device_pixel_ratio,
}, WindowSizeType::Initial);
// Store the new rect inside the pipeline
let result = {
// Find the pipeline that corresponds to this rectangle. It's possible that this
// pipeline may have already exited before we process this message, so just
// early exit if that occurs.
match self.pipelines.get_mut(&pipeline_id) {
Some(pipeline) => {
pipeline.size = Some(*size);
pipeline.event_loop.send(msg)
}
None => return,
}
};
if let Err(e) = result {
self.handle_send_error(pipeline_id, e);
}
}
/// Notify the parent pipeline that one of its child frames has finished
/// loading, so it can dispatch a frame load event on the iframe element.
fn handle_subframe_loaded(&mut self, pipeline_id: PipelineId) {
// A pipeline without a parent is not a subframe; nothing to dispatch.
let (frame_id, parent_id) = match self.pipelines.get(&pipeline_id) {
Some(pipeline) => match pipeline.parent_info {
Some((parent_id, _)) => (pipeline.frame_id, parent_id),
None => return warn!("Pipeline {} has no parent.", pipeline_id),
},
None => return warn!("Pipeline {} loaded after closure.", pipeline_id),
};
let msg = ConstellationControlMsg::DispatchFrameLoadEvent {
target: frame_id,
parent: parent_id,
child: pipeline_id,
};
let result = match self.pipelines.get(&parent_id) {
Some(parent) => parent.event_loop.send(msg),
None => return warn!("Parent {} frame loaded after closure.", parent_id),
};
if let Err(e) = result {
self.handle_send_error(parent_id, e);
}
}
// The script thread associated with pipeline_id has loaded a URL in an iframe via script. This
// will result in a new pipeline being spawned and a frame tree being added to
// parent_pipeline_id's frame tree's children. This message is never the result of a
// page navigation.
fn handle_script_loaded_url_in_iframe_msg(&mut self, load_info: IFrameLoadInfoWithData) {
let (load_data, window_size, is_private) = {
let old_pipeline = load_info.old_pipeline_id
.and_then(|old_pipeline_id| self.pipelines.get(&old_pipeline_id));
let source_pipeline = match self.pipelines.get(&load_info.info.parent_pipeline_id) {
Some(source_pipeline) => source_pipeline,
None => return warn!("Script loaded url in closed iframe {}.", load_info.info.parent_pipeline_id),
};
// If no url is specified, reload.
let load_data = load_info.load_data.unwrap_or_else(|| {
let url = match old_pipeline {
Some(old_pipeline) => old_pipeline.url.clone(),
None => ServoUrl::parse("about:blank").expect("infallible"),
};
// TODO - loaddata here should have referrer info (not None, None)
LoadData::new(url, None, None)
});
// Privacy is inherited: the iframe is private if either it or its parent is.
let is_private = load_info.info.is_private || source_pipeline.is_private;
let window_size = old_pipeline.and_then(|old_pipeline| old_pipeline.size);
// Suspend the old pipeline while the replacement loads.
if let Some(old_pipeline) = old_pipeline {
old_pipeline.freeze();
}
(load_data, window_size, is_private)
};
// Create the new pipeline, attached to the parent and push to pending frames
self.new_pipeline(load_info.info.new_pipeline_id,
load_info.info.frame_id,
Some((load_info.info.parent_pipeline_id, load_info.info.frame_type)),
window_size,
load_data,
load_info.sandbox,
is_private);
self.pending_frames.push(FrameChange {
frame_id: load_info.info.frame_id,
old_pipeline_id: load_info.old_pipeline_id,
new_pipeline_id: load_info.info.new_pipeline_id,
replace: load_info.info.replace,
});
}
/// A script thread created an about:blank iframe. The new pipeline shares the
/// parent pipeline's event loop and uses the layout channel supplied by the
/// caller; it is registered directly and queued as a pending frame change.
fn handle_script_loaded_about_blank_in_iframe_msg(&mut self,
load_info: IFrameLoadInfo,
layout_sender: IpcSender<LayoutControlMsg>) {
let IFrameLoadInfo {
parent_pipeline_id,
new_pipeline_id,
frame_type,
replace,
frame_id,
is_private,
} = load_info;
let pipeline = {
let parent_pipeline = match self.pipelines.get(&parent_pipeline_id) {
Some(parent_pipeline) => parent_pipeline,
None => return warn!("Script loaded url in closed iframe {}.", parent_pipeline_id),
};
// Reuse the parent's event loop for the about:blank document.
let script_sender = parent_pipeline.event_loop.clone();
let url = ServoUrl::parse("about:blank").expect("infallible");
Pipeline::new(new_pipeline_id,
frame_id,
Some((parent_pipeline_id, frame_type)),
script_sender,
layout_sender,
self.compositor_proxy.clone_compositor_proxy(),
// Privacy and visibility are inherited from the parent.
is_private || parent_pipeline.is_private,
url,
None,
parent_pipeline.visible)
};
// A freshly minted pipeline id must not already be registered.
assert!(!self.pipelines.contains_key(&new_pipeline_id));
self.pipelines.insert(new_pipeline_id, pipeline);
self.pending_frames.push(FrameChange {
frame_id: frame_id,
old_pipeline_id: None,
new_pipeline_id: new_pipeline_id,
replace: replace,
});
}
/// Forward a cursor change to the compositor.
fn handle_set_cursor_msg(&mut self, cursor: Cursor) {
    let msg = ToCompositorMsg::SetCursor(cursor);
    self.compositor_proxy.send(msg)
}
/// Forward a pipeline's animation running-state change to the compositor.
fn handle_change_running_animations_state(&mut self,
                                          pipeline_id: PipelineId,
                                          animation_state: AnimationState) {
    let msg = ToCompositorMsg::ChangeRunningAnimationsState(pipeline_id, animation_state);
    self.compositor_proxy.send(msg)
}
/// Deliver an animation tick to a pipeline: to its script event loop for
/// script-driven ticks, or to its layout channel for layout ticks.
fn handle_tick_animation(&mut self, pipeline_id: PipelineId, tick_type: AnimationTickType) {
let result = match tick_type {
AnimationTickType::Script => {
let msg = ConstellationControlMsg::TickAllAnimations(pipeline_id);
match self.pipelines.get(&pipeline_id) {
Some(pipeline) => pipeline.event_loop.send(msg),
None => return warn!("Pipeline {:?} got script tick after closure.", pipeline_id),
}
}
AnimationTickType::Layout => {
let msg = LayoutControlMsg::TickAnimations;
match self.pipelines.get(&pipeline_id) {
Some(pipeline) => pipeline.layout_chan.send(msg),
None => return warn!("Pipeline {:?} got layout tick after closure.", pipeline_id),
}
}
};
if let Err(e) = result {
self.handle_send_error(pipeline_id, e);
}
}
/// Handle an alert() originating from a pipeline. If mozbrowser is enabled and
/// the pipeline is embedded (has a parent), route the alert to browser chrome
/// as a ShowModalPrompt event. The boolean reply is `true` when the alert was
/// NOT routed through mozbrowser (i.e. the caller should handle it itself).
fn handle_alert(&mut self,
pipeline_id: PipelineId,
message: String,
sender: IpcSender<bool>) {
// A pipeline with parent_info is an embedded (non-root) pipeline.
let pipeline_isnt_root = self.pipelines.get(&pipeline_id).and_then(|pipeline| pipeline.parent_info).is_some();
let mozbrowser_modal_prompt = pipeline_isnt_root && PREFS.is_mozbrowser_enabled();
if mozbrowser_modal_prompt {
// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowsershowmodalprompt
let prompt_type = String::from("alert");
let title = String::from("Alert");
let return_value = String::from("");
let event = MozBrowserEvent::ShowModalPrompt(prompt_type, title, message, return_value);
let top_level_frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
// The event is fired on the root pipeline, targeted at the alerting frame.
match self.frames.get(&self.root_frame_id) {
None => warn!("Alert sent after root frame closure."),
Some(root_frame) => match self.pipelines.get(&root_frame.current.pipeline_id) {
None => warn!("Alert sent after root pipeline closure."),
Some(root_pipeline) => root_pipeline.trigger_mozbrowser_event(Some(top_level_frame_id), event),
}
}
}
let result = sender.send(!mozbrowser_modal_prompt);
if let Err(e) = result {
self.handle_send_error(pipeline_id, e);
}
}
/// Message entry point for page loads; delegates to `load_url`.
fn handle_load_url_msg(&mut self, source_id: PipelineId, load_data: LoadData, replace: bool) {
    // The new pipeline id returned by load_url is only needed by webdriver.
    let _ = self.load_url(source_id, load_data, replace);
}
/// Begin loading `load_data` in the frame containing `source_id`, returning
/// the id of the pipeline created for the load, or `None` if the load was
/// dropped (closed source pipeline, pending navigation, or inactive pipeline).
fn load_url(&mut self, source_id: PipelineId, load_data: LoadData, replace: bool) -> Option<PipelineId> {
debug!("Loading {} in pipeline {}.", load_data.url, source_id);
// If this load targets an iframe, its framing element may exist
// in a separate script thread than the framed document that initiated
// the new load. The framing element must be notified about the
// requested change so it can update its internal state.
//
// If replace is true, the current entry is replaced instead of a new entry being added.
let (frame_id, parent_info) = match self.pipelines.get(&source_id) {
Some(pipeline) => (pipeline.frame_id, pipeline.parent_info),
None => {
warn!("Pipeline {:?} loaded after closure.", source_id);
return None;
}
};
match parent_info {
// Iframe case: delegate the navigation to the parent's script thread.
Some((parent_pipeline_id, _)) => {
self.handle_load_start_msg(source_id);
// Message the constellation to find the script thread for this iframe
// and issue an iframe load through there.
let msg = ConstellationControlMsg::Navigate(parent_pipeline_id, frame_id, load_data, replace);
let result = match self.pipelines.get(&parent_pipeline_id) {
Some(parent_pipeline) => parent_pipeline.event_loop.send(msg),
None => {
warn!("Pipeline {:?} child loaded after closure", parent_pipeline_id);
return None;
},
};
if let Err(e) = result {
self.handle_send_error(parent_pipeline_id, e);
}
Some(source_id)
}
// Top-level case: create a replacement pipeline in the root frame.
None => {
// Make sure no pending page would be overridden.
for frame_change in &self.pending_frames {
if frame_change.old_pipeline_id == Some(source_id) {
// id that sent load msg is being changed already; abort
return None;
}
}
if !self.pipeline_is_in_current_frame(source_id) {
// Disregard this load if the navigating pipeline is not actually
// active. This could be caused by a delayed navigation (eg. from
// a timer) or a race between multiple navigations (such as an
// onclick handler on an anchor element).
return None;
}
self.handle_load_start_msg(source_id);
// Being here means either there are no pending frames, or none of the pending
// changes would be overridden by changing the subframe associated with source_id.
// Create the new pipeline
let window_size = self.pipelines.get(&source_id).and_then(|source| source.size);
let new_pipeline_id = PipelineId::new();
let root_frame_id = self.root_frame_id;
let sandbox = IFrameSandboxState::IFrameUnsandboxed;
self.new_pipeline(new_pipeline_id, root_frame_id, None, window_size, load_data, sandbox, false);
self.pending_frames.push(FrameChange {
frame_id: root_frame_id,
old_pipeline_id: Some(source_id),
new_pipeline_id: new_pipeline_id,
replace: replace,
});
// Send message to ScriptThread that will suspend all timers
match self.pipelines.get(&source_id) {
Some(source) => source.freeze(),
None => warn!("Pipeline {:?} loaded after closure", source_id),
};
Some(new_pipeline_id)
}
}
}
/// Tell the compositor a load has started, along with whether back/forward
/// history navigation is currently possible for the affected top-level frame.
fn handle_load_start_msg(&mut self, pipeline_id: PipelineId) {
    let frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
    let can_go_forward = !self.joint_session_future_is_empty(frame_id);
    let can_go_back = !self.joint_session_past_is_empty(frame_id);
    self.compositor_proxy.send(ToCompositorMsg::LoadStart(can_go_back, can_go_forward));
}
/// A pipeline finished loading: notify webdriver if it was waiting on this
/// load, tell the compositor (with current back/forward availability), and
/// fire the subframe-loaded notification.
fn handle_load_complete_msg(&mut self, pipeline_id: PipelineId) {
let mut webdriver_reset = false;
if let Some((expected_pipeline_id, ref reply_chan)) = self.webdriver.load_channel {
debug!("Sending load to WebDriver");
if expected_pipeline_id == pipeline_id {
let _ = reply_chan.send(webdriver_msg::LoadStatus::LoadComplete);
webdriver_reset = true;
}
}
// Reset outside the borrow of self.webdriver.load_channel above.
if webdriver_reset {
self.webdriver.load_channel = None;
}
let frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
let forward = !self.joint_session_future_is_empty(frame_id);
let back = !self.joint_session_past_is_empty(frame_id);
let root = self.root_frame_id == frame_id;
self.compositor_proxy.send(ToCompositorMsg::LoadComplete(back, forward, root));
self.handle_subframe_loaded(pipeline_id);
}
/// Traverse joint session history by `delta` entries forward or back for the
/// top-level frame of `pipeline_id` (or the root frame if none given), then
/// apply each resulting (frame, pipeline) traversal.
fn handle_traverse_history_msg(&mut self,
pipeline_id: Option<PipelineId>,
direction: TraversalDirection) {
let top_level_frame_id = pipeline_id
.map(|pipeline_id| self.get_top_level_frame_for_pipeline(pipeline_id))
.unwrap_or(self.root_frame_id);
// Later pops overwrite earlier ones, so each frame ends at its final target.
let mut traversal_info = HashMap::new();
match direction {
TraversalDirection::Forward(delta) => {
let mut future = self.joint_session_future(top_level_frame_id);
for _ in 0..delta {
match future.pop() {
Some((_, frame_id, pipeline_id)) => {
traversal_info.insert(frame_id, pipeline_id);
},
None => return warn!("invalid traversal delta"),
}
}
},
TraversalDirection::Back(delta) => {
let mut past = self.joint_session_past(top_level_frame_id);
for _ in 0..delta {
match past.pop() {
Some((_, frame_id, pipeline_id)) => {
traversal_info.insert(frame_id, pipeline_id);
},
None => return warn!("invalid traversal delta"),
}
}
},
};
for (frame_id, pipeline_id) in traversal_info {
self.traverse_frame_to_pipeline(frame_id, pipeline_id);
}
}
/// Reply with the total joint session history length for the top-level frame
/// containing `pipeline_id`.
fn handle_joint_session_history_length(&self, pipeline_id: PipelineId, sender: IpcSender<u32>) {
    let frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
    // Start at 1 to count the current active entry, then add each frame's
    // past and future entries.
    let length = self.full_frame_tree_iter(frame_id)
        .fold(1, |len, frame| len + frame.next.len() + frame.prev.len());
    let _ = sender.send(length as u32);
}
/// Route a key event to the focused pipeline (or the root frame's current
/// pipeline); if neither exists, bounce the event back to the compositor.
fn handle_key_msg(&mut self, ch: Option<char>, key: Key, state: KeyState, mods: KeyModifiers) {
    // Send to the explicitly focused pipeline (if it exists), or the root
    // frame's current pipeline. If neither exist, fall back to sending to
    // the compositor below.
    let root_pipeline_id = self.frames.get(&self.root_frame_id)
        .map(|root_frame| root_frame.current.pipeline_id);
    let pipeline_id = self.focus_pipeline_id.or(root_pipeline_id);
    match pipeline_id {
        Some(pipeline_id) => {
            let event = CompositorEvent::KeyEvent(ch, key, state, mods);
            let msg = ConstellationControlMsg::SendEvent(pipeline_id, event);
            let result = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => pipeline.event_loop.send(msg),
                None => return debug!("Pipeline {:?} got key event after closure.", pipeline_id),
            };
            if let Err(e) = result {
                self.handle_send_error(pipeline_id, e);
            }
        },
        None => {
            let event = ToCompositorMsg::KeyEvent(ch, key, state, mods);
            // Send on the proxy directly; cloning it first (as this code used
            // to) was an unnecessary allocation per key event.
            self.compositor_proxy.send(event);
        }
    }
}
/// Ask the root frame's current pipeline to reload its document.
fn handle_reload_msg(&mut self) {
    // Send Reload constellation msg to root script channel.
    let root_pipeline_id = self.frames.get(&self.root_frame_id)
        .map(|root_frame| root_frame.current.pipeline_id);
    let pipeline_id = match root_pipeline_id {
        Some(id) => id,
        None => return,
    };
    let result = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => pipeline.event_loop.send(ConstellationControlMsg::Reload(pipeline_id)),
        None => return debug!("Pipeline {:?} got reload event after closure.", pipeline_id),
    };
    if let Err(e) = result {
        self.handle_send_error(pipeline_id, e);
    }
}
/// Ask a pipeline for its document title; if the pipeline has already closed,
/// immediately report a `None` title to the compositor instead.
fn handle_get_pipeline_title_msg(&mut self, pipeline_id: PipelineId) {
    let result = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => pipeline.event_loop.send(ConstellationControlMsg::GetTitle(pipeline_id)),
        None => return self.compositor_proxy.send(ToCompositorMsg::ChangePageTitle(pipeline_id, None)),
    };
    if let Err(e) = result {
        self.handle_send_error(pipeline_id, e);
    }
}
/// Forward a mozbrowser event to the parent pipeline's script thread,
/// targeted at the frame that contains `pipeline_id`.
fn handle_mozbrowser_event_msg(&mut self,
parent_pipeline_id: PipelineId,
pipeline_id: PipelineId,
event: MozBrowserEvent) {
assert!(PREFS.is_mozbrowser_enabled());
// Find the script channel for the given parent pipeline,
// and pass the event to that script thread.
// If the pipeline lookup fails, it is because we have torn down the pipeline,
// so it is reasonable to silently ignore the event.
let frame_id = self.pipelines.get(&pipeline_id).map(|pipeline| pipeline.frame_id);
match self.pipelines.get(&parent_pipeline_id) {
Some(pipeline) => pipeline.trigger_mozbrowser_event(frame_id, event),
None => warn!("Pipeline {:?} handling mozbrowser event after closure.", parent_pipeline_id),
}
}
/// Reply with the pipeline being displayed in `frame_id` (defaulting to the
/// root frame) — or, if a pending navigation is replacing the current
/// pipeline, the pipeline about to be displayed.
fn handle_get_pipeline(&mut self, frame_id: Option<FrameId>,
                       resp_chan: IpcSender<Option<PipelineId>>) {
    let frame_id = frame_id.unwrap_or(self.root_frame_id);
    let current_pipeline_id = self.frames.get(&frame_id)
        .map(|frame| frame.current.pipeline_id);
    // Prefer the most recent pending change targeting the current pipeline;
    // otherwise fall back to the current pipeline itself. (The original code
    // routed the fallback through a no-op `.map(|id| id)` binding.)
    let pipeline_id_loaded = self.pending_frames.iter().rev()
        .find(|x| x.old_pipeline_id == current_pipeline_id)
        .map(|x| x.new_pipeline_id)
        .or(current_pipeline_id);
    if let Err(e) = resp_chan.send(pipeline_id_loaded) {
        warn!("Failed get_pipeline response ({}).", e);
    }
}
/// Reply with the frame that `pipeline_id` currently belongs to, if any.
fn handle_get_frame(&mut self,
                    pipeline_id: PipelineId,
                    resp_chan: IpcSender<Option<FrameId>>) {
    let frame_id = self.pipelines.get(&pipeline_id).map(|p| p.frame_id);
    if let Err(e) = resp_chan.send(frame_id) {
        warn!("Failed get_frame response ({}).", e);
    }
}
/// Tell the parent of `pipeline_id` to mark the containing iframe element as
/// focused, then recurse upward so the whole ancestor chain is focused.
fn focus_parent_pipeline(&mut self, pipeline_id: PipelineId) {
let (frame_id, parent_info) = match self.pipelines.get(&pipeline_id) {
Some(pipeline) => (pipeline.frame_id, pipeline.parent_info),
None => return warn!("Pipeline {:?} focus parent after closure.", pipeline_id),
};
// Recursion terminates at the root pipeline, which has no parent.
let (parent_pipeline_id, _) = match parent_info {
Some(info) => info,
None => return debug!("Pipeline {:?} focus has no parent.", pipeline_id),
};
// Send a message to the parent of the provided pipeline (if it exists)
// telling it to mark the iframe element as focused.
let msg = ConstellationControlMsg::FocusIFrame(parent_pipeline_id, frame_id);
let result = match self.pipelines.get(&parent_pipeline_id) {
Some(pipeline) => pipeline.event_loop.send(msg),
None => return warn!("Pipeline {:?} focus after closure.", parent_pipeline_id),
};
if let Err(e) = result {
self.handle_send_error(parent_pipeline_id, e);
}
self.focus_parent_pipeline(parent_pipeline_id);
}
/// Record the newly focused pipeline, then propagate focus up through its
/// ancestor iframes.
fn handle_focus_msg(&mut self, pipeline_id: PipelineId) {
    self.focus_pipeline_id = Some(pipeline_id);
    // Focus parent iframes recursively
    self.focus_parent_pipeline(pipeline_id);
}
/// An iframe element was removed from its document; tear down the frame it
/// hosted, or just the pipeline if it never made it into the frame tree.
fn handle_remove_iframe_msg(&mut self, pipeline_id: PipelineId) {
    match self.pipelines.get(&pipeline_id).map(|pipeline| pipeline.frame_id) {
        Some(frame_id) => {
            // The iframe finished loading and is in the frame tree:
            // close the whole frame.
            self.close_frame(frame_id, ExitPipelineMode::Normal);
        },
        None => {
            // Still loading/painting for the first time, so not in the
            // frame tree yet — but the pipeline must still be shut down.
            self.close_pipeline(pipeline_id, ExitPipelineMode::Normal);
        },
    }
}
/// Apply a visibility change to every pipeline (current, past and future
/// session entries) in the frame tree rooted at `pipeline_id`'s frame.
fn handle_set_visible_msg(&mut self, pipeline_id: PipelineId, visible: bool) {
let frame_id = match self.pipelines.get(&pipeline_id) {
Some(pipeline) => pipeline.frame_id,
None => return warn!("No frame associated with pipeline {:?}", pipeline_id),
};
// Collect ids first so the mutable pipeline borrows below don't overlap
// with the frame-tree iteration.
let child_pipeline_ids: Vec<PipelineId> = self.full_frame_tree_iter(frame_id)
.flat_map(|frame| frame.next.iter()
.chain(frame.prev.iter())
.chain(once(&frame.current)))
.map(|state| state.pipeline_id)
.collect();
for id in child_pipeline_ids {
if let Some(pipeline) = self.pipelines.get_mut(&id) {
pipeline.change_visibility(visible);
}
}
}
/// A pipeline finished applying a visibility change; notify the parent
/// pipeline's iframe element (if there is one) of the new visibility.
fn handle_visibility_change_complete(&mut self, pipeline_id: PipelineId, visibility: bool) {
    let (frame_id, parent_pipeline_info) = match self.pipelines.get(&pipeline_id) {
        // (Typo fix: the message previously read "Visibity".)
        None => return warn!("Visibility change for closed pipeline {:?}.", pipeline_id),
        Some(pipeline) => (pipeline.frame_id, pipeline.parent_info),
    };
    // Root pipelines have no parent to notify.
    if let Some((parent_pipeline_id, _)) = parent_pipeline_info {
        let visibility_msg = ConstellationControlMsg::NotifyVisibilityChange(parent_pipeline_id,
                                                                             frame_id,
                                                                             visibility);
        let result = match self.pipelines.get(&parent_pipeline_id) {
            None => return warn!("Parent pipeline {:?} closed", parent_pipeline_id),
            Some(parent_pipeline) => parent_pipeline.event_loop.send(visibility_msg),
        };
        if let Err(e) = result {
            self.handle_send_error(parent_pipeline_id, e);
        }
    }
}
/// Start a 2D canvas paint thread of the given size and reply with the
/// channel used to talk to it.
fn handle_create_canvas_paint_thread_msg(
    &mut self,
    size: &Size2D<i32>,
    response_sender: IpcSender<IpcSender<CanvasMsg>>) {
    let antialias = opts::get().enable_canvas_antialiasing;
    let webrender_api = self.webrender_api_sender.clone();
    let sender = CanvasPaintThread::start(*size, webrender_api, antialias);
    if let Err(e) = response_sender.send(sender) {
        warn!("Create canvas paint thread response failed ({})", e);
    }
}
/// Start a WebGL paint thread for a canvas and reply with either its channel
/// plus GL limits, or an error string.
fn handle_create_webgl_paint_thread_msg(
    &mut self,
    size: &Size2D<i32>,
    attributes: GLContextAttributes,
    response_sender: IpcSender<Result<(IpcSender<CanvasMsg>, GLLimits), String>>) {
    let webrender_api = self.webrender_api_sender.clone();
    let response = WebGLPaintThread::start(*size, attributes, webrender_api);
    if let Err(e) = response_sender.send(response) {
        warn!("Create WebGL paint thread response failed ({})", e);
    }
}
/// Dispatch a webdriver command to the right pipeline or to the compositor.
fn handle_webdriver_msg(&mut self, msg: WebDriverCommandMsg) {
    // Find the script channel for the given parent pipeline,
    // and pass the event to that script thread.
    match msg {
        WebDriverCommandMsg::GetWindowSize(_, reply) => {
            let _ = reply.send(self.window_size);
        },
        WebDriverCommandMsg::SetWindowSize(_, size, reply) => {
            // Remember the reply channel; it is answered once the resize
            // actually happens (see handle_window_size_msg).
            self.webdriver.resize_channel = Some(reply);
            self.compositor_proxy.send(ToCompositorMsg::ResizeTo(size));
        },
        WebDriverCommandMsg::LoadUrl(pipeline_id, load_data, reply) => {
            self.load_url_for_webdriver(pipeline_id, load_data, reply, false);
        },
        WebDriverCommandMsg::Refresh(pipeline_id, reply) => {
            // A refresh is a replace-load of the pipeline's current URL.
            let load_data = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => LoadData::new(pipeline.url.clone(), None, None),
                None => return warn!("Pipeline {:?} Refresh after closure.", pipeline_id),
            };
            self.load_url_for_webdriver(pipeline_id, load_data, reply, true);
        }
        WebDriverCommandMsg::ScriptCommand(pipeline_id, cmd) => {
            let control_msg = ConstellationControlMsg::WebDriverScriptCommand(pipeline_id, cmd);
            let result = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => pipeline.event_loop.send(control_msg),
                None => return warn!("Pipeline {:?} ScriptCommand after closure.", pipeline_id),
            };
            if let Err(e) = result {
                self.handle_send_error(pipeline_id, e);
            }
        },
        WebDriverCommandMsg::SendKeys(pipeline_id, cmd) => {
            let event_loop = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => pipeline.event_loop.clone(),
                None => return warn!("Pipeline {:?} SendKeys after closure.", pipeline_id),
            };
            // Deliver each key event individually; stop on the first failure.
            for (key, mods, state) in cmd {
                let event = CompositorEvent::KeyEvent(None, key, state, mods);
                let control_msg = ConstellationControlMsg::SendEvent(pipeline_id, event);
                if let Err(e) = event_loop.send(control_msg) {
                    return self.handle_send_error(pipeline_id, e);
                }
            }
        },
        WebDriverCommandMsg::TakeScreenshot(pipeline_id, reply) => {
            // Only the root frame's current pipeline can be captured;
            // for any other pipeline reply with no image.
            let current_pipeline_id = self.frames.get(&self.root_frame_id)
                .map(|root_frame| root_frame.current.pipeline_id);
            if Some(pipeline_id) == current_pipeline_id {
                self.compositor_proxy.send(ToCompositorMsg::CreatePng(reply));
            } else if let Err(e) = reply.send(None) {
                // Collapsed from `else { if let … }` (clippy collapsible_else_if).
                warn!("Screenshot reply failed ({})", e);
            }
        },
    }
}
/// Make `next_pipeline_id` the current session entry of `frame_id`, shifting
/// entries between the frame's `prev` and `next` stacks as needed, then
/// freeze the old pipeline, thaw the new one, rebuild the frame tree, and
/// notify the owning iframe (if any) of the new pipeline id.
fn traverse_frame_to_pipeline(&mut self, frame_id: FrameId, next_pipeline_id: PipelineId) {
    // Check if the currently focused pipeline is the pipeline being replaced
    // (or a child of it). This has to be done here, before the current
    // frame tree is modified below.
    let update_focus_pipeline = self.focused_pipeline_in_tree(frame_id);
    let prev_pipeline_id = match self.frames.get_mut(&frame_id) {
        Some(frame) => {
            let prev = frame.current.pipeline_id;
            // Check that this frame contains the pipeline passed in, so that this does not
            // change Frame's state before realizing `next_pipeline_id` is invalid.
            // (`.any(..)` replaces the former `.find(..).is_some()` idiom.)
            if frame.next.iter().any(|entry| next_pipeline_id == entry.pipeline_id) {
                // Forward traversal: move entries from `next` onto `prev`
                // until the target entry becomes current.
                frame.prev.push(frame.current.clone());
                while let Some(entry) = frame.next.pop() {
                    if entry.pipeline_id == next_pipeline_id {
                        frame.current = entry;
                        break;
                    } else {
                        frame.prev.push(entry);
                    }
                }
            } else if frame.prev.iter().any(|entry| next_pipeline_id == entry.pipeline_id) {
                // Backward traversal: move entries from `prev` onto `next`.
                frame.next.push(frame.current.clone());
                while let Some(entry) = frame.prev.pop() {
                    if entry.pipeline_id == next_pipeline_id {
                        frame.current = entry;
                        break;
                    } else {
                        frame.next.push(entry);
                    }
                }
            } else if prev != next_pipeline_id {
                return warn!("Tried to traverse frame {:?} to pipeline {:?} it does not contain.",
                             frame_id, next_pipeline_id);
            }
            prev
        },
        None => return warn!("no frame to traverse"),
    };
    let pipeline_info = self.pipelines.get(&prev_pipeline_id).and_then(|p| p.parent_info);
    // If the currently focused pipeline is the one being changed (or a child
    // of the pipeline being changed) then update the focus pipeline to be
    // the replacement.
    if update_focus_pipeline {
        self.focus_pipeline_id = Some(next_pipeline_id);
    }
    // Suspend the old pipeline, and resume the new one.
    if let Some(prev_pipeline) = self.pipelines.get(&prev_pipeline_id) {
        prev_pipeline.freeze();
    }
    if let Some(next_pipeline) = self.pipelines.get(&next_pipeline_id) {
        next_pipeline.thaw();
    }
    // Set paint permissions correctly for the compositor layers.
    self.send_frame_tree();
    // Update the owning iframe to point to the new pipeline id.
    // This makes things like contentDocument work correctly.
    if let Some((parent_pipeline_id, _)) = pipeline_info {
        let msg = ConstellationControlMsg::UpdatePipelineId(parent_pipeline_id,
                                                            frame_id,
                                                            next_pipeline_id);
        let result = match self.pipelines.get(&parent_pipeline_id) {
            None => return warn!("Pipeline {:?} child traversed after closure.", parent_pipeline_id),
            Some(pipeline) => pipeline.event_loop.send(msg),
        };
        if let Err(e) = result {
            self.handle_send_error(parent_pipeline_id, e);
        }
        // If this is an iframe, send a mozbrowser location change event.
        // This is the result of a back/forward traversal.
        self.trigger_mozbrowserlocationchange(next_pipeline_id);
    }
}
/// Walk up the parent chain from `pipeline_id` to find its top-level frame:
/// the frame of the nearest enclosing mozbrowser iframe, or the root frame.
/// When mozbrowser is disabled, the root frame is the only top-level frame.
fn get_top_level_frame_for_pipeline(&self, mut pipeline_id: PipelineId) -> FrameId {
if PREFS.is_mozbrowser_enabled() {
loop {
match self.pipelines.get(&pipeline_id) {
Some(pipeline) => match pipeline.parent_info {
Some((_, FrameType::MozBrowserIFrame)) => return pipeline.frame_id,
Some((parent_id, _)) => pipeline_id = parent_id,
None => return self.root_frame_id,
},
None => {
// Fall back to the root frame if the chain is broken by a closed pipeline.
warn!("Finding top-level ancestor for pipeline {} after closure.", pipeline_id);
return self.root_frame_id;
},
}
}
} else {
// If mozbrowser is not enabled, the root frame is the only top-level frame
self.root_frame_id
}
}
/// Load a URL on behalf of webdriver, remembering the reply channel so load
/// completion can be reported back (see handle_load_complete_msg).
fn load_url_for_webdriver(&mut self,
                          pipeline_id: PipelineId,
                          load_data: LoadData,
                          reply: IpcSender<webdriver_msg::LoadStatus>,
                          replace: bool) {
    if let Some(new_pipeline_id) = self.load_url(pipeline_id, load_data, replace) {
        self.webdriver.load_channel = Some((new_pipeline_id, reply));
    }
}
/// Commit a pending frame change: install the new pipeline as the frame's
/// current entry (replacing or pushing history as requested), create the
/// frame if it doesn't exist yet, and rebuild the compositor frame tree.
fn add_or_replace_pipeline_in_frame_tree(&mut self, frame_change: FrameChange) {
debug!("Setting frame {} to be pipeline {}.", frame_change.frame_id, frame_change.new_pipeline_id);
// If the currently focused pipeline is the one being changed (or a child
// of the pipeline being changed) then update the focus pipeline to be
// the replacement.
if let Some(old_pipeline_id) = frame_change.old_pipeline_id {
if let Some(old_frame_id) = self.pipelines.get(&old_pipeline_id).map(|pipeline| pipeline.frame_id) {
if self.focused_pipeline_in_tree(old_frame_id) {
self.focus_pipeline_id = Some(frame_change.new_pipeline_id);
}
}
}
if self.frames.contains_key(&frame_change.frame_id) {
if frame_change.replace {
// Replace the current entry in place; the evicted pipeline is closed.
let evicted = self.frames.get_mut(&frame_change.frame_id).map(|frame| {
frame.replace_current(frame_change.new_pipeline_id)
});
if let Some(evicted) = evicted {
self.close_pipeline(evicted.pipeline_id, ExitPipelineMode::Normal);
}
} else {
if let Some(ref mut frame) = self.frames.get_mut(&frame_change.frame_id) {
frame.load(frame_change.new_pipeline_id);
}
}
} else {
// The new pipeline is in a new frame with no history
self.new_frame(frame_change.frame_id, frame_change.new_pipeline_id);
}
if !frame_change.replace {
// If this is an iframe, send a mozbrowser location change event.
// This is the result of a link being clicked and a navigation completing.
self.trigger_mozbrowserlocationchange(frame_change.new_pipeline_id);
// A fresh navigation invalidates the forward history.
let top_level_frame_id = self.get_top_level_frame_for_pipeline(frame_change.new_pipeline_id);
self.clear_joint_session_future(top_level_frame_id);
}
// Build frame tree
self.send_frame_tree();
}
/// A document is ready to become active: notify the parent iframe (if any)
/// and, if this pipeline was queued as a pending frame change, commit it to
/// the frame tree.
fn handle_activate_document_msg(&mut self, pipeline_id: PipelineId) {
debug!("Document ready to activate {:?}", pipeline_id);
// Notify the parent (if there is one).
if let Some(pipeline) = self.pipelines.get(&pipeline_id) {
if let Some((parent_pipeline_id, _)) = pipeline.parent_info {
if let Some(parent_pipeline) = self.pipelines.get(&parent_pipeline_id) {
let msg = ConstellationControlMsg::FramedContentChanged(parent_pipeline_id, pipeline.frame_id);
let _ = parent_pipeline.event_loop.send(msg);
}
}
}
// Find the pending frame change whose new pipeline id is pipeline_id.
// rposition: if several target the same pipeline, take the most recent.
let pending_index = self.pending_frames.iter().rposition(|frame_change| {
frame_change.new_pipeline_id == pipeline_id
});
// If it is found, remove it from the pending frames, and make it
// the active document of its frame.
if let Some(pending_index) = pending_index {
let frame_change = self.pending_frames.swap_remove(pending_index);
self.add_or_replace_pipeline_in_frame_tree(frame_change);
}
}
/// Called when the window is resized. Sends Resize to active pipelines,
/// ResizeInactive to session-history pipelines, Resize to pending top-level
/// pipelines, answers any waiting webdriver resize request, and records the
/// new window size.
fn handle_window_size_msg(&mut self, new_size: WindowSizeData, size_type: WindowSizeType) {
debug!("handle_window_size_msg: {:?} {:?}", new_size.initial_viewport.to_untyped(),
new_size.visible_viewport.to_untyped());
if let Some(frame) = self.frames.get(&self.root_frame_id) {
// Send Resize (or ResizeInactive) messages to each
// pipeline in the frame tree.
let pipeline_id = frame.current.pipeline_id;
let pipeline = match self.pipelines.get(&pipeline_id) {
None => return warn!("Pipeline {:?} resized after closing.", pipeline_id),
Some(pipeline) => pipeline,
};
let _ = pipeline.event_loop.send(ConstellationControlMsg::Resize(
pipeline.id,
new_size,
size_type
));
// Inactive (history) entries get ResizeInactive with no size type.
for entry in frame.prev.iter().chain(&frame.next) {
let pipeline = match self.pipelines.get(&entry.pipeline_id) {
None => {
warn!("Inactive pipeline {:?} resized after closing.", pipeline_id);
continue;
},
Some(pipeline) => pipeline,
};
let _ = pipeline.event_loop.send(ConstellationControlMsg::ResizeInactive(
pipeline.id,
new_size
));
}
}
// Send resize message to any pending pipelines that aren't loaded yet.
for pending_frame in &self.pending_frames {
let pipeline_id = pending_frame.new_pipeline_id;
let pipeline = match self.pipelines.get(&pipeline_id) {
None => { warn!("Pending pipeline {:?} is closed", pipeline_id); continue; }
Some(pipeline) => pipeline,
};
// Only top-level pending pipelines (no parent) track the window size.
if pipeline.parent_info.is_none() {
let _ = pipeline.event_loop.send(ConstellationControlMsg::Resize(
pipeline.id,
new_size,
size_type
));
}
}
// Complete a webdriver SetWindowSize request, if one is outstanding.
if let Some(resize_channel) = self.webdriver.resize_channel.take() {
let _ = resize_channel.send(new_size);
}
self.window_size = new_size;
}
/// Handle updating actual viewport / zoom due to @viewport rules
fn handle_viewport_constrained_msg(&mut self,
                                   pipeline_id: PipelineId,
                                   constraints: ViewportConstraints) {
    let msg = ToCompositorMsg::ViewportConstrained(pipeline_id, constraints);
    self.compositor_proxy.send(msg)
}
/// Checks the state of all script and layout pipelines to see if they are idle
/// and compares the current layout state to what the compositor has. This is used
/// to check if the output image is "stable" and can be written as a screenshot
/// for reftests.
/// Since this function is only used in reftests, we do not harden it against panic.
fn handle_is_ready_to_save_image(&mut self,
pipeline_states: HashMap<PipelineId, Epoch>) -> ReadyToSave {
// Note that this function can panic, due to ipc-channel creation failure.
// avoiding this panic would require a mechanism for dealing
// with low-resource scenarios.
//
// If there is no root frame yet, the initial page has
// not loaded, so there is nothing to save yet.
if !self.frames.contains_key(&self.root_frame_id) {
return ReadyToSave::NoRootFrame;
}
// If there are pending loads, wait for those to complete.
if !self.pending_frames.is_empty() {
return ReadyToSave::PendingFrames;
}
// One channel pair per query kind, reused for every pipeline in the tree.
let (state_sender, state_receiver) = ipc::channel().expect("Failed to create IPC channel!");
let (epoch_sender, epoch_receiver) = ipc::channel().expect("Failed to create IPC channel!");
// Step through the current frame tree, checking that the script
// thread is idle, and that the current epoch of the layout thread
// matches what the compositor has painted. If all these conditions
// are met, then the output image should not change and a reftest
// screenshot can safely be written.
for frame in self.current_frame_tree_iter(self.root_frame_id) {
let pipeline_id = frame.current.pipeline_id;
debug!("Checking readiness of frame {}, pipeline {}.", frame.id, pipeline_id);
let pipeline = match self.pipelines.get(&pipeline_id) {
None => {
warn!("Pipeline {:?} screenshot while closing.", pipeline_id);
continue;
},
Some(pipeline) => pipeline,
};
// Check to see if there are any webfonts still loading.
//
// If GetWebFontLoadState returns false, either there are no
// webfonts loading, or there's a WebFontLoaded message waiting in
// script_chan's message queue. Therefore, we need to check this
// before we check whether the document is ready; otherwise,
// there's a race condition where a webfont has finished loading,
// but hasn't yet notified the document.
let msg = LayoutControlMsg::GetWebFontLoadState(state_sender.clone());
if let Err(e) = pipeline.layout_chan.send(msg) {
warn!("Get web font failed ({})", e);
}
// If the reply channel fails, conservatively assume fonts are still loading.
if state_receiver.recv().unwrap_or(true) {
return ReadyToSave::WebFontNotLoaded;
}
// See if this pipeline has reached idle script state yet.
match self.document_states.get(&frame.current.pipeline_id) {
Some(&DocumentState::Idle) => {}
Some(&DocumentState::Pending) | None => {
return ReadyToSave::DocumentLoading;
}
}
// Check the visible rectangle for this pipeline. If the constellation has received a
// size for the pipeline, then its painting should be up to date. If the constellation
// *hasn't* received a size, it could be that the layer was hidden by script before the
// compositor discovered it, so we just don't check the layer.
if let Some(size) = pipeline.size {
// If the rectangle for this pipeline is zero sized, it will
// never be painted. In this case, don't query the layout
// thread as it won't contribute to the final output image.
if size == TypedSize2D::zero() {
continue;
}
// Get the epoch that the compositor has drawn for this pipeline.
let compositor_epoch = pipeline_states.get(&frame.current.pipeline_id);
match compositor_epoch {
Some(compositor_epoch) => {
// Synchronously query the layout thread to see if the current
// epoch matches what the compositor has drawn. If they match
// (and script is idle) then this pipeline won't change again
// and can be considered stable.
let message = LayoutControlMsg::GetCurrentEpoch(epoch_sender.clone());
if let Err(e) = pipeline.layout_chan.send(message) {
warn!("Failed to send GetCurrentEpoch ({}).", e);
}
// NOTE(review): on recv failure we only warn and treat this pipeline
// as stable — acceptable because this path is reftest-only.
match epoch_receiver.recv() {
Err(e) => warn!("Failed to receive current epoch ({}).", e),
Ok(layout_thread_epoch) => if layout_thread_epoch != *compositor_epoch {
return ReadyToSave::EpochMismatch;
},
}
}
None => {
// The compositor doesn't know about this pipeline yet.
// Assume it hasn't rendered yet.
return ReadyToSave::PipelineUnknown;
}
}
}
}
// All script threads are idle and layout epochs match compositor, so output image!
ReadyToSave::Ready
}
/// Remove the forward ("future") session-history entries of `frame_id` and of
/// every frame reachable through the children of the pipelines it references,
/// then close the evicted pipelines.
fn clear_joint_session_future(&mut self, frame_id: FrameId) {
let mut evicted_pipelines = vec!();
// Worklist of frames whose forward history still needs clearing.
let mut frames_to_clear = vec!(frame_id);
while let Some(frame_id) = frames_to_clear.pop() {
let frame = match self.frames.get_mut(&frame_id) {
Some(frame) => frame,
None => {
warn!("Removed forward history after frame {:?} closure.", frame_id);
continue;
}
};
evicted_pipelines.extend(frame.remove_forward_entries());
// Walk every remaining entry (past, future and current) so that child
// frames of each referenced pipeline are also queued for clearing.
for entry in frame.next.iter().chain(frame.prev.iter()).chain(once(&frame.current)) {
let pipeline = match self.pipelines.get(&entry.pipeline_id) {
Some(pipeline) => pipeline,
None => {
warn!("Removed forward history after pipeline {:?} closure.", entry.pipeline_id);
continue;
}
};
frames_to_clear.extend_from_slice(&pipeline.children);
}
}
// Close the evicted pipelines only after traversal, so the maps are not
// mutated while being walked.
for entry in evicted_pipelines {
self.close_pipeline(entry.pipeline_id, ExitPipelineMode::Normal);
}
}
// Close a frame (and all children)
fn close_frame(&mut self, frame_id: FrameId, exit_mode: ExitPipelineMode) {
debug!("Closing frame {}.", frame_id);
// Look up the parent pipeline *before* tearing anything down, since the
// frame entry is removed below and the lookup would then fail.
let parent_info = self.frames.get(&frame_id)
.and_then(|frame| self.pipelines.get(&frame.current.pipeline_id))
.and_then(|pipeline| pipeline.parent_info);
self.close_frame_children(frame_id, exit_mode);
// Drop the per-frame event loops; they are keyed by top-level frame id.
self.event_loops.remove(&frame_id);
if self.frames.remove(&frame_id).is_none() {
warn!("Closing frame {:?} twice.", frame_id);
}
// Detach this frame from its parent pipeline's child list, if any.
if let Some((parent_pipeline_id, _)) = parent_info {
let parent_pipeline = match self.pipelines.get_mut(&parent_pipeline_id) {
None => return warn!("Pipeline {:?} child closed after parent.", parent_pipeline_id),
Some(parent_pipeline) => parent_pipeline,
};
parent_pipeline.remove_child(frame_id);
}
debug!("Closed frame {:?}.", frame_id);
}
// Close every pipeline belonging to a frame: pending navigations first,
// then the forward history, the current entry, and the back history.
fn close_frame_children(&mut self, frame_id: FrameId, exit_mode: ExitPipelineMode) {
    debug!("Closing frame children {}.", frame_id);
    // Gather all pipeline ids up front, then close them afterwards, before
    // the frame itself is removed from the frames map. The ordering is
    // vital: close_pipeline() may recursively close child frames, and those
    // must still be able to detach themselves from this frame correctly.
    let mut doomed: Vec<PipelineId> = Vec::new();
    for frame_change in &self.pending_frames {
        if frame_change.frame_id == frame_id {
            doomed.push(frame_change.new_pipeline_id);
        }
    }
    if let Some(frame) = self.frames.get(&frame_id) {
        for state in &frame.next {
            doomed.push(state.pipeline_id);
        }
        doomed.push(frame.current.pipeline_id);
        for state in &frame.prev {
            doomed.push(state.pipeline_id);
        }
    }
    for pipeline_id in doomed {
        self.close_pipeline(pipeline_id, exit_mode);
    }
    debug!("Closed frame children {}.", frame_id);
}
// Close all pipelines at and beneath a given frame
fn close_pipeline(&mut self, pipeline_id: PipelineId, exit_mode: ExitPipelineMode) {
debug!("Closing pipeline {:?}.", pipeline_id);
// Store information about the frames to be closed. Then close the
// frames, before removing ourself from the pipelines hash map. This
// ordering is vital - so that if close_frames() ends up closing
// any child pipelines, they can be removed from the parent pipeline correctly.
let frames_to_close = {
let mut frames_to_close = vec!();
if let Some(pipeline) = self.pipelines.get(&pipeline_id) {
frames_to_close.extend_from_slice(&pipeline.children);
}
frames_to_close
};
// Remove any child frames
for child_frame in &frames_to_close {
self.close_frame(*child_frame, exit_mode);
}
// Note, we don't remove the pipeline now, we wait for the message to come back from
// the pipeline.
let pipeline = match self.pipelines.get(&pipeline_id) {
Some(pipeline) => pipeline,
None => return warn!("Closing pipeline {:?} twice.", pipeline_id),
};
// Remove this pipeline from pending frames if it hasn't loaded yet.
let pending_index = self.pending_frames.iter().position(|frame_change| {
frame_change.new_pipeline_id == pipeline_id
});
if let Some(pending_index) = pending_index {
self.pending_frames.remove(pending_index);
}
// Inform script, compositor that this pipeline has exited.
// A normal exit lets script/compositor wind down; a forced exit does not.
match exit_mode {
ExitPipelineMode::Normal => pipeline.exit(),
ExitPipelineMode::Force => pipeline.force_exit(),
}
debug!("Closed pipeline {:?}.", pipeline_id);
}
// Randomly close a pipeline if --random-pipeline-closure-probability is set.
// This is used to test the constellation's hardening against pipeline crashes.
fn maybe_close_random_pipeline(&mut self) {
match self.random_pipeline_closure {
// Bail out with probability (1 - p), so a pipeline is closed on
// roughly a fraction `p` of calls.
Some((ref mut rng, probability)) => if probability <= rng.gen::<f32>() { return },
_ => return,
};
// In order to get repeatability, we sort the pipeline ids.
let mut pipeline_ids: Vec<&PipelineId> = self.pipelines.keys().collect();
pipeline_ids.sort();
if let Some((ref mut rng, _)) = self.random_pipeline_closure {
if let Some(pipeline_id) = rng.choose(&*pipeline_ids) {
if let Some(pipeline) = self.pipelines.get(pipeline_id) {
// Don't kill the mozbrowser pipeline
if PREFS.is_mozbrowser_enabled() && pipeline.parent_info.is_none() {
info!("Not closing mozbrowser pipeline {}.", pipeline_id);
} else {
// Note that we deliberately do not do any of the tidying up
// associated with closing a pipeline. The constellation should cope!
warn!("Randomly closing pipeline {}.", pipeline_id);
pipeline.force_exit();
}
}
}
}
}
// Build the sendable representation of a frame (and, recursively, of the
// frames nested inside its current pipeline) for the compositor. Returns
// None if the frame or its current pipeline has already been closed.
fn frame_to_sendable(&self, frame_id: FrameId) -> Option<SendableFrameTree> {
    let frame: &Frame = match self.frames.get(&frame_id) {
        Some(frame) => frame,
        None => return None,
    };
    let pipeline: &Pipeline = match self.pipelines.get(&frame.current.pipeline_id) {
        Some(pipeline) => pipeline,
        None => return None,
    };
    let mut frame_tree = SendableFrameTree {
        pipeline: pipeline.to_sendable(),
        size: pipeline.size,
        children: vec!(),
    };
    // Closed child frames simply drop out of the tree.
    frame_tree.children.extend(
        pipeline.children.iter().filter_map(|&child_id| self.frame_to_sendable(child_id))
    );
    Some(frame_tree)
}
// Ship the current frame tree to the compositor and wait for it to be
// acknowledged. Note that this function can panic, due to ipc-channel
// creation failure; avoiding that would require a mechanism for dealing
// with low-resource scenarios.
fn send_frame_tree(&mut self) {
    debug!("Sending frame tree for frame {}.", self.root_frame_id);
    let frame_tree = match self.frame_to_sendable(self.root_frame_id) {
        Some(frame_tree) => frame_tree,
        None => return,
    };
    let (chan, port) = ipc::channel().expect("Failed to create IPC channel!");
    self.compositor_proxy.send(ToCompositorMsg::SetFrameTree(frame_tree,
                                                             chan));
    if port.recv().is_err() {
        // Our message has been discarded, probably shutting down.
        warn!("Compositor has discarded SetFrameTree");
    }
}
// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowserlocationchange
// Note that this is a no-op if the pipeline is not a mozbrowser iframe
fn trigger_mozbrowserlocationchange(&self, pipeline_id: PipelineId) {
    let pipeline = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => pipeline,
        None => return warn!("triggered mozbrowser location change on closed pipeline {}", pipeline_id),
    };
    // Only pipelines embedded as mozbrowser iframes fire this event.
    let parent_id = match pipeline.parent_info {
        Some((parent_id, FrameType::MozBrowserIFrame)) => parent_id,
        _ => return,
    };
    match self.pipelines.get(&parent_id) {
        Some(parent) => {
            let can_go_forward = !self.joint_session_future_is_empty(pipeline.frame_id);
            let can_go_back = !self.joint_session_past_is_empty(pipeline.frame_id);
            let url = pipeline.url.to_string();
            let event = MozBrowserEvent::LocationChange(url, can_go_back, can_go_forward);
            parent.trigger_mozbrowser_event(Some(pipeline.frame_id), event);
        },
        None => warn!("triggered mozbrowser location change on closed parent {}", parent_id),
    }
}
// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowsererror
// Note that this does not require the pipeline to be an immediate child of the root
fn trigger_mozbrowsererror(&mut self, top_level_frame_id: FrameId, reason: String, backtrace: Option<String>) {
if !PREFS.is_mozbrowser_enabled() { return; }
// Assemble the crash report: all buffered warnings, then the error reason,
// then the backtrace (if one was captured). Draining empties the buffer,
// so each warning appears in at most one report.
let mut report = String::new();
for (thread_name, warning) in self.handled_warnings.drain(..) {
report.push_str("\nWARNING: ");
if let Some(thread_name) = thread_name {
report.push_str("<");
report.push_str(&*thread_name);
report.push_str(">: ");
}
report.push_str(&*warning);
}
report.push_str("\nERROR: ");
report.push_str(&*reason);
if let Some(backtrace) = backtrace {
report.push_str("\n\n");
report.push_str(&*backtrace);
}
let event = MozBrowserEvent::Error(MozBrowserErrorType::Fatal, reason, report);
// Dispatch the event: if the crashed frame's pipeline has a parent, the
// parent fires the event (targeting the top-level frame); otherwise the
// pipeline fires it itself. Any closed link in the chain only warns.
match self.frames.get(&top_level_frame_id) {
None => warn!("Mozbrowser error after top-level frame closed."),
Some(frame) => match self.pipelines.get(&frame.current.pipeline_id) {
None => warn!("Mozbrowser error after top-level pipeline closed."),
Some(pipeline) => match pipeline.parent_info {
None => pipeline.trigger_mozbrowser_event(None, event),
Some((parent_id, _)) => match self.pipelines.get(&parent_id) {
None => warn!("Mozbrowser error after root pipeline closed."),
Some(parent) => parent.trigger_mozbrowser_event(Some(top_level_frame_id), event),
},
},
},
};
}
/// Does the currently focused pipeline (if any) live in the frame tree
/// rooted at `frame_id`?
fn focused_pipeline_in_tree(&self, frame_id: FrameId) -> bool {
    match self.focus_pipeline_id {
        Some(pipeline_id) => self.pipeline_exists_in_tree(pipeline_id, frame_id),
        None => false,
    }
}
/// Returns true if `pipeline_id` is the active pipeline of some frame in the
/// current frame tree rooted at the constellation's root frame.
fn pipeline_is_in_current_frame(&self, pipeline_id: PipelineId) -> bool {
self.pipeline_exists_in_tree(pipeline_id, self.root_frame_id)
}
/// Walk the current frame tree below `root_frame_id` looking for a frame
/// whose active pipeline is `pipeline_id`.
fn pipeline_exists_in_tree(&self,
                           pipeline_id: PipelineId,
                           root_frame_id: FrameId) -> bool {
    for frame in self.current_frame_tree_iter(root_frame_id) {
        if frame.current.pipeline_id == pipeline_id {
            return true;
        }
    }
    false
}
}
Auto merge of #14825 - fflorent:master, r=jdm
Remove useless call to map() in constellation.rs
Remove a slightly redundant call to map() in constellation.rs :)
Context: https://github.com/servo/servo/pull/14724/files#r94317893
---
<!-- Thank you for contributing to Servo! Please replace each `[ ]` by `[X]` when the step is complete, and replace `__` with appropriate data: -->
- [x] `./mach build -d` does not report any errors
- [x] `./mach test-tidy` does not report any errors
- [ ] These changes fix [this remark](https://github.com/servo/servo/pull/14724/files#r94317893)
<!-- Either: -->
- [ ] There are tests for these changes OR
- [x] These changes do not require tests because these are minor changes
<!-- Pull requests that do not address these steps are welcome, but they will require additional verification as part of the review process. -->
<!-- Reviewable:start -->
---
This change is [<img src="https://reviewable.io/review_button.svg" height="34" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/servo/servo/14825)
<!-- Reviewable:end -->
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The `Constellation`, Servo's Grand Central Station
//!
//! The constellation tracks all information kept globally by the
//! browser engine, which includes:
//!
//! * The set of all `EventLoop` objects. Each event loop is
//! the constellation's view of a script thread. The constellation
//! interacts with a script thread by message-passing.
//!
//! * The set of all `Pipeline` objects. Each pipeline gives the
//! constellation's view of a `Window`, with its script thread and
//! layout threads. Pipelines may share script threads, but not
//! layout threads.
//!
//! * The set of all `Frame` objects. Each frame gives the constellation's
//! view of a browsing context. Each browsing context stores an independent
//! session history, created by navigation of that frame. The session
//! history can be traversed, for example by the back and forwards UI,
//! so each session history maintains a list of past and future pipelines,
//! as well as the current active pipeline.
//!
//! There are two kinds of frames: top-level frames (for example tabs
//! in a browser UI), and nested frames (typically caused by `iframe`
//! elements). Frames have a hierarchy (typically caused by `iframe`s
//! containing `iframe`s), giving rise to a frame tree with a root frame.
//! The logical relationship between these types is:
//!
//! ```
//! +---------+ +------------+ +-------------+
//! | Frame | --parent?--> | Pipeline | --event_loop--> | EventLoop |
//! | | --current--> | | | |
//! | | --prev*----> | | <---pipeline*-- | |
//! | | --next*----> | | +-------------+
//! | | | |
//! | | <----frame-- | |
//! +---------+ +------------+
//! ```
//!
//! Complicating matters, there are also mozbrowser iframes, which are top-level
//! frames with a parent.
//!
//! The constellation also maintains channels to threads, including:
//!
//! * The script and layout threads.
//! * The graphics compositor.
//! * The font cache, image cache, and resource manager, which load
//! and cache shared fonts, images, or other resources.
//! * The service worker manager.
//! * The devtools, debugger and webdriver servers.
//!
//! The constellation passes messages between the threads, and updates its state
//! to track the evolving state of the frame tree.
//!
//! The constellation acts as a logger, tracking any `warn!` messages from threads,
//! and converting any `error!` or `panic!` into a crash report, which is filed
//! using an appropriate `mozbrowsererror` event.
//!
//! Since there is only one constellation, and its responsibilities include crash reporting,
//! it is very important that it does not panic.
use backtrace::Backtrace;
use bluetooth_traits::BluetoothRequest;
use canvas::canvas_paint_thread::CanvasPaintThread;
use canvas::webgl_paint_thread::WebGLPaintThread;
use canvas_traits::CanvasMsg;
use compositing::SendableFrameTree;
use compositing::compositor_thread::CompositorProxy;
use compositing::compositor_thread::Msg as ToCompositorMsg;
use debugger;
use devtools_traits::{ChromeToDevtoolsControlMsg, DevtoolsControlMsg};
use euclid::scale_factor::ScaleFactor;
use euclid::size::{Size2D, TypedSize2D};
use event_loop::EventLoop;
use frame::{Frame, FrameChange, FrameTreeIterator, FullFrameTreeIterator};
use gfx::font_cache_thread::FontCacheThread;
use gfx_traits::Epoch;
use ipc_channel::ipc::{self, IpcSender};
use ipc_channel::router::ROUTER;
use layout_traits::LayoutThreadFactory;
use log::{Log, LogLevel, LogLevelFilter, LogMetadata, LogRecord};
use msg::constellation_msg::{FrameId, FrameType, PipelineId};
use msg::constellation_msg::{Key, KeyModifiers, KeyState};
use msg::constellation_msg::{PipelineNamespace, PipelineNamespaceId, TraversalDirection};
use net_traits::{self, IpcSend, ResourceThreads};
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::pub_domains::reg_host;
use net_traits::storage_thread::{StorageThreadMsg, StorageType};
use offscreen_gl_context::{GLContextAttributes, GLLimits};
use pipeline::{InitialPipelineState, Pipeline};
use profile_traits::mem;
use profile_traits::time;
use rand::{Rng, SeedableRng, StdRng, random};
use script_traits::{AnimationState, AnimationTickType, CompositorEvent};
use script_traits::{ConstellationControlMsg, ConstellationMsg as FromCompositorMsg};
use script_traits::{DocumentState, LayoutControlMsg, LoadData};
use script_traits::{IFrameLoadInfo, IFrameLoadInfoWithData, IFrameSandboxState, TimerEventRequest};
use script_traits::{LayoutMsg as FromLayoutMsg, ScriptMsg as FromScriptMsg, ScriptThreadFactory};
use script_traits::{LogEntry, ServiceWorkerMsg, webdriver_msg};
use script_traits::{MozBrowserErrorType, MozBrowserEvent, WebDriverCommandMsg, WindowSizeData};
use script_traits::{SWManagerMsg, ScopeThings, WindowSizeType};
use servo_config::opts;
use servo_config::prefs::PREFS;
use servo_remutex::ReentrantMutex;
use servo_url::ServoUrl;
use std::borrow::ToOwned;
use std::collections::{HashMap, VecDeque};
use std::io::Error as IOError;
use std::iter::once;
use std::marker::PhantomData;
use std::process;
use std::rc::{Rc, Weak};
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender, channel};
use std::thread;
use std::time::Instant;
use style_traits::PagePx;
use style_traits::cursor::Cursor;
use style_traits::viewport::ViewportConstraints;
use timer_scheduler::TimerScheduler;
use webrender_traits;
/// The `Constellation` itself. In the servo browser, there is one
/// constellation, which maintains all of the browser global data.
/// In embedded applications, there may be more than one constellation,
/// which are independent of each other.
///
/// The constellation may be in a different process from the pipelines,
/// and communicates using IPC.
///
/// It is parameterized over a `LayoutThreadFactory` and a
/// `ScriptThreadFactory` (which in practice are implemented by
/// `LayoutThread` in the `layout` crate, and `ScriptThread` in
/// the `script` crate). Script and layout communicate using a `Message`
/// type.
pub struct Constellation<Message, LTF, STF> {
/// An IPC channel for script threads to send messages to the constellation.
/// This is the script threads' view of `script_receiver`.
script_sender: IpcSender<FromScriptMsg>,
/// A channel for the constellation to receive messages from script threads.
/// This is the constellation's view of `script_sender`.
script_receiver: Receiver<FromScriptMsg>,
/// An IPC channel for layout threads to send messages to the constellation.
/// This is the layout threads' view of `layout_receiver`.
layout_sender: IpcSender<FromLayoutMsg>,
/// A channel for the constellation to receive messages from layout threads.
/// This is the constellation's view of `layout_sender`.
layout_receiver: Receiver<FromLayoutMsg>,
/// A channel for the constellation to receive messages from the compositor thread.
compositor_receiver: Receiver<FromCompositorMsg>,
/// A channel (the implementation of which is port-specific) for the
/// constellation to send messages to the compositor thread.
compositor_proxy: Box<CompositorProxy>,
/// Channels for the constellation to send messages to the
/// resource-related threads used for public browsing. There are two
/// groups of resource threads: one for public browsing, and one for
/// private browsing.
public_resource_threads: ResourceThreads,
/// Channels for the constellation to send messages to the
/// resource-related threads used for private browsing. There are two
/// groups of resource threads: one for public browsing, and one for
/// private browsing.
private_resource_threads: ResourceThreads,
/// A channel for the constellation to send messages to the image
/// cache thread.
image_cache_thread: ImageCacheThread,
/// A channel for the constellation to send messages to the font
/// cache thread.
font_cache_thread: FontCacheThread,
/// A channel for the constellation to send messages to the
/// debugger thread.
debugger_chan: Option<debugger::Sender>,
/// A channel for the constellation to send messages to the
/// devtools thread.
devtools_chan: Option<Sender<DevtoolsControlMsg>>,
/// An IPC channel for the constellation to send messages to the
/// bluetooth thread.
bluetooth_thread: IpcSender<BluetoothRequest>,
/// An IPC channel for the constellation to send messages to the
/// Service Worker Manager thread.
swmanager_chan: Option<IpcSender<ServiceWorkerMsg>>,
/// An IPC channel for Service Worker Manager threads to send
/// messages to the constellation. This is the SW Manager thread's
/// view of `swmanager_receiver`.
swmanager_sender: IpcSender<SWManagerMsg>,
/// A channel for the constellation to receive messages from the
/// Service Worker Manager thread. This is the constellation's view of
/// `swmanager_sender`.
swmanager_receiver: Receiver<SWManagerMsg>,
/// A channel for the constellation to send messages to the
/// time profiler thread.
time_profiler_chan: time::ProfilerChan,
/// A channel for the constellation to send messages to the
/// memory profiler thread.
mem_profiler_chan: mem::ProfilerChan,
/// A channel for the constellation to send messages to the
/// timer thread.
scheduler_chan: IpcSender<TimerEventRequest>,
/// A channel for the constellation to send messages to the
/// Webrender thread.
webrender_api_sender: webrender_traits::RenderApiSender,
/// The set of all event loops in the browser. We generate a new
/// event loop for each registered domain name (aka eTLD+1) in
/// each top-level frame. We store the event loops in a map
/// indexed by top-level frame id (as a `FrameId`) and registered
/// domain name (as a `String`) to event loops. This double
/// indirection ensures that separate tabs do not share event
/// loops, even if the same domain is loaded in each.
/// It is important that scripts with the same eTLD+1
/// share an event loop, since they can use `document.domain`
/// to become same-origin, at which point they can share DOM objects.
event_loops: HashMap<FrameId, HashMap<String, Weak<EventLoop>>>,
/// The set of all the pipelines in the browser.
/// (See the `pipeline` module for more details.)
pipelines: HashMap<PipelineId, Pipeline>,
/// The set of all the frames in the browser.
frames: HashMap<FrameId, Frame>,
/// When a navigation is performed, we do not immediately update
/// the frame tree, instead we ask the event loop to begin loading
/// the new document, and do not update the frame tree until the
/// document is active. Between starting the load and it activating,
/// we store a `FrameChange` object for the navigation in progress.
pending_frames: Vec<FrameChange>,
/// The root frame.
root_frame_id: FrameId,
/// The currently focused pipeline for key events.
/// `None` when no pipeline has focus.
focus_pipeline_id: Option<PipelineId>,
/// Pipeline IDs are namespaced in order to avoid name collisions,
/// and the namespaces are allocated by the constellation.
next_pipeline_namespace_id: PipelineNamespaceId,
/// The size of the top-level window.
window_size: WindowSizeData,
/// Bits of state used to interact with the webdriver implementation
webdriver: WebDriverData,
/// Document states for loaded pipelines (used only when writing screenshots).
document_states: HashMap<PipelineId, DocumentState>,
/// Are we shutting down?
shutting_down: bool,
/// Have we seen any warnings? Hopefully always empty!
/// The buffer contains `(thread_name, reason)` entries.
/// It is drained into the report built by `trigger_mozbrowsererror`.
handled_warnings: VecDeque<(Option<String>, String)>,
/// The random number generator and probability for closing pipelines.
/// This is for testing the hardening of the constellation.
random_pipeline_closure: Option<(StdRng, f32)>,
/// Phantom data that keeps the Rust type system happy.
phantom: PhantomData<(Message, LTF, STF)>,
}
/// State needed to construct a constellation.
pub struct InitialConstellationState {
/// A channel through which messages can be sent to the compositor.
pub compositor_proxy: Box<CompositorProxy + Send>,
/// A channel to the debugger, if applicable.
pub debugger_chan: Option<debugger::Sender>,
/// A channel to the developer tools, if applicable.
pub devtools_chan: Option<Sender<DevtoolsControlMsg>>,
/// A channel to the bluetooth thread.
pub bluetooth_thread: IpcSender<BluetoothRequest>,
/// A channel to the image cache thread.
pub image_cache_thread: ImageCacheThread,
/// A channel to the font cache thread.
pub font_cache_thread: FontCacheThread,
/// A channel to the resource threads used for public browsing.
pub public_resource_threads: ResourceThreads,
/// A channel to the resource threads used for private browsing.
pub private_resource_threads: ResourceThreads,
/// A channel to the time profiler thread.
pub time_profiler_chan: time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
/// Webrender API.
pub webrender_api_sender: webrender_traits::RenderApiSender,
/// Whether the constellation supports the clipboard.
/// TODO: this field is not used, remove it?
pub supports_clipboard: bool,
}
/// Data needed for webdriver
struct WebDriverData {
/// The in-flight webdriver load, if any: the pipeline being loaded and a
/// channel on which to report its load status.
load_channel: Option<(PipelineId, IpcSender<webdriver_msg::LoadStatus>)>,
/// A channel on which to report the window size after a webdriver-initiated
/// resize completes, if one is pending.
resize_channel: Option<IpcSender<WindowSizeData>>,
}
impl WebDriverData {
/// Create an empty `WebDriverData` with no pending load or resize requests.
fn new() -> WebDriverData {
WebDriverData {
load_channel: None,
resize_channel: None,
}
}
}
/// When we are running reftests, we save an image to compare against a reference.
/// This enum gives the possible states of preparing such an image.
#[derive(Debug, PartialEq)]
enum ReadyToSave {
/// The root frame has not been created yet; nothing has loaded.
NoRootFrame,
/// There are navigations still in flight.
PendingFrames,
/// A pipeline reported that webfonts are still loading.
WebFontNotLoaded,
/// A document has not yet reached the idle state.
DocumentLoading,
/// A layout thread's epoch does not match what the compositor painted.
EpochMismatch,
/// The compositor has no epoch recorded for a visible pipeline.
PipelineUnknown,
/// All checks passed; the output image is stable.
Ready,
}
/// When we are exiting a pipeline, we can either force exiting or not.
/// A normal exit waits for the compositor to update its state before
/// exiting, and delegates layout exit to script. A forced exit does
/// not notify the compositor, and exits layout without involving script.
#[derive(Clone, Copy)]
enum ExitPipelineMode {
/// Orderly shutdown via `Pipeline::exit`.
Normal,
/// Immediate shutdown via `Pipeline::force_exit`.
Force,
}
/// The constellation uses logging to perform crash reporting.
/// The constellation receives all `warn!`, `error!` and `panic!` messages,
/// and generates a crash report when it receives a panic.
/// A logger directed at the constellation from content processes
#[derive(Clone)]
pub struct FromScriptLogger {
/// A channel to the constellation
// NOTE(review): the reentrant mutex presumably guards against deadlock if
// logging occurs while the channel lock is already held — confirm.
pub constellation_chan: Arc<ReentrantMutex<IpcSender<FromScriptMsg>>>,
}
impl FromScriptLogger {
/// Create a new constellation logger.
pub fn new(constellation_chan: IpcSender<FromScriptMsg>) -> FromScriptLogger {
FromScriptLogger {
constellation_chan: Arc::new(ReentrantMutex::new(constellation_chan))
}
}
/// The maximum log level the constellation logger is interested in.
/// Only `Warn` and above are forwarded (see `log_entry`).
pub fn filter(&self) -> LogLevelFilter {
LogLevelFilter::Warn
}
}
impl Log for FromScriptLogger {
// Only records at Warn severity or above are of interest.
fn enabled(&self, metadata: &LogMetadata) -> bool {
metadata.level() <= LogLevel::Warn
}
// Convert the record to a `LogEntry` (if it is one we forward) and send it
// to the constellation, tagged with the installed top-level frame id and
// the current thread's name.
fn log(&self, record: &LogRecord) {
if let Some(entry) = log_entry(record) {
debug!("Sending log entry {:?}.", entry);
let top_level_frame_id = FrameId::installed();
let thread_name = thread::current().name().map(ToOwned::to_owned);
let msg = FromScriptMsg::LogEntry(top_level_frame_id, thread_name, entry);
// Recover the channel even if the lock was poisoned by a panic;
// a logger must not itself panic.
let chan = self.constellation_chan.lock().unwrap_or_else(|err| err.into_inner());
// Ignore send failures: nothing useful can be done if the
// constellation is unreachable (e.g. during shutdown).
let _ = chan.send(msg);
}
}
}
/// A logger directed at the constellation from the compositor
#[derive(Clone)]
pub struct FromCompositorLogger {
/// A channel to the constellation
// NOTE(review): the reentrant mutex presumably guards against deadlock if
// logging occurs while the channel lock is already held — confirm.
pub constellation_chan: Arc<ReentrantMutex<Sender<FromCompositorMsg>>>,
}
impl FromCompositorLogger {
/// Create a new constellation logger.
pub fn new(constellation_chan: Sender<FromCompositorMsg>) -> FromCompositorLogger {
FromCompositorLogger {
constellation_chan: Arc::new(ReentrantMutex::new(constellation_chan))
}
}
/// The maximum log level the constellation logger is interested in.
/// Only `Warn` and above are forwarded (see `log_entry`).
pub fn filter(&self) -> LogLevelFilter {
LogLevelFilter::Warn
}
}
impl Log for FromCompositorLogger {
// Only records at Warn severity or above are of interest.
fn enabled(&self, metadata: &LogMetadata) -> bool {
metadata.level() <= LogLevel::Warn
}
// Convert the record to a `LogEntry` (if it is one we forward) and send it
// to the constellation, tagged with the installed top-level frame id and
// the current thread's name.
fn log(&self, record: &LogRecord) {
if let Some(entry) = log_entry(record) {
debug!("Sending log entry {:?}.", entry);
let top_level_frame_id = FrameId::installed();
let thread_name = thread::current().name().map(ToOwned::to_owned);
let msg = FromCompositorMsg::LogEntry(top_level_frame_id, thread_name, entry);
// Recover the channel even if the lock was poisoned by a panic;
// a logger must not itself panic.
let chan = self.constellation_chan.lock().unwrap_or_else(|err| err.into_inner());
// Ignore send failures: nothing useful can be done if the
// constellation is unreachable (e.g. during shutdown).
let _ = chan.send(msg);
}
}
}
/// Rust uses `LogRecord` for storing logging, but servo converts that to
/// a `LogEntry`. We do this so that we can record panics as well as log
/// messages, and because `LogRecord` does not implement serde (de)serialization,
/// so cannot be used over an IPC channel.
fn log_entry(record: &LogRecord) -> Option<LogEntry> {
match record.level() {
LogLevel::Error if thread::panicking() => Some(LogEntry::Panic(
format!("{}", record.args()),
format!("{:?}", Backtrace::new())
)),
LogLevel::Error => Some(LogEntry::Error(
format!("{}", record.args())
)),
LogLevel::Warn => Some(LogEntry::Warn(
format!("{}", record.args())
)),
_ => None,
}
}
/// The number of warnings to include in each crash report.
// NOTE(review): presumably also bounds the `handled_warnings` buffer at the
// point where warnings are recorded — confirm against the message handler.
const WARNINGS_BUFFER_SIZE: usize = 32;
impl<Message, LTF, STF> Constellation<Message, LTF, STF>
where LTF: LayoutThreadFactory<Message=Message>,
STF: ScriptThreadFactory<Message=Message>
{
/// Create a new constellation thread.
///
/// Spawns the "Constellation" thread, wires up its channels, and returns
/// the pair of senders the embedder needs: one for compositor messages and
/// one (IPC) for the service worker manager.
pub fn start(state: InitialConstellationState) -> (Sender<FromCompositorMsg>, IpcSender<SWManagerMsg>) {
    let (compositor_sender, compositor_receiver) = channel();
    // service worker manager to communicate with constellation
    let (swmanager_sender, swmanager_receiver) = ipc::channel().expect("ipc channel failure");
    let sw_mgr_clone = swmanager_sender.clone();
    thread::Builder::new().name("Constellation".to_owned()).spawn(move || {
        // Script, layout and the SW manager talk to the constellation over
        // IPC; route those IPC receivers onto in-process mpsc receivers so
        // they can all be polled together by `handle_request`'s select!.
        let (ipc_script_sender, ipc_script_receiver) = ipc::channel().expect("ipc channel failure");
        let script_receiver = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(ipc_script_receiver);
        let (ipc_layout_sender, ipc_layout_receiver) = ipc::channel().expect("ipc channel failure");
        let layout_receiver = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(ipc_layout_receiver);
        let swmanager_receiver = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(swmanager_receiver);
        // Namespace 0 is reserved for the constellation itself.
        PipelineNamespace::install(PipelineNamespaceId(0));
        let mut constellation: Constellation<Message, LTF, STF> = Constellation {
            script_sender: ipc_script_sender,
            layout_sender: ipc_layout_sender,
            script_receiver: script_receiver,
            compositor_receiver: compositor_receiver,
            layout_receiver: layout_receiver,
            compositor_proxy: state.compositor_proxy,
            debugger_chan: state.debugger_chan,
            devtools_chan: state.devtools_chan,
            bluetooth_thread: state.bluetooth_thread,
            public_resource_threads: state.public_resource_threads,
            private_resource_threads: state.private_resource_threads,
            image_cache_thread: state.image_cache_thread,
            font_cache_thread: state.font_cache_thread,
            swmanager_chan: None,
            swmanager_receiver: swmanager_receiver,
            swmanager_sender: sw_mgr_clone,
            event_loops: HashMap::new(),
            pipelines: HashMap::new(),
            frames: HashMap::new(),
            pending_frames: vec!(),
            // We initialize the namespace at 1, since we reserved namespace 0 for the constellation
            next_pipeline_namespace_id: PipelineNamespaceId(1),
            root_frame_id: FrameId::new(),
            focus_pipeline_id: None,
            time_profiler_chan: state.time_profiler_chan,
            mem_profiler_chan: state.mem_profiler_chan,
            window_size: WindowSizeData {
                visible_viewport: opts::get().initial_window_size.to_f32() *
                    ScaleFactor::new(1.0),
                initial_viewport: opts::get().initial_window_size.to_f32() *
                    ScaleFactor::new(1.0),
                device_pixel_ratio:
                    ScaleFactor::new(opts::get().device_pixels_per_px.unwrap_or(1.0)),
            },
            phantom: PhantomData,
            webdriver: WebDriverData::new(),
            scheduler_chan: TimerScheduler::start(),
            document_states: HashMap::new(),
            webrender_api_sender: state.webrender_api_sender,
            shutting_down: false,
            handled_warnings: VecDeque::new(),
            // Chaos-testing hook: when enabled via the command line, pipelines
            // are closed at random to harden the constellation's error paths.
            random_pipeline_closure: opts::get().random_pipeline_closure_probability.map(|prob| {
                let seed = opts::get().random_pipeline_closure_seed.unwrap_or_else(random);
                let rng = StdRng::from_seed(&[seed]);
                warn!("Randomly closing pipelines.");
                info!("Using seed {} for random pipeline closure.", seed);
                (rng, prob)
            }),
        };
        constellation.run();
    }).expect("Thread spawning failed");
    (compositor_sender, swmanager_sender)
}
/// The main event loop for the constellation.
///
/// Services requests until a shutdown has been requested *and* every
/// pipeline has gone away, then finishes tearing everything down.
fn run(&mut self) {
    loop {
        if self.shutting_down && self.pipelines.is_empty() {
            break;
        }
        // Randomly close a pipeline if --random-pipeline-closure-probability is set.
        // This is for testing the hardening of the constellation.
        self.maybe_close_random_pipeline();
        self.handle_request();
    }
    self.handle_shutdown();
}
/// Generate a new pipeline id namespace.
fn next_pipeline_namespace_id(&mut self) -> PipelineNamespaceId {
let namespace_id = self.next_pipeline_namespace_id;
let PipelineNamespaceId(ref mut i) = self.next_pipeline_namespace_id;
*i += 1;
namespace_id
}
/// Helper function for creating a pipeline.
///
/// Spawns a new pipeline for `frame_id`, possibly sharing an existing
/// event loop with other same-host frames under the same top-level frame,
/// and records the pipeline in `self.pipelines`. No-op while shutting down.
fn new_pipeline(&mut self,
                pipeline_id: PipelineId,
                frame_id: FrameId,
                parent_info: Option<(PipelineId, FrameType)>,
                initial_window_size: Option<TypedSize2D<f32, PagePx>>,
                load_data: LoadData,
                sandbox: IFrameSandboxState,
                is_private: bool) {
    if self.shutting_down { return; }
    // TODO: can we get a case where the child pipeline is created
    // before the parent is part of the frame tree?
    // NOTE: this was previously computed twice with identical logic;
    // the duplicate shadowing binding has been removed.
    let top_level_frame_id = match parent_info {
        Some((_, FrameType::MozBrowserIFrame)) => frame_id,
        Some((parent_id, _)) => self.get_top_level_frame_for_pipeline(parent_id),
        None => self.root_frame_id,
    };
    // An unsandboxed frame may reuse the event loop already registered for
    // its host; otherwise remember the host so the new loop is registered
    // once the pipeline has been spawned.
    let (event_loop, host) = match sandbox {
        IFrameSandboxState::IFrameSandboxed => (None, None),
        IFrameSandboxState::IFrameUnsandboxed => match reg_host(&load_data.url) {
            None => (None, None),
            Some(host) => {
                let event_loop = self.event_loops.get(&top_level_frame_id)
                    .and_then(|map| map.get(host))
                    .and_then(|weak| weak.upgrade());
                match event_loop {
                    None => (None, Some(String::from(host))),
                    Some(event_loop) => (Some(event_loop.clone()), None),
                }
            },
        },
    };
    let resource_threads = if is_private {
        self.private_resource_threads.clone()
    } else {
        self.public_resource_threads.clone()
    };
    // Inherit visibility from the pipeline currently shown in this frame,
    // falling back to the parent pipeline's visibility for a new frame.
    let parent_visibility = parent_info
        .and_then(|(parent_pipeline_id, _)| self.pipelines.get(&parent_pipeline_id))
        .map(|pipeline| pipeline.visible);
    let prev_visibility = self.frames.get(&frame_id)
        .and_then(|frame| self.pipelines.get(&frame.current.pipeline_id))
        .map(|pipeline| pipeline.visible)
        .or(parent_visibility);
    let result = Pipeline::spawn::<Message, LTF, STF>(InitialPipelineState {
        id: pipeline_id,
        frame_id: frame_id,
        top_level_frame_id: top_level_frame_id,
        parent_info: parent_info,
        constellation_chan: self.script_sender.clone(),
        layout_to_constellation_chan: self.layout_sender.clone(),
        scheduler_chan: self.scheduler_chan.clone(),
        compositor_proxy: self.compositor_proxy.clone_compositor_proxy(),
        devtools_chan: self.devtools_chan.clone(),
        bluetooth_thread: self.bluetooth_thread.clone(),
        swmanager_thread: self.swmanager_sender.clone(),
        image_cache_thread: self.image_cache_thread.clone(),
        font_cache_thread: self.font_cache_thread.clone(),
        resource_threads: resource_threads,
        time_profiler_chan: self.time_profiler_chan.clone(),
        mem_profiler_chan: self.mem_profiler_chan.clone(),
        window_size: initial_window_size,
        event_loop: event_loop,
        load_data: load_data,
        device_pixel_ratio: self.window_size.device_pixel_ratio,
        pipeline_namespace_id: self.next_pipeline_namespace_id(),
        prev_visibility: prev_visibility,
        webrender_api_sender: self.webrender_api_sender.clone(),
        is_private: is_private,
    });
    let pipeline = match result {
        Ok(result) => result,
        // A spawn failure is treated like any other send error from this pipeline.
        Err(e) => return self.handle_send_error(pipeline_id, e),
    };
    // Register the newly created event loop under its host so that later
    // same-host frames in this top-level frame can share it.
    if let Some(host) = host {
        self.event_loops.entry(top_level_frame_id)
            .or_insert_with(HashMap::new)
            .insert(host, Rc::downgrade(&pipeline.event_loop));
    }
    assert!(!self.pipelines.contains_key(&pipeline_id));
    self.pipelines.insert(pipeline_id, pipeline);
}
/// Get an iterator for the current frame tree. Specify self.root_frame_id to
/// iterate the entire tree, or a specific frame id to iterate only that sub-tree.
/// Iterates over the fully active frames in the tree.
fn current_frame_tree_iter(&self, frame_id_root: FrameId) -> FrameTreeIterator {
    // Depth-first traversal seeded with the requested root.
    let stack = vec![frame_id_root];
    FrameTreeIterator {
        stack: stack,
        pipelines: &self.pipelines,
        frames: &self.frames,
    }
}
/// Get an iterator for the current frame tree. Specify self.root_frame_id to
/// iterate the entire tree, or a specific frame id to iterate only that sub-tree.
/// Iterates over all frames in the tree.
fn full_frame_tree_iter(&self, frame_id_root: FrameId) -> FullFrameTreeIterator {
    // Depth-first traversal seeded with the requested root.
    let stack = vec![frame_id_root];
    FullFrameTreeIterator {
        stack: stack,
        pipelines: &self.pipelines,
        frames: &self.frames,
    }
}
/// The joint session future is the merge of the session future of every
/// frame in the frame tree, sorted reverse chronologically.
fn joint_session_future(&self, frame_id_root: FrameId) -> Vec<(Instant, FrameId, PipelineId)> {
let mut future = vec!();
for frame in self.full_frame_tree_iter(frame_id_root) {
future.extend(frame.next.iter().map(|entry| (entry.instant, entry.frame_id, entry.pipeline_id)));
}
// reverse sorting
future.sort_by(|a, b| b.cmp(a));
future
}
/// Is the joint session future empty?
fn joint_session_future_is_empty(&self, frame_id_root: FrameId) -> bool {
    // Empty iff no frame in the subtree has any "next" history entry.
    !self.full_frame_tree_iter(frame_id_root)
        .any(|frame| !frame.next.is_empty())
}
/// The joint session past is the merge of the session past of every
/// frame in the frame tree, sorted chronologically.
fn joint_session_past(&self, frame_id_root: FrameId) -> Vec<(Instant, FrameId, PipelineId)> {
    let mut past = Vec::new();
    for frame in self.full_frame_tree_iter(frame_id_root) {
        // Walk the history backwards. Each past entry is timestamped with
        // the instant at which it was *replaced*, i.e. the instant of the
        // entry that superseded it.
        let mut superseded_at = frame.current.instant;
        for entry in frame.prev.iter().rev() {
            past.push((superseded_at, entry.frame_id, entry.pipeline_id));
            superseded_at = entry.instant;
        }
    }
    past.sort();
    past
}
/// Is the joint session past empty?
fn joint_session_past_is_empty(&self, frame_id_root: FrameId) -> bool {
    // Empty iff no frame in the subtree has any "prev" history entry.
    !self.full_frame_tree_iter(frame_id_root)
        .any(|frame| !frame.prev.is_empty())
}
/// Create a new frame and update the internal bookkeeping.
///
/// Registers the frame in `self.frames` and, when the frame's pipeline has
/// a parent, records the frame as a child of that parent pipeline.
fn new_frame(&mut self, frame_id: FrameId, pipeline_id: PipelineId) {
    self.frames.insert(frame_id, Frame::new(frame_id, pipeline_id));
    // Top-level frames have no parent pipeline to attach to.
    let parent_id = match self.pipelines.get(&pipeline_id)
        .and_then(|pipeline| pipeline.parent_info) {
        Some((parent_id, _)) => parent_id,
        None => return,
    };
    if let Some(parent) = self.pipelines.get_mut(&parent_id) {
        parent.add_child(frame_id);
    }
}
/// Handles loading pages, navigation, and granting access to the compositor
///
/// Blocks on all four incoming channels (script, compositor, layout and the
/// service worker manager) and dispatches exactly one received message to
/// the matching handler.
#[allow(unsafe_code)]
fn handle_request(&mut self) {
    // Tags the winning channel of the select! below so dispatch can happen
    // outside the macro.
    enum Request {
        Script(FromScriptMsg),
        Compositor(FromCompositorMsg),
        Layout(FromLayoutMsg),
        FromSWManager(SWManagerMsg),
    }
    // Get one incoming request.
    // This is one of the few places where the compositor is
    // allowed to panic. If one of the receiver.recv() calls
    // fails, it is because the matching sender has been
    // reclaimed, but this can't happen in normal execution
    // because the constellation keeps a pointer to the sender,
    // so it should never be reclaimed. A possible scenario in
    // which receiver.recv() fails is if some unsafe code
    // produces undefined behaviour, resulting in the destructor
    // being called. If this happens, there's not much we can do
    // other than panic.
    let request = {
        let receiver_from_script = &self.script_receiver;
        let receiver_from_compositor = &self.compositor_receiver;
        let receiver_from_layout = &self.layout_receiver;
        let receiver_from_swmanager = &self.swmanager_receiver;
        select! {
            msg = receiver_from_script.recv() =>
                Request::Script(msg.expect("Unexpected script channel panic in constellation")),
            msg = receiver_from_compositor.recv() =>
                Request::Compositor(msg.expect("Unexpected compositor channel panic in constellation")),
            msg = receiver_from_layout.recv() =>
                Request::Layout(msg.expect("Unexpected layout channel panic in constellation")),
            msg = receiver_from_swmanager.recv() =>
                Request::FromSWManager(msg.expect("Unexpected panic channel panic in constellation"))
        }
    };
    match request {
        Request::Compositor(message) => {
            self.handle_request_from_compositor(message)
        },
        Request::Script(message) => {
            self.handle_request_from_script(message);
        },
        Request::Layout(message) => {
            self.handle_request_from_layout(message);
        },
        Request::FromSWManager(message) => {
            self.handle_request_from_swmanager(message);
        }
    }
}
/// Handles the single message kind the service worker manager can send us.
fn handle_request_from_swmanager(&mut self, message: SWManagerMsg) {
    match message {
        SWManagerMsg::OwnSender(sw_sender) => {
            // store service worker manager for communicating with it.
            self.swmanager_chan = Some(sw_sender)
        },
    }
}
/// Dispatches a single message received from the compositor to the
/// appropriate handler.
fn handle_request_from_compositor(&mut self, message: FromCompositorMsg) {
    match message {
        FromCompositorMsg::Exit => {
            debug!("constellation exiting");
            self.handle_exit();
        }
        // The compositor discovered the size of a subframe. This needs to be reflected by all
        // frame trees in the navigation context containing the subframe.
        FromCompositorMsg::FrameSize(pipeline_id, size) => {
            debug!("constellation got frame size message");
            self.handle_frame_size_msg(pipeline_id, &TypedSize2D::from_untyped(&size));
        }
        FromCompositorMsg::GetFrame(pipeline_id, resp_chan) => {
            debug!("constellation got get root pipeline message");
            self.handle_get_frame(pipeline_id, resp_chan);
        }
        FromCompositorMsg::GetPipeline(frame_id, resp_chan) => {
            debug!("constellation got get root pipeline message");
            self.handle_get_pipeline(frame_id, resp_chan);
        }
        FromCompositorMsg::GetPipelineTitle(pipeline_id) => {
            debug!("constellation got get-pipeline-title message");
            self.handle_get_pipeline_title_msg(pipeline_id);
        }
        FromCompositorMsg::KeyEvent(ch, key, state, modifiers) => {
            debug!("constellation got key event message");
            self.handle_key_msg(ch, key, state, modifiers);
        }
        // Load a new page from a typed url
        // If there is already a pending page (self.pending_frames), it will not be overridden;
        // However, if the id is not encompassed by another change, it will be.
        FromCompositorMsg::LoadUrl(source_id, load_data) => {
            debug!("constellation got URL load message from compositor");
            self.handle_load_url_msg(source_id, load_data, false);
        }
        FromCompositorMsg::IsReadyToSaveImage(pipeline_states) => {
            let is_ready = self.handle_is_ready_to_save_image(pipeline_states);
            debug!("Ready to save image {:?}.", is_ready);
            // Extra stdout output is expected by the problem-test harness.
            if opts::get().is_running_problem_test {
                println!("got ready to save image query, result is {:?}", is_ready);
            }
            let is_ready = is_ready == ReadyToSave::Ready;
            self.compositor_proxy.send(ToCompositorMsg::IsReadyToSaveImageReply(is_ready));
            if opts::get().is_running_problem_test {
                println!("sent response");
            }
        }
        // This should only be called once per constellation, and only by the browser
        FromCompositorMsg::InitLoadUrl(url) => {
            debug!("constellation got init load URL message");
            self.handle_init_load(url);
        }
        // Handle a forward or back request
        FromCompositorMsg::TraverseHistory(pipeline_id, direction) => {
            debug!("constellation got traverse history message from compositor");
            self.handle_traverse_history_msg(pipeline_id, direction);
        }
        FromCompositorMsg::WindowSize(new_size, size_type) => {
            debug!("constellation got window resize message");
            self.handle_window_size_msg(new_size, size_type);
        }
        FromCompositorMsg::TickAnimation(pipeline_id, tick_type) => {
            self.handle_tick_animation(pipeline_id, tick_type)
        }
        FromCompositorMsg::WebDriverCommand(command) => {
            debug!("constellation got webdriver command message");
            self.handle_webdriver_msg(command);
        }
        FromCompositorMsg::Reload => {
            debug!("constellation got reload message");
            self.handle_reload_msg();
        }
        // A log entry forwarded from another process (see FromCompositorLogger).
        FromCompositorMsg::LogEntry(top_level_frame_id, thread_name, entry) => {
            self.handle_log_entry(top_level_frame_id, thread_name, entry);
        }
    }
}
/// Dispatches a single message received from a script thread to the
/// appropriate handler. Messages that merely need forwarding to the
/// compositor are sent on directly.
fn handle_request_from_script(&mut self, message: FromScriptMsg) {
    match message {
        FromScriptMsg::PipelineExited(pipeline_id) => {
            self.handle_pipeline_exited(pipeline_id);
        }
        FromScriptMsg::ScriptLoadedURLInIFrame(load_info) => {
            debug!("constellation got iframe URL load message {:?} {:?} {:?}",
                   load_info.info.parent_pipeline_id,
                   load_info.old_pipeline_id,
                   load_info.info.new_pipeline_id);
            self.handle_script_loaded_url_in_iframe_msg(load_info);
        }
        FromScriptMsg::ScriptLoadedAboutBlankInIFrame(load_info, lc) => {
            debug!("constellation got loaded `about:blank` in iframe message {:?} {:?}",
                   load_info.parent_pipeline_id,
                   load_info.new_pipeline_id);
            self.handle_script_loaded_about_blank_in_iframe_msg(load_info, lc);
        }
        FromScriptMsg::ChangeRunningAnimationsState(pipeline_id, animation_state) => {
            self.handle_change_running_animations_state(pipeline_id, animation_state)
        }
        // Load a new page from a mouse click
        // If there is already a pending page (self.pending_frames), it will not be overridden;
        // However, if the id is not encompassed by another change, it will be.
        FromScriptMsg::LoadUrl(source_id, load_data, replace) => {
            debug!("constellation got URL load message from script");
            self.handle_load_url_msg(source_id, load_data, replace);
        }
        // A page loaded has completed all parsing, script, and reflow messages have been sent.
        FromScriptMsg::LoadComplete(pipeline_id) => {
            debug!("constellation got load complete message");
            self.handle_load_complete_msg(pipeline_id)
        }
        // Handle a forward or back request
        FromScriptMsg::TraverseHistory(pipeline_id, direction) => {
            debug!("constellation got traverse history message from script");
            self.handle_traverse_history_msg(pipeline_id, direction);
        }
        // Handle a joint session history length request.
        FromScriptMsg::JointSessionHistoryLength(pipeline_id, sender) => {
            debug!("constellation got joint session history length message from script");
            self.handle_joint_session_history_length(pipeline_id, sender);
        }
        // Notification that the new document is ready to become active
        FromScriptMsg::ActivateDocument(pipeline_id) => {
            debug!("constellation got activate document message");
            self.handle_activate_document_msg(pipeline_id);
        }
        // Update pipeline url after redirections
        FromScriptMsg::SetFinalUrl(pipeline_id, final_url) => {
            // The script may have finished loading after we already started shutting down.
            if let Some(ref mut pipeline) = self.pipelines.get_mut(&pipeline_id) {
                debug!("constellation got set final url message");
                pipeline.url = final_url;
            } else {
                warn!("constellation got set final url message for dead pipeline");
            }
        }
        FromScriptMsg::MozBrowserEvent(parent_pipeline_id, pipeline_id, event) => {
            debug!("constellation got mozbrowser event message");
            self.handle_mozbrowser_event_msg(parent_pipeline_id,
                                             pipeline_id,
                                             event);
        }
        FromScriptMsg::Focus(pipeline_id) => {
            debug!("constellation got focus message");
            self.handle_focus_msg(pipeline_id);
        }
        // Forward an input event to the pipeline's own event loop.
        FromScriptMsg::ForwardEvent(pipeline_id, event) => {
            let msg = ConstellationControlMsg::SendEvent(pipeline_id, event);
            let result = match self.pipelines.get(&pipeline_id) {
                None => { debug!("Pipeline {:?} got event after closure.", pipeline_id); return; }
                Some(pipeline) => pipeline.event_loop.send(msg),
            };
            if let Err(e) = result {
                self.handle_send_error(pipeline_id, e);
            }
        }
        // Clipboard is not wired up here; reply with an empty string.
        FromScriptMsg::GetClipboardContents(sender) => {
            if let Err(e) = sender.send("".to_owned()) {
                warn!("Failed to send clipboard ({})", e);
            }
        }
        // Clipboard writes are deliberately ignored.
        FromScriptMsg::SetClipboardContents(_) => {
        }
        FromScriptMsg::SetVisible(pipeline_id, visible) => {
            debug!("constellation got set visible messsage");
            self.handle_set_visible_msg(pipeline_id, visible);
        }
        FromScriptMsg::VisibilityChangeComplete(pipeline_id, visible) => {
            debug!("constellation got set visibility change complete message");
            self.handle_visibility_change_complete(pipeline_id, visible);
        }
        FromScriptMsg::RemoveIFrame(pipeline_id, sender) => {
            debug!("constellation got remove iframe message");
            self.handle_remove_iframe_msg(pipeline_id);
            if let Some(sender) = sender {
                if let Err(e) = sender.send(()) {
                    warn!("Error replying to remove iframe ({})", e);
                }
            }
        }
        FromScriptMsg::NewFavicon(url) => {
            debug!("constellation got new favicon message");
            self.compositor_proxy.send(ToCompositorMsg::NewFavicon(url));
        }
        FromScriptMsg::HeadParsed => {
            debug!("constellation got head parsed message");
            self.compositor_proxy.send(ToCompositorMsg::HeadParsed);
        }
        FromScriptMsg::CreateCanvasPaintThread(size, sender) => {
            debug!("constellation got create-canvas-paint-thread message");
            self.handle_create_canvas_paint_thread_msg(&size, sender)
        }
        FromScriptMsg::CreateWebGLPaintThread(size, attributes, sender) => {
            debug!("constellation got create-WebGL-paint-thread message");
            self.handle_create_webgl_paint_thread_msg(&size, attributes, sender)
        }
        FromScriptMsg::NodeStatus(message) => {
            debug!("constellation got NodeStatus message");
            self.compositor_proxy.send(ToCompositorMsg::Status(message));
        }
        FromScriptMsg::SetDocumentState(pipeline_id, state) => {
            debug!("constellation got SetDocumentState message");
            self.document_states.insert(pipeline_id, state);
        }
        FromScriptMsg::Alert(pipeline_id, message, sender) => {
            debug!("constellation got Alert message");
            self.handle_alert(pipeline_id, message, sender);
        }
        FromScriptMsg::ScrollFragmentPoint(pipeline_id, scroll_root_id, point, smooth) => {
            self.compositor_proxy.send(ToCompositorMsg::ScrollFragmentPoint(pipeline_id,
                                                                            scroll_root_id,
                                                                            point,
                                                                            smooth));
        }
        FromScriptMsg::GetClientWindow(send) => {
            self.compositor_proxy.send(ToCompositorMsg::GetClientWindow(send));
        }
        FromScriptMsg::MoveTo(point) => {
            self.compositor_proxy.send(ToCompositorMsg::MoveTo(point));
        }
        FromScriptMsg::ResizeTo(size) => {
            self.compositor_proxy.send(ToCompositorMsg::ResizeTo(size));
        }
        FromScriptMsg::Exit => {
            self.compositor_proxy.send(ToCompositorMsg::Exit);
        }
        FromScriptMsg::LogEntry(top_level_frame_id, thread_name, entry) => {
            self.handle_log_entry(top_level_frame_id, thread_name, entry);
        }
        FromScriptMsg::SetTitle(pipeline_id, title) => {
            self.compositor_proxy.send(ToCompositorMsg::ChangePageTitle(pipeline_id, title))
        }
        FromScriptMsg::SendKeyEvent(ch, key, key_state, key_modifiers) => {
            self.compositor_proxy.send(ToCompositorMsg::KeyEvent(ch, key, key_state, key_modifiers))
        }
        FromScriptMsg::TouchEventProcessed(result) => {
            self.compositor_proxy.send(ToCompositorMsg::TouchEventProcessed(result))
        }
        FromScriptMsg::RegisterServiceWorker(scope_things, scope) => {
            debug!("constellation got store registration scope message");
            self.handle_register_serviceworker(scope_things, scope);
        }
        FromScriptMsg::ForwardDOMMessage(msg_vec, scope_url) => {
            if let Some(ref mgr) = self.swmanager_chan {
                let _ = mgr.send(ServiceWorkerMsg::ForwardDOMMessage(msg_vec, scope_url));
            } else {
                warn!("Unable to forward DOMMessage for postMessage call");
            }
        }
        FromScriptMsg::BroadcastStorageEvent(pipeline_id, storage, url, key, old_value, new_value) => {
            self.handle_broadcast_storage_event(pipeline_id, storage, url, key, old_value, new_value);
        }
        FromScriptMsg::SetFullscreenState(state) => {
            self.compositor_proxy.send(ToCompositorMsg::SetFullscreenState(state));
        }
    }
}
/// Dispatches a single message received from a layout thread to the
/// appropriate handler.
fn handle_request_from_layout(&mut self, message: FromLayoutMsg) {
    match message {
        FromLayoutMsg::ChangeRunningAnimationsState(pipeline_id, animation_state) => {
            self.handle_change_running_animations_state(pipeline_id, animation_state)
        },
        FromLayoutMsg::SetCursor(cursor) => {
            self.handle_set_cursor_msg(cursor)
        },
        FromLayoutMsg::ViewportConstrained(pipeline_id, constraints) => {
            debug!("constellation got viewport-constrained event message");
            self.handle_viewport_constrained_msg(pipeline_id, constraints);
        },
    }
}
/// Forwards a service worker registration to the service worker manager,
/// if we have its sender yet; a warning is logged otherwise (best effort).
fn handle_register_serviceworker(&self, scope_things: ScopeThings, scope: ServoUrl) {
    match self.swmanager_chan {
        Some(ref mgr) => {
            let _ = mgr.send(ServiceWorkerMsg::RegisterServiceWorker(scope_things, scope));
        },
        None => warn!("sending scope info to service worker manager failed"),
    }
}
/// Broadcasts a storage event to every same-origin pipeline other than the
/// one that raised it.
fn handle_broadcast_storage_event(&self, pipeline_id: PipelineId, storage: StorageType, url: ServoUrl,
                                  key: Option<String>, old_value: Option<String>, new_value: Option<String>) {
    let origin = url.origin();
    for pipeline in self.pipelines.values() {
        // Skip the originating pipeline and any cross-origin pipelines.
        if pipeline.id == pipeline_id || pipeline.url.origin() != origin {
            continue;
        }
        let msg = ConstellationControlMsg::DispatchStorageEvent(
            pipeline.id, storage, url.clone(), key.clone(), old_value.clone(), new_value.clone()
        );
        if let Err(err) = pipeline.event_loop.send(msg) {
            warn!("Failed to broadcast storage event to pipeline {} ({:?}).", pipeline.id, err);
        }
    }
}
/// Begins an orderly shutdown: closes every frame and pipeline (attached,
/// pending, or detached) and flags the constellation as shutting down.
/// Idempotent: a second call is a no-op.
fn handle_exit(&mut self) {
    // TODO: add a timer, which forces shutdown if threads aren't responsive.
    if self.shutting_down { return; }
    self.shutting_down = true;
    self.mem_profiler_chan.send(mem::ProfilerMsg::Exit);
    // TODO: exit before the root frame is initialized?
    debug!("Removing root frame.");
    let root_frame_id = self.root_frame_id;
    self.close_frame(root_frame_id, ExitPipelineMode::Normal);
    // Close any frames and pipelines that are still pending.
    while let Some(pending) = self.pending_frames.pop() {
        debug!("Removing pending frame {}.", pending.frame_id);
        self.close_frame(pending.frame_id, ExitPipelineMode::Normal);
        debug!("Removing pending pipeline {}.", pending.new_pipeline_id);
        self.close_pipeline(pending.new_pipeline_id, ExitPipelineMode::Normal);
    }
    // Close any frames which were never attached to the frame tree.
    let detached_frames: Vec<FrameId> = self.frames.keys().cloned().collect();
    for frame_id in detached_frames {
        debug!("Removing detached frame {}.", frame_id);
        self.close_frame(frame_id, ExitPipelineMode::Normal);
    }
    // Close any pipelines which were never attached to the pipeline tree.
    let detached_pipelines: Vec<PipelineId> = self.pipelines.keys().cloned().collect();
    for pipeline_id in detached_pipelines {
        debug!("Removing detached pipeline {}.", pipeline_id);
        self.close_pipeline(pipeline_id, ExitPipelineMode::Normal);
    }
}
/// Finishes shutdown after every pipeline has exited: asks each auxiliary
/// thread to exit, waits for the resource and storage threads to confirm,
/// then tells the compositor shutdown is complete.
fn handle_shutdown(&mut self) {
    // At this point, there are no active pipelines,
    // so we can safely block on other threads, without worrying about deadlock.
    // Channels to receive signals when threads are done exiting.
    let (core_sender, core_receiver) = ipc::channel().expect("Failed to create IPC channel!");
    let (storage_sender, storage_receiver) = ipc::channel().expect("Failed to create IPC channel!");
    debug!("Exiting image cache.");
    self.image_cache_thread.exit();
    debug!("Exiting core resource threads.");
    if let Err(e) = self.public_resource_threads.send(net_traits::CoreResourceMsg::Exit(core_sender)) {
        warn!("Exit resource thread failed ({})", e);
    }
    if let Some(ref chan) = self.debugger_chan {
        debugger::shutdown_server(chan);
    }
    if let Some(ref chan) = self.devtools_chan {
        debug!("Exiting devtools.");
        let msg = DevtoolsControlMsg::FromChrome(ChromeToDevtoolsControlMsg::ServerExitMsg);
        if let Err(e) = chan.send(msg) {
            warn!("Exit devtools failed ({})", e);
        }
    }
    debug!("Exiting storage resource threads.");
    if let Err(e) = self.public_resource_threads.send(StorageThreadMsg::Exit(storage_sender)) {
        warn!("Exit storage thread failed ({})", e);
    }
    debug!("Exiting bluetooth thread.");
    if let Err(e) = self.bluetooth_thread.send(BluetoothRequest::Exit) {
        warn!("Exit bluetooth thread failed ({})", e);
    }
    debug!("Exiting service worker manager thread.");
    if let Some(mgr) = self.swmanager_chan.as_ref() {
        if let Err(e) = mgr.send(ServiceWorkerMsg::Exit) {
            warn!("Exit service worker manager failed ({})", e);
        }
    }
    debug!("Exiting font cache thread.");
    self.font_cache_thread.exit();
    // Receive exit signals from threads.
    // Blocking is safe here because no pipelines remain (see above).
    if let Err(e) = core_receiver.recv() {
        warn!("Exit resource thread failed ({})", e);
    }
    if let Err(e) = storage_receiver.recv() {
        warn!("Exit storage thread failed ({})", e);
    }
    debug!("Asking compositor to complete shutdown.");
    self.compositor_proxy.send(ToCompositorMsg::ShutdownComplete);
}
/// Drops our record of a pipeline that has already shut itself down.
fn handle_pipeline_exited(&mut self, pipeline_id: PipelineId) {
    debug!("Pipeline {:?} exited.", pipeline_id);
    self.pipelines.remove(&pipeline_id);
}
/// A send failure means the pipeline's event loop is gone, so it is
/// treated exactly like receiving a panic report from that pipeline.
fn handle_send_error(&mut self, pipeline_id: PipelineId, err: IOError) {
    debug!("Pipeline {:?} send error ({}).", pipeline_id, err);
    let reason = format!("Send failed ({})", err);
    let top_level_frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
    self.handle_panic(top_level_frame_id, reason, None);
}
/// Recovers from a panicked (or otherwise dead) top-level frame by tearing
/// down its children and scheduling a replacement `about:failure` pipeline.
/// In hard-fail mode the whole process exits instead.
fn handle_panic(&mut self, top_level_frame_id: FrameId, reason: String, backtrace: Option<String>) {
    if opts::get().hard_fail {
        // It's quite difficult to make Servo exit cleanly if some threads have failed.
        // Hard fail exists for test runners so we crash and that's good enough.
        println!("Pipeline failed in hard-fail mode. Crashing!");
        process::exit(1);
    }
    debug!("Panic handler for top-level frame {}: {}.", top_level_frame_id, reason);
    // Notify the browser chrome that the pipeline has failed
    self.trigger_mozbrowsererror(top_level_frame_id, reason, backtrace);
    // Capture what we need from the failed pipeline before force-closing it.
    let pipeline_id = self.frames.get(&top_level_frame_id).map(|frame| frame.current.pipeline_id);
    let pipeline_url = pipeline_id.and_then(|id| self.pipelines.get(&id).map(|pipeline| pipeline.url.clone()));
    let parent_info = pipeline_id.and_then(|id| self.pipelines.get(&id).and_then(|pipeline| pipeline.parent_info));
    let window_size = pipeline_id.and_then(|id| self.pipelines.get(&id).and_then(|pipeline| pipeline.size));
    self.close_frame_children(top_level_frame_id, ExitPipelineMode::Force);
    let failure_url = ServoUrl::parse("about:failure").expect("infallible");
    // If about:failure itself failed, give up rather than loop forever.
    if let Some(pipeline_url) = pipeline_url {
        if pipeline_url == failure_url {
            return error!("about:failure failed");
        }
    }
    warn!("creating replacement pipeline for about:failure");
    let new_pipeline_id = PipelineId::new();
    let load_data = LoadData::new(failure_url, None, None);
    let sandbox = IFrameSandboxState::IFrameSandboxed;
    self.new_pipeline(new_pipeline_id, top_level_frame_id, parent_info, window_size, load_data, sandbox, false);
    self.pending_frames.push(FrameChange {
        frame_id: top_level_frame_id,
        old_pipeline_id: pipeline_id,
        new_pipeline_id: new_pipeline_id,
        replace: false,
    });
}
/// Reacts to a log entry forwarded to the constellation: a panic triggers
/// crash recovery for its top-level frame, while warnings and errors are
/// buffered for inclusion in later crash reports.
fn handle_log_entry(&mut self, top_level_frame_id: Option<FrameId>, thread_name: Option<String>, entry: LogEntry) {
    debug!("Received log entry {:?}.", entry);
    match entry {
        LogEntry::Panic(reason, backtrace) => {
            // A panic with no associated frame is attributed to the root frame.
            let frame_id = top_level_frame_id.unwrap_or(self.root_frame_id);
            self.handle_panic(frame_id, reason, Some(backtrace));
        },
        LogEntry::Error(reason) | LogEntry::Warn(reason) => {
            // Keep the buffer bounded; evict the oldest entries first.
            // (VecDeque::truncate is unstable, so trim manually.)
            while self.handled_warnings.len() >= WARNINGS_BUFFER_SIZE {
                self.handled_warnings.pop_front();
            }
            self.handled_warnings.push_back((thread_name, reason));
        },
    }
}
/// Loads the very first page into the root frame: spawns the root pipeline,
/// queues it as a pending frame change, and tells the compositor the URL.
fn handle_init_load(&mut self, url: ServoUrl) {
    let root_pipeline_id = PipelineId::new();
    let root_frame_id = self.root_frame_id;
    let window_size = self.window_size.visible_viewport;
    let sandbox = IFrameSandboxState::IFrameUnsandboxed;
    let load_data = LoadData::new(url.clone(), None, None);
    self.new_pipeline(root_pipeline_id, root_frame_id, None, Some(window_size), load_data, sandbox, false);
    self.handle_load_start_msg(root_pipeline_id);
    self.pending_frames.push(FrameChange {
        frame_id: root_frame_id,
        old_pipeline_id: None,
        new_pipeline_id: root_pipeline_id,
        replace: false,
    });
    self.compositor_proxy.send(ToCompositorMsg::ChangePageUrl(root_pipeline_id, url));
}
/// The compositor discovered a subframe's size: record it on the pipeline
/// and forward a resize message to the pipeline's event loop.
fn handle_frame_size_msg(&mut self,
                         pipeline_id: PipelineId,
                         size: &TypedSize2D<f32, PagePx>) {
    let msg = ConstellationControlMsg::Resize(pipeline_id, WindowSizeData {
        visible_viewport: *size,
        initial_viewport: *size * ScaleFactor::new(1.0),
        device_pixel_ratio: self.window_size.device_pixel_ratio,
    }, WindowSizeType::Initial);
    // The pipeline may already have exited before we process this message;
    // if so there is nothing to resize, so just early exit.
    let send_result = match self.pipelines.get_mut(&pipeline_id) {
        Some(pipeline) => {
            // Store the new rect inside the pipeline.
            pipeline.size = Some(*size);
            pipeline.event_loop.send(msg)
        },
        None => return,
    };
    if let Err(e) = send_result {
        self.handle_send_error(pipeline_id, e);
    }
}
/// A subframe's pipeline finished loading: ask the parent pipeline's event
/// loop to dispatch a frame load event on the corresponding iframe.
fn handle_subframe_loaded(&mut self, pipeline_id: PipelineId) {
    // Look up the loaded pipeline's frame and its parent pipeline.
    let (frame_id, parent_id) = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => match pipeline.parent_info {
            Some((parent_id, _)) => (pipeline.frame_id, parent_id),
            None => return warn!("Pipeline {} has no parent.", pipeline_id),
        },
        None => return warn!("Pipeline {} loaded after closure.", pipeline_id),
    };
    let msg = ConstellationControlMsg::DispatchFrameLoadEvent {
        target: frame_id,
        parent: parent_id,
        child: pipeline_id,
    };
    let send_result = match self.pipelines.get(&parent_id) {
        Some(parent) => parent.event_loop.send(msg),
        None => return warn!("Parent {} frame loaded after closure.", parent_id),
    };
    if let Err(e) = send_result {
        self.handle_send_error(parent_id, e);
    }
}
// The script thread associated with pipeline_id has loaded a URL in an iframe via script. This
// will result in a new pipeline being spawned and a frame tree being added to
// parent_pipeline_id's frame tree's children. This message is never the result of a
// page navigation.
fn handle_script_loaded_url_in_iframe_msg(&mut self, load_info: IFrameLoadInfoWithData) {
    // The inner block scopes the immutable borrows of self.pipelines so that
    // new_pipeline (which needs &mut self) can be called afterwards.
    let (load_data, window_size, is_private) = {
        let old_pipeline = load_info.old_pipeline_id
            .and_then(|old_pipeline_id| self.pipelines.get(&old_pipeline_id));
        let source_pipeline = match self.pipelines.get(&load_info.info.parent_pipeline_id) {
            Some(source_pipeline) => source_pipeline,
            None => return warn!("Script loaded url in closed iframe {}.", load_info.info.parent_pipeline_id),
        };
        // If no url is specified, reload.
        let load_data = load_info.load_data.unwrap_or_else(|| {
            let url = match old_pipeline {
                Some(old_pipeline) => old_pipeline.url.clone(),
                None => ServoUrl::parse("about:blank").expect("infallible"),
            };
            // TODO - loaddata here should have referrer info (not None, None)
            LoadData::new(url, None, None)
        });
        // Privacy is inherited: the iframe is private if either it or its
        // parent pipeline is private.
        let is_private = load_info.info.is_private || source_pipeline.is_private;
        let window_size = old_pipeline.and_then(|old_pipeline| old_pipeline.size);
        // Suspend the pipeline being replaced while the new one loads.
        if let Some(old_pipeline) = old_pipeline {
            old_pipeline.freeze();
        }
        (load_data, window_size, is_private)
    };
    // Create the new pipeline, attached to the parent and push to pending frames
    self.new_pipeline(load_info.info.new_pipeline_id,
                      load_info.info.frame_id,
                      Some((load_info.info.parent_pipeline_id, load_info.info.frame_type)),
                      window_size,
                      load_data,
                      load_info.sandbox,
                      is_private);
    self.pending_frames.push(FrameChange {
        frame_id: load_info.info.frame_id,
        old_pipeline_id: load_info.old_pipeline_id,
        new_pipeline_id: load_info.info.new_pipeline_id,
        replace: load_info.info.replace,
    });
}
// Creates the pipeline for an about:blank iframe. The caller supplies the
// layout channel (`layout_sender`), and the new pipeline reuses the parent's
// script event loop, so no new thread is spawned here.
fn handle_script_loaded_about_blank_in_iframe_msg(&mut self,
                                                  load_info: IFrameLoadInfo,
                                                  layout_sender: IpcSender<LayoutControlMsg>) {
    let IFrameLoadInfo {
        parent_pipeline_id,
        new_pipeline_id,
        frame_type,
        replace,
        frame_id,
        is_private,
    } = load_info;
    // Inner block scopes the borrow of `self.pipelines` so the insert
    // below can take a mutable borrow.
    let pipeline = {
        let parent_pipeline = match self.pipelines.get(&parent_pipeline_id) {
            Some(parent_pipeline) => parent_pipeline,
            None => return warn!("Script loaded url in closed iframe {}.", parent_pipeline_id),
        };
        // The about:blank document shares its parent's script event loop.
        let script_sender = parent_pipeline.event_loop.clone();
        let url = ServoUrl::parse("about:blank").expect("infallible");
        Pipeline::new(new_pipeline_id,
                      frame_id,
                      Some((parent_pipeline_id, frame_type)),
                      script_sender,
                      layout_sender,
                      self.compositor_proxy.clone_compositor_proxy(),
                      // A child of a private pipeline is itself private.
                      is_private || parent_pipeline.is_private,
                      url,
                      None,
                      parent_pipeline.visible)
    };
    assert!(!self.pipelines.contains_key(&new_pipeline_id));
    self.pipelines.insert(new_pipeline_id, pipeline);
    // The pipeline becomes the frame's current entry once activated.
    self.pending_frames.push(FrameChange {
        frame_id: frame_id,
        old_pipeline_id: None,
        new_pipeline_id: new_pipeline_id,
        replace: replace,
    });
}
fn handle_set_cursor_msg(&mut self, cursor: Cursor) {
    // The compositor owns the window, so it is responsible for actually
    // changing the mouse cursor; just forward the request.
    let msg = ToCompositorMsg::SetCursor(cursor);
    self.compositor_proxy.send(msg)
}
fn handle_change_running_animations_state(&mut self,
                                          pipeline_id: PipelineId,
                                          animation_state: AnimationState) {
    // Relay the pipeline's animation state to the compositor, which tracks
    // which pipelines currently need animation ticks.
    let msg = ToCompositorMsg::ChangeRunningAnimationsState(pipeline_id, animation_state);
    self.compositor_proxy.send(msg)
}
fn handle_tick_animation(&mut self, pipeline_id: PipelineId, tick_type: AnimationTickType) {
    // Route an animation tick to either the script thread or the layout
    // thread of the pipeline, depending on which kind was requested.
    let send_result = match tick_type {
        AnimationTickType::Script => match self.pipelines.get(&pipeline_id) {
            Some(pipeline) => {
                pipeline.event_loop.send(ConstellationControlMsg::TickAllAnimations(pipeline_id))
            }
            None => return warn!("Pipeline {:?} got script tick after closure.", pipeline_id),
        },
        AnimationTickType::Layout => match self.pipelines.get(&pipeline_id) {
            Some(pipeline) => pipeline.layout_chan.send(LayoutControlMsg::TickAnimations),
            None => return warn!("Pipeline {:?} got layout tick after closure.", pipeline_id),
        },
    };
    if let Err(e) = send_result {
        self.handle_send_error(pipeline_id, e);
    }
}
// Handles an alert() request from a pipeline. With mozbrowser enabled and
// the alert originating in an iframe (the pipeline has a parent), the alert
// is surfaced as a mozbrowsershowmodalprompt event on the root pipeline;
// `sender` receives `true` only when the embedder should display a native
// alert itself.
fn handle_alert(&mut self,
                pipeline_id: PipelineId,
                message: String,
                sender: IpcSender<bool>) {
    // "Isn't root" here means "has a parent"; a closed pipeline counts as root.
    let pipeline_isnt_root = self.pipelines.get(&pipeline_id).and_then(|pipeline| pipeline.parent_info).is_some();
    let mozbrowser_modal_prompt = pipeline_isnt_root && PREFS.is_mozbrowser_enabled();
    if mozbrowser_modal_prompt {
        // https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowsershowmodalprompt
        let prompt_type = String::from("alert");
        let title = String::from("Alert");
        let return_value = String::from("");
        let event = MozBrowserEvent::ShowModalPrompt(prompt_type, title, message, return_value);
        let top_level_frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
        // The event is delivered through the root frame's current pipeline.
        match self.frames.get(&self.root_frame_id) {
            None => warn!("Alert sent after root frame closure."),
            Some(root_frame) => match self.pipelines.get(&root_frame.current.pipeline_id) {
                None => warn!("Alert sent after root pipeline closure."),
                Some(root_pipeline) => root_pipeline.trigger_mozbrowser_event(Some(top_level_frame_id), event),
            }
        }
    }
    // Reply whether the caller should show a native alert (true) or rely on
    // the mozbrowser event dispatched above (false).
    let result = sender.send(!mozbrowser_modal_prompt);
    if let Err(e) = result {
        self.handle_send_error(pipeline_id, e);
    }
}
fn handle_load_url_msg(&mut self, source_id: PipelineId, load_data: LoadData, replace: bool) {
    // Thin wrapper around load_url; the new pipeline id it may return is
    // not needed by this message handler.
    let _ = self.load_url(source_id, load_data, replace);
}
// Initiates a navigation for `source_id` to `load_data`. Returns the id of
// the pipeline the load was routed through (the source itself for iframe
// navigations, or a freshly created pipeline for root-frame navigations),
// or None when no navigation was started.
fn load_url(&mut self, source_id: PipelineId, load_data: LoadData, replace: bool) -> Option<PipelineId> {
    debug!("Loading {} in pipeline {}.", load_data.url, source_id);
    // If this load targets an iframe, its framing element may exist
    // in a separate script thread than the framed document that initiated
    // the new load. The framing element must be notified about the
    // requested change so it can update its internal state.
    //
    // If replace is true, the current entry is replaced instead of a new entry being added.
    let (frame_id, parent_info) = match self.pipelines.get(&source_id) {
        Some(pipeline) => (pipeline.frame_id, pipeline.parent_info),
        None => {
            warn!("Pipeline {:?} loaded after closure.", source_id);
            return None;
        }
    };
    match parent_info {
        // Case 1: the navigating pipeline is inside an iframe — delegate the
        // load to the parent's script thread.
        Some((parent_pipeline_id, _)) => {
            self.handle_load_start_msg(source_id);
            // Message the constellation to find the script thread for this iframe
            // and issue an iframe load through there.
            let msg = ConstellationControlMsg::Navigate(parent_pipeline_id, frame_id, load_data, replace);
            let result = match self.pipelines.get(&parent_pipeline_id) {
                Some(parent_pipeline) => parent_pipeline.event_loop.send(msg),
                None => {
                    warn!("Pipeline {:?} child loaded after closure", parent_pipeline_id);
                    return None;
                },
            };
            if let Err(e) = result {
                self.handle_send_error(parent_pipeline_id, e);
            }
            // No new pipeline is created here; the parent handles the rest.
            Some(source_id)
        }
        // Case 2: root-frame navigation — spawn a new pipeline directly.
        None => {
            // Make sure no pending page would be overridden.
            for frame_change in &self.pending_frames {
                if frame_change.old_pipeline_id == Some(source_id) {
                    // id that sent load msg is being changed already; abort
                    return None;
                }
            }
            if !self.pipeline_is_in_current_frame(source_id) {
                // Disregard this load if the navigating pipeline is not actually
                // active. This could be caused by a delayed navigation (eg. from
                // a timer) or a race between multiple navigations (such as an
                // onclick handler on an anchor element).
                return None;
            }
            self.handle_load_start_msg(source_id);
            // Being here means either there are no pending frames, or none of the pending
            // changes would be overridden by changing the subframe associated with source_id.
            // Create the new pipeline
            let window_size = self.pipelines.get(&source_id).and_then(|source| source.size);
            let new_pipeline_id = PipelineId::new();
            let root_frame_id = self.root_frame_id;
            let sandbox = IFrameSandboxState::IFrameUnsandboxed;
            self.new_pipeline(new_pipeline_id, root_frame_id, None, window_size, load_data, sandbox, false);
            self.pending_frames.push(FrameChange {
                frame_id: root_frame_id,
                old_pipeline_id: Some(source_id),
                new_pipeline_id: new_pipeline_id,
                replace: replace,
            });
            // Send message to ScriptThread that will suspend all timers
            match self.pipelines.get(&source_id) {
                Some(source) => source.freeze(),
                None => warn!("Pipeline {:?} loaded after closure", source_id),
            };
            Some(new_pipeline_id)
        }
    }
}
fn handle_load_start_msg(&mut self, pipeline_id: PipelineId) {
    // Tell the compositor a load began, along with whether session-history
    // navigation in each direction is currently possible.
    let frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
    let can_go_forward = !self.joint_session_future_is_empty(frame_id);
    let can_go_back = !self.joint_session_past_is_empty(frame_id);
    self.compositor_proxy.send(ToCompositorMsg::LoadStart(can_go_back, can_go_forward));
}
fn handle_load_complete_msg(&mut self, pipeline_id: PipelineId) {
    // If WebDriver is waiting for this particular pipeline to finish
    // loading, reply to it and then drop the outstanding channel.
    let mut clear_webdriver_channel = false;
    if let Some((expected_pipeline_id, ref reply_chan)) = self.webdriver.load_channel {
        debug!("Sending load to WebDriver");
        if expected_pipeline_id == pipeline_id {
            let _ = reply_chan.send(webdriver_msg::LoadStatus::LoadComplete);
            clear_webdriver_channel = true;
        }
    }
    if clear_webdriver_channel {
        self.webdriver.load_channel = None;
    }
    // Report completion to the compositor with the current history state
    // and whether this was the root frame.
    let frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
    let forward = !self.joint_session_future_is_empty(frame_id);
    let back = !self.joint_session_past_is_empty(frame_id);
    let root = self.root_frame_id == frame_id;
    self.compositor_proxy.send(ToCompositorMsg::LoadComplete(back, forward, root));
    // If this pipeline lives in an iframe, its parent needs a load event too.
    self.handle_subframe_loaded(pipeline_id);
}
// Traverses joint session history by `delta` steps forward or back,
// relative to the top-level frame of `pipeline_id` (or the root frame when
// None is given).
fn handle_traverse_history_msg(&mut self,
                               pipeline_id: Option<PipelineId>,
                               direction: TraversalDirection) {
    let top_level_frame_id = pipeline_id
        .map(|pipeline_id| self.get_top_level_frame_for_pipeline(pipeline_id))
        .unwrap_or(self.root_frame_id);
    // Maps each affected frame to the pipeline it should end up showing.
    // Repeated steps through the same frame overwrite the earlier entry, so
    // each frame is traversed at most once, to its final destination.
    let mut traversal_info = HashMap::new();
    match direction {
        TraversalDirection::Forward(delta) => {
            let mut future = self.joint_session_future(top_level_frame_id);
            for _ in 0..delta {
                match future.pop() {
                    Some((_, frame_id, pipeline_id)) => {
                        traversal_info.insert(frame_id, pipeline_id);
                    },
                    None => return warn!("invalid traversal delta"),
                }
            }
        },
        TraversalDirection::Back(delta) => {
            let mut past = self.joint_session_past(top_level_frame_id);
            for _ in 0..delta {
                match past.pop() {
                    Some((_, frame_id, pipeline_id)) => {
                        traversal_info.insert(frame_id, pipeline_id);
                    },
                    None => return warn!("invalid traversal delta"),
                }
            }
        },
    };
    for (frame_id, pipeline_id) in traversal_info {
        self.traverse_frame_to_pipeline(frame_id, pipeline_id);
    }
}
fn handle_joint_session_history_length(&self, pipeline_id: PipelineId, sender: IpcSender<u32>) {
    // Joint session history length = the single active entry (the initial 1)
    // plus every past and future entry across the whole frame subtree.
    let frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
    let length = self.full_frame_tree_iter(frame_id)
        .fold(1, |len, frame| len + frame.next.len() + frame.prev.len());
    let _ = sender.send(length as u32);
}
fn handle_key_msg(&mut self, ch: Option<char>, key: Key, state: KeyState, mods: KeyModifiers) {
    // Send to the explicitly focused pipeline (if it exists), or the root
    // frame's current pipeline. If neither exist, fall back to sending to
    // the compositor below.
    let root_pipeline_id = self.frames.get(&self.root_frame_id)
        .map(|root_frame| root_frame.current.pipeline_id);
    let pipeline_id = self.focus_pipeline_id.or(root_pipeline_id);
    match pipeline_id {
        Some(pipeline_id) => {
            let event = CompositorEvent::KeyEvent(ch, key, state, mods);
            let msg = ConstellationControlMsg::SendEvent(pipeline_id, event);
            let result = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => pipeline.event_loop.send(msg),
                None => return debug!("Pipeline {:?} got key event after closure.", pipeline_id),
            };
            if let Err(e) = result {
                self.handle_send_error(pipeline_id, e);
            }
        },
        None => {
            // No focused or root pipeline: hand the key event to the
            // compositor. Sending through the proxy directly avoids the
            // needless proxy clone the previous code performed — every
            // other call site in this file sends this way.
            let event = ToCompositorMsg::KeyEvent(ch, key, state, mods);
            self.compositor_proxy.send(event);
        }
    }
}
fn handle_reload_msg(&mut self) {
    // Ask the root frame's current pipeline (if there is one) to reload.
    let root_pipeline_id = self.frames.get(&self.root_frame_id)
        .map(|root_frame| root_frame.current.pipeline_id);
    let pipeline_id = match root_pipeline_id {
        Some(pipeline_id) => pipeline_id,
        None => return,
    };
    let reload_msg = ConstellationControlMsg::Reload(pipeline_id);
    let send_result = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => pipeline.event_loop.send(reload_msg),
        None => return debug!("Pipeline {:?} got reload event after closure.", pipeline_id),
    };
    if let Err(e) = send_result {
        self.handle_send_error(pipeline_id, e);
    }
}
fn handle_get_pipeline_title_msg(&mut self, pipeline_id: PipelineId) {
    // Ask the pipeline's script thread for the page title; if the pipeline
    // has already been closed, immediately report "no title" instead.
    let send_result = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => pipeline.event_loop.send(ConstellationControlMsg::GetTitle(pipeline_id)),
        None => return self.compositor_proxy.send(ToCompositorMsg::ChangePageTitle(pipeline_id, None)),
    };
    if let Err(e) = send_result {
        self.handle_send_error(pipeline_id, e);
    }
}
fn handle_mozbrowser_event_msg(&mut self,
                               parent_pipeline_id: PipelineId,
                               pipeline_id: PipelineId,
                               event: MozBrowserEvent) {
    assert!(PREFS.is_mozbrowser_enabled());
    // Find the script channel for the given parent pipeline,
    // and pass the event to that script thread.
    // If the pipeline lookup fails, it is because we have torn down the pipeline,
    // so it is reasonable to silently ignore the event.
    let frame_id = self.pipelines.get(&pipeline_id).map(|pipeline| pipeline.frame_id);
    if let Some(parent_pipeline) = self.pipelines.get(&parent_pipeline_id) {
        parent_pipeline.trigger_mozbrowser_event(frame_id, event);
    } else {
        warn!("Pipeline {:?} handling mozbrowser event after closure.", parent_pipeline_id);
    }
}
fn handle_get_pipeline(&mut self, frame_id: Option<FrameId>,
                       resp_chan: IpcSender<Option<PipelineId>>) {
    // Reply with the pipeline for this frame (root frame when None),
    // preferring the most recent pending navigation that would replace the
    // frame's current pipeline.
    let frame_id = frame_id.unwrap_or(self.root_frame_id);
    let current_pipeline_id = self.frames.get(&frame_id)
        .map(|frame| frame.current.pipeline_id);
    let mut answer = current_pipeline_id;
    for frame_change in self.pending_frames.iter().rev() {
        if frame_change.old_pipeline_id == current_pipeline_id {
            answer = Some(frame_change.new_pipeline_id);
            break;
        }
    }
    if let Err(e) = resp_chan.send(answer) {
        warn!("Failed get_pipeline response ({}).", e);
    }
}
fn handle_get_frame(&mut self,
                    pipeline_id: PipelineId,
                    resp_chan: IpcSender<Option<FrameId>>) {
    // Look up which frame (if any) currently hosts this pipeline and reply.
    let hosting_frame = self.pipelines.get(&pipeline_id).map(|pipeline| pipeline.frame_id);
    if let Err(e) = resp_chan.send(hosting_frame) {
        warn!("Failed get_frame response ({}).", e);
    }
}
fn focus_parent_pipeline(&mut self, pipeline_id: PipelineId) {
    // Find the frame hosting this pipeline, and the pipeline of its parent.
    let (frame_id, parent_info) = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => (pipeline.frame_id, pipeline.parent_info),
        None => return warn!("Pipeline {:?} focus parent after closure.", pipeline_id),
    };
    let parent_pipeline_id = match parent_info {
        Some((parent_pipeline_id, _)) => parent_pipeline_id,
        None => return debug!("Pipeline {:?} focus has no parent.", pipeline_id),
    };
    // Tell the parent to mark the hosting iframe element as focused.
    let focus_msg = ConstellationControlMsg::FocusIFrame(parent_pipeline_id, frame_id);
    let send_result = match self.pipelines.get(&parent_pipeline_id) {
        Some(parent) => parent.event_loop.send(focus_msg),
        None => return warn!("Pipeline {:?} focus after closure.", parent_pipeline_id),
    };
    if let Err(e) = send_result {
        self.handle_send_error(parent_pipeline_id, e);
    }
    // Recurse so every ancestor iframe is marked focused as well.
    self.focus_parent_pipeline(parent_pipeline_id);
}
fn handle_focus_msg(&mut self, pipeline_id: PipelineId) {
    // Record the newly focused pipeline, then walk up the tree so every
    // ancestor iframe is marked focused too.
    self.focus_pipeline_id = Some(pipeline_id);
    self.focus_parent_pipeline(pipeline_id);
}
fn handle_remove_iframe_msg(&mut self, pipeline_id: PipelineId) {
    // The binding ends the borrow of self.pipelines before we mutate self.
    let frame_id = self.pipelines.get(&pipeline_id).map(|pipeline| pipeline.frame_id);
    if let Some(frame_id) = frame_id {
        // The iframe has already loaded and entered the frame tree:
        // close the whole frame.
        self.close_frame(frame_id, ExitPipelineMode::Normal);
    } else {
        // The iframe is still loading / painting for the first time. It is
        // not in the frame tree yet, but its pipeline must still be shut down.
        self.close_pipeline(pipeline_id, ExitPipelineMode::Normal);
    }
}
fn handle_set_visible_msg(&mut self, pipeline_id: PipelineId, visible: bool) {
let frame_id = match self.pipelines.get(&pipeline_id) {
Some(pipeline) => pipeline.frame_id,
None => return warn!("No frame associated with pipeline {:?}", pipeline_id),
};
let child_pipeline_ids: Vec<PipelineId> = self.full_frame_tree_iter(frame_id)
.flat_map(|frame| frame.next.iter()
.chain(frame.prev.iter())
.chain(once(&frame.current)))
.map(|state| state.pipeline_id)
.collect();
for id in child_pipeline_ids {
if let Some(pipeline) = self.pipelines.get_mut(&id) {
pipeline.change_visibility(visible);
}
}
}
// A pipeline has finished applying a visibility change; notify its parent
// (if it is an iframe) so the owning frame element can update its state.
fn handle_visibility_change_complete(&mut self, pipeline_id: PipelineId, visibility: bool) {
    let (frame_id, parent_pipeline_info) = match self.pipelines.get(&pipeline_id) {
        // Fix: message previously misspelled as "Visibity".
        None => return warn!("Visibility change for closed pipeline {:?}.", pipeline_id),
        Some(pipeline) => (pipeline.frame_id, pipeline.parent_info),
    };
    if let Some((parent_pipeline_id, _)) = parent_pipeline_info {
        let visibility_msg = ConstellationControlMsg::NotifyVisibilityChange(parent_pipeline_id,
                                                                            frame_id,
                                                                            visibility);
        let result = match self.pipelines.get(&parent_pipeline_id) {
            None => return warn!("Parent pipeline {:?} closed", parent_pipeline_id),
            Some(parent_pipeline) => parent_pipeline.event_loop.send(visibility_msg),
        };
        if let Err(e) = result {
            self.handle_send_error(parent_pipeline_id, e);
        }
    }
}
fn handle_create_canvas_paint_thread_msg(
    &mut self,
    size: &Size2D<i32>,
    response_sender: IpcSender<IpcSender<CanvasMsg>>) {
    // Spin up a 2D canvas paint thread and hand its channel back to the
    // requesting script thread.
    let webrender_api = self.webrender_api_sender.clone();
    let antialias = opts::get().enable_canvas_antialiasing;
    let canvas_sender = CanvasPaintThread::start(*size, webrender_api, antialias);
    if let Err(e) = response_sender.send(canvas_sender) {
        warn!("Create canvas paint thread response failed ({})", e);
    }
}
fn handle_create_webgl_paint_thread_msg(
    &mut self,
    size: &Size2D<i32>,
    attributes: GLContextAttributes,
    response_sender: IpcSender<Result<(IpcSender<CanvasMsg>, GLLimits), String>>) {
    // Start a WebGL paint thread; the outcome (channel plus GL limits, or an
    // error string) goes straight back to the requesting script thread.
    let webrender_api = self.webrender_api_sender.clone();
    let start_result = WebGLPaintThread::start(*size, attributes, webrender_api);
    if let Err(e) = response_sender.send(start_result) {
        warn!("Create WebGL paint thread response failed ({})", e);
    }
}
// Dispatches a WebDriver command to the appropriate pipeline or compositor.
fn handle_webdriver_msg(&mut self, msg: WebDriverCommandMsg) {
    // Find the script channel for the given parent pipeline,
    // and pass the event to that script thread.
    match msg {
        WebDriverCommandMsg::GetWindowSize(_, reply) => {
            // Reply directly from the cached window size.
            let _ = reply.send(self.window_size);
        },
        WebDriverCommandMsg::SetWindowSize(_, size, reply) => {
            // Remember the reply channel; it is answered when the resize
            // completes (see handle_window_size_msg).
            self.webdriver.resize_channel = Some(reply);
            self.compositor_proxy.send(ToCompositorMsg::ResizeTo(size));
        },
        WebDriverCommandMsg::LoadUrl(pipeline_id, load_data, reply) => {
            self.load_url_for_webdriver(pipeline_id, load_data, reply, false);
        },
        WebDriverCommandMsg::Refresh(pipeline_id, reply) => {
            // A refresh is a replace-navigation to the pipeline's own URL.
            let load_data = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => LoadData::new(pipeline.url.clone(), None, None),
                None => return warn!("Pipeline {:?} Refresh after closure.", pipeline_id),
            };
            self.load_url_for_webdriver(pipeline_id, load_data, reply, true);
        }
        WebDriverCommandMsg::ScriptCommand(pipeline_id, cmd) => {
            // Forward the script command to the pipeline's event loop.
            let control_msg = ConstellationControlMsg::WebDriverScriptCommand(pipeline_id, cmd);
            let result = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => pipeline.event_loop.send(control_msg),
                None => return warn!("Pipeline {:?} ScriptCommand after closure.", pipeline_id),
            };
            if let Err(e) = result {
                self.handle_send_error(pipeline_id, e);
            }
        },
        WebDriverCommandMsg::SendKeys(pipeline_id, cmd) => {
            let event_loop = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => pipeline.event_loop.clone(),
                None => return warn!("Pipeline {:?} SendKeys after closure.", pipeline_id),
            };
            // Deliver each key as a separate synthesized key event.
            for (key, mods, state) in cmd {
                let event = CompositorEvent::KeyEvent(None, key, state, mods);
                let control_msg = ConstellationControlMsg::SendEvent(pipeline_id, event);
                if let Err(e) = event_loop.send(control_msg) {
                    return self.handle_send_error(pipeline_id, e);
                }
            }
        },
        WebDriverCommandMsg::TakeScreenshot(pipeline_id, reply) => {
            // Screenshots are only supported for the root frame's current
            // pipeline; anything else answers None.
            let current_pipeline_id = self.frames.get(&self.root_frame_id)
                .map(|root_frame| root_frame.current.pipeline_id);
            if Some(pipeline_id) == current_pipeline_id {
                self.compositor_proxy.send(ToCompositorMsg::CreatePng(reply));
            } else {
                if let Err(e) = reply.send(None) {
                    warn!("Screenshot reply failed ({})", e);
                }
            }
        },
    }
}
// Traverses the session history of `frame_id` until `next_pipeline_id`
// becomes the current entry, then freezes the outgoing pipeline, thaws the
// incoming one, and notifies the compositor and (for iframes) the parent
// script thread.
fn traverse_frame_to_pipeline(&mut self, frame_id: FrameId, next_pipeline_id: PipelineId) {
    // Check if the currently focused pipeline is the pipeline being replaced
    // (or a child of it). This has to be done here, before the current
    // frame tree is modified below.
    let update_focus_pipeline = self.focused_pipeline_in_tree(frame_id);
    let prev_pipeline_id = match self.frames.get_mut(&frame_id) {
        Some(frame) => {
            let prev = frame.current.pipeline_id;
            // Check that this frame contains the pipeline passed in, so that this does not
            // change Frame's state before realizing `next_pipeline_id` is invalid.
            // (`.any()` replaces the previous `.find(..).is_some()` — same
            // behavior, idiomatic form.)
            if frame.next.iter().any(|entry| next_pipeline_id == entry.pipeline_id) {
                // Target is in the future list: walk forward, pushing the
                // entries we pass over into the past.
                frame.prev.push(frame.current.clone());
                while let Some(entry) = frame.next.pop() {
                    if entry.pipeline_id == next_pipeline_id {
                        frame.current = entry;
                        break;
                    } else {
                        frame.prev.push(entry);
                    }
                }
            } else if frame.prev.iter().any(|entry| next_pipeline_id == entry.pipeline_id) {
                // Target is in the past list: walk backward symmetrically.
                frame.next.push(frame.current.clone());
                while let Some(entry) = frame.prev.pop() {
                    if entry.pipeline_id == next_pipeline_id {
                        frame.current = entry;
                        break;
                    } else {
                        frame.next.push(entry);
                    }
                }
            } else if prev != next_pipeline_id {
                return warn!("Tried to traverse frame {:?} to pipeline {:?} it does not contain.",
                             frame_id, next_pipeline_id);
            }
            prev
        },
        None => return warn!("no frame to traverse"),
    };
    let pipeline_info = self.pipelines.get(&prev_pipeline_id).and_then(|p| p.parent_info);
    // If the currently focused pipeline is the one being changed (or a child
    // of the pipeline being changed) then update the focus pipeline to be
    // the replacement.
    if update_focus_pipeline {
        self.focus_pipeline_id = Some(next_pipeline_id);
    }
    // Suspend the old pipeline, and resume the new one.
    if let Some(prev_pipeline) = self.pipelines.get(&prev_pipeline_id) {
        prev_pipeline.freeze();
    }
    if let Some(next_pipeline) = self.pipelines.get(&next_pipeline_id) {
        next_pipeline.thaw();
    }
    // Set paint permissions correctly for the compositor layers.
    self.send_frame_tree();
    // Update the owning iframe to point to the new pipeline id.
    // This makes things like contentDocument work correctly.
    if let Some((parent_pipeline_id, _)) = pipeline_info {
        let msg = ConstellationControlMsg::UpdatePipelineId(parent_pipeline_id,
                                                            frame_id,
                                                            next_pipeline_id);
        let result = match self.pipelines.get(&parent_pipeline_id) {
            None => return warn!("Pipeline {:?} child traversed after closure.", parent_pipeline_id),
            Some(pipeline) => pipeline.event_loop.send(msg),
        };
        if let Err(e) = result {
            self.handle_send_error(parent_pipeline_id, e);
        }
        // If this is an iframe, send a mozbrowser location change event.
        // This is the result of a back/forward traversal.
        self.trigger_mozbrowserlocationchange(next_pipeline_id);
    }
}
// Walks up the pipeline's ancestor chain to find the nearest enclosing
// mozbrowser iframe's frame; with mozbrowser disabled, the root frame is
// the only top-level frame.
fn get_top_level_frame_for_pipeline(&self, mut pipeline_id: PipelineId) -> FrameId {
    if PREFS.is_mozbrowser_enabled() {
        loop {
            match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => match pipeline.parent_info {
                    // A mozbrowser iframe is itself top-level.
                    Some((_, FrameType::MozBrowserIFrame)) => return pipeline.frame_id,
                    // Ordinary iframe: keep walking up the parent chain.
                    Some((parent_id, _)) => pipeline_id = parent_id,
                    // No parent: reached the root.
                    None => return self.root_frame_id,
                },
                None => {
                    warn!("Finding top-level ancestor for pipeline {} after closure.", pipeline_id);
                    return self.root_frame_id;
                },
            }
        }
    } else {
        // If mozbrowser is not enabled, the root frame is the only top-level frame
        self.root_frame_id
    }
}
fn load_url_for_webdriver(&mut self,
                          pipeline_id: PipelineId,
                          load_data: LoadData,
                          reply: IpcSender<webdriver_msg::LoadStatus>,
                          replace: bool) {
    // Kick off the navigation; if a pipeline is actually navigating, stash
    // the reply channel so WebDriver is answered on load completion.
    if let Some(new_pipeline_id) = self.load_url(pipeline_id, load_data, replace) {
        self.webdriver.load_channel = Some((new_pipeline_id, reply));
    }
}
// Activates a pending frame change: makes `frame_change.new_pipeline_id`
// the current entry of its frame, creating the frame if it is new and
// evicting the replaced pipeline when this was a replace-navigation.
fn add_or_replace_pipeline_in_frame_tree(&mut self, frame_change: FrameChange) {
    debug!("Setting frame {} to be pipeline {}.", frame_change.frame_id, frame_change.new_pipeline_id);
    // If the currently focused pipeline is the one being changed (or a child
    // of the pipeline being changed) then update the focus pipeline to be
    // the replacement.
    if let Some(old_pipeline_id) = frame_change.old_pipeline_id {
        if let Some(old_frame_id) = self.pipelines.get(&old_pipeline_id).map(|pipeline| pipeline.frame_id) {
            if self.focused_pipeline_in_tree(old_frame_id) {
                self.focus_pipeline_id = Some(frame_change.new_pipeline_id);
            }
        }
    }
    if self.frames.contains_key(&frame_change.frame_id) {
        if frame_change.replace {
            // Swap the current entry in place; the displaced pipeline is no
            // longer referenced by any history entry, so close it.
            let evicted = self.frames.get_mut(&frame_change.frame_id).map(|frame| {
                frame.replace_current(frame_change.new_pipeline_id)
            });
            if let Some(evicted) = evicted {
                self.close_pipeline(evicted.pipeline_id, ExitPipelineMode::Normal);
            }
        } else {
            // Push a new session history entry onto the existing frame.
            if let Some(ref mut frame) = self.frames.get_mut(&frame_change.frame_id) {
                frame.load(frame_change.new_pipeline_id);
            }
        }
    } else {
        // The new pipeline is in a new frame with no history
        self.new_frame(frame_change.frame_id, frame_change.new_pipeline_id);
    }
    if !frame_change.replace {
        // If this is an iframe, send a mozbrowser location change event.
        // This is the result of a link being clicked and a navigation completing.
        self.trigger_mozbrowserlocationchange(frame_change.new_pipeline_id);
        // A fresh navigation invalidates any forward session history.
        let top_level_frame_id = self.get_top_level_frame_for_pipeline(frame_change.new_pipeline_id);
        self.clear_joint_session_future(top_level_frame_id);
    }
    // Build frame tree
    self.send_frame_tree();
}
// A pipeline's document is ready to become the active one for its frame.
fn handle_activate_document_msg(&mut self, pipeline_id: PipelineId) {
    debug!("Document ready to activate {:?}", pipeline_id);
    // Notify the parent (if there is one).
    if let Some(pipeline) = self.pipelines.get(&pipeline_id) {
        if let Some((parent_pipeline_id, _)) = pipeline.parent_info {
            if let Some(parent_pipeline) = self.pipelines.get(&parent_pipeline_id) {
                let msg = ConstellationControlMsg::FramedContentChanged(parent_pipeline_id, pipeline.frame_id);
                // Best-effort: the parent may already be shutting down.
                let _ = parent_pipeline.event_loop.send(msg);
            }
        }
    }
    // Find the pending frame change whose new pipeline id is pipeline_id.
    // (rposition: if several match, the most recently pushed one wins.)
    let pending_index = self.pending_frames.iter().rposition(|frame_change| {
        frame_change.new_pipeline_id == pipeline_id
    });
    // If it is found, remove it from the pending frames, and make it
    // the active document of its frame.
    if let Some(pending_index) = pending_index {
        let frame_change = self.pending_frames.swap_remove(pending_index);
        self.add_or_replace_pipeline_in_frame_tree(frame_change);
    }
}
/// Called when the window is resized. Propagates the new size to the root
/// frame's active pipeline (Resize), its inactive session-history entries
/// (ResizeInactive), and any pending root-level pipelines, then answers an
/// outstanding WebDriver resize request if one exists.
fn handle_window_size_msg(&mut self, new_size: WindowSizeData, size_type: WindowSizeType) {
    debug!("handle_window_size_msg: {:?} {:?}", new_size.initial_viewport.to_untyped(),
           new_size.visible_viewport.to_untyped());
    if let Some(frame) = self.frames.get(&self.root_frame_id) {
        // Send Resize (or ResizeInactive) messages to each
        // pipeline in the frame tree.
        let pipeline_id = frame.current.pipeline_id;
        let pipeline = match self.pipelines.get(&pipeline_id) {
            None => return warn!("Pipeline {:?} resized after closing.", pipeline_id),
            Some(pipeline) => pipeline,
        };
        let _ = pipeline.event_loop.send(ConstellationControlMsg::Resize(
            pipeline.id,
            new_size,
            size_type
        ));
        for entry in frame.prev.iter().chain(&frame.next) {
            let pipeline = match self.pipelines.get(&entry.pipeline_id) {
                None => {
                    // Fix: previously this logged the outer `pipeline_id`
                    // (the frame's current pipeline) instead of the entry
                    // whose lookup actually failed.
                    warn!("Inactive pipeline {:?} resized after closing.", entry.pipeline_id);
                    continue;
                },
                Some(pipeline) => pipeline,
            };
            let _ = pipeline.event_loop.send(ConstellationControlMsg::ResizeInactive(
                pipeline.id,
                new_size
            ));
        }
    }
    // Send resize message to any pending pipelines that aren't loaded yet.
    for pending_frame in &self.pending_frames {
        let pipeline_id = pending_frame.new_pipeline_id;
        let pipeline = match self.pipelines.get(&pipeline_id) {
            None => { warn!("Pending pipeline {:?} is closed", pipeline_id); continue; }
            Some(pipeline) => pipeline,
        };
        // Only root-level pending pipelines (no parent) take the window size.
        if pipeline.parent_info.is_none() {
            let _ = pipeline.event_loop.send(ConstellationControlMsg::Resize(
                pipeline.id,
                new_size,
                size_type
            ));
        }
    }
    // Answer a pending WebDriver SetWindowSize request, if any.
    if let Some(resize_channel) = self.webdriver.resize_channel.take() {
        let _ = resize_channel.send(new_size);
    }
    self.window_size = new_size;
}
/// Handle updating actual viewport / zoom due to @viewport rules
fn handle_viewport_constrained_msg(&mut self,
                                   pipeline_id: PipelineId,
                                   constraints: ViewportConstraints) {
    // The compositor owns the actual viewport, so hand the constraints over.
    let msg = ToCompositorMsg::ViewportConstrained(pipeline_id, constraints);
    self.compositor_proxy.send(msg)
}
/// Checks the state of all script and layout pipelines to see if they are idle
/// and compares the current layout state to what the compositor has. This is used
/// to check if the output image is "stable" and can be written as a screenshot
/// for reftests.
/// Since this function is only used in reftests, we do not harden it against panic.
fn handle_is_ready_to_save_image(&mut self,
                                 pipeline_states: HashMap<PipelineId, Epoch>) -> ReadyToSave {
    // Note that this function can panic, due to ipc-channel creation failure.
    // avoiding this panic would require a mechanism for dealing
    // with low-resource scenarios.
    //
    // If there is no root frame yet, the initial page has
    // not loaded, so there is nothing to save yet.
    if !self.frames.contains_key(&self.root_frame_id) {
        return ReadyToSave::NoRootFrame;
    }
    // If there are pending loads, wait for those to complete.
    if !self.pending_frames.is_empty() {
        return ReadyToSave::PendingFrames;
    }
    // Reply channels reused for every pipeline queried below.
    let (state_sender, state_receiver) = ipc::channel().expect("Failed to create IPC channel!");
    let (epoch_sender, epoch_receiver) = ipc::channel().expect("Failed to create IPC channel!");
    // Step through the current frame tree, checking that the script
    // thread is idle, and that the current epoch of the layout thread
    // matches what the compositor has painted. If all these conditions
    // are met, then the output image should not change and a reftest
    // screenshot can safely be written.
    for frame in self.current_frame_tree_iter(self.root_frame_id) {
        let pipeline_id = frame.current.pipeline_id;
        debug!("Checking readiness of frame {}, pipeline {}.", frame.id, pipeline_id);
        let pipeline = match self.pipelines.get(&pipeline_id) {
            None => {
                warn!("Pipeline {:?} screenshot while closing.", pipeline_id);
                continue;
            },
            Some(pipeline) => pipeline,
        };
        // Check to see if there are any webfonts still loading.
        //
        // If GetWebFontLoadState returns false, either there are no
        // webfonts loading, or there's a WebFontLoaded message waiting in
        // script_chan's message queue. Therefore, we need to check this
        // before we check whether the document is ready; otherwise,
        // there's a race condition where a webfont has finished loading,
        // but hasn't yet notified the document.
        let msg = LayoutControlMsg::GetWebFontLoadState(state_sender.clone());
        if let Err(e) = pipeline.layout_chan.send(msg) {
            warn!("Get web font failed ({})", e);
        }
        // A failed recv is treated as "still loading" (true) to stay safe.
        if state_receiver.recv().unwrap_or(true) {
            return ReadyToSave::WebFontNotLoaded;
        }
        // See if this pipeline has reached idle script state yet.
        match self.document_states.get(&frame.current.pipeline_id) {
            Some(&DocumentState::Idle) => {}
            Some(&DocumentState::Pending) | None => {
                return ReadyToSave::DocumentLoading;
            }
        }
        // Check the visible rectangle for this pipeline. If the constellation has received a
        // size for the pipeline, then its painting should be up to date. If the constellation
        // *hasn't* received a size, it could be that the layer was hidden by script before the
        // compositor discovered it, so we just don't check the layer.
        if let Some(size) = pipeline.size {
            // If the rectangle for this pipeline is zero sized, it will
            // never be painted. In this case, don't query the layout
            // thread as it won't contribute to the final output image.
            if size == TypedSize2D::zero() {
                continue;
            }
            // Get the epoch that the compositor has drawn for this pipeline.
            let compositor_epoch = pipeline_states.get(&frame.current.pipeline_id);
            match compositor_epoch {
                Some(compositor_epoch) => {
                    // Synchronously query the layout thread to see if the current
                    // epoch matches what the compositor has drawn. If they match
                    // (and script is idle) then this pipeline won't change again
                    // and can be considered stable.
                    let message = LayoutControlMsg::GetCurrentEpoch(epoch_sender.clone());
                    if let Err(e) = pipeline.layout_chan.send(message) {
                        warn!("Failed to send GetCurrentEpoch ({}).", e);
                    }
                    match epoch_receiver.recv() {
                        Err(e) => warn!("Failed to receive current epoch ({}).", e),
                        Ok(layout_thread_epoch) => if layout_thread_epoch != *compositor_epoch {
                            return ReadyToSave::EpochMismatch;
                        },
                    }
                }
                None => {
                    // The compositor doesn't know about this pipeline yet.
                    // Assume it hasn't rendered yet.
                    return ReadyToSave::PipelineUnknown;
                }
            }
        }
    }
    // All script threads are idle and layout epochs match compositor, so output image!
    ReadyToSave::Ready
}
// Discards all forward (future) session history entries for `frame_id` and
// every descendant frame, then closes the pipelines those entries owned.
fn clear_joint_session_future(&mut self, frame_id: FrameId) {
    let mut evicted_pipelines = vec!();
    // Worklist traversal of the frame tree (avoids recursion while holding
    // mutable borrows of self.frames).
    let mut frames_to_clear = vec!(frame_id);
    while let Some(frame_id) = frames_to_clear.pop() {
        let frame = match self.frames.get_mut(&frame_id) {
            Some(frame) => frame,
            None => {
                warn!("Removed forward history after frame {:?} closure.", frame_id);
                continue;
            }
        };
        evicted_pipelines.extend(frame.remove_forward_entries());
        // Queue up the child frames of every remaining entry's pipeline.
        for entry in frame.next.iter().chain(frame.prev.iter()).chain(once(&frame.current)) {
            let pipeline = match self.pipelines.get(&entry.pipeline_id) {
                Some(pipeline) => pipeline,
                None => {
                    warn!("Removed forward history after pipeline {:?} closure.", entry.pipeline_id);
                    continue;
                }
            };
            frames_to_clear.extend_from_slice(&pipeline.children);
        }
    }
    // Evicted entries no longer appear in any history, so close them.
    for entry in evicted_pipelines {
        self.close_pipeline(entry.pipeline_id, ExitPipelineMode::Normal);
    }
}
// Close a frame (and all children)
fn close_frame(&mut self, frame_id: FrameId, exit_mode: ExitPipelineMode) {
    debug!("Closing frame {}.", frame_id);
    // Look up the parent pipeline (if any) up front, before the frame is
    // torn down, so we can unlink this frame from it afterwards.
    let parent_info = match self.frames.get(&frame_id) {
        Some(frame) => match self.pipelines.get(&frame.current.pipeline_id) {
            Some(pipeline) => pipeline.parent_info,
            None => None,
        },
        None => None,
    };
    self.close_frame_children(frame_id, exit_mode);
    self.event_loops.remove(&frame_id);
    if self.frames.remove(&frame_id).is_none() {
        warn!("Closing frame {:?} twice.", frame_id);
    }
    if let Some((parent_pipeline_id, _)) = parent_info {
        if let Some(parent_pipeline) = self.pipelines.get_mut(&parent_pipeline_id) {
            parent_pipeline.remove_child(frame_id);
        } else {
            return warn!("Pipeline {:?} child closed after parent.", parent_pipeline_id);
        }
    }
    debug!("Closed frame {:?}.", frame_id);
}
// Close the children of a frame
fn close_frame_children(&mut self, frame_id: FrameId, exit_mode: ExitPipelineMode) {
    debug!("Closing frame children {}.", frame_id);
    // Collect every pipeline id first, then close them afterwards. This
    // ordering is vital - close_pipeline() may itself close child frames,
    // which must still find this frame registered in the frames hash map.
    let mut to_close: Vec<PipelineId> = Vec::new();
    for frame_change in &self.pending_frames {
        if frame_change.frame_id == frame_id {
            to_close.push(frame_change.new_pipeline_id);
        }
    }
    if let Some(frame) = self.frames.get(&frame_id) {
        to_close.extend(frame.next.iter().map(|state| state.pipeline_id));
        to_close.push(frame.current.pipeline_id);
        to_close.extend(frame.prev.iter().map(|state| state.pipeline_id));
    }
    for pipeline_id in to_close {
        self.close_pipeline(pipeline_id, exit_mode);
    }
    debug!("Closed frame children {}.", frame_id);
}
// Close all pipelines at and beneath a given frame
fn close_pipeline(&mut self, pipeline_id: PipelineId, exit_mode: ExitPipelineMode) {
debug!("Closing pipeline {:?}.", pipeline_id);
// Store information about the frames to be closed. Then close the
// frames, before removing ourself from the pipelines hash map. This
// ordering is vital - so that if close_frames() ends up closing
// any child pipelines, they can be removed from the parent pipeline correctly.
// The child ids are copied into a temporary so the immutable borrow of
// `self.pipelines` ends before close_frame() (which needs `&mut self`).
let frames_to_close = {
let mut frames_to_close = vec!();
if let Some(pipeline) = self.pipelines.get(&pipeline_id) {
frames_to_close.extend_from_slice(&pipeline.children);
}
frames_to_close
};
// Remove any child frames
for child_frame in &frames_to_close {
self.close_frame(*child_frame, exit_mode);
}
// Note, we don't remove the pipeline now, we wait for the message to come back from
// the pipeline.
let pipeline = match self.pipelines.get(&pipeline_id) {
Some(pipeline) => pipeline,
None => return warn!("Closing pipeline {:?} twice.", pipeline_id),
};
// Remove this pipeline from pending frames if it hasn't loaded yet.
let pending_index = self.pending_frames.iter().position(|frame_change| {
frame_change.new_pipeline_id == pipeline_id
});
if let Some(pending_index) = pending_index {
self.pending_frames.remove(pending_index);
}
// Inform script, compositor that this pipeline has exited.
match exit_mode {
ExitPipelineMode::Normal => pipeline.exit(),
ExitPipelineMode::Force => pipeline.force_exit(),
}
debug!("Closed pipeline {:?}.", pipeline_id);
}
// Randomly close a pipeline if --random-pipeline-closure-probability is set.
// Used as a chaos-monkey style robustness test of constellation recovery.
fn maybe_close_random_pipeline(&mut self) {
match self.random_pipeline_closure {
Some((ref mut rng, probability)) => if probability <= rng.gen::<f32>() { return },
_ => return,
};
// In order to get repeatability, we sort the pipeline ids.
let mut pipeline_ids: Vec<&PipelineId> = self.pipelines.keys().collect();
pipeline_ids.sort();
// Re-borrow the rng here: the borrow taken above had to end so that
// `self.pipelines` could be read in the meantime.
if let Some((ref mut rng, _)) = self.random_pipeline_closure {
if let Some(pipeline_id) = rng.choose(&*pipeline_ids) {
if let Some(pipeline) = self.pipelines.get(pipeline_id) {
// Don't kill the mozbrowser pipeline
if PREFS.is_mozbrowser_enabled() && pipeline.parent_info.is_none() {
info!("Not closing mozbrowser pipeline {}.", pipeline_id);
} else {
// Note that we deliberately do not do any of the tidying up
// associated with closing a pipeline. The constellation should cope!
warn!("Randomly closing pipeline {}.", pipeline_id);
pipeline.force_exit();
}
}
}
}
}
// Convert a frame to a sendable form to pass to the compositor
fn frame_to_sendable(&self, frame_id: FrameId) -> Option<SendableFrameTree> {
    let frame = match self.frames.get(&frame_id) {
        Some(frame) => frame,
        None => return None,
    };
    let pipeline = match self.pipelines.get(&frame.current.pipeline_id) {
        Some(pipeline) => pipeline,
        None => return None,
    };
    let mut frame_tree = SendableFrameTree {
        pipeline: pipeline.to_sendable(),
        size: pipeline.size,
        children: vec!(),
    };
    // Recursively convert each child frame, skipping any that have been
    // closed in the meantime.
    for child_frame_id in &pipeline.children {
        if let Some(child) = self.frame_to_sendable(*child_frame_id) {
            frame_tree.children.push(child);
        }
    }
    Some(frame_tree)
}
// Send the current frame tree to compositor
fn send_frame_tree(&mut self) {
    // Note that this function can panic, due to ipc-channel creation failure.
    // avoiding this panic would require a mechanism for dealing
    // with low-resource scenarios.
    debug!("Sending frame tree for frame {}.", self.root_frame_id);
    let frame_tree = match self.frame_to_sendable(self.root_frame_id) {
        Some(frame_tree) => frame_tree,
        None => return,
    };
    let (chan, port) = ipc::channel().expect("Failed to create IPC channel!");
    self.compositor_proxy.send(ToCompositorMsg::SetFrameTree(frame_tree, chan));
    // Wait for the compositor's acknowledgement; a receive error means our
    // message was discarded, probably because we are shutting down.
    if port.recv().is_err() {
        warn!("Compositor has discarded SetFrameTree");
    }
}
// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowserlocationchange
// Note that this is a no-op if the pipeline is not a mozbrowser iframe
fn trigger_mozbrowserlocationchange(&self, pipeline_id: PipelineId) {
    let pipeline = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => pipeline,
        None => return warn!("triggered mozbrowser location change on closed pipeline {}", pipeline_id),
    };
    // Only pipelines hosted in a mozbrowser iframe fire this event.
    let parent_id = match pipeline.parent_info {
        Some((parent_id, FrameType::MozBrowserIFrame)) => parent_id,
        _ => return,
    };
    let parent = match self.pipelines.get(&parent_id) {
        Some(parent) => parent,
        None => return warn!("triggered mozbrowser location change on closed parent {}", parent_id),
    };
    let can_go_forward = !self.joint_session_future_is_empty(pipeline.frame_id);
    let can_go_back = !self.joint_session_past_is_empty(pipeline.frame_id);
    let url = pipeline.url.to_string();
    let event = MozBrowserEvent::LocationChange(url, can_go_back, can_go_forward);
    parent.trigger_mozbrowser_event(Some(pipeline.frame_id), event);
}
// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowsererror
// Note that this does not require the pipeline to be an immediate child of the root
fn trigger_mozbrowsererror(&mut self, top_level_frame_id: FrameId, reason: String, backtrace: Option<String>) {
if !PREFS.is_mozbrowser_enabled() { return; }
let mut report = String::new();
for (thread_name, warning) in self.handled_warnings.drain(..) {
report.push_str("\nWARNING: ");
if let Some(thread_name) = thread_name {
report.push_str("<");
report.push_str(&*thread_name);
report.push_str(">: ");
}
report.push_str(&*warning);
}
report.push_str("\nERROR: ");
report.push_str(&*reason);
if let Some(backtrace) = backtrace {
report.push_str("\n\n");
report.push_str(&*backtrace);
}
let event = MozBrowserEvent::Error(MozBrowserErrorType::Fatal, reason, report);
match self.frames.get(&top_level_frame_id) {
None => warn!("Mozbrowser error after top-level frame closed."),
Some(frame) => match self.pipelines.get(&frame.current.pipeline_id) {
None => warn!("Mozbrowser error after top-level pipeline closed."),
Some(pipeline) => match pipeline.parent_info {
None => pipeline.trigger_mozbrowser_event(None, event),
Some((parent_id, _)) => match self.pipelines.get(&parent_id) {
None => warn!("Mozbrowser error after root pipeline closed."),
Some(parent) => parent.trigger_mozbrowser_event(Some(top_level_frame_id), event),
},
},
},
};
}
// Returns true if the currently focused pipeline (if any) lives in the
// frame tree rooted at the given frame.
fn focused_pipeline_in_tree(&self, frame_id: FrameId) -> bool {
    match self.focus_pipeline_id {
        Some(pipeline_id) => self.pipeline_exists_in_tree(pipeline_id, frame_id),
        None => false,
    }
}
// Returns true if the given pipeline is displayed somewhere in the
// currently-active frame tree, starting from the root frame.
fn pipeline_is_in_current_frame(&self, pipeline_id: PipelineId) -> bool {
self.pipeline_exists_in_tree(pipeline_id, self.root_frame_id)
}
// Returns true if any frame in the tree rooted at `root_frame_id` is
// currently displaying `pipeline_id` (only current entries are checked,
// not session history).
fn pipeline_exists_in_tree(&self,
                           pipeline_id: PipelineId,
                           root_frame_id: FrameId) -> bool {
    for frame in self.current_frame_tree_iter(root_frame_id) {
        if frame.current.pipeline_id == pipeline_id {
            return true;
        }
    }
    false
}
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use audio_video_metadata;
use document_loader::LoadType;
use dom::attr::Attr;
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::HTMLMediaElementBinding::CanPlayTypeResult;
use dom::bindings::codegen::Bindings::HTMLMediaElementBinding::HTMLMediaElementConstants;
use dom::bindings::codegen::Bindings::HTMLMediaElementBinding::HTMLMediaElementMethods;
use dom::bindings::codegen::Bindings::MediaErrorBinding::MediaErrorConstants::*;
use dom::bindings::codegen::Bindings::MediaErrorBinding::MediaErrorMethods;
use dom::bindings::codegen::InheritTypes::{ElementTypeId, HTMLElementTypeId};
use dom::bindings::codegen::InheritTypes::{HTMLMediaElementTypeId, NodeTypeId};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{MutNullableJS, Root};
use dom::bindings::refcounted::Trusted;
use dom::bindings::reflector::DomObject;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, AttributeMutation};
use dom::eventtarget::EventTarget;
use dom::htmlelement::HTMLElement;
use dom::htmlsourceelement::HTMLSourceElement;
use dom::mediaerror::MediaError;
use dom::node::{window_from_node, document_from_node, Node, UnbindContext};
use dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use microtask::{Microtask, MicrotaskRunnable};
use mime::{Mime, SubLevel, TopLevel};
use net_traits::{FetchResponseListener, FetchMetadata, Metadata, NetworkError};
use net_traits::request::{CredentialsMode, Destination, RequestInit, Type as RequestType};
use network_listener::{NetworkListener, PreInvoke};
use script_thread::ScriptThread;
use servo_url::ServoUrl;
use std::cell::Cell;
use std::sync::{Arc, Mutex};
use task_source::TaskSource;
use time::{self, Timespec, Duration};
#[dom_struct]
pub struct HTMLMediaElement {
/// The base HTMLElement state this media element inherits from.
htmlelement: HTMLElement,
/// https://html.spec.whatwg.org/multipage/#dom-media-networkstate
// FIXME(nox): Use an enum.
network_state: Cell<NetworkState>,
/// https://html.spec.whatwg.org/multipage/#dom-media-readystate
// FIXME(nox): Use an enum.
ready_state: Cell<ReadyState>,
/// https://html.spec.whatwg.org/multipage/#dom-media-currentsrc
current_src: DOMRefCell<String>,
// FIXME(nox): Document this one, I have no idea what it is used for.
// NOTE(review): it is incremented by media_element_load_algorithm() and
// captured by HTMLMediaElementContext, presumably to detect stale fetches.
generation_id: Cell<u32>,
/// https://html.spec.whatwg.org/multipage/#fire-loadeddata
///
/// Reset to false every time the load algorithm is invoked.
fired_loadeddata_event: Cell<bool>,
/// https://html.spec.whatwg.org/multipage/#dom-media-error
error: MutNullableJS<MediaError>,
/// https://html.spec.whatwg.org/multipage/#dom-media-paused
paused: Cell<bool>,
/// https://html.spec.whatwg.org/multipage/#attr-media-autoplay
autoplaying: Cell<bool>,
/// The details of the video currently related to this media element.
// FIXME(nox): Why isn't this in HTMLVideoElement?
video: DOMRefCell<Option<VideoMedia>>,
}
/// https://html.spec.whatwg.org/multipage/#dom-media-networkstate
///
/// Discriminant values mirror the NETWORK_* constants exposed on the
/// HTMLMediaElement IDL interface, so the enum casts directly to u16.
#[derive(Clone, Copy, HeapSizeOf, JSTraceable, PartialEq)]
#[repr(u8)]
enum NetworkState {
Empty = HTMLMediaElementConstants::NETWORK_EMPTY as u8,
Idle = HTMLMediaElementConstants::NETWORK_IDLE as u8,
Loading = HTMLMediaElementConstants::NETWORK_LOADING as u8,
NoSource = HTMLMediaElementConstants::NETWORK_NO_SOURCE as u8,
}
/// https://html.spec.whatwg.org/multipage/#dom-media-readystate
///
/// Derives PartialOrd so that readiness transitions can be compared with
/// `<=`/`>=` in change_ready_state(); the discriminants mirror the HAVE_*
/// IDL constants and are ordered from least to most data available.
#[derive(Clone, Copy, HeapSizeOf, JSTraceable, PartialEq, PartialOrd)]
#[repr(u8)]
enum ReadyState {
HaveNothing = HTMLMediaElementConstants::HAVE_NOTHING as u8,
HaveMetadata = HTMLMediaElementConstants::HAVE_METADATA as u8,
HaveCurrentData = HTMLMediaElementConstants::HAVE_CURRENT_DATA as u8,
HaveFutureData = HTMLMediaElementConstants::HAVE_FUTURE_DATA as u8,
HaveEnoughData = HTMLMediaElementConstants::HAVE_ENOUGH_DATA as u8,
}
/// Metadata about the media resource currently associated with the element.
#[derive(HeapSizeOf, JSTraceable)]
pub struct VideoMedia {
// Container format name — presumably as reported by audio_video_metadata;
// TODO confirm against check_metadata().
format: String,
#[ignore_heap_size_of = "defined in time"]
duration: Duration,
width: u32,
height: u32,
// Video track description; exact semantics depend on audio_video_metadata.
video: String,
// Audio track description, if the resource has an audio track.
audio: Option<String>,
}
impl HTMLMediaElement {
/// Creates a media element in its initial state: empty network state,
/// HAVE_NOTHING ready state, paused, with the autoplaying flag set.
pub fn new_inherited(
tag_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> Self {
Self {
htmlelement: HTMLElement::new_inherited(tag_name, prefix, document),
network_state: Cell::new(NetworkState::Empty),
ready_state: Cell::new(ReadyState::HaveNothing),
current_src: DOMRefCell::new("".to_owned()),
generation_id: Cell::new(0),
fired_loadeddata_event: Cell::new(false),
error: Default::default(),
paused: Cell::new(true),
// FIXME(nox): Why is this initialised to true?
autoplaying: Cell::new(true),
video: DOMRefCell::new(None),
}
}
// Returns which concrete media element (audio or video) this instance is.
fn media_type_id(&self) -> HTMLMediaElementTypeId {
    if let NodeTypeId::Element(ElementTypeId::HTMLElement(
        HTMLElementTypeId::HTMLMediaElement(media_type_id),
    )) = self.upcast::<Node>().type_id() {
        media_type_id
    } else {
        // Any HTMLMediaElement's node type id must carry a media type id.
        unreachable!()
    }
}
/// https://html.spec.whatwg.org/multipage/#internal-pause-steps
///
/// Clears the autoplaying flag and, if the element was playing, marks it
/// paused and queues a task to fire `timeupdate` and `pause`.
fn internal_pause_steps(&self) {
// Step 1.
self.autoplaying.set(false);
// Step 2.
if !self.Paused() {
// Step 2.1.
self.paused.set(true);
// Step 2.2.
// FIXME(nox): Take pending play promises and let promises be the
// result.
// Step 2.3.
let window = window_from_node(self);
// `Trusted` keeps the event target alive until the queued task runs.
let target = Trusted::new(self.upcast::<EventTarget>());
// FIXME(nox): Why are errors silenced here?
let _ = window.dom_manipulation_task_source().queue(
task!(internal_pause_steps: move || {
let target = target.root();
// Step 2.3.1.
target.fire_event(atom!("timeupdate"));
// Step 2.3.2.
target.fire_event(atom!("pause"));
// Step 2.3.3.
// FIXME(nox): Reject pending play promises with promises
// and an "AbortError" DOMException.
}),
window.upcast(),
);
// Step 2.4.
// FIXME(nox): Set the official playback position to the current
// playback position.
}
}
// https://html.spec.whatwg.org/multipage/#notify-about-playing
// Queues a task that fires the `playing` event on this element.
fn notify_about_playing(&self) {
// Step 1.
// TODO(nox): Take pending play promises and let promises be the result.
// Step 2.
// `Trusted` keeps the event target alive until the queued task runs.
let target = Trusted::new(self.upcast::<EventTarget>());
let window = window_from_node(self);
// FIXME(nox): Why are errors silenced here?
let _ = window.dom_manipulation_task_source().queue(
task!(notify_about_playing: move || {
let target = target.root();
// Step 2.1.
target.fire_event(atom!("playing"));
// Step 2.2.
// FIXME(nox): Resolve pending play promises with promises.
}),
window.upcast(),
);
}
// https://html.spec.whatwg.org/multipage/#ready-states
// Updates the ready state and queues whatever events the spec mandates
// for the particular (old, new) state transition.
fn change_ready_state(&self, ready_state: ReadyState) {
let old_ready_state = self.ready_state.get();
self.ready_state.set(ready_state);
// No transition events are fired while the networkState is NETWORK_EMPTY.
if self.network_state.get() == NetworkState::Empty {
return;
}
let window = window_from_node(self);
let task_source = window.dom_manipulation_task_source();
// Step 1.
match (old_ready_state, ready_state) {
(ReadyState::HaveNothing, ReadyState::HaveMetadata) => {
task_source.queue_simple_event(
self.upcast(),
atom!("loadedmetadata"),
&window,
);
// No other steps are applicable in this case.
return;
},
(ReadyState::HaveMetadata, new) if new >= ReadyState::HaveCurrentData => {
// `loadeddata` is fired at most once per invocation of the load
// algorithm (the flag is reset by media_element_load_algorithm).
if !self.fired_loadeddata_event.get() {
self.fired_loadeddata_event.set(true);
task_source.queue_simple_event(
self.upcast(),
atom!("loadeddata"),
&window,
);
}
// Steps for the transition from HaveMetadata to HaveCurrentData
// or HaveFutureData also apply here, as per the next match
// expression.
},
(ReadyState::HaveFutureData, new) if new <= ReadyState::HaveCurrentData => {
// FIXME(nox): Queue a task to fire timeupdate and waiting
// events if the conditions call from the spec are met.
// No other steps are applicable in this case.
return;
},
_ => (),
}
// Crossing upwards over the HaveFutureData boundary: playback can start.
if old_ready_state <= ReadyState::HaveCurrentData && ready_state >= ReadyState::HaveFutureData {
task_source.queue_simple_event(
self.upcast(),
atom!("canplay"),
&window,
);
if !self.Paused() {
self.notify_about_playing();
}
}
if ready_state == ReadyState::HaveEnoughData {
// TODO: Check sandboxed automatic features browsing context flag.
// FIXME(nox): I have no idea what this TODO is about.
// FIXME(nox): Review this block.
if self.autoplaying.get() &&
self.Paused() &&
self.Autoplay() {
// Step 1
self.paused.set(false);
// TODO step 2: show poster
// Step 3
task_source.queue_simple_event(
self.upcast(),
atom!("play"),
&window,
);
// Step 4
self.notify_about_playing();
// Step 5
self.autoplaying.set(false);
}
// FIXME(nox): According to the spec, this should come *before* the
// "play" event.
task_source.queue_simple_event(
self.upcast(),
atom!("canplaythrough"),
&window,
);
}
// TODO Step 2: Media controller.
// FIXME(nox): There is no step 2 in the spec.
}
// https://html.spec.whatwg.org/multipage/#concept-media-load-algorithm
// Performs the synchronous prefix of the resource selection algorithm and
// queues a microtask for the part that must await a stable state.
fn invoke_resource_selection_algorithm(&self) {
// Step 1.
self.network_state.set(NetworkState::NoSource);
// Step 2.
// FIXME(nox): Set show poster flag to true.
// Step 3.
// FIXME(nox): Set the delaying-the-load-event flag to true.
// Step 4.
// If the resource selection mode in the synchronous section is
// "attribute", the URL of the resource to fetch is relative to the
// media element's node document when the src attribute was last
// changed, which is why we need to pass the base URL in the task
// right here.
let doc = document_from_node(self);
let task = MediaElementMicrotask::ResourceSelectionTask {
elem: Root::from_ref(self),
base_url: doc.base_url()
};
// FIXME(nox): This will later call the resource_selection_algorith_sync
// method from below, if microtasks were trait objects, we would be able
// to put the code directly in this method, without the boilerplate
// indirections.
ScriptThread::await_stable_state(Microtask::MediaElement(task));
}
// https://html.spec.whatwg.org/multipage/#concept-media-load-algorithm
// The stable-state continuation of the resource selection algorithm:
// picks a source (currently only the src attribute is supported) and
// hands it to the resource fetch algorithm.
// FIXME(nox): Why does this need to be passed the base URL?
// NOTE(review): the base URL is captured when the task is queued because
// the src attribute must be resolved against the document state at that
// time — see invoke_resource_selection_algorithm().
fn resource_selection_algorithm_sync(&self, base_url: ServoUrl) {
// Step 5.
// FIXME(nox): Maybe populate the list of pending text tracks.
// Step 6.
enum Mode {
// FIXME(nox): Support media object provider.
#[allow(dead_code)]
Object,
Attribute(String),
// FIXME(nox): Support source element child.
#[allow(dead_code)]
Children(Root<HTMLSourceElement>),
}
// With no src attribute (and no other source), revert to NETWORK_EMPTY.
let mode = if let Some(attr) = self.upcast::<Element>().get_attribute(&ns!(), &local_name!("src")) {
Mode::Attribute(attr.Value().into())
} else {
self.network_state.set(NetworkState::Empty);
return;
};
// Step 7.
self.network_state.set(NetworkState::Loading);
// Step 8.
let window = window_from_node(self);
window.dom_manipulation_task_source().queue_simple_event(
self.upcast(),
atom!("loadstart"),
&window,
);
// Step 9.
match mode {
// Step 9.obj.
Mode::Object => {
// Step 9.obj.1.
*self.current_src.borrow_mut() = "".to_owned();
// Step 9.obj.2.
// FIXME(nox): The rest of the steps should be ran in parallel.
// Step 9.obj.3.
// Note that the resource fetch algorithm itself takes care
// of the cleanup in case of failure itself.
// FIXME(nox): Pass the assigned media provider here.
self.resource_fetch_algorithm(Resource::Object);
},
Mode::Attribute(src) => {
// Step 9.attr.1.
if src.is_empty() {
self.queue_dedicated_media_source_failure_steps();
return;
}
// Step 9.attr.2.
let url_record = match base_url.join(&src) {
Ok(url) => url,
Err(_) => {
self.queue_dedicated_media_source_failure_steps();
return;
}
};
// Step 9.attr.3.
*self.current_src.borrow_mut() = url_record.as_str().into();
// Step 9.attr.4.
// Note that the resource fetch algorithm itself takes care
// of the cleanup in case of failure itself.
self.resource_fetch_algorithm(Resource::Url(url_record));
},
Mode::Children(_source) => {
// Step 9.children.
self.queue_dedicated_media_source_failure_steps()
},
}
}
// https://html.spec.whatwg.org/multipage/#concept-media-load-resource
// Fetches the selected resource: for URLs, issues an async network fetch
// whose responses are routed back to an HTMLMediaElementContext; media
// provider objects are not supported yet and fail immediately.
fn resource_fetch_algorithm(&self, resource: Resource) {
// Steps 1-2.
// Unapplicable, the `resource` variable already conveys which mode
// is in use.
// Step 3.
// FIXME(nox): Remove all media-resource-specific text tracks.
// Step 4.
match resource {
Resource::Url(url) => {
// Step 4.remote.1.
// With preload=none and no pending autoplay, stay idle and wait
// for an explicit play()/load() before hitting the network.
if self.Preload() == "none" && !self.autoplaying.get() {
// Step 4.remote.1.1.
self.network_state.set(NetworkState::Idle);
// Step 4.remote.1.2.
let window = window_from_node(self);
window.dom_manipulation_task_source().queue_simple_event(
self.upcast(),
atom!("suspend"),
&window,
);
// Step 4.remote.1.3.
// FIXME(nox): Queue a task to set the delaying-the-load-event
// flag to false.
// Steps 4.remote.1.4.
// FIXME(nox): Somehow we should wait for the task from previous
// step to be ran before continuing.
// Steps 4.remote.1.5-4.remote.1.7.
// FIXME(nox): Wait for an implementation-defined event and
// then continue with the normal set of steps instead of just
// returning.
return;
}
// Step 4.remote.2.
// FIXME(nox): Handle CORS setting from crossorigin attribute.
let document = document_from_node(self);
let type_ = match self.media_type_id() {
HTMLMediaElementTypeId::HTMLAudioElement => RequestType::Audio,
HTMLMediaElementTypeId::HTMLVideoElement => RequestType::Video,
};
let request = RequestInit {
url: url.clone(),
type_,
destination: Destination::Media,
credentials_mode: CredentialsMode::Include,
use_url_credentials: true,
origin: document.origin().immutable().clone(),
pipeline_id: Some(self.global().pipeline_id()),
referrer_url: Some(document.url()),
referrer_policy: document.get_referrer_policy(),
.. RequestInit::default()
};
// Route fetch notifications through the networking task source back
// to the context, which drives the media data processing steps.
let context = Arc::new(Mutex::new(HTMLMediaElementContext::new(self, url.clone())));
let (action_sender, action_receiver) = ipc::channel().unwrap();
let window = window_from_node(self);
let listener = NetworkListener {
context: context,
task_source: window.networking_task_source(),
canceller: Some(window.task_canceller())
};
ROUTER.add_route(action_receiver.to_opaque(), box move |message| {
listener.notify_fetch(message.to().unwrap());
});
document.fetch_async(LoadType::Media(url), request, action_sender);
},
Resource::Object => {
// FIXME(nox): Use the current media resource.
self.queue_dedicated_media_source_failure_steps();
},
}
}
/// Queues the [dedicated media source failure steps][steps].
///
/// Sets a MEDIA_ERR_SRC_NOT_SUPPORTED error, moves the network state to
/// NETWORK_NO_SOURCE and fires `error`, all from a queued task.
///
/// [steps]: https://html.spec.whatwg.org/multipage/#dedicated-media-source-failure-steps
fn queue_dedicated_media_source_failure_steps(&self) {
// `Trusted` keeps the element alive until the queued task runs.
let this = Trusted::new(self);
let window = window_from_node(self);
// FIXME(nox): Why are errors silenced here?
let _ = window.dom_manipulation_task_source().queue(
task!(dedicated_media_source_failure_steps: move || {
let this = this.root();
// Step 1.
this.error.set(Some(&*MediaError::new(
&window_from_node(&*this),
MEDIA_ERR_SRC_NOT_SUPPORTED,
)));
// Step 2.
// FIXME(nox): Forget the media-resource-specific tracks.
// Step 3.
this.network_state.set(NetworkState::NoSource);
// Step 4.
// FIXME(nox): Set show poster flag to true.
// Step 5.
this.upcast::<EventTarget>().fire_event(atom!("error"));
// Step 6.
// FIXME(nox): Reject pending play promises.
// Step 7.
// FIXME(nox): Set the delaying-the-load-event flag to false.
}),
window.upcast(),
);
}
// https://html.spec.whatwg.org/multipage/#media-element-load-algorithm
// Resets the element (aborting any in-flight load via the generation id)
// and kicks off resource selection again.
fn media_element_load_algorithm(&self) {
// Reset the flag that signals whether loadeddata was ever fired for
// this invocation of the load algorithm.
self.fired_loadeddata_event.set(false);
// Step 1.
// FIXME(nox): Abort any already-running instance of the
// resource selection algorithm.
// Steps 2-4.
// FIXME(nox): Cancel all tasks related to this element and resolve or
// reject all pending play promises.
// Bumping the generation id lets in-flight fetch contexts detect that
// they are stale.
self.generation_id.set(self.generation_id.get() + 1);
let window = window_from_node(self);
let task_source = window.dom_manipulation_task_source();
// Step 5.
let network_state = self.network_state.get();
if network_state == NetworkState::Loading || network_state == NetworkState::Idle {
task_source.queue_simple_event(self.upcast(), atom!("abort"), &window);
}
// Step 6.
if network_state != NetworkState::Empty {
// Step 6.1.
task_source.queue_simple_event(self.upcast(), atom!("emptied"), &window);
// Step 6.2.
// FIXME(nox): Abort in-progress fetching process.
// Step 6.3.
// FIXME(nox): Detach MediaSource media provider object.
// Step 6.4.
// FIXME(nox): Forget the media-resource-specific tracks.
// Step 6.5.
if self.ready_state.get() != ReadyState::HaveNothing {
self.change_ready_state(ReadyState::HaveNothing);
}
// Step 6.6.
if !self.Paused() {
// Step 6.6.1.
self.paused.set(true);
// Step 6.6.2.
// FIXME(nox): Reject pending play promises.
}
// Step 6.7.
// FIXME(nox): If seeking is true, set it to false.
// Step 6.8.
// FIXME(nox): Set current and official playback position to 0 and
// maybe queue a task to fire a timeupdate event.
// Step 6.9.
// FIXME(nox): Set timeline offset to NaN.
// Step 6.10.
// FIXME(nox): Set duration to NaN.
}
// Step 7.
// FIXME(nox): Set playbackRate to defaultPlaybackRate.
// Step 8.
self.error.set(None);
self.autoplaying.set(true);
// Step 9.
self.invoke_resource_selection_algorithm();
// Step 10.
// FIXME(nox): Stop playback of any previously running media resource.
}
}
impl HTMLMediaElementMethods for HTMLMediaElement {
// https://html.spec.whatwg.org/multipage/#dom-media-networkstate
// The enum discriminants mirror the NETWORK_* IDL constants, so a plain
// cast yields the value script expects.
fn NetworkState(&self) -> u16 {
self.network_state.get() as u16
}
// https://html.spec.whatwg.org/multipage/#dom-media-readystate
// Likewise, the discriminants mirror the HAVE_* IDL constants.
fn ReadyState(&self) -> u16 {
self.ready_state.get() as u16
}
// https://html.spec.whatwg.org/multipage/#dom-media-autoplay
make_bool_getter!(Autoplay, "autoplay");
// https://html.spec.whatwg.org/multipage/#dom-media-autoplay
make_bool_setter!(SetAutoplay, "autoplay");
// https://html.spec.whatwg.org/multipage/#dom-media-src
make_url_getter!(Src, "src");
// https://html.spec.whatwg.org/multipage/#dom-media-src
make_setter!(SetSrc, "src");
// https://html.spec.whatwg.org/multipage/#attr-media-preload
// Missing value default is user-agent defined.
make_enumerated_getter!(Preload, "preload", "", "none" | "metadata" | "auto");
// https://html.spec.whatwg.org/multipage/#attr-media-preload
make_setter!(SetPreload, "preload");
// https://html.spec.whatwg.org/multipage/#dom-media-currentsrc
// Returns the URL of the currently selected resource, or "" before one
// has been selected.
fn CurrentSrc(&self) -> DOMString {
DOMString::from(self.current_src.borrow().clone())
}
// https://html.spec.whatwg.org/multipage/#dom-media-load
// Script-visible load(): restarts the media element load algorithm.
fn Load(&self) {
self.media_element_load_algorithm();
}
// https://html.spec.whatwg.org/multipage/#dom-navigator-canplaytype
fn CanPlayType(&self, type_: DOMString) -> CanPlayTypeResult {
    // Anything that parses as a MIME type is reported as "maybe", except
    // application/octet-stream, for which the spec mandates "".
    match type_.parse::<Mime>() {
        Err(_) |
        Ok(Mime(TopLevel::Application, SubLevel::OctetStream, _)) => CanPlayTypeResult::_empty,
        Ok(_) => CanPlayTypeResult::Maybe,
    }
}
// https://html.spec.whatwg.org/multipage/#dom-media-error
// Returns the most recent MediaError, if any (cleared by the load algorithm).
fn GetError(&self) -> Option<Root<MediaError>> {
self.error.get()
}
// https://html.spec.whatwg.org/multipage/#dom-media-play
// FIXME(nox): This should return a promise.
fn Play(&self) {
// Step 1.
// FIXME(nox): Return a rejected promise if not allowed to play.
// Step 2.
if self.error.get().map_or(false, |e| e.Code() == MEDIA_ERR_SRC_NOT_SUPPORTED) {
// FIXME(nox): This should return a rejected promise.
return;
}
// Step 3.
// Create promise and add it to list of pending play promises.
// Step 4.
// Playing an unloaded element implicitly starts resource selection.
if self.network_state.get() == NetworkState::Empty {
self.invoke_resource_selection_algorithm();
}
// Step 5.
// FIXME(nox): Seek to earliest possible position if playback has ended
// and direction of playback is forwards.
let state = self.ready_state.get();
if self.Paused() {
// Step 6.1.
self.paused.set(false);
// Step 6.2.
// FIXME(nox): Set show poster flag to false and run time marches on
// steps if show poster flag is true.
// Step 6.3.
let window = window_from_node(self);
let task_source = window.dom_manipulation_task_source();
task_source.queue_simple_event(self.upcast(), atom!("play"), &window);
// Step 7.4.
// Depending on readiness, either signal that we are waiting for data
// or notify that playback has started.
match state {
ReadyState::HaveNothing |
ReadyState::HaveMetadata |
ReadyState::HaveCurrentData => {
task_source.queue_simple_event(
self.upcast(),
atom!("waiting"),
&window,
);
},
ReadyState::HaveFutureData |
ReadyState::HaveEnoughData => {
self.notify_about_playing();
}
}
} else if state == ReadyState::HaveFutureData || state == ReadyState::HaveEnoughData {
// Step 7.
// FIXME(nox): Queue a task to resolve pending play promises.
}
// Step 8.
self.autoplaying.set(false);
// Step 9.
// FIXME(nox): Return promise created in step 3.
}
// https://html.spec.whatwg.org/multipage/#dom-media-pause
fn Pause(&self) {
// Step 1
// Per spec, pausing an unloaded element still triggers resource selection.
if self.network_state.get() == NetworkState::Empty {
self.invoke_resource_selection_algorithm();
}
// Step 2
self.internal_pause_steps();
}
// https://html.spec.whatwg.org/multipage/#dom-media-paused
fn Paused(&self) -> bool {
self.paused.get()
}
}
impl VirtualMethods for HTMLMediaElement {
    // Every virtual call not overridden here is forwarded to HTMLElement.
    fn super_type(&self) -> Option<&VirtualMethods> {
        Some(self.upcast::<HTMLElement>() as &VirtualMethods)
    }

    fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
        self.super_type().unwrap().attribute_mutated(attr, mutation);
        // Setting (but not removing) the src attribute restarts the media
        // element load algorithm.
        match attr.local_name() {
            &local_name!("src") if mutation.new_value(attr).is_some() => {
                self.media_element_load_algorithm();
            },
            _ => (),
        }
    }

    // https://html.spec.whatwg.org/multipage/#playing-the-media-resource:remove-an-element-from-a-document
    fn unbind_from_tree(&self, context: &UnbindContext) {
        self.super_type().unwrap().unbind_from_tree(context);
        // Pausing must await a stable state; the microtask re-checks that
        // the element is still out of the document before pausing.
        if context.tree_in_doc {
            let task = MediaElementMicrotask::PauseIfNotInDocumentTask {
                elem: Root::from_ref(self)
            };
            ScriptThread::await_stable_state(Microtask::MediaElement(task));
        }
    }
}
/// Media-element work that is deferred until the event loop reaches a
/// stable state (see ScriptThread::await_stable_state).
#[derive(HeapSizeOf, JSTraceable)]
pub enum MediaElementMicrotask {
/// Runs the synchronous section of the resource selection algorithm,
/// using the base URL captured when the task was queued.
ResourceSelectionTask {
elem: Root<HTMLMediaElement>,
base_url: ServoUrl
},
/// Pauses the element if it is no longer in a document.
PauseIfNotInDocumentTask {
elem: Root<HTMLMediaElement>,
}
}
impl MicrotaskRunnable for MediaElementMicrotask {
    // Dispatches the deferred media-element work once the microtask runs.
    fn handler(&self) {
        match *self {
            MediaElementMicrotask::ResourceSelectionTask { ref elem, ref base_url } => {
                elem.resource_selection_algorithm_sync(base_url.clone());
            },
            MediaElementMicrotask::PauseIfNotInDocumentTask { ref elem } => {
                // Only pause if the element is still detached from a document.
                if !elem.upcast::<Node>().is_in_doc() {
                    elem.internal_pause_steps();
                }
            },
        }
    }
}
/// The kind of media resource handed to the resource fetch algorithm.
enum Resource {
// A media provider object; not supported yet, so fetching it currently
// always runs the dedicated media source failure steps.
Object,
// A resolved URL record to fetch over the network.
Url(ServoUrl),
}
/// Listener state for an in-flight media resource fetch; drives the
/// media data processing steps as response notifications arrive.
struct HTMLMediaElementContext {
/// The element that initiated the request.
elem: Trusted<HTMLMediaElement>,
/// The response body received to date.
data: Vec<u8>,
/// The response metadata received to date.
metadata: Option<Metadata>,
/// The generation of the media element when this fetch started.
generation_id: u32,
/// Earliest time at which the next progress event may be fired
/// (throttled to one event per 350ms in process_response_chunk).
next_progress_event: Timespec,
/// Url of resource requested.
url: ServoUrl,
/// Whether the media metadata has been completely received.
have_metadata: bool,
/// True if this response is invalid and should be ignored.
ignore_response: bool,
}
// https://html.spec.whatwg.org/multipage/#media-data-processing-steps-list
impl FetchResponseListener for HTMLMediaElementContext {
// Nothing to do for request-side notifications.
fn process_request_body(&mut self) {}
fn process_request_eof(&mut self) {}
fn process_response(&mut self, metadata: Result<FetchMetadata, NetworkError>) {
self.metadata = metadata.ok().map(|m| {
match m {
FetchMetadata::Unfiltered(m) => m,
FetchMetadata::Filtered { unsafe_, .. } => unsafe_
}
});
let status_is_ok = self.metadata.as_ref()
.and_then(|m| m.status.as_ref())
.map_or(true, |s| s.0 >= 200 && s.0 < 300);
// => "If the media data cannot be fetched at all..."
if !status_is_ok {
// Ensure that the element doesn't receive any further notifications
// of the aborted fetch. The dedicated failure steps will be
// executed when response_complete runs.
// FIXME(nox): According to the spec, we shouldn't wait to receive
// the whole response before running the dedicated failure steps.
self.ignore_response = true;
}
}
fn process_response_chunk(&mut self, mut payload: Vec<u8>) {
if self.ignore_response {
// An error was received previously, skip processing the payload.
return;
}
self.data.append(&mut payload);
let elem = self.elem.root();
// https://html.spec.whatwg.org/multipage/#media-data-processing-steps-list
// => "Once enough of the media data has been fetched to determine the duration..."
if !self.have_metadata {
self.check_metadata(&elem);
} else {
elem.change_ready_state(ReadyState::HaveCurrentData);
}
// https://html.spec.whatwg.org/multipage/#concept-media-load-resource step 4,
// => "If mode is remote" step 2
if time::get_time() > self.next_progress_event {
let window = window_from_node(&*elem);
window.dom_manipulation_task_source().queue_simple_event(
elem.upcast(),
atom!("progress"),
&window,
);
self.next_progress_event = time::get_time() + Duration::milliseconds(350);
}
}
// https://html.spec.whatwg.org/multipage/#media-data-processing-steps-list
fn process_response_eof(&mut self, status: Result<(), NetworkError>) {
let elem = self.elem.root();
// => "If the media data can be fetched but is found by inspection to be in an unsupported
// format, or can otherwise not be rendered at all"
if !self.have_metadata {
elem.queue_dedicated_media_source_failure_steps();
}
// => "Once the entire media resource has been fetched..."
else if status.is_ok() {
elem.change_ready_state(ReadyState::HaveEnoughData);
elem.upcast::<EventTarget>().fire_event(atom!("progress"));
elem.network_state.set(NetworkState::Idle);
elem.upcast::<EventTarget>().fire_event(atom!("suspend"));
}
// => "If the connection is interrupted after some media data has been received..."
else if elem.ready_state.get() != ReadyState::HaveNothing {
// Step 2
elem.error.set(Some(&*MediaError::new(&*window_from_node(&*elem),
MEDIA_ERR_NETWORK)));
// Step 3
elem.network_state.set(NetworkState::Idle);
// TODO: Step 4 - update delay load flag
// Step 5
elem.upcast::<EventTarget>().fire_event(atom!("error"));
} else {
// => "If the media data cannot be fetched at all..."
elem.queue_dedicated_media_source_failure_steps();
}
let document = document_from_node(&*elem);
document.finish_load(LoadType::Media(self.url.clone()));
}
}
impl PreInvoke for HTMLMediaElementContext {
    /// Only deliver fetch notifications if the fetch that produced them is
    /// still the element's current one.
    fn should_invoke(&self) -> bool {
        // TODO: finish_load needs to run at some point if the generation changes.
        let current_generation = self.elem.root().generation_id.get();
        current_generation == self.generation_id
    }
}
impl HTMLMediaElementContext {
    /// Creates a fresh fetch context for `elem`, targeting `url`.
    fn new(elem: &HTMLMediaElement, url: ServoUrl) -> HTMLMediaElementContext {
        HTMLMediaElementContext {
            elem: Trusted::new(elem),
            generation_id: elem.generation_id.get(),
            url: url,
            data: vec![],
            metadata: None,
            have_metadata: false,
            ignore_response: false,
            next_progress_event: time::get_time() + Duration::milliseconds(350),
        }
    }

    /// Tries to parse the bytes received so far; on success, records the
    /// video details on the element and advances it to `HaveMetadata`.
    fn check_metadata(&mut self, elem: &HTMLMediaElement) {
        if let Ok(audio_video_metadata::Metadata::Video(meta)) =
                audio_video_metadata::get_format_from_slice(&self.data) {
            let dur = meta.audio.duration.unwrap_or(::std::time::Duration::new(0, 0));
            *elem.video.borrow_mut() = Some(VideoMedia {
                format: format!("{:?}", meta.format),
                duration: Duration::seconds(dur.as_secs() as i64) +
                          Duration::nanoseconds(dur.subsec_nanos() as i64),
                width: meta.dimensions.width,
                height: meta.dimensions.height,
                video: meta.video.unwrap_or("".to_owned()),
                audio: meta.audio.audio,
            });
            // Step 6
            elem.change_ready_state(ReadyState::HaveMetadata);
            self.have_metadata = true;
        }
    }
}
Remove some obsolete comments
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use audio_video_metadata;
use document_loader::LoadType;
use dom::attr::Attr;
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::HTMLMediaElementBinding::CanPlayTypeResult;
use dom::bindings::codegen::Bindings::HTMLMediaElementBinding::HTMLMediaElementConstants;
use dom::bindings::codegen::Bindings::HTMLMediaElementBinding::HTMLMediaElementMethods;
use dom::bindings::codegen::Bindings::MediaErrorBinding::MediaErrorConstants::*;
use dom::bindings::codegen::Bindings::MediaErrorBinding::MediaErrorMethods;
use dom::bindings::codegen::InheritTypes::{ElementTypeId, HTMLElementTypeId};
use dom::bindings::codegen::InheritTypes::{HTMLMediaElementTypeId, NodeTypeId};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{MutNullableJS, Root};
use dom::bindings::refcounted::Trusted;
use dom::bindings::reflector::DomObject;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, AttributeMutation};
use dom::eventtarget::EventTarget;
use dom::htmlelement::HTMLElement;
use dom::htmlsourceelement::HTMLSourceElement;
use dom::mediaerror::MediaError;
use dom::node::{window_from_node, document_from_node, Node, UnbindContext};
use dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use microtask::{Microtask, MicrotaskRunnable};
use mime::{Mime, SubLevel, TopLevel};
use net_traits::{FetchResponseListener, FetchMetadata, Metadata, NetworkError};
use net_traits::request::{CredentialsMode, Destination, RequestInit, Type as RequestType};
use network_listener::{NetworkListener, PreInvoke};
use script_thread::ScriptThread;
use servo_url::ServoUrl;
use std::cell::Cell;
use std::sync::{Arc, Mutex};
use task_source::TaskSource;
use time::{self, Timespec, Duration};
/// Shared implementation of the `<audio>` and `<video>` elements.
///
/// https://html.spec.whatwg.org/multipage/#htmlmediaelement
#[dom_struct]
pub struct HTMLMediaElement {
    htmlelement: HTMLElement,
    /// https://html.spec.whatwg.org/multipage/#dom-media-networkstate
    network_state: Cell<NetworkState>,
    /// https://html.spec.whatwg.org/multipage/#dom-media-readystate
    ready_state: Cell<ReadyState>,
    /// https://html.spec.whatwg.org/multipage/#dom-media-currentsrc
    current_src: DOMRefCell<String>,
    /// Incremented by the media element load algorithm; used to discard
    /// fetch notifications belonging to an outdated load (see the
    /// `PreInvoke` impl for `HTMLMediaElementContext`).
    generation_id: Cell<u32>,
    /// https://html.spec.whatwg.org/multipage/#fire-loadeddata
    ///
    /// Reset to false every time the load algorithm is invoked.
    fired_loadeddata_event: Cell<bool>,
    /// https://html.spec.whatwg.org/multipage/#dom-media-error
    error: MutNullableJS<MediaError>,
    /// https://html.spec.whatwg.org/multipage/#dom-media-paused
    paused: Cell<bool>,
    /// https://html.spec.whatwg.org/multipage/#attr-media-autoplay
    autoplaying: Cell<bool>,
    /// The details of the video currently related to this media element.
    // FIXME(nox): Why isn't this in HTMLVideoElement?
    video: DOMRefCell<Option<VideoMedia>>,
}
/// https://html.spec.whatwg.org/multipage/#dom-media-networkstate
///
/// Discriminants match the constants exposed on the DOM interface.
#[derive(Clone, Copy, HeapSizeOf, JSTraceable, PartialEq)]
#[repr(u8)]
enum NetworkState {
    /// The element has not yet been initialised; no resource is selected.
    Empty = HTMLMediaElementConstants::NETWORK_EMPTY as u8,
    /// A resource is selected, but the element is not actively fetching it.
    Idle = HTMLMediaElementConstants::NETWORK_IDLE as u8,
    /// The user agent is actively fetching media data.
    Loading = HTMLMediaElementConstants::NETWORK_LOADING as u8,
    /// No supported resource could be found.
    NoSource = HTMLMediaElementConstants::NETWORK_NO_SOURCE as u8,
}
/// https://html.spec.whatwg.org/multipage/#dom-media-readystate
///
/// The states form an ordered progression (`PartialOrd` is derived so
/// `change_ready_state` can compare transitions).
#[derive(Clone, Copy, HeapSizeOf, JSTraceable, PartialEq, PartialOrd)]
#[repr(u8)]
enum ReadyState {
    HaveNothing = HTMLMediaElementConstants::HAVE_NOTHING as u8,
    HaveMetadata = HTMLMediaElementConstants::HAVE_METADATA as u8,
    HaveCurrentData = HTMLMediaElementConstants::HAVE_CURRENT_DATA as u8,
    HaveFutureData = HTMLMediaElementConstants::HAVE_FUTURE_DATA as u8,
    HaveEnoughData = HTMLMediaElementConstants::HAVE_ENOUGH_DATA as u8,
}
/// Details of a video resource, extracted by the `audio_video_metadata`
/// parser in `HTMLMediaElementContext::check_metadata`.
#[derive(HeapSizeOf, JSTraceable)]
pub struct VideoMedia {
    /// Debug representation of the container format.
    format: String,
    /// Total duration of the resource (zero if the parser reported none).
    #[ignore_heap_size_of = "defined in time"]
    duration: Duration,
    /// Width of the video, from the parsed dimensions.
    width: u32,
    /// Height of the video, from the parsed dimensions.
    height: u32,
    /// Video track descriptor reported by the metadata parser
    /// (empty string when absent).
    video: String,
    /// Audio track descriptor reported by the metadata parser, if any.
    audio: Option<String>,
}
impl HTMLMediaElement {
    /// Creates the state shared by all media elements.
    pub fn new_inherited(
        tag_name: LocalName,
        prefix: Option<Prefix>,
        document: &Document,
    ) -> Self {
        Self {
            htmlelement: HTMLElement::new_inherited(tag_name, prefix, document),
            network_state: Cell::new(NetworkState::Empty),
            ready_state: Cell::new(ReadyState::HaveNothing),
            current_src: DOMRefCell::new("".to_owned()),
            generation_id: Cell::new(0),
            fired_loadeddata_event: Cell::new(false),
            error: Default::default(),
            paused: Cell::new(true),
            // FIXME(nox): Why is this initialised to true?
            autoplaying: Cell::new(true),
            video: DOMRefCell::new(None),
        }
    }

    /// Whether this element is an `<audio>` or a `<video>` element.
    fn media_type_id(&self) -> HTMLMediaElementTypeId {
        match self.upcast::<Node>().type_id() {
            NodeTypeId::Element(ElementTypeId::HTMLElement(
                HTMLElementTypeId::HTMLMediaElement(media_type_id),
            )) => {
                media_type_id
            },
            _ => unreachable!(),
        }
    }

    /// https://html.spec.whatwg.org/multipage/#internal-pause-steps
    fn internal_pause_steps(&self) {
        // Step 1.
        self.autoplaying.set(false);

        // Step 2.
        if !self.Paused() {
            // Step 2.1.
            self.paused.set(true);

            // Step 2.2.
            // FIXME(nox): Take pending play promises and let promises be the
            // result.

            // Step 2.3.
            let window = window_from_node(self);
            let target = Trusted::new(self.upcast::<EventTarget>());
            // FIXME(nox): Why are errors silenced here?
            let _ = window.dom_manipulation_task_source().queue(
                task!(internal_pause_steps: move || {
                    let target = target.root();

                    // Step 2.3.1.
                    target.fire_event(atom!("timeupdate"));

                    // Step 2.3.2.
                    target.fire_event(atom!("pause"));

                    // Step 2.3.3.
                    // FIXME(nox): Reject pending play promises with promises
                    // and an "AbortError" DOMException.
                }),
                window.upcast(),
            );

            // Step 2.4.
            // FIXME(nox): Set the official playback position to the current
            // playback position.
        }
    }

    // https://html.spec.whatwg.org/multipage/#notify-about-playing
    fn notify_about_playing(&self) {
        // Step 1.
        // TODO(nox): Take pending play promises and let promises be the result.

        // Step 2.
        let target = Trusted::new(self.upcast::<EventTarget>());
        let window = window_from_node(self);
        // FIXME(nox): Why are errors silenced here?
        let _ = window.dom_manipulation_task_source().queue(
            task!(notify_about_playing: move || {
                let target = target.root();

                // Step 2.1.
                target.fire_event(atom!("playing"));

                // Step 2.2.
                // FIXME(nox): Resolve pending play promises with promises.
            }),
            window.upcast(),
        );
    }

    // https://html.spec.whatwg.org/multipage/#ready-states
    fn change_ready_state(&self, ready_state: ReadyState) {
        let old_ready_state = self.ready_state.get();
        self.ready_state.set(ready_state);

        // No events are fired while no resource is selected.
        if self.network_state.get() == NetworkState::Empty {
            return;
        }

        let window = window_from_node(self);
        let task_source = window.dom_manipulation_task_source();

        // Step 1.
        match (old_ready_state, ready_state) {
            (ReadyState::HaveNothing, ReadyState::HaveMetadata) => {
                task_source.queue_simple_event(
                    self.upcast(),
                    atom!("loadedmetadata"),
                    &window,
                );

                // No other steps are applicable in this case.
                return;
            },
            (ReadyState::HaveMetadata, new) if new >= ReadyState::HaveCurrentData => {
                // "loadeddata" is fired at most once per invocation of the
                // load algorithm.
                if !self.fired_loadeddata_event.get() {
                    self.fired_loadeddata_event.set(true);
                    task_source.queue_simple_event(
                        self.upcast(),
                        atom!("loadeddata"),
                        &window,
                    );
                }

                // Steps for the transition from HaveMetadata to HaveCurrentData
                // or HaveFutureData also apply here, as per the next match
                // expression.
            },
            (ReadyState::HaveFutureData, new) if new <= ReadyState::HaveCurrentData => {
                // FIXME(nox): Queue a task to fire timeupdate and waiting
                // events if the conditions call from the spec are met.

                // No other steps are applicable in this case.
                return;
            },

            _ => (),
        }

        if old_ready_state <= ReadyState::HaveCurrentData && ready_state >= ReadyState::HaveFutureData {
            task_source.queue_simple_event(
                self.upcast(),
                atom!("canplay"),
                &window,
            );

            if !self.Paused() {
                self.notify_about_playing();
            }
        }

        if ready_state == ReadyState::HaveEnoughData {
            // TODO: Check sandboxed automatic features browsing context flag.
            // FIXME(nox): I have no idea what this TODO is about.

            // FIXME(nox): Review this block.
            if self.autoplaying.get() &&
                self.Paused() &&
                self.Autoplay() {
                // Step 1
                self.paused.set(false);
                // TODO step 2: show poster
                // Step 3
                task_source.queue_simple_event(
                    self.upcast(),
                    atom!("play"),
                    &window,
                );
                // Step 4
                self.notify_about_playing();
                // Step 5
                self.autoplaying.set(false);
            }

            // FIXME(nox): According to the spec, this should come *before* the
            // "play" event.
            task_source.queue_simple_event(
                self.upcast(),
                atom!("canplaythrough"),
                &window,
            );
        }
    }

    // https://html.spec.whatwg.org/multipage/#concept-media-load-algorithm
    fn invoke_resource_selection_algorithm(&self) {
        // Step 1.
        self.network_state.set(NetworkState::NoSource);

        // Step 2.
        // FIXME(nox): Set show poster flag to true.

        // Step 3.
        // FIXME(nox): Set the delaying-the-load-event flag to true.

        // Step 4.
        // If the resource selection mode in the synchronous section is
        // "attribute", the URL of the resource to fetch is relative to the
        // media element's node document when the src attribute was last
        // changed, which is why we need to pass the base URL in the task
        // right here.
        let doc = document_from_node(self);
        let task = MediaElementMicrotask::ResourceSelectionTask {
            elem: Root::from_ref(self),
            base_url: doc.base_url()
        };

        // FIXME(nox): This will later call the resource_selection_algorithm_sync
        // method from below, if microtasks were trait objects, we would be able
        // to put the code directly in this method, without the boilerplate
        // indirections.
        ScriptThread::await_stable_state(Microtask::MediaElement(task));
    }

    // https://html.spec.whatwg.org/multipage/#concept-media-load-algorithm
    fn resource_selection_algorithm_sync(&self, base_url: ServoUrl) {
        // Step 5.
        // FIXME(nox): Maybe populate the list of pending text tracks.

        // Step 6.
        enum Mode {
            // FIXME(nox): Support media object provider.
            #[allow(dead_code)]
            Object,
            Attribute(String),
            // FIXME(nox): Support source element child.
            #[allow(dead_code)]
            Children(Root<HTMLSourceElement>),
        }
        // Only the `src` attribute mode is implemented: without it, the
        // element goes back to the Empty network state.
        let mode = if let Some(attr) = self.upcast::<Element>().get_attribute(&ns!(), &local_name!("src")) {
            Mode::Attribute(attr.Value().into())
        } else {
            self.network_state.set(NetworkState::Empty);
            return;
        };

        // Step 7.
        self.network_state.set(NetworkState::Loading);

        // Step 8.
        let window = window_from_node(self);
        window.dom_manipulation_task_source().queue_simple_event(
            self.upcast(),
            atom!("loadstart"),
            &window,
        );

        // Step 9.
        match mode {
            // Step 9.obj.
            Mode::Object => {
                // Step 9.obj.1.
                *self.current_src.borrow_mut() = "".to_owned();

                // Step 9.obj.2.
                // FIXME(nox): The rest of the steps should be ran in parallel.

                // Step 9.obj.3.
                // Note that the resource fetch algorithm itself takes care
                // of the cleanup in case of failure itself.
                // FIXME(nox): Pass the assigned media provider here.
                self.resource_fetch_algorithm(Resource::Object);
            },
            Mode::Attribute(src) => {
                // Step 9.attr.1.
                if src.is_empty() {
                    self.queue_dedicated_media_source_failure_steps();
                    return;
                }

                // Step 9.attr.2.
                let url_record = match base_url.join(&src) {
                    Ok(url) => url,
                    Err(_) => {
                        self.queue_dedicated_media_source_failure_steps();
                        return;
                    }
                };

                // Step 9.attr.3.
                *self.current_src.borrow_mut() = url_record.as_str().into();

                // Step 9.attr.4.
                // Note that the resource fetch algorithm itself takes care
                // of the cleanup in case of failure itself.
                self.resource_fetch_algorithm(Resource::Url(url_record));
            },
            Mode::Children(_source) => {
                // Step 9.children.
                self.queue_dedicated_media_source_failure_steps()
            },
        }
    }

    // https://html.spec.whatwg.org/multipage/#concept-media-load-resource
    fn resource_fetch_algorithm(&self, resource: Resource) {
        // Steps 1-2.
        // Unapplicable, the `resource` variable already conveys which mode
        // is in use.

        // Step 3.
        // FIXME(nox): Remove all media-resource-specific text tracks.

        // Step 4.
        match resource {
            Resource::Url(url) => {
                // Step 4.remote.1.
                if self.Preload() == "none" && !self.autoplaying.get() {
                    // Step 4.remote.1.1.
                    self.network_state.set(NetworkState::Idle);

                    // Step 4.remote.1.2.
                    let window = window_from_node(self);
                    window.dom_manipulation_task_source().queue_simple_event(
                        self.upcast(),
                        atom!("suspend"),
                        &window,
                    );

                    // Step 4.remote.1.3.
                    // FIXME(nox): Queue a task to set the delaying-the-load-event
                    // flag to false.

                    // Steps 4.remote.1.4.
                    // FIXME(nox): Somehow we should wait for the task from previous
                    // step to be ran before continuing.

                    // Steps 4.remote.1.5-4.remote.1.7.
                    // FIXME(nox): Wait for an implementation-defined event and
                    // then continue with the normal set of steps instead of just
                    // returning.
                    return;
                }

                // Step 4.remote.2.
                // FIXME(nox): Handle CORS setting from crossorigin attribute.
                let document = document_from_node(self);
                let type_ = match self.media_type_id() {
                    HTMLMediaElementTypeId::HTMLAudioElement => RequestType::Audio,
                    HTMLMediaElementTypeId::HTMLVideoElement => RequestType::Video,
                };
                let request = RequestInit {
                    url: url.clone(),
                    type_,
                    destination: Destination::Media,
                    credentials_mode: CredentialsMode::Include,
                    use_url_credentials: true,
                    origin: document.origin().immutable().clone(),
                    pipeline_id: Some(self.global().pipeline_id()),
                    referrer_url: Some(document.url()),
                    referrer_policy: document.get_referrer_policy(),
                    .. RequestInit::default()
                };

                let context = Arc::new(Mutex::new(HTMLMediaElementContext::new(self, url.clone())));
                let (action_sender, action_receiver) = ipc::channel().unwrap();
                let window = window_from_node(self);
                let listener = NetworkListener {
                    context: context,
                    task_source: window.networking_task_source(),
                    canceller: Some(window.task_canceller())
                };
                // Route fetch messages back to the script thread, where
                // `listener` forwards them to this element's fetch context.
                ROUTER.add_route(action_receiver.to_opaque(), box move |message| {
                    listener.notify_fetch(message.to().unwrap());
                });
                document.fetch_async(LoadType::Media(url), request, action_sender);
            },
            Resource::Object => {
                // FIXME(nox): Use the current media resource.
                self.queue_dedicated_media_source_failure_steps();
            },
        }
    }

    /// Queues the [dedicated media source failure steps][steps].
    ///
    /// [steps]: https://html.spec.whatwg.org/multipage/#dedicated-media-source-failure-steps
    fn queue_dedicated_media_source_failure_steps(&self) {
        let this = Trusted::new(self);
        let window = window_from_node(self);
        // FIXME(nox): Why are errors silenced here?
        let _ = window.dom_manipulation_task_source().queue(
            task!(dedicated_media_source_failure_steps: move || {
                let this = this.root();

                // Step 1.
                this.error.set(Some(&*MediaError::new(
                    &window_from_node(&*this),
                    MEDIA_ERR_SRC_NOT_SUPPORTED,
                )));

                // Step 2.
                // FIXME(nox): Forget the media-resource-specific tracks.

                // Step 3.
                this.network_state.set(NetworkState::NoSource);

                // Step 4.
                // FIXME(nox): Set show poster flag to true.

                // Step 5.
                this.upcast::<EventTarget>().fire_event(atom!("error"));

                // Step 6.
                // FIXME(nox): Reject pending play promises.

                // Step 7.
                // FIXME(nox): Set the delaying-the-load-event flag to false.
            }),
            window.upcast(),
        );
    }

    // https://html.spec.whatwg.org/multipage/#media-element-load-algorithm
    fn media_element_load_algorithm(&self) {
        // Reset the flag that signals whether loadeddata was ever fired for
        // this invocation of the load algorithm.
        self.fired_loadeddata_event.set(false);

        // Step 1.
        // FIXME(nox): Abort any already-running instance of the
        // resource selection algorithm.

        // Steps 2-4.
        // FIXME(nox): Cancel all tasks related to this element and resolve or
        // reject all pending play promises.
        // Bumping the generation makes in-flight fetch notifications for any
        // previous load be discarded (see `PreInvoke for HTMLMediaElementContext`).
        self.generation_id.set(self.generation_id.get() + 1);

        let window = window_from_node(self);
        let task_source = window.dom_manipulation_task_source();

        // Step 5.
        let network_state = self.network_state.get();
        if network_state == NetworkState::Loading || network_state == NetworkState::Idle {
            task_source.queue_simple_event(self.upcast(), atom!("abort"), &window);
        }

        // Step 6.
        if network_state != NetworkState::Empty {
            // Step 6.1.
            task_source.queue_simple_event(self.upcast(), atom!("emptied"), &window);

            // Step 6.2.
            // FIXME(nox): Abort in-progress fetching process.

            // Step 6.3.
            // FIXME(nox): Detach MediaSource media provider object.

            // Step 6.4.
            // FIXME(nox): Forget the media-resource-specific tracks.

            // Step 6.5.
            if self.ready_state.get() != ReadyState::HaveNothing {
                self.change_ready_state(ReadyState::HaveNothing);
            }

            // Step 6.6.
            if !self.Paused() {
                // Step 6.6.1.
                self.paused.set(true);

                // Step 6.6.2.
                // FIXME(nox): Reject pending play promises.
            }

            // Step 6.7.
            // FIXME(nox): If seeking is true, set it to false.

            // Step 6.8.
            // FIXME(nox): Set current and official playback position to 0 and
            // maybe queue a task to fire a timeupdate event.

            // Step 6.9.
            // FIXME(nox): Set timeline offset to NaN.

            // Step 6.10.
            // FIXME(nox): Set duration to NaN.
        }

        // Step 7.
        // FIXME(nox): Set playbackRate to defaultPlaybackRate.

        // Step 8.
        self.error.set(None);
        self.autoplaying.set(true);

        // Step 9.
        self.invoke_resource_selection_algorithm();

        // Step 10.
        // FIXME(nox): Stop playback of any previously running media resource.
    }
}
impl HTMLMediaElementMethods for HTMLMediaElement {
    // https://html.spec.whatwg.org/multipage/#dom-media-networkstate
    fn NetworkState(&self) -> u16 {
        self.network_state.get() as u16
    }

    // https://html.spec.whatwg.org/multipage/#dom-media-readystate
    fn ReadyState(&self) -> u16 {
        self.ready_state.get() as u16
    }

    // https://html.spec.whatwg.org/multipage/#dom-media-autoplay
    make_bool_getter!(Autoplay, "autoplay");
    // https://html.spec.whatwg.org/multipage/#dom-media-autoplay
    make_bool_setter!(SetAutoplay, "autoplay");

    // https://html.spec.whatwg.org/multipage/#dom-media-src
    make_url_getter!(Src, "src");
    // https://html.spec.whatwg.org/multipage/#dom-media-src
    make_setter!(SetSrc, "src");

    // https://html.spec.whatwg.org/multipage/#attr-media-preload
    // Missing value default is user-agent defined.
    make_enumerated_getter!(Preload, "preload", "", "none" | "metadata" | "auto");
    // https://html.spec.whatwg.org/multipage/#attr-media-preload
    make_setter!(SetPreload, "preload");

    // https://html.spec.whatwg.org/multipage/#dom-media-currentsrc
    fn CurrentSrc(&self) -> DOMString {
        DOMString::from(self.current_src.borrow().clone())
    }

    // https://html.spec.whatwg.org/multipage/#dom-media-load
    fn Load(&self) {
        self.media_element_load_algorithm();
    }

    // https://html.spec.whatwg.org/multipage/#dom-navigator-canplaytype
    fn CanPlayType(&self, type_: DOMString) -> CanPlayTypeResult {
        match type_.parse::<Mime>() {
            // Unparseable MIME types and application/octet-stream can
            // definitely not be played; anything else might be.
            Ok(Mime(TopLevel::Application, SubLevel::OctetStream, _)) |
            Err(_) => {
                CanPlayTypeResult::_empty
            },
            _ => CanPlayTypeResult::Maybe
        }
    }

    // https://html.spec.whatwg.org/multipage/#dom-media-error
    fn GetError(&self) -> Option<Root<MediaError>> {
        self.error.get()
    }

    // https://html.spec.whatwg.org/multipage/#dom-media-play
    // FIXME(nox): This should return a promise.
    fn Play(&self) {
        // Step 1.
        // FIXME(nox): Return a rejected promise if not allowed to play.

        // Step 2.
        if self.error.get().map_or(false, |e| e.Code() == MEDIA_ERR_SRC_NOT_SUPPORTED) {
            // FIXME(nox): This should return a rejected promise.
            return;
        }

        // Step 3.
        // Create promise and add it to list of pending play promises.

        // Step 4.
        if self.network_state.get() == NetworkState::Empty {
            self.invoke_resource_selection_algorithm();
        }

        // Step 5.
        // FIXME(nox): Seek to earliest possible position if playback has ended
        // and direction of playback is forwards.

        let state = self.ready_state.get();

        if self.Paused() {
            // Step 6.1.
            self.paused.set(false);

            // Step 6.2.
            // FIXME(nox): Set show poster flag to false and run time marches on
            // steps if show poster flag is true.

            // Step 6.3.
            let window = window_from_node(self);
            let task_source = window.dom_manipulation_task_source();
            task_source.queue_simple_event(self.upcast(), atom!("play"), &window);

            // Step 7.4.
            match state {
                ReadyState::HaveNothing |
                ReadyState::HaveMetadata |
                ReadyState::HaveCurrentData => {
                    task_source.queue_simple_event(
                        self.upcast(),
                        atom!("waiting"),
                        &window,
                    );
                },
                ReadyState::HaveFutureData |
                ReadyState::HaveEnoughData => {
                    self.notify_about_playing();
                }
            }
        } else if state == ReadyState::HaveFutureData || state == ReadyState::HaveEnoughData {
            // Step 7.
            // FIXME(nox): Queue a task to resolve pending play promises.
        }

        // Step 8.
        self.autoplaying.set(false);

        // Step 9.
        // FIXME(nox): Return promise created in step 3.
    }

    // https://html.spec.whatwg.org/multipage/#dom-media-pause
    fn Pause(&self) {
        // Step 1
        if self.network_state.get() == NetworkState::Empty {
            self.invoke_resource_selection_algorithm();
        }

        // Step 2
        self.internal_pause_steps();
    }

    // https://html.spec.whatwg.org/multipage/#dom-media-paused
    fn Paused(&self) -> bool {
        self.paused.get()
    }
}
impl VirtualMethods for HTMLMediaElement {
    fn super_type(&self) -> Option<&VirtualMethods> {
        Some(self.upcast::<HTMLElement>() as &VirtualMethods)
    }

    /// Changing the `src` attribute to a new value triggers the media
    /// element load algorithm.
    fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
        self.super_type().unwrap().attribute_mutated(attr, mutation);

        if attr.local_name() == &local_name!("src") && mutation.new_value(attr).is_some() {
            self.media_element_load_algorithm();
        }
    }

    // https://html.spec.whatwg.org/multipage/#playing-the-media-resource:remove-an-element-from-a-document
    fn unbind_from_tree(&self, context: &UnbindContext) {
        self.super_type().unwrap().unbind_from_tree(context);

        if !context.tree_in_doc {
            return;
        }
        // Queue a microtask; it only pauses if the element has not been
        // reinserted into a document by the time it runs.
        let task = MediaElementMicrotask::PauseIfNotInDocumentTask {
            elem: Root::from_ref(self),
        };
        ScriptThread::await_stable_state(Microtask::MediaElement(task));
    }
}
/// Microtasks queued by an `HTMLMediaElement`, run "in a stable state"
/// via `ScriptThread::await_stable_state`.
#[derive(HeapSizeOf, JSTraceable)]
pub enum MediaElementMicrotask {
    /// Continues the resource selection algorithm in its synchronous
    /// section (`resource_selection_algorithm_sync`).
    ResourceSelectionTask {
        elem: Root<HTMLMediaElement>,
        /// Base URL of the element's node document, captured when the task
        /// was queued.
        base_url: ServoUrl
    },
    /// Runs the internal pause steps if the element is no longer in a
    /// document by the time the microtask runs.
    PauseIfNotInDocumentTask {
        elem: Root<HTMLMediaElement>,
    }
}
impl MicrotaskRunnable for MediaElementMicrotask {
    /// Dispatches each queued microtask to the media element that queued it.
    fn handler(&self) {
        match *self {
            MediaElementMicrotask::ResourceSelectionTask { ref elem, ref base_url } => {
                elem.resource_selection_algorithm_sync(base_url.clone())
            },
            MediaElementMicrotask::PauseIfNotInDocumentTask { ref elem } => {
                // Only pause if the element was not reinserted into a
                // document between queueing and running this task.
                if !elem.upcast::<Node>().is_in_doc() {
                    elem.internal_pause_steps()
                }
            },
        }
    }
}
/// The kind of media resource chosen by the resource selection algorithm.
enum Resource {
    /// A media provider object. Not supported yet: fetching it runs the
    /// dedicated media source failure steps.
    Object,
    /// A URL record pointing at the media resource to fetch.
    Url(ServoUrl),
}
/// Listener state for one in-flight fetch of a media resource.
struct HTMLMediaElementContext {
    /// The element that initiated the request.
    elem: Trusted<HTMLMediaElement>,
    /// The response body received to date.
    data: Vec<u8>,
    /// The response metadata received to date.
    metadata: Option<Metadata>,
    /// The generation of the media element when this fetch started.
    /// Responses from an outdated generation are not delivered
    /// (see the `PreInvoke` impl below).
    generation_id: u32,
    /// Time of last progress notification.
    next_progress_event: Timespec,
    /// Url of resource requested.
    url: ServoUrl,
    /// Whether the media metadata has been completely received.
    have_metadata: bool,
    /// True if this response is invalid and should be ignored.
    ignore_response: bool,
}
// https://html.spec.whatwg.org/multipage/#media-data-processing-steps-list
impl FetchResponseListener for HTMLMediaElementContext {
    /// No-op: nothing to do while the request body is being sent.
    fn process_request_body(&mut self) {}

    /// No-op: nothing to do once the request body has been fully sent.
    fn process_request_eof(&mut self) {}

    fn process_response(&mut self, metadata: Result<FetchMetadata, NetworkError>) {
        // Keep the unfiltered metadata so the real status can be inspected.
        self.metadata = metadata.ok().map(|m| {
            match m {
                FetchMetadata::Unfiltered(m) => m,
                FetchMetadata::Filtered { unsafe_, .. } => unsafe_
            }
        });

        // A missing metadata or status is treated as success; only an
        // explicit non-2xx status counts as a failure.
        let status_is_ok = self.metadata.as_ref()
            .and_then(|m| m.status.as_ref())
            .map_or(true, |s| s.0 >= 200 && s.0 < 300);

        // => "If the media data cannot be fetched at all..."
        if !status_is_ok {
            // Ensure that the element doesn't receive any further notifications
            // of the aborted fetch. The dedicated failure steps will be
            // executed when response_complete runs.
            // FIXME(nox): According to the spec, we shouldn't wait to receive
            // the whole response before running the dedicated failure steps.
            self.ignore_response = true;
        }
    }

    fn process_response_chunk(&mut self, mut payload: Vec<u8>) {
        if self.ignore_response {
            // An error was received previously, skip processing the payload.
            return;
        }
        self.data.append(&mut payload);

        let elem = self.elem.root();

        // https://html.spec.whatwg.org/multipage/#media-data-processing-steps-list
        // => "Once enough of the media data has been fetched to determine the duration..."
        if !self.have_metadata {
            self.check_metadata(&elem);
        } else {
            elem.change_ready_state(ReadyState::HaveCurrentData);
        }

        // https://html.spec.whatwg.org/multipage/#concept-media-load-resource step 4,
        // => "If mode is remote" step 2
        if time::get_time() > self.next_progress_event {
            let window = window_from_node(&*elem);
            window.dom_manipulation_task_source().queue_simple_event(
                elem.upcast(),
                atom!("progress"),
                &window,
            );
            // Throttle progress events to at most one every 350ms.
            self.next_progress_event = time::get_time() + Duration::milliseconds(350);
        }
    }

    // https://html.spec.whatwg.org/multipage/#media-data-processing-steps-list
    fn process_response_eof(&mut self, status: Result<(), NetworkError>) {
        let elem = self.elem.root();

        // => "If the media data can be fetched but is found by inspection to be in an unsupported
        // format, or can otherwise not be rendered at all"
        if !self.have_metadata {
            elem.queue_dedicated_media_source_failure_steps();
        }
        // => "Once the entire media resource has been fetched..."
        else if status.is_ok() {
            elem.change_ready_state(ReadyState::HaveEnoughData);

            elem.upcast::<EventTarget>().fire_event(atom!("progress"));

            elem.network_state.set(NetworkState::Idle);

            elem.upcast::<EventTarget>().fire_event(atom!("suspend"));
        }
        // => "If the connection is interrupted after some media data has been received..."
        else if elem.ready_state.get() != ReadyState::HaveNothing {
            // Step 2
            elem.error.set(Some(&*MediaError::new(&*window_from_node(&*elem),
                                                  MEDIA_ERR_NETWORK)));

            // Step 3
            elem.network_state.set(NetworkState::Idle);

            // TODO: Step 4 - update delay load flag

            // Step 5
            elem.upcast::<EventTarget>().fire_event(atom!("error"));
        } else {
            // => "If the media data cannot be fetched at all..."
            elem.queue_dedicated_media_source_failure_steps();
        }

        // The fetch is over either way; let the document account for it.
        let document = document_from_node(&*elem);
        document.finish_load(LoadType::Media(self.url.clone()));
    }
}
impl PreInvoke for HTMLMediaElementContext {
    /// Only deliver fetch notifications if the fetch that produced them is
    /// still the element's current one.
    fn should_invoke(&self) -> bool {
        // TODO: finish_load needs to run at some point if the generation changes.
        let current_generation = self.elem.root().generation_id.get();
        current_generation == self.generation_id
    }
}
impl HTMLMediaElementContext {
    /// Creates a fresh fetch context for `elem`, targeting `url`.
    fn new(elem: &HTMLMediaElement, url: ServoUrl) -> HTMLMediaElementContext {
        HTMLMediaElementContext {
            elem: Trusted::new(elem),
            generation_id: elem.generation_id.get(),
            url: url,
            data: vec![],
            metadata: None,
            have_metadata: false,
            ignore_response: false,
            next_progress_event: time::get_time() + Duration::milliseconds(350),
        }
    }

    /// Tries to parse the bytes received so far; on success, records the
    /// video details on the element and advances it to `HaveMetadata`.
    fn check_metadata(&mut self, elem: &HTMLMediaElement) {
        if let Ok(audio_video_metadata::Metadata::Video(meta)) =
                audio_video_metadata::get_format_from_slice(&self.data) {
            let dur = meta.audio.duration.unwrap_or(::std::time::Duration::new(0, 0));
            *elem.video.borrow_mut() = Some(VideoMedia {
                format: format!("{:?}", meta.format),
                duration: Duration::seconds(dur.as_secs() as i64) +
                          Duration::nanoseconds(dur.subsec_nanos() as i64),
                width: meta.dimensions.width,
                height: meta.dimensions.height,
                video: meta.video.unwrap_or("".to_owned()),
                audio: meta.audio.audio,
            });
            // Step 6
            elem.change_ready_state(ReadyState::HaveMetadata);
            self.have_metadata = true;
        }
    }
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Necessary types for [grid](https://drafts.csswg.org/css-grid/).
use cssparser::{Parser, Token, serialize_identifier};
use parser::{Parse, ParserContext};
use std::ascii::AsciiExt;
use std::fmt;
use std::mem;
use style_traits::ToCss;
use values::{CSSFloat, CustomIdent, HasViewportPercentage};
use values::computed::{ComputedValueAsSpecified, Context, ToComputedValue};
use values::specified::{Integer, LengthOrPercentage};
#[derive(PartialEq, Clone, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// A `<grid-line>` type.
///
/// When all three fields are unset, the value is `auto` (see the `Default`
/// impl and `to_css`).
///
/// https://drafts.csswg.org/css-grid/#typedef-grid-row-start-grid-line
pub struct GridLine {
    /// Flag to check whether it's a `span` keyword.
    pub is_span: bool,
    /// A custom identifier for named lines.
    ///
    /// https://drafts.csswg.org/css-grid/#grid-placement-slot
    pub ident: Option<String>,
    /// Denotes the nth grid line from grid item's placement.
    /// Zero is rejected at parse time.
    pub integer: Option<i32>,
}
impl Default for GridLine {
    fn default() -> Self {
        // The default `<grid-line>` is the `auto` keyword: not a span, with
        // neither a name nor a line number.
        GridLine { is_span: false, ident: None, integer: None }
    }
}
impl ToCss for GridLine {
    /// Serialize a `<grid-line>` value.
    ///
    /// The default value (all fields unset) serializes as `auto`; otherwise
    /// the components are written in `span`, integer, ident order separated
    /// by single spaces. The previous code unconditionally prefixed the
    /// integer/ident with a space, which produced a leading space (e.g.
    /// `" 4"` or `" foo"`) whenever `span` was absent; a separator is now
    /// only written between two emitted components.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        if !self.is_span && self.ident.is_none() && self.integer.is_none() {
            return dest.write_str("auto")
        }

        // Tracks whether a component has already been written, so that a
        // space is only emitted *between* components.
        let mut need_sep = false;
        if self.is_span {
            try!(dest.write_str("span"));
            need_sep = true;
        }

        if let Some(i) = self.integer {
            if need_sep {
                try!(dest.write_str(" "));
            }
            try!(write!(dest, "{}", i));
            need_sep = true;
        }

        if let Some(ref s) = self.ident {
            if need_sep {
                try!(dest.write_str(" "));
            }
            try!(dest.write_str(s));
        }

        Ok(())
    }
}
impl Parse for GridLine {
    /// Parse a `<grid-line>` value.
    ///
    /// Grammar: `auto | <custom-ident> | [ <integer> && <custom-ident>? ] |
    /// [ span && [ <integer> || <custom-ident> ] ]`
    fn parse(_context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
        let mut grid_line = Default::default();
        if input.try(|i| i.expect_ident_matching("auto")).is_ok() {
            return Ok(grid_line)
        }

        // The three components (`span`, <integer>, <custom-ident>) may appear
        // in any order, but each at most once.
        for _ in 0..3 { // Maximum possible entities for <grid-line>
            if input.try(|i| i.expect_ident_matching("span")).is_ok() {
                if grid_line.is_span {
                    // `span` already seen once.
                    return Err(())
                }
                grid_line.is_span = true;
            } else if let Ok(i) = input.try(|i| i.expect_integer()) {
                if i == 0 || grid_line.integer.is_some() {
                    // Line zero does not exist, and the integer may only
                    // occur once.
                    return Err(())
                }
                grid_line.integer = Some(i);
            } else if let Ok(name) = input.try(|i| i.expect_ident()) {
                if grid_line.ident.is_some() {
                    return Err(())
                }
                // NOTE(review): a bare `auto` at this position would be
                // accepted as a <custom-ident> here — confirm whether the
                // CSS-wide keywords should be rejected as line names.
                grid_line.ident = Some(name.into_owned());
            } else {
                break
            }
        }

        if grid_line.is_span {
            if let Some(i) = grid_line.integer {
                if i < 0 { // disallow negative integers for grid spans
                    return Err(())
                }
            } else {
                // `span` with no explicit count defaults to 1.
                grid_line.integer = Some(1);
            }
        }

        Ok(grid_line)
    }
}

// The computed value of a `<grid-line>` is the specified value itself, and
// it can never contain viewport-relative units.
impl ComputedValueAsSpecified for GridLine {}
no_viewport_percentage!(GridLine);

// Track sizing keywords shared by `<track-breadth>` and `minmax()`.
define_css_keyword_enum!{ TrackKeyword:
    "auto" => Auto,
    "max-content" => MaxContent,
    "min-content" => MinContent
}
#[derive(Clone, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// A track breadth for explicit grid track sizing. It's generic solely to
/// avoid re-implementing it for the computed type.
///
/// In this (specified) module, `L` is `LengthOrPercentage`.
///
/// https://drafts.csswg.org/css-grid/#typedef-track-breadth
pub enum TrackBreadth<L> {
    /// The generic type is almost always a non-negative `<length-percentage>`
    Breadth(L),
    /// A flex fraction specified in `fr` units.
    Flex(CSSFloat),
    /// One of the track-sizing keywords (`auto`, `min-content`, `max-content`)
    Keyword(TrackKeyword),
}
impl<L> TrackBreadth<L> {
    /// Check whether this is a `<fixed-breadth>` (i.e., it only has `<length-percentage>`)
    ///
    /// https://drafts.csswg.org/css-grid/#typedef-fixed-breadth
    #[inline]
    pub fn is_fixed(&self) -> bool {
        // Only the length-percentage variant qualifies; flex values and
        // keywords never do.
        if let TrackBreadth::Breadth(..) = *self {
            true
        } else {
            false
        }
    }
}
/// Parse a single flexible length.
///
/// Accepts a dimension with the `fr` unit (ASCII case-insensitive). Negative
/// values are rejected; `0fr` parses, since `is_sign_positive` holds for
/// `0.0`.
pub fn parse_flex(input: &mut Parser) -> Result<CSSFloat, ()> {
    match try!(input.next()) {
        Token::Dimension(ref value, ref unit) if unit.eq_ignore_ascii_case("fr") && value.value.is_sign_positive()
            => Ok(value.value),
        _ => Err(()),
    }
}

impl Parse for TrackBreadth<LengthOrPercentage> {
    /// Parse a `<track-breadth>`.
    ///
    /// The alternatives are tried in order: a non-negative
    /// `<length-percentage>`, then a `<flex>` (`fr`) value, and finally one
    /// of the sizing keywords (`auto` / `min-content` / `max-content`).
    fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
        if let Ok(lop) = input.try(|i| LengthOrPercentage::parse_non_negative(context, i)) {
            return Ok(TrackBreadth::Breadth(lop))
        }

        if let Ok(f) = input.try(parse_flex) {
            return Ok(TrackBreadth::Flex(f))
        }

        TrackKeyword::parse(input).map(TrackBreadth::Keyword)
    }
}
impl<L: ToCss> ToCss for TrackBreadth<L> {
    /// Serialize a `<track-breadth>`; flex fractions get the `fr` suffix.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        match *self {
            TrackBreadth::Breadth(ref breadth) => breadth.to_css(dest),
            TrackBreadth::Flex(ref fraction) => write!(dest, "{}fr", fraction),
            TrackBreadth::Keyword(ref keyword) => keyword.to_css(dest),
        }
    }
}
impl HasViewportPercentage for TrackBreadth<LengthOrPercentage> {
    #[inline]
    fn has_viewport_percentage(&self) -> bool {
        // Only the length-percentage variant can carry viewport units.
        match *self {
            TrackBreadth::Breadth(ref lop) => lop.has_viewport_percentage(),
            _ => false,
        }
    }
}
impl<L: ToComputedValue> ToComputedValue for TrackBreadth<L> {
    type ComputedValue = TrackBreadth<L::ComputedValue>;

    /// Only the breadth component needs computation; flex fractions and
    /// keywords are carried over unchanged.
    #[inline]
    fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
        match *self {
            TrackBreadth::Breadth(ref lop) => TrackBreadth::Breadth(lop.to_computed_value(context)),
            TrackBreadth::Flex(fr) => TrackBreadth::Flex(fr),
            TrackBreadth::Keyword(k) => TrackBreadth::Keyword(k),
        }
    }

    #[inline]
    fn from_computed_value(computed: &Self::ComputedValue) -> Self {
        match *computed {
            TrackBreadth::Breadth(ref lop) =>
                TrackBreadth::Breadth(ToComputedValue::from_computed_value(lop)),
            TrackBreadth::Flex(fr) => TrackBreadth::Flex(fr),
            TrackBreadth::Keyword(k) => TrackBreadth::Keyword(k),
        }
    }
}

#[derive(Clone, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// A `<track-size>` type for explicit grid track sizing. Like `<track-breadth>`, this is
/// generic only to avoid code bloat. It only takes `<length-percentage>`
///
/// The initial value is `Breadth(Keyword(Auto))` (see the `Default` impl).
///
/// https://drafts.csswg.org/css-grid/#typedef-track-size
pub enum TrackSize<L> {
    /// A flexible `<track-breadth>`
    Breadth(TrackBreadth<L>),
    /// A `minmax` function for a range over an inflexible `<track-breadth>`
    /// and a flexible `<track-breadth>`
    ///
    /// https://drafts.csswg.org/css-grid/#valdef-grid-template-columns-minmax
    MinMax(TrackBreadth<L>, TrackBreadth<L>),
    /// A `fit-content` function.
    ///
    /// https://drafts.csswg.org/css-grid/#valdef-grid-template-columns-fit-content
    FitContent(L),
}
impl<L> TrackSize<L> {
    /// Check whether this is a `<fixed-size>`
    ///
    /// https://drafts.csswg.org/css-grid/#typedef-fixed-size
    pub fn is_fixed(&self) -> bool {
        match *self {
            TrackSize::Breadth(ref breadth) => breadth.is_fixed(),
            // A minmax() is fixed either when its first argument is a
            // <fixed-breadth> (the second may then be any <track-breadth>),
            // or when the first is an <inflexible-breadth> keyword and the
            // second is a <fixed-breadth>. Both variants are subsets of
            // minmax(<inflexible-breadth>, <track-breadth>), so parsing
            // doesn't need to distinguish them.
            TrackSize::MinMax(ref first, ref second) => {
                if first.is_fixed() {
                    true
                } else if let TrackBreadth::Flex(_) = *first {
                    // A flex value is not an <inflexible-breadth>.
                    false
                } else {
                    second.is_fixed()
                }
            },
            TrackSize::FitContent(_) => false,
        }
    }
}
impl<L> Default for TrackSize<L> {
    fn default() -> Self {
        // `auto` is the initial value for a `<track-size>`.
        let auto = TrackBreadth::Keyword(TrackKeyword::Auto);
        TrackSize::Breadth(auto)
    }
}
impl Parse for TrackSize<LengthOrPercentage> {
    /// Parse a `<track-size>`: a bare `<track-breadth>`, a `minmax()`
    /// function, or a `fit-content()` function.
    fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
        if let Ok(b) = input.try(|i| TrackBreadth::parse(context, i)) {
            return Ok(TrackSize::Breadth(b))
        }

        if input.try(|i| i.expect_function_matching("minmax")).is_ok() {
            return input.parse_nested_block(|input| {
                // The first argument must be an <inflexible-breadth>: a
                // non-negative <length-percentage> or a sizing keyword,
                // but never a flex (`fr`) value.
                let inflexible_breadth =
                    match input.try(|i| LengthOrPercentage::parse_non_negative(context, i)) {
                        Ok(lop) => TrackBreadth::Breadth(lop),
                        Err(..) => {
                            let keyword = try!(TrackKeyword::parse(input));
                            TrackBreadth::Keyword(keyword)
                        }
                    };

                try!(input.expect_comma());
                // The second argument may be any <track-breadth>.
                Ok(TrackSize::MinMax(inflexible_breadth, try!(TrackBreadth::parse(context, input))))
            });
        }

        try!(input.expect_function_matching("fit-content"));
        // `fit-content` is a function token, so its argument must be parsed
        // inside the nested block. The previous code (flagged by a FIXME)
        // consumed the argument from the outer token stream, which never
        // matched the function's contents.
        input.parse_nested_block(|i| LengthOrPercentage::parse(context, i))
             .map(TrackSize::FitContent)
    }
}
impl<L: ToCss> ToCss for TrackSize<L> {
    /// Serialize a `<track-size>`; `minmax()` and `fit-content()` are
    /// written back in functional notation.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        match *self {
            TrackSize::Breadth(ref breadth) => breadth.to_css(dest),
            TrackSize::MinMax(ref min_breadth, ref max_breadth) => {
                dest.write_str("minmax(")?;
                min_breadth.to_css(dest)?;
                dest.write_str(", ")?;
                max_breadth.to_css(dest)?;
                dest.write_str(")")
            },
            TrackSize::FitContent(ref length) => {
                dest.write_str("fit-content(")?;
                length.to_css(dest)?;
                dest.write_str(")")
            },
        }
    }
}
impl HasViewportPercentage for TrackSize<LengthOrPercentage> {
    #[inline]
    fn has_viewport_percentage(&self) -> bool {
        // True when any contained breadth or length uses viewport units.
        match *self {
            TrackSize::Breadth(ref breadth) => breadth.has_viewport_percentage(),
            TrackSize::MinMax(ref min_breadth, ref max_breadth) => {
                min_breadth.has_viewport_percentage() || max_breadth.has_viewport_percentage()
            },
            TrackSize::FitContent(ref length) => length.has_viewport_percentage(),
        }
    }
}
impl<L: ToComputedValue> ToComputedValue for TrackSize<L> {
    type ComputedValue = TrackSize<L::ComputedValue>;

    /// Compute the inner breadth(s) / length; the enum structure itself is
    /// preserved in both directions.
    #[inline]
    fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
        match *self {
            TrackSize::Breadth(ref b) => TrackSize::Breadth(b.to_computed_value(context)),
            TrackSize::MinMax(ref b_1, ref b_2) =>
                TrackSize::MinMax(b_1.to_computed_value(context), b_2.to_computed_value(context)),
            TrackSize::FitContent(ref lop) => TrackSize::FitContent(lop.to_computed_value(context)),
        }
    }

    #[inline]
    fn from_computed_value(computed: &Self::ComputedValue) -> Self {
        match *computed {
            TrackSize::Breadth(ref b) =>
                TrackSize::Breadth(ToComputedValue::from_computed_value(b)),
            TrackSize::MinMax(ref b_1, ref b_2) =>
                TrackSize::MinMax(ToComputedValue::from_computed_value(b_1),
                                  ToComputedValue::from_computed_value(b_2)),
            TrackSize::FitContent(ref lop) =>
                TrackSize::FitContent(ToComputedValue::from_computed_value(lop)),
        }
    }
}

/// Parse the grid line names into a vector of owned strings.
///
/// The names appear inside a square-bracket block and the list may be empty.
/// `span` is explicitly passed to `CustomIdent::from_ident` as a disallowed
/// identifier, since it is reserved in grid placement.
///
/// https://drafts.csswg.org/css-grid/#typedef-line-names
pub fn parse_line_names(input: &mut Parser) -> Result<Vec<String>, ()> {
    input.expect_square_bracket_block()?;
    input.parse_nested_block(|input| {
        let mut values = vec![];
        while let Ok(ident) = input.try(|i| i.expect_ident()) {
            // Reject `span` (and whatever else `CustomIdent` disallows).
            if CustomIdent::from_ident((&*ident).into(), &["span"]).is_err() {
                return Err(())
            }
            values.push(ident.into_owned());
        }

        Ok(values)
    })
}
/// Serialize `slice` as CSS identifiers separated by `sep`, wrapped in
/// `prefix`/`suffix`. Writes nothing at all when the slice is empty.
fn concat_serialize_idents<W>(prefix: &str, suffix: &str,
                              slice: &[String], sep: &str, dest: &mut W) -> fmt::Result
    where W: fmt::Write
{
    let mut parts = slice.iter();
    if let Some(first) = parts.next() {
        dest.write_str(prefix)?;
        serialize_identifier(first, dest)?;
        for part in parts {
            dest.write_str(sep)?;
            serialize_identifier(part, dest)?;
        }
        dest.write_str(suffix)?;
    }
    Ok(())
}
/// The initial argument of the `repeat` function.
///
/// Parsed via the `Parse` impl below; only strictly positive integers are
/// accepted for the `Number` variant.
///
/// https://drafts.csswg.org/css-grid/#typedef-track-repeat
#[derive(Clone, Copy, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum RepeatCount {
    /// A positive integer. This is allowed only for `<track-repeat>` and `<fixed-repeat>`
    Number(Integer),
    /// An `<auto-fill>` keyword allowed only for `<auto-repeat>`
    AutoFill,
    /// An `<auto-fit>` keyword allowed only for `<auto-repeat>`
    AutoFit,
}
impl Parse for RepeatCount {
    /// Parse the first argument of `repeat()`: a strictly positive integer,
    /// or one of the `auto-fill` / `auto-fit` keywords (matched ASCII
    /// case-insensitively).
    fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
        if let Ok(i) = input.try(|i| Integer::parse(context, i)) {
            if i.value > 0 {
                Ok(RepeatCount::Number(i))
            } else {
                // Zero and negative repeat counts are invalid.
                Err(())
            }
        } else {
            match_ignore_ascii_case! { &input.expect_ident()?,
                "auto-fill" => Ok(RepeatCount::AutoFill),
                "auto-fit" => Ok(RepeatCount::AutoFit),
                _ => Err(()),
            }
        }
    }
}
impl ToCss for RepeatCount {
    /// Serialize the repeat count: either the integer, or one of the
    /// `auto-fill` / `auto-fit` keywords.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        match *self {
            RepeatCount::AutoFill => dest.write_str("auto-fill"),
            RepeatCount::AutoFit => dest.write_str("auto-fit"),
            RepeatCount::Number(ref count) => count.to_css(dest),
        }
    }
}
// `RepeatCount` computes to itself and can never contain viewport units.
impl ComputedValueAsSpecified for RepeatCount {}
no_viewport_percentage!(RepeatCount);

/// The type of `repeat` function (only used in parsing).
///
/// https://drafts.csswg.org/css-grid/#typedef-track-repeat
#[derive(Clone, Copy, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
enum RepeatType {
    /// [`<auto-repeat>`](https://drafts.csswg.org/css-grid/#typedef-auto-repeat)
    Auto,
    /// [`<track-repeat>`](https://drafts.csswg.org/css-grid/#typedef-track-repeat)
    Normal,
    /// [`<fixed-repeat>`](https://drafts.csswg.org/css-grid/#typedef-fixed-repeat)
    Fixed,
}

/// The structure containing `<line-names>` and `<track-size>` values.
///
/// It can also hold `repeat()` function parameters, which expands into the respective
/// values in its computed form.
#[derive(Clone, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct TrackRepeat<L> {
    /// The number of times for the value to be repeated (could also be `auto-fit` or `auto-fill`)
    pub count: RepeatCount,
    /// `<line-names>` accompanying `<track_size>` values.
    ///
    /// If there's no `<line-names>`, then it's represented by an empty vector.
    /// For N `<track-size>` values, there will be N+1 `<line-names>`, and so this vector's
    /// length is always one value more than that of the `<track-size>`.
    pub line_names: Vec<Vec<String>>,
    /// `<track-size>` values.
    pub track_sizes: Vec<TrackSize<L>>,
}
impl TrackRepeat<LengthOrPercentage> {
    /// Parse a `repeat()` function, classifying it as `<auto-repeat>`,
    /// `<fixed-repeat>` or `<track-repeat>` based on the repeat count and
    /// the track sizes encountered.
    fn parse_with_repeat_type(context: &ParserContext, input: &mut Parser)
                              -> Result<(TrackRepeat<LengthOrPercentage>, RepeatType), ()> {
        input.try(|i| i.expect_function_matching("repeat")).and_then(|_| {
            input.parse_nested_block(|input| {
                let count = RepeatCount::parse(context, input)?;
                input.expect_comma()?;

                // auto-fill/auto-fit counts make this an <auto-repeat>,
                // which restricts the contents to <fixed-size> values.
                let is_auto = count == RepeatCount::AutoFit || count == RepeatCount::AutoFill;
                let mut repeat_type = if is_auto {
                    RepeatType::Auto
                } else { // <fixed-size> is a subset of <track_size>, so it should work for both
                    RepeatType::Fixed
                };

                let mut names = vec![];
                let mut values = vec![];
                let mut current_names;

                loop {
                    // Optional `<line-names>` before each `<track-size>`.
                    current_names = input.try(parse_line_names).unwrap_or(vec![]);
                    if let Ok(track_size) = input.try(|i| TrackSize::parse(context, i)) {
                        if !track_size.is_fixed() {
                            if is_auto {
                                return Err(()) // should be <fixed-size> for <auto-repeat>
                            }

                            // A non-fixed size downgrades the classification.
                            if repeat_type == RepeatType::Fixed {
                                repeat_type = RepeatType::Normal // <track-size> for sure
                            }
                        }

                        values.push(track_size);
                        names.push(current_names);
                    } else {
                        if values.is_empty() {
                            return Err(()) // expecting at least one <track-size>
                        }

                        // `names` ends up one entry longer than `values`,
                        // matching the N+1 invariant on `line_names`.
                        names.push(current_names); // final `<line-names>`
                        break // no more <track-size>, breaking
                    }
                }

                let repeat = TrackRepeat {
                    count: count,
                    track_sizes: values,
                    line_names: names,
                };

                Ok((repeat, repeat_type))
            })
        })
    }
}
impl<L: ToCss> ToCss for TrackRepeat<L> {
    /// Serialize back into
    /// `repeat(<count>, [ <line-names>? <track-size> ]+ <line-names>?)` form.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        dest.write_str("repeat(")?;
        self.count.to_css(dest)?;
        dest.write_str(", ")?;

        // `line_names` has one more entry than `track_sizes`; zipping pairs
        // each size with the name list that precedes it, leaving the final
        // list in the iterator for the trailing case below.
        let mut line_names_iter = self.line_names.iter();
        for (i, (ref size, ref names)) in self.track_sizes.iter()
                                              .zip(&mut line_names_iter).enumerate() {
            if i > 0 {
                dest.write_str(" ")?;
            }

            concat_serialize_idents("[", "] ", names, " ", dest)?;
            size.to_css(dest)?;
        }

        // Trailing `<line-names>`, if any, after the last `<track-size>`.
        if let Some(line_names_last) = line_names_iter.next() {
            concat_serialize_idents(" [", "]", line_names_last, " ", dest)?;
        }

        dest.write_str(")")?;
        Ok(())
    }
}
impl HasViewportPercentage for TrackRepeat<LengthOrPercentage> {
    #[inline]
    fn has_viewport_percentage(&self) -> bool {
        // Only the track sizes can carry viewport units; the count and the
        // line names never do.
        self.track_sizes.iter().any(|size| size.has_viewport_percentage())
    }
}
impl<L: ToComputedValue> ToComputedValue for TrackRepeat<L> {
    type ComputedValue = TrackRepeat<L::ComputedValue>;

    /// Compute the repeat value. A numeric count is expanded in place,
    /// merging the trailing line names of one repetition with the leading
    /// names of the next; `auto-fill`/`auto-fit` repeats are left for layout
    /// to resolve.
    #[inline]
    fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
        // If the repeat count is numeric, then expand the values and merge accordingly.
        if let RepeatCount::Number(num) = self.count {
            let mut line_names = vec![];
            let mut track_sizes = vec![];
            // Names carried over from the previous repetition: its final
            // name list is merged with the next repetition's first list.
            let mut prev_names = vec![];

            for _ in 0..num.value {
                let mut names_iter = self.line_names.iter();
                for (size, names) in self.track_sizes.iter().zip(&mut names_iter) {
                    prev_names.extend_from_slice(&names);
                    line_names.push(mem::replace(&mut prev_names, vec![]));
                    track_sizes.push(size.to_computed_value(context));
                }

                // The (N+1)th name list is held back for merging.
                if let Some(names) = names_iter.next() {
                    prev_names.extend_from_slice(&names);
                }
            }

            // Flush the final trailing name list, preserving the N+1
            // invariant on `line_names`.
            line_names.push(prev_names);
            TrackRepeat {
                count: self.count,
                track_sizes: track_sizes,
                line_names: line_names,
            }
        } else { // if it's auto-fit/auto-fill, then it's left to the layout.
            TrackRepeat {
                count: self.count,
                track_sizes: self.track_sizes.iter()
                                             .map(|l| l.to_computed_value(context))
                                             .collect(),
                line_names: self.line_names.clone(),
            }
        }
    }

    #[inline]
    fn from_computed_value(computed: &Self::ComputedValue) -> Self {
        // The expansion above is not reversible, so the computed (already
        // expanded) form is simply carried back.
        TrackRepeat {
            count: computed.count,
            track_sizes: computed.track_sizes.iter()
                                             .map(ToComputedValue::from_computed_value)
                                             .collect(),
            line_names: computed.line_names.clone(),
        }
    }
}
Add parsing/serialization for <track-list>
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Necessary types for [grid](https://drafts.csswg.org/css-grid/).
use cssparser::{Parser, Token, serialize_identifier};
use parser::{Parse, ParserContext};
use std::{fmt, mem, usize};
use std::ascii::AsciiExt;
use style_traits::ToCss;
use values::{CSSFloat, CustomIdent, Either, HasViewportPercentage};
use values::computed::{ComputedValueAsSpecified, Context, ToComputedValue};
use values::specified::{Integer, LengthOrPercentage};
#[derive(PartialEq, Clone, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// A `<grid-line>` type.
///
/// https://drafts.csswg.org/css-grid/#typedef-grid-row-start-grid-line
///
/// All three fields unset represents the `auto` keyword. During parsing, a
/// bare `span` with no integer is normalized to `integer: Some(1)` (see the
/// `Parse` impl).
pub struct GridLine {
    /// Flag to check whether it's a `span` keyword.
    pub is_span: bool,
    /// A custom identifier for named lines.
    ///
    /// https://drafts.csswg.org/css-grid/#grid-placement-slot
    pub ident: Option<String>,
    /// Denotes the nth grid line from grid item's placement.
    pub integer: Option<i32>,
}
impl Default for GridLine {
    fn default() -> Self {
        // The default `<grid-line>` is the `auto` keyword: not a span, with
        // neither a name nor a line number.
        GridLine { is_span: false, ident: None, integer: None }
    }
}
impl ToCss for GridLine {
    /// Serialize a `<grid-line>` value.
    ///
    /// The default value (all fields unset) serializes as `auto`; otherwise
    /// the components are written in `span`, integer, ident order separated
    /// by single spaces. The previous code unconditionally prefixed the
    /// integer/ident with a space, which produced a leading space (e.g.
    /// `" 4"` or `" foo"`) whenever `span` was absent; a separator is now
    /// only written between two emitted components.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        if !self.is_span && self.ident.is_none() && self.integer.is_none() {
            return dest.write_str("auto")
        }

        // Tracks whether a component has already been written, so that a
        // space is only emitted *between* components.
        let mut need_sep = false;
        if self.is_span {
            try!(dest.write_str("span"));
            need_sep = true;
        }

        if let Some(i) = self.integer {
            if need_sep {
                try!(dest.write_str(" "));
            }
            try!(write!(dest, "{}", i));
            need_sep = true;
        }

        if let Some(ref s) = self.ident {
            if need_sep {
                try!(dest.write_str(" "));
            }
            try!(dest.write_str(s));
        }

        Ok(())
    }
}
impl Parse for GridLine {
    /// Parse a `<grid-line>` value.
    ///
    /// Grammar: `auto | <custom-ident> | [ <integer> && <custom-ident>? ] |
    /// [ span && [ <integer> || <custom-ident> ] ]`
    fn parse(_context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
        let mut grid_line = Default::default();
        if input.try(|i| i.expect_ident_matching("auto")).is_ok() {
            return Ok(grid_line)
        }

        // The three components (`span`, <integer>, <custom-ident>) may appear
        // in any order, but each at most once.
        for _ in 0..3 { // Maximum possible entities for <grid-line>
            if input.try(|i| i.expect_ident_matching("span")).is_ok() {
                if grid_line.is_span {
                    // `span` already seen once.
                    return Err(())
                }
                grid_line.is_span = true;
            } else if let Ok(i) = input.try(|i| i.expect_integer()) {
                if i == 0 || grid_line.integer.is_some() {
                    // Line zero does not exist, and the integer may only
                    // occur once.
                    return Err(())
                }
                grid_line.integer = Some(i);
            } else if let Ok(name) = input.try(|i| i.expect_ident()) {
                if grid_line.ident.is_some() {
                    return Err(())
                }
                // NOTE(review): a bare `auto` at this position would be
                // accepted as a <custom-ident> here — confirm whether the
                // CSS-wide keywords should be rejected as line names.
                grid_line.ident = Some(name.into_owned());
            } else {
                break
            }
        }

        if grid_line.is_span {
            if let Some(i) = grid_line.integer {
                if i < 0 { // disallow negative integers for grid spans
                    return Err(())
                }
            } else {
                // `span` with no explicit count defaults to 1.
                grid_line.integer = Some(1);
            }
        }

        Ok(grid_line)
    }
}

// The computed value of a `<grid-line>` is the specified value itself, and
// it can never contain viewport-relative units.
impl ComputedValueAsSpecified for GridLine {}
no_viewport_percentage!(GridLine);

// Track sizing keywords shared by `<track-breadth>` and `minmax()`.
define_css_keyword_enum!{ TrackKeyword:
    "auto" => Auto,
    "max-content" => MaxContent,
    "min-content" => MinContent
}
#[derive(Clone, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// A track breadth for explicit grid track sizing. It's generic solely to
/// avoid re-implementing it for the computed type.
///
/// In this (specified) module, `L` is `LengthOrPercentage`.
///
/// https://drafts.csswg.org/css-grid/#typedef-track-breadth
pub enum TrackBreadth<L> {
    /// The generic type is almost always a non-negative `<length-percentage>`
    Breadth(L),
    /// A flex fraction specified in `fr` units.
    Flex(CSSFloat),
    /// One of the track-sizing keywords (`auto`, `min-content`, `max-content`)
    Keyword(TrackKeyword),
}
impl<L> TrackBreadth<L> {
    /// Check whether this is a `<fixed-breadth>` (i.e., it only has `<length-percentage>`)
    ///
    /// https://drafts.csswg.org/css-grid/#typedef-fixed-breadth
    #[inline]
    pub fn is_fixed(&self) -> bool {
        // Only the length-percentage variant qualifies; flex values and
        // keywords never do.
        if let TrackBreadth::Breadth(..) = *self {
            true
        } else {
            false
        }
    }
}
/// Parse a single flexible length.
///
/// Accepts a dimension with the `fr` unit (ASCII case-insensitive). Negative
/// values are rejected; `0fr` parses, since `is_sign_positive` holds for
/// `0.0`.
pub fn parse_flex(input: &mut Parser) -> Result<CSSFloat, ()> {
    match try!(input.next()) {
        Token::Dimension(ref value, ref unit) if unit.eq_ignore_ascii_case("fr") && value.value.is_sign_positive()
            => Ok(value.value),
        _ => Err(()),
    }
}

impl Parse for TrackBreadth<LengthOrPercentage> {
    /// Parse a `<track-breadth>`.
    ///
    /// The alternatives are tried in order: a non-negative
    /// `<length-percentage>`, then a `<flex>` (`fr`) value, and finally one
    /// of the sizing keywords (`auto` / `min-content` / `max-content`).
    fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
        if let Ok(lop) = input.try(|i| LengthOrPercentage::parse_non_negative(context, i)) {
            return Ok(TrackBreadth::Breadth(lop))
        }

        if let Ok(f) = input.try(parse_flex) {
            return Ok(TrackBreadth::Flex(f))
        }

        TrackKeyword::parse(input).map(TrackBreadth::Keyword)
    }
}
impl<L: ToCss> ToCss for TrackBreadth<L> {
    /// Serialize a `<track-breadth>`; flex fractions get the `fr` suffix.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        match *self {
            TrackBreadth::Breadth(ref breadth) => breadth.to_css(dest),
            TrackBreadth::Flex(ref fraction) => write!(dest, "{}fr", fraction),
            TrackBreadth::Keyword(ref keyword) => keyword.to_css(dest),
        }
    }
}
impl HasViewportPercentage for TrackBreadth<LengthOrPercentage> {
    #[inline]
    fn has_viewport_percentage(&self) -> bool {
        // Only the length-percentage variant can carry viewport units.
        match *self {
            TrackBreadth::Breadth(ref lop) => lop.has_viewport_percentage(),
            _ => false,
        }
    }
}
impl<L: ToComputedValue> ToComputedValue for TrackBreadth<L> {
    type ComputedValue = TrackBreadth<L::ComputedValue>;

    /// Only the breadth component needs computation; flex fractions and
    /// keywords are carried over unchanged.
    #[inline]
    fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
        match *self {
            TrackBreadth::Breadth(ref lop) => TrackBreadth::Breadth(lop.to_computed_value(context)),
            TrackBreadth::Flex(fr) => TrackBreadth::Flex(fr),
            TrackBreadth::Keyword(k) => TrackBreadth::Keyword(k),
        }
    }

    #[inline]
    fn from_computed_value(computed: &Self::ComputedValue) -> Self {
        match *computed {
            TrackBreadth::Breadth(ref lop) =>
                TrackBreadth::Breadth(ToComputedValue::from_computed_value(lop)),
            TrackBreadth::Flex(fr) => TrackBreadth::Flex(fr),
            TrackBreadth::Keyword(k) => TrackBreadth::Keyword(k),
        }
    }
}

#[derive(Clone, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// A `<track-size>` type for explicit grid track sizing. Like `<track-breadth>`, this is
/// generic only to avoid code bloat. It only takes `<length-percentage>`
///
/// The initial value is `Breadth(Keyword(Auto))` (see the `Default` impl).
///
/// https://drafts.csswg.org/css-grid/#typedef-track-size
pub enum TrackSize<L> {
    /// A flexible `<track-breadth>`
    Breadth(TrackBreadth<L>),
    /// A `minmax` function for a range over an inflexible `<track-breadth>`
    /// and a flexible `<track-breadth>`
    ///
    /// https://drafts.csswg.org/css-grid/#valdef-grid-template-columns-minmax
    MinMax(TrackBreadth<L>, TrackBreadth<L>),
    /// A `fit-content` function.
    ///
    /// https://drafts.csswg.org/css-grid/#valdef-grid-template-columns-fit-content
    FitContent(L),
}
impl<L> TrackSize<L> {
    /// Check whether this is a `<fixed-size>`
    ///
    /// https://drafts.csswg.org/css-grid/#typedef-fixed-size
    pub fn is_fixed(&self) -> bool {
        match *self {
            TrackSize::Breadth(ref breadth) => breadth.is_fixed(),
            // A minmax() is fixed either when its first argument is a
            // <fixed-breadth> (the second may then be any <track-breadth>),
            // or when the first is an <inflexible-breadth> keyword and the
            // second is a <fixed-breadth>. Both variants are subsets of
            // minmax(<inflexible-breadth>, <track-breadth>), so parsing
            // doesn't need to distinguish them.
            TrackSize::MinMax(ref first, ref second) => {
                if first.is_fixed() {
                    true
                } else if let TrackBreadth::Flex(_) = *first {
                    // A flex value is not an <inflexible-breadth>.
                    false
                } else {
                    second.is_fixed()
                }
            },
            TrackSize::FitContent(_) => false,
        }
    }
}
impl<L> Default for TrackSize<L> {
    fn default() -> Self {
        // `auto` is the initial value for a `<track-size>`.
        let auto = TrackBreadth::Keyword(TrackKeyword::Auto);
        TrackSize::Breadth(auto)
    }
}
impl Parse for TrackSize<LengthOrPercentage> {
    /// Parse a `<track-size>`: a bare `<track-breadth>`, a `minmax()`
    /// function, or a `fit-content()` function.
    fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
        if let Ok(b) = input.try(|i| TrackBreadth::parse(context, i)) {
            return Ok(TrackSize::Breadth(b))
        }

        if input.try(|i| i.expect_function_matching("minmax")).is_ok() {
            return input.parse_nested_block(|input| {
                // The first argument must be an <inflexible-breadth>: a
                // non-negative <length-percentage> or a sizing keyword,
                // but never a flex (`fr`) value.
                let inflexible_breadth =
                    match input.try(|i| LengthOrPercentage::parse_non_negative(context, i)) {
                        Ok(lop) => TrackBreadth::Breadth(lop),
                        Err(..) => {
                            let keyword = try!(TrackKeyword::parse(input));
                            TrackBreadth::Keyword(keyword)
                        }
                    };

                try!(input.expect_comma());
                // The second argument may be any <track-breadth>.
                Ok(TrackSize::MinMax(inflexible_breadth, try!(TrackBreadth::parse(context, input))))
            });
        }

        try!(input.expect_function_matching("fit-content"));
        // `fit-content` is a function token, so its argument must be parsed
        // inside the nested block. The previous code (flagged by a FIXME)
        // consumed the argument from the outer token stream, which never
        // matched the function's contents.
        input.parse_nested_block(|i| LengthOrPercentage::parse(context, i))
             .map(TrackSize::FitContent)
    }
}
impl<L: ToCss> ToCss for TrackSize<L> {
    /// Serialize a `<track-size>`; `minmax()` and `fit-content()` are
    /// written back in functional notation.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        match *self {
            TrackSize::Breadth(ref breadth) => breadth.to_css(dest),
            TrackSize::MinMax(ref min_breadth, ref max_breadth) => {
                dest.write_str("minmax(")?;
                min_breadth.to_css(dest)?;
                dest.write_str(", ")?;
                max_breadth.to_css(dest)?;
                dest.write_str(")")
            },
            TrackSize::FitContent(ref length) => {
                dest.write_str("fit-content(")?;
                length.to_css(dest)?;
                dest.write_str(")")
            },
        }
    }
}
impl HasViewportPercentage for TrackSize<LengthOrPercentage> {
    #[inline]
    fn has_viewport_percentage(&self) -> bool {
        // True when any contained breadth or length uses viewport units.
        match *self {
            TrackSize::Breadth(ref breadth) => breadth.has_viewport_percentage(),
            TrackSize::MinMax(ref min_breadth, ref max_breadth) => {
                min_breadth.has_viewport_percentage() || max_breadth.has_viewport_percentage()
            },
            TrackSize::FitContent(ref length) => length.has_viewport_percentage(),
        }
    }
}
impl<L: ToComputedValue> ToComputedValue for TrackSize<L> {
    type ComputedValue = TrackSize<L::ComputedValue>;

    /// Compute the inner breadth(s) / length; the enum structure itself is
    /// preserved in both directions.
    #[inline]
    fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
        match *self {
            TrackSize::Breadth(ref b) => TrackSize::Breadth(b.to_computed_value(context)),
            TrackSize::MinMax(ref b_1, ref b_2) =>
                TrackSize::MinMax(b_1.to_computed_value(context), b_2.to_computed_value(context)),
            TrackSize::FitContent(ref lop) => TrackSize::FitContent(lop.to_computed_value(context)),
        }
    }

    #[inline]
    fn from_computed_value(computed: &Self::ComputedValue) -> Self {
        match *computed {
            TrackSize::Breadth(ref b) =>
                TrackSize::Breadth(ToComputedValue::from_computed_value(b)),
            TrackSize::MinMax(ref b_1, ref b_2) =>
                TrackSize::MinMax(ToComputedValue::from_computed_value(b_1),
                                  ToComputedValue::from_computed_value(b_2)),
            TrackSize::FitContent(ref lop) =>
                TrackSize::FitContent(ToComputedValue::from_computed_value(lop)),
        }
    }
}

/// Parse the grid line names into a vector of owned strings.
///
/// The names appear inside a square-bracket block and the list may be empty.
/// `span` is explicitly passed to `CustomIdent::from_ident` as a disallowed
/// identifier, since it is reserved in grid placement.
///
/// https://drafts.csswg.org/css-grid/#typedef-line-names
pub fn parse_line_names(input: &mut Parser) -> Result<Vec<String>, ()> {
    input.expect_square_bracket_block()?;
    input.parse_nested_block(|input| {
        let mut values = vec![];
        while let Ok(ident) = input.try(|i| i.expect_ident()) {
            // Reject `span` (and whatever else `CustomIdent` disallows).
            if CustomIdent::from_ident((&*ident).into(), &["span"]).is_err() {
                return Err(())
            }
            values.push(ident.into_owned());
        }

        Ok(values)
    })
}
/// Serialize `slice` as CSS identifiers separated by `sep`, wrapped in
/// `prefix`/`suffix`. Writes nothing at all when the slice is empty.
fn concat_serialize_idents<W>(prefix: &str, suffix: &str,
                              slice: &[String], sep: &str, dest: &mut W) -> fmt::Result
    where W: fmt::Write
{
    let mut parts = slice.iter();
    if let Some(first) = parts.next() {
        dest.write_str(prefix)?;
        serialize_identifier(first, dest)?;
        for part in parts {
            dest.write_str(sep)?;
            serialize_identifier(part, dest)?;
        }
        dest.write_str(suffix)?;
    }
    Ok(())
}
/// The initial argument of the `repeat` function.
///
/// Parsed via the `Parse` impl below; only strictly positive integers are
/// accepted for the `Number` variant.
///
/// https://drafts.csswg.org/css-grid/#typedef-track-repeat
#[derive(Clone, Copy, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum RepeatCount {
    /// A positive integer. This is allowed only for `<track-repeat>` and `<fixed-repeat>`
    Number(Integer),
    /// An `<auto-fill>` keyword allowed only for `<auto-repeat>`
    AutoFill,
    /// An `<auto-fit>` keyword allowed only for `<auto-repeat>`
    AutoFit,
}
impl Parse for RepeatCount {
    /// Parse the first argument of `repeat()`: a strictly positive integer,
    /// or one of the `auto-fill` / `auto-fit` keywords (matched ASCII
    /// case-insensitively).
    fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
        if let Ok(i) = input.try(|i| Integer::parse(context, i)) {
            if i.value > 0 {
                Ok(RepeatCount::Number(i))
            } else {
                // Zero and negative repeat counts are invalid.
                Err(())
            }
        } else {
            match_ignore_ascii_case! { &input.expect_ident()?,
                "auto-fill" => Ok(RepeatCount::AutoFill),
                "auto-fit" => Ok(RepeatCount::AutoFit),
                _ => Err(()),
            }
        }
    }
}
impl ToCss for RepeatCount {
    /// Serialize the repeat count: either the integer, or one of the
    /// `auto-fill` / `auto-fit` keywords.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        match *self {
            RepeatCount::AutoFill => dest.write_str("auto-fill"),
            RepeatCount::AutoFit => dest.write_str("auto-fit"),
            RepeatCount::Number(ref count) => count.to_css(dest),
        }
    }
}
// `RepeatCount` computes to itself and can never contain viewport units.
impl ComputedValueAsSpecified for RepeatCount {}
no_viewport_percentage!(RepeatCount);

/// The type of `repeat` function (only used in parsing).
///
/// https://drafts.csswg.org/css-grid/#typedef-track-repeat
#[derive(Clone, Copy, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
enum RepeatType {
    /// [`<auto-repeat>`](https://drafts.csswg.org/css-grid/#typedef-auto-repeat)
    Auto,
    /// [`<track-repeat>`](https://drafts.csswg.org/css-grid/#typedef-track-repeat)
    Normal,
    /// [`<fixed-repeat>`](https://drafts.csswg.org/css-grid/#typedef-fixed-repeat)
    Fixed,
}

/// The structure containing `<line-names>` and `<track-size>` values.
///
/// It can also hold `repeat()` function parameters, which expands into the respective
/// values in its computed form.
#[derive(Clone, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct TrackRepeat<L> {
    /// The number of times for the value to be repeated (could also be `auto-fit` or `auto-fill`)
    pub count: RepeatCount,
    /// `<line-names>` accompanying `<track_size>` values.
    ///
    /// If there's no `<line-names>`, then it's represented by an empty vector.
    /// For N `<track-size>` values, there will be N+1 `<line-names>`, and so this vector's
    /// length is always one value more than that of the `<track-size>`.
    pub line_names: Vec<Vec<String>>,
    /// `<track-size>` values.
    pub track_sizes: Vec<TrackSize<L>>,
}
impl TrackRepeat<LengthOrPercentage> {
    /// Parse a `repeat()` function, classifying it as `<auto-repeat>`,
    /// `<fixed-repeat>` or `<track-repeat>` based on the repeat count and
    /// the track sizes encountered.
    fn parse_with_repeat_type(context: &ParserContext, input: &mut Parser)
                              -> Result<(TrackRepeat<LengthOrPercentage>, RepeatType), ()> {
        input.try(|i| i.expect_function_matching("repeat")).and_then(|_| {
            input.parse_nested_block(|input| {
                let count = RepeatCount::parse(context, input)?;
                input.expect_comma()?;

                // auto-fill/auto-fit counts make this an <auto-repeat>,
                // which restricts the contents to <fixed-size> values.
                let is_auto = count == RepeatCount::AutoFit || count == RepeatCount::AutoFill;
                let mut repeat_type = if is_auto {
                    RepeatType::Auto
                } else { // <fixed-size> is a subset of <track_size>, so it should work for both
                    RepeatType::Fixed
                };

                let mut names = vec![];
                let mut values = vec![];
                let mut current_names;

                loop {
                    // Optional `<line-names>` before each `<track-size>`.
                    current_names = input.try(parse_line_names).unwrap_or(vec![]);
                    if let Ok(track_size) = input.try(|i| TrackSize::parse(context, i)) {
                        if !track_size.is_fixed() {
                            if is_auto {
                                return Err(()) // should be <fixed-size> for <auto-repeat>
                            }

                            // A non-fixed size downgrades the classification.
                            if repeat_type == RepeatType::Fixed {
                                repeat_type = RepeatType::Normal // <track-size> for sure
                            }
                        }

                        values.push(track_size);
                        names.push(current_names);
                    } else {
                        if values.is_empty() {
                            return Err(()) // expecting at least one <track-size>
                        }

                        // `names` ends up one entry longer than `values`,
                        // matching the N+1 invariant on `line_names`.
                        names.push(current_names); // final `<line-names>`
                        break // no more <track-size>, breaking
                    }
                }

                let repeat = TrackRepeat {
                    count: count,
                    track_sizes: values,
                    line_names: names,
                };

                Ok((repeat, repeat_type))
            })
        })
    }
}
impl<L: ToCss> ToCss for TrackRepeat<L> {
    /// Serialize back into
    /// `repeat(<count>, [ <line-names>? <track-size> ]+ <line-names>?)` form.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        dest.write_str("repeat(")?;
        self.count.to_css(dest)?;
        dest.write_str(", ")?;

        // `line_names` has one more entry than `track_sizes`; zipping pairs
        // each size with the name list that precedes it, leaving the final
        // list in the iterator for the trailing case below.
        let mut line_names_iter = self.line_names.iter();
        for (i, (ref size, ref names)) in self.track_sizes.iter()
                                              .zip(&mut line_names_iter).enumerate() {
            if i > 0 {
                dest.write_str(" ")?;
            }

            concat_serialize_idents("[", "] ", names, " ", dest)?;
            size.to_css(dest)?;
        }

        // Trailing `<line-names>`, if any, after the last `<track-size>`.
        if let Some(line_names_last) = line_names_iter.next() {
            concat_serialize_idents(" [", "]", line_names_last, " ", dest)?;
        }

        dest.write_str(")")?;
        Ok(())
    }
}
impl HasViewportPercentage for TrackRepeat<LengthOrPercentage> {
    #[inline]
    fn has_viewport_percentage(&self) -> bool {
        // Only the track sizes can carry viewport units; the count and the
        // line names never do.
        self.track_sizes.iter().any(|size| size.has_viewport_percentage())
    }
}
impl<L: ToComputedValue> ToComputedValue for TrackRepeat<L> {
    type ComputedValue = TrackRepeat<L::ComputedValue>;

    /// Compute the repeat value. A numeric count is expanded in place,
    /// merging the trailing line names of one repetition with the leading
    /// names of the next; `auto-fill`/`auto-fit` repeats are left for layout
    /// to resolve.
    #[inline]
    fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
        // If the repeat count is numeric, then expand the values and merge accordingly.
        if let RepeatCount::Number(num) = self.count {
            let mut line_names = vec![];
            let mut track_sizes = vec![];
            // Names carried over from the previous repetition: its final
            // name list is merged with the next repetition's first list.
            let mut prev_names = vec![];

            for _ in 0..num.value {
                let mut names_iter = self.line_names.iter();
                for (size, names) in self.track_sizes.iter().zip(&mut names_iter) {
                    prev_names.extend_from_slice(&names);
                    line_names.push(mem::replace(&mut prev_names, vec![]));
                    track_sizes.push(size.to_computed_value(context));
                }

                // The (N+1)th name list is held back for merging.
                if let Some(names) = names_iter.next() {
                    prev_names.extend_from_slice(&names);
                }
            }

            // Flush the final trailing name list, preserving the N+1
            // invariant on `line_names`.
            line_names.push(prev_names);
            TrackRepeat {
                count: self.count,
                track_sizes: track_sizes,
                line_names: line_names,
            }
        } else { // if it's auto-fit/auto-fill, then it's left to the layout.
            TrackRepeat {
                count: self.count,
                track_sizes: self.track_sizes.iter()
                                             .map(|l| l.to_computed_value(context))
                                             .collect(),
                line_names: self.line_names.clone(),
            }
        }
    }

    #[inline]
    fn from_computed_value(computed: &Self::ComputedValue) -> Self {
        // The expansion above is not reversible, so the computed (already
        // expanded) form is simply carried back.
        TrackRepeat {
            count: computed.count,
            track_sizes: computed.track_sizes.iter()
                                             .map(ToComputedValue::from_computed_value)
                                             .collect(),
            line_names: computed.line_names.clone(),
        }
    }
}
</full_update>
/// The type of a `<track-list>` as determined during parsing.
///
/// https://drafts.csswg.org/css-grid/#typedef-track-list
#[derive(Clone, Copy, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum TrackListType {
    /// [`<auto-track-list>`](https://drafts.csswg.org/css-grid/#typedef-auto-track-list)
    ///
    /// If this type exists, then the value at the index in `line_names` field in `TrackList`
    /// has the `<line-names>?` list that comes before `<auto-repeat>`. If it's a specified value,
    /// then the `repeat()` function (that follows the line names list) is also at the given index
    /// in `values` field. On the contrary, if it's a computed value, then the `repeat()` function
    /// is in the `auto_repeat` field.
    Auto(u16),
    /// [`<track-list>`](https://drafts.csswg.org/css-grid/#typedef-track-list)
    /// (i.e. the general form, which may contain `repeat()` functions).
    Normal,
    /// [`<explicit-track-list>`](https://drafts.csswg.org/css-grid/#typedef-explicit-track-list)
    ///
    /// Note that this is a subset of the normal `<track-list>`, and so it could be used in place
    /// of the latter.
    Explicit,
}
/// A grid `<track-list>` type.
///
/// https://drafts.csswg.org/css-grid/#typedef-track-list
#[derive(Clone, PartialEq, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct TrackList<T> {
    /// The type of this `<track-list>` (auto, explicit or general).
    ///
    /// In order to avoid parsing the same value multiple times, this does a single traversal
    /// and arrives at the type of value it has parsed (or bails out gracefully with an error).
    pub list_type: TrackListType,
    /// A vector of `<track-size> | <track-repeat>` values. In its specified form, it may contain
    /// any value, but once it's computed, it contains only `<track_size>` values.
    ///
    /// Note that this may also contain `<auto-repeat>` at an index. If it exists, it's
    /// given by the index in `TrackListType::Auto`
    pub values: Vec<T>,
    /// `<line-names>` accompanying `<track-size> | <track-repeat>` values.
    ///
    /// If there's no `<line-names>`, then it's represented by an empty vector.
    /// For N values, there will be N+1 `<line-names>`, and so this vector's
    /// length is always one value more than that of the `<track-size>`.
    pub line_names: Vec<Vec<String>>,
    /// `<auto-repeat>` value after computation. This field is necessary, because
    /// the `values` field (after computation) will only contain `<track-size>` values, and
    /// we need something to represent this function.
    /// Always `None` in the specified form (filled in during computation).
    pub auto_repeat: Option<TrackRepeat<computed::LengthOrPercentage>>,
}
/// Either a `<track-size>` or `<track-repeat>` component of `<track-list>`
///
/// This is required only for the specified form of `<track-list>`, and will become
/// `TrackSize<LengthOrPercentage>` in its computed form (any `repeat()` having
/// been expanded or moved to `TrackList::auto_repeat`).
pub type TrackSizeOrRepeat = Either<TrackSize<LengthOrPercentage>, TrackRepeat<LengthOrPercentage>>;
impl Parse for TrackList<TrackSizeOrRepeat> {
    /// Parses a `<track-list>` in a single traversal, classifying it as
    /// explicit / normal / auto along the way and rejecting combinations
    /// the grammar forbids (e.g. two `<auto-repeat>`s, or an `<auto-repeat>`
    /// mixed with non-fixed sizes).
    fn parse(context: &ParserContext, input: &mut Parser) -> Result<Self, ()> {
        let mut current_names;
        let mut names = vec![];
        let mut values = vec![];
        let mut list_type = TrackListType::Explicit; // assume it's the simplest case
        // marker to check whether we've already encountered <auto-repeat> along the way
        let mut is_auto = false;
        // assume that everything is <fixed-size>. This flag is useful when we encounter <auto-repeat>
        let mut atleast_one_not_fixed = false;
        loop {
            // Optional `<line-names>` before each value; absent names become an empty vec.
            current_names = input.try(parse_line_names).unwrap_or(vec![]);
            if let Ok(track_size) = input.try(|i| TrackSize::parse(context, i)) {
                if !track_size.is_fixed() {
                    atleast_one_not_fixed = true;
                    if is_auto {
                        return Err(()) // <auto-track-list> only accepts <fixed-size> and <fixed-repeat>
                    }
                }
                names.push(current_names);
                values.push(Either::First(track_size));
            } else if let Ok((repeat, type_)) = input.try(|i| TrackRepeat::parse_with_repeat_type(context, i)) {
                if list_type == TrackListType::Explicit {
                    list_type = TrackListType::Normal; // <explicit-track-list> doesn't contain repeat()
                }
                match type_ {
                    RepeatType::Normal => {
                        atleast_one_not_fixed = true;
                        if is_auto { // only <fixed-repeat>
                            return Err(())
                        }
                    },
                    RepeatType::Auto => {
                        if is_auto || atleast_one_not_fixed {
                            // We've either seen <auto-repeat> earlier, or there's at least one non-fixed value
                            return Err(())
                        }
                        is_auto = true;
                        // Remember where the <auto-repeat> sits; see TrackListType::Auto docs.
                        list_type = TrackListType::Auto(values.len() as u16);
                    },
                    RepeatType::Fixed => (),
                }
                names.push(current_names);
                values.push(Either::Second(repeat));
            } else {
                // No further <track-size> or <track-repeat>: the grammar requires at least one value.
                if values.is_empty() {
                    return Err(())
                }
                // The optional trailing <line-names> keeps the N+1 invariant on `names`.
                names.push(current_names);
                break
            }
        }
        Ok(TrackList {
            list_type: list_type,
            values: values,
            line_names: names,
            auto_repeat: None, // filled only in computation
        })
    }
}
impl<T: ToCss> ToCss for TrackList<T> {
    /// Serializes the list as alternating `[line-names] value` pairs, splicing
    /// the out-of-band computed `<auto-repeat>` (if any) back in at its index.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        // Index at which the `<auto-repeat>` must be serialized; `usize::MAX`
        // acts as a never-matching sentinel for non-auto lists.
        let auto_idx = match self.list_type {
            TrackListType::Auto(i) => i as usize,
            _ => usize::MAX,
        };
        let mut values_iter = self.values.iter().peekable();
        let mut line_names_iter = self.line_names.iter().peekable();
        // Unbounded counter loop: termination relies on `values_iter` running
        // out (the `None => break` arm below). `line_names` always has one more
        // entry than the number of serialized values, so the `unwrap()` holds.
        for idx in 0.. {
            let names = line_names_iter.next().unwrap(); // This should exist!
            concat_serialize_idents("[", "]", names, " ", dest)?;
            match self.auto_repeat {
                // Computed values keep repeat() in `auto_repeat`; emit it here.
                Some(ref repeat) if idx == auto_idx => {
                    if !names.is_empty() {
                        dest.write_str(" ")?;
                    }
                    repeat.to_css(dest)?;
                },
                _ => match values_iter.next() {
                    Some(value) => {
                        if !names.is_empty() {
                            dest.write_str(" ")?;
                        }
                        value.to_css(dest)?;
                    },
                    None => break,
                },
            }
            // Space-separate from the next component, unless this was the last
            // value and the trailing `<line-names>` is empty (or absent).
            if values_iter.peek().is_some() || line_names_iter.peek().map_or(false, |v| !v.is_empty()) {
                dest.write_str(" ")?;
            }
        }
        Ok(())
    }
}
impl HasViewportPercentage for TrackList<TrackSizeOrRepeat> {
    /// Returns true if any `<track-size>` or `<track-repeat>` component
    /// contains a viewport-percentage length (vw/vh/vmin/vmax).
    #[inline]
    fn has_viewport_percentage(&self) -> bool {
        // `iter()` already yields references, so the former `|ref v|` pattern
        // was a redundant double-reference; bind by value instead.
        self.values.iter().any(|v| v.has_viewport_percentage())
    }
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Layout for elements with a CSS `display` property of `flex`.
#![deny(unsafe_code)]
use app_units::{Au, MAX_AU};
use block::BlockFlow;
use context::{LayoutContext, SharedLayoutContext};
use display_list_builder::{DisplayListBuildState, FlexFlowDisplayListBuilding};
use euclid::Point2D;
use floats::FloatKind;
use flow;
use flow::{Flow, FlowClass, ImmutableFlowUtils, OpaqueFlow};
use flow::{INLINE_POSITION_IS_STATIC, IS_ABSOLUTELY_POSITIONED};
use fragment::{Fragment, FragmentBorderBoxIterator, Overflow};
use gfx::display_list::StackingContext;
use layout_debug;
use model::{Direction, IntrinsicISizes, MaybeAuto, MinMaxConstraint};
use model::{specified, specified_or_none};
use script_layout_interface::restyle_damage::{REFLOW, REFLOW_OUT_OF_FLOW};
use std::cmp::{max, min};
use std::ops::Range;
use std::sync::Arc;
use style::computed_values::{align_content, align_self, flex_direction, flex_wrap, justify_content};
use style::computed_values::border_collapse;
use style::context::{SharedStyleContext, StyleContext};
use style::logical_geometry::LogicalSize;
use style::properties::ServoComputedValues;
use style::values::computed::{LengthOrPercentage, LengthOrPercentageOrAuto};
use style::values::computed::{LengthOrPercentageOrAutoOrContent, LengthOrPercentageOrNone};
/// The size of an axis. May be a specified size, a min/max
/// constraint, or an unlimited size
#[derive(Debug)]
enum AxisSize {
    /// A fully-resolved, definite length.
    Definite(Au),
    /// An `auto` size, bounded by the given min/max constraint.
    MinMax(MinMaxConstraint),
    /// No usable constraint (e.g. a percentage with no resolved basis).
    Infinite,
}
impl AxisSize {
    /// Generate a new available cross or main axis size from the specified size of the container,
    /// containing block size, min constraint, and max constraint.
    ///
    /// A percentage or calc() size without a resolved containing size yields
    /// `Infinite`; `auto` defers to the min/max constraint.
    pub fn new(size: LengthOrPercentageOrAuto, content_size: Option<Au>, min: LengthOrPercentage,
               max: LengthOrPercentageOrNone) -> AxisSize {
        match size {
            LengthOrPercentageOrAuto::Length(length) => AxisSize::Definite(length),
            LengthOrPercentageOrAuto::Percentage(percent) => {
                match content_size {
                    Some(size) => AxisSize::Definite(size.scale_by(percent)),
                    None => AxisSize::Infinite
                }
            }
            LengthOrPercentageOrAuto::Calc(calc) => {
                match content_size {
                    // Include the fixed length term of the calc() expression in
                    // addition to the percentage term, matching how
                    // `from_flex_basis` below resolves Calc values. Previously
                    // only the percentage was used, silently dropping the
                    // length component of e.g. `calc(50% + 10px)`.
                    Some(size) => AxisSize::Definite(calc.length() + size.scale_by(calc.percentage())),
                    None => AxisSize::Infinite
                }
            }
            LengthOrPercentageOrAuto::Auto => {
                AxisSize::MinMax(MinMaxConstraint::new(content_size, min, max))
            }
        }
    }
}
/// Resolves the used value of `flex-basis` from the specified `flex-basis`,
/// the main-direction size property, and the (optional) containing size.
/// The result feeds the flex base size computation and indicates whether the
/// item's main size is definite after flex size resolving.
fn from_flex_basis(flex_basis: LengthOrPercentageOrAutoOrContent,
                   main_length: LengthOrPercentageOrAuto,
                   containing_length: Option<Au>) -> MaybeAuto {
    match flex_basis {
        // A fixed length never needs the containing size.
        LengthOrPercentageOrAutoOrContent::Length(length) => MaybeAuto::Specified(length),
        // Percentages and calc() resolve only against a known containing size.
        LengthOrPercentageOrAutoOrContent::Percentage(percent) => {
            match containing_length {
                Some(size) => MaybeAuto::Specified(size.scale_by(percent)),
                None => MaybeAuto::Auto,
            }
        }
        LengthOrPercentageOrAutoOrContent::Calc(calc) => {
            match containing_length {
                Some(size) => MaybeAuto::Specified(calc.length() + size.scale_by(calc.percentage())),
                None => MaybeAuto::Auto,
            }
        }
        // `content` keyword: left to content-based sizing.
        LengthOrPercentageOrAutoOrContent::Content => MaybeAuto::Auto,
        // `auto` falls back to the main-direction size property.
        LengthOrPercentageOrAutoOrContent::Auto => {
            match containing_length {
                Some(size) => MaybeAuto::from_style(main_length, size),
                None => {
                    // Without a containing size, only a fixed main length is usable.
                    if let LengthOrPercentageOrAuto::Length(length) = main_length {
                        MaybeAuto::Specified(length)
                    } else {
                        MaybeAuto::Auto
                    }
                }
            }
        }
    }
}
/// Represents a child in a flex container. Most fields here are used in
/// flex size resolving, and items are sorted by the 'order' property.
#[derive(Debug)]
struct FlexItem {
    /// Main size of a flex item, used to store results of the flexible length calculation.
    pub main_size: Au,
    /// Used flex base size.
    pub base_size: Au,
    /// The minimal size in main direction.
    pub min_size: Au,
    /// The maximal main size. If this property is not actually set by style,
    /// it will be the largest size available (MAX_AU), for code reuse.
    pub max_size: Au,
    /// The index of the actual flow in our child list.
    pub index: usize,
    /// The 'flex-grow' property of this item.
    pub flex_grow: f32,
    /// The 'flex-shrink' property of this item.
    pub flex_shrink: f32,
    /// The 'order' property of this item.
    pub order: i32,
    /// Whether the main size has met its constraint.
    pub is_frozen: bool,
    /// True if this flow has property 'visibility::collapse'.
    pub is_strut: bool
}
impl FlexItem {
    /// Creates a flex item for the child flow at `index`, reading the flex
    /// factors and `order` from its style. Sizes stay zeroed (and `max_size`
    /// unbounded) until `init_sizes` is called.
    pub fn new(index: usize, flow: &Flow) -> FlexItem {
        let style = &flow.as_block().fragment.style;
        let flex_grow = style.get_position().flex_grow;
        let flex_shrink = style.get_position().flex_shrink;
        let order = style.get_position().order;
        // TODO(stshine): for item with 'visibility:collapse', set is_strut to true.
        FlexItem {
            main_size: Au(0),
            base_size: Au(0),
            min_size: Au(0),
            max_size: MAX_AU,
            index: index,
            flex_grow: flex_grow,
            flex_shrink: flex_shrink,
            order: order,
            is_frozen: false,
            is_strut: false
        }
    }
    /// Initialize the used flex base size, minimal main size and maximal main size.
    /// For block mode container this method should be called in assign_block_size()
    /// pass so that the item has already been laid out.
    pub fn init_sizes(&mut self, flow: &mut Flow, containing_length: Au, direction: Direction) {
        let block = flow.as_mut_block();
        match direction {
            // TODO(stshine): the definition of min-{width, height} in style component
            // should change to LengthOrPercentageOrAuto for automatic implied minimal size.
            // https://drafts.csswg.org/css-flexbox-1/#min-size-auto
            Direction::Inline => {
                let basis = from_flex_basis(block.fragment.style.get_position().flex_basis,
                                            block.fragment.style.content_inline_size(),
                                            Some(containing_length));
                // These methods compute auto margins to zero length, which is exactly what we want.
                block.fragment.compute_border_and_padding(containing_length,
                                                          border_collapse::T::separate);
                block.fragment.compute_inline_direction_margins(containing_length);
                block.fragment.compute_block_direction_margins(containing_length);
                let (border_padding, margin) = block.fragment.surrounding_intrinsic_inline_size();
                // Preferred size minus surrounding extras, adjusted by the
                // `box-sizing` boundary so base/min/max compare consistently.
                let content_size = block.base.intrinsic_inline_sizes.preferred_inline_size
                                   - border_padding
                                   - margin
                                   + block.fragment.box_sizing_boundary(direction);
                self.base_size = basis.specified_or_default(content_size);
                // `max-*: none` resolves to MAX_AU (i.e. effectively unbounded).
                self.max_size = specified_or_none(block.fragment.style.max_inline_size(),
                                                  containing_length).unwrap_or(MAX_AU);
                self.min_size = specified(block.fragment.style.min_inline_size(),
                                          containing_length);
            }
            Direction::Block => {
                let basis = from_flex_basis(block.fragment.style.get_position().flex_basis,
                                            block.fragment.style.content_block_size(),
                                            Some(containing_length));
                // Border box already laid out in the block direction; strip the
                // border/padding back off and adjust for `box-sizing`.
                let content_size = block.fragment.border_box.size.block
                                   - block.fragment.border_padding.block_start_end()
                                   + block.fragment.box_sizing_boundary(direction);
                self.base_size = basis.specified_or_default(content_size);
                self.max_size = specified_or_none(block.fragment.style.max_block_size(),
                                                  containing_length).unwrap_or(MAX_AU);
                self.min_size = specified(block.fragment.style.min_block_size(),
                                          containing_length);
            }
        }
    }
    /// Returns the outer main size of the item, including paddings and margins,
    /// clamped by max and min size.
    pub fn outer_main_size(&self, flow: &Flow, direction: Direction) -> Au {
        let ref fragment = flow.as_block().fragment;
        let outer_width = match direction {
            Direction::Inline => {
                fragment.border_padding.inline_start_end() + fragment.margin.inline_start_end()
            }
            Direction::Block => {
                fragment.border_padding.block_start_end() + fragment.margin.block_start_end()
            }
        };
        // Clamp base size into [min, max], convert back across the box-sizing
        // boundary, then add border/padding/margin to get the outer size.
        max(self.min_size, min(self.base_size, self.max_size))
            - fragment.box_sizing_boundary(direction) + outer_width
    }
    /// Returns the number of auto margins in given direction.
    pub fn auto_margin_count(&self, flow: &Flow, direction: Direction) -> i32 {
        let margin = flow.as_block().fragment.style.logical_margin();
        let mut margin_count = 0;
        match direction {
            Direction::Inline => {
                if margin.inline_start == LengthOrPercentageOrAuto::Auto {
                    margin_count += 1;
                }
                if margin.inline_end == LengthOrPercentageOrAuto::Auto {
                    margin_count += 1;
                }
            }
            Direction::Block => {
                if margin.block_start == LengthOrPercentageOrAuto::Auto {
                    margin_count += 1;
                }
                if margin.block_end == LengthOrPercentageOrAuto::Auto {
                    margin_count += 1;
                }
            }
        }
        margin_count
    }
}
/// A line in a flex container.
// TODO(stshine): More fields are required to handle collapsed items and baseline alignment.
#[derive(Debug)]
struct FlexLine {
    /// Range of items belonging to this line in 'self.items'.
    pub range: Range<usize>,
    /// Remaining free space of this line; items will grow or shrink based on it being positive or negative.
    pub free_space: Au,
    /// The number of auto margins of items.
    pub auto_margin_count: i32,
    /// Line size in the block direction.
    pub cross_size: Au,
}
impl FlexLine {
    /// Creates a line over `range` of the container's item list, with its
    /// cross size left at zero until assign_block_size computes it.
    pub fn new(range: Range<usize>, free_space: Au, auto_margin_count: i32) -> FlexLine {
        FlexLine {
            range: range,
            auto_margin_count: auto_margin_count,
            free_space: free_space,
            cross_size: Au(0)
        }
    }
    /// This method implements the flexible lengths resolving algorithm.
    /// The 'collapse' parameter is used to indicate whether items with 'visibility: collapse'
    /// are included in length resolving. The resulting main size is stored in 'item.main_size'.
    /// https://drafts.csswg.org/css-flexbox/#resolve-flexible-lengths
    pub fn flex_resolve(&mut self, items: &mut [FlexItem], collapse: bool) {
        let mut total_grow = 0.0;
        let mut total_shrink = 0.0;
        // Sum of flex-shrink factors weighted by base size; per spec, larger
        // items absorb proportionally more of the shrinkage.
        let mut total_scaled = 0.0;
        let mut active_count = 0;
        // Iterate through items, collect total factors and freeze those that have already met
        // their constraints or won't grow/shrink in corresponding scenario.
        // https://drafts.csswg.org/css-flexbox/#resolve-flexible-lengths
        for item in items.iter_mut().filter(|i| !(i.is_strut && collapse)) {
            item.main_size = max(item.min_size, min(item.base_size, item.max_size));
            if (self.free_space > Au(0) && (item.flex_grow == 0.0 || item.base_size >= item.max_size)) ||
                (self.free_space < Au(0) && (item.flex_shrink == 0.0 || item.base_size <= item.min_size)) {
                item.is_frozen = true;
            } else {
                item.is_frozen = false;
                total_grow += item.flex_grow;
                total_shrink += item.flex_shrink;
                // The scaled factor is used to calculate flex shrink
                total_scaled += item.flex_shrink * item.base_size.0 as f32;
                active_count += 1;
            }
        }
        let initial_free_space = self.free_space;
        // Non-zero seed so the loop condition below passes on the first pass.
        let mut total_variation = Au(1);
        // If there is no remaining free space or all items are frozen, stop loop.
        while total_variation != Au(0) && self.free_space != Au(0) && active_count > 0 {
            self.free_space =
                // https://drafts.csswg.org/css-flexbox/#remaining-free-space
                if self.free_space > Au(0) {
                    min(initial_free_space.scale_by(total_grow), self.free_space)
                } else {
                    max(initial_free_space.scale_by(total_shrink), self.free_space)
                };
            total_variation = Au(0);
            for item in items.iter_mut().filter(|i| !i.is_frozen).filter(|i| !(i.is_strut && collapse)) {
                // Use this and the 'abs()' below to make the code work in both grow and shrink scenarios.
                let (factor, end_size) = if self.free_space > Au(0) {
                    (item.flex_grow / total_grow, item.max_size)
                } else {
                    (item.flex_shrink * item.base_size.0 as f32 / total_scaled, item.min_size)
                };
                let variation = self.free_space.scale_by(factor);
                if variation.0.abs() >= (end_size - item.main_size).0.abs() {
                    // Use constraint as the target main size, and freeze item.
                    // Its factors drop out of the running totals for the next pass.
                    total_variation += end_size - item.main_size;
                    item.main_size = end_size;
                    item.is_frozen = true;
                    active_count -= 1;
                    total_shrink -= item.flex_shrink;
                    total_grow -= item.flex_grow;
                    total_scaled -= item.flex_shrink * item.base_size.0 as f32;
                } else {
                    total_variation += variation;
                    item.main_size += variation;
                }
            }
            self.free_space -= total_variation;
        }
    }
}
/// A block with the CSS `display` property equal to `flex`.
#[derive(Debug)]
pub struct FlexFlow {
    /// Data common to all block flows.
    block_flow: BlockFlow,
    /// The logical axis which the main axis will be parallel with.
    /// The cross axis will be parallel with the opposite logical axis.
    main_mode: Direction,
    /// The available main axis size
    available_main_size: AxisSize,
    /// The available cross axis size
    available_cross_size: AxisSize,
    /// List of flex lines in the container.
    lines: Vec<FlexLine>,
    /// List of flex-items that belong to this flex-container
    items: Vec<FlexItem>,
    /// True if the flex-direction is *-reversed
    main_reverse: bool,
    /// True if this flex container can be multiline.
    is_wrappable: bool,
    /// True if the cross direction is reversed.
    cross_reverse: bool
}
impl FlexFlow {
pub fn from_fragment(fragment: Fragment,
flotation: Option<FloatKind>)
-> FlexFlow {
let main_mode;
let main_reverse;
let is_wrappable;
let cross_reverse;
{
let style = fragment.style();
let (mode, reverse) = match style.get_position().flex_direction {
flex_direction::T::row => (Direction::Inline, false),
flex_direction::T::row_reverse => (Direction::Inline, true),
flex_direction::T::column => (Direction::Block, false),
flex_direction::T::column_reverse => (Direction::Block, true),
};
main_mode = mode;
main_reverse =
reverse == style.writing_mode.is_bidi_ltr();
let (wrappable, reverse) = match fragment.style.get_position().flex_wrap {
flex_wrap::T::nowrap => (false, false),
flex_wrap::T::wrap => (true, false),
flex_wrap::T::wrap_reverse => (true, true),
};
is_wrappable = wrappable;
// TODO(stshine): Handle vertical writing mode.
cross_reverse = reverse;
}
FlexFlow {
block_flow: BlockFlow::from_fragment_and_float_kind(fragment, flotation),
main_mode: main_mode,
available_main_size: AxisSize::Infinite,
available_cross_size: AxisSize::Infinite,
lines: Vec::new(),
items: Vec::new(),
main_reverse: main_reverse,
is_wrappable: is_wrappable,
cross_reverse: cross_reverse
}
}
    /// Returns a line start after the last item that is already in a line.
    /// Note that when the container main size is infinite (i.e. a column flexbox with auto height),
    /// we do not need to do flex resolving and this can be considered as a fast path, so the
    /// 'container_size' param does not need to be 'None'. A line has to contain at least one item;
    /// (except this) if the container can be multi-line the sum of outer main size of items should
    /// be less than the container size; a line should be filled by items as much as possible.
    /// After being collected in a line an item should have its main sizes initialized.
    fn get_flex_line(&mut self, container_size: Au) -> Option<FlexLine> {
        // Resume after the last collected line, or at the first item.
        let start = self.lines.last().map(|line| line.range.end).unwrap_or(0);
        if start == self.items.len() {
            return None;
        }
        let mut end = start;
        let mut total_line_size = Au(0);
        let mut margin_count = 0;
        let items = &mut self.items[start..];
        let mut children = self.block_flow.base.children.random_access_mut();
        for mut item in items {
            let kid = children.get(item.index);
            item.init_sizes(kid, container_size, self.main_mode);
            let outer_main_size = item.outer_main_size(kid, self.main_mode);
            // Wrap to a new line when this item would overflow, but only if the
            // line already has at least one item and the container can wrap.
            if total_line_size + outer_main_size > container_size && end != start && self.is_wrappable {
                break;
            }
            margin_count += item.auto_margin_count(kid, self.main_mode);
            total_line_size += outer_main_size;
            end += 1;
        }
        let line = FlexLine::new(start..end, container_size - total_line_size, margin_count);
        Some(line)
    }
// TODO(zentner): This function should use flex-basis.
// Currently, this is the core of BlockFlow::bubble_inline_sizes() with all float logic
// stripped out, and max replaced with union_nonbreaking_inline.
fn inline_mode_bubble_inline_sizes(&mut self) {
let fixed_width = match self.block_flow.fragment.style().get_position().width {
LengthOrPercentageOrAuto::Length(_) => true,
_ => false,
};
let mut computation = self.block_flow.fragment.compute_intrinsic_inline_sizes();
if !fixed_width {
for kid in self.block_flow.base.children.iter_mut() {
let base = flow::mut_base(kid);
let is_absolutely_positioned = base.flags.contains(IS_ABSOLUTELY_POSITIONED);
if !is_absolutely_positioned {
let flex_item_inline_sizes = IntrinsicISizes {
minimum_inline_size: base.intrinsic_inline_sizes.minimum_inline_size,
preferred_inline_size: base.intrinsic_inline_sizes.preferred_inline_size,
};
computation.union_nonbreaking_inline(&flex_item_inline_sizes);
}
}
}
self.block_flow.base.intrinsic_inline_sizes = computation.finish();
}
// TODO(zentner): This function should use flex-basis.
// Currently, this is the core of BlockFlow::bubble_inline_sizes() with all float logic
// stripped out.
fn block_mode_bubble_inline_sizes(&mut self) {
let fixed_width = match self.block_flow.fragment.style().get_position().width {
LengthOrPercentageOrAuto::Length(_) => true,
_ => false,
};
let mut computation = self.block_flow.fragment.compute_intrinsic_inline_sizes();
if !fixed_width {
for kid in self.block_flow.base.children.iter_mut() {
let base = flow::mut_base(kid);
let is_absolutely_positioned = base.flags.contains(IS_ABSOLUTELY_POSITIONED);
if !is_absolutely_positioned {
computation.content_intrinsic_sizes.minimum_inline_size =
max(computation.content_intrinsic_sizes.minimum_inline_size,
base.intrinsic_inline_sizes.minimum_inline_size);
computation.content_intrinsic_sizes.preferred_inline_size =
max(computation.content_intrinsic_sizes.preferred_inline_size,
base.intrinsic_inline_sizes.preferred_inline_size);
}
}
}
self.block_flow.base.intrinsic_inline_sizes = computation.finish();
}
    // TODO(zentner): This function needs to be radically different for multi-line flexbox.
    // Currently, this is the core of BlockFlow::propagate_assigned_inline_size_to_children() with
    // all float and table logic stripped out.
    fn block_mode_assign_inline_sizes(&mut self,
                                      _shared_context: &SharedStyleContext,
                                      inline_start_content_edge: Au,
                                      inline_end_content_edge: Au,
                                      content_inline_size: Au) {
        let _scope = layout_debug_scope!("flex::block_mode_assign_inline_sizes");
        debug!("block_mode_assign_inline_sizes");
        // FIXME (mbrubeck): Get correct mode for absolute containing block
        let containing_block_mode = self.block_flow.base.writing_mode;
        // In block main mode (a column container) the main size is the block
        // size; a definite one becomes the children's explicit block size.
        let container_block_size = match self.available_main_size {
            AxisSize::Definite(length) => Some(length),
            _ => None
        };
        let container_inline_size = match self.available_cross_size {
            AxisSize::Definite(length) => length,
            AxisSize::MinMax(ref constraint) => constraint.clamp(content_inline_size),
            AxisSize::Infinite => content_inline_size
        };
        let mut children = self.block_flow.base.children.random_access_mut();
        for kid in &mut self.items {
            let kid_base = flow::mut_base(children.get(kid.index));
            kid_base.block_container_explicit_block_size = container_block_size;
            if kid_base.flags.contains(INLINE_POSITION_IS_STATIC) {
                // The inline-start margin edge of the child flow is at our inline-start content
                // edge, and its inline-size is our content inline-size.
                kid_base.position.start.i =
                    if kid_base.writing_mode.is_bidi_ltr() == containing_block_mode.is_bidi_ltr() {
                        inline_start_content_edge
                    } else {
                        // The kid's inline 'start' is at the parent's 'end'
                        inline_end_content_edge
                    };
            }
            kid_base.block_container_inline_size = container_inline_size;
            kid_base.block_container_writing_mode = containing_block_mode;
            // NOTE(review): this unconditionally overwrites the bidi-aware
            // position computed in the INLINE_POSITION_IS_STATIC branch above,
            // making that branch a no-op — confirm whether that is intended.
            kid_base.position.start.i = inline_start_content_edge;
        }
    }
    /// Assigns inline (main-axis) sizes and positions for a row container:
    /// collects items into lines, resolves flexible lengths per line, then
    /// distributes free space per `justify-content` and auto margins.
    fn inline_mode_assign_inline_sizes(&mut self,
                                       _shared_context: &SharedStyleContext,
                                       inline_start_content_edge: Au,
                                       _inline_end_content_edge: Au,
                                       content_inline_size: Au) {
        let _scope = layout_debug_scope!("flex::inline_mode_assign_inline_sizes");
        debug!("inline_mode_assign_inline_sizes");
        debug!("content_inline_size = {:?}", content_inline_size);
        let child_count = ImmutableFlowUtils::child_count(self as &Flow) as i32;
        debug!("child_count = {:?}", child_count);
        if child_count == 0 {
            return;
        }
        let inline_size = match self.available_main_size {
            AxisSize::Definite(length) => length,
            AxisSize::MinMax(ref constraint) => constraint.clamp(content_inline_size),
            AxisSize::Infinite => content_inline_size,
        };
        let container_mode = self.block_flow.base.block_container_writing_mode;
        self.block_flow.base.position.size.inline = inline_size;
        // Calculate non-auto block size to pass to children.
        let box_border = self.block_flow.fragment.box_sizing_boundary(Direction::Block);
        let parent_container_size = self.block_flow.explicit_block_containing_size(_shared_context);
        // https://drafts.csswg.org/css-ui-3/#box-sizing
        let explicit_content_size = self
            .block_flow
            .explicit_block_size(parent_container_size)
            .map(|x| max(x - box_border, Au(0)));
        let containing_block_text_align =
            self.block_flow.fragment.style().get_inheritedtext().text_align;
        // Collect and process one line at a time until all items are consumed.
        while let Some(mut line) = self.get_flex_line(inline_size) {
            let items = &mut self.items[line.range.clone()];
            line.flex_resolve(items, false);
            // TODO(stshine): if this flex line contain children that have
            // property visibility:collapse, exclude them and resolve again.
            let item_count = items.len() as i32;
            let mut cur_i = inline_start_content_edge;
            // Spacing between adjacent items; only positive free space with no
            // auto margins is distributed via justify-content.
            let item_interval = if line.free_space >= Au(0) && line.auto_margin_count == 0 {
                match self.block_flow.fragment.style().get_position().justify_content {
                    justify_content::T::space_between => {
                        if item_count == 1 {
                            Au(0)
                        } else {
                            line.free_space / (item_count - 1)
                        }
                    }
                    justify_content::T::space_around => {
                        line.free_space / item_count
                    }
                    _ => Au(0),
                }
            } else {
                Au(0)
            };
            match self.block_flow.fragment.style().get_position().justify_content {
                // Overflow equally in both ends of line.
                justify_content::T::center | justify_content::T::space_around => {
                    cur_i += (line.free_space - item_interval * (item_count - 1)) / 2;
                }
                justify_content::T::flex_end => {
                    cur_i += line.free_space;
                }
                _ => {}
            }
            let mut children = self.block_flow.base.children.random_access_mut();
            for item in items.iter_mut() {
                let mut block = children.get(item.index).as_mut_block();
                block.base.block_container_writing_mode = container_mode;
                block.base.block_container_inline_size = inline_size;
                block.base.block_container_explicit_block_size = explicit_content_size;
                // Per CSS 2.1 § 16.3.1, text alignment propagates to all children in flow.
                //
                // TODO(#2265, pcwalton): Do this in the cascade instead.
                block.base.flags.set_text_align(containing_block_text_align);
                // FIXME(stshine): should this be done during construction?
                block.mark_as_flex();
                let margin = block.fragment.style().logical_margin();
                // Positive free space is split evenly among auto margins;
                // negative free space makes auto margins resolve to zero.
                let auto_len =
                    if line.auto_margin_count == 0 || line.free_space <= Au(0) {
                        Au(0)
                    } else {
                        line.free_space / line.auto_margin_count
                    };
                let margin_inline_start = MaybeAuto::from_style(margin.inline_start, inline_size)
                    .specified_or_default(auto_len);
                let margin_inline_end = MaybeAuto::from_style(margin.inline_end, inline_size)
                    .specified_or_default(auto_len);
                // Convert the resolved main size back across the box-sizing
                // boundary into a border-box inline size.
                let item_inline_size = item.main_size
                    - block.fragment.box_sizing_boundary(self.main_mode)
                    + block.fragment.border_padding.inline_start_end();
                let item_outer_size = item_inline_size + block.fragment.margin.inline_start_end();
                block.fragment.margin.inline_start = margin_inline_start;
                block.fragment.margin.inline_end = margin_inline_end;
                block.fragment.border_box.start.i = margin_inline_start;
                block.fragment.border_box.size.inline = item_inline_size;
                // For a reversed main direction, mirror the position about the
                // center of the content box.
                block.base.position.start.i = if !self.main_reverse {
                    cur_i
                } else {
                    inline_start_content_edge * 2 + content_inline_size - cur_i - item_outer_size
                };
                block.base.position.size.inline = item_outer_size;
                cur_i += item_outer_size + item_interval;
            }
            self.lines.push(line);
        }
    }
// TODO(zentner): This function should actually flex elements!
fn block_mode_assign_block_size<'a>(&mut self, layout_context: &'a LayoutContext<'a>) {
let mut cur_b = if !self.main_reverse {
self.block_flow.fragment.border_padding.block_start
} else {
self.block_flow.fragment.border_box.size.block
};
let mut children = self.block_flow.base.children.random_access_mut();
for item in &mut self.items {
let mut base = flow::mut_base(children.get(item.index));
if !self.main_reverse {
base.position.start.b = cur_b;
cur_b = cur_b + base.position.size.block;
} else {
cur_b = cur_b - base.position.size.block;
base.position.start.b = cur_b;
}
}
self.block_flow.assign_block_size(layout_context)
}
/// Assigns block (cross) sizes and positions for a row flexbox: computes each
/// line's cross size, distributes free space between lines per
/// `align-content`, positions each item within its line per
/// `align-self`/auto margins, and finally sets this flow's own block size.
fn inline_mode_assign_block_size<'a>(&mut self, layout_context: &'a LayoutContext<'a>) {
    let _scope = layout_debug_scope!("flex::inline_mode_assign_block_size");
    let line_count = self.lines.len() as i32;
    let line_align = self.block_flow.fragment.style().get_position().align_content;
    // Running block-direction pen position, starting at the content edge.
    let mut cur_b = self.block_flow.fragment.border_padding.block_start;
    let mut total_cross_size = Au(0);
    let mut line_interval = Au(0);
    {
        // Pass 1: each line's cross size is the largest outer block size
        // (border box + block margins) among the items on that line.
        let mut children = self.block_flow.base.children.random_access_mut();
        for line in self.lines.iter_mut() {
            for item in &self.items[line.range.clone()] {
                let fragment = &children.get(item.index).as_block().fragment;
                line.cross_size = max(line.cross_size,
                                      fragment.border_box.size.block +
                                      fragment.margin.block_start_end());
            }
            total_cross_size += line.cross_size;
        }
    }
    let box_border = self.block_flow.fragment.box_sizing_boundary(Direction::Block);
    let parent_container_size =
        self.block_flow.explicit_block_containing_size(layout_context.shared_context());
    // https://drafts.csswg.org/css-ui-3/#box-sizing
    let explicit_content_size = self
        .block_flow
        .explicit_block_size(parent_container_size)
        .map(|x| max(x - box_border, Au(0)));
    if let Some(container_block_size) = explicit_content_size {
        // The container has a definite block size: distribute the leftover
        // space between/around lines according to `align-content`.
        let free_space = container_block_size - total_cross_size;
        total_cross_size = container_block_size;
        if line_align == align_content::T::stretch && free_space > Au(0) {
            for line in self.lines.iter_mut() {
                line.cross_size += free_space / line_count;
            }
        }
        // Gap inserted after every line except the last one.
        line_interval = match line_align {
            align_content::T::space_between => {
                if line_count == 1 {
                    Au(0)
                } else {
                    free_space / (line_count - 1)
                }
            }
            align_content::T::space_around => {
                free_space / line_count
            }
            _ => Au(0),
        };
        // Initial offset before the first line.
        match line_align {
            align_content::T::center | align_content::T::space_around => {
                cur_b += (free_space - line_interval * (line_count - 1)) / 2;
            }
            align_content::T::flex_end => {
                cur_b += free_space;
            }
            _ => {}
        }
    }
    // Pass 2: position every item inside its line.
    let mut children = self.block_flow.base.children.random_access_mut();
    for line in &self.lines {
        for item in self.items[line.range.clone()].iter_mut() {
            let block = children.get(item.index).as_mut_block();
            let auto_margin_count = item.auto_margin_count(block, Direction::Block);
            let margin = block.fragment.style().logical_margin();
            let mut margin_block_start = block.fragment.margin.block_start;
            let mut margin_block_end = block.fragment.margin.block_end;
            // Leftover cross space for this item within its line.
            let mut free_space = line.cross_size - block.base.position.size.block
                - block.fragment.margin.block_start_end();
            // The spec is a little vague here, but if I understand it correctly, the outer
            // cross size of item should equal to the line size if any auto margin exists.
            // https://drafts.csswg.org/css-flexbox/#algo-cross-margins
            if auto_margin_count > 0 {
                if margin.block_start == LengthOrPercentageOrAuto::Auto {
                    margin_block_start = if free_space < Au(0) {
                        Au(0)
                    } else {
                        free_space / auto_margin_count
                    };
                }
                margin_block_end = line.cross_size - margin_block_start - block.base.position.size.block;
                free_space = Au(0);
            }
            let self_align = block.fragment.style().get_position().align_self;
            if self_align == align_self::T::stretch &&
               block.fragment.style().content_block_size() == LengthOrPercentageOrAuto::Auto {
                // Stretch an auto-sized item to fill the line's cross size.
                free_space = Au(0);
                block.base.block_container_explicit_block_size = Some(line.cross_size);
                block.base.position.size.block =
                    line.cross_size - margin_block_start - margin_block_end;
                block.fragment.border_box.size.block = block.base.position.size.block;
                // FIXME(stshine): item with 'align-self: stretch' and auto cross size should act
                // as if it has a fixed cross size, all child blocks should resolve against it.
                // block.assign_block_size(layout_context);
            }
            // Mirror the block position when the cross direction is reversed
            // (`flex-wrap: wrap-reverse`).
            block.base.position.start.b = margin_block_start +
                if !self.cross_reverse {
                    cur_b
                } else {
                    self.block_flow.fragment.border_padding.block_start * 2
                    + total_cross_size - cur_b - line.cross_size
                };
            // TODO(stshine): support baseline alignment.
            if free_space != Au(0) {
                // `align-self` offset within the remaining cross space.
                let flex_cross = match self_align {
                    align_self::T::flex_end => free_space,
                    align_self::T::center => free_space / 2,
                    _ => Au(0),
                };
                block.base.position.start.b +=
                    if !self.cross_reverse {
                        flex_cross
                    } else {
                        free_space - flex_cross
                    };
            }
        }
        cur_b += line_interval + line.cross_size;
    }
    // The container's block size is the sum of line cross sizes plus its own
    // border and padding.
    let total_block_size = total_cross_size + self.block_flow.fragment.border_padding.block_start_end();
    self.block_flow.fragment.border_box.size.block = total_block_size;
    self.block_flow.base.position.size.block = total_block_size;
}
}
/// `Flow` trait implementation for flex containers. Most methods delegate to
/// the underlying `BlockFlow`; the sizing entry points dispatch to the
/// inline-mode (row) or block-mode (column) helpers based on `main_mode`.
impl Flow for FlexFlow {
    fn class(&self) -> FlowClass {
        FlowClass::Flex
    }

    fn as_block(&self) -> &BlockFlow {
        &self.block_flow
    }

    fn as_mut_block(&mut self) -> &mut BlockFlow {
        &mut self.block_flow
    }

    fn mark_as_root(&mut self) {
        self.block_flow.mark_as_root();
    }

    /// Bottom-up intrinsic inline-size pass. Rebuilds the flex item list
    /// (skipping absolutely positioned children) sorted by `order`, then
    /// accumulates intrinsic sizes per the main axis direction.
    fn bubble_inline_sizes(&mut self) {
        let _scope = layout_debug_scope!("flex::bubble_inline_sizes {:x}",
                                         self.block_flow.base.debug_id());
        // Flexbox Section 9.0: Generate anonymous flex items:
        // This part was handled in the flow constructor.
        // Flexbox Section 9.1: Re-order flex items according to their order.
        // FIXME(stshine): This should be done during flow construction.
        let mut items: Vec<FlexItem> =
            self.block_flow
                .base
                .children
                .iter()
                .enumerate()
                .filter(|&(_, flow)| {
                    // Absolutely positioned children are not flex items.
                    !flow.as_block().base.flags.contains(IS_ABSOLUTELY_POSITIONED)
                })
                .map(|(index, flow)| FlexItem::new(index, flow))
                .collect();
        // `sort_by_key` is stable, so items with equal `order` keep DOM order.
        items.sort_by_key(|item| item.order);
        self.items = items;
        match self.main_mode {
            Direction::Inline => self.inline_mode_bubble_inline_sizes(),
            Direction::Block => self.block_mode_bubble_inline_sizes()
        }
    }

    /// Top-down inline-size pass: computes this container's used inline size,
    /// derives the available main/cross axis sizes from style, and propagates
    /// inline sizes to the items via the mode-specific helper.
    fn assign_inline_sizes(&mut self, shared_context: &SharedStyleContext) {
        let _scope = layout_debug_scope!("flex::assign_inline_sizes {:x}", self.block_flow.base.debug_id());
        debug!("assign_inline_sizes");
        if !self.block_flow.base.restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW) {
            return
        }
        // Our inline-size was set to the inline-size of the containing block by the flow's parent.
        // Now compute the real value.
        let containing_block_inline_size = self.block_flow.base.block_container_inline_size;
        self.block_flow.compute_used_inline_size(shared_context, containing_block_inline_size);
        if self.block_flow.base.flags.is_float() {
            self.block_flow.float.as_mut().unwrap().containing_inline_size = containing_block_inline_size
        }
        let (available_block_size, available_inline_size) = {
            let style = &self.block_flow.fragment.style;
            // In a vertical writing mode the physical `width` is the logical
            // block size and vice versa.
            let (specified_block_size, specified_inline_size) = if style.writing_mode.is_vertical() {
                (style.get_position().width, style.get_position().height)
            } else {
                (style.get_position().height, style.get_position().width)
            };
            let available_inline_size = AxisSize::new(specified_inline_size,
                                                      Some(self.block_flow.base.block_container_inline_size),
                                                      style.min_inline_size(),
                                                      style.max_inline_size());
            let available_block_size = AxisSize::new(specified_block_size,
                                                     self.block_flow.base.block_container_explicit_block_size,
                                                     style.min_block_size(),
                                                     style.max_block_size());
            (available_block_size, available_inline_size)
        };
        // Move in from the inline-start border edge.
        let inline_start_content_edge = self.block_flow.fragment.border_box.start.i +
            self.block_flow.fragment.border_padding.inline_start;
        debug!("inline_start_content_edge = {:?}", inline_start_content_edge);
        let padding_and_borders = self.block_flow.fragment.border_padding.inline_start_end();
        // Distance from the inline-end margin edge to the inline-end content edge.
        let inline_end_content_edge =
            self.block_flow.fragment.margin.inline_end +
            self.block_flow.fragment.border_padding.inline_end;
        debug!("padding_and_borders = {:?}", padding_and_borders);
        debug!("self.block_flow.fragment.border_box.size.inline = {:?}",
               self.block_flow.fragment.border_box.size.inline);
        let content_inline_size = self.block_flow.fragment.border_box.size.inline - padding_and_borders;
        match self.main_mode {
            Direction::Inline => {
                self.available_main_size = available_inline_size;
                self.available_cross_size = available_block_size;
                self.inline_mode_assign_inline_sizes(shared_context,
                                                     inline_start_content_edge,
                                                     inline_end_content_edge,
                                                     content_inline_size)
            }
            Direction::Block => {
                self.available_main_size = available_block_size;
                self.available_cross_size = available_inline_size;
                self.block_mode_assign_inline_sizes(shared_context,
                                                    inline_start_content_edge,
                                                    inline_end_content_edge,
                                                    content_inline_size)
            }
        }
    }

    /// Runs the base block-size pass, then the mode-specific item placement.
    fn assign_block_size<'a>(&mut self, layout_context: &'a LayoutContext<'a>) {
        self.block_flow.assign_block_size(layout_context);
        match self.main_mode {
            Direction::Inline => self.inline_mode_assign_block_size(layout_context),
            Direction::Block => self.block_mode_assign_block_size(),
        }
    }

    fn compute_absolute_position(&mut self, layout_context: &SharedLayoutContext) {
        self.block_flow.compute_absolute_position(layout_context)
    }

    fn place_float_if_applicable<'a>(&mut self) {
        self.block_flow.place_float_if_applicable()
    }

    fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au) {
        self.block_flow.update_late_computed_inline_position_if_necessary(inline_position)
    }

    fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au) {
        self.block_flow.update_late_computed_block_position_if_necessary(block_position)
    }

    fn build_display_list(&mut self, state: &mut DisplayListBuildState) {
        self.build_display_list_for_flex(state);
    }

    fn collect_stacking_contexts(&mut self, parent: &mut StackingContext) {
        self.block_flow.collect_stacking_contexts(parent);
    }

    fn repair_style(&mut self, new_style: &Arc<ServoComputedValues>) {
        self.block_flow.repair_style(new_style)
    }

    fn compute_overflow(&self) -> Overflow {
        self.block_flow.compute_overflow()
    }

    fn generated_containing_block_size(&self, flow: OpaqueFlow) -> LogicalSize<Au> {
        self.block_flow.generated_containing_block_size(flow)
    }

    fn iterate_through_fragment_border_boxes(&self,
                                             iterator: &mut FragmentBorderBoxIterator,
                                             level: i32,
                                             stacking_context_position: &Point2D<Au>) {
        self.block_flow.iterate_through_fragment_border_boxes(iterator, level, stacking_context_position);
    }

    fn mutate_fragments(&mut self, mutator: &mut FnMut(&mut Fragment)) {
        self.block_flow.mutate_fragments(mutator);
    }
}
layout: Don't use `BlockFlow::assign_block_size()` when assigning flex
flows' block sizes.
This ensures that we never collapse margins for flex flows.
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Layout for elements with a CSS `display` property of `flex`.
#![deny(unsafe_code)]
use app_units::{Au, MAX_AU};
use block::{BlockFlow, MarginsMayCollapseFlag};
use context::{LayoutContext, SharedLayoutContext};
use display_list_builder::{DisplayListBuildState, FlexFlowDisplayListBuilding};
use euclid::Point2D;
use floats::FloatKind;
use flow;
use flow::{Flow, FlowClass, ImmutableFlowUtils, OpaqueFlow};
use flow::{INLINE_POSITION_IS_STATIC, IS_ABSOLUTELY_POSITIONED};
use fragment::{Fragment, FragmentBorderBoxIterator, Overflow};
use gfx::display_list::StackingContext;
use layout_debug;
use model::{Direction, IntrinsicISizes, MaybeAuto, MinMaxConstraint};
use model::{specified, specified_or_none};
use script_layout_interface::restyle_damage::{REFLOW, REFLOW_OUT_OF_FLOW};
use std::cmp::{max, min};
use std::ops::Range;
use std::sync::Arc;
use style::computed_values::{align_content, align_self, flex_direction, flex_wrap, justify_content};
use style::computed_values::border_collapse;
use style::context::{SharedStyleContext, StyleContext};
use style::logical_geometry::LogicalSize;
use style::properties::ServoComputedValues;
use style::values::computed::{LengthOrPercentage, LengthOrPercentageOrAuto};
use style::values::computed::{LengthOrPercentageOrAutoOrContent, LengthOrPercentageOrNone};
/// The size of an axis. May be a specified size, a min/max
/// constraint, or an unlimited size
#[derive(Debug)]
enum AxisSize {
    /// A definite, resolved length.
    Definite(Au),
    /// An `auto` size, bounded by the `min-*`/`max-*` properties.
    MinMax(MinMaxConstraint),
    /// No usable constraint (e.g. a percentage with no resolvable basis).
    Infinite,
}
impl AxisSize {
    /// Generate a new available cross or main axis size from the specified size of the container,
    /// containing block size, min constraint, and max constraint
    pub fn new(size: LengthOrPercentageOrAuto, content_size: Option<Au>, min: LengthOrPercentage,
               max: LengthOrPercentageOrNone) -> AxisSize {
        match size {
            // A fixed length is always definite.
            LengthOrPercentageOrAuto::Length(px) => AxisSize::Definite(px),
            // Percentages resolve against the containing block size; without
            // one they impose no constraint at all.
            LengthOrPercentageOrAuto::Percentage(fraction) => {
                content_size.map_or(AxisSize::Infinite,
                                    |base| AxisSize::Definite(base.scale_by(fraction)))
            }
            // calc() is treated like its percentage component here.
            LengthOrPercentageOrAuto::Calc(expr) => {
                content_size.map_or(AxisSize::Infinite,
                                    |base| AxisSize::Definite(base.scale_by(expr.percentage())))
            }
            // `auto` falls back to the min/max constraint.
            LengthOrPercentageOrAuto::Auto => {
                AxisSize::MinMax(MinMaxConstraint::new(content_size, min, max))
            }
        }
    }
}
/// This function accepts the flex-basis and the size property in main direction from style,
/// and the container size, then return the used value of flex basis. it can be used to help
/// determining the flex base size and to indicate whether the main size of the item
/// is definite after flex size resolving.
fn from_flex_basis(flex_basis: LengthOrPercentageOrAutoOrContent,
                   main_length: LengthOrPercentageOrAuto,
                   containing_length: Option<Au>) -> MaybeAuto {
    match flex_basis {
        // An explicit length is used as-is.
        LengthOrPercentageOrAutoOrContent::Length(length) =>
            MaybeAuto::Specified(length),
        // Percentages and calc() need a containing length to resolve;
        // otherwise the basis stays automatic.
        LengthOrPercentageOrAutoOrContent::Percentage(percent) => {
            match containing_length {
                Some(base) => MaybeAuto::Specified(base.scale_by(percent)),
                None => MaybeAuto::Auto,
            }
        }
        LengthOrPercentageOrAutoOrContent::Calc(calc) => {
            match containing_length {
                Some(base) =>
                    MaybeAuto::Specified(calc.length() + base.scale_by(calc.percentage())),
                None => MaybeAuto::Auto,
            }
        }
        // 'content' sizing is treated as automatic here.
        LengthOrPercentageOrAutoOrContent::Content =>
            MaybeAuto::Auto,
        // `flex-basis: auto` defers to the main size property.
        LengthOrPercentageOrAutoOrContent::Auto => {
            match containing_length {
                Some(base) => MaybeAuto::from_style(main_length, base),
                None => {
                    // Without a containing length, only a fixed main size
                    // can be used.
                    if let LengthOrPercentageOrAuto::Length(length) = main_length {
                        MaybeAuto::Specified(length)
                    } else {
                        MaybeAuto::Auto
                    }
                }
            }
        }
    }
}
/// Represents a child in a flex container. Most fields here are used in
/// flex size resolving, and items are sorted by the 'order' property.
#[derive(Debug)]
struct FlexItem {
    /// Main size of a flex item, used to store results of flexible length calculation.
    pub main_size: Au,
    /// Used flex base size.
    pub base_size: Au,
    /// The minimal size in main direction.
    pub min_size: Au,
    /// The maximal main size. If this property is not actually set by style,
    /// it defaults to the largest representable size (`MAX_AU`) so the same
    /// clamping code can be reused.
    pub max_size: Au,
    /// The index of the actual flow in our child list.
    pub index: usize,
    /// The 'flex-grow' property of this item.
    pub flex_grow: f32,
    /// The 'flex-shrink' property of this item.
    pub flex_shrink: f32,
    /// The 'order' property of this item.
    pub order: i32,
    /// Whether the main size has met its constraint.
    pub is_frozen: bool,
    /// True if this flow has property 'visibility::collapse'.
    pub is_strut: bool
}
impl FlexItem {
    /// Creates a new flex item for the child flow at `index`, reading the
    /// flex factors and `order` from its style. Sizes are initialized to
    /// placeholder values until `init_sizes` is called.
    pub fn new(index: usize, flow: &Flow) -> FlexItem {
        let style = &flow.as_block().fragment.style;
        let flex_grow = style.get_position().flex_grow;
        let flex_shrink = style.get_position().flex_shrink;
        let order = style.get_position().order;
        // TODO(stshine): for item with 'visibility:collapse', set is_strut to true.
        FlexItem {
            main_size: Au(0),
            base_size: Au(0),
            min_size: Au(0),
            max_size: MAX_AU,
            index: index,
            flex_grow: flex_grow,
            flex_shrink: flex_shrink,
            order: order,
            is_frozen: false,
            is_strut: false
        }
    }

    /// Initialize the used flex base size, minimal main size and maximal main size.
    /// For block mode container this method should be called in assign_block_size()
    /// pass so that the item has already been layouted.
    pub fn init_sizes(&mut self, flow: &mut Flow, containing_length: Au, direction: Direction) {
        let block = flow.as_mut_block();
        match direction {
            // TODO(stshine): the definition of min-{width, height} in style component
            // should change to LengthOrPercentageOrAuto for automatic implied minimal size.
            // https://drafts.csswg.org/css-flexbox-1/#min-size-auto
            Direction::Inline => {
                let basis = from_flex_basis(block.fragment.style.get_position().flex_basis,
                                            block.fragment.style.content_inline_size(),
                                            Some(containing_length));
                // These methods compute auto margins to zero length, which is exactly what we want.
                block.fragment.compute_border_and_padding(containing_length,
                                                          border_collapse::T::separate);
                block.fragment.compute_inline_direction_margins(containing_length);
                block.fragment.compute_block_direction_margins(containing_length);
                let (border_padding, margin) = block.fragment.surrounding_intrinsic_inline_size();
                // Content-box fallback size derived from the preferred
                // intrinsic inline size, adjusted for box-sizing.
                let content_size = block.base.intrinsic_inline_sizes.preferred_inline_size
                    - border_padding
                    - margin
                    + block.fragment.box_sizing_boundary(direction);
                self.base_size = basis.specified_or_default(content_size);
                self.max_size = specified_or_none(block.fragment.style.max_inline_size(),
                                                  containing_length).unwrap_or(MAX_AU);
                self.min_size = specified(block.fragment.style.min_inline_size(),
                                          containing_length);
            }
            Direction::Block => {
                let basis = from_flex_basis(block.fragment.style.get_position().flex_basis,
                                            block.fragment.style.content_block_size(),
                                            Some(containing_length));
                // The item is already laid out in this mode, so its border
                // box block size is available as the fallback content size.
                let content_size = block.fragment.border_box.size.block
                    - block.fragment.border_padding.block_start_end()
                    + block.fragment.box_sizing_boundary(direction);
                self.base_size = basis.specified_or_default(content_size);
                self.max_size = specified_or_none(block.fragment.style.max_block_size(),
                                                  containing_length).unwrap_or(MAX_AU);
                self.min_size = specified(block.fragment.style.min_block_size(),
                                          containing_length);
            }
        }
    }

    /// Returns the outer main size of the item, including paddings and margins,
    /// clamped by max and min size.
    pub fn outer_main_size(&self, flow: &Flow, direction: Direction) -> Au {
        let ref fragment = flow.as_block().fragment;
        let outer_width = match direction {
            Direction::Inline => {
                fragment.border_padding.inline_start_end() + fragment.margin.inline_start_end()
            }
            Direction::Block => {
                fragment.border_padding.block_start_end() + fragment.margin.block_start_end()
            }
        };
        // Clamp base size to [min_size, max_size], then convert from the
        // box-sizing box to the outer (margin) box.
        max(self.min_size, min(self.base_size, self.max_size))
            - fragment.box_sizing_boundary(direction) + outer_width
    }

    /// Returns the number of auto margins in given direction.
    pub fn auto_margin_count(&self, flow: &Flow, direction: Direction) -> i32 {
        let margin = flow.as_block().fragment.style.logical_margin();
        let mut margin_count = 0;
        match direction {
            Direction::Inline => {
                if margin.inline_start == LengthOrPercentageOrAuto::Auto {
                    margin_count += 1;
                }
                if margin.inline_end == LengthOrPercentageOrAuto::Auto {
                    margin_count += 1;
                }
            }
            Direction::Block => {
                if margin.block_start == LengthOrPercentageOrAuto::Auto {
                    margin_count += 1;
                }
                if margin.block_end == LengthOrPercentageOrAuto::Auto {
                    margin_count += 1;
                }
            }
        }
        margin_count
    }
}
/// A line in a flex container.
// TODO(stshine): More fields are required to handle collapsed items and baseline alignment.
#[derive(Debug)]
struct FlexLine {
    /// Range of items belonging to this line in 'self.items'.
    pub range: Range<usize>,
    /// Remaining free space of this line; items will grow or shrink based on it being
    /// positive or negative.
    pub free_space: Au,
    /// The number of auto margins of items.
    pub auto_margin_count: i32,
    /// Line size in the block direction.
    pub cross_size: Au,
}
impl FlexLine {
    /// Creates a new line covering `range` of the item list, with the given
    /// initial free space and auto-margin count. The cross size starts at
    /// zero and is computed later.
    pub fn new(range: Range<usize>, free_space: Au, auto_margin_count: i32) -> FlexLine {
        FlexLine {
            range: range,
            auto_margin_count: auto_margin_count,
            free_space: free_space,
            cross_size: Au(0)
        }
    }

    /// This method implements the flexible lengths resolving algorithm.
    /// The 'collapse' parameter is used to indicate whether items with 'visibility: collapse'
    /// is included in length resolving. The result main size is stored in 'item.main_size'.
    /// https://drafts.csswg.org/css-flexbox/#resolve-flexible-lengths
    pub fn flex_resolve(&mut self, items: &mut [FlexItem], collapse: bool) {
        let mut total_grow = 0.0;
        let mut total_shrink = 0.0;
        let mut total_scaled = 0.0;
        let mut active_count = 0;
        // Iterate through items, collect total factors and freeze those that have already met
        // their constraints or won't grow/shrink in corresponding scenario.
        // https://drafts.csswg.org/css-flexbox/#resolve-flexible-lengths
        for item in items.iter_mut().filter(|i| !(i.is_strut && collapse)) {
            // Start from the clamped base size.
            item.main_size = max(item.min_size, min(item.base_size, item.max_size));
            if (self.free_space > Au(0) && (item.flex_grow == 0.0 || item.base_size >= item.max_size)) ||
                    (self.free_space < Au(0) && (item.flex_shrink == 0.0 || item.base_size <= item.min_size)) {
                item.is_frozen = true;
            } else {
                item.is_frozen = false;
                total_grow += item.flex_grow;
                total_shrink += item.flex_shrink;
                // The scaled factor is used to calculate flex shrink
                total_scaled += item.flex_shrink * item.base_size.0 as f32;
                active_count += 1;
            }
        }
        let initial_free_space = self.free_space;
        // Seeded non-zero so the loop runs at least once.
        let mut total_variation = Au(1);
        // If there is no remaining free space or all items are frozen, stop loop.
        while total_variation != Au(0) && self.free_space != Au(0) && active_count > 0 {
            self.free_space =
                // https://drafts.csswg.org/css-flexbox/#remaining-free-space
                if self.free_space > Au(0) {
                    min(initial_free_space.scale_by(total_grow), self.free_space)
                } else {
                    max(initial_free_space.scale_by(total_shrink), self.free_space)
                };
            total_variation = Au(0);
            for item in items.iter_mut().filter(|i| !i.is_frozen).filter(|i| !(i.is_strut && collapse)) {
                // Use this and the 'abs()' below to make the code work in both grow and shrink scenarios.
                let (factor, end_size) = if self.free_space > Au(0) {
                    (item.flex_grow / total_grow, item.max_size)
                } else {
                    (item.flex_shrink * item.base_size.0 as f32 / total_scaled, item.min_size)
                };
                let variation = self.free_space.scale_by(factor);
                if variation.0.abs() >= (end_size - item.main_size).0.abs() {
                    // Use constraint as the target main size, and freeze item.
                    total_variation += end_size - item.main_size;
                    item.main_size = end_size;
                    item.is_frozen = true;
                    active_count -= 1;
                    // Remove the frozen item's factors from the totals.
                    total_shrink -= item.flex_shrink;
                    total_grow -= item.flex_grow;
                    total_scaled -= item.flex_shrink * item.base_size.0 as f32;
                } else {
                    total_variation += variation;
                    item.main_size += variation;
                }
            }
            self.free_space -= total_variation;
        }
    }
}
/// A block with the CSS `display` property equal to `flex`.
#[derive(Debug)]
pub struct FlexFlow {
    /// Data common to all block flows.
    block_flow: BlockFlow,
    /// The logical axis which the main axis will be parallel with.
    /// The cross axis will be parallel with the opposite logical axis.
    main_mode: Direction,
    /// The available main axis size
    available_main_size: AxisSize,
    /// The available cross axis size
    available_cross_size: AxisSize,
    /// List of flex lines in the container.
    lines: Vec<FlexLine>,
    /// List of flex-items that belong to this flex-container
    items: Vec<FlexItem>,
    /// True if items run against the logical main-axis direction; derived in
    /// `from_fragment` from `flex-direction: *-reverse` combined with the
    /// writing mode's bidi direction.
    main_reverse: bool,
    /// True if this flex container can be multiline.
    is_wrappable: bool,
    /// True if the cross direction is reversed (`flex-wrap: wrap-reverse`).
    cross_reverse: bool
}
impl FlexFlow {
/// Builds a `FlexFlow` from a fragment, decoding `flex-direction` and
/// `flex-wrap` into the main axis, reversal, and wrapping flags.
pub fn from_fragment(fragment: Fragment,
                     flotation: Option<FloatKind>)
                     -> FlexFlow {
    let (main_mode, main_reverse, is_wrappable, cross_reverse) = {
        let style = fragment.style();
        // `flex-direction` picks the main axis and whether it is reversed.
        let (mode, direction_reversed) = match style.get_position().flex_direction {
            flex_direction::T::row => (Direction::Inline, false),
            flex_direction::T::row_reverse => (Direction::Inline, true),
            flex_direction::T::column => (Direction::Block, false),
            flex_direction::T::column_reverse => (Direction::Block, true),
        };
        // `flex-wrap` controls multi-line layout and cross-axis reversal.
        let (wrappable, wrap_reversed) = match style.get_position().flex_wrap {
            flex_wrap::T::nowrap => (false, false),
            flex_wrap::T::wrap => (true, false),
            flex_wrap::T::wrap_reverse => (true, true),
        };
        // The effective main reversal also depends on the writing mode's
        // bidi direction.
        // TODO(stshine): Handle vertical writing mode.
        (mode,
         direction_reversed == style.writing_mode.is_bidi_ltr(),
         wrappable,
         wrap_reversed)
    };
    FlexFlow {
        block_flow: BlockFlow::from_fragment_and_float_kind(fragment, flotation),
        main_mode: main_mode,
        available_main_size: AxisSize::Infinite,
        available_cross_size: AxisSize::Infinite,
        lines: Vec::new(),
        items: Vec::new(),
        main_reverse: main_reverse,
        is_wrappable: is_wrappable,
        cross_reverse: cross_reverse
    }
}
/// Returns a line starting after the last item that is already in a line.
/// Note that when the container main size is infinite (i.e. a column flexbox with auto
/// height), we do not need to do flex resolving and this can be considered a fast path,
/// so the 'container_size' param does not need to be an `Option`. A line has to contain
/// at least one item; beyond that, if the container can be multi-line, the sum of the
/// outer main sizes of its items should be less than the container size; a line should
/// be filled with as many items as possible.
/// After being collected in a line, an item has its main sizes initialized.
fn get_flex_line(&mut self, container_size: Au) -> Option<FlexLine> {
    // Resume after the last collected line; `None` once every item is placed.
    let start = self.lines.last().map(|line| line.range.end).unwrap_or(0);
    if start == self.items.len() {
        return None;
    }
    let mut end = start;
    let mut total_line_size = Au(0);
    let mut margin_count = 0;
    let items = &mut self.items[start..];
    let mut children = self.block_flow.base.children.random_access_mut();
    for mut item in items {
        let kid = children.get(item.index);
        item.init_sizes(kid, container_size, self.main_mode);
        let outer_main_size = item.outer_main_size(kid, self.main_mode);
        // Break before an overflowing item, but never emit an empty line,
        // and only wrap when the container is wrappable.
        if total_line_size + outer_main_size > container_size && end != start && self.is_wrappable {
            break;
        }
        margin_count += item.auto_margin_count(kid, self.main_mode);
        total_line_size += outer_main_size;
        end += 1;
    }
    let line = FlexLine::new(start..end, container_size - total_line_size, margin_count);
    Some(line)
}
// TODO(zentner): This function should use flex-basis.
// Currently, this is the core of BlockFlow::bubble_inline_sizes() with all float logic
// stripped out, and max replaced with union_nonbreaking_inline.
/// Computes intrinsic inline sizes for a row (inline main axis) flex
/// container by accumulating each in-flow child's intrinsic sizes with
/// `union_nonbreaking_inline`.
fn inline_mode_bubble_inline_sizes(&mut self) {
    // A fixed `width` means the children do not influence our intrinsic size.
    let has_fixed_width = if let LengthOrPercentageOrAuto::Length(_) =
            self.block_flow.fragment.style().get_position().width {
        true
    } else {
        false
    };
    let mut sizes = self.block_flow.fragment.compute_intrinsic_inline_sizes();
    if !has_fixed_width {
        for child in self.block_flow.base.children.iter_mut() {
            let child_base = flow::mut_base(child);
            // Absolutely positioned children do not contribute.
            if child_base.flags.contains(IS_ABSOLUTELY_POSITIONED) {
                continue;
            }
            let item_sizes = IntrinsicISizes {
                minimum_inline_size: child_base.intrinsic_inline_sizes.minimum_inline_size,
                preferred_inline_size: child_base.intrinsic_inline_sizes.preferred_inline_size,
            };
            sizes.union_nonbreaking_inline(&item_sizes);
        }
    }
    self.block_flow.base.intrinsic_inline_sizes = sizes.finish();
}
// TODO(zentner): This function should use flex-basis.
// Currently, this is the core of BlockFlow::bubble_inline_sizes() with all float logic
// stripped out.
/// Computes intrinsic inline sizes for a column (block main axis) flex
/// container: since items stack in the block direction, the container's
/// intrinsic inline sizes are the maxima of the children's.
fn block_mode_bubble_inline_sizes(&mut self) {
    // A fixed `width` means the children do not influence our intrinsic size.
    let has_fixed_width = if let LengthOrPercentageOrAuto::Length(_) =
            self.block_flow.fragment.style().get_position().width {
        true
    } else {
        false
    };
    let mut sizes = self.block_flow.fragment.compute_intrinsic_inline_sizes();
    if !has_fixed_width {
        for child in self.block_flow.base.children.iter_mut() {
            let child_base = flow::mut_base(child);
            // Absolutely positioned children do not contribute.
            if child_base.flags.contains(IS_ABSOLUTELY_POSITIONED) {
                continue;
            }
            sizes.content_intrinsic_sizes.minimum_inline_size =
                max(sizes.content_intrinsic_sizes.minimum_inline_size,
                    child_base.intrinsic_inline_sizes.minimum_inline_size);
            sizes.content_intrinsic_sizes.preferred_inline_size =
                max(sizes.content_intrinsic_sizes.preferred_inline_size,
                    child_base.intrinsic_inline_sizes.preferred_inline_size);
        }
    }
    self.block_flow.base.intrinsic_inline_sizes = sizes.finish();
}
// TODO(zentner): This function needs to be radically different for multi-line flexbox.
// Currently, this is the core of BlockFlow::propagate_assigned_inline_size_to_children() with
// all float and table logic stripped out.
/// Propagates the computed inline size to the children when the main axis is
/// the block direction (column flexbox).
fn block_mode_assign_inline_sizes(&mut self,
                                  _shared_context: &SharedStyleContext,
                                  inline_start_content_edge: Au,
                                  inline_end_content_edge: Au,
                                  content_inline_size: Au) {
    let _scope = layout_debug_scope!("flex::block_mode_assign_inline_sizes");
    debug!("block_mode_assign_inline_sizes");
    // FIXME (mbrubeck): Get correct mode for absolute containing block
    let containing_block_mode = self.block_flow.base.writing_mode;
    // Only a definite main (block) size is passed down to children.
    let container_block_size = match self.available_main_size {
        AxisSize::Definite(length) => Some(length),
        _ => None
    };
    let container_inline_size = match self.available_cross_size {
        AxisSize::Definite(length) => length,
        AxisSize::MinMax(ref constraint) => constraint.clamp(content_inline_size),
        AxisSize::Infinite => content_inline_size
    };
    let mut children = self.block_flow.base.children.random_access_mut();
    for kid in &mut self.items {
        let kid_base = flow::mut_base(children.get(kid.index));
        kid_base.block_container_explicit_block_size = container_block_size;
        if kid_base.flags.contains(INLINE_POSITION_IS_STATIC) {
            // The inline-start margin edge of the child flow is at our inline-start content
            // edge, and its inline-size is our content inline-size.
            kid_base.position.start.i =
                if kid_base.writing_mode.is_bidi_ltr() == containing_block_mode.is_bidi_ltr() {
                    inline_start_content_edge
                } else {
                    // The kid's inline 'start' is at the parent's 'end'
                    inline_end_content_edge
                };
        }
        kid_base.block_container_inline_size = container_inline_size;
        kid_base.block_container_writing_mode = containing_block_mode;
        // NOTE(review): this unconditionally overwrites the bidi-aware
        // assignment made above, making that branch dead code — confirm
        // whether the mixed-writing-mode case should really be clobbered.
        kid_base.position.start.i = inline_start_content_edge;
    }
}
/// Assigns inline (main) sizes for a row flexbox: collects items into flex
/// lines, runs the flexible-length algorithm on each line, distributes free
/// space per `justify-content`, and writes back each item's inline position,
/// size, and margins.
fn inline_mode_assign_inline_sizes(&mut self,
                                   _shared_context: &SharedStyleContext,
                                   inline_start_content_edge: Au,
                                   _inline_end_content_edge: Au,
                                   content_inline_size: Au) {
    let _scope = layout_debug_scope!("flex::inline_mode_assign_inline_sizes");
    debug!("inline_mode_assign_inline_sizes");
    debug!("content_inline_size = {:?}", content_inline_size);
    let child_count = ImmutableFlowUtils::child_count(self as &Flow) as i32;
    debug!("child_count = {:?}", child_count);
    if child_count == 0 {
        return;
    }
    // Resolve the container's main-axis size against the available size.
    let inline_size = match self.available_main_size {
        AxisSize::Definite(length) => length,
        AxisSize::MinMax(ref constraint) => constraint.clamp(content_inline_size),
        AxisSize::Infinite => content_inline_size,
    };
    let container_mode = self.block_flow.base.block_container_writing_mode;
    self.block_flow.base.position.size.inline = inline_size;
    // Calculate non-auto block size to pass to children.
    let box_border = self.block_flow.fragment.box_sizing_boundary(Direction::Block);
    let parent_container_size = self.block_flow.explicit_block_containing_size(_shared_context);
    // https://drafts.csswg.org/css-ui-3/#box-sizing
    let explicit_content_size = self
        .block_flow
        .explicit_block_size(parent_container_size)
        .map(|x| max(x - box_border, Au(0)));
    let containing_block_text_align =
        self.block_flow.fragment.style().get_inheritedtext().text_align;
    // Collect and lay out one flex line at a time.
    while let Some(mut line) = self.get_flex_line(inline_size) {
        let items = &mut self.items[line.range.clone()];
        line.flex_resolve(items, false);
        // TODO(stshine): if this flex line contain children that have
        // property visibility:collapse, exclude them and resolve again.
        let item_count = items.len() as i32;
        let mut cur_i = inline_start_content_edge;
        // Spacing inserted between adjacent items for space-between /
        // space-around; auto margins take precedence over justification.
        let item_interval = if line.free_space >= Au(0) && line.auto_margin_count == 0 {
            match self.block_flow.fragment.style().get_position().justify_content {
                justify_content::T::space_between => {
                    if item_count == 1 {
                        Au(0)
                    } else {
                        line.free_space / (item_count - 1)
                    }
                }
                justify_content::T::space_around => {
                    line.free_space / item_count
                }
                _ => Au(0),
            }
        } else {
            Au(0)
        };
        // Initial offset before the first item on the line.
        match self.block_flow.fragment.style().get_position().justify_content {
            // Overflow equally in both ends of line.
            justify_content::T::center | justify_content::T::space_around => {
                cur_i += (line.free_space - item_interval * (item_count - 1)) / 2;
            }
            justify_content::T::flex_end => {
                cur_i += line.free_space;
            }
            _ => {}
        }
        let mut children = self.block_flow.base.children.random_access_mut();
        for item in items.iter_mut() {
            let mut block = children.get(item.index).as_mut_block();
            block.base.block_container_writing_mode = container_mode;
            block.base.block_container_inline_size = inline_size;
            block.base.block_container_explicit_block_size = explicit_content_size;
            // Per CSS 2.1 § 16.3.1, text alignment propagates to all children in flow.
            //
            // TODO(#2265, pcwalton): Do this in the cascade instead.
            block.base.flags.set_text_align(containing_block_text_align);
            // FIXME(stshine): should this be done during construction?
            block.mark_as_flex();
            let margin = block.fragment.style().logical_margin();
            // Auto margins absorb the line's positive free space equally.
            let auto_len =
                if line.auto_margin_count == 0 || line.free_space <= Au(0) {
                    Au(0)
                } else {
                    line.free_space / line.auto_margin_count
                };
            let margin_inline_start = MaybeAuto::from_style(margin.inline_start, inline_size)
                                          .specified_or_default(auto_len);
            let margin_inline_end = MaybeAuto::from_style(margin.inline_end, inline_size)
                                        .specified_or_default(auto_len);
            // Convert the resolved main size from the box-sizing box to the
            // border box, then to the outer (margin) box.
            let item_inline_size = item.main_size
                - block.fragment.box_sizing_boundary(self.main_mode)
                + block.fragment.border_padding.inline_start_end();
            let item_outer_size = item_inline_size + block.fragment.margin.inline_start_end();
            block.fragment.margin.inline_start = margin_inline_start;
            block.fragment.margin.inline_end = margin_inline_end;
            block.fragment.border_box.start.i = margin_inline_start;
            block.fragment.border_box.size.inline = item_inline_size;
            // Mirror the inline position when the main axis is reversed.
            block.base.position.start.i = if !self.main_reverse {
                cur_i
            } else {
                inline_start_content_edge * 2 + content_inline_size - cur_i - item_outer_size
            };
            block.base.position.size.inline = item_outer_size;
            cur_i += item_outer_size + item_interval;
        }
        self.lines.push(line);
    }
}
// TODO(zentner): This function should actually flex elements!
/// Stacks each flex item along the block axis (used when the main axis is the
/// block direction), assigning each child's block-start position in turn.
/// Does not currently flex: each item keeps the block-size it already has.
fn block_mode_assign_block_size(&mut self) {
    // `cur_b` is the running "pen" position along the block axis.
    let mut cur_b = if !self.main_reverse {
        // Normal direction: start just inside the block-start border/padding.
        self.block_flow.fragment.border_padding.block_start
    } else {
        // Reversed main axis: start at the block-end edge and walk backwards.
        self.block_flow.fragment.border_box.size.block
    };
    let mut children = self.block_flow.base.children.random_access_mut();
    for item in &mut self.items {
        let mut base = flow::mut_base(children.get(item.index));
        if !self.main_reverse {
            base.position.start.b = cur_b;
            cur_b = cur_b + base.position.size.block;
        } else {
            // Move the pen first so the item ends at the previous pen position.
            cur_b = cur_b - base.position.size.block;
            base.position.start.b = cur_b;
        }
    }
}
/// Assigns block (cross-axis) sizes and positions when the main axis is the
/// inline direction. Three phases: (1) measure each line's cross size from its
/// tallest item, (2) distribute any explicit-container free space per
/// `align-content`, (3) place each item within its line per auto margins and
/// `align-self`, then set this flow's own block size.
fn inline_mode_assign_block_size<'a>(&mut self, layout_context: &'a LayoutContext<'a>) {
    let _scope = layout_debug_scope!("flex::inline_mode_assign_block_size");
    let line_count = self.lines.len() as i32;
    let line_align = self.block_flow.fragment.style().get_position().align_content;
    let mut cur_b = self.block_flow.fragment.border_padding.block_start;
    let mut total_cross_size = Au(0);
    let mut line_interval = Au(0);
    // Phase 1: a line's cross size is the max margin-box block-size of its items.
    {
        let mut children = self.block_flow.base.children.random_access_mut();
        for line in self.lines.iter_mut() {
            for item in &self.items[line.range.clone()] {
                let fragment = &children.get(item.index).as_block().fragment;
                line.cross_size = max(line.cross_size,
                                      fragment.border_box.size.block +
                                      fragment.margin.block_start_end());
            }
            total_cross_size += line.cross_size;
        }
    }
    let box_border = self.block_flow.fragment.box_sizing_boundary(Direction::Block);
    let parent_container_size =
        self.block_flow.explicit_block_containing_size(layout_context.shared_context());
    // https://drafts.csswg.org/css-ui-3/#box-sizing
    // Convert the explicit border-box size to a content-box size (clamped at 0).
    let explicit_content_size = self
        .block_flow
        .explicit_block_size(parent_container_size)
        .map(|x| max(x - box_border, Au(0)));
    // Phase 2: only an explicit container block-size creates free space to
    // distribute between/around lines per `align-content`.
    if let Some(container_block_size) = explicit_content_size {
        let free_space = container_block_size - total_cross_size;
        total_cross_size = container_block_size;
        if line_align == align_content::T::stretch && free_space > Au(0) {
            // Stretch: grow every line equally to fill the container.
            for line in self.lines.iter_mut() {
                line.cross_size += free_space / line_count;
            }
        }
        line_interval = match line_align {
            align_content::T::space_between => {
                if line_count == 1 {
                    Au(0)
                } else {
                    free_space / (line_count - 1)
                }
            }
            align_content::T::space_around => {
                free_space / line_count
            }
            _ => Au(0),
        };
        match line_align {
            // Center/space-around: half of the remaining space goes before the
            // first line.
            align_content::T::center | align_content::T::space_around => {
                cur_b += (free_space - line_interval * (line_count - 1)) / 2;
            }
            align_content::T::flex_end => {
                cur_b += free_space;
            }
            _ => {}
        }
    }
    // Phase 3: place every item within its line.
    let mut children = self.block_flow.base.children.random_access_mut();
    for line in &self.lines {
        for item in self.items[line.range.clone()].iter_mut() {
            let block = children.get(item.index).as_mut_block();
            let auto_margin_count = item.auto_margin_count(block, Direction::Block);
            let margin = block.fragment.style().logical_margin();
            let mut margin_block_start = block.fragment.margin.block_start;
            let mut margin_block_end = block.fragment.margin.block_end;
            // Free space left in the line after this item's margin box.
            let mut free_space = line.cross_size - block.base.position.size.block
                - block.fragment.margin.block_start_end();
            // The spec is a little vague here, but if I understand it correctly, the outer
            // cross size of item should equal to the line size if any auto margin exists.
            // https://drafts.csswg.org/css-flexbox/#algo-cross-margins
            if auto_margin_count > 0 {
                if margin.block_start == LengthOrPercentageOrAuto::Auto {
                    margin_block_start = if free_space < Au(0) {
                        Au(0)
                    } else {
                        free_space / auto_margin_count
                    };
                }
                margin_block_end = line.cross_size - margin_block_start - block.base.position.size.block;
                free_space = Au(0);
            }
            let self_align = block.fragment.style().get_position().align_self;
            // Stretch an auto-cross-size item to fill the line.
            if self_align == align_self::T::stretch &&
                block.fragment.style().content_block_size() == LengthOrPercentageOrAuto::Auto {
                free_space = Au(0);
                block.base.block_container_explicit_block_size = Some(line.cross_size);
                block.base.position.size.block =
                    line.cross_size - margin_block_start - margin_block_end;
                block.fragment.border_box.size.block = block.base.position.size.block;
                // FIXME(stshine): item with 'align-self: stretch' and auto cross size should act
                // as if it has a fixed cross size, all child blocks should resolve against it.
                // block.assign_block_size(layout_context);
            }
            block.base.position.start.b = margin_block_start +
                if !self.cross_reverse {
                    cur_b
                } else {
                    // Mirror the position about the content box when the cross
                    // axis is reversed.
                    self.block_flow.fragment.border_padding.block_start * 2
                        + total_cross_size - cur_b - line.cross_size
                };
            // TODO(stshine): support baseline alignment.
            if free_space != Au(0) {
                // `align-self` placement within the line's leftover space.
                let flex_cross = match self_align {
                    align_self::T::flex_end => free_space,
                    align_self::T::center => free_space / 2,
                    _ => Au(0),
                };
                block.base.position.start.b +=
                    if !self.cross_reverse {
                        flex_cross
                    } else {
                        free_space - flex_cross
                    };
            }
        }
        cur_b += line_interval + line.cross_size;
    }
    // Finally, size this flex container itself.
    let total_block_size = total_cross_size + self.block_flow.fragment.border_padding.block_start_end();
    self.block_flow.fragment.border_box.size.block = total_block_size;
    self.block_flow.base.position.size.block = total_block_size;
}
}
/// `Flow` implementation for flex containers. Most traversals delegate to the
/// underlying `BlockFlow`; the flex-specific work happens in
/// `bubble_inline_sizes`, `assign_inline_sizes`, and `assign_block_size`.
impl Flow for FlexFlow {
    fn class(&self) -> FlowClass {
        FlowClass::Flex
    }

    fn as_block(&self) -> &BlockFlow {
        &self.block_flow
    }

    fn as_mut_block(&mut self) -> &mut BlockFlow {
        &mut self.block_flow
    }

    fn mark_as_root(&mut self) {
        self.block_flow.mark_as_root()
    }

    /// Pass 1 of reflow: collects the in-flow children as `FlexItem`s (in
    /// `order` order) and bubbles intrinsic inline sizes per the main axis.
    fn bubble_inline_sizes(&mut self) {
        let _scope = layout_debug_scope!("flex::bubble_inline_sizes {:x}",
                                         self.block_flow.base.debug_id());
        // Flexbox Section 9.0: Generate anonymous flex items:
        // This part was handled in the flow constructor.
        // Flexbox Section 9.1: Re-order flex items according to their order.
        // FIXME(stshine): This should be done during flow construction.
        // Absolutely-positioned children are not flex items.
        let mut items: Vec<FlexItem> =
            self.block_flow
                .base
                .children
                .iter()
                .enumerate()
                .filter(|&(_, flow)| {
                    !flow.as_block().base.flags.contains(IS_ABSOLUTELY_POSITIONED)
                })
                .map(|(index, flow)| FlexItem::new(index, flow))
                .collect();
        items.sort_by_key(|item| item.order);
        self.items = items;
        match self.main_mode {
            Direction::Inline => self.inline_mode_bubble_inline_sizes(),
            Direction::Block => self.block_mode_bubble_inline_sizes()
        }
    }

    /// Pass 2 of reflow: computes used inline sizes, then dispatches to the
    /// inline-mode or block-mode implementation depending on the main axis.
    fn assign_inline_sizes(&mut self, shared_context: &SharedStyleContext) {
        let _scope = layout_debug_scope!("flex::assign_inline_sizes {:x}", self.block_flow.base.debug_id());
        debug!("assign_inline_sizes");
        // Skip work entirely when nothing relevant was damaged.
        if !self.block_flow.base.restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW) {
            return
        }
        // Our inline-size was set to the inline-size of the containing block by the flow's parent.
        // Now compute the real value.
        let containing_block_inline_size = self.block_flow.base.block_container_inline_size;
        self.block_flow.compute_used_inline_size(shared_context, containing_block_inline_size);
        if self.block_flow.base.flags.is_float() {
            self.block_flow.float.as_mut().unwrap().containing_inline_size = containing_block_inline_size
        }
        // Map physical width/height to logical inline/block sizes per writing mode.
        let (available_block_size, available_inline_size) = {
            let style = &self.block_flow.fragment.style;
            let (specified_block_size, specified_inline_size) = if style.writing_mode.is_vertical() {
                (style.get_position().width, style.get_position().height)
            } else {
                (style.get_position().height, style.get_position().width)
            };
            let available_inline_size = AxisSize::new(specified_inline_size,
                                                      Some(self.block_flow.base.block_container_inline_size),
                                                      style.min_inline_size(),
                                                      style.max_inline_size());
            let available_block_size = AxisSize::new(specified_block_size,
                                                     self.block_flow.base.block_container_explicit_block_size,
                                                     style.min_block_size(),
                                                     style.max_block_size());
            (available_block_size, available_inline_size)
        };
        // Move in from the inline-start border edge.
        let inline_start_content_edge = self.block_flow.fragment.border_box.start.i +
            self.block_flow.fragment.border_padding.inline_start;
        debug!("inline_start_content_edge = {:?}", inline_start_content_edge);
        let padding_and_borders = self.block_flow.fragment.border_padding.inline_start_end();
        // Distance from the inline-end margin edge to the inline-end content edge.
        let inline_end_content_edge =
            self.block_flow.fragment.margin.inline_end +
            self.block_flow.fragment.border_padding.inline_end;
        debug!("padding_and_borders = {:?}", padding_and_borders);
        debug!("self.block_flow.fragment.border_box.size.inline = {:?}",
               self.block_flow.fragment.border_box.size.inline);
        let content_inline_size = self.block_flow.fragment.border_box.size.inline - padding_and_borders;
        match self.main_mode {
            Direction::Inline => {
                self.available_main_size = available_inline_size;
                self.available_cross_size = available_block_size;
                self.inline_mode_assign_inline_sizes(shared_context,
                                                     inline_start_content_edge,
                                                     inline_end_content_edge,
                                                     content_inline_size)
            }
            Direction::Block => {
                self.available_main_size = available_block_size;
                self.available_cross_size = available_inline_size;
                self.block_mode_assign_inline_sizes(shared_context,
                                                    inline_start_content_edge,
                                                    inline_end_content_edge,
                                                    content_inline_size)
            }
        }
    }

    /// Pass 3 of reflow: block base sizing first, then flex-specific
    /// block-direction placement per the main axis.
    fn assign_block_size<'a>(&mut self, layout_context: &'a LayoutContext<'a>) {
        self.block_flow
            .assign_block_size_block_base(layout_context,
                                          None,
                                          MarginsMayCollapseFlag::MarginsMayNotCollapse);
        match self.main_mode {
            Direction::Inline => self.inline_mode_assign_block_size(layout_context),
            Direction::Block => self.block_mode_assign_block_size(),
        }
    }

    fn compute_absolute_position(&mut self, layout_context: &SharedLayoutContext) {
        self.block_flow.compute_absolute_position(layout_context)
    }

    fn place_float_if_applicable<'a>(&mut self) {
        self.block_flow.place_float_if_applicable()
    }

    fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au) {
        self.block_flow.update_late_computed_inline_position_if_necessary(inline_position)
    }

    fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au) {
        self.block_flow.update_late_computed_block_position_if_necessary(block_position)
    }

    fn build_display_list(&mut self, state: &mut DisplayListBuildState) {
        self.build_display_list_for_flex(state);
    }

    fn collect_stacking_contexts(&mut self, parent: &mut StackingContext) {
        self.block_flow.collect_stacking_contexts(parent);
    }

    fn repair_style(&mut self, new_style: &Arc<ServoComputedValues>) {
        self.block_flow.repair_style(new_style)
    }

    fn compute_overflow(&self) -> Overflow {
        self.block_flow.compute_overflow()
    }

    fn generated_containing_block_size(&self, flow: OpaqueFlow) -> LogicalSize<Au> {
        self.block_flow.generated_containing_block_size(flow)
    }

    fn iterate_through_fragment_border_boxes(&self,
                                             iterator: &mut FragmentBorderBoxIterator,
                                             level: i32,
                                             stacking_context_position: &Point2D<Au>) {
        self.block_flow.iterate_through_fragment_border_boxes(iterator, level, stacking_context_position);
    }

    fn mutate_fragments(&mut self, mutator: &mut FnMut(&mut Fragment)) {
        self.block_flow.mutate_fragments(mutator);
    }
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo's experimental layout system builds a tree of `Flow` and `Fragment` objects and solves
//! layout constraints to obtain positions and display attributes of tree nodes. Positions are
//! computed in several tree traversals driven by the fundamental data dependencies required by
//! inline and block layout.
//!
//! Flows are interior nodes in the layout tree and correspond closely to *flow contexts* in the
//! CSS specification. Flows are responsible for positioning their child flow contexts and
//! fragments. Flows have purpose-specific fields, such as auxiliary line structs, out-of-flow
//! child lists, and so on.
//!
//! Currently, the important types of flows are:
//!
//! * `BlockFlow`: A flow that establishes a block context. It has several child flows, each of
//!   which are positioned according to block formatting context rules (CSS block boxes). Block
//!   flows also contain a single box to represent their rendered borders, padding, etc.
//!   The BlockFlow at the root of the tree has special behavior: it stretches to the boundaries of
//!   the viewport.
//!
//! * `InlineFlow`: A flow that establishes an inline context. It has a flat list of child
//!   fragments/flows that are subject to inline layout and line breaking and structs to represent
//!   line breaks and mapping to CSS boxes, for the purpose of handling `getClientRects()` and
//!   similar methods.
use css::node_style::StyledNode;
use block::BlockFlow;
use context::LayoutContext;
use display_list_builder::{DisplayListBuildingResult, DisplayListResult};
use display_list_builder::{NoDisplayListBuildingResult, StackingContextResult};
use floats::Floats;
use flow_list::{FlowList, FlowListIterator, MutFlowListIterator};
use flow_ref::FlowRef;
use fragment::{Fragment, FragmentBoundsIterator, TableRowFragment, TableCellFragment};
use incremental::{RECONSTRUCT_FLOW, REFLOW, REFLOW_OUT_OF_FLOW, RestyleDamage};
use inline::InlineFlow;
use model::{CollapsibleMargins, IntrinsicISizes, MarginCollapseInfo};
use parallel::FlowParallelInfo;
use table::{ColumnComputedInlineSize, ColumnIntrinsicInlineSize, TableFlow};
use table_caption::TableCaptionFlow;
use table_cell::TableCellFlow;
use table_colgroup::TableColGroupFlow;
use table_row::TableRowFlow;
use table_rowgroup::TableRowGroupFlow;
use table_wrapper::TableWrapperFlow;
use wrapper::ThreadSafeLayoutNode;
use geom::{Point2D, Rect, Size2D};
use serialize::{Encoder, Encodable};
use servo_msg::compositor_msg::LayerId;
use servo_util::geometry::Au;
use servo_util::logical_geometry::WritingMode;
use servo_util::logical_geometry::{LogicalRect, LogicalSize};
use std::mem;
use std::num::Zero;
use std::fmt;
use std::iter::Zip;
use std::raw;
use std::sync::atomic::{AtomicUint, SeqCst};
use std::slice::MutItems;
use style::computed_values::{clear, float, position, text_align};
use style::ComputedValues;
use sync::Arc;
/// Virtual methods that make up a float context.
///
/// Note that virtual methods have a cost; we should not overuse them in Servo. Consider adding
/// methods to `ImmutableFlowUtils` or `MutableFlowUtils` before adding more methods here.
pub trait Flow: fmt::Show + ToString + Sync {
    // RTTI
    //
    // TODO(pcwalton): Use Rust's RTTI, once that works.

    /// Returns the class of flow that this is.
    fn class(&self) -> FlowClass;

    /// If this is a block flow, returns the underlying object, borrowed immutably. Fails
    /// otherwise.
    fn as_immutable_block<'a>(&'a self) -> &'a BlockFlow {
        panic!("called as_immutable_block() on a non-block flow")
    }

    /// If this is a block flow, returns the underlying object. Fails otherwise.
    fn as_block<'a>(&'a mut self) -> &'a mut BlockFlow {
        debug!("called as_block() on a flow of type {}", self.class());
        panic!("called as_block() on a non-block flow")
    }

    /// If this is an inline flow, returns the underlying object, borrowed immutably. Fails
    /// otherwise.
    fn as_immutable_inline<'a>(&'a self) -> &'a InlineFlow {
        panic!("called as_immutable_inline() on a non-inline flow")
    }

    /// If this is an inline flow, returns the underlying object. Fails otherwise.
    fn as_inline<'a>(&'a mut self) -> &'a mut InlineFlow {
        panic!("called as_inline() on a non-inline flow")
    }

    /// If this is a table wrapper flow, returns the underlying object. Fails otherwise.
    fn as_table_wrapper<'a>(&'a mut self) -> &'a mut TableWrapperFlow {
        panic!("called as_table_wrapper() on a non-tablewrapper flow")
    }

    /// If this is a table wrapper flow, returns the underlying object, borrowed immutably. Fails
    /// otherwise.
    fn as_immutable_table_wrapper<'a>(&'a self) -> &'a TableWrapperFlow {
        panic!("called as_immutable_table_wrapper() on a non-tablewrapper flow")
    }

    /// If this is a table flow, returns the underlying object. Fails otherwise.
    fn as_table<'a>(&'a mut self) -> &'a mut TableFlow {
        panic!("called as_table() on a non-table flow")
    }

    /// If this is a table flow, returns the underlying object, borrowed immutably. Fails otherwise.
    fn as_immutable_table<'a>(&'a self) -> &'a TableFlow {
        // Panic message fixed to name this accessor rather than `as_table()`.
        panic!("called as_immutable_table() on a non-table flow")
    }

    /// If this is a table colgroup flow, returns the underlying object. Fails otherwise.
    fn as_table_colgroup<'a>(&'a mut self) -> &'a mut TableColGroupFlow {
        panic!("called as_table_colgroup() on a non-tablecolgroup flow")
    }

    /// If this is a table rowgroup flow, returns the underlying object. Fails otherwise.
    fn as_table_rowgroup<'a>(&'a mut self) -> &'a mut TableRowGroupFlow {
        panic!("called as_table_rowgroup() on a non-tablerowgroup flow")
    }

    /// If this is a table rowgroup flow, returns the underlying object, borrowed immutably. Fails
    /// otherwise.
    fn as_immutable_table_rowgroup<'a>(&'a self) -> &'a TableRowGroupFlow {
        panic!("called as_immutable_table_rowgroup() on a non-tablerowgroup flow")
    }

    /// If this is a table row flow, returns the underlying object. Fails otherwise.
    fn as_table_row<'a>(&'a mut self) -> &'a mut TableRowFlow {
        panic!("called as_table_row() on a non-tablerow flow")
    }

    /// If this is a table row flow, returns the underlying object, borrowed immutably. Fails
    /// otherwise.
    fn as_immutable_table_row<'a>(&'a self) -> &'a TableRowFlow {
        panic!("called as_immutable_table_row() on a non-tablerow flow")
    }

    /// If this is a table caption flow, returns the underlying object. Fails otherwise.
    fn as_table_caption<'a>(&'a mut self) -> &'a mut TableCaptionFlow {
        panic!("called as_table_caption() on a non-tablecaption flow")
    }

    /// If this is a table cell flow, returns the underlying object. Fails otherwise.
    fn as_table_cell<'a>(&'a mut self) -> &'a mut TableCellFlow {
        panic!("called as_table_cell() on a non-tablecell flow")
    }

    /// If this is a table cell flow, returns the underlying object, borrowed immutably. Fails
    /// otherwise.
    fn as_immutable_table_cell<'a>(&'a self) -> &'a TableCellFlow {
        panic!("called as_immutable_table_cell() on a non-tablecell flow")
    }

    /// If this is a table row, table rowgroup, or table flow, returns column intrinsic
    /// inline-sizes. Fails otherwise.
    fn column_intrinsic_inline_sizes<'a>(&'a mut self) -> &'a mut Vec<ColumnIntrinsicInlineSize> {
        panic!("called column_intrinsic_inline_sizes() on non-table flow")
    }

    /// If this is a table row, table rowgroup, or table flow, returns column computed
    /// inline-sizes. Fails otherwise.
    fn column_computed_inline_sizes<'a>(&'a mut self) -> &'a mut Vec<ColumnComputedInlineSize> {
        // Panic message fixed: it previously named `column_intrinsic_inline_sizes()`.
        panic!("called column_computed_inline_sizes() on non-table flow")
    }

    // Main methods

    /// Pass 1 of reflow: computes minimum and preferred inline-sizes.
    ///
    /// Recursively (bottom-up) determine the flow's minimum and preferred inline-sizes. When
    /// called on this flow, all child flows have had their minimum and preferred inline-sizes set.
    /// This function must decide minimum/preferred inline-sizes based on its children's inline-
    /// sizes and the dimensions of any boxes it is responsible for flowing.
    fn bubble_inline_sizes(&mut self) {
        panic!("bubble_inline_sizes not yet implemented")
    }

    /// Pass 2 of reflow: computes inline-size.
    fn assign_inline_sizes(&mut self, _ctx: &LayoutContext) {
        panic!("assign_inline_sizes not yet implemented")
    }

    /// Pass 3a of reflow: computes block-size.
    fn assign_block_size<'a>(&mut self, _ctx: &'a LayoutContext<'a>) {
        panic!("assign_block_size not yet implemented")
    }

    /// If this is a float, places it. The default implementation does nothing.
    fn place_float_if_applicable<'a>(&mut self, _: &'a LayoutContext<'a>) {}

    /// Assigns block-sizes in-order; or, if this is a float, places the float. The default
    /// implementation simply assigns block-sizes if this flow is impacted by floats. Returns true
    /// if this child was impacted by floats or false otherwise.
    fn assign_block_size_for_inorder_child_if_necessary<'a>(&mut self,
                                                            layout_context: &'a LayoutContext<'a>)
                                                            -> bool {
        let impacted = base(&*self).flags.impacted_by_floats();
        if impacted {
            self.assign_block_size(layout_context);
            // The block-size is now up to date; clear the reflow damage so the
            // normal traversal does not redo this work.
            mut_base(&mut *self).restyle_damage.remove(REFLOW_OUT_OF_FLOW | REFLOW);
        }
        impacted
    }

    /// Phase 4 of reflow: computes absolute positions.
    fn compute_absolute_position(&mut self) {
        // The default implementation is a no-op.
    }

    /// Phase 5 of reflow: builds display lists.
    fn build_display_list(&mut self, layout_context: &LayoutContext);

    /// Perform an iteration of fragment bounds on this flow.
    fn iterate_through_fragment_bounds(&self, iterator: &mut FragmentBoundsIterator);

    fn compute_collapsible_block_start_margin(&mut self,
                                              _layout_context: &mut LayoutContext,
                                              _margin_collapse_info: &mut MarginCollapseInfo) {
        // The default implementation is a no-op.
    }

    /// Marks this flow as the root flow. The default implementation is a no-op.
    fn mark_as_root(&mut self) {}

    // Note that the following functions are mostly called using static method
    // dispatch, so it's ok to have them in this trait. Plus, they have
    // different behaviour for different types of Flow, so they can't go into
    // the Immutable / Mutable Flow Utils traits without additional casts.

    /// Return true if store overflow is delayed for this flow.
    ///
    /// Currently happens only for absolutely positioned flows.
    fn is_store_overflow_delayed(&mut self) -> bool {
        false
    }

    fn is_root(&self) -> bool {
        false
    }

    /// The 'position' property of this flow.
    fn positioning(&self) -> position::T {
        position::static_
    }

    /// Return true if this flow has position 'fixed'.
    fn is_fixed(&self) -> bool {
        self.positioning() == position::fixed
    }

    fn is_positioned(&self) -> bool {
        self.is_relatively_positioned() || base(self).flags.contains(IS_ABSOLUTELY_POSITIONED)
    }

    fn is_relatively_positioned(&self) -> bool {
        self.positioning() == position::relative
    }

    /// Return true if this is the root of an absolute flow tree.
    fn is_root_of_absolute_flow_tree(&self) -> bool {
        false
    }

    /// Returns true if this is an absolute containing block.
    fn is_absolute_containing_block(&self) -> bool {
        false
    }

    /// Updates the inline position of a child flow during the assign-height traversal. At present,
    /// this is only used for absolutely-positioned inline-blocks.
    fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au);

    /// Updates the block position of a child flow during the assign-height traversal. At present,
    /// this is only used for absolutely-positioned inline-blocks.
    fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au);

    /// Return the dimensions of the containing block generated by this flow for absolutely-
    /// positioned descendants. For block flows, this is the padding box.
    ///
    /// NB: Do not change this `&self` to `&mut self` under any circumstances! It has security
    /// implications because this can be called on parents concurrently from descendants!
    fn generated_containing_block_rect(&self) -> LogicalRect<Au> {
        // Panic message fixed to match the method's actual name.
        panic!("generated_containing_block_rect not yet implemented for this flow")
    }

    /// Returns a layer ID for the given fragment.
    fn layer_id(&self, fragment_id: uint) -> LayerId {
        unsafe {
            // The layer ID is derived from this flow's address, which is stable
            // for the lifetime of the flow.
            let pointer: uint = mem::transmute(self);
            LayerId(pointer, fragment_id)
        }
    }

    /// Attempts to perform incremental fixup of this flow by replacing its fragment's style with
    /// the new style. This can only succeed if the flow has exactly one fragment.
    fn repair_style(&mut self, new_style: &Arc<ComputedValues>);
}
// Base access

/// Returns the `BaseFlow` data shared by all flow types, given a `Flow` trait
/// object, without dynamic dispatch.
#[inline(always)]
pub fn base<'a>(this: &'a Flow) -> &'a BaseFlow {
    // Extracts the trait object's data pointer and reinterprets it as a
    // `&BaseFlow`. NOTE(review): this assumes the `BaseFlow` lies at offset 0
    // of every `Flow` implementor — TODO confirm against the concrete flow
    // struct layouts.
    unsafe {
        let obj = mem::transmute::<&'a Flow, raw::TraitObject>(this);
        mem::transmute::<*mut (), &'a BaseFlow>(obj.data)
    }
}
/// Returns an iterator over the children of the given flow, borrowed
/// immutably.
pub fn imm_child_iter<'a>(flow: &'a Flow) -> FlowListIterator<'a> {
    let base_flow = base(flow);
    base_flow.children.iter()
}
/// Mutable counterpart of `base`: returns the `BaseFlow` data of a `Flow`
/// trait object, borrowed mutably.
#[inline(always)]
pub fn mut_base<'a>(this: &'a mut Flow) -> &'a mut BaseFlow {
    // Same trick as `base`: reinterpret the trait object's data pointer.
    // NOTE(review): assumes `BaseFlow` is at offset 0 of every implementor —
    // TODO confirm.
    unsafe {
        let obj = mem::transmute::<&'a mut Flow, raw::TraitObject>(this);
        mem::transmute::<*mut (), &'a mut BaseFlow>(obj.data)
    }
}
/// Returns an iterator over the children of the given flow, borrowed
/// mutably.
pub fn child_iter<'a>(flow: &'a mut Flow) -> MutFlowListIterator<'a> {
    let base_flow = mut_base(flow);
    base_flow.children.iter_mut()
}
/// Read-only convenience queries on flows (class predicates, child counts,
/// debugging dumps). NOTE(review): the by-value `self` signatures suggest this
/// is implemented for a reference type — see the implementor elsewhere in this
/// file/crate.
pub trait ImmutableFlowUtils {
    // Convenience functions

    /// Returns true if this flow is a block or a float flow.
    fn is_block_like(self) -> bool;

    /// Returns true if this flow is a table flow.
    fn is_table(self) -> bool;

    /// Returns true if this flow is a table caption flow.
    fn is_table_caption(self) -> bool;

    /// Returns true if this flow is a proper table child.
    fn is_proper_table_child(self) -> bool;

    /// Returns true if this flow is a table row flow.
    fn is_table_row(self) -> bool;

    /// Returns true if this flow is a table cell flow.
    fn is_table_cell(self) -> bool;

    /// Returns true if this flow is a table colgroup flow.
    fn is_table_colgroup(self) -> bool;

    /// Returns true if this flow is a table rowgroup flow.
    fn is_table_rowgroup(self) -> bool;

    /// Returns true if this flow is one of table-related flows.
    fn is_table_kind(self) -> bool;

    /// Returns true if anonymous flow is needed between this flow and child flow.
    fn need_anonymous_flow(self, child: &Flow) -> bool;

    /// Generates missing child flow of this flow.
    fn generate_missing_child_flow(self, node: &ThreadSafeLayoutNode) -> FlowRef;

    /// Returns true if this flow has no children.
    fn is_leaf(self) -> bool;

    /// Returns the number of children that this flow possesses.
    fn child_count(self) -> uint;

    /// Return true if this flow is a Block Container.
    fn is_block_container(self) -> bool;

    /// Returns true if this flow is a block flow.
    fn is_block_flow(self) -> bool;

    /// Returns true if this flow is an inline flow.
    fn is_inline_flow(self) -> bool;

    /// Dumps the flow tree for debugging.
    fn dump(self);

    /// Dumps the flow tree for debugging, with a prefix to indicate that we're at the given level.
    fn dump_with_level(self, level: uint);
}
/// Tree traversals and mutating helpers on flows. NOTE(review): as with
/// `ImmutableFlowUtils`, the by-value `self` signatures suggest this is
/// implemented for a (mutable) reference type.
pub trait MutableFlowUtils {
    // Traversals

    /// Traverses the tree in preorder.
    fn traverse_preorder<T:PreorderFlowTraversal>(self, traversal: &T);

    /// Traverses the tree in postorder.
    fn traverse_postorder<T:PostorderFlowTraversal>(self, traversal: &T);

    // Mutators

    /// Computes the overflow region for this flow.
    fn store_overflow(self, _: &LayoutContext);

    /// Gathers static block-offsets bubbled up by kids.
    ///
    /// This essentially gives us offsets of all absolutely positioned direct descendants and all
    /// fixed descendants, in tree order.
    ///
    /// This is called in a bottom-up traversal (specifically, the assign-block-size traversal).
    /// So, kids have their flow origin already set. In the case of absolute flow kids, they have
    /// their hypothetical box position already set.
    fn collect_static_block_offsets_from_children(self);
}
/// Mutating helpers that require an owned handle to the flow (as opposed to
/// the borrowed access of `MutableFlowUtils`).
pub trait MutableOwnedFlowUtils {
    /// Set absolute descendants for this flow.
    ///
    /// Set this flow as the Containing Block for all the absolute descendants.
    fn set_absolute_descendants(&mut self, abs_descendants: AbsDescendants);
}
/// Discriminant returned by `Flow::class()`. Serves as a lightweight
/// substitute for RTTI to identify the concrete flow type behind a `Flow`
/// trait object (see the "RTTI" TODO on the `Flow` trait).
#[deriving(Encodable, PartialEq, Show)]
pub enum FlowClass {
    BlockFlowClass,
    InlineFlowClass,
    ListItemFlowClass,
    TableWrapperFlowClass,
    TableFlowClass,
    TableColGroupFlowClass,
    TableRowGroupFlowClass,
    TableRowFlowClass,
    TableCaptionFlowClass,
    TableCellFlowClass,
}
/// A top-down traversal.
pub trait PreorderFlowTraversal {
    /// The operation to perform on each flow. (The previous doc claimed a
    /// boolean return, but `process` returns unit.)
    fn process(&self, flow: &mut Flow);

    /// Returns whether the operation should run on this node. If this returns
    /// false, we skip the operation for this node, but continue processing the
    /// descendants. This is called *after* parent nodes are visited. Defaults
    /// to always processing.
    fn should_process(&self, _flow: &mut Flow) -> bool {
        true
    }
}
/// A bottom-up traversal, with a optional in-order pass.
pub trait PostorderFlowTraversal {
    /// The operation to perform on each flow. (The previous doc claimed a
    /// boolean return, but `process` returns unit.)
    fn process(&self, flow: &mut Flow);

    /// Returns whether the operation should run on this node. If this returns
    /// false, we skip the operation for this node, but continue processing the
    /// ancestors. This is called *after* child nodes are visited. Defaults to
    /// always processing.
    fn should_process(&self, _flow: &mut Flow) -> bool {
        true
    }
}
// Packed per-flow boolean state plus the 4-bit text-align field; see
// `TEXT_ALIGN_SHIFT` below for how the text-align bits are decoded.
bitflags! {
    #[doc = "Flags used in flows."]
    flags FlowFlags: u16 {
        // floated descendants flags
        #[doc = "Whether this flow has descendants that float left in the same block formatting"]
        #[doc = "context."]
        const HAS_LEFT_FLOATED_DESCENDANTS = 0b0000_0000_0000_0001,
        #[doc = "Whether this flow has descendants that float right in the same block formatting"]
        #[doc = "context."]
        const HAS_RIGHT_FLOATED_DESCENDANTS = 0b0000_0000_0000_0010,
        #[doc = "Whether this flow is impacted by floats to the left in the same block formatting"]
        #[doc = "context (i.e. its height depends on some prior flows with `float: left`)."]
        const IMPACTED_BY_LEFT_FLOATS = 0b0000_0000_0000_0100,
        #[doc = "Whether this flow is impacted by floats to the right in the same block"]
        #[doc = "formatting context (i.e. its height depends on some prior flows with `float:"]
        #[doc = "right`)."]
        const IMPACTED_BY_RIGHT_FLOATS = 0b0000_0000_0000_1000,
        // layer, positioning, float-kind, and text-align flags
        #[doc = "Whether this flow contains a flow that has its own layer within the same absolute"]
        #[doc = "containing block."]
        const LAYERS_NEEDED_FOR_DESCENDANTS = 0b0000_0000_0001_0000,
        #[doc = "Whether this flow must have its own layer. Even if this flag is not set, it might"]
        #[doc = "get its own layer if it's deemed to be likely to overlap flows with their own"]
        #[doc = "layer."]
        const NEEDS_LAYER = 0b0000_0000_0010_0000,
        #[doc = "Whether this flow is absolutely positioned. This is checked all over layout, so a"]
        #[doc = "virtual call is too expensive."]
        const IS_ABSOLUTELY_POSITIONED = 0b0000_0000_0100_0000,
        #[doc = "Whether this flow clears to the left. This is checked all over layout, so a"]
        #[doc = "virtual call is too expensive."]
        const CLEARS_LEFT = 0b0000_0000_1000_0000,
        #[doc = "Whether this flow clears to the right. This is checked all over layout, so a"]
        #[doc = "virtual call is too expensive."]
        const CLEARS_RIGHT = 0b0000_0001_0000_0000,
        #[doc = "Whether this flow is left-floated. This is checked all over layout, so a"]
        #[doc = "virtual call is too expensive."]
        const FLOATS_LEFT = 0b0000_0010_0000_0000,
        #[doc = "Whether this flow is right-floated. This is checked all over layout, so a"]
        #[doc = "virtual call is too expensive."]
        const FLOATS_RIGHT = 0b0000_0100_0000_0000,
        #[doc = "Text alignment. \
                 NB: If you update this, update `TEXT_ALIGN_SHIFT` below."]
        const TEXT_ALIGN = 0b0111_1000_0000_0000,
    }
}
// NB: If you update this field, you must update the floated descendants flags.
/// The bitmask of flags that represent the `has_left_floated_descendants` and
/// `has_right_floated_descendants` fields.
/// (Covers `HAS_LEFT_FLOATED_DESCENDANTS | HAS_RIGHT_FLOATED_DESCENDANTS`.)
static HAS_FLOATED_DESCENDANTS_BITMASK: FlowFlags = FlowFlags { bits: 0b0000_0011 };

/// The number of bits we must shift off to handle the text alignment field.
/// (The lowest set bit of `TEXT_ALIGN` is bit 11.)
///
/// NB: If you update this, update `TEXT_ALIGN` above.
static TEXT_ALIGN_SHIFT: uint = 11;
impl FlowFlags {
    /// Propagates text alignment flags from an appropriate parent flow per CSS 2.1.
    ///
    /// FIXME(#2265, pcwalton): It would be cleaner and faster to make this a derived CSS property
    /// `-servo-text-align-in-effect`.
    pub fn propagate_text_alignment_from_parent(&mut self, parent_flags: FlowFlags) {
        self.set_text_align_override(parent_flags);
    }

    /// Decodes the text-align value packed into the `TEXT_ALIGN` bits.
    #[inline]
    pub fn text_align(self) -> text_align::T {
        FromPrimitive::from_u16((self & TEXT_ALIGN).bits() >> TEXT_ALIGN_SHIFT).unwrap()
    }

    /// Replaces the packed text-align value with `value`.
    #[inline]
    pub fn set_text_align(&mut self, value: text_align::T) {
        // Clear the old TEXT_ALIGN bits, then shift the new value into place.
        *self = (*self & !TEXT_ALIGN) |
            FlowFlags::from_bits(value as u16 << TEXT_ALIGN_SHIFT).unwrap();
    }

    /// ORs in the parent's text-align bits (used for propagation).
    #[inline]
    pub fn set_text_align_override(&mut self, parent: FlowFlags) {
        self.insert(parent & TEXT_ALIGN);
    }

    /// ORs in the floated-descendants bits from `other`.
    #[inline]
    pub fn union_floated_descendants_flags(&mut self, other: FlowFlags) {
        self.insert(other & HAS_FLOATED_DESCENDANTS_BITMASK);
    }

    /// True if this flow's block-size depends on floats on either side.
    #[inline]
    pub fn impacted_by_floats(&self) -> bool {
        self.contains(IMPACTED_BY_LEFT_FLOATS) || self.contains(IMPACTED_BY_RIGHT_FLOATS)
    }

    /// Sets or clears `flags` according to `value`.
    #[inline]
    pub fn set(&mut self, flags: FlowFlags, value: bool) {
        if value {
            self.insert(flags);
        } else {
            self.remove(flags);
        }
    }

    /// Decodes the float kind from the `FLOATS_LEFT`/`FLOATS_RIGHT` bits.
    #[inline]
    pub fn float_kind(&self) -> float::T {
        if self.contains(FLOATS_LEFT) {
            float::left
        } else if self.contains(FLOATS_RIGHT) {
            float::right
        } else {
            float::none
        }
    }

    /// True if this flow floats to either side.
    #[inline]
    pub fn is_float(&self) -> bool {
        self.contains(FLOATS_LEFT) || self.contains(FLOATS_RIGHT)
    }

    /// True if this flow clears floats on either side.
    #[inline]
    pub fn clears_floats(&self) -> bool {
        self.contains(CLEARS_LEFT) || self.contains(CLEARS_RIGHT)
    }
}
/// The Descendants of a flow.
///
/// Also, details about their position wrt this flow.
///
/// NOTE(review): in practice this is used (via the `AbsDescendants` alias) for absolutely
/// positioned descendants for which this flow is the containing block — confirm.
#[deriving(Clone)]
pub struct Descendants {
    /// Links to every descendant. This must be private because it is unsafe to leak `FlowRef`s to
    /// layout.
    descendant_links: Vec<FlowRef>,
    /// Static block-direction offsets of all descendants from the start of this flow box.
    /// Presumably kept parallel to `descendant_links` once layout fills it in
    /// (see `iter_with_offset`) — confirm.
    pub static_block_offsets: Vec<Au>,
}
impl Descendants {
    /// Creates an empty descendant list.
    pub fn new() -> Descendants {
        Descendants {
            descendant_links: Vec::new(),
            static_block_offsets: Vec::new(),
        }
    }

    /// Returns the number of descendant links.
    pub fn len(&self) -> uint {
        self.descendant_links.len()
    }

    /// Returns true if there are no descendant links.
    pub fn is_empty(&self) -> bool {
        self.descendant_links.is_empty()
    }

    /// Appends a single descendant link.
    pub fn push(&mut self, given_descendant: FlowRef) {
        self.descendant_links.push(given_descendant);
    }

    /// Push the given descendants on to the existing descendants.
    ///
    /// Ignore any static y offsets, because they are None before layout.
    pub fn push_descendants(&mut self, given_descendants: Descendants) {
        for elem in given_descendants.descendant_links.into_iter() {
            self.descendant_links.push(elem);
        }
    }

    /// Return an iterator over the descendant flows.
    pub fn iter<'a>(&'a mut self) -> DescendantIter<'a> {
        DescendantIter {
            iter: self.descendant_links.slice_from_mut(0).iter_mut(),
        }
    }

    /// Return an iterator over (descendant, static y offset).
    ///
    /// NOTE(review): `zip` stops at the shorter of the two sequences, so this implicitly assumes
    /// `static_block_offsets` has been filled to the same length as `descendant_links` — confirm.
    pub fn iter_with_offset<'a>(&'a mut self) -> DescendantOffsetIter<'a> {
        let descendant_iter = DescendantIter {
            iter: self.descendant_links.slice_from_mut(0).iter_mut(),
        };
        descendant_iter.zip(self.static_block_offsets.slice_from_mut(0).iter_mut())
    }
}
/// Descendants with position `absolute` or `fixed` for which a flow is the containing block.
pub type AbsDescendants = Descendants;

/// A mutable iterator over descendant flows; yields bare `&mut Flow` trait objects.
pub struct DescendantIter<'a> {
    iter: MutItems<'a, FlowRef>,
}
impl<'a> Iterator<&'a mut Flow + 'a> for DescendantIter<'a> {
    /// Advances to the next descendant, yielding it as a bare mutable trait object.
    fn next(&mut self) -> Option<&'a mut Flow + 'a> {
        match self.iter.next() {
            None => None,
            Some(ref mut flow) => {
                unsafe {
                    // SAFETY(review): extends the lifetime of the `&mut Flow` obtained through
                    // the `FlowRef` to `'a`, the lifetime of the borrow of the underlying
                    // vector. This is sound only if no other `FlowRef` alias mutates the same
                    // flow while the iterator's borrow is live — TODO confirm.
                    let result: &'a mut Flow = mem::transmute(flow.deref_mut());
                    Some(result)
                }
            }
        }
    }
}

/// An iterator over `(descendant flow, static block-direction offset)` pairs.
pub type DescendantOffsetIter<'a> = Zip<DescendantIter<'a>, MutItems<'a, Au>>;
/// Information needed to compute absolute (i.e. viewport-relative) flow positions (not to be
/// confused with absolutely-positioned flows).
///
/// NOTE(review): presumably propagated top-down from the containing block during position
/// computation — confirm against `compute_absolute_position` implementations.
#[deriving(Encodable)]
pub struct AbsolutePositionInfo {
    /// The size of the containing block for relatively-positioned descendants.
    pub relative_containing_block_size: LogicalSize<Au>,
    /// The position of the absolute containing block relative to the nearest ancestor stacking
    /// context. If the absolute containing block establishes the stacking context for this flow,
    /// and this flow is not itself absolutely-positioned, then this is (0, 0).
    pub stacking_relative_position_of_absolute_containing_block: Point2D<Au>,
    /// Whether the absolute containing block forces positioned descendants to be layerized.
    ///
    /// FIXME(pcwalton): Move into `FlowFlags`.
    pub layers_needed_for_positioned_flows: bool,
}
impl AbsolutePositionInfo {
    /// Creates position info with a zero containing-block size and zero stacking-relative
    /// offset for the given writing mode, and no forced layerization.
    pub fn new(writing_mode: WritingMode) -> AbsolutePositionInfo {
        // FIXME(pcwalton): The initial relative containing block-size should be equal to the size
        // of the root layer.
        AbsolutePositionInfo {
            layers_needed_for_positioned_flows: false,
            stacking_relative_position_of_absolute_containing_block: Zero::zero(),
            relative_containing_block_size: LogicalSize::zero(writing_mode),
        }
    }
}
/// Data common to all flows.
pub struct BaseFlow {
    /// NB: Must be the first element.
    ///
    /// The necessity of this will disappear once we have dynamically-sized types.
    ref_count: AtomicUint,
    /// The accumulated restyle damage for this flow, describing which phases of layout must be
    /// redone for it.
    pub restyle_damage: RestyleDamage,
    /// The children of this flow.
    pub children: FlowList,
    /// Intrinsic inline sizes for this flow.
    pub intrinsic_inline_sizes: IntrinsicISizes,
    /// The upper left corner of the box representing this flow, relative to the box representing
    /// its parent flow.
    ///
    /// For absolute flows, this represents the position with respect to its *containing block*.
    ///
    /// This does not include margins in the block flow direction, because those can collapse. So
    /// for the block direction (usually vertical), this represents the *border box*. For the
    /// inline direction (usually horizontal), this represents the *margin box*.
    pub position: LogicalRect<Au>,
    /// The amount of overflow of this flow, relative to the containing block. Must include all the
    /// pixels of all the display list items for correct invalidation.
    pub overflow: LogicalRect<Au>,
    /// Data used during parallel traversals.
    ///
    /// TODO(pcwalton): Group with other transient data to save space.
    pub parallel: FlowParallelInfo,
    /// The floats next to this flow.
    pub floats: Floats,
    /// The collapsible margins for this flow, if any.
    pub collapsible_margins: CollapsibleMargins,
    /// The position of this flow relative to the start of the nearest ancestor stacking context.
    /// This is computed during the top-down pass of display list construction.
    pub stacking_relative_position: Point2D<Au>,
    /// Details about descendants with position 'absolute' or 'fixed' for which we are the
    /// containing block. This is in tree order. This includes any direct children.
    pub abs_descendants: AbsDescendants,
    /// The inline-size of the block container of this flow. Used for computing percentage and
    /// automatic values for `width`.
    pub block_container_inline_size: Au,
    /// The block-size of the block container of this flow, if it is an explicit size (does not
    /// depend on content heights). Used for computing percentage values for `height`.
    pub block_container_explicit_block_size: Option<Au>,
    /// Offset wrt the nearest positioned ancestor - aka the Containing Block
    /// for any absolutely positioned elements.
    pub absolute_static_i_offset: Au,
    /// Offset wrt the Initial Containing Block.
    pub fixed_static_i_offset: Au,
    /// Reference to the Containing Block, if this flow is absolutely positioned.
    pub absolute_cb: ContainingBlockLink,
    /// Information needed to compute absolute (i.e. viewport-relative) flow positions (not to be
    /// confused with absolutely-positioned flows).
    ///
    /// FIXME(pcwalton): Merge with `absolute_static_i_offset` and `fixed_static_i_offset` above?
    pub absolute_position_info: AbsolutePositionInfo,
    /// The clipping rectangle for this flow and its descendants, in layer coordinates.
    ///
    /// TODO(pcwalton): When we have `border-radius` this will need to at least support rounded
    /// rectangles.
    pub clip_rect: Rect<Au>,
    /// The results of display list building for this flow.
    pub display_list_building_result: DisplayListBuildingResult,
    /// The writing mode for this flow.
    pub writing_mode: WritingMode,
    /// Various flags for flows, tightly packed to save space.
    pub flags: FlowFlags,
}
impl fmt::Show for BaseFlow {
    /// Formats a terse debugging summary: position (`@`), children count (`CC`), and absolute
    /// descendant count (`ADC`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f,
               "@ {}, CC {}, ADC {}",
               self.position,
               self.parallel.children_count.load(SeqCst),
               self.abs_descendants.len())
    }
}
impl<E, S: Encoder<E>> Encodable<S, E> for BaseFlow {
    /// Serializes this flow's debugging-relevant fields plus its entire child subtree,
    /// dispatching on each child's `FlowClass` to encode the concrete flow type.
    fn encode(&self, e: &mut S) -> Result<(), E> {
        e.emit_struct("base", 0, |e| {
            try!(e.emit_struct_field("id", 0, |e| self.debug_id().encode(e)))
            try!(e.emit_struct_field("stacking_relative_position",
                                     1,
                                     |e| self.stacking_relative_position.encode(e)))
            try!(e.emit_struct_field("intrinsic_inline_sizes",
                                     2,
                                     |e| self.intrinsic_inline_sizes.encode(e)))
            try!(e.emit_struct_field("position", 3, |e| self.position.encode(e)))
            // Each child is emitted as a {class, data} struct so consumers can tell what
            // concrete flow type the data belongs to.
            e.emit_struct_field("children", 4, |e| {
                e.emit_seq(self.children.len(), |e| {
                    for (i, c) in self.children.iter().enumerate() {
                        try!(e.emit_seq_elt(i, |e| {
                            try!(e.emit_struct("flow", 0, |e| {
                                try!(e.emit_struct_field("class", 0, |e| c.class().encode(e)))
                                e.emit_struct_field("data", 1, |e| {
                                    match c.class() {
                                        BlockFlowClass => c.as_immutable_block().encode(e),
                                        InlineFlowClass => c.as_immutable_inline().encode(e),
                                        TableFlowClass => c.as_immutable_table().encode(e),
                                        TableWrapperFlowClass => c.as_immutable_table_wrapper().encode(e),
                                        TableRowGroupFlowClass => c.as_immutable_table_rowgroup().encode(e),
                                        TableRowFlowClass => c.as_immutable_table_row().encode(e),
                                        TableCellFlowClass => c.as_immutable_table_cell().encode(e),
                                        _ => { Ok(()) } // TODO: Support captions
                                    }
                                })
                            }))
                            Ok(())
                        }))
                    }
                    Ok(())
                })
            })
        })
    }
}
#[unsafe_destructor]
impl Drop for BaseFlow {
    /// Sanity check on destruction: a flow must only be dropped once every `FlowRef` pointing at
    /// it has been released, i.e. its reference count has reached zero.
    fn drop(&mut self) {
        let remaining = self.ref_count.load(SeqCst);
        if remaining == 0 {
            return
        }
        panic!("Flow destroyed before its ref count hit zero—this is unsafe!")
    }
}
/// Whether a base flow should be forced to be nonfloated. This can affect e.g. `TableFlow`, which
/// is never floated because the table wrapper flow is the floated one.
///
/// Compared with `==` in `BaseFlow::new` (hence the `PartialEq` derive).
#[deriving(Clone, PartialEq)]
pub enum ForceNonfloatedFlag {
    /// The flow should be floated if the node has a `float` property.
    FloatIfNecessary,
    /// The flow should be forced to be nonfloated.
    ForceNonfloated,
}
impl BaseFlow {
    /// Constructs a `BaseFlow` for the given (optional) node, deriving the positioned, floated,
    /// and clearance flags from the node's computed style. Passing `ForceNonfloated` suppresses
    /// the float flags regardless of the node's `float` property.
    #[inline]
    pub fn new(node: Option<ThreadSafeLayoutNode>,
               writing_mode: WritingMode,
               force_nonfloated: ForceNonfloatedFlag)
               -> BaseFlow {
        // Derive the initial flags from the node's style, if a node was supplied.
        let mut flags = FlowFlags::empty();
        match node {
            None => {}
            Some(node) => {
                let node_style = node.style();
                match node_style.get_box().position {
                    position::absolute | position::fixed => {
                        flags.insert(IS_ABSOLUTELY_POSITIONED)
                    }
                    _ => {}
                }
                if force_nonfloated == FloatIfNecessary {
                    match node_style.get_box().float {
                        float::none => {}
                        float::left => flags.insert(FLOATS_LEFT),
                        float::right => flags.insert(FLOATS_RIGHT),
                    }
                }
                match node_style.get_box().clear {
                    clear::none => {}
                    clear::left => flags.insert(CLEARS_LEFT),
                    clear::right => flags.insert(CLEARS_RIGHT),
                    clear::both => {
                        flags.insert(CLEARS_LEFT);
                        flags.insert(CLEARS_RIGHT);
                    }
                }
            }
        }
        // New flows start out as fully damaged.
        let mut damage = RestyleDamage::all();
        damage.remove(RECONSTRUCT_FLOW);
        BaseFlow {
            ref_count: AtomicUint::new(1),
            restyle_damage: damage,
            children: FlowList::new(),
            intrinsic_inline_sizes: IntrinsicISizes::new(),
            position: LogicalRect::zero(writing_mode),
            overflow: LogicalRect::zero(writing_mode),
            parallel: FlowParallelInfo::new(),
            floats: Floats::new(writing_mode),
            collapsible_margins: CollapsibleMargins::new(),
            stacking_relative_position: Zero::zero(),
            abs_descendants: Descendants::new(),
            absolute_static_i_offset: Au(0),
            fixed_static_i_offset: Au(0),
            block_container_inline_size: Au(0),
            block_container_explicit_block_size: None,
            absolute_cb: ContainingBlockLink::new(),
            display_list_building_result: NoDisplayListBuildingResult,
            absolute_position_info: AbsolutePositionInfo::new(writing_mode),
            clip_rect: Rect(Zero::zero(), Size2D(Au(0), Au(0))),
            flags: flags,
            writing_mode: writing_mode,
        }
    }

    /// Returns a mutable iterator over this flow's direct children.
    pub fn child_iter<'a>(&'a mut self) -> MutFlowListIterator<'a> {
        self.children.iter_mut()
    }

    /// Returns the reference count. Unsafe because manipulating the count outside of `FlowRef`
    /// can violate its ownership invariants — TODO confirm intended callers.
    pub unsafe fn ref_count<'a>(&'a self) -> &'a AtomicUint {
        &self.ref_count
    }

    /// Returns an identifier for debugging: the flow's address. NOTE(review): only unique and
    /// stable while this flow's allocation is alive and unmoved.
    pub fn debug_id(&self) -> uint {
        let p = self as *const _;
        p as uint
    }

    /// Ensures that all display list items generated by this flow are within the flow's overflow
    /// rect. This should only be used for debugging.
    pub fn validate_display_list_geometry(&self) {
        let position_with_overflow = self.position.union(&self.overflow);
        let bounds = Rect(self.stacking_relative_position,
                          Size2D(position_with_overflow.size.inline,
                                 position_with_overflow.size.block));
        let all_items = match self.display_list_building_result {
            NoDisplayListBuildingResult => Vec::new(),
            StackingContextResult(ref stacking_context) => {
                stacking_context.display_list.all_display_items()
            }
            DisplayListResult(ref display_list) => display_list.all_display_items(),
        };
        for item in all_items.iter() {
            // Only the painted (clipped) portion of an item needs to fit in the overflow rect.
            let paint_bounds = match item.base().bounds.intersection(&item.base().clip_rect) {
                None => continue,
                Some(rect) => rect,
            };
            if paint_bounds.is_empty() {
                continue;
            }
            if bounds.union(&paint_bounds) != bounds {
                error!("DisplayList item {} outside of Flow overflow ({})", item, paint_bounds);
            }
        }
    }

    /// Returns the position of the given fragment relative to the start of the nearest ancestor
    /// stacking context. The fragment must be a child fragment of this flow.
    pub fn stacking_relative_position_of_child_fragment(&self, fragment: &Fragment)
                                                        -> Point2D<Au> {
        let relative_offset =
            fragment.relative_position(&self
                                       .absolute_position_info
                                       .relative_containing_block_size);
        self.stacking_relative_position.add_size(&relative_offset.to_physical(self.writing_mode))
    }
}
impl<'a> ImmutableFlowUtils for &'a Flow + 'a {
    /// Returns true if this flow is a block flow.
    fn is_block_like(self) -> bool {
        match self.class() {
            BlockFlowClass => true,
            _ => false,
        }
    }

    /// Returns true if this flow is a proper table child.
    /// 'Proper table child' is defined as table-row flow, table-rowgroup flow,
    /// table-column-group flow, or table-caption flow.
    fn is_proper_table_child(self) -> bool {
        match self.class() {
            TableRowFlowClass | TableRowGroupFlowClass |
                TableColGroupFlowClass | TableCaptionFlowClass => true,
            _ => false,
        }
    }

    /// Returns true if this flow is a table row flow.
    fn is_table_row(self) -> bool {
        match self.class() {
            TableRowFlowClass => true,
            _ => false,
        }
    }

    /// Returns true if this flow is a table cell flow.
    fn is_table_cell(self) -> bool {
        match self.class() {
            TableCellFlowClass => true,
            _ => false,
        }
    }

    /// Returns true if this flow is a table colgroup flow.
    fn is_table_colgroup(self) -> bool {
        match self.class() {
            TableColGroupFlowClass => true,
            _ => false,
        }
    }

    /// Returns true if this flow is a table flow.
    fn is_table(self) -> bool {
        match self.class() {
            TableFlowClass => true,
            _ => false,
        }
    }

    /// Returns true if this flow is a table caption flow.
    fn is_table_caption(self) -> bool {
        match self.class() {
            TableCaptionFlowClass => true,
            _ => false,
        }
    }

    /// Returns true if this flow is a table rowgroup flow.
    fn is_table_rowgroup(self) -> bool {
        match self.class() {
            TableRowGroupFlowClass => true,
            _ => false,
        }
    }

    /// Returns true if this flow is one of table-related flows.
    fn is_table_kind(self) -> bool {
        match self.class() {
            TableWrapperFlowClass | TableFlowClass |
                TableColGroupFlowClass | TableRowGroupFlowClass |
                TableRowFlowClass | TableCaptionFlowClass | TableCellFlowClass => true,
            _ => false,
        }
    }

    /// Returns true if anonymous flow is needed between this flow and child flow.
    /// Spec: http://www.w3.org/TR/CSS21/tables.html#anonymous-boxes
    fn need_anonymous_flow(self, child: &Flow) -> bool {
        match self.class() {
            // A table may only directly contain proper table children.
            TableFlowClass => !child.is_proper_table_child(),
            // A rowgroup may only directly contain rows.
            TableRowGroupFlowClass => !child.is_table_row(),
            // A row may only directly contain cells.
            TableRowFlowClass => !child.is_table_cell(),
            _ => false
        }
    }

    /// Generates missing child flow of this flow.
    fn generate_missing_child_flow(self, node: &ThreadSafeLayoutNode) -> FlowRef {
        let flow = match self.class() {
            // Tables and rowgroups get an anonymous row.
            TableFlowClass | TableRowGroupFlowClass => {
                let fragment = Fragment::new_anonymous_table_fragment(node, TableRowFragment);
                box TableRowFlow::from_node_and_fragment(node, fragment) as Box<Flow>
            },
            // Rows get an anonymous cell.
            TableRowFlowClass => {
                let fragment = Fragment::new_anonymous_table_fragment(node, TableCellFragment);
                box TableCellFlow::from_node_and_fragment(node, fragment) as Box<Flow>
            },
            _ => {
                panic!("no need to generate a missing child")
            }
        };
        FlowRef::new(flow)
    }

    /// Returns true if this flow has no children.
    fn is_leaf(self) -> bool {
        base(self).children.len() == 0
    }

    /// Returns the number of children that this flow possesses.
    fn child_count(self) -> uint {
        base(self).children.len()
    }

    /// Return true if this flow is a Block Container.
    ///
    /// Except for table fragments and replaced elements, block-level fragments (`BlockFlow`) are
    /// also block container fragments.
    /// Non-replaced inline blocks and non-replaced table cells are also block
    /// containers.
    fn is_block_container(self) -> bool {
        match self.class() {
            // TODO: Change this when inline-blocks are supported.
            BlockFlowClass | TableCaptionFlowClass | TableCellFlowClass => {
                // FIXME: Actually check the type of the node
                self.child_count() != 0
            }
            _ => false,
        }
    }

    /// Returns true if this flow is a block flow.
    fn is_block_flow(self) -> bool {
        match self.class() {
            BlockFlowClass => true,
            _ => false,
        }
    }

    /// Returns true if this flow is an inline flow.
    fn is_inline_flow(self) -> bool {
        match self.class() {
            InlineFlowClass => true,
            _ => false,
        }
    }

    /// Dumps the flow tree for debugging.
    fn dump(self) {
        self.dump_with_level(0)
    }

    /// Dumps the flow tree for debugging, with a prefix to indicate that we're at the given level.
    fn dump_with_level(self, level: uint) {
        // Two spaces of indentation (with a guide bar) per tree level.
        let mut indent = String::new();
        for _ in range(0, level) {
            indent.push_str("| ")
        }
        println!("{}+ {}", indent, self.to_string());
        for kid in imm_child_iter(self) {
            kid.dump_with_level(level + 1)
        }
    }
}
impl<'a> MutableFlowUtils for &'a mut Flow + 'a {
    /// Traverses the tree in preorder (this flow first, then each child subtree).
    fn traverse_preorder<T:PreorderFlowTraversal>(self, traversal: &T) {
        if traversal.should_process(self) {
            traversal.process(self);
        }
        for kid in child_iter(self) {
            kid.traverse_preorder(traversal);
        }
    }

    /// Traverses the tree in postorder (each child subtree first, then this flow).
    fn traverse_postorder<T:PostorderFlowTraversal>(self, traversal: &T) {
        for kid in child_iter(self) {
            kid.traverse_postorder(traversal);
        }
        if traversal.should_process(self) {
            traversal.process(self)
        }
    }

    /// Calculate and set overflow for current flow.
    ///
    /// CSS Section 11.1
    /// This is the union of rectangles of the flows for which we define the
    /// Containing Block.
    ///
    /// Assumption: This is called in a bottom-up traversal, so kids' overflows have
    /// already been set.
    /// Assumption: Absolute descendants have had their overflow calculated.
    fn store_overflow(self, _: &LayoutContext) {
        let my_position = mut_base(self).position;
        // FIXME(pcwalton): We should calculate overflow on a per-fragment basis, because their
        // styles can affect overflow regions. Consider `box-shadow`, `outline`, etc.--anything
        // that can draw outside the border box. For now we assume overflow is the border box, but
        // that is wrong.
        let mut overflow = my_position;
        if self.is_block_container() {
            for kid in child_iter(self) {
                if kid.is_store_overflow_delayed() {
                    // Absolute flows will be handled by their CB. If we are
                    // their CB, they will show up in `abs_descendants`.
                    continue;
                }
                // Kid overflow is relative to the kid; translate it into our coordinate space
                // before taking the union.
                let mut kid_overflow = base(kid).overflow;
                kid_overflow = kid_overflow.translate(&my_position.start);
                overflow = overflow.union(&kid_overflow)
            }
            // FIXME(#2004, pcwalton): This is wrong for `position: fixed`.
            for descendant_link in mut_base(self).abs_descendants.iter() {
                let mut kid_overflow = base(descendant_link).overflow;
                kid_overflow = kid_overflow.translate(&my_position.start);
                overflow = overflow.union(&kid_overflow)
            }
        }
        mut_base(self).overflow = overflow;
    }

    /// Collect and update static y-offsets bubbled up by kids.
    ///
    /// This would essentially give us offsets of all absolutely positioned
    /// direct descendants and all fixed descendants, in tree order.
    ///
    /// Assume that this is called in a bottom-up traversal (specifically, the
    /// assign-block-size traversal). So, kids have their flow origin already set.
    /// In the case of absolute flow kids, they have their hypothetical box
    /// position already set.
    fn collect_static_block_offsets_from_children(self) {
        let mut absolute_descendant_block_offsets = Vec::new();
        for kid in mut_base(self).child_iter() {
            let mut gives_absolute_offsets = true;
            if kid.is_block_like() {
                let kid_block = kid.as_block();
                if kid_block.is_fixed() || kid_block.base.flags.contains(IS_ABSOLUTELY_POSITIONED) {
                    // It won't contribute any offsets for descendants because it would be the
                    // containing block for them.
                    gives_absolute_offsets = false;
                    // Give the offset for the current absolute flow alone.
                    absolute_descendant_block_offsets.push(
                        kid_block.get_hypothetical_block_start_edge());
                } else if kid_block.is_positioned() {
                    // It won't contribute any offsets because it would be the containing block
                    // for the descendants.
                    gives_absolute_offsets = false;
                }
            }
            if gives_absolute_offsets {
                let kid_base = mut_base(kid);
                // Avoid copying the offset vector.
                let offsets = mem::replace(&mut kid_base.abs_descendants.static_block_offsets,
                                           Vec::new());
                // Consume all the static block-offsets bubbled up by kids.
                for block_offset in offsets.into_iter() {
                    // The offsets are with respect to the kid flow's fragment. Translate them to
                    // that of the current flow.
                    absolute_descendant_block_offsets.push(
                        block_offset + kid_base.position.start.b);
                }
            }
        }
        mut_base(self).abs_descendants.static_block_offsets = absolute_descendant_block_offsets
    }
}
impl MutableOwnedFlowUtils for FlowRef {
    /// Set absolute descendants for this flow.
    ///
    /// Set yourself as the Containing Block for all the absolute descendants.
    ///
    /// This is called during flow construction, so nothing else can be accessing the descendant
    /// flows. This is enforced by the fact that we have a mutable `FlowRef`, which only flow
    /// construction is allowed to possess.
    ///
    /// NOTE(review): calls `as_block()`, so this expects the flow to be a block flow; the
    /// default `as_block()` panics otherwise.
    fn set_absolute_descendants(&mut self, abs_descendants: AbsDescendants) {
        let this = self.clone();
        let block = self.as_block();
        block.base.abs_descendants = abs_descendants;
        // Point each descendant's containing-block link back at this flow.
        for descendant_link in block.base.abs_descendants.iter() {
            let base = mut_base(descendant_link);
            base.absolute_cb.set(this.clone());
        }
    }
}
/// A link to a flow's containing block.
///
/// This cannot safely be a `Flow` pointer because this is a pointer *up* the tree, not *down* the
/// tree. A pointer up the tree is unsafe during layout because it can be used to access a node
/// with an immutable reference while that same node is being laid out, causing possible iterator
/// invalidation and use-after-free.
///
/// FIXME(pcwalton): I think this would be better with a borrow flag instead of `unsafe`.
pub struct ContainingBlockLink {
    /// The pointer up to the containing block. `None` until `set` is called.
    link: Option<FlowRef>,
}
impl ContainingBlockLink {
    /// Creates a link that does not yet point at any containing block.
    fn new() -> ContainingBlockLink {
        ContainingBlockLink { link: None }
    }

    /// Points this link at the given containing block.
    fn set(&mut self, link: FlowRef) {
        self.link = Some(link)
    }

    /// Returns a mutable reference to the underlying (optional) containing-block pointer.
    /// Unsafe because the link points *up* the flow tree; see the type-level comment.
    pub unsafe fn get<'a>(&'a mut self) -> &'a mut Option<FlowRef> {
        &mut self.link
    }

    /// Returns the rectangle generated by the containing block this link points at.
    /// Panics if the link has not been set.
    #[inline]
    pub fn generated_containing_block_rect(&mut self) -> LogicalRect<Au> {
        match self.link {
            Some(ref mut containing_block) => containing_block.generated_containing_block_rect(),
            None => panic!("haven't done it"),
        }
    }
}
Fix bogus transmute.
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo's experimental layout system builds a tree of `Flow` and `Fragment` objects and solves
//! layout constraints to obtain positions and display attributes of tree nodes. Positions are
//! computed in several tree traversals driven by the fundamental data dependencies required by
//! inline and block layout.
//!
//! Flows are interior nodes in the layout tree and correspond closely to *flow contexts* in the
//! CSS specification. Flows are responsible for positioning their child flow contexts and
//! fragments. Flows have purpose-specific fields, such as auxiliary line structs, out-of-flow
//! child lists, and so on.
//!
//! Currently, the important types of flows are:
//!
//! * `BlockFlow`: A flow that establishes a block context. It has several child flows, each of
//! which are positioned according to block formatting context rules (CSS block boxes). Block
//! flows also contain a single box to represent their rendered borders, padding, etc.
//! The BlockFlow at the root of the tree has special behavior: it stretches to the boundaries of
//! the viewport.
//!
//! * `InlineFlow`: A flow that establishes an inline context. It has a flat list of child
//! fragments/flows that are subject to inline layout and line breaking and structs to represent
//! line breaks and mapping to CSS boxes, for the purpose of handling `getClientRects()` and
//! similar methods.
use css::node_style::StyledNode;
use block::BlockFlow;
use context::LayoutContext;
use display_list_builder::{DisplayListBuildingResult, DisplayListResult};
use display_list_builder::{NoDisplayListBuildingResult, StackingContextResult};
use floats::Floats;
use flow_list::{FlowList, FlowListIterator, MutFlowListIterator};
use flow_ref::FlowRef;
use fragment::{Fragment, FragmentBoundsIterator, TableRowFragment, TableCellFragment};
use incremental::{RECONSTRUCT_FLOW, REFLOW, REFLOW_OUT_OF_FLOW, RestyleDamage};
use inline::InlineFlow;
use model::{CollapsibleMargins, IntrinsicISizes, MarginCollapseInfo};
use parallel::FlowParallelInfo;
use table::{ColumnComputedInlineSize, ColumnIntrinsicInlineSize, TableFlow};
use table_caption::TableCaptionFlow;
use table_cell::TableCellFlow;
use table_colgroup::TableColGroupFlow;
use table_row::TableRowFlow;
use table_rowgroup::TableRowGroupFlow;
use table_wrapper::TableWrapperFlow;
use wrapper::ThreadSafeLayoutNode;
use geom::{Point2D, Rect, Size2D};
use serialize::{Encoder, Encodable};
use servo_msg::compositor_msg::LayerId;
use servo_util::geometry::Au;
use servo_util::logical_geometry::WritingMode;
use servo_util::logical_geometry::{LogicalRect, LogicalSize};
use std::mem;
use std::num::Zero;
use std::fmt;
use std::iter::Zip;
use std::raw;
use std::sync::atomic::{AtomicUint, SeqCst};
use std::slice::MutItems;
use style::computed_values::{clear, float, position, text_align};
use style::ComputedValues;
use sync::Arc;
/// Virtual methods that make up a float context.
///
/// Note that virtual methods have a cost; we should not overuse them in Servo. Consider adding
/// methods to `ImmutableFlowUtils` or `MutableFlowUtils` before adding more methods here.
pub trait Flow: fmt::Show + ToString + Sync {
// RTTI
//
// TODO(pcwalton): Use Rust's RTTI, once that works.
/// Returns the class of flow that this is.
fn class(&self) -> FlowClass;
/// If this is a block flow, returns the underlying object, borrowed immutably. Fails
/// otherwise.
fn as_immutable_block<'a>(&'a self) -> &'a BlockFlow {
panic!("called as_immutable_block() on a non-block flow")
}
/// If this is a block flow, returns the underlying object. Fails otherwise.
fn as_block<'a>(&'a mut self) -> &'a mut BlockFlow {
debug!("called as_block() on a flow of type {}", self.class());
panic!("called as_block() on a non-block flow")
}
/// If this is an inline flow, returns the underlying object, borrowed immutably. Fails
/// otherwise.
fn as_immutable_inline<'a>(&'a self) -> &'a InlineFlow {
panic!("called as_immutable_inline() on a non-inline flow")
}
/// If this is an inline flow, returns the underlying object. Fails otherwise.
fn as_inline<'a>(&'a mut self) -> &'a mut InlineFlow {
panic!("called as_inline() on a non-inline flow")
}
/// If this is a table wrapper flow, returns the underlying object. Fails otherwise.
fn as_table_wrapper<'a>(&'a mut self) -> &'a mut TableWrapperFlow {
panic!("called as_table_wrapper() on a non-tablewrapper flow")
}
/// If this is a table wrapper flow, returns the underlying object, borrowed immutably. Fails
/// otherwise.
fn as_immutable_table_wrapper<'a>(&'a self) -> &'a TableWrapperFlow {
panic!("called as_immutable_table_wrapper() on a non-tablewrapper flow")
}
/// If this is a table flow, returns the underlying object. Fails otherwise.
fn as_table<'a>(&'a mut self) -> &'a mut TableFlow {
panic!("called as_table() on a non-table flow")
}
/// If this is a table flow, returns the underlying object, borrowed immutably. Fails otherwise.
fn as_immutable_table<'a>(&'a self) -> &'a TableFlow {
panic!("called as_table() on a non-table flow")
}
/// If this is a table colgroup flow, returns the underlying object. Fails otherwise.
fn as_table_colgroup<'a>(&'a mut self) -> &'a mut TableColGroupFlow {
panic!("called as_table_colgroup() on a non-tablecolgroup flow")
}
/// If this is a table rowgroup flow, returns the underlying object. Fails otherwise.
fn as_table_rowgroup<'a>(&'a mut self) -> &'a mut TableRowGroupFlow {
panic!("called as_table_rowgroup() on a non-tablerowgroup flow")
}
/// If this is a table rowgroup flow, returns the underlying object, borrowed immutably. Fails
/// otherwise.
fn as_immutable_table_rowgroup<'a>(&'a self) -> &'a TableRowGroupFlow {
panic!("called as_table_rowgroup() on a non-tablerowgroup flow")
}
/// If this is a table row flow, returns the underlying object. Fails otherwise.
fn as_table_row<'a>(&'a mut self) -> &'a mut TableRowFlow {
panic!("called as_table_row() on a non-tablerow flow")
}
/// If this is a table row flow, returns the underlying object, borrowed immutably. Fails
/// otherwise.
fn as_immutable_table_row<'a>(&'a self) -> &'a TableRowFlow {
panic!("called as_table_row() on a non-tablerow flow")
}
/// If this is a table cell flow, returns the underlying object. Fails otherwise.
fn as_table_caption<'a>(&'a mut self) -> &'a mut TableCaptionFlow {
panic!("called as_table_caption() on a non-tablecaption flow")
}
/// If this is a table cell flow, returns the underlying object. Fails otherwise.
fn as_table_cell<'a>(&'a mut self) -> &'a mut TableCellFlow {
panic!("called as_table_cell() on a non-tablecell flow")
}
/// If this is a table cell flow, returns the underlying object, borrowed immutably. Fails
/// otherwise.
fn as_immutable_table_cell<'a>(&'a self) -> &'a TableCellFlow {
panic!("called as_table_cell() on a non-tablecell flow")
}
/// If this is a table row, table rowgroup, or table flow, returns column intrinsic
/// inline-sizes. Fails otherwise.
fn column_intrinsic_inline_sizes<'a>(&'a mut self) -> &'a mut Vec<ColumnIntrinsicInlineSize> {
panic!("called column_intrinsic_inline_sizes() on non-table flow")
}
/// If this is a table row, table rowgroup, or table flow, returns column computed
/// inline-sizes. Fails otherwise.
fn column_computed_inline_sizes<'a>(&'a mut self) -> &'a mut Vec<ColumnComputedInlineSize> {
panic!("called column_intrinsic_inline_sizes() on non-table flow")
}
// Main methods
/// Pass 1 of reflow: computes minimum and preferred inline-sizes.
///
/// Recursively (bottom-up) determine the flow's minimum and preferred inline-sizes. When
/// called on this flow, all child flows have had their minimum and preferred inline-sizes set.
/// This function must decide minimum/preferred inline-sizes based on its children's inline-
/// sizes and the dimensions of any boxes it is responsible for flowing.
fn bubble_inline_sizes(&mut self) {
panic!("bubble_inline_sizes not yet implemented")
}
/// Pass 2 of reflow: computes inline-size.
fn assign_inline_sizes(&mut self, _ctx: &LayoutContext) {
panic!("assign_inline_sizes not yet implemented")
}
/// Pass 3a of reflow: computes block-size.
fn assign_block_size<'a>(&mut self, _ctx: &'a LayoutContext<'a>) {
panic!("assign_block_size not yet implemented")
}
/// If this is a float, places it. The default implementation does nothing.
fn place_float_if_applicable<'a>(&mut self, _: &'a LayoutContext<'a>) {}
/// Assigns block-sizes in-order; or, if this is a float, places the float. The default
/// implementation simply assigns block-sizes if this flow is impacted by floats. Returns true
/// if this child was impacted by floats or false otherwise.
fn assign_block_size_for_inorder_child_if_necessary<'a>(&mut self,
                                                        layout_context: &'a LayoutContext<'a>)
                                                        -> bool {
    let impacted = base(&*self).flags.impacted_by_floats();
    if impacted {
        self.assign_block_size(layout_context);
        // The block-size is now up to date, so the corresponding damage bits can be
        // cleared for this flow.
        mut_base(&mut *self).restyle_damage.remove(REFLOW_OUT_OF_FLOW | REFLOW);
    }
    impacted
}
/// Phase 4 of reflow: computes absolute positions.
fn compute_absolute_position(&mut self) {
    // The default implementation is a no-op.
}
/// Phase 5 of reflow: builds display lists.
fn build_display_list(&mut self, layout_context: &LayoutContext);
/// Perform an iteration of fragment bounds on this flow.
fn iterate_through_fragment_bounds(&self, iterator: &mut FragmentBoundsIterator);
// NOTE(review): presumably lets a flow contribute to `MarginCollapseInfo` before its
// block-start margin is collapsed — confirm against the overriding implementations.
fn compute_collapsible_block_start_margin(&mut self,
                                          _layout_context: &mut LayoutContext,
                                          _margin_collapse_info: &mut MarginCollapseInfo) {
    // The default implementation is a no-op.
}
/// Marks this flow as the root flow. The default implementation is a no-op.
fn mark_as_root(&mut self) {}
// Note that the following functions are mostly called using static method
// dispatch, so it's ok to have them in this trait. Plus, they have
// different behaviour for different types of Flow, so they can't go into
// the Immutable / Mutable Flow Utils traits without additional casts.
/// Return true if store overflow is delayed for this flow.
///
/// Currently happens only for absolutely positioned flows.
fn is_store_overflow_delayed(&mut self) -> bool {
    false
}
/// Returns true if this flow is the root of the flow tree. Defaults to false.
fn is_root(&self) -> bool {
    false
}
/// The 'position' property of this flow. Defaults to `static`.
fn positioning(&self) -> position::T {
    position::static_
}
/// Return true if this flow has position 'fixed'.
fn is_fixed(&self) -> bool {
    match self.positioning() {
        position::fixed => true,
        _ => false,
    }
}
/// Returns true if this flow is positioned at all, i.e. relatively or absolutely.
fn is_positioned(&self) -> bool {
    self.is_relatively_positioned() || base(self).flags.contains(IS_ABSOLUTELY_POSITIONED)
}
/// Return true if this flow has position 'relative'.
fn is_relatively_positioned(&self) -> bool {
    self.positioning() == position::relative
}
/// Return true if this is the root of an absolute flow tree.
fn is_root_of_absolute_flow_tree(&self) -> bool {
    false
}
/// Returns true if this is an absolute containing block.
fn is_absolute_containing_block(&self) -> bool {
    false
}
/// Updates the inline position of a child flow during the assign-height traversal. At present,
/// this is only used for absolutely-positioned inline-blocks.
fn update_late_computed_inline_position_if_necessary(&mut self, inline_position: Au);
/// Updates the block position of a child flow during the assign-height traversal. At present,
/// this is only used for absolutely-positioned inline-blocks.
fn update_late_computed_block_position_if_necessary(&mut self, block_position: Au);
/// Return the dimensions of the containing block generated by this flow for absolutely-
/// positioned descendants. For block flows, this is the padding box.
///
/// NB: Do not change this `&self` to `&mut self` under any circumstances! It has security
/// implications because this can be called on parents concurrently from descendants!
fn generated_containing_block_rect(&self) -> LogicalRect<Au> {
    // Fixed: the message previously referred to the method by a stale name,
    // `generated_containing_block_position`.
    panic!("generated_containing_block_rect not yet implemented for this flow")
}
/// Returns a layer ID for the given fragment.
fn layer_id(&self, fragment_id: uint) -> LayerId {
    unsafe {
        // NOTE(review): the flow's address is used as the unique per-flow component of
        // the layer ID; this assumes the flow object is not moved while layer IDs are
        // live — confirm against how flows are allocated.
        let pointer: uint = mem::transmute(self);
        LayerId(pointer, fragment_id)
    }
}
/// Attempts to perform incremental fixup of this flow by replacing its fragment's style with
/// the new style. This can only succeed if the flow has exactly one fragment.
fn repair_style(&mut self, new_style: &Arc<ComputedValues>);
}
// Base access
/// Returns the `BaseFlow` header of the given flow, borrowed immutably.
#[inline(always)]
pub fn base<'a>(this: &'a Flow) -> &'a BaseFlow {
    unsafe {
        // NOTE(review): assumes every concrete flow type stores its `BaseFlow` at
        // offset 0, so the trait object's data pointer can be reinterpreted as a
        // `&BaseFlow` — confirm against the flow struct layouts.
        let obj = mem::transmute::<&'a Flow, raw::TraitObject>(this);
        mem::transmute::<*mut (), &'a BaseFlow>(obj.data)
    }
}
/// Iterates over the children of this immutable flow.
pub fn imm_child_iter<'a>(flow: &'a Flow) -> FlowListIterator<'a> {
    base(flow).children.iter()
}
/// Returns the `BaseFlow` header of the given flow, borrowed mutably.
/// See `base` above for the layout assumption this relies on.
#[inline(always)]
pub fn mut_base<'a>(this: &'a mut Flow) -> &'a mut BaseFlow {
    unsafe {
        // Same reinterpretation as `base`, but through a mutable reference.
        let obj = mem::transmute::<&'a mut Flow, raw::TraitObject>(this);
        mem::transmute::<*mut (), &'a mut BaseFlow>(obj.data)
    }
}
/// Iterates over the children of this flow.
pub fn child_iter<'a>(flow: &'a mut Flow) -> MutFlowListIterator<'a> {
    mut_base(flow).children.iter_mut()
}
/// Convenience queries implemented once for immutable `&Flow` trait objects.
pub trait ImmutableFlowUtils {
    // Convenience functions
    /// Returns true if this flow is a block or a float flow.
    fn is_block_like(self) -> bool;
    /// Returns true if this flow is a table flow.
    fn is_table(self) -> bool;
    /// Returns true if this flow is a table caption flow.
    fn is_table_caption(self) -> bool;
    /// Returns true if this flow is a proper table child.
    fn is_proper_table_child(self) -> bool;
    /// Returns true if this flow is a table row flow.
    fn is_table_row(self) -> bool;
    /// Returns true if this flow is a table cell flow.
    fn is_table_cell(self) -> bool;
    /// Returns true if this flow is a table colgroup flow.
    fn is_table_colgroup(self) -> bool;
    /// Returns true if this flow is a table rowgroup flow.
    fn is_table_rowgroup(self) -> bool;
    /// Returns true if this flow is one of table-related flows.
    fn is_table_kind(self) -> bool;
    /// Returns true if anonymous flow is needed between this flow and child flow.
    fn need_anonymous_flow(self, child: &Flow) -> bool;
    /// Generates missing child flow of this flow.
    fn generate_missing_child_flow(self, node: &ThreadSafeLayoutNode) -> FlowRef;
    /// Returns true if this flow has no children.
    fn is_leaf(self) -> bool;
    /// Returns the number of children that this flow possesses.
    fn child_count(self) -> uint;
    /// Return true if this flow is a Block Container.
    fn is_block_container(self) -> bool;
    /// Returns true if this flow is a block flow.
    fn is_block_flow(self) -> bool;
    /// Returns true if this flow is an inline flow.
    fn is_inline_flow(self) -> bool;
    /// Dumps the flow tree for debugging.
    fn dump(self);
    /// Dumps the flow tree for debugging, with a prefix to indicate that we're at the given level.
    fn dump_with_level(self, level: uint);
}
/// Traversals and mutators implemented once for mutable `&mut Flow` trait objects.
pub trait MutableFlowUtils {
    // Traversals
    /// Traverses the tree in preorder.
    fn traverse_preorder<T:PreorderFlowTraversal>(self, traversal: &T);
    /// Traverses the tree in postorder.
    fn traverse_postorder<T:PostorderFlowTraversal>(self, traversal: &T);
    // Mutators
    /// Computes the overflow region for this flow.
    fn store_overflow(self, _: &LayoutContext);
    /// Gathers static block-offsets bubbled up by kids.
    ///
    /// This essentially gives us offsets of all absolutely positioned direct descendants and all
    /// fixed descendants, in tree order.
    ///
    /// This is called in a bottom-up traversal (specifically, the assign-block-size traversal).
    /// So, kids have their flow origin already set. In the case of absolute flow kids, they have
    /// their hypothetical box position already set.
    fn collect_static_block_offsets_from_children(self);
}
/// Utilities that require an owned `FlowRef` (i.e. are only safe during flow construction).
pub trait MutableOwnedFlowUtils {
    /// Set absolute descendants for this flow.
    ///
    /// Set this flow as the Containing Block for all the absolute descendants.
    fn set_absolute_descendants(&mut self, abs_descendants: AbsDescendants);
}
/// Identifies the concrete type of a flow, enabling checked downcasts (e.g. `as_block`,
/// `as_immutable_table_cell`) without a virtual type-test per query.
#[deriving(Encodable, PartialEq, Show)]
pub enum FlowClass {
    BlockFlowClass,
    InlineFlowClass,
    ListItemFlowClass,
    TableWrapperFlowClass,
    TableFlowClass,
    TableColGroupFlowClass,
    TableRowGroupFlowClass,
    TableRowFlowClass,
    TableCaptionFlowClass,
    TableCellFlowClass,
}
/// A top-down traversal.
pub trait PreorderFlowTraversal {
    /// The operation to perform on each flow.
    fn process(&self, flow: &mut Flow);
    /// Returns true if this node must be processed. If this returns false,
    /// we skip the operation for this node, but continue processing the descendants.
    /// This is called *after* parent nodes are visited.
    fn should_process(&self, _flow: &mut Flow) -> bool {
        true
    }
}
/// A bottom-up traversal, with an optional in-order pass.
pub trait PostorderFlowTraversal {
    /// The operation to perform on each flow.
    fn process(&self, flow: &mut Flow);
    /// Returns false if this node must be processed in-order. If this returns false, we skip the
    /// operation for this node, but continue processing the ancestors. This is called *after*
    /// child nodes are visited.
    fn should_process(&self, _flow: &mut Flow) -> bool {
        true
    }
}
bitflags! {
    #[doc = "Flags used in flows."]
    // The flags are packed into a u16. Bits 0-10 are independent booleans; bits 11-14
    // form the multi-bit TEXT_ALIGN field (see `TEXT_ALIGN_SHIFT`).
    flags FlowFlags: u16 {
        // floated descendants flags
        #[doc = "Whether this flow has descendants that float left in the same block formatting"]
        #[doc = "context."]
        const HAS_LEFT_FLOATED_DESCENDANTS = 0b0000_0000_0000_0001,
        #[doc = "Whether this flow has descendants that float right in the same block formatting"]
        #[doc = "context."]
        const HAS_RIGHT_FLOATED_DESCENDANTS = 0b0000_0000_0000_0010,
        #[doc = "Whether this flow is impacted by floats to the left in the same block formatting"]
        #[doc = "context (i.e. its height depends on some prior flows with `float: left`)."]
        const IMPACTED_BY_LEFT_FLOATS = 0b0000_0000_0000_0100,
        #[doc = "Whether this flow is impacted by floats to the right in the same block"]
        #[doc = "formatting context (i.e. its height depends on some prior flows with `float:"]
        #[doc = "right`)."]
        const IMPACTED_BY_RIGHT_FLOATS = 0b0000_0000_0000_1000,
        // text align flags
        #[doc = "Whether this flow contains a flow that has its own layer within the same absolute"]
        #[doc = "containing block."]
        const LAYERS_NEEDED_FOR_DESCENDANTS = 0b0000_0000_0001_0000,
        #[doc = "Whether this flow must have its own layer. Even if this flag is not set, it might"]
        #[doc = "get its own layer if it's deemed to be likely to overlap flows with their own"]
        #[doc = "layer."]
        const NEEDS_LAYER = 0b0000_0000_0010_0000,
        #[doc = "Whether this flow is absolutely positioned. This is checked all over layout, so a"]
        #[doc = "virtual call is too expensive."]
        const IS_ABSOLUTELY_POSITIONED = 0b0000_0000_0100_0000,
        #[doc = "Whether this flow clears to the left. This is checked all over layout, so a"]
        #[doc = "virtual call is too expensive."]
        const CLEARS_LEFT = 0b0000_0000_1000_0000,
        #[doc = "Whether this flow clears to the right. This is checked all over layout, so a"]
        #[doc = "virtual call is too expensive."]
        const CLEARS_RIGHT = 0b0000_0001_0000_0000,
        #[doc = "Whether this flow is left-floated. This is checked all over layout, so a"]
        #[doc = "virtual call is too expensive."]
        const FLOATS_LEFT = 0b0000_0010_0000_0000,
        #[doc = "Whether this flow is right-floated. This is checked all over layout, so a"]
        #[doc = "virtual call is too expensive."]
        const FLOATS_RIGHT = 0b0000_0100_0000_0000,
        #[doc = "Text alignment. \
                 NB: If you update this, update `TEXT_ALIGN_SHIFT` below."]
        const TEXT_ALIGN = 0b0111_1000_0000_0000,
    }
}
// NB: If you update this field, you must update the floated descendants flags.
/// The bitmask of flags that represent the `has_left_floated_descendants` and
/// `has_right_floated_descendants` fields (the two low bits of `FlowFlags`).
static HAS_FLOATED_DESCENDANTS_BITMASK: FlowFlags = FlowFlags { bits: 0b0000_0011 };
/// The number of bits we must shift off to handle the text alignment field.
///
/// NB: If you update this, update `TEXT_ALIGN` above.
static TEXT_ALIGN_SHIFT: uint = 11;
impl FlowFlags {
    /// Propagates text alignment flags from an appropriate parent flow per CSS 2.1.
    ///
    /// FIXME(#2265, pcwalton): It would be cleaner and faster to make this a derived CSS property
    /// `-servo-text-align-in-effect`.
    pub fn propagate_text_alignment_from_parent(&mut self, parent_flags: FlowFlags) {
        self.set_text_align_override(parent_flags);
    }
    /// Extracts the text alignment stored in the `TEXT_ALIGN` field.
    #[inline]
    pub fn text_align(self) -> text_align::T {
        FromPrimitive::from_u16((self & TEXT_ALIGN).bits() >> TEXT_ALIGN_SHIFT).unwrap()
    }
    /// Stores `value` in the `TEXT_ALIGN` field, leaving all other flags intact.
    #[inline]
    pub fn set_text_align(&mut self, value: text_align::T) {
        // Clear the old field, then OR in the new value shifted into place.
        *self = (*self & !TEXT_ALIGN) |
            FlowFlags::from_bits(value as u16 << TEXT_ALIGN_SHIFT).unwrap();
    }
    /// ORs in the parent's `TEXT_ALIGN` bits.
    #[inline]
    pub fn set_text_align_override(&mut self, parent: FlowFlags) {
        self.insert(parent & TEXT_ALIGN);
    }
    /// ORs in the other flags' floated-descendants bits.
    #[inline]
    pub fn union_floated_descendants_flags(&mut self, other: FlowFlags) {
        self.insert(other & HAS_FLOATED_DESCENDANTS_BITMASK);
    }
    /// True if this flow is impacted by floats on either side.
    #[inline]
    pub fn impacted_by_floats(&self) -> bool {
        self.contains(IMPACTED_BY_LEFT_FLOATS) || self.contains(IMPACTED_BY_RIGHT_FLOATS)
    }
    /// Inserts or removes `flags` according to `value`.
    #[inline]
    pub fn set(&mut self, flags: FlowFlags, value: bool) {
        if value {
            self.insert(flags);
        } else {
            self.remove(flags);
        }
    }
    /// Translates the float flags back into the CSS `float` property value.
    #[inline]
    pub fn float_kind(&self) -> float::T {
        if self.contains(FLOATS_LEFT) {
            float::left
        } else if self.contains(FLOATS_RIGHT) {
            float::right
        } else {
            float::none
        }
    }
    /// True if this flow floats to either side.
    #[inline]
    pub fn is_float(&self) -> bool {
        self.contains(FLOATS_LEFT) || self.contains(FLOATS_RIGHT)
    }
    /// True if this flow clears floats on either side.
    #[inline]
    pub fn clears_floats(&self) -> bool {
        self.contains(CLEARS_LEFT) || self.contains(CLEARS_RIGHT)
    }
}
/// The Descendants of a flow.
///
/// Also, details about their position wrt this flow.
#[deriving(Clone)]
pub struct Descendants {
    /// Links to every descendant. This must be private because it is unsafe to leak `FlowRef`s to
    /// layout.
    descendant_links: Vec<FlowRef>,
    /// Static block-direction offsets of all descendants from the start of this flow box.
    pub static_block_offsets: Vec<Au>,
}
impl Descendants {
    /// Creates an empty descendants list.
    pub fn new() -> Descendants {
        Descendants {
            descendant_links: Vec::new(),
            static_block_offsets: Vec::new(),
        }
    }
    /// Returns the number of descendants.
    pub fn len(&self) -> uint {
        self.descendant_links.len()
    }
    /// Returns true if there are no descendants.
    pub fn is_empty(&self) -> bool {
        self.descendant_links.is_empty()
    }
    /// Appends a single descendant.
    pub fn push(&mut self, given_descendant: FlowRef) {
        self.descendant_links.push(given_descendant);
    }
    /// Push the given descendants on to the existing descendants.
    ///
    /// Ignore any static y offsets, because they are None before layout.
    pub fn push_descendants(&mut self, given_descendants: Descendants) {
        for elem in given_descendants.descendant_links.into_iter() {
            self.descendant_links.push(elem);
        }
    }
    /// Return an iterator over the descendant flows.
    pub fn iter<'a>(&'a mut self) -> DescendantIter<'a> {
        DescendantIter {
            iter: self.descendant_links.slice_from_mut(0).iter_mut(),
        }
    }
    /// Return an iterator over (descendant, static y offset).
    pub fn iter_with_offset<'a>(&'a mut self) -> DescendantOffsetIter<'a> {
        let descendant_iter = DescendantIter {
            iter: self.descendant_links.slice_from_mut(0).iter_mut(),
        };
        descendant_iter.zip(self.static_block_offsets.slice_from_mut(0).iter_mut())
    }
}
/// Absolutely-positioned descendants; an alias since the representation is shared.
pub type AbsDescendants = Descendants;
/// Iterator over descendant flows, yielding `&mut Flow` trait objects.
pub struct DescendantIter<'a> {
    iter: MutItems<'a, FlowRef>,
}
impl<'a> Iterator<&'a mut Flow + 'a> for DescendantIter<'a> {
    fn next(&mut self) -> Option<&'a mut Flow + 'a> {
        // Deref the FlowRef to hand out the underlying trait object.
        self.iter.next().map(|flow| &mut **flow)
    }
}
/// Iterator over (descendant flow, static block offset) pairs.
pub type DescendantOffsetIter<'a> = Zip<DescendantIter<'a>, MutItems<'a, Au>>;
/// Information needed to compute absolute (i.e. viewport-relative) flow positions (not to be
/// confused with absolutely-positioned flows).
#[deriving(Encodable)]
pub struct AbsolutePositionInfo {
    /// The size of the containing block for relatively-positioned descendants.
    pub relative_containing_block_size: LogicalSize<Au>,
    /// The position of the absolute containing block relative to the nearest ancestor stacking
    /// context. If the absolute containing block establishes the stacking context for this flow,
    /// and this flow is not itself absolutely-positioned, then this is (0, 0).
    pub stacking_relative_position_of_absolute_containing_block: Point2D<Au>,
    /// Whether the absolute containing block forces positioned descendants to be layerized.
    ///
    /// FIXME(pcwalton): Move into `FlowFlags`.
    pub layers_needed_for_positioned_flows: bool,
}
impl AbsolutePositionInfo {
    /// Creates position info with zero size and origin and no forced layerization.
    pub fn new(writing_mode: WritingMode) -> AbsolutePositionInfo {
        // FIXME(pcwalton): The initial relative containing block-size should be equal to the size
        // of the root layer.
        AbsolutePositionInfo {
            relative_containing_block_size: LogicalSize::zero(writing_mode),
            stacking_relative_position_of_absolute_containing_block: Zero::zero(),
            layers_needed_for_positioned_flows: false,
        }
    }
}
/// Data common to all flows.
pub struct BaseFlow {
    /// NB: Must be the first element.
    ///
    /// The necessity of this will disappear once we have dynamically-sized types.
    ref_count: AtomicUint,
    /// The restyle damage accrued for this flow; cleared as reflow phases complete.
    pub restyle_damage: RestyleDamage,
    /// The children of this flow.
    pub children: FlowList,
    /// Intrinsic inline sizes for this flow.
    pub intrinsic_inline_sizes: IntrinsicISizes,
    /// The upper left corner of the box representing this flow, relative to the box representing
    /// its parent flow.
    ///
    /// For absolute flows, this represents the position with respect to its *containing block*.
    ///
    /// This does not include margins in the block flow direction, because those can collapse. So
    /// for the block direction (usually vertical), this represents the *border box*. For the
    /// inline direction (usually horizontal), this represents the *margin box*.
    pub position: LogicalRect<Au>,
    /// The amount of overflow of this flow, relative to the containing block. Must include all the
    /// pixels of all the display list items for correct invalidation.
    pub overflow: LogicalRect<Au>,
    /// Data used during parallel traversals.
    ///
    /// TODO(pcwalton): Group with other transient data to save space.
    pub parallel: FlowParallelInfo,
    /// The floats next to this flow.
    pub floats: Floats,
    /// The collapsible margins for this flow, if any.
    pub collapsible_margins: CollapsibleMargins,
    /// The position of this flow relative to the start of the nearest ancestor stacking context.
    /// This is computed during the top-down pass of display list construction.
    pub stacking_relative_position: Point2D<Au>,
    /// Details about descendants with position 'absolute' or 'fixed' for which we are the
    /// containing block. This is in tree order. This includes any direct children.
    pub abs_descendants: AbsDescendants,
    /// The inline-size of the block container of this flow. Used for computing percentage and
    /// automatic values for `width`.
    pub block_container_inline_size: Au,
    /// The block-size of the block container of this flow, if it is an explicit size (does not
    /// depend on content heights). Used for computing percentage values for `height`.
    pub block_container_explicit_block_size: Option<Au>,
    /// Offset wrt the nearest positioned ancestor - aka the Containing Block
    /// for any absolutely positioned elements.
    pub absolute_static_i_offset: Au,
    /// Offset wrt the Initial Containing Block.
    pub fixed_static_i_offset: Au,
    /// Reference to the Containing Block, if this flow is absolutely positioned.
    pub absolute_cb: ContainingBlockLink,
    /// Information needed to compute absolute (i.e. viewport-relative) flow positions (not to be
    /// confused with absolutely-positioned flows).
    ///
    /// FIXME(pcwalton): Merge with `absolute_static_i_offset` and `fixed_static_i_offset` above?
    pub absolute_position_info: AbsolutePositionInfo,
    /// The clipping rectangle for this flow and its descendants, in layer coordinates.
    ///
    /// TODO(pcwalton): When we have `border-radius` this will need to at least support rounded
    /// rectangles.
    pub clip_rect: Rect<Au>,
    /// The results of display list building for this flow.
    pub display_list_building_result: DisplayListBuildingResult,
    /// The writing mode for this flow.
    pub writing_mode: WritingMode,
    /// Various flags for flows, tightly packed to save space.
    pub flags: FlowFlags,
}
impl fmt::Show for BaseFlow {
    /// Formats a terse summary: position (`@`), children count (`CC`), and
    /// absolute-descendant count (`ADC`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f,
               "@ {}, CC {}, ADC {}",
               self.position,
               self.parallel.children_count.load(SeqCst),
               self.abs_descendants.len())
    }
}
impl<E, S: Encoder<E>> Encodable<S, E> for BaseFlow {
    /// Serializes this flow's identity, geometry, and children (recursively) for
    /// debugging output. Note that each child is dispatched on its `FlowClass` so the
    /// concrete flow data can be encoded.
    fn encode(&self, e: &mut S) -> Result<(), E> {
        e.emit_struct("base", 0, |e| {
            try!(e.emit_struct_field("id", 0, |e| self.debug_id().encode(e)))
            try!(e.emit_struct_field("stacking_relative_position",
                                     1,
                                     |e| self.stacking_relative_position.encode(e)))
            try!(e.emit_struct_field("intrinsic_inline_sizes",
                                     2,
                                     |e| self.intrinsic_inline_sizes.encode(e)))
            try!(e.emit_struct_field("position", 3, |e| self.position.encode(e)))
            e.emit_struct_field("children", 4, |e| {
                e.emit_seq(self.children.len(), |e| {
                    for (i, c) in self.children.iter().enumerate() {
                        try!(e.emit_seq_elt(i, |e| {
                            try!(e.emit_struct("flow", 0, |e| {
                                try!(e.emit_struct_field("class", 0, |e| c.class().encode(e)))
                                e.emit_struct_field("data", 1, |e| {
                                    match c.class() {
                                        BlockFlowClass => c.as_immutable_block().encode(e),
                                        InlineFlowClass => c.as_immutable_inline().encode(e),
                                        TableFlowClass => c.as_immutable_table().encode(e),
                                        TableWrapperFlowClass => c.as_immutable_table_wrapper().encode(e),
                                        TableRowGroupFlowClass => c.as_immutable_table_rowgroup().encode(e),
                                        TableRowFlowClass => c.as_immutable_table_row().encode(e),
                                        TableCellFlowClass => c.as_immutable_table_cell().encode(e),
                                        _ => { Ok(()) } // TODO: Support captions
                                    }
                                })
                            }))
                            Ok(())
                        }))
                    }
                    Ok(())
                })
            })
        })
    }
}
#[unsafe_destructor]
impl Drop for BaseFlow {
    fn drop(&mut self) {
        // A nonzero ref count at destruction time means some `FlowRef` still points at this
        // flow; freeing it now would leave that reference dangling.
        if self.ref_count.load(SeqCst) != 0 {
            panic!("Flow destroyed before its ref count hit zero—this is unsafe!")
        }
    }
}
/// Whether a base flow should be forced to be nonfloated. This can affect e.g. `TableFlow`, which
/// is never floated because the table wrapper flow is the floated one.
#[deriving(Clone, PartialEq)]
pub enum ForceNonfloatedFlag {
    /// The flow should be floated if the node has a `float` property.
    FloatIfNecessary,
    /// The flow should be forced to be nonfloated.
    ForceNonfloated,
}
impl BaseFlow {
    /// Constructs a new `BaseFlow`, deriving the initial flags (absolute positioning,
    /// floatedness, clearance) from the node's style if a node is supplied.
    #[inline]
    pub fn new(node: Option<ThreadSafeLayoutNode>,
               writing_mode: WritingMode,
               force_nonfloated: ForceNonfloatedFlag)
               -> BaseFlow {
        let mut flags = FlowFlags::empty();
        match node {
            None => {}
            Some(node) => {
                let node_style = node.style();
                match node_style.get_box().position {
                    position::absolute | position::fixed => {
                        flags.insert(IS_ABSOLUTELY_POSITIONED)
                    }
                    _ => {}
                }
                // Float flags are suppressed when the caller forces nonfloatedness
                // (e.g. for table flows, whose wrapper is the floated one).
                if force_nonfloated == FloatIfNecessary {
                    match node_style.get_box().float {
                        float::none => {}
                        float::left => flags.insert(FLOATS_LEFT),
                        float::right => flags.insert(FLOATS_RIGHT),
                    }
                }
                match node_style.get_box().clear {
                    clear::none => {}
                    clear::left => flags.insert(CLEARS_LEFT),
                    clear::right => flags.insert(CLEARS_RIGHT),
                    clear::both => {
                        flags.insert(CLEARS_LEFT);
                        flags.insert(CLEARS_RIGHT);
                    }
                }
            }
        }
        // New flows start out as fully damaged (except for flow reconstruction, which
        // by definition has just happened).
        let mut damage = RestyleDamage::all();
        damage.remove(RECONSTRUCT_FLOW);
        BaseFlow {
            ref_count: AtomicUint::new(1),
            restyle_damage: damage,
            children: FlowList::new(),
            intrinsic_inline_sizes: IntrinsicISizes::new(),
            position: LogicalRect::zero(writing_mode),
            overflow: LogicalRect::zero(writing_mode),
            parallel: FlowParallelInfo::new(),
            floats: Floats::new(writing_mode),
            collapsible_margins: CollapsibleMargins::new(),
            stacking_relative_position: Zero::zero(),
            abs_descendants: Descendants::new(),
            absolute_static_i_offset: Au(0),
            fixed_static_i_offset: Au(0),
            block_container_inline_size: Au(0),
            block_container_explicit_block_size: None,
            absolute_cb: ContainingBlockLink::new(),
            display_list_building_result: NoDisplayListBuildingResult,
            absolute_position_info: AbsolutePositionInfo::new(writing_mode),
            clip_rect: Rect(Zero::zero(), Size2D(Au(0), Au(0))),
            flags: flags,
            writing_mode: writing_mode,
        }
    }
    /// Returns a mutable iterator over this flow's children.
    pub fn child_iter<'a>(&'a mut self) -> MutFlowListIterator<'a> {
        self.children.iter_mut()
    }
    /// Exposes the raw reference count. Unsafe because manipulating it incorrectly can
    /// cause the `Drop` check below to fire or a flow to be freed while referenced.
    pub unsafe fn ref_count<'a>(&'a self) -> &'a AtomicUint {
        &self.ref_count
    }
    /// Returns a debugging identifier for this flow: its address.
    pub fn debug_id(&self) -> uint {
        let p = self as *const _;
        p as uint
    }
    /// Ensures that all display list items generated by this flow are within the flow's overflow
    /// rect. This should only be used for debugging.
    pub fn validate_display_list_geometry(&self) {
        let position_with_overflow = self.position.union(&self.overflow);
        let bounds = Rect(self.stacking_relative_position,
                          Size2D(position_with_overflow.size.inline,
                                 position_with_overflow.size.block));
        let all_items = match self.display_list_building_result {
            NoDisplayListBuildingResult => Vec::new(),
            StackingContextResult(ref stacking_context) => {
                stacking_context.display_list.all_display_items()
            }
            DisplayListResult(ref display_list) => display_list.all_display_items(),
        };
        for item in all_items.iter() {
            // Items fully clipped away can't paint outside the overflow rect; skip them.
            let paint_bounds = match item.base().bounds.intersection(&item.base().clip_rect) {
                None => continue,
                Some(rect) => rect,
            };
            if paint_bounds.is_empty() {
                continue;
            }
            // If the union grows the bounds, the item pokes outside the overflow rect.
            if bounds.union(&paint_bounds) != bounds {
                error!("DisplayList item {} outside of Flow overflow ({})", item, paint_bounds);
            }
        }
    }
    /// Returns the position of the given fragment relative to the start of the nearest ancestor
    /// stacking context. The fragment must be a child fragment of this flow.
    pub fn stacking_relative_position_of_child_fragment(&self, fragment: &Fragment)
                                                        -> Point2D<Au> {
        let relative_offset =
            fragment.relative_position(&self
                                       .absolute_position_info
                                       .relative_containing_block_size);
        self.stacking_relative_position.add_size(&relative_offset.to_physical(self.writing_mode))
    }
}
impl<'a> ImmutableFlowUtils for &'a Flow + 'a {
    /// Returns true if this flow's class is `BlockFlowClass`. (The trait documents this
    /// as "block or float flow"; NOTE(review): presumably floats are represented as
    /// block flows — confirm against the block flow implementation.)
    fn is_block_like(self) -> bool {
        match self.class() {
            BlockFlowClass => true,
            _ => false,
        }
    }
    /// Returns true if this flow is a proper table child.
    /// 'Proper table child' is defined as table-row flow, table-rowgroup flow,
    /// table-column-group flow, or table-caption flow.
    fn is_proper_table_child(self) -> bool {
        match self.class() {
            TableRowFlowClass | TableRowGroupFlowClass |
                TableColGroupFlowClass | TableCaptionFlowClass => true,
            _ => false,
        }
    }
    /// Returns true if this flow is a table row flow.
    fn is_table_row(self) -> bool {
        match self.class() {
            TableRowFlowClass => true,
            _ => false,
        }
    }
    /// Returns true if this flow is a table cell flow.
    fn is_table_cell(self) -> bool {
        match self.class() {
            TableCellFlowClass => true,
            _ => false,
        }
    }
    /// Returns true if this flow is a table colgroup flow.
    fn is_table_colgroup(self) -> bool {
        match self.class() {
            TableColGroupFlowClass => true,
            _ => false,
        }
    }
    /// Returns true if this flow is a table flow.
    fn is_table(self) -> bool {
        match self.class() {
            TableFlowClass => true,
            _ => false,
        }
    }
    /// Returns true if this flow is a table caption flow.
    fn is_table_caption(self) -> bool {
        match self.class() {
            TableCaptionFlowClass => true,
            _ => false,
        }
    }
    /// Returns true if this flow is a table rowgroup flow.
    fn is_table_rowgroup(self) -> bool {
        match self.class() {
            TableRowGroupFlowClass => true,
            _ => false,
        }
    }
    /// Returns true if this flow is one of table-related flows.
    fn is_table_kind(self) -> bool {
        match self.class() {
            TableWrapperFlowClass | TableFlowClass |
                TableColGroupFlowClass | TableRowGroupFlowClass |
                TableRowFlowClass | TableCaptionFlowClass | TableCellFlowClass => true,
            _ => false,
        }
    }
    /// Returns true if anonymous flow is needed between this flow and child flow.
    /// Spec: http://www.w3.org/TR/CSS21/tables.html#anonymous-boxes
    fn need_anonymous_flow(self, child: &Flow) -> bool {
        match self.class() {
            TableFlowClass => !child.is_proper_table_child(),
            TableRowGroupFlowClass => !child.is_table_row(),
            TableRowFlowClass => !child.is_table_cell(),
            _ => false
        }
    }
    /// Generates missing child flow of this flow.
    fn generate_missing_child_flow(self, node: &ThreadSafeLayoutNode) -> FlowRef {
        let flow = match self.class() {
            // Tables and rowgroups accept only rows; synthesize an anonymous row.
            TableFlowClass | TableRowGroupFlowClass => {
                let fragment = Fragment::new_anonymous_table_fragment(node, TableRowFragment);
                box TableRowFlow::from_node_and_fragment(node, fragment) as Box<Flow>
            },
            // Rows accept only cells; synthesize an anonymous cell.
            TableRowFlowClass => {
                let fragment = Fragment::new_anonymous_table_fragment(node, TableCellFragment);
                box TableCellFlow::from_node_and_fragment(node, fragment) as Box<Flow>
            },
            _ => {
                panic!("no need to generate a missing child")
            }
        };
        FlowRef::new(flow)
    }
    /// Returns true if this flow has no children.
    fn is_leaf(self) -> bool {
        base(self).children.len() == 0
    }
    /// Returns the number of children that this flow possesses.
    fn child_count(self) -> uint {
        base(self).children.len()
    }
    /// Return true if this flow is a Block Container.
    ///
    /// Except for table fragments and replaced elements, block-level fragments (`BlockFlow`) are
    /// also block container fragments.
    /// Non-replaced inline blocks and non-replaced table cells are also block
    /// containers.
    fn is_block_container(self) -> bool {
        match self.class() {
            // TODO: Change this when inline-blocks are supported.
            BlockFlowClass | TableCaptionFlowClass | TableCellFlowClass => {
                // FIXME: Actually check the type of the node
                self.child_count() != 0
            }
            _ => false,
        }
    }
    /// Returns true if this flow is a block flow.
    fn is_block_flow(self) -> bool {
        match self.class() {
            BlockFlowClass => true,
            _ => false,
        }
    }
    /// Returns true if this flow is an inline flow.
    fn is_inline_flow(self) -> bool {
        match self.class() {
            InlineFlowClass => true,
            _ => false,
        }
    }
    /// Dumps the flow tree for debugging.
    fn dump(self) {
        self.dump_with_level(0)
    }
    /// Dumps the flow tree for debugging, with a prefix to indicate that we're at the given level.
    fn dump_with_level(self, level: uint) {
        let mut indent = String::new();
        for _ in range(0, level) {
            indent.push_str("| ")
        }
        println!("{}+ {}", indent, self.to_string());
        for kid in imm_child_iter(self) {
            kid.dump_with_level(level + 1)
        }
    }
}
impl<'a> MutableFlowUtils for &'a mut Flow + 'a {
    /// Traverses the tree in preorder: this node first (if `should_process`), then each child.
    fn traverse_preorder<T:PreorderFlowTraversal>(self, traversal: &T) {
        if traversal.should_process(self) {
            traversal.process(self);
        }
        for kid in child_iter(self) {
            kid.traverse_preorder(traversal);
        }
    }
    /// Traverses the tree in postorder: each child first, then this node (if `should_process`).
    fn traverse_postorder<T:PostorderFlowTraversal>(self, traversal: &T) {
        for kid in child_iter(self) {
            kid.traverse_postorder(traversal);
        }
        if traversal.should_process(self) {
            traversal.process(self)
        }
    }
    /// Calculate and set overflow for current flow.
    ///
    /// CSS Section 11.1
    /// This is the union of rectangles of the flows for which we define the
    /// Containing Block.
    ///
    /// Assumption: This is called in a bottom-up traversal, so kids' overflows have
    /// already been set.
    /// Assumption: Absolute descendants have had their overflow calculated.
    fn store_overflow(self, _: &LayoutContext) {
        let my_position = mut_base(self).position;
        // FIXME(pcwalton): We should calculate overflow on a per-fragment basis, because their
        // styles can affect overflow regions. Consider `box-shadow`, `outline`, etc.--anything
        // that can draw outside the border box. For now we assume overflow is the border box, but
        // that is wrong.
        let mut overflow = my_position;
        if self.is_block_container() {
            for kid in child_iter(self) {
                if kid.is_store_overflow_delayed() {
                    // Absolute flows will be handled by their CB. If we are
                    // their CB, they will show up in `abs_descendants`.
                    continue;
                }
                // Kid overflow is kid-relative; translate into our coordinate space
                // before taking the union.
                let mut kid_overflow = base(kid).overflow;
                kid_overflow = kid_overflow.translate(&my_position.start);
                overflow = overflow.union(&kid_overflow)
            }
            // FIXME(#2004, pcwalton): This is wrong for `position: fixed`.
            for descendant_link in mut_base(self).abs_descendants.iter() {
                let mut kid_overflow = base(descendant_link).overflow;
                kid_overflow = kid_overflow.translate(&my_position.start);
                overflow = overflow.union(&kid_overflow)
            }
        }
        mut_base(self).overflow = overflow;
    }
    /// Collect and update static y-offsets bubbled up by kids.
    ///
    /// This would essentially give us offsets of all absolutely positioned
    /// direct descendants and all fixed descendants, in tree order.
    ///
    /// Assume that this is called in a bottom-up traversal (specifically, the
    /// assign-block-size traversal). So, kids have their flow origin already set.
    /// In the case of absolute flow kids, they have their hypothetical box
    /// position already set.
    fn collect_static_block_offsets_from_children(self) {
        let mut absolute_descendant_block_offsets = Vec::new();
        for kid in mut_base(self).child_iter() {
            let mut gives_absolute_offsets = true;
            if kid.is_block_like() {
                let kid_block = kid.as_block();
                if kid_block.is_fixed() || kid_block.base.flags.contains(IS_ABSOLUTELY_POSITIONED) {
                    // It won't contribute any offsets for descendants because it would be the
                    // containing block for them.
                    gives_absolute_offsets = false;
                    // Give the offset for the current absolute flow alone.
                    absolute_descendant_block_offsets.push(
                        kid_block.get_hypothetical_block_start_edge());
                } else if kid_block.is_positioned() {
                    // It won't contribute any offsets because it would be the containing block
                    // for the descendants.
                    gives_absolute_offsets = false;
                }
            }
            if gives_absolute_offsets {
                let kid_base = mut_base(kid);
                // Avoid copying the offset vector.
                let offsets = mem::replace(&mut kid_base.abs_descendants.static_block_offsets,
                                           Vec::new());
                // Consume all the static block-offsets bubbled up by kids.
                for block_offset in offsets.into_iter() {
                    // The offsets are with respect to the kid flow's fragment. Translate them to
                    // that of the current flow.
                    absolute_descendant_block_offsets.push(
                        block_offset + kid_base.position.start.b);
                }
            }
        }
        mut_base(self).abs_descendants.static_block_offsets = absolute_descendant_block_offsets
    }
}
impl MutableOwnedFlowUtils for FlowRef {
    /// Set absolute descendants for this flow.
    ///
    /// Set yourself as the Containing Block for all the absolute descendants.
    ///
    /// This is called during flow construction, so nothing else can be accessing the descendant
    /// flows. This is enforced by the fact that we have a mutable `FlowRef`, which only flow
    /// construction is allowed to possess.
    fn set_absolute_descendants(&mut self, abs_descendants: AbsDescendants) {
        // Clone the handle up front so it can be handed to every descendant below.
        let this = self.clone();

        let block = self.as_block();
        block.base.abs_descendants = abs_descendants;

        for descendant_link in block.base.abs_descendants.iter() {
            // Record this flow as each absolute descendant's containing block.
            let base = mut_base(descendant_link);
            base.absolute_cb.set(this.clone());
        }
    }
}
/// A link to a flow's containing block.
///
/// This cannot safely be a `Flow` pointer because this is a pointer *up* the tree, not *down* the
/// tree. A pointer up the tree is unsafe during layout because it can be used to access a node
/// with an immutable reference while that same node is being laid out, causing possible iterator
/// invalidation and use-after-free.
///
/// FIXME(pcwalton): I think this would be better with a borrow flag instead of `unsafe`.
pub struct ContainingBlockLink {
    /// The pointer up to the containing block, or `None` if it has not been set yet.
    link: Option<FlowRef>,
}
impl ContainingBlockLink {
    /// Creates a link that does not yet point at any containing block.
    fn new() -> ContainingBlockLink {
        ContainingBlockLink {
            link: None,
        }
    }

    /// Points this link at the given containing block flow.
    fn set(&mut self, link: FlowRef) {
        self.link = Some(link)
    }

    /// Returns a mutable reference to the underlying optional flow pointer.
    ///
    /// Unsafe because the link points *up* the flow tree; see the type-level documentation for
    /// the aliasing hazards involved.
    pub unsafe fn get<'a>(&'a mut self) -> &'a mut Option<FlowRef> {
        &mut self.link
    }

    /// Returns the rect of the containing block generated by the linked flow.
    ///
    /// Panics if no containing block has been set via `set` yet. (The previous panic message,
    /// "haven't done it", gave no hint about what went wrong.)
    #[inline]
    pub fn generated_containing_block_rect(&mut self) -> LogicalRect<Au> {
        match self.link {
            None => panic!("containing block not set; `set` must be called before querying \
                            its generated rect"),
            Some(ref mut link) => link.generated_containing_block_rect(),
        }
    }
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[macro_use]
extern crate lazy_static;
#[cfg(feature = "bindgen")]
extern crate bindgen;
#[cfg(feature = "bindgen")]
extern crate log;
#[cfg(feature = "bindgen")]
extern crate regex;
#[cfg(feature = "bindgen")]
extern crate toml;
extern crate walkdir;
use std::env;
use std::path::Path;
use std::process::{Command, exit};
use walkdir::WalkDir;
#[cfg(feature = "gecko")]
mod build_gecko;
#[cfg(not(feature = "gecko"))]
mod build_gecko {
    // No-op stub so `build_gecko::generate()` can be called unconditionally from `main`.
    pub fn generate() {}
}
/// Locate a usable Python 2 interpreter on Windows, trying the common
/// executable names in order of preference.
#[cfg(windows)]
fn find_python() -> String {
    let candidates = ["python2.7.exe", "python27.exe", "python.exe"];
    for &candidate in candidates.iter() {
        // Successfully running `--version` is taken as proof the interpreter exists.
        if Command::new(candidate).arg("--version").output().is_ok() {
            return candidate.to_owned();
        }
    }
    panic!(concat!("Can't find python (tried python2.7.exe, python27.exe, and python.exe)! ",
                   "Try fixing PATH or setting the PYTHON env var"));
}
/// Locate a Python interpreter on non-Windows platforms, preferring `python2.7`
/// and falling back to plain `python`.
#[cfg(not(windows))]
fn find_python() -> String {
    // A spawn error (e.g. `python2.7` not installed at all) must not abort the build
    // script with a panic; treat it the same as an unsuccessful exit status and fall
    // back to `python`.
    let has_python27 = Command::new("python2.7")
        .arg("--version")
        .output()
        .map(|output| output.status.success())
        .unwrap_or(false);
    if has_python27 {
        "python2.7"
    } else {
        "python"
    }.to_owned()
}
lazy_static! {
    // Python interpreter used to run the property-generation script: $PYTHON when set,
    // otherwise discovered via find_python().
    pub static ref PYTHON: String = env::var("PYTHON").ok().unwrap_or_else(find_python);
}
/// Run properties/build.py to generate the style-property code, after telling Cargo to
/// re-run this build script whenever any of the generator's inputs change.
fn generate_properties() {
    for entry in WalkDir::new("properties") {
        let entry = entry.unwrap();
        match entry.path().extension().and_then(|e| e.to_str()) {
            // Only generator inputs (mako templates, python/rust sources, zips) should
            // invalidate the build.
            Some("mako") | Some("rs") | Some("py") | Some("zip") => {
                println!("cargo:rerun-if-changed={}", entry.path().display());
            }
            _ => {}
        }
    }

    let script = Path::new(&env::var_os("CARGO_MANIFEST_DIR").unwrap())
        .join("properties").join("build.py");
    let product = if cfg!(feature = "gecko") { "gecko" } else { "servo" };
    let status = Command::new(&*PYTHON)
        .arg(&script)
        .arg(product)
        .arg("style-crate")
        // A 1-byte Option<bool> means the compiler has rust-lang/rust#45225's layout
        // optimization, i.e. Rust 1.23+; advertise that to the generator.
        .envs(if std::mem::size_of::<Option<bool>>() == 1 {
            // FIXME: remove this envs() call
            // and make unconditional code that depends on RUSTC_HAS_PR45225
            // once Firefox requires Rust 1.23+
            // https://github.com/rust-lang/rust/pull/45225
            vec![("RUSTC_HAS_PR45225", "1")]
        } else {
            vec![]
        })
        .status()
        .unwrap();
    if !status.success() {
        // Propagate the generator's failure as a build failure.
        exit(1)
    }
}
fn main() {
    // Rebuild when this script itself changes.
    println!("cargo:rerun-if-changed=build.rs");
    // Re-export OUT_DIR via the `cargo:` metadata channel.
    println!("cargo:out_dir={}", env::var("OUT_DIR").unwrap());
    generate_properties();
    build_gecko::generate();
}
Auto merge of #19540 - servo:style-features, r=emilio
Check that 'style' is compiled in a supported configuration.
Provide an explanatory error message when compilation would otherwise fail because of missing or duplicated items.
<!-- Reviewable:start -->
---
This change is [<img src="https://reviewable.io/review_button.svg" height="34" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/servo/servo/19540)
<!-- Reviewable:end -->
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[macro_use]
extern crate lazy_static;
#[cfg(feature = "bindgen")]
extern crate bindgen;
#[cfg(feature = "bindgen")]
extern crate log;
#[cfg(feature = "bindgen")]
extern crate regex;
#[cfg(feature = "bindgen")]
extern crate toml;
extern crate walkdir;
use std::env;
use std::path::Path;
use std::process::{Command, exit};
use walkdir::WalkDir;
#[cfg(feature = "gecko")]
mod build_gecko;
#[cfg(not(feature = "gecko"))]
mod build_gecko {
    // No-op stub so `build_gecko::generate()` can be called unconditionally from `main`.
    pub fn generate() {}
}
/// Locate a usable Python 2 interpreter on Windows, trying the common
/// executable names in order of preference.
#[cfg(windows)]
fn find_python() -> String {
    let candidates = ["python2.7.exe", "python27.exe", "python.exe"];
    for &candidate in candidates.iter() {
        // Successfully running `--version` is taken as proof the interpreter exists.
        if Command::new(candidate).arg("--version").output().is_ok() {
            return candidate.to_owned();
        }
    }
    panic!(concat!("Can't find python (tried python2.7.exe, python27.exe, and python.exe)! ",
                   "Try fixing PATH or setting the PYTHON env var"));
}
/// Locate a Python interpreter on non-Windows platforms, preferring `python2.7`
/// and falling back to plain `python`.
#[cfg(not(windows))]
fn find_python() -> String {
    // A spawn error (e.g. `python2.7` not installed at all) must not abort the build
    // script with a panic; treat it the same as an unsuccessful exit status and fall
    // back to `python`.
    let has_python27 = Command::new("python2.7")
        .arg("--version")
        .output()
        .map(|output| output.status.success())
        .unwrap_or(false);
    if has_python27 {
        "python2.7"
    } else {
        "python"
    }.to_owned()
}
lazy_static! {
    // Python interpreter used to run the property-generation script: $PYTHON when set,
    // otherwise discovered via find_python().
    pub static ref PYTHON: String = env::var("PYTHON").ok().unwrap_or_else(find_python);
}
/// Run properties/build.py to generate the style-property code, after telling Cargo to
/// re-run this build script whenever any of the generator's inputs change.
fn generate_properties() {
    for entry in WalkDir::new("properties") {
        let entry = entry.unwrap();
        match entry.path().extension().and_then(|e| e.to_str()) {
            // Only generator inputs (mako templates, python/rust sources, zips) should
            // invalidate the build.
            Some("mako") | Some("rs") | Some("py") | Some("zip") => {
                println!("cargo:rerun-if-changed={}", entry.path().display());
            }
            _ => {}
        }
    }

    let script = Path::new(&env::var_os("CARGO_MANIFEST_DIR").unwrap())
        .join("properties").join("build.py");
    let product = if cfg!(feature = "gecko") { "gecko" } else { "servo" };
    let status = Command::new(&*PYTHON)
        .arg(&script)
        .arg(product)
        .arg("style-crate")
        // A 1-byte Option<bool> means the compiler has rust-lang/rust#45225's layout
        // optimization, i.e. Rust 1.23+; advertise that to the generator.
        .envs(if std::mem::size_of::<Option<bool>>() == 1 {
            // FIXME: remove this envs() call
            // and make unconditional code that depends on RUSTC_HAS_PR45225
            // once Firefox requires Rust 1.23+
            // https://github.com/rust-lang/rust/pull/45225
            vec![("RUSTC_HAS_PR45225", "1")]
        } else {
            vec![]
        })
        .status()
        .unwrap();
    if !status.success() {
        // Propagate the generator's failure as a build failure.
        exit(1)
    }
}
fn main() {
    // The style crate must be built for exactly one product. Fail early with a clear
    // diagnostic rather than letting compilation break later with confusing missing or
    // duplicated item errors.
    let gecko = cfg!(feature = "gecko");
    let servo = cfg!(feature = "servo");
    if !(gecko || servo) {
        panic!("The style crate requires enabling one of its 'servo' or 'gecko' feature flags");
    }
    if gecko && servo {
        // Fixed diagnostic wording: "both ... and ..." (was ungrammatical "both ... or ...").
        panic!("The style crate does not support enabling both its 'servo' and 'gecko' \
                feature flags at the same time.");
    }
    // Rebuild when this script itself changes.
    println!("cargo:rerun-if-changed=build.rs");
    // Re-export OUT_DIR via the `cargo:` metadata channel.
    println!("cargo:out_dir={}", env::var("OUT_DIR").unwrap());
    generate_properties();
    build_gecko::generate();
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Memory profiling functions.
use libc::{c_char,c_int,c_void,size_t};
use std::borrow::ToOwned;
use std::collections::{DList, HashMap};
use std::ffi::CString;
#[cfg(target_os = "linux")]
use std::iter::AdditiveIterator;
use std::old_io::timer::sleep;
#[cfg(target_os="linux")]
use std::old_io::File;
use std::mem::{size_of, transmute};
use std::ptr::null_mut;
use std::sync::Arc;
use std::sync::mpsc::{Sender, channel, Receiver};
use std::time::duration::Duration;
use task::spawn_named;
#[cfg(target_os="macos")]
use task_info::task_basic_info::{virtual_size,resident_size};
extern {
    // Get the size of a heap block.
    //
    // Ideally Rust would expose a function like this in std::rt::heap, which would avoid the
    // jemalloc dependence.
    //
    // The C prototype is `je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)`. On some
    // platforms `JEMALLOC_USABLE_SIZE_CONST` is `const` and on some it is empty. But in practice
    // this function doesn't modify the contents of the block that `ptr` points to, so we use
    // `*const c_void` here.
    fn je_malloc_usable_size(ptr: *const c_void) -> size_t;
}

// A wrapper for je_malloc_usable_size that handles `EMPTY` and returns `usize`.
pub fn heap_size_of(ptr: *const c_void) -> usize {
    if ptr == ::std::rt::heap::EMPTY as *const c_void {
        // `EMPTY` is the sentinel pointer used for zero-sized allocations; it is not a real
        // heap block, so report zero instead of asking jemalloc about it.
        0
    } else {
        unsafe { je_malloc_usable_size(ptr) as usize }
    }
}

// The simplest trait for measuring the size of heap data structures. More complex traits that
// return multiple measurements -- e.g. measure text separately from images -- are also possible,
// and should be used when appropriate.
//
// FIXME(njn): it would be nice to be able to derive this trait automatically, given that
// implementations are mostly repetitive and mechanical.
//
pub trait SizeOf {
    /// Measure the size of any heap-allocated structures that hang off this value, but not the
    /// space taken up by the value itself (i.e. what size_of::<T> measures, more or less); that
    /// space is handled by the implementation of SizeOf for Box<T> below.
    fn size_of_excluding_self(&self) -> usize;
}
// There are two possible ways to measure the size of `self` when it's on the heap: compute it
// (with `::std::rt::heap::usable_size(::std::mem::size_of::<T>(), 0)`) or measure it directly
// using the heap allocator (with `heap_size_of`). We do the latter, for the following reasons.
//
// * The heap allocator is the true authority for the sizes of heap blocks; its measurement is
// guaranteed to be correct. In comparison, size computations are error-prone. (For example, the
// `rt::heap::usable_size` function used in some of Rust's non-default allocator implementations
// underestimate the true usable size of heap blocks, which is safe in general but would cause
// under-measurement here.)
//
// * If we measure something that isn't a heap block, we'll get a crash. This keeps us honest,
// which is important because unsafe code is involved and this can be gotten wrong.
//
// However, in the best case, the two approaches should give the same results.
//
impl<T: SizeOf> SizeOf for Box<T> {
    fn size_of_excluding_self(&self) -> usize {
        // Measure size of `self`.
        heap_size_of(&**self as *const T as *const c_void) + (**self).size_of_excluding_self()
    }
}

impl SizeOf for String {
    fn size_of_excluding_self(&self) -> usize {
        // A String's character data lives in a single heap block.
        heap_size_of(self.as_ptr() as *const c_void)
    }
}

impl<T: SizeOf> SizeOf for Option<T> {
    fn size_of_excluding_self(&self) -> usize {
        // `None` owns no heap data.
        match *self {
            None => 0,
            Some(ref x) => x.size_of_excluding_self()
        }
    }
}

impl<T: SizeOf> SizeOf for Arc<T> {
    fn size_of_excluding_self(&self) -> usize {
        // NOTE(review): this measures the shared value in full, so data reachable through
        // several Arc handles is counted once per handle measured.
        (**self).size_of_excluding_self()
    }
}

impl<T: SizeOf> SizeOf for Vec<T> {
    fn size_of_excluding_self(&self) -> usize {
        // The vector's own buffer, plus whatever each element owns on the heap.
        heap_size_of(self.as_ptr() as *const c_void) +
            self.iter().fold(0, |n, elem| n + elem.size_of_excluding_self())
    }
}

// FIXME(njn): We can't implement SizeOf accurately for DList because it requires access to the
// private Node type. Eventually we'll want to add SizeOf (or equivalent) to Rust itself. In the
// meantime, we use the dirty hack of transmuting DList into an identical type (DList2) and
// measuring that.
impl<T: SizeOf> SizeOf for DList<T> {
    fn size_of_excluding_self(&self) -> usize {
        let list2: &DList2<T> = unsafe { transmute(self) };
        list2.size_of_excluding_self()
    }
}
// Mirror of the private std `DList` representation; must stay layout-identical to it
// (see `dlist2_check` below).
struct DList2<T> {
    _length: usize,
    list_head: Link<T>,
    _list_tail: Rawlink<Node<T>>,
}

// Owning link to the next node, mirroring std's private `Link` type.
type Link<T> = Option<Box<Node<T>>>;

// Non-owning raw back-pointer, mirroring std's private `Rawlink` type.
struct Rawlink<T> {
    _p: *mut T,
}

// Mirror of std's private list `Node` type.
struct Node<T> {
    next: Link<T>,
    _prev: Rawlink<Node<T>>,
    value: T,
}

impl<T: SizeOf> SizeOf for Node<T> {
    // Unlike most size_of_excluding_self() functions, this one does *not* measure descendents.
    // Instead, DList2<T>::size_of_excluding_self() handles that, so that it can use iteration
    // instead of recursion, which avoids potentially blowing the stack.
    fn size_of_excluding_self(&self) -> usize {
        self.value.size_of_excluding_self()
    }
}

impl<T: SizeOf> SizeOf for DList2<T> {
    fn size_of_excluding_self(&self) -> usize {
        let mut size = 0;
        // Walk the list iteratively. Each `Link` is an Option<Box<Node>>, so measuring it
        // accounts for the node's own heap block plus its value's heap data.
        let mut curr: &Link<T> = &self.list_head;
        while curr.is_some() {
            size += (*curr).size_of_excluding_self();
            curr = &curr.as_ref().unwrap().next;
        }
        size
    }
}

// This is a basic sanity check. If the representation of DList changes such that it becomes a
// different size to DList2, this will fail at compile-time.
#[allow(dead_code)]
unsafe fn dlist2_check() {
    transmute::<DList<i32>, DList2<i32>>(panic!());
}

// Currently, types that implement the Drop type are larger than those that don't. Because DList
// implements Drop, DList2 must also so that dlist2_check() doesn't fail.
#[unsafe_destructor]
impl<T> Drop for DList2<T> {
    fn drop(&mut self) {}
}
//---------------------------------------------------------------------------
/// A channel for sending control messages to the memory profiler thread.
#[derive(Clone)]
pub struct MemoryProfilerChan(pub Sender<MemoryProfilerMsg>);

impl MemoryProfilerChan {
    /// Send `msg` to the profiler, panicking if the profiler thread is gone.
    pub fn send(&self, msg: MemoryProfilerMsg) {
        let MemoryProfilerChan(ref c) = *self;
        c.send(msg).unwrap();
    }
}

/// A single measurement produced by a memory reporter.
pub struct MemoryReport {
    /// The identifying name for this report.
    pub name: String,
    /// The size, in bytes.
    pub size: u64,
}

/// A channel through which memory reports can be sent.
#[derive(Clone)]
pub struct MemoryReportsChan(pub Sender<Vec<MemoryReport>>);

impl MemoryReportsChan {
    /// Send a batch of reports back to the profiler, panicking if it is gone.
    pub fn send(&self, report: Vec<MemoryReport>) {
        let MemoryReportsChan(ref c) = *self;
        c.send(report).unwrap();
    }
}

/// A memory reporter is capable of measuring some data structure of interest. Because it needs
/// to be passed to and registered with the MemoryProfiler, it's typically a "small" (i.e. easily
/// cloneable) value that provides access to a "large" data structure, e.g. a channel that can
/// inject a request for measurements into the event queue associated with the "large" data
/// structure.
pub trait MemoryReporter {
    /// Collect one or more memory reports. Returns true on success, and false on failure.
    fn collect_reports(&self, reports_chan: MemoryReportsChan) -> bool;
}

/// Messages that can be sent to the memory profiler thread.
pub enum MemoryProfilerMsg {
    /// Register a MemoryReporter with the memory profiler. The String is only used to identify the
    /// reporter so it can be unregistered later. The String must be distinct from that used by any
    /// other registered reporter otherwise a panic will occur.
    RegisterMemoryReporter(String, Box<MemoryReporter + Send>),

    /// Unregister a MemoryReporter with the memory profiler. The String must match the name given
    /// when the reporter was registered. If the String does not match the name of a registered
    /// reporter a panic will occur.
    UnregisterMemoryReporter(String),

    /// Triggers printing of the memory profiling metrics.
    Print,

    /// Tells the memory profiler to shut down.
    Exit,
}

/// The memory profiler itself: owns the registered reporters and services requests.
pub struct MemoryProfiler {
    /// The port through which messages are received.
    pub port: Receiver<MemoryProfilerMsg>,

    /// Registered memory reporters.
    reporters: HashMap<String, Box<MemoryReporter + Send>>,
}
impl MemoryProfiler {
    /// Spawn the memory profiler thread and, if `period` (in seconds) is given, a timer
    /// thread that posts a `Print` message at that interval. Returns the control channel.
    pub fn create(period: Option<f64>) -> MemoryProfilerChan {
        let (chan, port) = channel();

        // Create the timer thread if a period was provided.
        if let Some(period) = period {
            let period_ms = Duration::milliseconds((period * 1000f64) as i64);
            let chan = chan.clone();
            spawn_named("Memory profiler timer".to_owned(), move || {
                loop {
                    sleep(period_ms);
                    // Stop ticking once the profiler thread has gone away.
                    if chan.send(MemoryProfilerMsg::Print).is_err() {
                        break;
                    }
                }
            });
        }

        // Always spawn the memory profiler. If there is no timer thread it won't receive regular
        // `Print` events, but it will still receive the other events.
        spawn_named("Memory profiler".to_owned(), move || {
            let mut memory_profiler = MemoryProfiler::new(port);
            memory_profiler.start();
        });

        MemoryProfilerChan(chan)
    }

    /// Construct a profiler that reads requests from `port`, with no reporters registered.
    pub fn new(port: Receiver<MemoryProfilerMsg>) -> MemoryProfiler {
        MemoryProfiler {
            port: port,
            reporters: HashMap::new(),
        }
    }

    /// Run the event loop until `Exit` is received or the channel is closed.
    pub fn start(&mut self) {
        loop {
            match self.port.recv() {
                Ok(msg) => {
                    if !self.handle_msg(msg) {
                        break
                    }
                }
                _ => break
            }
        }
    }

    /// Dispatch one message; returns false when the profiler should shut down.
    fn handle_msg(&mut self, msg: MemoryProfilerMsg) -> bool {
        match msg {
            MemoryProfilerMsg::RegisterMemoryReporter(name, reporter) => {
                // Panic if it has already been registered.
                let name_clone = name.clone();
                match self.reporters.insert(name, reporter) {
                    None => true,
                    Some(_) =>
                        panic!(format!("RegisterMemoryReporter: '{}' name is already in use",
                                       name_clone)),
                }
            },

            MemoryProfilerMsg::UnregisterMemoryReporter(name) => {
                // Panic if it hasn't previously been registered.
                match self.reporters.remove(&name) {
                    Some(_) => true,
                    None =>
                        panic!(format!("UnregisterMemoryReporter: '{}' name is unknown", &name)),
                }
            },

            MemoryProfilerMsg::Print => {
                self.handle_print_msg();
                true
            },

            MemoryProfilerMsg::Exit => false
        }
    }

    /// Print one measurement in mebibytes, or "???" when it is unavailable on this platform.
    fn print_measurement(path: &str, nbytes: Option<u64>) {
        match nbytes {
            Some(nbytes) => {
                let mebi = 1024f64 * 1024f64;
                println!("{:12.2}: {}", (nbytes as f64) / mebi, path);
            }
            None => {
                println!("{:>12}: {}", "???", path);
            }
        }
    }

    /// Gather and print every measurement: global OS/allocator numbers first, then the
    /// reports collected from each registered reporter.
    fn handle_print_msg(&self) {
        println!("{:12}: {}", "_size (MiB)_", "_category_");

        // Collect global measurements from the OS and heap allocators.

        // Virtual and physical memory usage, as reported by the OS.
        MemoryProfiler::print_measurement("vsize", get_vsize());
        MemoryProfiler::print_measurement("resident", get_resident());

        for seg in get_resident_segments().iter() {
            MemoryProfiler::print_measurement(seg.0.as_slice(), Some(seg.1));
        }

        // Total number of bytes allocated by the application on the system
        // heap.
        MemoryProfiler::print_measurement("system-heap-allocated",
                                          get_system_heap_allocated());

        // The descriptions of the following jemalloc measurements are taken
        // directly from the jemalloc documentation.

        // "Total number of bytes allocated by the application."
        MemoryProfiler::print_measurement("jemalloc-heap-allocated",
                                          get_jemalloc_stat("stats.allocated"));

        // "Total number of bytes in active pages allocated by the application.
        // This is a multiple of the page size, and greater than or equal to
        // |stats.allocated|."
        MemoryProfiler::print_measurement("jemalloc-heap-active",
                                          get_jemalloc_stat("stats.active"));

        // "Total number of bytes in chunks mapped on behalf of the application.
        // This is a multiple of the chunk size, and is at least as large as
        // |stats.active|. This does not include inactive chunks."
        MemoryProfiler::print_measurement("jemalloc-heap-mapped",
                                          get_jemalloc_stat("stats.mapped"));

        // Collect reports from memory reporters.
        //
        // This serializes the report-gathering. It might be worth creating a new scoped thread for
        // each reporter once we have enough of them.
        //
        // If anything goes wrong with a reporter, we just skip it.
        for reporter in self.reporters.values() {
            let (chan, port) = channel();
            if reporter.collect_reports(MemoryReportsChan(chan)) {
                if let Ok(reports) = port.recv() {
                    for report in reports {
                        MemoryProfiler::print_measurement(report.name.as_slice(),
                                                          Some(report.size));
                    }
                }
            }
        }

        println!("");
    }
}
#[cfg(target_os="linux")]
extern {
    // glibc's allocator-statistics entry point.
    fn mallinfo() -> struct_mallinfo;
}

// Field-for-field mirror of glibc's `struct mallinfo` (see `man mallinfo`).
#[cfg(target_os="linux")]
#[repr(C)]
pub struct struct_mallinfo {
    arena: c_int,
    ordblks: c_int,
    smblks: c_int,
    hblks: c_int,
    hblkhd: c_int,
    usmblks: c_int,
    fsmblks: c_int,
    uordblks: c_int,
    fordblks: c_int,
    keepcost: c_int,
}
#[cfg(target_os="linux")]
fn get_system_heap_allocated() -> Option<u64> {
let mut info: struct_mallinfo;
unsafe {
info = mallinfo();
}
// The documentation in the glibc man page makes it sound like |uordblks|
// would suffice, but that only gets the small allocations that are put in
// the brk heap. We need |hblkhd| as well to get the larger allocations
// that are mmapped.
Some((info.hblkhd + info.uordblks) as u64)
}
// System-heap measurement is only implemented for Linux.
#[cfg(not(target_os="linux"))]
fn get_system_heap_allocated() -> Option<u64> {
    None
}

extern {
    // jemalloc's control/introspection interface ("mallctl").
    fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
                  newp: *mut c_void, newlen: size_t) -> c_int;
}

/// Read a single numeric jemalloc statistic, or `None` if either mallctl call fails.
fn get_jemalloc_stat(value_name: &str) -> Option<u64> {
    // Before we request the measurement of interest, we first send an "epoch"
    // request. Without that jemalloc gives cached statistics(!) which can be
    // highly inaccurate.
    let epoch_name = "epoch";
    let epoch_c_name = CString::from_slice(epoch_name.as_bytes());
    let mut epoch: u64 = 0;
    let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
    let mut epoch_len = size_of::<u64>() as size_t;

    let value_c_name = CString::from_slice(value_name.as_bytes());
    let mut value: size_t = 0;
    let value_ptr = &mut value as *mut _ as *mut c_void;
    let mut value_len = size_of::<size_t>() as size_t;

    // Using the same values for the `old` and `new` parameters is enough
    // to get the statistics updated.
    let rv = unsafe {
        je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr,
                   epoch_len)
    };
    if rv != 0 {
        return None;
    }

    let rv = unsafe {
        je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len,
                   null_mut(), 0)
    };
    if rv != 0 {
        return None;
    }

    Some(value as u64)
}

// Like std::macros::try!, but for Option<>.
macro_rules! option_try(
    ($e:expr) => (match $e { Some(e) => e, None => return None })
);

/// Return the `field`th whitespace-separated value of /proc/self/statm, scaled from
/// pages to bytes, or `None` on any read/parse failure.
#[cfg(target_os="linux")]
fn get_proc_self_statm_field(field: usize) -> Option<u64> {
    let mut f = File::open(&Path::new("/proc/self/statm"));
    match f.read_to_string() {
        Ok(contents) => {
            let s = option_try!(contents.as_slice().words().nth(field));
            let npages = option_try!(s.parse::<u64>().ok());
            Some(npages * (::std::env::page_size() as u64))
        }
        Err(_) => None
    }
}
// Virtual address-space size: field 0 of /proc/self/statm.
#[cfg(target_os="linux")]
fn get_vsize() -> Option<u64> {
    get_proc_self_statm_field(0)
}

// Resident set size: field 1 of /proc/self/statm.
#[cfg(target_os="linux")]
fn get_resident() -> Option<u64> {
    get_proc_self_statm_field(1)
}

// On Mac these come from the Mach task_info interface (see the task_info import above).
#[cfg(target_os="macos")]
fn get_vsize() -> Option<u64> {
    virtual_size()
}

#[cfg(target_os="macos")]
fn get_resident() -> Option<u64> {
    resident_size()
}

// Other platforms: not measured.
#[cfg(not(any(target_os="linux", target_os = "macos")))]
fn get_vsize() -> Option<u64> {
    None
}

#[cfg(not(any(target_os="linux", target_os = "macos")))]
fn get_resident() -> Option<u64> {
    None
}
/// Parse /proc/self/smaps and return the resident size of each mapped segment, sorted
/// largest-first, plus a synthesized total ("resident-according-to-smaps").
#[cfg(target_os="linux")]
fn get_resident_segments() -> Vec<(String, u64)> {
    use regex::Regex;
    use std::collections::HashMap;
    use std::collections::hash_map::Entry;

    // The first line of an entry in /proc/<pid>/smaps looks just like an entry
    // in /proc/<pid>/maps:
    //
    //   address           perms offset  dev   inode  pathname
    //   02366000-025d8000 rw-p 00000000 00:00 0      [heap]
    //
    // Each of the following lines contains a key and a value, separated
    // by ": ", where the key does not contain either of those characters.
    // For example:
    //
    //   Rss:           132 kB
    let path = Path::new("/proc/self/smaps");
    let mut f = ::std::old_io::BufferedReader::new(File::open(&path));

    let seg_re = Regex::new(
        r"^[:xdigit:]+-[:xdigit:]+ (....) [:xdigit:]+ [:xdigit:]+:[:xdigit:]+ \d+ +(.*)").unwrap();
    let rss_re = Regex::new(r"^Rss: +(\d+) kB").unwrap();

    // We record each segment's resident size.
    let mut seg_map: HashMap<String, u64> = HashMap::new();

    // Two-state scanner: find a segment header line, then find that segment's Rss line.
    #[derive(PartialEq)]
    enum LookingFor { Segment, Rss }
    let mut looking_for = LookingFor::Segment;

    let mut curr_seg_name = String::new();

    // Parse the file.
    for line in f.lines() {
        let line = match line {
            Ok(line) => line,
            Err(_) => continue,
        };
        if looking_for == LookingFor::Segment {
            // Look for a segment info line.
            let cap = match seg_re.captures(line.as_slice()) {
                Some(cap) => cap,
                None => continue,
            };
            let perms = cap.at(1).unwrap();
            let pathname = cap.at(2).unwrap();

            // Construct the segment name from its pathname and permissions.
            curr_seg_name.clear();
            curr_seg_name.push_str("- ");
            if pathname == "" || pathname.starts_with("[stack:") {
                // Anonymous memory. Entries marked with "[stack:nnn]"
                // look like thread stacks but they may include other
                // anonymous mappings, so we can't trust them and just
                // treat them as entirely anonymous.
                curr_seg_name.push_str("anonymous");
            } else {
                curr_seg_name.push_str(pathname);
            }
            curr_seg_name.push_str(" (");
            curr_seg_name.push_str(perms);
            curr_seg_name.push_str(")");

            looking_for = LookingFor::Rss;
        } else {
            // Look for an "Rss:" line.
            let cap = match rss_re.captures(line.as_slice()) {
                Some(cap) => cap,
                None => continue,
            };
            let rss = cap.at(1).unwrap().parse::<u64>().unwrap() * 1024;

            if rss > 0 {
                // Aggregate small segments into "- other".
                let seg_name = if rss < 512 * 1024 {
                    "- other".to_owned()
                } else {
                    curr_seg_name.clone()
                };
                match seg_map.entry(seg_name) {
                    Entry::Vacant(entry) => { entry.insert(rss); },
                    Entry::Occupied(mut entry) => *entry.get_mut() += rss,
                }
            }

            looking_for = LookingFor::Segment;
        }
    }

    let mut segs: Vec<(String, u64)> = seg_map.into_iter().collect();

    // Get the total and add it to the vector. Note that this total differs
    // from the "resident" measurement obtained via /proc/<pid>/statm in
    // get_resident(). It's unclear why this difference occurs; for some
    // processes the measurements match, but for Servo they do not.
    let total = segs.iter().map(|&(_, size)| size).sum();
    segs.push(("resident-according-to-smaps".to_owned(), total));

    // Sort by size; the total will be first.
    segs.sort_by(|&(_, rss1), &(_, rss2)| rss2.cmp(&rss1));

    segs
}
// Per-segment measurements are only available via smaps on Linux.
#[cfg(not(target_os="linux"))]
fn get_resident_segments() -> Vec<(String, u64)> {
    vec![]
}
Put system memory measurements in a memory reporter.
Currently the system memory measurements ("resident", "vsize", etc.) are
not reported through the generic memory reporting mechanism, simply
because they pre-date that mechanism. This changeset removes that
special-casing.
One consequence of this is that previously if a platform didn't
implement one of the basic measurements, a '???' entry would be printed.
Now nothing will be printed. This is no great loss and matches what
Firefox does.
Another consequence is that the order in which the measurements are
printed is changed. I plan to fix this soon so that reports are sorted
in a more sensible fashion.
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Memory profiling functions.
use libc::{c_char,c_int,c_void,size_t};
use std::borrow::ToOwned;
use std::collections::{DList, HashMap};
use std::ffi::CString;
#[cfg(target_os = "linux")]
use std::iter::AdditiveIterator;
use std::old_io::timer::sleep;
#[cfg(target_os="linux")]
use std::old_io::File;
use std::mem::{size_of, transmute};
use std::ptr::null_mut;
use std::sync::Arc;
use std::sync::mpsc::{Sender, channel, Receiver};
use std::time::duration::Duration;
use task::spawn_named;
#[cfg(target_os="macos")]
use task_info::task_basic_info::{virtual_size,resident_size};
extern {
    // Get the size of a heap block.
    //
    // Ideally Rust would expose a function like this in std::rt::heap, which would avoid the
    // jemalloc dependence.
    //
    // The C prototype is `je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)`. On some
    // platforms `JEMALLOC_USABLE_SIZE_CONST` is `const` and on some it is empty. But in practice
    // this function doesn't modify the contents of the block that `ptr` points to, so we use
    // `*const c_void` here.
    fn je_malloc_usable_size(ptr: *const c_void) -> size_t;
}

// A wrapper for je_malloc_usable_size that handles `EMPTY` and returns `usize`.
pub fn heap_size_of(ptr: *const c_void) -> usize {
    if ptr == ::std::rt::heap::EMPTY as *const c_void {
        // `EMPTY` is the sentinel pointer used for zero-sized allocations; it is not a real
        // heap block, so report zero instead of asking jemalloc about it.
        0
    } else {
        unsafe { je_malloc_usable_size(ptr) as usize }
    }
}

// The simplest trait for measuring the size of heap data structures. More complex traits that
// return multiple measurements -- e.g. measure text separately from images -- are also possible,
// and should be used when appropriate.
//
// FIXME(njn): it would be nice to be able to derive this trait automatically, given that
// implementations are mostly repetitive and mechanical.
//
pub trait SizeOf {
    /// Measure the size of any heap-allocated structures that hang off this value, but not the
    /// space taken up by the value itself (i.e. what size_of::<T> measures, more or less); that
    /// space is handled by the implementation of SizeOf for Box<T> below.
    fn size_of_excluding_self(&self) -> usize;
}
// There are two possible ways to measure the size of `self` when it's on the heap: compute it
// (with `::std::rt::heap::usable_size(::std::mem::size_of::<T>(), 0)`) or measure it directly
// using the heap allocator (with `heap_size_of`). We do the latter, for the following reasons.
//
// * The heap allocator is the true authority for the sizes of heap blocks; its measurement is
// guaranteed to be correct. In comparison, size computations are error-prone. (For example, the
// `rt::heap::usable_size` function used in some of Rust's non-default allocator implementations
// underestimate the true usable size of heap blocks, which is safe in general but would cause
// under-measurement here.)
//
// * If we measure something that isn't a heap block, we'll get a crash. This keeps us honest,
// which is important because unsafe code is involved and this can be gotten wrong.
//
// However, in the best case, the two approaches should give the same results.
//
impl<T: SizeOf> SizeOf for Box<T> {
    fn size_of_excluding_self(&self) -> usize {
        // Measure size of `self`.
        heap_size_of(&**self as *const T as *const c_void) + (**self).size_of_excluding_self()
    }
}

impl SizeOf for String {
    fn size_of_excluding_self(&self) -> usize {
        // A String's character data lives in a single heap block.
        heap_size_of(self.as_ptr() as *const c_void)
    }
}

impl<T: SizeOf> SizeOf for Option<T> {
    fn size_of_excluding_self(&self) -> usize {
        // `None` owns no heap data.
        match *self {
            None => 0,
            Some(ref x) => x.size_of_excluding_self()
        }
    }
}

impl<T: SizeOf> SizeOf for Arc<T> {
    fn size_of_excluding_self(&self) -> usize {
        // NOTE(review): this measures the shared value in full, so data reachable through
        // several Arc handles is counted once per handle measured.
        (**self).size_of_excluding_self()
    }
}

impl<T: SizeOf> SizeOf for Vec<T> {
    fn size_of_excluding_self(&self) -> usize {
        // The vector's own buffer, plus whatever each element owns on the heap.
        heap_size_of(self.as_ptr() as *const c_void) +
            self.iter().fold(0, |n, elem| n + elem.size_of_excluding_self())
    }
}

// FIXME(njn): We can't implement SizeOf accurately for DList because it requires access to the
// private Node type. Eventually we'll want to add SizeOf (or equivalent) to Rust itself. In the
// meantime, we use the dirty hack of transmuting DList into an identical type (DList2) and
// measuring that.
impl<T: SizeOf> SizeOf for DList<T> {
    fn size_of_excluding_self(&self) -> usize {
        let list2: &DList2<T> = unsafe { transmute(self) };
        list2.size_of_excluding_self()
    }
}
struct DList2<T> {
_length: usize,
list_head: Link<T>,
_list_tail: Rawlink<Node<T>>,
}
type Link<T> = Option<Box<Node<T>>>;
struct Rawlink<T> {
_p: *mut T,
}
struct Node<T> {
next: Link<T>,
_prev: Rawlink<Node<T>>,
value: T,
}
impl<T: SizeOf> SizeOf for Node<T> {
// Unlike most size_of_excluding_self() functions, this one does *not* measure descendents.
// Instead, DList2<T>::size_of_excluding_self() handles that, so that it can use iteration
// instead of recursion, which avoids potentially blowing the stack.
fn size_of_excluding_self(&self) -> usize {
self.value.size_of_excluding_self()
}
}
impl<T: SizeOf> SizeOf for DList2<T> {
    fn size_of_excluding_self(&self) -> usize {
        // Walk the links iteratively; recursion could blow the stack on long
        // lists. Each boxed node contributes its heap allocation plus its
        // payload's excluding-self size (via the Box impl), exactly as the
        // original Option-based traversal computed it.
        let mut total = 0;
        let mut link: &Link<T> = &self.list_head;
        while let Some(ref node) = *link {
            total += node.size_of_excluding_self();
            link = &node.next;
        }
        total
    }
}
// This is a basic sanity check. If the representation of DList changes such that it becomes a
// different size to DList2, this will fail at compile-time.
#[allow(dead_code)]
unsafe fn dlist2_check() {
    // transmute between types of unequal size is a compile error; the panic!()
    // merely supplies a value-of-any-type so the function body typechecks.
    transmute::<DList<i32>, DList2<i32>>(panic!());
}
// Currently, types that implement the Drop type are larger than those that don't. Because DList
// implements Drop, DList2 must also so that dlist2_check() doesn't fail.
#[unsafe_destructor]
impl<T> Drop for DList2<T> {
    // Intentionally empty: the impl exists purely to match DList's size.
    fn drop(&mut self) {}
}
//---------------------------------------------------------------------------
/// Cloneable handle for sending commands to the memory profiler thread.
#[derive(Clone)]
pub struct MemoryProfilerChan(pub Sender<MemoryProfilerMsg>);

impl MemoryProfilerChan {
    /// Sends `msg`, panicking if the profiler thread has gone away.
    pub fn send(&self, msg: MemoryProfilerMsg) {
        self.0.send(msg).unwrap();
    }
}
/// A single measurement produced by a `MemoryReporter`.
pub struct MemoryReport {
    /// The identifying name for this report.
    pub name: String,
    /// The size, in bytes.
    pub size: u64,
}
/// A channel through which memory reports can be sent.
#[derive(Clone)]
pub struct MemoryReportsChan(pub Sender<Vec<MemoryReport>>);

impl MemoryReportsChan {
    /// Delivers one batch of reports, panicking if the receiver was dropped.
    pub fn send(&self, report: Vec<MemoryReport>) {
        self.0.send(report).unwrap();
    }
}
/// A memory reporter is capable of measuring some data structure of interest. Because it needs
/// to be passed to and registered with the MemoryProfiler, it's typically a "small" (i.e. easily
/// cloneable) value that provides access to a "large" data structure, e.g. a channel that can
/// inject a request for measurements into the event queue associated with the "large" data
/// structure.
pub trait MemoryReporter {
    /// Collect one or more memory reports. Returns true on success, and false on failure.
    /// On success the reports themselves arrive asynchronously through `reports_chan`.
    fn collect_reports(&self, reports_chan: MemoryReportsChan) -> bool;
}
/// Messages that can be sent to the memory profiler thread.
pub enum MemoryProfilerMsg {
    /// Register a MemoryReporter with the memory profiler. The String is only used to identify the
    /// reporter so it can be unregistered later. The String must be distinct from that used by any
    /// other registered reporter otherwise a panic will occur.
    RegisterMemoryReporter(String, Box<MemoryReporter + Send>),
    /// Unregister a MemoryReporter with the memory profiler. The String must match the name given
    /// when the reporter was registered. If the String does not match the name of a registered
    /// reporter a panic will occur.
    UnregisterMemoryReporter(String),
    /// Triggers printing of the memory profiling metrics.
    /// Sent periodically by the timer thread when a period was configured.
    Print,
    /// Tells the memory profiler to shut down.
    Exit,
}
/// State owned by the memory profiler thread itself.
pub struct MemoryProfiler {
    /// The port through which messages are received.
    pub port: Receiver<MemoryProfilerMsg>,
    /// Registered memory reporters.
    reporters: HashMap<String, Box<MemoryReporter + Send>>,
}
impl MemoryProfiler {
    /// Spawns the memory profiler thread and returns the channel used to talk
    /// to it. If `period` (seconds) is provided, an auxiliary timer thread
    /// sends `Print` at that interval.
    pub fn create(period: Option<f64>) -> MemoryProfilerChan {
        let (chan, port) = channel();

        // Create the timer thread if a period was provided.
        if let Some(period) = period {
            let period_ms = Duration::milliseconds((period * 1000f64) as i64);
            let chan = chan.clone();
            spawn_named("Memory profiler timer".to_owned(), move || {
                loop {
                    sleep(period_ms);
                    // The send fails once the profiler shuts down; stop ticking then.
                    if chan.send(MemoryProfilerMsg::Print).is_err() {
                        break;
                    }
                }
            });
        }

        // Always spawn the memory profiler. If there is no timer thread it won't receive regular
        // `Print` events, but it will still receive the other events.
        spawn_named("Memory profiler".to_owned(), move || {
            let mut memory_profiler = MemoryProfiler::new(port);
            memory_profiler.start();
        });

        let memory_profiler_chan = MemoryProfilerChan(chan);

        // Register the system memory reporter, which will run on the memory profiler's own thread.
        // It never needs to be unregistered, because as long as the memory profiler is running the
        // system memory reporter can make measurements.
        let system_reporter = Box::new(SystemMemoryReporter);
        memory_profiler_chan.send(MemoryProfilerMsg::RegisterMemoryReporter("system".to_owned(),
                                                                            system_reporter));

        memory_profiler_chan
    }

    /// Constructs a profiler that reads messages from `port`. No reporters are
    /// registered yet; `create` registers the system reporter separately.
    pub fn new(port: Receiver<MemoryProfilerMsg>) -> MemoryProfiler {
        MemoryProfiler {
            port: port,
            reporters: HashMap::new(),
        }
    }

    /// Runs the message loop until `Exit` arrives or the channel closes.
    pub fn start(&mut self) {
        loop {
            match self.port.recv() {
                Ok(msg) => {
                    if !self.handle_msg(msg) {
                        break
                    }
                }
                _ => break
            }
        }
    }

    /// Dispatches a single message. Returns false when the profiler should
    /// shut down.
    fn handle_msg(&mut self, msg: MemoryProfilerMsg) -> bool {
        match msg {
            MemoryProfilerMsg::RegisterMemoryReporter(name, reporter) => {
                // The entry API lets us panic on a duplicate name without
                // cloning `name` up front, and without first overwriting the
                // existing reporter (the old insert-then-panic briefly stored
                // the duplicate in the map).
                match self.reporters.entry(name) {
                    ::std::collections::hash_map::Entry::Occupied(entry) => {
                        // Panic if it has already been registered.
                        panic!("RegisterMemoryReporter: '{}' name is already in use", entry.key())
                    },
                    ::std::collections::hash_map::Entry::Vacant(entry) => {
                        entry.insert(reporter);
                        true
                    },
                }
            },
            MemoryProfilerMsg::UnregisterMemoryReporter(name) => {
                // Panic if it hasn't previously been registered.
                match self.reporters.remove(&name) {
                    Some(_) => true,
                    None =>
                        panic!("UnregisterMemoryReporter: '{}' name is unknown", &name),
                }
            },
            MemoryProfilerMsg::Print => {
                self.handle_print_msg();
                true
            },
            MemoryProfilerMsg::Exit => false
        }
    }

    /// Gathers reports from every registered reporter and prints each one as
    /// `size-in-MiB: name`.
    fn handle_print_msg(&self) {
        println!("{:12}: {}", "_size (MiB)_", "_category_");

        // Collect reports from memory reporters.
        //
        // This serializes the report-gathering. It might be worth creating a new scoped thread for
        // each reporter once we have enough of them.
        //
        // If anything goes wrong with a reporter, we just skip it.
        let mebi = 1024f64 * 1024f64;   // hoisted out of the loops: loop-invariant
        for reporter in self.reporters.values() {
            let (chan, port) = channel();
            if reporter.collect_reports(MemoryReportsChan(chan)) {
                if let Ok(reports) = port.recv() {
                    for report in reports {
                        println!("{:12.2}: {}", (report.size as f64) / mebi, report.name);
                    }
                }
            }
        }
        println!("");
    }
}
/// Collects global measurements from the OS and heap allocators.
/// Zero-sized: all state is read from the OS/allocator at report time.
struct SystemMemoryReporter;
impl MemoryReporter for SystemMemoryReporter {
    // Gathers OS-level and allocator-level measurements; always succeeds
    // (individual unavailable metrics are simply omitted).
    fn collect_reports(&self, reports_chan: MemoryReportsChan) -> bool {
        let mut reports = vec![];
        {
            // Helper that records a metric only when a value is available;
            // scoped so its mutable borrow of `reports` ends before the send.
            let mut report = |name: &str, size| {
                if let Some(size) = size {
                    reports.push(MemoryReport { name: name.to_owned(), size: size });
                }
            };

            // Virtual and physical memory usage, as reported by the OS.
            report("vsize", get_vsize());
            report("resident", get_resident());

            // Memory segments, as reported by the OS.
            for seg in get_resident_segments().iter() {
                report(seg.0.as_slice(), Some(seg.1));
            }

            // Total number of bytes allocated by the application on the system
            // heap.
            report("system-heap-allocated", get_system_heap_allocated());

            // The descriptions of the following jemalloc measurements are taken
            // directly from the jemalloc documentation.

            // "Total number of bytes allocated by the application."
            report("jemalloc-heap-allocated", get_jemalloc_stat("stats.allocated"));

            // "Total number of bytes in active pages allocated by the application.
            // This is a multiple of the page size, and greater than or equal to
            // |stats.allocated|."
            report("jemalloc-heap-active", get_jemalloc_stat("stats.active"));

            // "Total number of bytes in chunks mapped on behalf of the application.
            // This is a multiple of the chunk size, and is at least as large as
            // |stats.active|. This does not include inactive chunks."
            report("jemalloc-heap-mapped", get_jemalloc_stat("stats.mapped"));
        }
        reports_chan.send(reports);
        true
    }
}
// glibc's mallinfo(3); only available (and meaningful) on Linux.
#[cfg(target_os="linux")]
extern {
    fn mallinfo() -> struct_mallinfo;
}

// C layout of glibc's `struct mallinfo`. Note every field is a c_int, so
// values wrap above ~2 GiB -- a known mallinfo limitation; TODO confirm
// whether that matters for the heaps being measured here.
#[cfg(target_os="linux")]
#[repr(C)]
pub struct struct_mallinfo {
    arena: c_int,
    ordblks: c_int,
    smblks: c_int,
    hblks: c_int,
    hblkhd: c_int,
    usmblks: c_int,
    fsmblks: c_int,
    uordblks: c_int,
    fordblks: c_int,
    keepcost: c_int,
}
#[cfg(target_os="linux")]
fn get_system_heap_allocated() -> Option<u64> {
let mut info: struct_mallinfo;
unsafe {
info = mallinfo();
}
// The documentation in the glibc man page makes it sound like |uordblks|
// would suffice, but that only gets the small allocations that are put in
// the brk heap. We need |hblkhd| as well to get the larger allocations
// that are mmapped.
Some((info.hblkhd + info.uordblks) as u64)
}
// No portable system-heap measurement outside Linux; report "unavailable".
#[cfg(not(target_os="linux"))]
fn get_system_heap_allocated() -> Option<u64> {
    None
}
// jemalloc's mallctl entry point (prefixed symbol). Reads/writes named
// allocator controls; returns 0 on success.
extern {
    fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
                  newp: *mut c_void, newlen: size_t) -> c_int;
}
// Reads one jemalloc statistic (e.g. "stats.allocated") via mallctl.
// Returns None if either mallctl call fails.
fn get_jemalloc_stat(value_name: &str) -> Option<u64> {
    // Before we request the measurement of interest, we first send an "epoch"
    // request. Without that jemalloc gives cached statistics(!) which can be
    // highly inaccurate.
    let epoch_name = "epoch";
    // NOTE(review): CString::from_slice is a pre-1.0 API; modern code would
    // use CString::new and handle interior NULs.
    let epoch_c_name = CString::from_slice(epoch_name.as_bytes());
    let mut epoch: u64 = 0;
    let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
    let mut epoch_len = size_of::<u64>() as size_t;

    let value_c_name = CString::from_slice(value_name.as_bytes());
    let mut value: size_t = 0;
    let value_ptr = &mut value as *mut _ as *mut c_void;
    let mut value_len = size_of::<size_t>() as size_t;

    // Using the same values for the `old` and `new` parameters is enough
    // to get the statistics updated.
    let rv = unsafe {
        je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr,
                   epoch_len)
    };
    if rv != 0 {
        return None;
    }
    // Read-only query for the requested statistic (no `new` value supplied).
    let rv = unsafe {
        je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len,
                   null_mut(), 0)
    };
    if rv != 0 {
        return None;
    }
    Some(value as u64)
}
// Like std::macros::try!, but for Option<>.
// Unwraps `Some`, or early-returns None from the enclosing function.
macro_rules! option_try(
    ($e:expr) => (match $e { Some(e) => e, None => return None })
);
// Reads the `field`-th (0-based, whitespace-separated) value from
// /proc/self/statm and converts it from pages to bytes.
// Uses pre-1.0 I/O APIs (File::read_to_string, env::page_size).
#[cfg(target_os="linux")]
fn get_proc_self_statm_field(field: usize) -> Option<u64> {
    let mut f = File::open(&Path::new("/proc/self/statm"));
    match f.read_to_string() {
        Ok(contents) => {
            let s = option_try!(contents.as_slice().words().nth(field));
            // statm values are in units of pages.
            let npages = option_try!(s.parse::<u64>().ok());
            Some(npages * (::std::env::page_size() as u64))
        }
        Err(_) => None
    }
}
// statm field 0 is total program size (vsize), in pages.
#[cfg(target_os="linux")]
fn get_vsize() -> Option<u64> {
    get_proc_self_statm_field(0)
}

// statm field 1 is resident set size, in pages.
#[cfg(target_os="linux")]
fn get_resident() -> Option<u64> {
    get_proc_self_statm_field(1)
}
// On macOS these delegate to Mach task_info-based helpers defined elsewhere
// in this crate (virtual_size / resident_size).
#[cfg(target_os="macos")]
fn get_vsize() -> Option<u64> {
    virtual_size()
}

#[cfg(target_os="macos")]
fn get_resident() -> Option<u64> {
    resident_size()
}

// All other platforms: measurement unavailable.
#[cfg(not(any(target_os="linux", target_os = "macos")))]
fn get_vsize() -> Option<u64> {
    None
}

#[cfg(not(any(target_os="linux", target_os = "macos")))]
fn get_resident() -> Option<u64> {
    None
}
// Parses /proc/self/smaps into (segment name, resident bytes) pairs, sorted
// by size, with small segments aggregated and a synthetic total appended.
#[cfg(target_os="linux")]
fn get_resident_segments() -> Vec<(String, u64)> {
    use regex::Regex;
    use std::collections::HashMap;
    use std::collections::hash_map::Entry;

    // The first line of an entry in /proc/<pid>/smaps looks just like an entry
    // in /proc/<pid>/maps:
    //
    //   address           perms offset  dev   inode  pathname
    //   02366000-025d8000 rw-p 00000000 00:00 0      [heap]
    //
    // Each of the following lines contains a key and a value, separated
    // by ": ", where the key does not contain either of those characters.
    // For example:
    //
    //   Rss:           132 kB
    let path = Path::new("/proc/self/smaps");
    let mut f = ::std::old_io::BufferedReader::new(File::open(&path));

    // NOTE(review): `[:xdigit:]` is the old regex crate's ASCII-class
    // shorthand; newer versions require `[[:xdigit:]]` -- verify against the
    // pinned regex version.
    let seg_re = Regex::new(
        r"^[:xdigit:]+-[:xdigit:]+ (....) [:xdigit:]+ [:xdigit:]+:[:xdigit:]+ \d+ +(.*)").unwrap();
    let rss_re = Regex::new(r"^Rss: +(\d+) kB").unwrap();

    // We record each segment's resident size.
    let mut seg_map: HashMap<String, u64> = HashMap::new();

    // Two-state parser: find a segment header, then find its Rss line.
    #[derive(PartialEq)]
    enum LookingFor { Segment, Rss }
    let mut looking_for = LookingFor::Segment;

    let mut curr_seg_name = String::new();

    // Parse the file.
    for line in f.lines() {
        let line = match line {
            Ok(line) => line,
            Err(_) => continue,
        };
        if looking_for == LookingFor::Segment {
            // Look for a segment info line.
            let cap = match seg_re.captures(line.as_slice()) {
                Some(cap) => cap,
                None => continue,
            };
            let perms = cap.at(1).unwrap();
            let pathname = cap.at(2).unwrap();

            // Construct the segment name from its pathname and permissions.
            curr_seg_name.clear();
            curr_seg_name.push_str("- ");
            if pathname == "" || pathname.starts_with("[stack:") {
                // Anonymous memory. Entries marked with "[stack:nnn]"
                // look like thread stacks but they may include other
                // anonymous mappings, so we can't trust them and just
                // treat them as entirely anonymous.
                curr_seg_name.push_str("anonymous");
            } else {
                curr_seg_name.push_str(pathname);
            }
            curr_seg_name.push_str(" (");
            curr_seg_name.push_str(perms);
            curr_seg_name.push_str(")");

            looking_for = LookingFor::Rss;
        } else {
            // Look for an "Rss:" line.
            let cap = match rss_re.captures(line.as_slice()) {
                Some(cap) => cap,
                None => continue,
            };
            let rss = cap.at(1).unwrap().parse::<u64>().unwrap() * 1024;

            if rss > 0 {
                // Aggregate small segments (under 512 KiB) into "- other".
                let seg_name = if rss < 512 * 1024 {
                    "- other".to_owned()
                } else {
                    curr_seg_name.clone()
                };
                match seg_map.entry(seg_name) {
                    Entry::Vacant(entry) => { entry.insert(rss); },
                    Entry::Occupied(mut entry) => *entry.get_mut() += rss,
                }
            }
            looking_for = LookingFor::Segment;
        }
    }

    let mut segs: Vec<(String, u64)> = seg_map.into_iter().collect();

    // Get the total and add it to the vector. Note that this total differs
    // from the "resident" measurement obtained via /proc/<pid>/statm in
    // get_resident(). It's unclear why this difference occurs; for some
    // processes the measurements match, but for Servo they do not.
    let total = segs.iter().map(|&(_, size)| size).sum();
    segs.push(("resident-according-to-smaps".to_owned(), total));

    // Sort by size; the total will be first.
    segs.sort_by(|&(_, rss1), &(_, rss2)| rss2.cmp(&rss1));

    segs
}
// smaps parsing is Linux-specific; elsewhere report no segments.
#[cfg(not(target_os="linux"))]
fn get_resident_segments() -> Vec<(String, u64)> {
    vec![]
}
|
#![allow(
dead_code,
mutable_transmutes,
non_camel_case_types,
non_snake_case,
non_upper_case_globals,
unused_assignments
)]
use std::f64;
use std::ffi::CString;
#[cfg(target_arch = "x86")]
pub use std::arch::x86::{
__m128, _mm_add_ps, _mm_loadu_ps, _mm_movehl_ps, _mm_movelh_ps, _mm_mul_ps, _mm_set1_ps,
_mm_setr_ps, _mm_setzero_ps, _mm_storeu_ps, _mm_unpackhi_ps, _mm_unpacklo_ps,
};
#[cfg(target_arch = "x86_64")]
pub use std::{arch::x86_64::{
__m128, _mm_add_ps, _mm_loadu_ps, _mm_movehl_ps, _mm_movelh_ps, _mm_mul_ps, _mm_set1_ps,
_mm_setr_ps, _mm_setzero_ps, _mm_storeu_ps, _mm_unpackhi_ps, _mm_unpacklo_ps,
}};
// Geometry and pixel format a decoder reports for the current frame.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_decoder_frame_info {
    pub w: i32,
    pub h: i32,
    pub format: flow_pixel_format,
}
// C entry points this translation unit still calls. Generated by c2rust;
// `#[no_mangle]` on foreign declarations is a translation artifact.
extern "C" {
    // --- Pixel-format queries ---
    #[no_mangle]
    fn flow_pixel_format_bytes_per_pixel(format: flow_pixel_format) -> u32;
    #[no_mangle]
    fn flow_effective_pixel_format(b: *mut flow_bitmap_bgra) -> flow_pixel_format;
    #[no_mangle]
    fn flow_pixel_format_channels(format: flow_pixel_format) -> u32;
    // Bounded, variadic snprintf-alike used for error-message formatting.
    #[no_mangle]
    fn flow_snprintf(
        s: *mut libc::c_char,
        n: usize,
        fmt: *const libc::c_char,
        _: ...
    ) -> i32;
    // --- Context-tracked allocation (ownership recorded in flow_c) ---
    #[no_mangle]
    fn flow_set_owner(c: *mut flow_c, thing: *mut libc::c_void, owner: *mut libc::c_void) -> bool;
    #[no_mangle]
    fn flow_context_calloc(
        c: *mut flow_c,
        instance_count: usize,
        instance_size: usize,
        destructor: flow_destructor_function,
        owner: *mut libc::c_void,
        file: *const libc::c_char,
        line: i32,
    ) -> *mut libc::c_void;
    #[no_mangle]
    fn flow_context_malloc(
        c: *mut flow_c,
        byte_count: usize,
        destructor: flow_destructor_function,
        owner: *mut libc::c_void,
        file: *const libc::c_char,
        line: i32,
    ) -> *mut libc::c_void;
    #[no_mangle]
    fn flow_deprecated_free(
        c: *mut flow_c,
        pointer: *mut libc::c_void,
        file: *const libc::c_char,
        line: i32,
    );
    #[no_mangle]
    fn flow_destroy(
        c: *mut flow_c,
        pointer: *mut libc::c_void,
        file: *const libc::c_char,
        line: i32,
    ) -> bool;
    // --- Error reporting and profiling hooks on the context ---
    #[no_mangle]
    fn flow_context_set_error_get_message_buffer(
        c: *mut flow_c,
        code: flow_status_code,
        file: *const libc::c_char,
        line: i32,
        function_name: *const libc::c_char,
    ) -> *mut libc::c_char;
    #[no_mangle]
    fn flow_context_add_to_callstack(
        c: *mut flow_c,
        file: *const libc::c_char,
        line: i32,
        function_name: *const libc::c_char,
    ) -> bool;
    #[no_mangle]
    fn flow_context_profiler_start(
        c: *mut flow_c,
        name: *const libc::c_char,
        allow_recursion: bool,
    );
    #[no_mangle]
    fn flow_context_profiler_stop(
        c: *mut flow_c,
        name: *const libc::c_char,
        assert_started: bool,
        stop_children: bool,
    );
    // --- libm functions referenced directly (c2rust keeps C names) ---
    #[no_mangle]
    fn pow(_: f64, _: f64) -> f64;
    #[no_mangle]
    fn flow_bitmap_bgra_create_header(
        c: *mut flow_c,
        sx: i32,
        sy: i32,
    ) -> *mut flow_bitmap_bgra;
    #[no_mangle]
    fn memcpy(_: *mut libc::c_void, _: *const libc::c_void, _: u64) -> *mut libc::c_void;
    #[no_mangle]
    fn fabs(_: f64) -> f64;
    // Bessel function of the first kind, order 1 (used by Jinc filters).
    #[no_mangle]
    fn j1(_: f64) -> f64;
    #[no_mangle]
    fn fmin(_: f64, _: f64) -> f64;
    #[no_mangle]
    fn ceil(_: f64) -> f64;
    #[no_mangle]
    fn floor(_: f64) -> f64;
    #[no_mangle]
    fn fmax(_: f64, _: f64) -> f64;
    #[no_mangle]
    fn sqrt(_: f64) -> f64;
    #[no_mangle]
    fn exp(_: f64) -> f64;
    #[no_mangle]
    fn memset(_: *mut libc::c_void, _: i32, _: u64) -> *mut libc::c_void;
    // --- Float bitmap constructors ---
    #[no_mangle]
    fn flow_bitmap_float_create(
        c: *mut flow_c,
        sx: i32,
        sy: i32,
        channels: i32,
        zeroed: bool,
    ) -> *mut flow_bitmap_float;
    #[no_mangle]
    fn flow_bitmap_float_create_header(
        c: *mut flow_c,
        sx: i32,
        sy: i32,
        channels: i32,
    ) -> *mut flow_bitmap_float;
}
// Little-CMS style chromaticity types, mirrored for ICC/gamma metadata.
pub type cmsFloat64Number = f64;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct cmsCIExyY {
    pub x: cmsFloat64Number,
    pub y: cmsFloat64Number,
    pub Y: cmsFloat64Number,
}
// Red/green/blue primaries, each as an xyY chromaticity.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct cmsCIExyYTRIPLE {
    pub Red: cmsCIExyY,
    pub Green: cmsCIExyY,
    pub Blue: cmsCIExyY,
}
// Codec direction flags (values copied from the C header).
pub type FLOW_DIRECTION = u32;
pub const FLOW_INPUT: FLOW_DIRECTION = 4;
pub const FLOW_OUTPUT: FLOW_DIRECTION = 8;
// Library status codes; the numeric gaps group related categories
// (10s resource, 50s argument, 60s codec, >=200 reserved for callers).
#[repr(u32)]
#[derive(Copy, Clone)]
pub enum flow_status_code {
    No_Error = 0,
    Out_of_memory = 10,
    IO_error = 20,
    Invalid_internal_state = 30,
    Panic = 31,
    Not_implemented = 40,
    Invalid_argument = 50,
    Null_argument = 51,
    Invalid_dimensions = 52,
    Unsupported_pixel_format = 53,
    Item_does_not_exist = 54,
    Image_decoding_failed = 60,
    Image_encoding_failed = 61,
    ErrorReportingInconsistency = 90,
    First_rust_error = 200,
    Other_error = 1024,
    // ___Last_library_error,
    First_user_defined_error = 1025,
    Last_user_defined_error = 2147483647
}
// Resampling filter identifiers (c2rust turns C enums into type + consts;
// values match the C header and must not be renumbered).
pub type flow_interpolation_filter = u32;
pub const flow_interpolation_filter_NCubicSharp: flow_interpolation_filter = 30;
pub const flow_interpolation_filter_NCubic: flow_interpolation_filter = 29;
pub const flow_interpolation_filter_MitchellFast: flow_interpolation_filter = 28;
pub const flow_interpolation_filter_Fastest: flow_interpolation_filter = 27;
pub const flow_interpolation_filter_CatmullRomFastSharp: flow_interpolation_filter = 26;
pub const flow_interpolation_filter_CatmullRomFast: flow_interpolation_filter = 25;
pub const flow_interpolation_filter_Box: flow_interpolation_filter = 24;
pub const flow_interpolation_filter_Linear: flow_interpolation_filter = 23;
pub const flow_interpolation_filter_Triangle: flow_interpolation_filter = 22;
pub const flow_interpolation_filter_RawLanczos2Sharp: flow_interpolation_filter = 21;
pub const flow_interpolation_filter_RawLanczos2: flow_interpolation_filter = 20;
pub const flow_interpolation_filter_RawLanczos3Sharp: flow_interpolation_filter = 19;
pub const flow_interpolation_filter_RawLanczos3: flow_interpolation_filter = 18;
pub const flow_interpolation_filter_Jinc: flow_interpolation_filter = 17;
pub const flow_interpolation_filter_Hermite: flow_interpolation_filter = 16;
pub const flow_interpolation_filter_CubicBSpline: flow_interpolation_filter = 15;
pub const flow_interpolation_filter_Mitchell: flow_interpolation_filter = 14;
pub const flow_interpolation_filter_CatmullRom: flow_interpolation_filter = 13;
pub const flow_interpolation_filter_CubicSharp: flow_interpolation_filter = 12;
pub const flow_interpolation_filter_Cubic: flow_interpolation_filter = 11;
pub const flow_interpolation_filter_CubicFast: flow_interpolation_filter = 10;
pub const flow_interpolation_filter_Lanczos2Sharp: flow_interpolation_filter = 9;
pub const flow_interpolation_filter_Lanczos2: flow_interpolation_filter = 8;
pub const flow_interpolation_filter_LanczosSharp: flow_interpolation_filter = 7;
pub const flow_interpolation_filter_Lanczos: flow_interpolation_filter = 6;
pub const flow_interpolation_filter_GinsengSharp: flow_interpolation_filter = 5;
pub const flow_interpolation_filter_Ginseng: flow_interpolation_filter = 4;
pub const flow_interpolation_filter_RobidouxSharp: flow_interpolation_filter = 3;
pub const flow_interpolation_filter_Robidoux: flow_interpolation_filter = 2;
pub const flow_interpolation_filter_RobidouxFast: flow_interpolation_filter = 1;
// Pixel layouts; note the nonsequential value of flow_bgr32 (from the C header).
pub type flow_pixel_format = u32;
pub const flow_gray8: flow_pixel_format = 1;
pub const flow_bgr32: flow_pixel_format = 70;
pub const flow_bgra32: flow_pixel_format = 4;
pub const flow_bgr24: flow_pixel_format = 3;
// How a bitmap is combined with existing canvas contents.
pub type flow_bitmap_compositing_mode = u32;
pub const flow_bitmap_compositing_blend_with_matte: flow_bitmap_compositing_mode = 2;
pub const flow_bitmap_compositing_blend_with_self: flow_bitmap_compositing_mode = 1;
pub const flow_bitmap_compositing_replace_self: flow_bitmap_compositing_mode = 0;
// Working colorspace for float-domain scaling; `srgb` is an alias of `as_is`.
pub type flow_working_floatspace = u32;
pub const flow_working_floatspace_gamma: flow_working_floatspace = 2;
pub const flow_working_floatspace_linear: flow_working_floatspace = 1;
pub const flow_working_floatspace_as_is: flow_working_floatspace = 0;
pub const flow_working_floatspace_srgb: flow_working_floatspace = 0;
// I/O capability bits: read=1, write=2, seek=4 combined into modes.
pub type flow_io_mode = u32;
pub const flow_io_mode_read_write_seekable: flow_io_mode = 15;
pub const flow_io_mode_write_seekable: flow_io_mode = 6;
pub const flow_io_mode_read_seekable: flow_io_mode = 5;
pub const flow_io_mode_write_sequential: flow_io_mode = 2;
pub const flow_io_mode_read_sequential: flow_io_mode = 1;
pub const flow_io_mode_null: flow_io_mode = 0;
// Root library context: codecs, heap, allocation tracking, profiling, and
// the last-error record. Layout mirrors the C struct exactly.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_context {
    pub codec_set: *mut flow_context_codec_set,
    pub underlying_heap: flow_heap,
    pub object_tracking: flow_objtracking_info,
    pub log: flow_profiling_log,
    pub error: flow_error_info,
}
// Last-error state stored inline in the context (fixed-size message buffer
// and a bounded callstack of at most 8 frames).
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_error_info {
    pub reason: flow_status_code,
    pub callstack: [flow_error_callstack_line; 8],
    pub callstack_count: i32,
    pub callstack_capacity: i32,
    pub locked: bool,
    pub status_included_in_message: bool,
    pub message: [libc::c_char; 1024],
}
// One recorded stack frame: file/line/function of the error site.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_error_callstack_line {
    pub file: *const libc::c_char,
    pub line: i32,
    pub function_name: *const libc::c_char,
}
// Growable array of profiling entries plus the tick frequency used to
// convert `time` values to seconds.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_profiling_log {
    pub log: *mut flow_profiling_entry,
    pub count: u32,
    pub capacity: u32,
    pub ticks_per_second: i64,
}
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_profiling_entry {
    pub time: i64,
    pub name: *const libc::c_char,
    pub flags: flow_profiling_entry_flags,
}
// Bit-flag combinations describing a profiling entry (start/stop plus
// modifier bits, values from the C header).
pub type flow_profiling_entry_flags = u32;
pub const flow_profiling_entry_stop_children: flow_profiling_entry_flags = 56;
pub const flow_profiling_entry_stop_assert_started: flow_profiling_entry_flags = 24;
pub const flow_profiling_entry_stop: flow_profiling_entry_flags = 8;
pub const flow_profiling_entry_start_allow_recursion: flow_profiling_entry_flags = 6;
pub const flow_profiling_entry_start: flow_profiling_entry_flags = 2;
// Book-keeping for every allocation made through the context heap.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_objtracking_info {
    pub allocs: *mut flow_heap_object_record,
    pub next_free_slot: usize,
    pub total_slots: usize,
    pub bytes_allocated_net: usize,
    pub bytes_allocated_gross: usize,
    pub allocations_net: usize,
    pub allocations_gross: usize,
    pub bytes_freed: usize,
    pub allocations_net_peak: usize,
    pub bytes_allocated_net_peak: usize,
}
// One tracked allocation: pointer, size, owner, optional destructor, and the
// source location that allocated it.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_heap_object_record {
    pub ptr: *mut libc::c_void,
    pub bytes: usize,
    pub owner: *mut libc::c_void,
    pub destructor: flow_destructor_function,
    pub destructor_called: bool,
    pub allocated_by: *const libc::c_char,
    pub allocated_by_line: i32,
    pub is_owner: bool,
}
// Nullable C function pointer (Option<fn> has the same ABI as a C pointer).
pub type flow_destructor_function =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut libc::c_void) -> bool>;
pub type flow_c = flow_context;
// Pluggable allocator vtable backing the context.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_heap {
    pub _calloc: flow_heap_calloc_function,
    pub _malloc: flow_heap_malloc_function,
    pub _realloc: flow_heap_realloc_function,
    pub _free: flow_heap_free_function,
    pub _context_terminate: flow_heap_terminate_function,
    pub _private_state: *mut libc::c_void,
}
pub type flow_heap_terminate_function =
    Option<unsafe extern "C" fn(_: *mut flow_context, _: *mut flow_heap) -> ()>;
// Each allocator hook also receives the requesting file/line for tracking.
pub type flow_heap_free_function = Option<
    unsafe extern "C" fn(
        _: *mut flow_context,
        _: *mut flow_heap,
        _: *mut libc::c_void,
        _: *const libc::c_char,
        _: i32,
    ) -> (),
>;
pub type flow_heap_realloc_function = Option<
    unsafe extern "C" fn(
        _: *mut flow_context,
        _: *mut flow_heap,
        _: *mut libc::c_void,
        _: usize,
        _: *const libc::c_char,
        _: i32,
    ) -> *mut libc::c_void,
>;
pub type flow_heap_malloc_function = Option<
    unsafe extern "C" fn(
        _: *mut flow_context,
        _: *mut flow_heap,
        _: usize,
        _: *const libc::c_char,
        _: i32,
    ) -> *mut libc::c_void,
>;
pub type flow_heap_calloc_function = Option<
    unsafe extern "C" fn(
        _: *mut flow_context,
        _: *mut flow_heap,
        _: usize,
        _: usize,
        _: *const libc::c_char,
        _: i32,
    ) -> *mut libc::c_void,
>;
// Registry of available codecs, attached to the context.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_context_codec_set {
    pub codecs: *mut flow_codec_definition,
    pub codecs_count: usize,
}
// Vtable + metadata describing one codec (decoder and/or encoder hooks).
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_codec_definition {
    pub codec_id: i64,
    pub initialize: codec_initialize,
    pub get_info: codec_get_info_fn,
    pub get_frame_info: codec_get_frame_info_fn,
    pub set_downscale_hints: codec_set_downscale_hints_fn,
    pub switch_frame: codec_switch_frame_fn,
    pub read_frame: codec_read_frame_fn,
    pub write_frame: codec_write_frame_fn,
    pub stringify: codec_stringify_fn,
    pub name: *const libc::c_char,
    pub preferred_mime_type: *const libc::c_char,
    pub preferred_extension: *const libc::c_char,
}
// Writes a human-readable description of the codec state into a buffer.
pub type codec_stringify_fn = Option<
    unsafe extern "C" fn(
        _: *mut flow_c,
        _: *mut libc::c_void,
        _: *mut libc::c_char,
        _: usize,
    ) -> bool,
>;
pub type codec_write_frame_fn = Option<
    unsafe extern "C" fn(
        _: *mut flow_c,
        _: *mut libc::c_void,
        _: *mut flow_bitmap_bgra,
        _: *mut flow_encoder_hints,
    ) -> bool,
>;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_encoder_hints {
    pub disable_png_alpha: bool,
    pub zlib_compression_level: i32,
}
// Interleaved 8-bit BGRA/BGR bitmap; `stride` is the byte width of one row
// (may exceed w * bytes-per-pixel due to padding).
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_bitmap_bgra {
    pub w: u32,
    pub h: u32,
    pub stride: u32,
    pub pixels: *mut libc::c_uchar,
    pub fmt: flow_pixel_format,
    pub matte_color: [u8; 4],
    pub compositing_mode: flow_bitmap_compositing_mode,
}
pub type codec_read_frame_fn = Option<
    unsafe extern "C" fn(
        _: *mut flow_c,
        _: *mut libc::c_void,
        _: *mut flow_bitmap_bgra,
        _: *mut flow_decoder_color_info,
    ) -> bool,
>;
// Color metadata discovered during decode (ICC profile bytes or gAMA/cHRM
// chromaticities, per `source`).
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_decoder_color_info {
    pub source: flow_codec_color_profile_source,
    pub profile_buf: *mut u8,
    pub buf_length: usize,
    pub white_point: cmsCIExyY,
    pub primaries: cmsCIExyYTRIPLE,
    pub gamma: f64,
}
// Where the color metadata above came from.
pub type flow_codec_color_profile_source = u32;
pub const flow_codec_color_profile_source_sRGB: flow_codec_color_profile_source = 4;
pub const flow_codec_color_profile_source_GAMA_CHRM: flow_codec_color_profile_source = 3;
pub const flow_codec_color_profile_source_ICCP_GRAY: flow_codec_color_profile_source = 2;
pub const flow_codec_color_profile_source_ICCP: flow_codec_color_profile_source = 1;
pub const flow_codec_color_profile_source_null: flow_codec_color_profile_source = 0;
// Selects a frame by index in multi-frame images (e.g. GIF).
pub type codec_switch_frame_fn =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut libc::c_void, _: usize) -> bool>;
pub type codec_set_downscale_hints_fn = Option<
    unsafe extern "C" fn(
        _: *mut flow_c,
        _: *mut flow_codec_instance,
        _: *mut flow_decoder_downscale_hints,
    ) -> bool,
>;
// Lets the decoder downscale during decode (e.g. libjpeg block scaling)
// when the target size is much smaller than the source.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_decoder_downscale_hints {
    pub downscale_if_wider_than: i64,
    pub or_if_taller_than: i64,
    pub downscaled_min_width: i64,
    pub downscaled_min_height: i64,
    pub scale_luma_spatially: bool,
    pub gamma_correct_for_srgb_during_spatial_luma_scaling: bool,
}
// A codec bound to a specific I/O stream and direction.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_codec_instance {
    pub io_id: i32,
    pub codec_id: i64,
    pub codec_state: *mut libc::c_void,
    pub io: *mut flow_io,
    pub direction: FLOW_DIRECTION,
}
// Abstract I/O stream: function pointers plus opaque user data.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_io {
    pub context: *mut flow_c,
    pub mode: flow_io_mode,
    pub read_func: flow_io_read_function,
    pub write_func: flow_io_write_function,
    pub position_func: flow_io_position_function,
    pub seek_function: flow_io_seek_function,
    pub dispose_func: flow_destructor_function,
    pub user_data: *mut libc::c_void,
    pub optional_file_length: i64,
}
pub type flow_io_seek_function =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut flow_io, _: i64) -> bool>;
pub type flow_io_position_function =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut flow_io) -> i64>;
// Read/write return the byte count transferred (i64 to allow error signaling).
pub type flow_io_write_function = Option<
    unsafe extern "C" fn(_: *mut flow_c, _: *mut flow_io, _: *const u8, _: usize) -> i64,
>;
pub type flow_io_read_function = Option<
    unsafe extern "C" fn(_: *mut flow_c, _: *mut flow_io, _: *mut u8, _: usize) -> i64,
>;
pub type codec_get_frame_info_fn = Option<
    unsafe extern "C" fn(
        _: *mut flow_c,
        _: *mut libc::c_void,
        _: *mut flow_decoder_frame_info,
    ) -> bool,
>;
pub type codec_get_info_fn = Option<
    unsafe extern "C" fn(_: *mut flow_c, _: *mut libc::c_void, _: *mut flow_decoder_info) -> bool,
>;
// Whole-image metadata a decoder can report before frame decode.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_decoder_info {
    pub codec_id: i64,
    pub preferred_mime_type: *const libc::c_char,
    pub preferred_extension: *const libc::c_char,
    pub frame_count: usize,
    pub current_frame_index: i64,
    pub image_width: i32,
    pub image_height: i32,
    pub frame_decodes_into: flow_pixel_format,
}
pub type codec_initialize =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut flow_codec_instance) -> bool>;
// Planar-free f32 bitmap used as the working buffer for scaling;
// `float_stride` is the per-row float count (>= w * channels).
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_bitmap_float {
    pub w: u32,
    pub h: u32,
    pub channels: u32,
    pub pixels: *mut f32,
    pub pixels_borrowed: bool,
    pub float_count: u32,
    pub float_stride: u32,
    pub alpha_premultiplied: bool,
    pub alpha_meaningful: bool,
}
// Parameters of one interpolation kernel; p*/q* are the polynomial
// coefficients consumed by the `filter` callback, `window` its support.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_interpolation_details {
    pub window: f64,
    pub p1: f64,
    pub p2: f64,
    pub p3: f64,
    pub q1: f64,
    pub q2: f64,
    pub q3: f64,
    pub q4: f64,
    pub blur: f64,
    pub filter: flow_detailed_interpolation_method,
    pub sharpen_percent_goal: f32,
}
// Kernel evaluation callback: weight for a given distance from the center.
pub type flow_detailed_interpolation_method = Option<
    unsafe extern "C" fn(_: *const flow_interpolation_details, _: f64) -> f64,
>;
// Precomputed weights for one output pixel over source columns [Left, Right].
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_interpolation_pixel_contributions {
    pub Weights: *mut f32,
    pub Left: i32,
    pub Right: i32,
}
// One contribution row per output pixel along a scan line.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_interpolation_line_contributions {
    pub ContribRow: *mut flow_interpolation_pixel_contributions,
    pub WindowSize: u32,
    pub LineLength: u32,
    pub percent_negative: f64,
}
// 1-D convolution kernel with change thresholds and a scratch buffer.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_convolution_kernel {
    pub kernel: *mut f32,
    pub width: u32,
    pub radius: u32,
    pub threshold_min_change: f32,
    pub threshold_max_change: f32,
    pub buffer: *mut f32,
}
// Colorspace conversion state: a 256-entry byte->float decode table plus
// which transfer function (sRGB or pure gamma) to apply.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_colorcontext_info {
    pub byte_to_float: [f32; 256],
    pub floatspace: flow_working_floatspace,
    pub apply_srgb: bool,
    pub apply_gamma: bool,
    pub gamma: f32,
    pub gamma_inverse: f32,
}
// f32 <-> u32 bit-punning unions generated by c2rust (used by the fast
// pow2/log2 approximations below).
#[derive(Copy, Clone)]
#[repr(C)]
pub union C2RustUnnamed {
    pub i: u32,
    pub f: f32,
}
#[derive(Copy, Clone)]
#[repr(C)]
pub union C2RustUnnamed_0 {
    pub i: u32,
    pub f: f32,
}
#[derive(Copy, Clone)]
#[repr(C)]
pub union C2RustUnnamed_1 {
    pub f: f32,
    pub i: u32,
}
// Parameters for scaling into a sub-rectangle (x, y, w, h) of a canvas.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_nodeinfo_scale2d_render_to_canvas1d {
    pub x: u32,
    pub y: u32,
    pub w: u32,
    pub h: u32,
    pub sharpen_percent_goal: f32,
    pub interpolation_filter: flow_interpolation_filter,
    pub scale_in_colorspace: flow_working_floatspace,
}
/*
 * Copyright (c) Imazen LLC.
 * No part of this project, including this file, may be copied, modified,
 * propagated, or distributed except as permitted in COPYRIGHT.txt.
 * Licensed under the GNU Affero General Public License, Version 3.0.
 * Commercial licenses available at http://imageresizing.net/
 */
// Alias for libm's j1 (Bessel function of the first kind, order 1),
// declared in the extern "C" block above.
pub const BESSEL_01: unsafe extern "C" fn(_: f64) -> f64 = j1;
#[inline]
unsafe extern "C" fn flow_colorcontext_srgb_to_floatspace_uncached(
    colorcontext: *mut flow_colorcontext_info,
    value: u8,
) -> f32 {
    // Scale the 8-bit channel into [0, 1]. The multiply-by-reciprocal form is
    // kept verbatim so the rounding matches the original translation.
    let scaled: f32 = value as f32 * (1.0f32 / 255.0f32);
    // At most one transfer function applies, with sRGB taking precedence over
    // pure gamma; otherwise the value passes through unchanged.
    if (*colorcontext).apply_srgb {
        srgb_to_linear(scaled)
    } else if (*colorcontext).apply_gamma {
        flow_colorcontext_remove_gamma(colorcontext, scaled)
    } else {
        scaled
    }
}
/// Removes the context's gamma curve (decode step), linearizing `value`
/// by raising it to the configured `gamma` exponent.
#[inline]
unsafe extern "C" fn flow_colorcontext_remove_gamma(
    colorcontext: *mut flow_colorcontext_info,
    value: f32,
) -> f32 {
    let exponent: f64 = (*colorcontext).gamma as f64;
    pow(value as f64, exponent) as f32
}
/// Decodes one sRGB-encoded channel value in [0, 1] into linear light:
/// straight line below the knee, 2.4-power curve above it.
#[inline]
unsafe extern "C" fn srgb_to_linear(s: f32) -> f32 {
    if s <= 0.04045f32 {
        s / 12.92f32
    } else {
        // Exponent is intentionally the f32 literal widened to f64,
        // matching the transpiled C's arithmetic exactly.
        pow(
            ((s + 0.055f32) / (1.0f32 + 0.055f32)) as f64,
            2.4f32 as f64,
        ) as f32
    }
}
// Null sentinel used throughout the transpiled code for pointer defaults.
pub const NULL: i32 = 0 as i32;
// Buffer size passed to flow_snprintf when formatting context error messages.
pub const FLOW_ERROR_MESSAGE_SIZE: i32 = 1023 as i32;
// Pi at full f64 precision.
pub const IR_PI: f64 = 3.1415926535897932384626433832795f64;
/// Returns the larger of two 32-bit integers (ties return `a`, as before).
#[inline]
unsafe extern "C" fn int_max(a: i32, b: i32) -> i32 {
    if b > a { b } else { a }
}
/// Returns the smaller of two 32-bit integers (ties return `a`, as before).
#[inline]
unsafe extern "C" fn int_min(a: i32, b: i32) -> i32 {
    if b < a { b } else { a }
}
/// Gaussian density: exp(-x^2 / (2*sigma^2)) / (sigma * sqrt(2*pi)).
#[inline]
unsafe extern "C" fn ir_gaussian(x: f64, stdDev: f64) -> f64 {
    let two_variance: f64 = 2.0f64 * stdDev * stdDev;
    let numerator: f64 = exp(-x * x / two_variance);
    numerator / (sqrt(2.0f64 * IR_PI) * stdDev)
}
/// Rounds a float (half-up via +0.5) and clamps it into the 0..=255 byte range.
#[inline]
unsafe extern "C" fn uchar_clamp_ff(clr: f32) -> u8 {
    // Truncating cast after +0.5; Rust's f64 -> i16 cast saturates at the
    // i16 bounds, and the i16 -> u16 cast wraps negatives to large values.
    let rounded: u16 = (clr as f64 + 0.5f64) as i16 as u16;
    if rounded as i32 > 255 as i32 {
        // Either a genuine overflow (> 255) or a negative input that wrapped:
        // disambiguate by the sign of the original value.
        if clr < 0.0f32 { 0u8 } else { 255u8 }
    } else {
        rounded as u8
    }
}
/// Fast approximate 2^p using an exponent-field bit trick (Mineiro-style).
/// Identical arithmetic to the original; the union write/read is expressed
/// as `f32::from_bits`, which is the same reinterpretation.
#[inline]
unsafe extern "C" fn fastpow2(p: f32) -> f32 {
    // Fractional-part correction needs a +1 offset for negative exponents.
    let offset: f32 = if p < 0.0f32 { 1.0f32 } else { 0.0f32 };
    // Clamp at the subnormal boundary so the bit construction stays valid.
    let clipp: f32 = if p < -126.0f32 { -126.0f32 } else { p };
    let whole: i32 = clipp as i32;
    let z: f32 = clipp - whole as f32 + offset;
    // Build the IEEE-754 bit pattern directly: shift the (corrected) exponent
    // into the exponent field via a multiply by 2^23.
    let bits: u32 = (((1 as i32) << 23 as i32) as f32
        * (clipp + 121.2740575f32 + 27.7280233f32 / (4.84252568f32 - z) - 1.49012907f32 * z))
        as u32;
    f32::from_bits(bits)
}
/// Fast approximate log2(x) from the raw IEEE-754 bits (Mineiro-style).
/// The original's unions are expressed with `to_bits`/`from_bits` — the same
/// reinterpretations, bit for bit.
#[inline]
unsafe extern "C" fn fastlog2(x: f32) -> f32 {
    let bits: u32 = x.to_bits();
    // Force the exponent field to that of 0.5 so `m` carries only the mantissa.
    let m: f32 = f32::from_bits(bits & 0x7fffff as i32 as u32 | 0x3f000000 as i32 as u32);
    // Raw bits scaled by 2^-23 give a coarse log2 estimate; then correct it.
    let mut estimate: f32 = bits as f32;
    estimate *= 1.1920928955078125e-7f32;
    estimate - 124.22551499f32 - 1.498030302f32 * m - 1.72587999f32 / (0.3520887068f32 + m)
}
/// Fast approximate x^p via x^p == 2^(p * log2(x)).
#[inline]
unsafe extern "C" fn fastpow(x: f32, p: f32) -> f32 {
    let exponent: f32 = p * fastlog2(x);
    fastpow2(exponent)
}
/// Encodes a linear-light value in [0, 1] to sRGB scaled to [0, 255]:
/// linear segment near black, approximate 1/2.4-power segment elsewhere.
#[inline]
unsafe extern "C" fn linear_to_srgb(clr: f32) -> f32 {
    if clr <= 0.0031308f32 {
        12.92f32 * clr * 255.0f32
    } else {
        1.055f32 * 255.0f32 * fastpow(clr, 0.41666666f32) - 14.025f32
    }
}
/// Applies the context's inverse-gamma exponent (encode step) to a linear value.
#[inline]
unsafe extern "C" fn flow_colorcontext_apply_gamma(
    colorcontext: *mut flow_colorcontext_info,
    value: f32,
) -> f32 {
    let exponent: f64 = (*colorcontext).gamma_inverse as f64;
    pow(value as f64, exponent) as f32
}
/// Fast byte -> floatspace conversion via the context's 256-entry lookup table.
#[inline]
unsafe extern "C" fn flow_colorcontext_srgb_to_floatspace(
    colorcontext: *mut flow_colorcontext_info,
    value: u8,
) -> f32 {
    let lut = &(*colorcontext).byte_to_float;
    lut[value as usize]
}
/// Converts a working-floatspace value back to an sRGB byte, choosing the
/// encode path from the context's flags (gamma takes priority over sRGB,
/// matching the original check order).
#[inline]
unsafe extern "C" fn flow_colorcontext_floatspace_to_srgb(
    color: *mut flow_colorcontext_info,
    space_value: f32,
) -> u8 {
    if (*color).apply_gamma {
        uchar_clamp_ff(flow_colorcontext_apply_gamma(color, space_value) * 255.0f32)
    } else if (*color).apply_srgb {
        uchar_clamp_ff(linear_to_srgb(space_value))
    } else {
        uchar_clamp_ff(255.0f32 * space_value)
    }
}
/// Converts a linear-light BGR pixel (in place) to an L*u*v*-style triple.
/// Output layout: [0] = L*, [1] = u* + 100, [2] = v* + 100 — the chroma
/// channels are offset so they stay non-negative; `luv_to_linear` undoes it.
#[inline]
unsafe extern "C" fn linear_to_luv(bgr: *mut f32) {
    // White-point chromaticity used to derive the u'n/v'n reference values.
    let xn: f32 = 0.312713f32;
    let yn: f32 = 0.329016f32;
    let Yn: f32 = 1.0f32;
    let un: f32 = 4 as i32 as f32 * xn
        / (-(2 as i32) as f32 * xn
            + 12 as i32 as f32 * yn
            + 3 as i32 as f32);
    let vn: f32 = 9 as i32 as f32 * yn
        / (-(2 as i32) as f32 * xn
            + 12 as i32 as f32 * yn
            + 3 as i32 as f32);
    // Standard CIE L* constants: threshold ~(6/29)^3 and low-light slope 903.3.
    let y_split: f32 = 0.00885645f32;
    let y_adjust: f32 = 903.3f32;
    // Input is stored B, G, R at offsets 0, 1, 2.
    let R: f32 = *bgr.offset(2);
    let G: f32 = *bgr.offset(1);
    let B: f32 = *bgr.offset(0);
    if R == 0 as i32 as f32
        && G == 0 as i32 as f32
        && B == 0 as i32 as f32
    {
        // Pure black short-circuits to L = 0, u = v = 100 (zero chroma after offset),
        // avoiding the divisions below.
        *bgr.offset(0) = 0 as i32 as f32;
        let ref mut fresh0 = *bgr.offset(2);
        *fresh0 = 100 as i32 as f32;
        *bgr.offset(1) = *fresh0;
        return;
    }
    // Linear RGB -> XYZ.
    let X: f32 = 0.412453f32 * R + 0.35758f32 * G + 0.180423f32 * B;
    let Y: f32 = 0.212671f32 * R + 0.71516f32 * G + 0.072169f32 * B;
    let Z: f32 = 0.019334f32 * R + 0.119193f32 * G + 0.950227f32 * B;
    let Yd: f32 = Y / Yn;
    // u', v' chromaticity coordinates.
    let u: f32 = 4 as i32 as f32 * X
        / (X + 15 as i32 as f32 * Y + 3 as i32 as f32 * Z);
    let v: f32 = 9 as i32 as f32 * Y
        / (X + 15 as i32 as f32 * Y + 3 as i32 as f32 * Z);
    // L*: cube-root law above the split point, linear law below it.
    let ref mut fresh1 = *bgr.offset(0);
    *fresh1 = if Yd > y_split {
        (116 as i32 as f32
            * pow(Yd as f64, (1.0f32 / 3.0f32) as f64) as f32)
            - 16 as i32 as f32
    } else {
        (y_adjust) * Yd
    };
    let L: f32 = *fresh1;
    *bgr.offset(1) =
        13 as i32 as f32 * L * (u - un) + 100 as i32 as f32;
    *bgr.offset(2) =
        13 as i32 as f32 * L * (v - vn) + 100 as i32 as f32;
}
/// Inverse of `linear_to_luv`: converts an in-place [L, u+100, v+100] triple
/// back to linear-light BGR.
#[inline]
unsafe extern "C" fn luv_to_linear(luv: *mut f32) {
    let L: f32 = *luv.offset(0);
    // Undo the +100 chroma offset applied by linear_to_luv.
    let U: f32 = *luv.offset(1) - 100.0f32;
    let V: f32 = *luv.offset(2) - 100.0f32;
    if L == 0 as i32 as f32 {
        // L = 0 is pure black; bail out before dividing by 13 * L below.
        let ref mut fresh2 = *luv.offset(2);
        *fresh2 = 0 as i32 as f32;
        let ref mut fresh3 = *luv.offset(1);
        *fresh3 = *fresh2;
        *luv.offset(0) = *fresh3;
        return;
    }
    // Same white-point reference as the forward transform.
    let xn: f32 = 0.312713f32;
    let yn: f32 = 0.329016f32;
    let Yn: f32 = 1.0f32;
    let un: f32 = 4 as i32 as f32 * xn
        / (-(2 as i32) as f32 * xn
            + 12 as i32 as f32 * yn
            + 3 as i32 as f32);
    let vn: f32 = 9 as i32 as f32 * yn
        / (-(2 as i32) as f32 * xn
            + 12 as i32 as f32 * yn
            + 3 as i32 as f32);
    // Reciprocal of the 903.3 low-light slope used in linear_to_luv.
    let y_adjust_2: f32 = 0.00110705645f32;
    let u: f32 = U / (13 as i32 as f32 * L) + un;
    let v: f32 = V / (13 as i32 as f32 * L) + vn;
    // Invert the L* law: cubic branch above L = 8, linear branch below.
    let Y: f32 = if L > 8 as i32 as f32 {
        (Yn) * pow(
            ((L + 16 as i32 as f32) / 116 as i32 as f32)
                as f64,
            3 as i32 as f64,
        ) as f32
    } else {
        (Yn * L) * y_adjust_2
    };
    let X: f32 = 9 as i32 as f32 / 4.0f32 * Y * u / v;
    let Z: f32 = (9 as i32 as f32 * Y
        - 15 as i32 as f32 * v * Y
        - v * X)
        / (3 as i32 as f32 * v);
    // XYZ -> linear RGB, written back in B, G, R order.
    let r: f32 = 3.240479f32 * X - 1.53715f32 * Y - 0.498535f32 * Z;
    let g: f32 = -0.969256f32 * X + 1.875991f32 * Y + 0.041556f32 * Z;
    let b: f32 = 0.055648f32 * X - 0.204043f32 * Y + 1.057311f32 * Z;
    *luv.offset(0) = b;
    *luv.offset(1) = g;
    *luv.offset(2) = r;
}
/// Fills `out` with the coefficients of the two-piece BC cubic spline family
/// (Mitchell–Netravali parameterization): p1..p3 cover |t| < 1, q1..q4 cover
/// 1 <= |t| < 2. Evaluated later by `filter_flex_cubic`.
unsafe extern "C" fn derive_cubic_coefficients(
    B: f64,
    C: f64,
    out: *mut flow_interpolation_details,
) {
    let two_b: f64 = B + B;
    let d = &mut *out;
    d.p1 = 1.0f64 - 1.0f64 / 3.0f64 * B;
    d.p2 = -3.0f64 + two_b + C;
    d.p3 = 2.0f64 - 1.5f64 * B - C;
    d.q1 = 4.0f64 / 3.0f64 * B + 4.0f64 * C;
    d.q2 = -8.0f64 * C - two_b;
    d.q3 = B + 5.0f64 * C;
    d.q4 = -1.0f64 / 6.0f64 * B - C;
}
/// Evaluates the generic BC cubic kernel (coefficients from
/// `derive_cubic_coefficients`) at `x`, honoring the blur scale.
unsafe extern "C" fn filter_flex_cubic(
    d: *const flow_interpolation_details,
    x: f64,
) -> f64 {
    let t: f64 = fabs(x) / (*d).blur;
    if t < 1.0f64 {
        (*d).p1 + t * (t * ((*d).p2 + t * (*d).p3))
    } else if t < 2.0f64 {
        (*d).q1 + t * ((*d).q2 + t * ((*d).q3 + t * (*d).q4))
    } else {
        0.0f64
    }
}
/// Hard-coded two-piece cubic kernel (faster than the generic coefficient path).
unsafe extern "C" fn filter_bicubic_fast(
    d: *const flow_interpolation_details,
    t: f64,
) -> f64 {
    let abs_t: f64 = fabs(t) / (*d).blur;
    let abs_t_sq: f64 = abs_t * abs_t;
    if abs_t < 1.0f64 {
        1.0f64 - 2.0f64 * abs_t_sq + abs_t_sq * abs_t
    } else if abs_t < 2.0f64 {
        4.0f64 - 8.0f64 * abs_t + 5.0f64 * abs_t_sq - abs_t_sq * abs_t
    } else {
        0.0f64
    }
}
/// Unwindowed sinc kernel, truncated at the configured window radius.
unsafe extern "C" fn filter_sinc(
    d: *const flow_interpolation_details,
    t: f64,
) -> f64 {
    let abs_t: f64 = fabs(t) / (*d).blur;
    // sinc(0) = 1; handled explicitly to avoid 0/0.
    if abs_t == 0.0f64 {
        return 1.0f64;
    }
    if abs_t > (*d).window {
        return 0.0f64;
    }
    let scaled: f64 = abs_t * IR_PI;
    scaled.sin() / scaled
}
/// Box filter: weight 1 inside [-window, window), 0 elsewhere.
unsafe extern "C" fn filter_box(
    d: *const flow_interpolation_details,
    t: f64,
) -> f64 {
    let x: f64 = t / (*d).blur;
    let inside: bool = x >= -(1 as i32) as f64 * (*d).window && x < (*d).window;
    if inside { 1.0f64 } else { 0.0f64 }
}
/// Triangle (tent/linear) filter: 1 - |t| inside the unit radius, else 0.
unsafe extern "C" fn filter_triangle(
    d: *const flow_interpolation_details,
    t: f64,
) -> f64 {
    let x: f64 = fabs(t) / (*d).blur;
    if x < 1.0f64 { 1.0f64 - x } else { 0.0f64 }
}
/// Lanczos-style kernel: sinc(x) windowed by a wider sinc over the configured
/// window radius, zero outside it.
unsafe extern "C" fn filter_sinc_windowed(
    d: *const flow_interpolation_details,
    t: f64,
) -> f64 {
    let x: f64 = t / (*d).blur;
    let abs_t: f64 = fabs(x);
    // Center tap: both sinc factors are 1 (avoids 0/0).
    if abs_t == 0.0f64 {
        return 1.0f64;
    }
    // Outside the window the kernel is identically zero.
    if abs_t > (*d).window {
        return 0.0f64;
    }
    let window: f64 = (*d).window;
    window * (IR_PI * x / window).sin() * (x * IR_PI).sin() / (IR_PI * IR_PI * x * x)
}
/// Jinc kernel: j1(pi*x)/x, with the x -> 0 limit of pi/2 handled explicitly.
/// First zero crossing at x ~= 1.2196698912665045.
unsafe extern "C" fn filter_jinc(
    d: *const flow_interpolation_details,
    t: f64,
) -> f64 {
    let x: f64 = fabs(t) / (*d).blur;
    if x == 0.0f64 {
        // lim x->0 of j1(pi*x)/x = pi/2.
        return 0.5f64 * IR_PI;
    }
    j1(IR_PI * x) / x
}
/*
static inline double window_jinc (double x) {
double x_a = x * 1.2196698912665045;
if (x == 0.0)
return 1;
return (BesselOrderOne (IR_PI*x_a) / (x_a * IR_PI * 0.5));
// //x crossing #1 1.2196698912665045
}
static double filter_window_jinc (const struct flow_interpolation_details * d, double t) {
return window_jinc (t / (d->blur * d->window));
}
*/
/// "Ginseng" kernel: a sinc windowed by a normalized jinc, hard-truncated at
/// |t| = 3. The 1.2196698912665045 factor scales the jinc so its first zero
/// aligns with the window edge.
unsafe extern "C" fn filter_ginseng(
    d: *const flow_interpolation_details,
    t: f64,
) -> f64 {
    let abs_t: f64 = fabs(t) / (*d).blur;
    let t_pi: f64 = abs_t * IR_PI;
    // Center tap is exactly 1; avoids 0/0 in both factors.
    if abs_t == 0.0f64 {
        return 1.0f64;
    }
    // Support fixed at three lobes.
    if abs_t > 3.0f64 {
        return 0.0f64;
    }
    let jinc_x: f64 = 1.2196698912665045f64 * t_pi / (*d).window;
    let window_value: f64 = j1(jinc_x) / (jinc_x * 0.5f64);
    window_value * t_pi.sin() / t_pi
}
// Tiny epsilon subtracted before ceil() when sizing contribution windows in
// flow_interpolation_line_contributions_create, guarding against FP round-up.
pub const TONY: f64 = 0.00001f64;
/// Integrates the filter kernel with the trapezoid rule (50 samples across the
/// window, deliberately run a couple of steps past it) and returns
/// negative_area / positive_area — i.e. what fraction of the kernel's weight
/// sharpens rather than smooths. Panics if the filter pointer is None.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_percent_negative_weight(
    details: *const flow_interpolation_details,
) -> f64 {
    let samples: i32 = 50 as i32;
    let step: f64 = (*details).window / samples as f64;
    // Seed with the sample at -step so the first trapezoid spans [-step, 0].
    let mut last_height: f64 =
        (*details).filter.expect("non-null function pointer")(details, -step);
    let mut positive_area: f64 = 0 as i32 as f64;
    let mut negative_area: f64 = 0 as i32 as f64;
    let mut i: i32 = 0 as i32;
    while i <= samples + 2 as i32 {
        let height: f64 = (*details).filter.expect("non-null function pointer")(
            details,
            i as f64 * step,
        );
        // Trapezoid slice between the previous and current sample.
        let area: f64 = (height + last_height) / 2.0f64 * step;
        last_height = height;
        if area > 0 as i32 as f64 {
            positive_area += area
        } else {
            negative_area -= area
        }
        i += 1
    }
    return negative_area / positive_area;
}
/// Allocates a zero-initialized flow_interpolation_details tied to `context`
/// and fills in defaults: blur = 1, window = 2, p1 = q1 = 0, and the remaining
/// cubic coefficients set to 1 (the chained assignments preserve the original
/// C initializer order). Records Out_of_memory and returns null on failure.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_create(
    context: *mut flow_c,
) -> *mut flow_interpolation_details {
    let mut d: *mut flow_interpolation_details = flow_context_calloc(
        context,
        1 as i32 as usize,
        ::std::mem::size_of::<flow_interpolation_details>(),
        // No destructor: freed explicitly by flow_interpolation_details_destroy.
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        189 as i32,
    ) as *mut flow_interpolation_details;
    if d.is_null() {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            191 as i32,
            (*::std::mem::transmute::<&[u8; 34], &[libc::c_char; 34]>(
                b"flow_interpolation_details_create\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_interpolation_details;
    }
    (*d).blur = 1 as i32 as f64;
    (*d).window = 2 as i32 as f64;
    (*d).q1 = 0 as i32 as f64;
    (*d).p1 = (*d).q1;
    (*d).q4 = 1 as i32 as f64;
    (*d).q3 = (*d).q4;
    (*d).p3 = (*d).q3;
    (*d).q2 = (*d).p3;
    (*d).p2 = (*d).q2;
    (*d).sharpen_percent_goal = 0 as i32 as f32;
    return d;
}
/// Creates interpolation details configured as a BC cubic spline: derives the
/// p/q coefficients from (B, C), installs filter_flex_cubic, and applies the
/// given window and blur. On allocation failure the error is already recorded;
/// this frame is appended to the callstack and null is returned.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_create_bicubic_custom(
    context: *mut flow_c,
    window: f64,
    blur: f64,
    B: f64,
    C: f64,
) -> *mut flow_interpolation_details {
    let mut d: *mut flow_interpolation_details = flow_interpolation_details_create(context);
    if !d.is_null() {
        (*d).blur = blur;
        derive_cubic_coefficients(B, C, d);
        (*d).filter = Some(
            filter_flex_cubic
                as unsafe extern "C" fn(
                    _: *const flow_interpolation_details,
                    _: f64,
                ) -> f64,
        );
        (*d).window = window
    } else {
        flow_context_add_to_callstack(
            context,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            212 as i32,
            (*::std::mem::transmute::<&[u8; 49], &[libc::c_char; 49]>(
                b"flow_interpolation_details_create_bicubic_custom\x00",
            ))
            .as_ptr(),
        );
    }
    return d;
}
/// Creates interpolation details with an arbitrary kernel function and the
/// given window/blur. On allocation failure this frame is appended to the
/// callstack and null is returned.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_create_custom(
    context: *mut flow_c,
    window: f64,
    blur: f64,
    filter: flow_detailed_interpolation_method,
) -> *mut flow_interpolation_details {
    let mut d: *mut flow_interpolation_details = flow_interpolation_details_create(context);
    if !d.is_null() {
        (*d).blur = blur;
        (*d).filter = filter;
        (*d).window = window
    } else {
        flow_context_add_to_callstack(
            context,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            226 as i32,
            (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                b"flow_interpolation_details_create_custom\x00",
            ))
            .as_ptr(),
        );
    }
    return d;
}
/// Releases a details struct allocated by flow_interpolation_details_create
/// back to the context's allocator.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_destroy(
    context: *mut flow_c,
    details: *mut flow_interpolation_details,
) {
    flow_deprecated_free(
        context,
        details as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        233 as i32,
    );
}
/// Builds interpolation details for the given filter id, or — when
/// `checkExistenceOnly` is set — merely reports existence by returning a
/// non-null sentinel without allocating. Unknown ids record an
/// Invalid_argument error (unless existence-checking) and return null.
/// The match arms are the numeric discriminants of flow_interpolation_filter;
/// confirm the id -> variant mapping against the enum's declaration.
unsafe extern "C" fn InterpolationDetails_create_from_internal(
    context: *mut flow_c,
    filter: flow_interpolation_filter,
    checkExistenceOnly: bool,
) -> *mut flow_interpolation_details {
    let ex: bool = checkExistenceOnly;
    // Sentinel (-1) non-null pointer returned for existence checks; never dereferenced.
    let truePtr: *mut flow_interpolation_details =
        -(1 as i32) as *mut flow_interpolation_details;
    match filter as u32 {
        23 | 22 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    1 as i32 as f64,
                    1 as i32 as f64,
                    Some(
                        filter_triangle
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        20 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    2 as i32 as f64,
                    1 as i32 as f64,
                    Some(
                        filter_sinc
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        18 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    3 as i32 as f64,
                    1 as i32 as f64,
                    Some(
                        filter_sinc
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        21 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    2 as i32 as f64,
                    0.9549963639785485f64,
                    Some(
                        filter_sinc
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        19 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    3 as i32 as f64,
                    0.9812505644269356f64,
                    Some(
                        filter_sinc
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        15 => {
            // Hermite and BSpline no negative weights
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    2 as i32 as f64,
                    1 as i32 as f64,
                    1 as i32 as f64,
                    0 as i32 as f64,
                )
            };
        }
        8 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    2 as i32 as f64,
                    1 as i32 as f64,
                    Some(
                        filter_sinc_windowed
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        6 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    3 as i32 as f64,
                    1 as i32 as f64,
                    Some(
                        filter_sinc_windowed
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        9 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    2 as i32 as f64,
                    0.9549963639785485f64,
                    Some(
                        filter_sinc_windowed
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        7 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    3 as i32 as f64,
                    0.9812505644269356f64,
                    Some(
                        filter_sinc_windowed
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        10 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    2 as i32 as f64,
                    1 as i32 as f64,
                    Some(
                        filter_bicubic_fast
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        11 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    2 as i32 as f64,
                    1 as i32 as f64,
                    0 as i32 as f64,
                    1 as i32 as f64,
                )
            }
        }
        12 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    2 as i32 as f64,
                    0.9549963639785485f64,
                    0 as i32 as f64,
                    1 as i32 as f64,
                )
            }
        }
        13 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    2 as i32 as f64,
                    1 as i32 as f64,
                    0 as i32 as f64,
                    0.5f64,
                )
            }
        }
        25 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    1 as i32 as f64,
                    1 as i32 as f64,
                    0 as i32 as f64,
                    0.5f64,
                )
            }
        }
        26 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    1 as i32 as f64,
                    13.0f64 / 16.0f64,
                    0 as i32 as f64,
                    0.5f64,
                )
            }
        }
        14 => {
            // B = C = 1/3 is the classic Mitchell–Netravali parameterization.
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    2 as i32 as f64,
                    1 as i32 as f64,
                    1.0f64 / 3.0f64,
                    1.0f64 / 3.0f64,
                )
            }
        }
        28 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    1 as i32 as f64,
                    1 as i32 as f64,
                    1.0f64 / 3.0f64,
                    1.0f64 / 3.0f64,
                )
            }
        }
        29 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    2.5f64,
                    1.0f64 / 1.1685777620836932f64,
                    0.37821575509399867f64,
                    0.31089212245300067f64,
                )
            }
        }
        30 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    2.5f64,
                    1.0f64 / 1.105822933719019f64,
                    0.2620145123990142f64,
                    0.3689927438004929f64,
                )
            }
        }
        2 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    2 as i32 as f64,
                    1 as i32 as f64,
                    0.37821575509399867f64,
                    0.31089212245300067f64,
                )
            }
        }
        27 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    0.74f64,
                    0.74f64,
                    0.37821575509399867f64,
                    0.31089212245300067f64,
                )
            }
        }
        1 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    1.05f64,
                    1 as i32 as f64,
                    0.37821575509399867f64,
                    0.31089212245300067f64,
                )
            }
        }
        3 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    2 as i32 as f64,
                    1 as i32 as f64,
                    0.2620145123990142f64,
                    0.3689927438004929f64,
                )
            }
        }
        16 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_bicubic_custom(
                    context,
                    1 as i32 as f64,
                    1 as i32 as f64,
                    0 as i32 as f64,
                    0 as i32 as f64,
                )
            }
        }
        24 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    0.5f64,
                    1 as i32 as f64,
                    Some(
                        filter_box
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        4 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    3 as i32 as f64,
                    1 as i32 as f64,
                    Some(
                        filter_ginseng
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        5 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    3 as i32 as f64,
                    0.9812505644269356f64,
                    Some(
                        filter_ginseng
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        17 => {
            return if ex as i32 != 0 {
                truePtr
            } else {
                flow_interpolation_details_create_custom(
                    context,
                    6 as i32 as f64,
                    1.0f64,
                    Some(
                        filter_jinc
                            as unsafe extern "C" fn(
                                _: *const flow_interpolation_details,
                                _: f64,
                            ) -> f64,
                    ),
                )
            }
        }
        _ => {}
    }
    // Unknown filter id: record the error unless we were only probing existence.
    if !checkExistenceOnly {
        flow_snprintf(
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Invalid_argument,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                323 as i32,
                (*::std::mem::transmute::<&[u8; 42], &[libc::c_char; 42]>(
                    b"InterpolationDetails_create_from_internal\x00",
                ))
                .as_ptr(),
            ),
            FLOW_ERROR_MESSAGE_SIZE as usize,
            b"Invalid interpolation filter %d\x00" as *const u8 as *const libc::c_char,
            filter as i32,
        );
    }
    return NULL as *mut flow_interpolation_details;
}
/// Public entry point: allocate interpolation details for `filter`
/// (never in existence-check mode).
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_create_from(
    context: *mut flow_c,
    filter: flow_interpolation_filter,
) -> *mut flow_interpolation_details {
    InterpolationDetails_create_from_internal(context, filter, false)
}
/// True when `filter` maps to a known interpolation implementation. Uses the
/// internal existence-check mode, which returns a sentinel without allocating.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_filter_exists(
    filter: flow_interpolation_filter,
) -> bool {
    let probe = InterpolationDetails_create_from_internal(NULL as *mut flow_c, filter, true);
    !probe.is_null()
}
/// Allocates a flow_interpolation_line_contributions for `line_length` output
/// pixels, each with room for `windows_size` weights. All weights live in one
/// zero-initialized slab; every ContribRow entry points into its slice of that
/// slab. Cleans up partial allocations and records Out_of_memory on failure.
unsafe extern "C" fn LineContributions_alloc(
    context: *mut flow_c,
    line_length: u32,
    windows_size: u32,
) -> *mut flow_interpolation_line_contributions {
    let mut res: *mut flow_interpolation_line_contributions = flow_context_malloc(
        context,
        ::std::mem::size_of::<flow_interpolation_line_contributions>(),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        343 as i32,
    )
        as *mut flow_interpolation_line_contributions;
    if res.is_null() {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            345 as i32,
            (*::std::mem::transmute::<&[u8; 24], &[libc::c_char; 24]>(
                b"LineContributions_alloc\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_interpolation_line_contributions;
    }
    (*res).WindowSize = windows_size;
    (*res).LineLength = line_length;
    // Per-output-pixel window descriptors (uninitialized until the loop below).
    (*res).ContribRow = flow_context_malloc(
        context,
        (line_length as usize).wrapping_mul(::std::mem::size_of::<
            flow_interpolation_pixel_contributions,
        >()),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        351 as i32,
    ) as *mut flow_interpolation_pixel_contributions;
    if (*res).ContribRow.is_null() {
        flow_deprecated_free(
            context,
            res as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            353 as i32,
        );
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            354 as i32,
            (*::std::mem::transmute::<&[u8; 24], &[libc::c_char; 24]>(
                b"LineContributions_alloc\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_interpolation_line_contributions;
    }
    // One shared, zeroed slab of windows_size weights per output pixel.
    let allWeights: *mut f32 = flow_context_calloc(
        context,
        windows_size.wrapping_mul(line_length) as usize,
        ::std::mem::size_of::<f32>(),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        358 as i32,
    ) as *mut f32;
    if allWeights.is_null() {
        flow_deprecated_free(
            context,
            (*res).ContribRow as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            360 as i32,
        );
        flow_deprecated_free(
            context,
            res as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            361 as i32,
        );
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            362 as i32,
            (*::std::mem::transmute::<&[u8; 24], &[libc::c_char; 24]>(
                b"LineContributions_alloc\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_interpolation_line_contributions;
    }
    // Point each row at its slice of the shared slab.
    let mut i: u32 = 0 as i32 as u32;
    while i < line_length {
        let ref mut fresh4 = (*(*res).ContribRow.offset(i as isize)).Weights;
        *fresh4 = allWeights.offset(i.wrapping_mul(windows_size) as isize);
        i = i.wrapping_add(1)
    }
    return res;
}
/// Tears down a line-contributions structure: the shared weight slab first
/// (via row 0's Weights pointer), then the row array, then the struct itself.
/// Tolerates a partially-built structure (null ContribRow) and a null `p`.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_line_contributions_destroy(
    context: *mut flow_c,
    p: *mut flow_interpolation_line_contributions,
) {
    if !p.is_null() {
        if !(*p).ContribRow.is_null() {
            // All rows share one slab; row 0's pointer is assumed to be its base.
            // NOTE(review): left-edge trimming in ..._create can advance a row's
            // Weights pointer — confirm row 0's pointer still equals the slab
            // base here, otherwise this frees an interior pointer.
            flow_deprecated_free(
                context,
                (*(*p).ContribRow.offset(0)).Weights as *mut libc::c_void,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                377 as i32,
            );
        }
        flow_deprecated_free(
            context,
            (*p).ContribRow as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            379 as i32,
        );
    }
    flow_deprecated_free(
        context,
        p as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        381 as i32,
    );
}
/// Builds the per-output-pixel contribution table for resampling a line of
/// `input_line_size` pixels to `output_line_size` pixels with the given
/// filter. For each output pixel it samples the kernel over the source
/// window, normalizes the weights (optionally re-balancing positive vs.
/// negative weight to reach sharpen_percent_goal), then trims zero weights
/// from both edges of the window. Records the overall negative/positive area
/// ratio on the result. Returns null (with the error recorded) on failure.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_line_contributions_create(
    context: *mut flow_c,
    output_line_size: u32,
    input_line_size: u32,
    details: *const flow_interpolation_details,
) -> *mut flow_interpolation_line_contributions {
    // How sharp the kernel already is, and how sharp the caller wants it.
    let sharpen_ratio: f64 = flow_interpolation_details_percent_negative_weight(details);
    let desired_sharpen_ratio: f64 = fmin(
        0.999999999f32 as f64,
        fmax(
            sharpen_ratio,
            (*details).sharpen_percent_goal as f64 / 100.0f64,
        ),
    );
    let scale_factor: f64 =
        output_line_size as f64 / input_line_size as f64;
    // When downscaling, the kernel support widens by 1/scale.
    let downscale_factor: f64 = fmin(1.0f64, scale_factor);
    let half_source_window: f64 = ((*details).window + 0.5f64) / downscale_factor;
    // TONY guards ceil() against floating-point round-up.
    let allocated_window_size: u32 =
        (ceil(2 as i32 as f64 * (half_source_window - TONY)) as i32
            + 1 as i32) as u32;
    let mut u: u32 = 0;
    let mut ix: u32 = 0;
    let mut res: *mut flow_interpolation_line_contributions =
        LineContributions_alloc(context, output_line_size, allocated_window_size);
    if res.is_null() {
        flow_context_add_to_callstack(
            context,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            401 as i32,
            (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                b"flow_interpolation_line_contributions_create\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_interpolation_line_contributions;
    }
    let mut negative_area: f64 = 0 as i32 as f64;
    let mut positive_area: f64 = 0 as i32 as f64;
    u = 0 as i32 as u32;
    while u < output_line_size {
        // Center of this output pixel mapped into source coordinates.
        let center_src_pixel: f64 =
            (u as f64 + 0.5f64) / scale_factor - 0.5f64;
        let left_edge: i32 = (floor(center_src_pixel) as i32 as u32)
            .wrapping_sub(
                allocated_window_size
                    .wrapping_sub(1u32)
                    .wrapping_div(2u32),
            ) as i32;
        let right_edge: i32 = (left_edge as u32)
            .wrapping_add(allocated_window_size)
            .wrapping_sub(1u32)
            as i32;
        // Clamp the window to the valid source range.
        let left_src_pixel: u32 = int_max(0 as i32, left_edge) as u32;
        let right_src_pixel: u32 = int_min(
            right_edge,
            input_line_size as i32 - 1 as i32,
        ) as u32;
        // Net weight
        let mut total_weight: f64 = 0.0f64;
        // Sum of negative and positive weights
        let mut total_negative_weight: f64 = 0.0f64;
        let mut total_positive_weight: f64 = 0.0f64;
        let source_pixel_count: u32 = right_src_pixel
            .wrapping_sub(left_src_pixel)
            .wrapping_add(1u32);
        if source_pixel_count > allocated_window_size {
            // Should be impossible by construction; treat as internal corruption.
            flow_interpolation_line_contributions_destroy(context, res);
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Invalid_internal_state,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                426 as i32,
                (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                    b"flow_interpolation_line_contributions_create\x00",
                ))
                .as_ptr(),
            );
            return NULL as *mut flow_interpolation_line_contributions;
        }
        (*(*res).ContribRow.offset(u as isize)).Left = left_src_pixel as i32;
        (*(*res).ContribRow.offset(u as isize)).Right = right_src_pixel as i32;
        let mut weights: *mut f32 = (*(*res).ContribRow.offset(u as isize)).Weights;
        // Sample the kernel at each source pixel in the window.
        ix = left_src_pixel;
        while ix <= right_src_pixel {
            let tx: i32 = ix.wrapping_sub(left_src_pixel) as i32;
            let mut add: f64 =
                Some((*details).filter.expect("non-null function pointer"))
                    .expect("non-null function pointer")(
                    details,
                    downscale_factor * (ix as f64 - center_src_pixel),
                );
            if fabs(add) <= 0.00000002f64 {
                add = 0.0f64
                // Weights below a certain threshold make consistent x-plat
                // integration test results impossible. pos/neg zero, etc.
                // They should be rounded down to zero at the threshold at which results are consistent.
            }
            *weights.offset(tx as isize) = add as f32;
            total_weight += add;
            total_negative_weight += fmin(0 as i32 as f64, add);
            total_positive_weight += fmax(0 as i32 as f64, add);
            ix = ix.wrapping_add(1)
        }
        let mut neg_factor: f32 = 0.;
        let mut pos_factor: f32 = 0.;
        // Default: uniform normalization so the weights sum to 1.
        pos_factor = (1.0f32 as f64 / total_weight) as f32;
        neg_factor = pos_factor;
        //printf("cur= %f cur+= %f cur-= %f desired_sharpen_ratio=%f sharpen_ratio-=%f\n", total_weight, total_positive_weight, total_negative_weight, desired_sharpen_ratio, sharpen_ratio);
        if total_weight <= 0.0f32 as f64 || desired_sharpen_ratio > sharpen_ratio {
            if total_negative_weight < 0.0f32 as f64 {
                if desired_sharpen_ratio < 1.0f32 as f64 {
                    // Rescale positive and negative weights separately so the
                    // negative fraction hits the desired sharpen ratio while the
                    // net weight stays 1.
                    let target_positive_weight: f64 = 1.0f32 as f64
                        / (1.0f32 as f64 - desired_sharpen_ratio);
                    let target_negative_weight: f64 =
                        desired_sharpen_ratio * -target_positive_weight;
                    pos_factor = (target_positive_weight / total_positive_weight) as f32;
                    neg_factor = (target_negative_weight / total_negative_weight) as f32;
                    if total_negative_weight == 0 as i32 as f64 {
                        neg_factor = 1.0f32
                    }
                    //printf("target=%f target-=%f, pos_factor=%f neg_factor=%f\n", total_positive_weight - target_negative_weight, target_negative_weight, pos_factor, neg_factor);
                }
            } else if total_weight == 0.0 {
                // In this situation we have a problem to report
            }
        }
        //printf("\n");
        // Apply the normalization factors and accumulate the global area tallies.
        ix = 0 as i32 as u32;
        while ix < source_pixel_count {
            if *weights.offset(ix as isize) < 0 as i32 as f32 {
                *weights.offset(ix as isize) *= neg_factor;
                negative_area -= *weights.offset(ix as isize) as f64
            } else {
                *weights.offset(ix as isize) *= pos_factor;
                positive_area += *weights.offset(ix as isize) as f64
            }
            ix = ix.wrapping_add(1)
        }
        // Shrink to improve perf & result consistency
        let mut iix: i32 = 0;
        // Shrink region from the right
        iix = source_pixel_count.wrapping_sub(1u32) as i32;
        while iix >= 0 as i32 {
            if *weights.offset(iix as isize) != 0 as i32 as f32 {
                break;
            }
            let ref mut fresh5 = (*(*res).ContribRow.offset(u as isize)).Right;
            *fresh5 -= 1;
            iix -= 1
        }
        // Shrink region from the left
        // (the Weights pointer itself is advanced past leading zeros)
        iix = 0 as i32;
        while iix < source_pixel_count as i32 {
            if *weights.offset(0) != 0 as i32 as f32 {
                break;
            }
            let ref mut fresh6 = (*(*res).ContribRow.offset(u as isize)).Weights;
            *fresh6 = (*fresh6).offset(1);
            weights = weights.offset(1);
            let ref mut fresh7 = (*(*res).ContribRow.offset(u as isize)).Left;
            *fresh7 += 1;
            iix += 1
        }
        u = u.wrapping_add(1)
    }
    // Overall sharpening measure across the whole line.
    (*res).percent_negative = negative_area / positive_area;
    return res;
}
/// Horizontally scales `row_count` rows of `from` (starting at `from_row`)
/// into `to` (starting at `to_row`), using the per-destination-pixel
/// contribution windows and weights in `weights` (indexed by destination x).
///
/// Three code paths: an SSE `__m128` path when both bitmaps have 4 channels,
/// a scalar 3-channel path, and a generic fallback copying `min_channels`
/// channels. Returns false (recording an error on `context`) only when more
/// than 4 channels would need accumulating.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_scale_rows(
    context: *mut flow_c,
    from: *mut flow_bitmap_float,
    from_row: u32,
    to: *mut flow_bitmap_float,
    to_row: u32,
    row_count: u32,
    weights: *mut flow_interpolation_pixel_contributions,
) -> bool {
    let from_step: u32 = (*from).channels;
    let to_step: u32 = (*to).channels;
    let dest_buffer_count: u32 = (*to).w;
    let min_channels: u32 = from_step.min(to_step);
    let mut ndx: u32 = 0;
    // The `avg` accumulator below only has room for 4 channels.
    if min_channels > 4 as i32 as u32 {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            520 as i32,
            (*::std::mem::transmute::<&[u8; 29], &[libc::c_char; 29]>(
                b"flow_bitmap_float_scale_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    let mut avg: [f32; 4] = [0.; 4];
    // if both have alpha, process it
    if from_step == 4 && to_step == 4
    {
        // SIMD path: each pixel is one __m128 (4 packed f32 channels).
        let mut row: u32 = 0;
        while row < row_count {
            let source_offset = ((from_row + row) * (*from).float_stride) as isize;
            let source_buffer: *const __m128 = (*from).pixels.offset(source_offset) as *const __m128;
            let dest_offset = ((to_row + row) * (*to).float_stride) as isize;
            let dest_buffer: *mut __m128 = (*to).pixels.offset(dest_offset) as *mut __m128;
            let dest_buffer: &mut[__m128] = std::slice::from_raw_parts_mut(
                dest_buffer,
                dest_buffer_count as usize
            );
            ndx = 0;
            while ndx < dest_buffer_count {
                let mut sums: __m128 = _mm_set1_ps(0.0);
                let left: i32 = (*weights.offset(ndx as isize)).Left;
                let right: i32 = (*weights.offset(ndx as isize)).Right;
                let weightArray: *const f32 = (*weights.offset(ndx as isize)).Weights;
                // Slice of length right+1 so `source_buffer[left..=right]` is in bounds
                // (assumes Left >= 0, which contribution calc guarantees away from edges).
                let source_buffer: &[__m128] = std::slice::from_raw_parts(
                    source_buffer,
                    (right + 1) as usize
                );
                /* Accumulate each channel */
                let mut i = left;
                while i <= right {
                    let factor: __m128 = _mm_set1_ps(*weightArray.offset((i - left) as isize));
                    // sums += factor * *source_buffer[i as usize];
                    let mid = _mm_mul_ps(factor, source_buffer[i as usize]);
                    sums = _mm_add_ps(sums, mid);
                    i += 1
                }
                dest_buffer[ndx as usize] = sums;
                ndx += 1
            }
            row += 1
        }
    } else if from_step == 3 as i32 as u32
        && to_step == 3 as i32 as u32
    {
        // Scalar 3-channel (BGR) path.
        let mut row_0: u32 = 0 as i32 as u32;
        while row_0 < row_count {
            let source_buffer_0: *const f32 = (*from).pixels.offset(
                from_row
                    .wrapping_add(row_0)
                    .wrapping_mul((*from).float_stride) as isize,
            );
            let dest_buffer_0: *mut f32 = (*to)
                .pixels
                .offset(to_row.wrapping_add(row_0).wrapping_mul((*to).float_stride) as isize);
            ndx = 0 as i32 as u32;
            while ndx < dest_buffer_count {
                let mut bgr: [f32; 3] = [0.0f32, 0.0f32, 0.0f32];
                let left_0: i32 = (*weights.offset(ndx as isize)).Left;
                let right_0: i32 = (*weights.offset(ndx as isize)).Right;
                let weightArray_0: *const f32 = (*weights.offset(ndx as isize)).Weights;
                let mut i_0: i32 = 0;
                /* Accumulate each channel */
                i_0 = left_0;
                while i_0 <= right_0 {
                    let weight: f32 = *weightArray_0.offset((i_0 - left_0) as isize);
                    bgr[0] += weight
                        * *source_buffer_0
                            .offset((i_0 as u32).wrapping_mul(from_step) as isize);
                    bgr[1] += weight
                        * *source_buffer_0.offset(
                            (i_0 as u32)
                                .wrapping_mul(from_step)
                                .wrapping_add(1u32)
                                as isize,
                        );
                    bgr[2] += weight
                        * *source_buffer_0.offset(
                            (i_0 as u32)
                                .wrapping_mul(from_step)
                                .wrapping_add(2u32)
                                as isize,
                        );
                    i_0 += 1
                }
                *dest_buffer_0.offset(ndx.wrapping_mul(to_step) as isize) =
                    bgr[0];
                *dest_buffer_0.offset(
                    ndx.wrapping_mul(to_step)
                        .wrapping_add(1u32)
                        as isize,
                ) = bgr[1];
                *dest_buffer_0.offset(
                    ndx.wrapping_mul(to_step)
                        .wrapping_add(2u32)
                        as isize,
                ) = bgr[2];
                ndx = ndx.wrapping_add(1)
            }
            row_0 = row_0.wrapping_add(1)
        }
    } else {
        // Generic path: mixed channel counts; only min_channels are scaled/copied.
        let mut row_1: u32 = 0 as i32 as u32;
        while row_1 < row_count {
            let source_buffer_1: *const f32 = (*from).pixels.offset(
                from_row
                    .wrapping_add(row_1)
                    .wrapping_mul((*from).float_stride) as isize,
            );
            let dest_buffer_1: *mut f32 = (*to)
                .pixels
                .offset(to_row.wrapping_add(row_1).wrapping_mul((*to).float_stride) as isize);
            ndx = 0 as i32 as u32;
            while ndx < dest_buffer_count {
                avg[0] = 0 as i32 as f32;
                avg[1] = 0 as i32 as f32;
                avg[2] = 0 as i32 as f32;
                avg[3 as i32 as usize] = 0 as i32 as f32;
                let left_1: i32 = (*weights.offset(ndx as isize)).Left;
                let right_1: i32 = (*weights.offset(ndx as isize)).Right;
                let weightArray_1: *const f32 = (*weights.offset(ndx as isize)).Weights;
                /* Accumulate each channel */
                let mut i_1: i32 = left_1;
                while i_1 <= right_1 {
                    let weight_0: f32 = *weightArray_1.offset((i_1 - left_1) as isize);
                    let mut j: u32 = 0 as i32 as u32;
                    while j < min_channels {
                        avg[j as usize] += weight_0
                            * *source_buffer_1.offset(
                                (i_1 as u32)
                                    .wrapping_mul(from_step)
                                    .wrapping_add(j) as isize,
                            );
                        j = j.wrapping_add(1)
                    }
                    i_1 += 1
                }
                let mut j_0: u32 = 0 as i32 as u32;
                while j_0 < min_channels {
                    *dest_buffer_1.offset(ndx.wrapping_mul(to_step).wrapping_add(j_0) as isize) =
                        avg[j_0 as usize];
                    j_0 = j_0.wrapping_add(1)
                }
                ndx = ndx.wrapping_add(1)
            }
            row_1 = row_1.wrapping_add(1)
        }
    }
    return true;
}
/// Scales each of the `length` floats starting at `row` by `coefficient`,
/// in place.
unsafe extern "C" fn multiply_row(
    row: *mut f32,
    length: usize,
    coefficient: f32,
) {
    for offset in 0..length {
        *row.add(offset) *= coefficient;
    }
}
/// Adds the `length` floats of `input_row` element-wise into `mutate_row`.
unsafe extern "C" fn add_row(
    mutate_row: *mut f32,
    input_row: *mut f32,
    length: usize,
) {
    for offset in 0..length {
        *mutate_row.add(offset) += *input_row.add(offset);
    }
}
/// Builds a bitmap header describing the `w`x`h` sub-rectangle of `b` that
/// starts at (`x`, `y`). The returned header SHARES pixel memory with `b`:
/// its `pixels` pointer is offset into `b`'s buffer and the stride is kept,
/// so nothing is copied except the 4-byte matte color. The caller must keep
/// `b` alive for as long as the returned header is used.
/// Returns NULL (recording an error on `c`) when the rectangle exceeds the
/// bounds of `b` or when header allocation fails.
unsafe extern "C" fn crop(
    c: *mut flow_c,
    b: *mut flow_bitmap_bgra,
    x: u32,
    y: u32,
    w: u32,
    h: u32,
) -> *mut flow_bitmap_bgra {
    if h.wrapping_add(y) > (*b).h || w.wrapping_add(x) > (*b).w {
        flow_context_set_error_get_message_buffer(
            c,
            flow_status_code::Invalid_argument,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            632 as i32,
            (*::std::mem::transmute::<&[u8; 5], &[libc::c_char; 5]>(b"crop\x00")).as_ptr(),
        );
        return NULL as *mut flow_bitmap_bgra;
    }
    // Header only: allocates no pixel storage of its own.
    let mut cropped_canvas: *mut flow_bitmap_bgra =
        flow_bitmap_bgra_create_header(c, w as i32, h as i32);
    let bpp: u32 = flow_pixel_format_bytes_per_pixel((*b).fmt);
    if cropped_canvas.is_null() {
        flow_context_add_to_callstack(
            c,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            640 as i32,
            (*::std::mem::transmute::<&[u8; 5], &[libc::c_char; 5]>(b"crop\x00")).as_ptr(),
        );
        return NULL as *mut flow_bitmap_bgra;
    }
    (*cropped_canvas).fmt = (*b).fmt;
    memcpy(
        &mut *(*cropped_canvas)
            .matte_color
            .as_mut_ptr()
            .offset(0) as *mut u8 as *mut libc::c_void,
        &mut *(*b)
            .matte_color
            .as_mut_ptr()
            .offset(0) as *mut u8 as *const libc::c_void,
        ::std::mem::size_of::<[u8; 4]>() as u64,
    );
    (*cropped_canvas).compositing_mode = (*b).compositing_mode;
    // Point into b's pixel buffer at row y, column x — no pixel data is copied.
    (*cropped_canvas).pixels = (*b)
        .pixels
        .offset(y.wrapping_mul((*b).stride) as isize)
        .offset(x.wrapping_mul(bpp) as isize);
    (*cropped_canvas).stride = (*b).stride;
    return cropped_canvas;
}
/// Note: Rust version of `FLOW_error` takes the name of the caller as its third parameter since
/// there does not seem to be a way to get the name of the current or calling function in Rust.
///
/// Records `status_code` in the context's error state and returns the
/// context's message buffer so the caller can append detail text.
fn FLOW_error(context: *mut flow_context, status_code: flow_status_code, caller: &str) -> *mut libc::c_char {
    // Bind the CStrings to locals so they stay alive across the FFI call.
    // The previous `CString::new(..).unwrap().as_ptr()` pattern dropped each
    // temporary CString at the end of its statement, leaving `file`/`func`
    // dangling (use-after-free) by the time the FFI function read them.
    let file = CString::new(file!()).unwrap();
    let func = CString::new(caller).unwrap();
    unsafe {
        flow_context_set_error_get_message_buffer(
            context,
            status_code,
            file.as_ptr(),
            line!() as i32,
            func.as_ptr() // was __func__ in C macro
        )
    }
}
/// Executes a 2D scale of `input` into the (`info.x`, `info.y`, `info.w`,
/// `info.h`) region of `uncropped_canvas`, compositing in linear light.
///
/// Strategy: vertical scaling is done first, one output row at a time, by
/// weighted-summing the contributing input rows (kept in a small ring of
/// linearized row buffers so each input row is converted from sRGB at most
/// once per window); the vertically-combined row is then scaled horizontally
/// via `flow_bitmap_float_scale_rows` and composited back to sRGB.
///
/// All temporaries are parented to `details` so a single `flow_destroy`
/// releases everything on both error and success paths. Returns false after
/// recording/propagating an error on `c`.
///
/// NOTE(review): the hard-coded "lib/graphics.c" paths and line numbers are
/// carried over from the C source this was transpiled from.
#[no_mangle]
pub unsafe extern "C" fn flow_node_execute_scale2d_render1d(
    c: *mut flow_c,
    input: *mut flow_bitmap_bgra,
    uncropped_canvas: *mut flow_bitmap_bgra,
    info: *mut flow_nodeinfo_scale2d_render_to_canvas1d,
) -> bool {
    if (*info).h.wrapping_add((*info).y) > (*uncropped_canvas).h
        || (*info).w.wrapping_add((*info).x) > (*uncropped_canvas).w
    {
        FLOW_error(c, flow_status_code::Invalid_argument, "flow_node_execute_scale2d_render1d");
        return false;
    }
    // Render directly into the canvas when the target covers it entirely;
    // otherwise build a crop header sharing the canvas's pixel memory.
    let cropped_canvas: *mut flow_bitmap_bgra = if (*info).x == 0
        && (*info).y == 0
        && (*info).w == (*uncropped_canvas).w
        && (*info).h == (*uncropped_canvas).h
    {
        uncropped_canvas
    } else {
        crop(
            c,
            uncropped_canvas,
            (*info).x,
            (*info).y,
            (*info).w,
            (*info).h,
        )
    };
    if cropped_canvas.is_null() {
        flow_context_add_to_callstack(
            c,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            665 as i32,
            (*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
                b"flow_node_execute_scale2d_render1d\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Only 32-bit BGRA/BGRx formats are supported on both sides.
    let input_fmt: flow_pixel_format = flow_effective_pixel_format(input);
    let canvas_fmt: flow_pixel_format = flow_effective_pixel_format(cropped_canvas);
    if input_fmt as u32 != flow_bgra32 as i32 as u32
        && input_fmt as u32 != flow_bgr32 as i32 as u32
    {
        FLOW_error(c, flow_status_code::Not_implemented, "flow_node_execute_scale2d_render1d");
        return false;
    }
    if canvas_fmt as u32 != flow_bgra32 as i32 as u32
        && canvas_fmt as u32 != flow_bgr32 as i32 as u32
    {
        FLOW_error(c, flow_status_code::Not_implemented, "flow_node_execute_scale2d_render1d");
        return false;
    }
    let mut colorcontext: flow_colorcontext_info = flow_colorcontext_info {
        byte_to_float: [0.; 256],
        floatspace: flow_working_floatspace_srgb,
        apply_srgb: false,
        apply_gamma: false,
        gamma: 0.,
        gamma_inverse: 0.,
    };
    flow_colorcontext_init(
        c,
        &mut colorcontext,
        (*info).scale_in_colorspace,
        0 as i32 as f32,
        0 as i32 as f32,
        0 as i32 as f32,
    );
    // Use details as a parent structure to ensure everything gets freed
    let mut details: *mut flow_interpolation_details =
        flow_interpolation_details_create_from(c, (*info).interpolation_filter);
    if details.is_null() {
        flow_context_add_to_callstack(
            c,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            686 as i32,
            (*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
                b"flow_node_execute_scale2d_render1d\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    (*details).sharpen_percent_goal = (*info).sharpen_percent_goal;
    // Per-row contribution tables: contrib_v maps output rows to input rows,
    // contrib_h maps output columns to input columns.
    let mut contrib_v: *mut flow_interpolation_line_contributions =
        NULL as *mut flow_interpolation_line_contributions;
    let mut contrib_h: *mut flow_interpolation_line_contributions =
        NULL as *mut flow_interpolation_line_contributions;
    flow_context_profiler_start(
        c,
        b"contributions_calc\x00" as *const u8 as *const libc::c_char,
        0 as i32 != 0,
    );
    contrib_v = flow_interpolation_line_contributions_create(c, (*info).h, (*input).h, details);
    if contrib_v.is_null()
        || !flow_set_owner(
            c,
            contrib_v as *mut libc::c_void,
            details as *mut libc::c_void,
        )
    {
        flow_destroy(
            c,
            details as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            697 as i32,
        );
        flow_context_add_to_callstack(
            c,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            698 as i32,
            (*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
                b"flow_node_execute_scale2d_render1d\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    contrib_h = flow_interpolation_line_contributions_create(c, (*info).w, (*input).w, details);
    if contrib_h.is_null()
        || !flow_set_owner(
            c,
            contrib_h as *mut libc::c_void,
            details as *mut libc::c_void,
        )
    {
        flow_destroy(
            c,
            details as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            702 as i32,
        );
        flow_context_add_to_callstack(
            c,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            703 as i32,
            (*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
                b"flow_node_execute_scale2d_render1d\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    flow_context_profiler_stop(
        c,
        b"contributions_calc\x00" as *const u8 as *const libc::c_char,
        1 as i32 != 0,
        0 as i32 != 0,
    );
    flow_context_profiler_start(
        c,
        b"create_bitmap_float (buffers)\x00" as *const u8 as *const libc::c_char,
        0 as i32 != 0,
    );
    // source_buf is a header only: its pixels pointer is re-aimed at ring-buffer
    // rows (and later at the combined output row) as the loop proceeds.
    let mut source_buf: *mut flow_bitmap_float = flow_bitmap_float_create_header(
        c,
        (*input).w as i32,
        1 as i32,
        4 as i32,
    );
    if source_buf.is_null()
        || !flow_set_owner(
            c,
            source_buf as *mut libc::c_void,
            details as *mut libc::c_void,
        )
    {
        flow_destroy(
            c,
            details as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            711 as i32,
        );
        flow_context_add_to_callstack(
            c,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            712 as i32,
            (*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
                b"flow_node_execute_scale2d_render1d\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    let mut dest_buf: *mut flow_bitmap_float = flow_bitmap_float_create(
        c,
        (*info).w as i32,
        1 as i32,
        4 as i32,
        true,
    );
    if dest_buf.is_null()
        || !flow_set_owner(
            c,
            dest_buf as *mut libc::c_void,
            details as *mut libc::c_void,
        )
    {
        flow_destroy(
            c,
            details as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            716 as i32,
        );
        flow_context_add_to_callstack(
            c,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            717 as i32,
            (*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
                b"flow_node_execute_scale2d_render1d\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    (*source_buf).alpha_meaningful =
        input_fmt as u32 == flow_bgra32 as i32 as u32;
    (*dest_buf).alpha_meaningful = (*source_buf).alpha_meaningful;
    (*source_buf).alpha_premultiplied = (*source_buf).channels == 4 as i32 as u32;
    (*dest_buf).alpha_premultiplied = (*source_buf).alpha_premultiplied;
    flow_context_profiler_stop(
        c,
        b"create_bitmap_float (buffers)\x00" as *const u8 as *const libc::c_char,
        1 as i32 != 0,
        0 as i32 != 0,
    );
    // Determine how many rows we need to buffer
    let mut max_input_rows: i32 = 0 as i32;
    let mut i: u32 = 0 as i32 as u32;
    while i < (*contrib_v).LineLength {
        let inputs: i32 = (*(*contrib_v).ContribRow.offset(i as isize)).Right
            - (*(*contrib_v).ContribRow.offset(i as isize)).Left
            + 1 as i32;
        if inputs > max_input_rows {
            max_input_rows = inputs
        }
        i = i.wrapping_add(1)
    }
    // Allocate space
    // buf holds max_input_rows linearized input rows plus one extra row-sized
    // slot (the output accumulator). rows[] / row_coefficients[] / row_indexes[]
    // track, per ring slot: its start pointer, the weight currently multiplied
    // into it, and which input row it holds (-1 = empty).
    let row_floats: usize = (4u32).wrapping_mul((*input).w) as usize;
    let buf: *mut f32 = flow_context_malloc(
        c,
        ::std::mem::size_of::<f32>()
            .wrapping_mul(row_floats)
            .wrapping_mul((max_input_rows + 1) as usize),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        details as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        737 as i32,
    ) as *mut f32;
    let rows: *mut *mut f32 = flow_context_malloc(
        c,
        (::std::mem::size_of::<*mut f32>())
            .wrapping_mul(max_input_rows as usize),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        details as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        738 as i32,
    ) as *mut *mut f32;
    let row_coefficients: *mut f32 = flow_context_malloc(
        c,
        ::std::mem::size_of::<f32>()
            .wrapping_mul(max_input_rows as usize),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        details as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        739 as i32,
    ) as *mut f32;
    let row_indexes: *mut i32 = flow_context_malloc(
        c,
        ::std::mem::size_of::<i32>()
            .wrapping_mul(max_input_rows as usize),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        details as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        740 as i32,
    ) as *mut i32;
    if buf.is_null() || rows.is_null() || row_coefficients.is_null() || row_indexes.is_null() {
        flow_destroy(
            c,
            details as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            742 as i32,
        );
        flow_context_add_to_callstack(
            c,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            743 as i32,
            (*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
                b"flow_node_execute_scale2d_render1d\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // The last row-sized slot in buf accumulates the vertically-scaled output row.
    let output_address: *mut f32 = &mut *buf
        .offset(row_floats.wrapping_mul(max_input_rows as usize) as isize)
        as *mut f32;
    let mut i_0: i32 = 0 as i32;
    while i_0 < max_input_rows {
        let ref mut fresh8 = *rows.offset(i_0 as isize);
        *fresh8 = &mut *buf.offset(
            (4u32)
                .wrapping_mul((*input).w)
                .wrapping_mul(i_0 as u32) as isize,
        ) as *mut f32;
        *row_coefficients.offset(i_0 as isize) = 1 as i32 as f32;
        *row_indexes.offset(i_0 as isize) = -(1 as i32);
        i_0 += 1
    }
    // For each output row: vertically combine the contributing input rows,
    // then scale the combined row horizontally and composite to the canvas.
    let mut out_row: u32 = 0 as i32 as u32;
    while out_row < (*cropped_canvas).h {
        let contrib: flow_interpolation_pixel_contributions =
            *(*contrib_v).ContribRow.offset(out_row as isize);
        // Clear output row
        ::libc::memset(
            output_address as *mut libc::c_void,
            0 as i32,
            ::std::mem::size_of::<f32>().wrapping_mul(row_floats),
        );
        let mut input_row: i32 = contrib.Left;
        while input_row <= contrib.Right {
            // Try to find row in buffer if already loaded
            let mut loaded: bool = false;
            let mut active_buf_ix: i32 = -(1 as i32);
            let mut buf_row: i32 = 0 as i32;
            while buf_row < max_input_rows {
                if *row_indexes.offset(buf_row as isize) == input_row {
                    active_buf_ix = buf_row;
                    loaded = true;
                    break;
                } else {
                    buf_row += 1
                }
            }
            // Not loaded?
            if !loaded {
                // Reuse the first slot holding a row below the current window
                // (such a row can no longer contribute to any later output row).
                let mut buf_row_0: i32 = 0 as i32; // Buffer too small!
                while buf_row_0 < max_input_rows {
                    if *row_indexes.offset(buf_row_0 as isize) < contrib.Left {
                        active_buf_ix = buf_row_0;
                        loaded = false;
                        break;
                    } else {
                        buf_row_0 += 1
                    }
                }
            }
            if active_buf_ix < 0 as i32 {
                flow_destroy(
                    c,
                    details as *mut libc::c_void,
                    b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                    779 as i32,
                );
                flow_context_set_error_get_message_buffer(
                    c,
                    flow_status_code::Invalid_internal_state,
                    b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                    780 as i32,
                    (*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
                        b"flow_node_execute_scale2d_render1d\x00",
                    ))
                    .as_ptr(),
                );
                return false;
            }
            if !loaded {
                // Load row
                (*source_buf).pixels = *rows.offset(active_buf_ix as isize);
                flow_context_profiler_start(
                    c,
                    b"convert_srgb_to_linear\x00" as *const u8 as *const libc::c_char,
                    0 as i32 != 0,
                );
                if !flow_bitmap_float_convert_srgb_to_linear(
                    c,
                    &mut colorcontext,
                    input,
                    input_row as u32,
                    source_buf,
                    0 as i32 as u32,
                    1 as i32 as u32,
                ) {
                    flow_destroy(
                        c,
                        details as *mut libc::c_void,
                        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                        789 as i32,
                    );
                    flow_context_add_to_callstack(
                        c,
                        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                        790 as i32,
                        (*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
                            b"flow_node_execute_scale2d_render1d\x00",
                        ))
                        .as_ptr(),
                    );
                    return false;
                }
                flow_context_profiler_stop(
                    c,
                    b"convert_srgb_to_linear\x00" as *const u8 as *const libc::c_char,
                    1 as i32 != 0,
                    0 as i32 != 0,
                );
                *row_coefficients.offset(active_buf_ix as isize) =
                    1 as i32 as f32;
                *row_indexes.offset(active_buf_ix as isize) = input_row;
                loaded = true
            }
            let weight: f32 =
                *contrib.Weights.offset((input_row - contrib.Left) as isize);
            // Skip negligible weights; otherwise scale the buffered row to the
            // needed weight (relative to whatever weight it already carries)
            // and accumulate it into the output row.
            if fabs(weight as f64) > 0.00000002f64 {
                // Apply coefficient, update tracking
                let delta_coefficient: f32 =
                    weight / *row_coefficients.offset(active_buf_ix as isize);
                multiply_row(
                    *rows.offset(active_buf_ix as isize),
                    row_floats,
                    delta_coefficient,
                );
                *row_coefficients.offset(active_buf_ix as isize) = weight;
                // Add row
                add_row(
                    output_address,
                    *rows.offset(active_buf_ix as isize),
                    row_floats,
                );
            }
            input_row += 1
        }
        // The container now points to the row which has been vertically scaled
        (*source_buf).pixels = output_address;
        // Now scale horizontally!
        flow_context_profiler_start(
            c,
            b"ScaleBgraFloatRows\x00" as *const u8 as *const libc::c_char,
            0 as i32 != 0,
        );
        if !flow_bitmap_float_scale_rows(
            c,
            source_buf,
            0 as i32 as u32,
            dest_buf,
            0 as i32 as u32,
            1 as i32 as u32,
            (*contrib_h).ContribRow,
        ) {
            flow_destroy(
                c,
                details as *mut libc::c_void,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                816 as i32,
            );
            flow_context_add_to_callstack(
                c,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                817 as i32,
                (*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
                    b"flow_node_execute_scale2d_render1d\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        flow_context_profiler_stop(
            c,
            b"ScaleBgraFloatRows\x00" as *const u8 as *const libc::c_char,
            1 as i32 != 0,
            0 as i32 != 0,
        );
        if !flow_bitmap_float_composite_linear_over_srgb(
            c,
            &mut colorcontext,
            dest_buf,
            0 as i32 as u32,
            cropped_canvas,
            out_row,
            1 as i32 as u32,
            false,
        ) {
            flow_destroy(
                c,
                details as *mut libc::c_void,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                822 as i32,
            );
            flow_context_add_to_callstack(
                c,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                823 as i32,
                (*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
                    b"flow_node_execute_scale2d_render1d\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        out_row = out_row.wrapping_add(1)
    }
    // Free the crop header only if we made one; never free the caller's canvas.
    flow_destroy(
        c,
        if cropped_canvas == uncropped_canvas {
            0 as *mut flow_bitmap_bgra
        } else {
            cropped_canvas
        } as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        826 as i32,
    );
    flow_destroy(
        c,
        details as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        827 as i32,
    );
    return true;
}
/// Allocates a convolution kernel of width `2*radius + 1` together with the
/// scratch buffer used during convolution (room for `radius + 2` pixels of up
/// to 4 channels each). Kernel weights are calloc'd to zero — already a valid
/// "normalized" state. All allocations are owned by `context`.
/// Returns NULL and records Out_of_memory when any allocation fails (partial
/// allocations are freed first).
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_create(
    context: *mut flow_c,
    radius: u32,
) -> *mut flow_convolution_kernel {
    let mut k: *mut flow_convolution_kernel = flow_context_calloc(
        context,
        1 as i32 as usize,
        ::std::mem::size_of::<flow_convolution_kernel>(),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        832 as i32,
    ) as *mut flow_convolution_kernel;
    // For the actual array;
    let a: *mut f32 = flow_context_calloc(
        context,
        radius
            .wrapping_mul(2u32)
            .wrapping_add(1u32) as usize,
        ::std::mem::size_of::<f32>(),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        834 as i32,
    ) as *mut f32;
    // we assume a maximum of 4 channels are going to need buffering during convolution
    let buf: *mut f32 = flow_context_malloc(
        context,
        (radius as usize)
            .wrapping_add(2)
            .wrapping_mul(4)
            .wrapping_mul(::std::mem::size_of::<f32>()),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        836 as i32,
    ) as *mut f32; // nothing to do here, zeroes are as normalized as you can get ;)
    if k.is_null() || a.is_null() || buf.is_null() {
        // Free whichever allocations succeeded before reporting the failure.
        flow_deprecated_free(
            context,
            k as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            839 as i32,
        );
        flow_deprecated_free(
            context,
            a as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            840 as i32,
        );
        flow_deprecated_free(
            context,
            buf as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            841 as i32,
        );
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            842 as i32,
            (*::std::mem::transmute::<&[u8; 31], &[libc::c_char; 31]>(
                b"flow_convolution_kernel_create\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_convolution_kernel;
    }
    (*k).kernel = a;
    (*k).width = radius
        .wrapping_mul(2u32)
        .wrapping_add(1u32);
    (*k).buffer = buf;
    (*k).radius = radius;
    return k;
}
/// Frees a kernel created by `flow_convolution_kernel_create`: its weight
/// array, its scratch buffer, then the struct itself. The freed pointers are
/// nulled first so a dangling kernel pointer cannot double-free them.
/// NOTE(review): a NULL `kernel` still reaches the final free — assumes
/// `flow_deprecated_free` tolerates NULL (free()-like semantics); confirm.
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_destroy(
    context: *mut flow_c,
    mut kernel: *mut flow_convolution_kernel,
) {
    if !kernel.is_null() {
        flow_deprecated_free(
            context,
            (*kernel).kernel as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            854 as i32,
        );
        flow_deprecated_free(
            context,
            (*kernel).buffer as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            855 as i32,
        );
        (*kernel).kernel = NULL as *mut f32;
        (*kernel).buffer = NULL as *mut f32
    }
    flow_deprecated_free(
        context,
        kernel as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        859 as i32,
    );
}
/// Creates a kernel of width `2*radius + 1` and fills each tap with the
/// gaussian weight (std dev `stdDev`) for its distance from the center tap.
/// Returns NULL when allocation fails (error already recorded on `context`).
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_create_gaussian(
    context: *mut flow_c,
    stdDev: f64,
    radius: u32,
) -> *mut flow_convolution_kernel {
    let kernel = flow_convolution_kernel_create(context, radius);
    if !kernel.is_null() {
        for idx in 0..(*kernel).width {
            // Distance of this tap from the center (index == radius).
            let distance = (radius as i32 - idx as i32).abs() as f64;
            *(*kernel).kernel.offset(idx as isize) = ir_gaussian(distance, stdDev) as f32;
        }
    }
    kernel
}
/// Returns the sum of all kernel taps, accumulated in f64 to limit rounding
/// error.
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_sum(
    kernel: *mut flow_convolution_kernel,
) -> f64 {
    let mut total = 0.0f64;
    for idx in 0..(*kernel).width {
        total += *(*kernel).kernel.offset(idx as isize) as f64;
    }
    total
}
/// Rescales every tap so the kernel's total weight equals `desiredSum`.
/// A zero-sum kernel is left untouched (no finite scale factor exists).
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_normalize(
    kernel: *mut flow_convolution_kernel,
    desiredSum: f32,
) {
    let current_sum = flow_convolution_kernel_sum(kernel);
    if current_sum == 0.0f64 {
        return;
    }
    let factor = (desiredSum as f64 / current_sum) as f32;
    for idx in 0..(*kernel).width {
        *(*kernel).kernel.offset(idx as isize) *= factor;
    }
}
/// Builds a gaussian kernel and rescales it so its taps sum to 1.0.
/// Returns NULL when the underlying allocation failed.
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_create_gaussian_normalized(
    context: *mut flow_c,
    stdDev: f64,
    radius: u32,
) -> *mut flow_convolution_kernel {
    let kernel = flow_convolution_kernel_create_gaussian(context, stdDev, radius);
    if kernel.is_null() {
        return kernel;
    }
    flow_convolution_kernel_normalize(kernel, 1.0f32);
    kernel
}
/// Builds an unsharp-mask style sharpening kernel: start from a gaussian,
/// negate every tap except the center, set the center tap to
/// `2 * sum - center`, then renormalize so the taps sum to 1.0.
/// Returns NULL when the underlying allocation failed.
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_create_gaussian_sharpen(
    context: *mut flow_c,
    stdDev: f64,
    radius: u32,
) -> *mut flow_convolution_kernel {
    let kernel = flow_convolution_kernel_create_gaussian(context, stdDev, radius);
    if !kernel.is_null() {
        let total = flow_convolution_kernel_sum(kernel);
        for idx in 0..(*kernel).width {
            let tap = (*kernel).kernel.offset(idx as isize);
            if idx == radius {
                // Center tap: reflect around the full gaussian weight.
                *tap = (2.0f64 * total - *tap as f64) as f32;
            } else {
                *tap = -*tap;
            }
        }
        flow_convolution_kernel_normalize(kernel, 1.0f32);
    }
    kernel
}
/// Convolves rows of `buf` against `kernel`, in place, touching the first
/// `convolve_channels` channels of each pixel. Processes `row_count` rows
/// starting at `from_row` (all remaining rows when `row_count` < 0).
///
/// The kernel's scratch buffer is used as a circular queue holding
/// `radius + 1` pending results, so the source row can be overwritten while
/// later pixels still read its original values. Near the edges only the
/// in-bounds taps are sampled and the result is renormalized by the weight
/// actually used (`wrap_mode` is fixed at 0 here, so the extend-edge branch
/// is effectively dead code). When the kernel carries change thresholds, a
/// pixel whose summed channel change falls outside
/// [threshold_min, threshold_max] keeps its original value.
///
/// Rows narrower than `radius + 1` are left untouched. Always returns true.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_convolve_rows(
    _context: *mut flow_c,
    buf: *mut flow_bitmap_float,
    kernel: *mut flow_convolution_kernel,
    convolve_channels: u32,
    from_row: u32,
    row_count: i32,
) -> bool {
    let radius: u32 = (*kernel).radius;
    let threshold_min: f32 = (*kernel).threshold_min_change;
    let threshold_max: f32 = (*kernel).threshold_max_change;
    // Do nothing unless the image is at least half as wide as the kernel.
    if (*buf).w < radius.wrapping_add(1u32) {
        return true;
    }
    let buffer_count: u32 = radius.wrapping_add(1u32);
    let w: u32 = (*buf).w;
    let int_w: i32 = (*buf).w as i32;
    let step: u32 = (*buf).channels;
    let until_row: u32 = if row_count < 0 as i32 {
        (*buf).h
    } else {
        from_row.wrapping_add(row_count as u32)
    };
    let ch_used: u32 = convolve_channels;
    let buffer: *mut f32 = (*kernel).buffer;
    // `avg` lives directly after the circular queue inside the kernel's scratch buffer.
    let avg: *mut f32 = &mut *(*kernel)
        .buffer
        .offset(buffer_count.wrapping_mul(ch_used) as isize)
        as *mut f32;
    let kern: *const f32 = (*kernel).kernel;
    let wrap_mode: i32 = 0 as i32;
    let mut row: u32 = from_row;
    while row < until_row {
        let source_buffer: *mut f32 = &mut *(*buf)
            .pixels
            .offset(row.wrapping_mul((*buf).float_stride) as isize)
            as *mut f32;
        let mut circular_idx: i32 = 0 as i32;
        let mut ndx: u32 = 0 as i32 as u32;
        // Iterate buffer_count extra steps past the row end to drain the queue.
        while ndx < w.wrapping_add(buffer_count) {
            // Flush old value
            if ndx >= buffer_count {
                memcpy(
                    &mut *source_buffer
                        .offset(ndx.wrapping_sub(buffer_count).wrapping_mul(step) as isize)
                        as *mut f32 as *mut libc::c_void,
                    &mut *buffer
                        .offset((circular_idx as u32).wrapping_mul(ch_used) as isize)
                        as *mut f32 as *const libc::c_void,
                    (ch_used as u64)
                        .wrapping_mul(::std::mem::size_of::<f32>() as u64),
                );
            }
            // Calculate and enqueue new value
            if ndx < w {
                let left: i32 = ndx.wrapping_sub(radius) as i32;
                let right: i32 = ndx.wrapping_add(radius) as i32;
                let mut i: i32 = 0;
                memset(
                    avg as *mut libc::c_void,
                    0 as i32,
                    (::std::mem::size_of::<f32>() as u64)
                        .wrapping_mul(ch_used as u64),
                );
                if left < 0 as i32 || right >= w as i32 {
                    if wrap_mode == 0 as i32 {
                        // Only sample what's present, and fix the average later.
                        let mut total_weight: f32 = 0 as i32 as f32;
                        /* Accumulate each channel */
                        i = left;
                        while i <= right {
                            // NOTE(review): `i > 0` also excludes column 0 from edge
                            // windows (not just out-of-range taps) — verify against
                            // the C original whether `i >= 0` was intended.
                            if i > 0 as i32 && i < int_w {
                                let weight: f32 = *kern.offset((i - left) as isize);
                                total_weight += weight;
                                let mut j: u32 = 0 as i32 as u32;
                                while j < ch_used {
                                    *avg.offset(j as isize) += weight
                                        * *source_buffer.offset(
                                            (i as u32).wrapping_mul(step).wrapping_add(j)
                                                as isize,
                                        );
                                    j = j.wrapping_add(1)
                                }
                            }
                            i += 1
                        }
                        let mut j_0: u32 = 0 as i32 as u32;
                        while j_0 < ch_used {
                            *avg.offset(j_0 as isize) = *avg.offset(j_0 as isize) / total_weight;
                            j_0 = j_0.wrapping_add(1)
                        }
                    } else if wrap_mode == 1 as i32 {
                        // Extend last pixel to be used for all missing inputs
                        /* Accumulate each channel */
                        i = left;
                        while i <= right {
                            let weight_0: f32 = *kern.offset((i - left) as isize);
                            let ix: u32 = if i > int_w - 1 as i32 {
                                (int_w) - 1 as i32
                            } else if i < 0 as i32 {
                                0 as i32
                            } else {
                                i
                            } as u32;
                            let mut j_1: u32 = 0 as i32 as u32;
                            while j_1 < ch_used {
                                *avg.offset(j_1 as isize) += weight_0
                                    * *source_buffer
                                        .offset(ix.wrapping_mul(step).wrapping_add(j_1) as isize);
                                j_1 = j_1.wrapping_add(1)
                            }
                            i += 1
                        }
                    }
                } else {
                    // Interior pixel: whole kernel window is in bounds.
                    /* Accumulate each channel */
                    i = left;
                    while i <= right {
                        let weight_1: f32 = *kern.offset((i - left) as isize);
                        let mut j_2: u32 = 0 as i32 as u32;
                        while j_2 < ch_used {
                            *avg.offset(j_2 as isize) += weight_1
                                * *source_buffer.offset(
                                    (i as u32).wrapping_mul(step).wrapping_add(j_2)
                                        as isize,
                                );
                            j_2 = j_2.wrapping_add(1)
                        }
                        i += 1
                    }
                }
                // Enqueue difference
                memcpy(
                    &mut *buffer
                        .offset((circular_idx as u32).wrapping_mul(ch_used) as isize)
                        as *mut f32 as *mut libc::c_void,
                    avg as *const libc::c_void,
                    (ch_used as u64)
                        .wrapping_mul(::std::mem::size_of::<f32>() as u64),
                );
                if threshold_min > 0 as i32 as f32
                    || threshold_max > 0 as i32 as f32
                {
                    // Measure the total per-channel change; outside the threshold
                    // band, re-enqueue the ORIGINAL pixel instead of the result.
                    let mut change: f32 = 0 as i32 as f32;
                    let mut j_3: u32 = 0 as i32 as u32;
                    while j_3 < ch_used {
                        change += fabs(
                            (*source_buffer
                                .offset(ndx.wrapping_mul(step).wrapping_add(j_3) as isize)
                                - *avg.offset(j_3 as isize))
                                as f64,
                        ) as f32;
                        j_3 = j_3.wrapping_add(1)
                    }
                    if change < threshold_min || change > threshold_max {
                        memcpy(
                            &mut *buffer.offset(
                                (circular_idx as u32).wrapping_mul(ch_used) as isize,
                            ) as *mut f32
                                as *mut libc::c_void,
                            &mut *source_buffer.offset(ndx.wrapping_mul(step) as isize)
                                as *mut f32
                                as *const libc::c_void,
                            (ch_used as u64)
                                .wrapping_mul(
                                    ::std::mem::size_of::<f32>() as u64
                                ),
                        );
                    }
                }
            }
            circular_idx = ((circular_idx + 1 as i32) as u32)
                .wrapping_rem(buffer_count) as i32;
            ndx = ndx.wrapping_add(1)
        }
        row = row.wrapping_add(1)
    }
    return true;
}
/// Applies `passes` iterations of a box blur (window width `2*radius + 1`) to
/// rows of `image`, in place, touching the first `convolve_channels` channels
/// of each pixel. Processes `row_count` rows starting at `from_row` (all
/// remaining rows when `row_count` < 0).
///
/// Uses a running per-channel sum so each pixel costs O(channels), not
/// O(radius). `work_buffer` serves as a circular queue of `radius + 1`
/// pending results so the row can be overwritten while later pixels still
/// read the original values. Near the edges, `count` tracks how many samples
/// are actually in the window so the average stays correct; interior pixels
/// use the precomputed `std_factor = 1 / (2*radius + 1)`. Always returns true.
unsafe extern "C" fn BitmapFloat_boxblur_rows(
    _context: *mut flow_c,
    image: *mut flow_bitmap_float,
    radius: u32,
    passes: u32,
    convolve_channels: u32,
    work_buffer: *mut f32,
    from_row: u32,
    row_count: i32,
) -> bool {
    let buffer_count: u32 = radius.wrapping_add(1u32);
    let w: u32 = (*image).w;
    let step: u32 = (*image).channels;
    let until_row: u32 = if row_count < 0 as i32 {
        (*image).h
    } else {
        from_row.wrapping_add(row_count as u32)
    };
    let ch_used: u32 = (*image).channels;
    let buffer: *mut f32 = work_buffer;
    let std_count: u32 = radius
        .wrapping_mul(2u32)
        .wrapping_add(1u32);
    let std_factor: f32 = 1.0f32 / std_count as f32;
    let mut row: u32 = from_row;
    while row < until_row {
        let source_buffer: *mut f32 = &mut *(*image)
            .pixels
            .offset(row.wrapping_mul((*image).float_stride) as isize)
            as *mut f32;
        let mut pass_index: u32 = 0 as i32 as u32;
        while pass_index < passes {
            let mut circular_idx: i32 = 0 as i32;
            let mut sum: [f32; 4] = [
                0 as i32 as f32,
                0 as i32 as f32,
                0 as i32 as f32,
                0 as i32 as f32,
            ];
            let mut count: u32 = 0 as i32 as u32;
            // Prime the running sum with the first `radius` pixels.
            let mut ndx: u32 = 0 as i32 as u32;
            while ndx < radius {
                let mut ch: u32 = 0 as i32 as u32;
                while ch < convolve_channels {
                    sum[ch as usize] +=
                        *source_buffer.offset(ndx.wrapping_mul(step).wrapping_add(ch) as isize);
                    ch = ch.wrapping_add(1)
                }
                count = count.wrapping_add(1);
                ndx = ndx.wrapping_add(1)
            }
            // Iterate buffer_count steps past the row end to drain the queue.
            let mut ndx_0: u32 = 0 as i32 as u32;
            while ndx_0 < w.wrapping_add(buffer_count) {
                // Pixels
                if ndx_0 >= buffer_count {
                    // same as ndx > radius
                    // Remove trailing item from average
                    let mut ch_0: u32 = 0 as i32 as u32;
                    while ch_0 < convolve_channels {
                        sum[ch_0 as usize] -= *source_buffer.offset(
                            ndx_0
                                .wrapping_sub(radius)
                                .wrapping_sub(1u32)
                                .wrapping_mul(step)
                                .wrapping_add(ch_0) as isize,
                        );
                        ch_0 = ch_0.wrapping_add(1)
                    }
                    count = count.wrapping_sub(1);
                    // Flush old value
                    memcpy(
                        &mut *source_buffer
                            .offset(ndx_0.wrapping_sub(buffer_count).wrapping_mul(step) as isize)
                            as *mut f32 as *mut libc::c_void,
                        &mut *buffer
                            .offset((circular_idx as u32).wrapping_mul(ch_used) as isize)
                            as *mut f32 as *const libc::c_void,
                        (ch_used as u64)
                            .wrapping_mul(::std::mem::size_of::<f32>() as u64),
                    );
                }
                // Calculate and enqueue new value
                if ndx_0 < w {
                    if ndx_0 < w.wrapping_sub(radius) {
                        // Leading edge of the window is still in bounds: add it.
                        let mut ch_1: u32 = 0 as i32 as u32;
                        while ch_1 < convolve_channels {
                            sum[ch_1 as usize] += *source_buffer.offset(
                                ndx_0
                                    .wrapping_add(radius)
                                    .wrapping_mul(step)
                                    .wrapping_add(ch_1) as isize,
                            );
                            ch_1 = ch_1.wrapping_add(1)
                        }
                        count = count.wrapping_add(1)
                    }
                    // Enqueue averaged value
                    if count != std_count {
                        // Edge window: divide by the actual sample count.
                        let mut ch_2: u32 = 0 as i32 as u32;
                        while ch_2 < convolve_channels {
                            *buffer.offset(
                                (circular_idx as u32)
                                    .wrapping_mul(ch_used)
                                    .wrapping_add(ch_2) as isize,
                            ) = sum[ch_2 as usize] / count as f32;
                            ch_2 = ch_2.wrapping_add(1)
                            // Recompute factor
                        }
                    } else {
                        // Full window: multiply by the precomputed reciprocal.
                        let mut ch_3: u32 = 0 as i32 as u32;
                        while ch_3 < convolve_channels {
                            *buffer.offset(
                                (circular_idx as u32)
                                    .wrapping_mul(ch_used)
                                    .wrapping_add(ch_3) as isize,
                            ) = sum[ch_3 as usize] * std_factor;
                            ch_3 = ch_3.wrapping_add(1)
                        }
                    }
                }
                circular_idx = ((circular_idx + 1 as i32) as u32)
                    .wrapping_rem(buffer_count) as i32;
                ndx_0 = ndx_0.wrapping_add(1)
            }
            pass_index = pass_index.wrapping_add(1)
        }
        row = row.wrapping_add(1)
    }
    return true;
}
unsafe extern "C" fn BitmapFloat_boxblur_misaligned_rows(
context: *mut flow_c,
image: *mut flow_bitmap_float,
radius: u32,
align: i32,
convolve_channels: u32,
work_buffer: *mut f32,
from_row: u32,
row_count: i32,
) -> bool {
if align != 1 as i32 && align != -(1 as i32) {
flow_context_set_error_get_message_buffer(
context,
flow_status_code::Invalid_internal_state,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
1088 as i32,
(*::std::mem::transmute::<&[u8; 36], &[libc::c_char; 36]>(
b"BitmapFloat_boxblur_misaligned_rows\x00",
))
.as_ptr(),
);
return false;
}
let buffer_count: u32 = radius.wrapping_add(2u32);
let w: u32 = (*image).w;
let step: u32 = (*image).channels;
let until_row: u32 = if row_count < 0 as i32 {
(*image).h
} else {
from_row.wrapping_add(row_count as u32)
};
let ch_used: u32 = (*image).channels;
let buffer: *mut f32 = work_buffer;
let write_offset: u32 = if align == -(1 as i32) {
0 as i32
} else {
1 as i32
} as u32;
let mut row: u32 = from_row;
while row < until_row {
let source_buffer: *mut f32 = &mut *(*image)
.pixels
.offset(row.wrapping_mul((*image).float_stride) as isize)
as *mut f32;
let mut circular_idx: i32 = 0 as i32;
let mut sum: [f32; 4] = [
0 as i32 as f32,
0 as i32 as f32,
0 as i32 as f32,
0 as i32 as f32,
];
let mut count: f32 = 0 as i32 as f32;
let mut ndx: u32 = 0 as i32 as u32;
while ndx < radius {
let factor: f32 =
if ndx == radius.wrapping_sub(1u32) {
0.5f32
} else {
1 as i32 as f32
};
let mut ch: u32 = 0 as i32 as u32;
while ch < convolve_channels {
sum[ch as usize] += *source_buffer
.offset(ndx.wrapping_mul(step).wrapping_add(ch) as isize)
* factor;
ch = ch.wrapping_add(1)
}
count += factor;
ndx = ndx.wrapping_add(1)
}
let mut ndx_0: u32 = 0 as i32 as u32;
while ndx_0 < w.wrapping_add(buffer_count).wrapping_sub(write_offset) {
// Pixels
// Calculate new value
if ndx_0 < w {
if ndx_0 < w.wrapping_sub(radius) {
let mut ch_0: u32 = 0 as i32 as u32;
while ch_0 < convolve_channels {
sum[ch_0 as usize] += *source_buffer.offset(
ndx_0
.wrapping_add(radius)
.wrapping_mul(step)
.wrapping_add(ch_0) as isize,
) * 0.5f32;
ch_0 = ch_0.wrapping_add(1)
}
count += 0.5f32
}
if ndx_0
< w.wrapping_sub(radius)
.wrapping_add(1u32)
{
let mut ch_1: u32 = 0 as i32 as u32;
while ch_1 < convolve_channels {
sum[ch_1 as usize] += *source_buffer.offset(
ndx_0
.wrapping_sub(1u32)
.wrapping_add(radius)
.wrapping_mul(step)
.wrapping_add(ch_1) as isize,
) * 0.5f32;
ch_1 = ch_1.wrapping_add(1)
}
count += 0.5f32
}
// Remove trailing items from average
if ndx_0 >= radius {
let mut ch_2: u32 = 0 as i32 as u32;
while ch_2 < convolve_channels {
sum[ch_2 as usize] -= *source_buffer.offset(
ndx_0
.wrapping_sub(radius)
.wrapping_mul(step)
.wrapping_add(ch_2) as isize,
) * 0.5f32;
ch_2 = ch_2.wrapping_add(1)
}
count -= 0.5f32
}
if ndx_0 >= radius.wrapping_add(1u32) {
let mut ch_3: u32 = 0 as i32 as u32;
while ch_3 < convolve_channels {
sum[ch_3 as usize] -= *source_buffer.offset(
ndx_0
.wrapping_sub(1u32)
.wrapping_sub(radius)
.wrapping_mul(step)
.wrapping_add(ch_3) as isize,
) * 0.5f32;
ch_3 = ch_3.wrapping_add(1)
}
count -= 0.5f32
}
}
// Flush old value
if ndx_0 >= buffer_count.wrapping_sub(write_offset) {
memcpy(
&mut *source_buffer.offset(
ndx_0
.wrapping_add(write_offset)
.wrapping_sub(buffer_count)
.wrapping_mul(step) as isize,
) as *mut f32 as *mut libc::c_void,
&mut *buffer
.offset((circular_idx as u32).wrapping_mul(ch_used) as isize)
as *mut f32 as *const libc::c_void,
(ch_used as u64)
.wrapping_mul(::std::mem::size_of::<f32>() as u64),
);
}
// enqueue new value
if ndx_0 < w {
let mut ch_4: u32 = 0 as i32 as u32; // Never exceed half the size of the buffer.
while ch_4 < convolve_channels {
*buffer.offset(
(circular_idx as u32)
.wrapping_mul(ch_used)
.wrapping_add(ch_4) as isize,
) = sum[ch_4 as usize] / count;
ch_4 = ch_4.wrapping_add(1)
}
}
circular_idx = ((circular_idx + 1 as i32) as u32)
.wrapping_rem(buffer_count) as i32;
ndx_0 = ndx_0.wrapping_add(1)
}
row = row.wrapping_add(1)
}
return true;
}
// Computes the box-blur diameter `d` used to approximate a Gaussian of the
// given sigma, per the SVG feGaussianBlur rule d = floor(s * 3*sqrt(2*PI)/4 + 0.5)
// (3*sqrt(2*PI)/4 ~= 1.88), clamped so it never exceeds (bitmap_width - 1) / 2.
// Note: the clamp wraps for bitmap_width == 0 (inherited from the C original).
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_approx_gaussian_calculate_d(
    sigma: f32,
    bitmap_width: u32,
) -> u32 {
    let raw: u32 =
        (1.8799712059732503768118239636082839397552400554574537f32 * sigma + 0.5f32).floor()
            as i32 as u32;
    let cap: u32 = bitmap_width.wrapping_sub(1u32).wrapping_div(2u32);
    raw.min(cap)
}
// Returns the scratch-buffer size, in f32 elements (not bytes), needed by
// flow_bitmap_float_approx_gaussian_blur_rows: twice the box diameter `d`
// plus 12 elements of headroom.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_approx_gaussian_buffer_element_count_required(
    sigma: f32,
    bitmap_width: u32,
) -> u32 {
    let d: u32 = flow_bitmap_float_approx_gaussian_calculate_d(sigma, bitmap_width);
    d.wrapping_mul(2u32).wrapping_add(12u32)
}
// Approximates a Gaussian blur over rows of `image` with three successive box
// blurs, following the SVG 1.1 feGaussianBlur approximation (piecewise
// quadratic kernel, within roughly 3% of a true Gaussian). Requires
// sigma >= 2.0. `buffer` is caller-provided scratch space; the required size
// in f32 elements comes from
// flow_bitmap_float_approx_gaussian_buffer_element_count_required.
// Rows processed are [from_row, from_row + row_count), or all rows when
// row_count < 0. Returns false with an error recorded on `context` for an
// invalid sigma, an undersized buffer, or a failing blur pass.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_approx_gaussian_blur_rows(
    context: *mut flow_c,
    image: *mut flow_bitmap_float,
    sigma: f32,
    buffer: *mut f32,
    buffer_element_count: usize,
    from_row: u32,
    row_count: i32,
) -> bool {
    // Ensure sigma is large enough for approximation to be accurate.
    if sigma < 2 as i32 as f32 {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1173 as i32,
            (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Ensure the buffer is large enough
    if flow_bitmap_float_approx_gaussian_buffer_element_count_required(sigma, (*image).w)
        as usize > buffer_element_count
    {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1179 as i32,
            (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // http://www.w3.org/TR/SVG11/filters.html#feGaussianBlur
    // For larger values of 's' (s >= 2.0), an approximation can be used :
    // Three successive box - blurs build a piece - wise quadratic convolution kernel, which approximates the Gaussian
    // kernel to within roughly 3 % .
    let d: u32 = flow_bitmap_float_approx_gaussian_calculate_d(sigma, (*image).w);
    //... if d is odd, use three box - blurs of size 'd', centered on the output pixel.
    if d.wrapping_rem(2u32) > 0 as i32 as u32 {
        // Odd d: three centered box blurs of radius d/2 in a single call.
        if !BitmapFloat_boxblur_rows(
            context,
            image,
            d.wrapping_div(2u32),
            3 as i32 as u32,
            (*image).channels,
            buffer,
            from_row,
            row_count,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1191 as i32,
                (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                    b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
    } else {
        // ... if d is even, two box - blurs of size 'd'
        // (the first one centered on the pixel boundary between the output pixel and the one to the left,
        // the second one centered on the pixel boundary between the output pixel and the one to the right)
        // and one box blur of size 'd+1' centered on the output pixel.
        if !BitmapFloat_boxblur_misaligned_rows(
            context,
            image,
            d.wrapping_div(2u32),
            -(1 as i32),
            (*image).channels,
            buffer,
            from_row,
            row_count,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1200 as i32,
                (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                    b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        if !BitmapFloat_boxblur_misaligned_rows(
            context,
            image,
            d.wrapping_div(2u32),
            1 as i32,
            (*image).channels,
            buffer,
            from_row,
            row_count,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1204 as i32,
                (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                    b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        if !BitmapFloat_boxblur_rows(
            context,
            image,
            d.wrapping_div(2u32)
                .wrapping_add(1u32),
            1 as i32 as u32,
            (*image).channels,
            buffer,
            from_row,
            row_count,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1207 as i32,
                (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                    b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
    }
    return true;
}
// Transposes a 4x4 block of f32 from `A` (row stride `lda` floats) into `B`
// (row stride `ldb` floats) using SSE shuffles — the classic
// _MM_TRANSPOSE4_PS sequence. Unaligned loads/stores are used, so neither
// matrix needs 16-byte alignment. Caller must guarantee both 4x4 regions are
// in bounds.
#[inline]
unsafe extern "C" fn transpose4x4_SSE(
    A: *mut f32,
    B: *mut f32,
    lda: i32,
    ldb: i32,
) {
    // Load the four source rows.
    let mut row1: __m128 = _mm_loadu_ps(&mut *A.offset((0 as i32 * lda) as isize));
    let mut row2: __m128 = _mm_loadu_ps(&mut *A.offset((1 as i32 * lda) as isize));
    let mut row3: __m128 = _mm_loadu_ps(&mut *A.offset((2 as i32 * lda) as isize));
    let mut row4: __m128 = _mm_loadu_ps(&mut *A.offset((3 as i32 * lda) as isize));
    // Interleave low/high pairs, then recombine halves. (The original code
    // pre-initialized the temporaries with _mm_setzero_ps(); those were dead
    // stores and have been removed.)
    let tmp0: __m128 = _mm_unpacklo_ps(row1, row2);
    let tmp2: __m128 = _mm_unpacklo_ps(row3, row4);
    let tmp1: __m128 = _mm_unpackhi_ps(row1, row2);
    let tmp3: __m128 = _mm_unpackhi_ps(row3, row4);
    row1 = _mm_movelh_ps(tmp0, tmp2);
    row2 = _mm_movehl_ps(tmp2, tmp0);
    row3 = _mm_movelh_ps(tmp1, tmp3);
    row4 = _mm_movehl_ps(tmp3, tmp1);
    // Store the transposed rows.
    _mm_storeu_ps(&mut *B.offset((0 as i32 * ldb) as isize), row1);
    _mm_storeu_ps(&mut *B.offset((1 as i32 * ldb) as isize), row2);
    _mm_storeu_ps(&mut *B.offset((2 as i32 * ldb) as isize), row3);
    _mm_storeu_ps(&mut *B.offset((3 as i32 * ldb) as isize), row4);
}
// Cache-blocked transpose of an n x m f32 matrix `A` (row stride `lda`) into
// `B` (row stride `ldb`): walks block_size x block_size tiles and transposes
// each tile as 4x4 SSE sub-tiles. n and m are assumed to be multiples of 4;
// block_size must be a positive multiple of 4.
#[inline]
unsafe extern "C" fn transpose_block_SSE4x4(
    A: *mut f32,
    B: *mut f32,
    n: i32,
    m: i32,
    lda: i32,
    ldb: i32,
    block_size: i32,
) {
    //#pragma omp parallel for collapse(2)
    let mut block_row: i32 = 0;
    while block_row < n {
        // Clip the tile to the matrix edge.
        let row_limit: i32 = if block_row + block_size < n {
            block_row + block_size
        } else {
            n
        };
        let mut block_col: i32 = 0;
        while block_col < m {
            let col_limit: i32 = if block_col + block_size < m {
                block_col + block_size
            } else {
                m
            };
            // Sweep the tile in 4x4 sub-tiles.
            let mut r: i32 = block_row;
            while r < row_limit {
                let mut c: i32 = block_col;
                while c < col_limit {
                    transpose4x4_SSE(
                        &mut *A.offset((r * lda + c) as isize),
                        &mut *B.offset((c * ldb + r) as isize),
                        lda,
                        ldb,
                    );
                    c += 4
                }
                r += 4
            }
            block_col += block_size
        }
        block_row += block_size
    }
}
// Transposes `from` into `to`. `to` must already be allocated with swapped
// dimensions and the same pixel format. For 4-byte-per-pixel formats
// (bgra32 / bgr32) whose strides are multiples of 8 bytes, the bulk of the
// image is transposed with the cache-blocked SSE path over the largest
// multiple-of-8 sub-rectangle, and the remaining right/bottom edge pixels are
// copied one u32 at a time. All other formats fall back to
// flow_bitmap_bgra_transpose_slow. Returns false with an error recorded on
// `c` for mismatched dimensions/formats or unsuitable strides.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_transpose(
    c: *mut flow_c,
    from: *mut flow_bitmap_bgra,
    to: *mut flow_bitmap_bgra,
) -> bool {
    if (*from).w != (*to).h
        || (*from).h != (*to).w
        || (*from).fmt as u32 != (*to).fmt as u32
    {
        flow_context_set_error_get_message_buffer(
            c,
            flow_status_code::Invalid_argument,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1252 as i32,
            (*::std::mem::transmute::<&[u8; 27], &[libc::c_char; 27]>(
                b"flow_bitmap_bgra_transpose\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Non-4-byte formats take the scalar fallback.
    if (*from).fmt as u32 != flow_bgra32 as i32 as u32
        && (*from).fmt as u32 != flow_bgr32 as i32 as u32
    {
        if !flow_bitmap_bgra_transpose_slow(c, from, to) {
            flow_context_add_to_callstack(
                c,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1258 as i32,
                (*::std::mem::transmute::<&[u8; 27], &[libc::c_char; 27]>(
                    b"flow_bitmap_bgra_transpose\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        return true;
    }
    // We require 8 when we only need 4 - in case we ever want to enable avx (like if we make it faster)
    let min_block_size: i32 = 8 as i32;
    // Strides must be multiple of required alignments
    if (*from).stride.wrapping_rem(min_block_size as u32)
        != 0 as i32 as u32
        || (*to).stride.wrapping_rem(min_block_size as u32)
            != 0 as i32 as u32
    {
        flow_context_set_error_get_message_buffer(
            c,
            flow_status_code::Invalid_argument,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1269 as i32,
            (*::std::mem::transmute::<&[u8; 27], &[libc::c_char; 27]>(
                b"flow_bitmap_bgra_transpose\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // 256 (1024x1024 bytes) at 18.18ms, 128 at 18.6ms, 64 at 20.4ms, 16 at 25.71ms
    let block_size: i32 = 128 as i32;
    // Largest multiple-of-8 sub-rectangle handled by the SSE path; the
    // leftover edges are copied by the scalar loops below.
    let cropped_h: i32 = (*from)
        .h
        .wrapping_sub((*from).h.wrapping_rem(min_block_size as u32))
        as i32;
    let cropped_w: i32 = (*from)
        .w
        .wrapping_sub((*from).w.wrapping_rem(min_block_size as u32))
        as i32;
    // Strides are divided by 4: the SSE transpose works in f32 (4-byte) units.
    transpose_block_SSE4x4(
        (*from).pixels as *mut f32,
        (*to).pixels as *mut f32,
        cropped_h,
        cropped_w,
        (*from)
            .stride
            .wrapping_div(4u32) as i32,
        (*to).stride.wrapping_div(4u32) as i32,
        block_size,
    );
    // Copy missing bits
    // Right-hand columns of `to` (rows of `from` beyond cropped_h), one u32 pixel at a time.
    let mut x: u32 = cropped_h as u32;
    while x < (*to).w {
        let mut y: u32 = 0 as i32 as u32;
        while y < (*to).h {
            *(&mut *(*to).pixels.offset(
                x.wrapping_mul(4u32)
                    .wrapping_add(y.wrapping_mul((*to).stride)) as isize,
            ) as *mut libc::c_uchar as *mut u32) = *(&mut *(*from).pixels.offset(
                x.wrapping_mul((*from).stride)
                    .wrapping_add(y.wrapping_mul(4u32))
                    as isize,
            ) as *mut libc::c_uchar
                as *mut u32);
            y = y.wrapping_add(1)
        }
        x = x.wrapping_add(1)
    }
    // Bottom rows of `to` (columns of `from` beyond cropped_w).
    let mut x_0: u32 = 0 as i32 as u32;
    while x_0 < cropped_h as u32 {
        let mut y_0: u32 = cropped_w as u32;
        while y_0 < (*to).h {
            *(&mut *(*to).pixels.offset(
                x_0.wrapping_mul(4u32)
                    .wrapping_add(y_0.wrapping_mul((*to).stride)) as isize,
            ) as *mut libc::c_uchar as *mut u32) = *(&mut *(*from).pixels.offset(
                x_0.wrapping_mul((*from).stride)
                    .wrapping_add(y_0.wrapping_mul(4u32))
                    as isize,
            ) as *mut libc::c_uchar
                as *mut u32);
            y_0 = y_0.wrapping_add(1)
        }
        x_0 = x_0.wrapping_add(1)
    }
    return true;
}
// Scalar transpose fallback for flow_bitmap_bgra_transpose. Supports
// 4-byte-per-pixel formats (bgra32 / bgr32), copied one u32 pixel at a time,
// and bgr24, copied three bytes per pixel with incrementally-maintained
// stride offsets. Returns false with an error recorded on `c` for mismatched
// dimensions/formats or any other pixel format.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_transpose_slow(
    c: *mut flow_c,
    from: *mut flow_bitmap_bgra,
    to: *mut flow_bitmap_bgra,
) -> bool {
    if (*from).w != (*to).h
        || (*from).h != (*to).w
        || (*from).fmt as u32 != (*to).fmt as u32
    {
        flow_context_set_error_get_message_buffer(
            c,
            flow_status_code::Invalid_argument,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1300 as i32,
            (*::std::mem::transmute::<&[u8; 32], &[libc::c_char; 32]>(
                b"flow_bitmap_bgra_transpose_slow\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    if (*from).fmt as u32 == flow_bgra32 as i32 as u32
        || (*from).fmt as u32 == flow_bgr32 as i32 as u32
    {
        // 4-byte formats: each pixel is moved as one u32.
        let mut x: u32 = 0 as i32 as u32;
        while x < (*to).w {
            let mut y: u32 = 0 as i32 as u32;
            while y < (*to).h {
                *(&mut *(*to).pixels.offset(
                    x.wrapping_mul(4u32)
                        .wrapping_add(y.wrapping_mul((*to).stride)) as isize,
                ) as *mut libc::c_uchar as *mut u32) =
                    *(&mut *(*from).pixels.offset(
                        x.wrapping_mul((*from).stride)
                            .wrapping_add(y.wrapping_mul(4u32))
                            as isize,
                    ) as *mut libc::c_uchar as *mut u32);
                y = y.wrapping_add(1)
            }
            x = x.wrapping_add(1)
        }
        return true;
    } else if (*from).fmt as u32 == flow_bgr24 as i32 as u32 {
        let from_stride: i32 = (*from).stride as i32;
        let to_stride: i32 = (*to).stride as i32;
        // x_stride tracks x_0 * from_stride; x_3 tracks x_0 * 3, maintained
        // incrementally instead of multiplying each iteration.
        let mut x_0: u32 = 0 as i32 as u32;
        let mut x_stride: u32 = 0 as i32 as u32;
        let mut x_3: u32 = 0 as i32 as u32;
        while x_0 < (*to).w {
            // y_stride tracks y_0 * to_stride; y_3 tracks y_0 * 3.
            let mut y_0: u32 = 0 as i32 as u32;
            let mut y_stride: u32 = 0 as i32 as u32;
            let mut y_3: u32 = 0 as i32 as u32;
            while y_0 < (*to).h {
                // Copy the three bytes of one bgr24 pixel.
                *(*to).pixels.offset(x_3.wrapping_add(y_stride) as isize) =
                    *(*from).pixels.offset(x_stride.wrapping_add(y_3) as isize);
                *(*to).pixels.offset(
                    x_3.wrapping_add(y_stride)
                        .wrapping_add(1u32)
                        as isize,
                ) = *(*from).pixels.offset(
                    x_stride
                        .wrapping_add(y_3)
                        .wrapping_add(1u32)
                        as isize,
                );
                *(*to).pixels.offset(
                    x_3.wrapping_add(y_stride)
                        .wrapping_add(2u32)
                        as isize,
                ) = *(*from).pixels.offset(
                    x_stride
                        .wrapping_add(y_3)
                        .wrapping_add(2u32)
                        as isize,
                );
                y_0 = y_0.wrapping_add(1);
                y_stride = (y_stride as u32).wrapping_add(to_stride as u32)
                    as u32 as u32;
                y_3 = (y_3 as u32).wrapping_add(3u32)
                    as u32 as u32
            }
            x_0 = x_0.wrapping_add(1);
            x_stride = (x_stride as u32).wrapping_add(from_stride as u32)
                as u32 as u32;
            x_3 = (x_3 as u32).wrapping_add(3u32) as u32
                as u32
        }
        return true;
    } else {
        flow_context_set_error_get_message_buffer(
            c,
            flow_status_code::Invalid_argument,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1325 as i32,
            (*::std::mem::transmute::<&[u8; 32], &[libc::c_char; 32]>(
                b"flow_bitmap_bgra_transpose_slow\x00",
            ))
            .as_ptr(),
        );
        return false;
    };
}
// Converts rows of the sRGB byte bitmap `src` into the linear float bitmap
// `dest`, using the lookup/curve in `colorcontext` for each color channel.
// Reads `row_count` rows starting at `from_row`, writing them starting at
// `dest_row`. Widths must match and the row ranges must be in bounds.
// copy_step = min(source channels, dest channels) must be 3 or 4; when it is
// 4, the color channels are premultiplied by alpha (alpha byte / 255) and
// alpha is stored in the 4th float. Five specialized loops cover the
// supported (from_step, to_step) byte/channel combinations. Returns false
// with an error recorded on `context` for mismatched widths, out-of-range
// rows, or unsupported pixel layouts.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_convert_srgb_to_linear(
    context: *mut flow_c,
    colorcontext: *mut flow_colorcontext_info,
    src: *mut flow_bitmap_bgra,
    from_row: u32,
    dest: *mut flow_bitmap_float,
    dest_row: u32,
    row_count: u32,
) -> bool {
    if ((*src).w != (*dest).w) as i32 as libc::c_long != 0 {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1339 as i32,
            (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                b"flow_bitmap_float_convert_srgb_to_linear\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    if !(from_row.wrapping_add(row_count) <= (*src).h
        && dest_row.wrapping_add(row_count) <= (*dest).h) as i32 as libc::c_long
        != 0
    {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1345 as i32,
            (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                b"flow_bitmap_float_convert_srgb_to_linear\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    let w = (*src).w;
    // units: total source bytes per row actually holding pixels.
    let units: u32 = w * flow_pixel_format_bytes_per_pixel((*src).fmt);
    // from_step: source bytes per pixel; from_copy: channels with meaning;
    // to_step: dest floats per pixel.
    let from_step: u32 = flow_pixel_format_bytes_per_pixel((*src).fmt);
    let from_copy: u32 = flow_pixel_format_channels(flow_effective_pixel_format(src));
    let to_step: u32 = (*dest).channels;
    let copy_step: u32 = from_copy.min(to_step);
    if copy_step != 3 && copy_step != 4 {
        flow_snprintf(
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Unsupported_pixel_format,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1361 as i32,
                (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                    b"flow_bitmap_float_convert_srgb_to_linear\x00",
                ))
                .as_ptr(),
            ),
            FLOW_ERROR_MESSAGE_SIZE as usize,
            b"copy_step=%d\x00" as *const u8 as *const libc::c_char,
            copy_step,
        );
        return false;
    }
    // NOTE(review): uses && — with channel counts capped at 4, copy_step == 4
    // forces to_step == 4, so this branch appears unreachable; confirm intent.
    if copy_step == 4 && from_step != 4 && to_step != 4 {
        flow_snprintf(
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Unsupported_pixel_format,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1368 as i32,
                (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                    b"flow_bitmap_float_convert_srgb_to_linear\x00",
                ))
                .as_ptr(),
            ),
            FLOW_ERROR_MESSAGE_SIZE as usize,
            b"copy_step=%d, from_step=%d, to_step=%d\x00" as *const u8 as *const libc::c_char,
            copy_step,
            from_step,
            to_step,
        );
        return false;
    }
    if copy_step == 4 {
        // BGRA -> premultiplied linear BGRA: color channels scaled by alpha.
        let mut row: u32 = 0 as i32 as u32;
        while row < row_count {
            let src_start: *mut u8 = (*src)
                .pixels
                .offset(from_row.wrapping_add(row).wrapping_mul((*src).stride) as isize);
            let buf: *mut f32 = (*dest).pixels.offset(
                (*dest)
                    .float_stride
                    .wrapping_mul(row.wrapping_add(dest_row)) as isize,
            );
            let mut to_x: u32 = 0 as i32 as u32;
            let mut bix: u32 = 0 as i32 as u32;
            while bix < units {
                let alpha: f32 = *src_start
                    .offset(bix.wrapping_add(3u32) as isize)
                    as f32
                    / 255.0f32;
                *buf.offset(to_x as isize) = alpha
                    * flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start.offset(bix as isize),
                    );
                *buf.offset(to_x.wrapping_add(1u32) as isize) = alpha
                    * flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start
                            .offset(bix.wrapping_add(1u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(2u32) as isize) = alpha
                    * flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start
                            .offset(bix.wrapping_add(2u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(3u32) as isize) = alpha;
                to_x = (to_x as u32).wrapping_add(4u32)
                    as u32 as u32;
                bix = (bix as u32).wrapping_add(4u32)
                    as u32 as u32
            }
            row = row.wrapping_add(1)
        }
    } else if from_step == 3 && to_step == 3 {
        // BGR 3 bytes -> 3 floats, no alpha.
        let mut row: u32 = 0 as i32 as u32;
        while row < row_count {
            let src_start_0: *mut u8 = (*src)
                .pixels
                .offset(from_row.wrapping_add(row).wrapping_mul((*src).stride) as isize);
            let buf: *mut f32 = (*dest).pixels.offset(
                (*dest)
                    .float_stride
                    .wrapping_mul(row.wrapping_add(dest_row)) as isize,
            );
            let mut to_x: u32 = 0 as i32 as u32;
            let mut bix: u32 = 0 as i32 as u32;
            while bix < units {
                *buf.offset(to_x as isize) = flow_colorcontext_srgb_to_floatspace(
                    colorcontext,
                    *src_start_0.offset(bix as isize),
                );
                *buf.offset(to_x.wrapping_add(1u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start_0
                            .offset(bix.wrapping_add(1u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(2u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start_0
                            .offset(bix.wrapping_add(2u32) as isize),
                    );
                to_x = (to_x as u32).wrapping_add(3u32)
                    as u32 as u32;
                bix = (bix as u32).wrapping_add(3u32)
                    as u32 as u32
            }
            row += 1
        }
    } else if from_step == 4 && to_step == 3 {
        // 4-byte source (alpha/padding ignored) -> 3 floats.
        let mut row: u32 = 0 as i32 as u32;
        while row < row_count {
            let src_start: *mut u8 = (*src)
                .pixels
                .offset(from_row.wrapping_add(row).wrapping_mul((*src).stride) as isize);
            let buf: *mut f32 = (*dest).pixels.offset(
                (*dest)
                    .float_stride
                    .wrapping_mul(row.wrapping_add(dest_row)) as isize,
            );
            let mut to_x: u32 = 0 as i32 as u32;
            let mut bix: u32 = 0 as i32 as u32;
            while bix < units {
                *buf.offset(to_x as isize) = flow_colorcontext_srgb_to_floatspace(
                    colorcontext,
                    *src_start.offset(bix as isize),
                );
                *buf.offset(to_x.wrapping_add(1u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start
                            .offset(bix.wrapping_add(1u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(2u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start
                            .offset(bix.wrapping_add(2u32) as isize),
                    );
                to_x = (to_x as u32).wrapping_add(3u32)
                    as u32 as u32;
                bix = (bix as u32).wrapping_add(4u32)
                    as u32 as u32
            }
            row += 1
        }
    } else if from_step == 3 && to_step == 4 {
        // 3-byte source -> 4 floats; the 4th float is left untouched.
        let mut row: u32 = 0 as i32 as u32;
        while row < row_count {
            let src_start: *mut u8 = (*src)
                .pixels
                .offset(from_row.wrapping_add(row).wrapping_mul((*src).stride) as isize);
            let buf: *mut f32 = (*dest).pixels.offset(
                (*dest)
                    .float_stride
                    .wrapping_mul(row.wrapping_add(dest_row)) as isize,
            );
            let mut to_x: u32 = 0 as i32 as u32;
            let mut bix: u32 = 0 as i32 as u32;
            while bix < units {
                *buf.offset(to_x as isize) = flow_colorcontext_srgb_to_floatspace(
                    colorcontext,
                    *src_start.offset(bix as isize),
                );
                *buf.offset(to_x.wrapping_add(1u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start
                            .offset(bix.wrapping_add(1u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(2u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start
                            .offset(bix.wrapping_add(2u32) as isize),
                    );
                to_x = (to_x as u32).wrapping_add(4u32)
                    as u32 as u32;
                bix = (bix as u32).wrapping_add(3u32)
                    as u32 as u32
            }
            row += 1
        }
    } else if from_step == 4 && to_step == 4 {
        // 4-byte source -> 4 floats; alpha byte ignored, 4th float untouched.
        let mut row: u32 = 0 as i32 as u32;
        while row < row_count {
            let src_start: *mut u8 = (*src)
                .pixels
                .offset(from_row.wrapping_add(row).wrapping_mul((*src).stride) as isize);
            let buf: *mut f32 = (*dest).pixels.offset(
                (*dest)
                    .float_stride
                    .wrapping_mul(row.wrapping_add(dest_row)) as isize,
            );
            let mut to_x: u32 = 0 as i32 as u32;
            let mut bix: u32 = 0 as i32 as u32;
            while bix < units {
                *buf.offset(to_x as isize) = flow_colorcontext_srgb_to_floatspace(
                    colorcontext,
                    *src_start.offset(bix as isize),
                );
                *buf.offset(to_x.wrapping_add(1u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start
                            .offset(bix.wrapping_add(1u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(2u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start
                            .offset(bix.wrapping_add(2u32) as isize),
                    );
                to_x = (to_x as u32).wrapping_add(4u32)
                    as u32 as u32;
                bix = (bix as u32).wrapping_add(4u32)
                    as u32 as u32
            }
            row += 1
        }
    } else {
        flow_snprintf(
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Unsupported_pixel_format,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1411 as i32,
                (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                    b"flow_bitmap_float_convert_srgb_to_linear\x00",
                ))
                .as_ptr(),
            ),
            FLOW_ERROR_MESSAGE_SIZE as usize,
            b"copy_step=%d, from_step=%d, to_step=%d\x00" as *const u8 as *const libc::c_char,
            copy_step,
            from_step,
            to_step,
        );
        return false;
    }
    return true;
}
/*
static void unpack24bitRow(u32 width, unsigned char* sourceLine, unsigned char* destArray){
for (u32 i = 0; i < width; i++){
memcpy(destArray + i * 4, sourceLine + i * 3, 3);
destArray[i * 4 + 3] = 255;
}
}
*/
// Flips `b` vertically in place by exchanging rows through a single
// heap-allocated scratch row of stride bytes. Only w * bytes_per_pixel bytes
// of each row are swapped — never the stride padding, which may belong to a
// parent window. Returns false with an error recorded on `context` only if
// the scratch allocation fails.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_flip_vertical(
    context: *mut flow_c,
    b: *mut flow_bitmap_bgra,
) -> bool {
    let swap: *mut libc::c_void = flow_context_malloc(
        context,
        (*b).stride as usize,
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        1430 as i32,
    );
    if swap.is_null() {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1432 as i32,
            (*::std::mem::transmute::<&[u8; 31], &[libc::c_char; 31]>(
                b"flow_bitmap_bgra_flip_vertical\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Dont' copy the full stride (padding), it could be windowed!
    // Todo: try multiple swap rows? 5ms isn't bad, but could be better
    let row_length: u32 = (*b).stride.min((*b).w
        .wrapping_mul(flow_pixel_format_bytes_per_pixel((*b).fmt))
    );
    // Three-way swap of row i and row (h - 1 - i) through the scratch buffer.
    let mut i: u32 = 0 as i32 as u32;
    while i < (*b).h.wrapping_div(2u32) {
        let top: *mut libc::c_void =
            (*b).pixels.offset(i.wrapping_mul((*b).stride) as isize) as *mut libc::c_void;
        let bottom: *mut libc::c_void = (*b).pixels.offset(
            (*b).h
                .wrapping_sub(1u32)
                .wrapping_sub(i)
                .wrapping_mul((*b).stride) as isize,
        ) as *mut libc::c_void;
        memcpy(swap, top, row_length as u64);
        memcpy(top, bottom, row_length as u64);
        memcpy(bottom, swap, row_length as u64);
        i = i.wrapping_add(1)
    }
    flow_deprecated_free(
        context,
        swap,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        1445 as i32,
    );
    return true;
}
// Flips `b` horizontally in place by swapping pixels from the two ends of
// each row toward the middle. Three strategies: 4-byte formats swap whole
// u32 pixels; bgr24 swaps 3-byte chunks through a small stack buffer; the
// generic fallback swaps bytes_per_pixel-byte chunks the same way. Always
// returns true.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_flip_horizontal(
    _context: *mut flow_c,
    b: *mut flow_bitmap_bgra,
) -> bool {
    if (*b).fmt as u32 == flow_bgra32 as i32 as u32
        || (*b).fmt as u32 == flow_bgr32 as i32 as u32
    {
        // 12ms simple
        let mut y: u32 = 0 as i32 as u32;
        while y < (*b).h {
            let mut left: *mut u32 =
                (*b).pixels.offset(y.wrapping_mul((*b).stride) as isize) as *mut u32;
            let mut right: *mut u32 = (*b)
                .pixels
                .offset(y.wrapping_mul((*b).stride) as isize)
                .offset(
                    (4u32)
                        .wrapping_mul((*b).w.wrapping_sub(1u32))
                        as isize,
                ) as *mut u32;
            // Swap u32 pixels inward until the pointers meet.
            while left < right {
                let swap: u32 = *left;
                *left = *right;
                *right = swap;
                left = left.offset(1);
                right = right.offset(-1)
            }
            y = y.wrapping_add(1)
        }
    } else if (*b).fmt as u32 == flow_bgr24 as i32 as u32 {
        // Scratch space for one pixel (only 3 bytes of it are used).
        let mut swap_0: [u32; 4] = [0; 4];
        // Dont' copy the full stride (padding), it could be windowed!
        let mut y_0: u32 = 0 as i32 as u32;
        while y_0 < (*b).h {
            let mut left_0: *mut u8 =
                (*b).pixels.offset(y_0.wrapping_mul((*b).stride) as isize);
            let mut right_0: *mut u8 = (*b)
                .pixels
                .offset(y_0.wrapping_mul((*b).stride) as isize)
                .offset(
                    (3u32)
                        .wrapping_mul((*b).w.wrapping_sub(1u32))
                        as isize,
                );
            while left_0 < right_0 {
                memcpy(
                    &mut swap_0 as *mut [u32; 4] as *mut libc::c_void,
                    left_0 as *const libc::c_void,
                    3 as i32 as u64,
                );
                memcpy(
                    left_0 as *mut libc::c_void,
                    right_0 as *const libc::c_void,
                    3 as i32 as u64,
                );
                memcpy(
                    right_0 as *mut libc::c_void,
                    &mut swap_0 as *mut [u32; 4] as *const libc::c_void,
                    3 as i32 as u64,
                );
                left_0 = left_0.offset(3 as i32 as isize);
                right_0 = right_0.offset(-(3 as i32 as isize))
            }
            y_0 = y_0.wrapping_add(1)
        }
    } else {
        // Generic path: swap bytes_per_pixel-byte chunks for any other format.
        let mut swap_1: [u32; 4] = [0; 4];
        // Dont' copy the full stride (padding), it could be windowed!
        let mut y_1: u32 = 0 as i32 as u32;
        while y_1 < (*b).h {
            let mut left_1: *mut u8 =
                (*b).pixels.offset(y_1.wrapping_mul((*b).stride) as isize);
            let mut right_1: *mut u8 = (*b)
                .pixels
                .offset(y_1.wrapping_mul((*b).stride) as isize)
                .offset(
                    flow_pixel_format_bytes_per_pixel((*b).fmt)
                        .wrapping_mul((*b).w.wrapping_sub(1u32))
                        as isize,
                );
            while left_1 < right_1 {
                memcpy(
                    &mut swap_1 as *mut [u32; 4] as *mut libc::c_void,
                    left_1 as *const libc::c_void,
                    flow_pixel_format_bytes_per_pixel((*b).fmt) as u64,
                );
                memcpy(
                    left_1 as *mut libc::c_void,
                    right_1 as *const libc::c_void,
                    flow_pixel_format_bytes_per_pixel((*b).fmt) as u64,
                );
                memcpy(
                    right_1 as *mut libc::c_void,
                    &mut swap_1 as *mut [u32; 4] as *const libc::c_void,
                    flow_pixel_format_bytes_per_pixel((*b).fmt) as u64,
                );
                left_1 = left_1.offset(flow_pixel_format_bytes_per_pixel((*b).fmt) as isize);
                right_1 = right_1.offset(-(flow_pixel_format_bytes_per_pixel((*b).fmt) as isize))
            }
            y_1 = y_1.wrapping_add(1)
        }
    }
    return true;
}
unsafe extern "C" fn flow_bitmap_float_blend_matte(
_context: *mut flow_c,
colorcontext: *mut flow_colorcontext_info,
src: *mut flow_bitmap_float,
from_row: u32,
row_count: u32,
matte: *const u8,
) -> bool {
// We assume that matte is BGRA, regardless.
let matte_a: f32 =
*matte.offset(3 as i32 as isize) as f32 / 255.0f32;
let b: f32 = flow_colorcontext_srgb_to_floatspace(
colorcontext,
*matte.offset(0),
);
let g: f32 = flow_colorcontext_srgb_to_floatspace(
colorcontext,
*matte.offset(1),
);
let r: f32 = flow_colorcontext_srgb_to_floatspace(
colorcontext,
*matte.offset(2),
);
let mut row: u32 = from_row;
while row < from_row.wrapping_add(row_count) {
let start_ix: u32 = row.wrapping_mul((*src).float_stride);
let end_ix: u32 = start_ix.wrapping_add((*src).w.wrapping_mul((*src).channels));
let mut ix: u32 = start_ix;
while ix < end_ix {
let src_a: f32 = *(*src)
.pixels
.offset(ix.wrapping_add(3u32) as isize);
let a: f32 = (1.0f32 - src_a) * matte_a;
let final_alpha: f32 = src_a + a;
*(*src).pixels.offset(ix as isize) =
(*(*src).pixels.offset(ix as isize) + b * a) / final_alpha;
*(*src)
.pixels
.offset(ix.wrapping_add(1u32) as isize) = (*(*src)
.pixels
.offset(ix.wrapping_add(1u32) as isize)
+ g * a)
/ final_alpha;
*(*src)
.pixels
.offset(ix.wrapping_add(2u32) as isize) = (*(*src)
.pixels
.offset(ix.wrapping_add(2u32) as isize)
+ r * a)
/ final_alpha;
*(*src)
.pixels
.offset(ix.wrapping_add(3u32) as isize) = final_alpha;
ix = (ix as u32).wrapping_add(4u32) as u32
as u32
}
row = row.wrapping_add(1)
}
// Ensure alpha is demultiplied
return true;
}
// Converts premultiplied float pixels back to straight alpha in place, over
// rows [from_row, from_row + row_count). Assumes 4 channels with alpha in
// the 4th slot; pixels whose alpha is 0 are left untouched.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_demultiply_alpha(
    _context: *mut flow_c,
    src: *mut flow_bitmap_float,
    from_row: u32,
    row_count: u32,
) -> bool {
    let last_row: u32 = from_row.wrapping_add(row_count);
    let mut row: u32 = from_row;
    while row < last_row {
        let row_start: u32 = row.wrapping_mul((*src).float_stride);
        let row_end: u32 = row_start.wrapping_add((*src).w.wrapping_mul((*src).channels));
        let mut ix: u32 = row_start;
        while ix < row_end {
            let alpha: f32 = *(*src).pixels.offset(ix.wrapping_add(3u32) as isize);
            if alpha > 0f32 {
                // Undo premultiplication on the three color channels.
                *(*src).pixels.offset(ix as isize) /= alpha;
                *(*src).pixels.offset(ix.wrapping_add(1u32) as isize) /= alpha;
                *(*src).pixels.offset(ix.wrapping_add(2u32) as isize) /= alpha
            }
            ix = ix.wrapping_add(4u32)
        }
        row = row.wrapping_add(1)
    }
    true
}
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_copy_linear_over_srgb(
_context: *mut flow_c,
colorcontext: *mut flow_colorcontext_info,
src: *mut flow_bitmap_float,
from_row: u32,
dest: *mut flow_bitmap_bgra,
dest_row: u32,
row_count: u32,
from_col: u32,
col_count: u32,
transpose: bool,
) -> bool {
let dest_bytes_pp: u32 = flow_pixel_format_bytes_per_pixel((*dest).fmt);
let srcitems: u32 = from_col.wrapping_add(col_count).min(
(*src).w).wrapping_mul((*src).channels
);
let dest_fmt: flow_pixel_format = flow_effective_pixel_format(dest);
let ch: u32 = (*src).channels;
let copy_alpha: bool = dest_fmt as u32 == flow_bgra32 as i32 as u32
&& ch == 4 as i32 as u32
&& (*src).alpha_meaningful as i32 != 0;
let clean_alpha: bool =
!copy_alpha && dest_fmt as u32 == flow_bgra32 as i32 as u32;
let dest_row_stride: u32 = if transpose as i32 != 0 {
dest_bytes_pp
} else {
(*dest).stride
};
let dest_pixel_stride: u32 = if transpose as i32 != 0 {
(*dest).stride
} else {
dest_bytes_pp
};
if dest_pixel_stride == 4 as i32 as u32 {
if ch == 3 as i32 as u32 {
if copy_alpha && !clean_alpha {
let mut row: u32 = 0 as i32 as u32;
while row < row_count {
let src_row: *mut f32 =
(*src)
.pixels
.offset(row.wrapping_add(from_row).wrapping_mul((*src).float_stride)
as isize);
let mut dest_row_bytes: *mut u8 = (*dest)
.pixels
.offset(dest_row.wrapping_add(row).wrapping_mul(dest_row_stride) as isize)
.offset(from_col.wrapping_mul(4u32) as isize);
let mut ix: u32 = from_col.wrapping_mul(3u32);
while ix < srcitems {
*dest_row_bytes.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row.offset(ix as isize),
);
*dest_row_bytes.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row.offset(
ix.wrapping_add(1u32) as isize
),
);
*dest_row_bytes.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row.offset(
ix.wrapping_add(2u32) as isize
),
);
*dest_row_bytes.offset(3 as i32 as isize) = uchar_clamp_ff(
*src_row
.offset(ix.wrapping_add(3u32) as isize)
* 255.0f32,
);
dest_row_bytes = dest_row_bytes.offset(4 as i32 as isize);
ix = (ix as u32).wrapping_add(3u32)
as u32 as u32
}
row = row.wrapping_add(1)
}
}
if !copy_alpha && !clean_alpha {
let mut row_0: u32 = 0 as i32 as u32;
while row_0 < row_count {
let src_row_0: *mut f32 = (*src).pixels.offset(
row_0
.wrapping_add(from_row)
.wrapping_mul((*src).float_stride) as isize,
);
let mut dest_row_bytes_0: *mut u8 = (*dest)
.pixels
.offset(dest_row.wrapping_add(row_0).wrapping_mul(dest_row_stride) as isize)
.offset(from_col.wrapping_mul(4u32) as isize);
let mut ix_0: u32 =
from_col.wrapping_mul(3u32);
while ix_0 < srcitems {
*dest_row_bytes_0.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_0.offset(ix_0 as isize),
);
*dest_row_bytes_0.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_0
.offset(ix_0.wrapping_add(1u32)
as isize),
);
*dest_row_bytes_0.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_0
.offset(ix_0.wrapping_add(2u32)
as isize),
);
dest_row_bytes_0 = dest_row_bytes_0.offset(4 as i32 as isize);
ix_0 = (ix_0 as u32).wrapping_add(3u32)
as u32 as u32
}
row_0 = row_0.wrapping_add(1)
}
}
if !copy_alpha && clean_alpha {
let mut row_1: u32 = 0 as i32 as u32;
while row_1 < row_count {
let src_row_1: *mut f32 = (*src).pixels.offset(
row_1
.wrapping_add(from_row)
.wrapping_mul((*src).float_stride) as isize,
);
let mut dest_row_bytes_1: *mut u8 = (*dest)
.pixels
.offset(dest_row.wrapping_add(row_1).wrapping_mul(dest_row_stride) as isize)
.offset(from_col.wrapping_mul(4u32) as isize);
let mut ix_1: u32 =
from_col.wrapping_mul(3u32);
while ix_1 < srcitems {
*dest_row_bytes_1.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_1.offset(ix_1 as isize),
);
*dest_row_bytes_1.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_1
.offset(ix_1.wrapping_add(1u32)
as isize),
);
*dest_row_bytes_1.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_1
.offset(ix_1.wrapping_add(2u32)
as isize),
);
*dest_row_bytes_1.offset(3 as i32 as isize) =
0xff as i32 as u8;
dest_row_bytes_1 = dest_row_bytes_1.offset(4 as i32 as isize);
ix_1 = (ix_1 as u32).wrapping_add(3u32)
as u32 as u32
}
row_1 = row_1.wrapping_add(1)
}
}
}
if ch == 4 as i32 as u32 {
if copy_alpha && !clean_alpha {
let mut row_2: u32 = 0 as i32 as u32;
while row_2 < row_count {
let src_row_2: *mut f32 = (*src).pixels.offset(
row_2
.wrapping_add(from_row)
.wrapping_mul((*src).float_stride) as isize,
);
let mut dest_row_bytes_2: *mut u8 = (*dest)
.pixels
.offset(dest_row.wrapping_add(row_2).wrapping_mul(dest_row_stride) as isize)
.offset(from_col.wrapping_mul(4u32) as isize);
let mut ix_2: u32 =
from_col.wrapping_mul(4u32);
while ix_2 < srcitems {
*dest_row_bytes_2.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_2.offset(ix_2 as isize),
);
*dest_row_bytes_2.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_2
.offset(ix_2.wrapping_add(1u32)
as isize),
);
*dest_row_bytes_2.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_2
.offset(ix_2.wrapping_add(2u32)
as isize),
);
*dest_row_bytes_2.offset(3 as i32 as isize) = uchar_clamp_ff(
*src_row_2.offset(
ix_2.wrapping_add(3u32) as isize
) * 255.0f32,
);
dest_row_bytes_2 = dest_row_bytes_2.offset(4 as i32 as isize);
ix_2 = (ix_2 as u32).wrapping_add(4u32)
as u32 as u32
}
row_2 = row_2.wrapping_add(1)
}
}
if !copy_alpha && !clean_alpha {
let mut row_3: u32 = 0 as i32 as u32;
while row_3 < row_count {
let src_row_3: *mut f32 = (*src).pixels.offset(
row_3
.wrapping_add(from_row)
.wrapping_mul((*src).float_stride) as isize,
);
let mut dest_row_bytes_3: *mut u8 = (*dest)
.pixels
.offset(dest_row.wrapping_add(row_3).wrapping_mul(dest_row_stride) as isize)
.offset(from_col.wrapping_mul(4u32) as isize);
let mut ix_3: u32 =
from_col.wrapping_mul(4u32);
while ix_3 < srcitems {
*dest_row_bytes_3.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_3.offset(ix_3 as isize),
);
*dest_row_bytes_3.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_3
.offset(ix_3.wrapping_add(1u32)
as isize),
);
*dest_row_bytes_3.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_3
.offset(ix_3.wrapping_add(2u32)
as isize),
);
dest_row_bytes_3 = dest_row_bytes_3.offset(4 as i32 as isize);
ix_3 = (ix_3 as u32).wrapping_add(4u32)
as u32 as u32
}
row_3 = row_3.wrapping_add(1)
}
}
if !copy_alpha && clean_alpha {
let mut row_4: u32 = 0 as i32 as u32;
while row_4 < row_count {
let src_row_4: *mut f32 = (*src).pixels.offset(
row_4
.wrapping_add(from_row)
.wrapping_mul((*src).float_stride) as isize,
);
let mut dest_row_bytes_4: *mut u8 = (*dest)
.pixels
.offset(dest_row.wrapping_add(row_4).wrapping_mul(dest_row_stride) as isize)
.offset(from_col.wrapping_mul(4u32) as isize);
let mut ix_4: u32 =
from_col.wrapping_mul(4u32);
while ix_4 < srcitems {
*dest_row_bytes_4.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_4.offset(ix_4 as isize),
);
*dest_row_bytes_4.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_4
.offset(ix_4.wrapping_add(1u32)
as isize),
);
*dest_row_bytes_4.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_4
.offset(ix_4.wrapping_add(2u32)
as isize),
);
*dest_row_bytes_4.offset(3 as i32 as isize) =
0xff as i32 as u8;
dest_row_bytes_4 = dest_row_bytes_4.offset(4 as i32 as isize);
ix_4 = (ix_4 as u32).wrapping_add(4u32)
as u32 as u32
}
row_4 = row_4.wrapping_add(1)
}
}
}
} else {
if ch == 3 as i32 as u32 {
if copy_alpha && !clean_alpha {
let mut row_5: u32 = 0 as i32 as u32;
while row_5 < row_count {
let src_row_5: *mut f32 = (*src).pixels.offset(
row_5
.wrapping_add(from_row)
.wrapping_mul((*src).float_stride) as isize,
);
let mut dest_row_bytes_5: *mut u8 = (*dest)
.pixels
.offset(dest_row.wrapping_add(row_5).wrapping_mul(dest_row_stride) as isize)
.offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
let mut ix_5: u32 =
from_col.wrapping_mul(3u32);
while ix_5 < srcitems {
*dest_row_bytes_5.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_5.offset(ix_5 as isize),
);
*dest_row_bytes_5.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_5
.offset(ix_5.wrapping_add(1u32)
as isize),
);
*dest_row_bytes_5.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_5
.offset(ix_5.wrapping_add(2u32)
as isize),
);
*dest_row_bytes_5.offset(3 as i32 as isize) = uchar_clamp_ff(
*src_row_5.offset(
ix_5.wrapping_add(3u32) as isize
) * 255.0f32,
);
dest_row_bytes_5 = dest_row_bytes_5.offset(dest_pixel_stride as isize);
ix_5 = (ix_5 as u32).wrapping_add(3u32)
as u32 as u32
}
row_5 = row_5.wrapping_add(1)
}
}
if !copy_alpha && !clean_alpha {
let mut row_6: u32 = 0 as i32 as u32;
while row_6 < row_count {
let src_row_6: *mut f32 = (*src).pixels.offset(
row_6
.wrapping_add(from_row)
.wrapping_mul((*src).float_stride) as isize,
);
let mut dest_row_bytes_6: *mut u8 = (*dest)
.pixels
.offset(dest_row.wrapping_add(row_6).wrapping_mul(dest_row_stride) as isize)
.offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
let mut ix_6: u32 =
from_col.wrapping_mul(3u32);
while ix_6 < srcitems {
*dest_row_bytes_6.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_6.offset(ix_6 as isize),
);
*dest_row_bytes_6.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_6
.offset(ix_6.wrapping_add(1u32)
as isize),
);
*dest_row_bytes_6.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_6
.offset(ix_6.wrapping_add(2u32)
as isize),
);
dest_row_bytes_6 = dest_row_bytes_6.offset(dest_pixel_stride as isize);
ix_6 = (ix_6 as u32).wrapping_add(3u32)
as u32 as u32
}
row_6 = row_6.wrapping_add(1)
}
}
if !copy_alpha && clean_alpha {
let mut row_7: u32 = 0 as i32 as u32;
while row_7 < row_count {
let src_row_7: *mut f32 = (*src).pixels.offset(
row_7
.wrapping_add(from_row)
.wrapping_mul((*src).float_stride) as isize,
);
let mut dest_row_bytes_7: *mut u8 = (*dest)
.pixels
.offset(dest_row.wrapping_add(row_7).wrapping_mul(dest_row_stride) as isize)
.offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
let mut ix_7: u32 =
from_col.wrapping_mul(3u32);
while ix_7 < srcitems {
*dest_row_bytes_7.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_7.offset(ix_7 as isize),
);
*dest_row_bytes_7.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_7
.offset(ix_7.wrapping_add(1u32)
as isize),
);
*dest_row_bytes_7.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_7
.offset(ix_7.wrapping_add(2u32)
as isize),
);
*dest_row_bytes_7.offset(3 as i32 as isize) =
0xff as i32 as u8;
dest_row_bytes_7 = dest_row_bytes_7.offset(dest_pixel_stride as isize);
ix_7 = (ix_7 as u32).wrapping_add(3u32)
as u32 as u32
}
row_7 = row_7.wrapping_add(1)
}
}
}
if ch == 4 as i32 as u32 {
if copy_alpha && !clean_alpha {
let mut row_8: u32 = 0 as i32 as u32;
while row_8 < row_count {
let src_row_8: *mut f32 = (*src).pixels.offset(
row_8
.wrapping_add(from_row)
.wrapping_mul((*src).float_stride) as isize,
);
let mut dest_row_bytes_8: *mut u8 = (*dest)
.pixels
.offset(dest_row.wrapping_add(row_8).wrapping_mul(dest_row_stride) as isize)
.offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
let mut ix_8: u32 =
from_col.wrapping_mul(4u32);
while ix_8 < srcitems {
*dest_row_bytes_8.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_8.offset(ix_8 as isize),
);
*dest_row_bytes_8.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_8
.offset(ix_8.wrapping_add(1u32)
as isize),
);
*dest_row_bytes_8.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_8
.offset(ix_8.wrapping_add(2u32)
as isize),
);
*dest_row_bytes_8.offset(3 as i32 as isize) = uchar_clamp_ff(
*src_row_8.offset(
ix_8.wrapping_add(3u32) as isize
) * 255.0f32,
);
dest_row_bytes_8 = dest_row_bytes_8.offset(dest_pixel_stride as isize);
ix_8 = (ix_8 as u32).wrapping_add(4u32)
as u32 as u32
}
row_8 = row_8.wrapping_add(1)
}
}
if !copy_alpha && !clean_alpha {
let mut row_9: u32 = 0 as i32 as u32;
while row_9 < row_count {
let src_row_9: *mut f32 = (*src).pixels.offset(
row_9
.wrapping_add(from_row)
.wrapping_mul((*src).float_stride) as isize,
);
let mut dest_row_bytes_9: *mut u8 = (*dest)
.pixels
.offset(dest_row.wrapping_add(row_9).wrapping_mul(dest_row_stride) as isize)
.offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
let mut ix_9: u32 =
from_col.wrapping_mul(4u32);
while ix_9 < srcitems {
*dest_row_bytes_9.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_9.offset(ix_9 as isize),
);
*dest_row_bytes_9.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_9
.offset(ix_9.wrapping_add(1u32)
as isize),
);
*dest_row_bytes_9.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_9
.offset(ix_9.wrapping_add(2u32)
as isize),
);
dest_row_bytes_9 = dest_row_bytes_9.offset(dest_pixel_stride as isize);
ix_9 = (ix_9 as u32).wrapping_add(4u32)
as u32 as u32
}
row_9 = row_9.wrapping_add(1)
}
}
if !copy_alpha && clean_alpha {
let mut row_10: u32 = 0 as i32 as u32;
while row_10 < row_count {
let src_row_10: *mut f32 = (*src).pixels.offset(
row_10
.wrapping_add(from_row)
.wrapping_mul((*src).float_stride) as isize,
);
let mut dest_row_bytes_10: *mut u8 =
(*dest)
.pixels
.offset(dest_row.wrapping_add(row_10).wrapping_mul(dest_row_stride)
as isize)
.offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
let mut ix_10: u32 =
from_col.wrapping_mul(4u32);
while ix_10 < srcitems {
*dest_row_bytes_10.offset(0) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_10.offset(ix_10 as isize),
);
*dest_row_bytes_10.offset(1) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_10
.offset(ix_10.wrapping_add(1u32)
as isize),
);
*dest_row_bytes_10.offset(2) =
flow_colorcontext_floatspace_to_srgb(
colorcontext,
*src_row_10
.offset(ix_10.wrapping_add(2u32)
as isize),
);
*dest_row_bytes_10.offset(3 as i32 as isize) =
0xff as i32 as u8;
dest_row_bytes_10 = dest_row_bytes_10.offset(dest_pixel_stride as isize);
ix_10 = (ix_10 as u32)
.wrapping_add(4u32)
as u32 as u32
}
row_10 = row_10.wrapping_add(1)
}
}
}
}
return true;
}
/// Blends `row_count` rows of the linear-light float bitmap `src` over the
/// corresponding rows of the 8-bit sRGB bitmap `dest` ("over" compositing),
/// writing the blended result back into `dest`.
///
/// * `from_row` / `dest_row` select the first source / destination row;
///   columns `from_col .. from_col + col_count` (clamped to `src.w`) are
///   processed.
/// * `transpose` swaps the roles of row stride and pixel stride so source
///   rows are written as destination columns.
/// * The only caller in this file
///   (`flow_bitmap_float_composite_linear_over_srgb`) invokes this with a
///   4-channel, alpha-premultiplied source; the fixed reads of four source
///   components per pixel below rely on that.
///
/// Always returns true (no failure paths).
unsafe extern "C" fn BitmapFloat_compose_linear_over_srgb(
    _context: *mut flow_c,
    colorcontext: *mut flow_colorcontext_info,
    src: *mut flow_bitmap_float,
    from_row: u32,
    dest: *mut flow_bitmap_bgra,
    dest_row: u32,
    row_count: u32,
    from_col: u32,
    col_count: u32,
    transpose: bool,
) -> bool {
    let dest_bytes_pp: u32 = flow_pixel_format_bytes_per_pixel((*dest).fmt);
    let dest_row_stride: u32 = if transpose as i32 != 0 {
        dest_bytes_pp
    } else {
        (*dest).stride
    };
    let dest_pixel_stride: u32 = if transpose as i32 != 0 {
        (*dest).stride
    } else {
        dest_bytes_pp
    };
    // One past the last source float index (component index, not pixel index)
    // to process on each row.
    let srcitems: u32 = from_col.wrapping_add(col_count).min(
        (*src).w).wrapping_mul((*src).channels
    );
    let ch: u32 = (*src).channels;
    let dest_effective_format: flow_pixel_format = flow_effective_pixel_format(dest);
    let dest_alpha: bool =
        dest_effective_format as u32 == flow_bgra32 as i32 as u32;
    // When dest has no alpha channel, dest_alpha_index falls back to 0 (the
    // blue byte is read instead), but its contribution is zeroed by the
    // coefficient below and the destination is treated as fully opaque
    // (offset 1.0).
    let dest_alpha_index: u8 = if dest_alpha as i32 != 0 {
        3 as i32
    } else {
        0 as i32
    } as u8;
    let dest_alpha_to_float_coeff: f32 = if dest_alpha as i32 != 0 {
        (1.0f32) / 255.0f32
    } else {
        0.0f32
    };
    let dest_alpha_to_float_offset: f32 = if dest_alpha as i32 != 0 {
        0.0f32
    } else {
        1.0f32
    };
    let mut row: u32 = 0 as i32 as u32;
    while row < row_count {
        // const float * const __restrict src_row = src->pixels + (row + from_row) * src->float_stride;
        let src_row: *mut f32 = (*src)
            .pixels
            .offset(row.wrapping_add(from_row).wrapping_mul((*src).float_stride) as isize);
        let mut dest_row_bytes: *mut u8 = (*dest)
            .pixels
            .offset(dest_row.wrapping_add(row).wrapping_mul(dest_row_stride) as isize)
            .offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
        let mut ix: u32 = from_col.wrapping_mul(ch);
        while ix < srcitems {
            let dest_b: u8 = *dest_row_bytes.offset(0);
            let dest_g: u8 = *dest_row_bytes.offset(1);
            let dest_r: u8 = *dest_row_bytes.offset(2);
            let dest_a: u8 = *dest_row_bytes.offset(dest_alpha_index as isize);
            let src_b: f32 =
                *src_row.offset(ix.wrapping_add(0u32) as isize);
            let src_g: f32 =
                *src_row.offset(ix.wrapping_add(1u32) as isize);
            let src_r: f32 =
                *src_row.offset(ix.wrapping_add(2u32) as isize);
            let src_a: f32 =
                *src_row.offset(ix.wrapping_add(3u32) as isize);
            // Remaining destination coverage after the source is laid over it:
            // (1 - src_a) * dest_alpha_as_float.
            let a: f32 = (1.0f32 - src_a)
                * (dest_alpha_to_float_coeff * dest_a as i32 as f32
                    + dest_alpha_to_float_offset);
            // Premultiplied "over": blended = dest_linear * a + src (src is
            // already premultiplied by the caller's contract).
            let b: f32 =
                flow_colorcontext_srgb_to_floatspace(colorcontext, dest_b) * a + src_b;
            let g: f32 =
                flow_colorcontext_srgb_to_floatspace(colorcontext, dest_g) * a + src_g;
            let r: f32 =
                flow_colorcontext_srgb_to_floatspace(colorcontext, dest_r) * a + src_r;
            let final_alpha: f32 = src_a + a;
            // NOTE(review): if src_a and the effective dest alpha are both 0,
            // final_alpha is 0 and the divisions below yield NaN — confirm
            // callers never composite fully-transparent over fully-transparent.
            *dest_row_bytes.offset(0) =
                flow_colorcontext_floatspace_to_srgb(colorcontext, b / final_alpha);
            *dest_row_bytes.offset(1) =
                flow_colorcontext_floatspace_to_srgb(colorcontext, g / final_alpha);
            *dest_row_bytes.offset(2) =
                flow_colorcontext_floatspace_to_srgb(colorcontext, r / final_alpha);
            if dest_alpha {
                *dest_row_bytes.offset(3 as i32 as isize) =
                    uchar_clamp_ff(final_alpha * 255 as i32 as f32)
            }
            // TODO: split out 4 and 3 so compiler can vectorize maybe?
            dest_row_bytes = dest_row_bytes.offset(dest_pixel_stride as isize);
            ix = (ix as u32).wrapping_add(ch) as u32 as u32
        }
        row = row.wrapping_add(1)
    }
    return true;
}
/// Entry point for compositing a linear-float bitmap onto an sRGB `dest`.
///
/// Dispatches on `dest.compositing_mode`:
/// * `blend_with_self` + meaningful 4-channel alpha: requires the source to
///   already be alpha-premultiplied, then alpha-blends via
///   `BitmapFloat_compose_linear_over_srgb`.
/// * `blend_with_matte`: blends the source against `dest.matte_color` first
///   (which leaves it un-premultiplied, so no demultiply afterwards), then
///   copies.
/// * otherwise: demultiplies alpha if present, then overwrites `dest` via
///   `flow_bitmap_float_copy_linear_over_srgb`.
///
/// Returns false (recording an error or callstack entry on `context`) on any
/// failure; true otherwise. The `1699`/`1706`/... literals are the line
/// numbers of the original C source this was translated from.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_composite_linear_over_srgb(
    context: *mut flow_c,
    colorcontext: *mut flow_colorcontext_info,
    src_mut: *mut flow_bitmap_float,
    from_row: u32,
    dest: *mut flow_bitmap_bgra,
    dest_row: u32,
    row_count: u32,
    transpose: bool,
) -> bool {
    // Source width must match the destination's width (height, if transposing).
    if if transpose as i32 != 0 {
        ((*src_mut).w != (*dest).h) as i32
    } else {
        ((*src_mut).w != (*dest).w) as i32
    } != 0
    {
        // TODO: Add more bounds checks
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1699 as i32,
            (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                b"flow_bitmap_float_composite_linear_over_srgb\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    if (*dest).compositing_mode as u32
        == flow_bitmap_compositing_blend_with_self as i32 as u32
        && (*src_mut).alpha_meaningful as i32 != 0
        && (*src_mut).channels == 4 as i32 as u32
    {
        if !(*src_mut).alpha_premultiplied {
            // Something went wrong. We should always have alpha premultiplied.
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Invalid_internal_state,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1706 as i32,
                (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                    b"flow_bitmap_float_composite_linear_over_srgb\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        // Compose
        if !BitmapFloat_compose_linear_over_srgb(
            context,
            colorcontext,
            src_mut,
            from_row,
            dest,
            dest_row,
            row_count,
            0 as i32 as u32,
            (*src_mut).w,
            transpose,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1712 as i32,
                (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                    b"flow_bitmap_float_composite_linear_over_srgb\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
    } else {
        if (*src_mut).channels == 4 as i32 as u32
            && (*src_mut).alpha_meaningful as i32 != 0
        {
            let mut demultiply: bool = (*src_mut).alpha_premultiplied;
            if (*dest).compositing_mode as u32
                == flow_bitmap_compositing_blend_with_matte as i32 as u32
            {
                if !flow_bitmap_float_blend_matte(
                    context,
                    colorcontext,
                    src_mut,
                    from_row,
                    row_count,
                    (*dest).matte_color.as_mut_ptr(),
                ) {
                    flow_context_add_to_callstack(
                        context,
                        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                        1722 as i32,
                        (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                            b"flow_bitmap_float_composite_linear_over_srgb\x00",
                        ))
                        .as_ptr(),
                    );
                    return false;
                }
                // Matte blending leaves the pixels un-premultiplied.
                demultiply = false
            }
            if demultiply {
                // Demultiply before copy
                if !flow_bitmap_float_demultiply_alpha(context, src_mut, from_row, row_count) {
                    flow_context_add_to_callstack(
                        context,
                        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                        1730 as i32,
                        (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                            b"flow_bitmap_float_composite_linear_over_srgb\x00",
                        ))
                        .as_ptr(),
                    );
                    return false;
                }
            }
        }
        // Copy/overwrite
        if !flow_bitmap_float_copy_linear_over_srgb(
            context,
            colorcontext,
            src_mut,
            from_row,
            dest,
            dest_row,
            row_count,
            0 as i32 as u32,
            (*src_mut).w,
            transpose,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1738 as i32,
                (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                    b"flow_bitmap_float_composite_linear_over_srgb\x00",
                ))
                .as_ptr(),
            ); // Don't access rows past the end of the bitmap
            return false;
        }
    } // This algorithm can't handle padding, if present
    return true;
}
/// Converts rows `start_row .. start_row + row_count` of `bit` from linear
/// RGB to Luv, in place, by calling `linear_to_luv` over the rows' floats.
///
/// Requires the selected rows to be contiguous in memory
/// (`w * channels == float_stride`), since the conversion walks a single
/// flat span of floats with no per-row padding skip.
/// Returns false with an error recorded on `context` if the row range is out
/// of bounds or the bitmap has row padding.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_linear_to_luv_rows(
    context: *mut flow_c,
    bit: *mut flow_bitmap_float,
    start_row: u32,
    row_count: u32,
) -> bool {
    if !(start_row.wrapping_add(row_count) <= (*bit).h) {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1751 as i32,
            (*::std::mem::transmute::<&[u8; 37], &[libc::c_char; 37]>(
                b"flow_bitmap_float_linear_to_luv_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    if (*bit).w.wrapping_mul((*bit).channels) != (*bit).float_stride {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1755 as i32,
            (*::std::mem::transmute::<&[u8; 37], &[libc::c_char; 37]>(
                b"flow_bitmap_float_linear_to_luv_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    let start_at: *mut f32 = (*bit)
        .pixels
        .offset((*bit).float_stride.wrapping_mul(start_row) as isize);
    let end_at: *const f32 = (*bit).pixels.offset(
        (*bit)
            .float_stride
            .wrapping_mul(start_row.wrapping_add(row_count)) as isize,
    );
    // NOTE(review): the pointer advances one float per iteration; if
    // linear_to_luv reads/writes a multi-component pixel this converts
    // overlapping triples — confirm the intended stride matches the
    // original C.
    let mut pix: *mut f32 = start_at;
    while pix < end_at as *mut f32 {
        linear_to_luv(pix);
        pix = pix.offset(1)
    }
    return true;
}
/// Converts rows `start_row .. start_row + row_count` of `bit` from Luv back
/// to linear RGB, in place — the inverse of
/// `flow_bitmap_float_linear_to_luv_rows`.
///
/// Requires the selected rows to be contiguous in memory
/// (`w * channels == float_stride`).
/// Returns false with an error recorded on `context` if the row range is out
/// of bounds or the bitmap has row padding.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_luv_to_linear_rows(
    context: *mut flow_c,
    bit: *mut flow_bitmap_float,
    start_row: u32,
    row_count: u32,
) -> bool {
    if !(start_row.wrapping_add(row_count) <= (*bit).h) {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1772 as i32,
            (*::std::mem::transmute::<&[u8; 37], &[libc::c_char; 37]>(
                b"flow_bitmap_float_luv_to_linear_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    if (*bit).w.wrapping_mul((*bit).channels) != (*bit).float_stride {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1776 as i32,
            (*::std::mem::transmute::<&[u8; 37], &[libc::c_char; 37]>(
                b"flow_bitmap_float_luv_to_linear_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    let start_at: *mut f32 = (*bit)
        .pixels
        .offset((*bit).float_stride.wrapping_mul(start_row) as isize);
    let end_at: *const f32 = (*bit).pixels.offset(
        (*bit)
            .float_stride
            .wrapping_mul(start_row.wrapping_add(row_count)) as isize,
    );
    // NOTE(review): advances one float per iteration (see the matching note
    // in flow_bitmap_float_linear_to_luv_rows) — confirm intended stride.
    let mut pix: *mut f32 = start_at;
    while pix < end_at as *mut f32 {
        luv_to_linear(pix);
        pix = pix.offset(1)
    }
    return true;
}
/// Applies a 5x4 color matrix `m` to rows `row .. row + count` of the 8-bit
/// bitmap `bmp`, in place.
///
/// Rows of `m` are per-input-channel weight vectors: output channel `c` is
/// `m[0][c]*R + m[1][c]*G + m[2][c]*B (+ m[3][c]*A for 4-byte pixels)
/// + m[4][c]*255` (the translation row, pre-scaled to byte range below).
/// Pixels are stored B,G,R(,A), hence `data[2]` = R, `data[1]` = G,
/// `data[0]` = B. Results are clamped to 0..=255 by `uchar_clamp_ff`.
/// The row range is clamped to the bitmap height.
/// Returns false with an error recorded on `context` for pixel formats other
/// than 3 or 4 bytes per pixel.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_apply_color_matrix(
    context: *mut flow_c,
    bmp: *mut flow_bitmap_bgra,
    row: u32,
    count: u32,
    m: *const *mut f32,
) -> bool {
    let stride: u32 = (*bmp).stride;
    let ch: u32 = flow_pixel_format_bytes_per_pixel((*bmp).fmt);
    let w: u32 = (*bmp).w;
    let h: u32 = row.wrapping_add(count).min((*bmp).h);
    // Translation row m[4], scaled from 0..1 to byte range once up front.
    let m40: f32 =
        *(*m.offset(4 as i32 as isize)).offset(0) * 255.0f32;
    let m41: f32 =
        *(*m.offset(4 as i32 as isize)).offset(1) * 255.0f32;
    let m42: f32 =
        *(*m.offset(4 as i32 as isize)).offset(2) * 255.0f32;
    let m43: f32 =
        *(*m.offset(4 as i32 as isize)).offset(3 as i32 as isize) * 255.0f32;
    if ch == 4 as i32 as u32 {
        let mut y: u32 = row;
        while y < h {
            let mut x: u32 = 0 as i32 as u32;
            while x < w {
                let data: *mut u8 = (*bmp)
                    .pixels
                    .offset(stride.wrapping_mul(y) as isize)
                    .offset(x.wrapping_mul(ch) as isize);
                let r: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(0)
                        * *data.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(0)
                            * *data.offset(1) as i32
                                as f32
                        + *(*m.offset(2)).offset(0)
                            * *data.offset(0) as i32
                                as f32
                        + *(*m.offset(3 as i32 as isize)).offset(0)
                            * *data.offset(3 as i32 as isize) as i32
                                as f32
                        + m40,
                );
                let g: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(1)
                        * *data.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(1)
                            * *data.offset(1) as i32
                                as f32
                        + *(*m.offset(2)).offset(1)
                            * *data.offset(0) as i32
                                as f32
                        + *(*m.offset(3 as i32 as isize)).offset(1)
                            * *data.offset(3 as i32 as isize) as i32
                                as f32
                        + m41,
                );
                let b: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(2)
                        * *data.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(2)
                            * *data.offset(1) as i32
                                as f32
                        + *(*m.offset(2)).offset(2)
                            * *data.offset(0) as i32
                                as f32
                        + *(*m.offset(3 as i32 as isize)).offset(2)
                            * *data.offset(3 as i32 as isize) as i32
                                as f32
                        + m42,
                );
                let a: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(3 as i32 as isize)
                        * *data.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(3 as i32 as isize)
                            * *data.offset(1) as i32
                                as f32
                        + *(*m.offset(2)).offset(3 as i32 as isize)
                            * *data.offset(0) as i32
                                as f32
                        + *(*m.offset(3 as i32 as isize)).offset(3 as i32 as isize)
                            * *data.offset(3 as i32 as isize) as i32
                                as f32
                        + m43,
                );
                // All four outputs were computed from the old values above, so
                // writing back in place is safe.
                let newdata: *mut u8 = (*bmp)
                    .pixels
                    .offset(stride.wrapping_mul(y) as isize)
                    .offset(x.wrapping_mul(ch) as isize);
                *newdata.offset(0) = b;
                *newdata.offset(1) = g;
                *newdata.offset(2) = r;
                *newdata.offset(3 as i32 as isize) = a;
                x = x.wrapping_add(1)
            }
            y = y.wrapping_add(1)
        }
    } else if ch == 3 as i32 as u32 {
        // 3-byte pixels: same weighting, but the alpha row m[3] is skipped.
        let mut y_0: u32 = row;
        while y_0 < h {
            let mut x_0: u32 = 0 as i32 as u32;
            while x_0 < w {
                let data_0: *mut libc::c_uchar = (*bmp)
                    .pixels
                    .offset(stride.wrapping_mul(y_0) as isize)
                    .offset(x_0.wrapping_mul(ch) as isize);
                let r_0: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(0)
                        * *data_0.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(0)
                            * *data_0.offset(1) as i32
                                as f32
                        + *(*m.offset(2)).offset(0)
                            * *data_0.offset(0) as i32
                                as f32
                        + m40,
                );
                let g_0: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(1)
                        * *data_0.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(1)
                            * *data_0.offset(1) as i32
                                as f32
                        + *(*m.offset(2)).offset(1)
                            * *data_0.offset(0) as i32
                                as f32
                        + m41,
                );
                let b_0: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(2)
                        * *data_0.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(2)
                            * *data_0.offset(1) as i32
                                as f32
                        + *(*m.offset(2)).offset(2)
                            * *data_0.offset(0) as i32
                                as f32
                        + m42,
                );
                let newdata_0: *mut u8 = (*bmp)
                    .pixels
                    .offset(stride.wrapping_mul(y_0) as isize)
                    .offset(x_0.wrapping_mul(ch) as isize);
                *newdata_0.offset(0) = b_0;
                *newdata_0.offset(1) = g_0;
                *newdata_0.offset(2) = r_0;
                x_0 = x_0.wrapping_add(1)
            }
            y_0 = y_0.wrapping_add(1)
        }
    } else {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Unsupported_pixel_format,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1838 as i32,
            (*::std::mem::transmute::<&[u8; 36], &[libc::c_char; 36]>(
                b"flow_bitmap_bgra_apply_color_matrix\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    return true;
}
// note: this file isn't exercised by test suite
/// Applies a 5x4 color matrix `m` to rows `row .. row + count` of the float
/// bitmap `bmp`, in place — the floatspace counterpart of
/// `flow_bitmap_bgra_apply_color_matrix` (no clamping, no 255 scaling of the
/// translation row).
///
/// Output channel `c` is `m[0][c]*R + m[1][c]*G + m[2][c]*B (+ m[3][c]*A
/// when channels == 4) + m[4][c]`. Components are stored B,G,R(,A), hence
/// `data[2]` = R, `data[1]` = G, `data[0]` = B. The row range is clamped to
/// the bitmap height.
/// Returns false with an error recorded on `context` when `channels` is
/// neither 3 nor 4.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_apply_color_matrix(
    context: *mut flow_c,
    bmp: *mut flow_bitmap_float,
    row: u32,
    count: u32,
    m: *mut *mut f32,
) -> bool {
    let stride: u32 = (*bmp).float_stride;
    let ch: u32 = (*bmp).channels;
    let w: u32 = (*bmp).w;
    let h: u32 = row.wrapping_add(count).min((*bmp).h);
    match ch {
        4 => {
            let mut y: u32 = row;
            while y < h {
                let mut x: u32 = 0 as i32 as u32;
                while x < w {
                    let data: *mut f32 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y) as isize)
                        .offset(x.wrapping_mul(ch) as isize);
                    let r: f32 = *(*m.offset(0))
                        .offset(0)
                        * *data.offset(2)
                        + *(*m.offset(1)).offset(0)
                            * *data.offset(1)
                        + *(*m.offset(2)).offset(0)
                            * *data.offset(0)
                        + *(*m.offset(3 as i32 as isize)).offset(0)
                            * *data.offset(3 as i32 as isize)
                        + *(*m.offset(4 as i32 as isize)).offset(0);
                    let g: f32 = *(*m.offset(0))
                        .offset(1)
                        * *data.offset(2)
                        + *(*m.offset(1)).offset(1)
                            * *data.offset(1)
                        + *(*m.offset(2)).offset(1)
                            * *data.offset(0)
                        + *(*m.offset(3 as i32 as isize)).offset(1)
                            * *data.offset(3 as i32 as isize)
                        + *(*m.offset(4 as i32 as isize)).offset(1);
                    let b: f32 = *(*m.offset(0))
                        .offset(2)
                        * *data.offset(2)
                        + *(*m.offset(1)).offset(2)
                            * *data.offset(1)
                        + *(*m.offset(2)).offset(2)
                            * *data.offset(0)
                        + *(*m.offset(3 as i32 as isize)).offset(2)
                            * *data.offset(3 as i32 as isize)
                        + *(*m.offset(4 as i32 as isize)).offset(2);
                    let a: f32 = *(*m.offset(0))
                        .offset(3 as i32 as isize)
                        * *data.offset(2)
                        + *(*m.offset(1)).offset(3 as i32 as isize)
                            * *data.offset(1)
                        + *(*m.offset(2)).offset(3 as i32 as isize)
                            * *data.offset(0)
                        + *(*m.offset(3 as i32 as isize)).offset(3 as i32 as isize)
                            * *data.offset(3 as i32 as isize)
                        + *(*m.offset(4 as i32 as isize)).offset(3 as i32 as isize);
                    // All outputs computed before writing back in place.
                    let newdata: *mut f32 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y) as isize)
                        .offset(x.wrapping_mul(ch) as isize);
                    *newdata.offset(0) = b;
                    *newdata.offset(1) = g;
                    *newdata.offset(2) = r;
                    *newdata.offset(3 as i32 as isize) = a;
                    x = x.wrapping_add(1)
                }
                y = y.wrapping_add(1)
            }
            return true;
        }
        3 => {
            // 3-channel pixels: the alpha row m[3] is skipped.
            let mut y_0: u32 = row;
            while y_0 < h {
                let mut x_0: u32 = 0 as i32 as u32;
                while x_0 < w {
                    let data_0: *mut f32 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y_0) as isize)
                        .offset(x_0.wrapping_mul(ch) as isize);
                    let r_0: f32 = *(*m.offset(0))
                        .offset(0)
                        * *data_0.offset(2)
                        + *(*m.offset(1)).offset(0)
                            * *data_0.offset(1)
                        + *(*m.offset(2)).offset(0)
                            * *data_0.offset(0)
                        + *(*m.offset(4 as i32 as isize)).offset(0);
                    let g_0: f32 = *(*m.offset(0))
                        .offset(1)
                        * *data_0.offset(2)
                        + *(*m.offset(1)).offset(1)
                            * *data_0.offset(1)
                        + *(*m.offset(2)).offset(1)
                            * *data_0.offset(0)
                        + *(*m.offset(4 as i32 as isize)).offset(1);
                    let b_0: f32 = *(*m.offset(0))
                        .offset(2)
                        * *data_0.offset(2)
                        + *(*m.offset(1)).offset(2)
                            * *data_0.offset(1)
                        + *(*m.offset(2)).offset(2)
                            * *data_0.offset(0)
                        + *(*m.offset(4 as i32 as isize)).offset(2);
                    let newdata_0: *mut f32 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y_0) as isize)
                        .offset(x_0.wrapping_mul(ch) as isize);
                    *newdata_0.offset(0) = b_0;
                    *newdata_0.offset(1) = g_0;
                    *newdata_0.offset(2) = r_0;
                    x_0 = x_0.wrapping_add(1)
                }
                y_0 = y_0.wrapping_add(1)
            }
            return true;
        }
        _ => {
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Unsupported_pixel_format,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1893 as i32,
                (*::std::mem::transmute::<&[u8; 37], &[libc::c_char; 37]>(
                    b"flow_bitmap_float_apply_color_matrix\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
    };
}
/// Accumulates per-channel histograms over all rows of an 8-bit BGR(A)
/// bitmap.
///
/// `histograms` must point to `histogram_count * histogram_size_per_channel`
/// zero-initialised `u64` bins; `histogram_size_per_channel` must be 256
/// (enforced below, for speed). Supported `histogram_count` values:
/// * 1 — a single luminosity histogram, using integer weights 306/601/117
///   (which sum to 1024, hence the extra `>> 10` when indexing).
/// * 3 — separate R, G and B histograms, stored in that order.
/// * 2 — luminosity plus a saturation-like histogram of the larger
///   adjacent-channel difference, clamped to 255.
/// On success, `*pixels_sampled` receives the number of pixels visited.
///
/// Returns false (recording an error on `context`) for unsupported
/// histogram sizes, histogram counts, or pixel formats.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_populate_histogram(
    context: *mut flow_c,
    bmp: *const flow_bitmap_bgra,
    histograms: *mut u64,
    histogram_size_per_channel: u32,
    histogram_count: u32,
    pixels_sampled: *mut u64,
) -> bool {
    let row: u32 = 0;
    let count: u32 = (*bmp).h;
    let stride: u32 = (*bmp).stride;
    let ch: u32 = flow_pixel_format_bytes_per_pixel((*bmp).fmt);
    let w: u32 = (*bmp).w;
    let h: u32 = (row.wrapping_add(count)).min((*bmp).h);
    if histogram_size_per_channel != 256 {
        // We're restricting it to this for speed
        FLOW_error(context, flow_status_code::Invalid_argument, "flow_bitmap_bgra_populate_histogram");
        return false;
    }
    let shift = 0; // 8 - intlog2(histogram_size_per_channel);
    if ch == 4 || ch == 3 {
        if histogram_count == 1 {
            let mut y: u32 = row;
            while y < h {
                let mut x: u32 = 0;
                while x < w {
                    let data: *const u8 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y) as isize)
                        .offset(x.wrapping_mul(ch) as isize);
                    // Weighted luminosity (BGRA layout: data[2]=R, data[1]=G,
                    // data[0]=B). The weights sum to 1024, so shift the
                    // 18-bit sum down by an extra 10 bits to index 0..=255;
                    // without the `+ 10` the index would run far past the end
                    // of the histogram buffer.
                    let bin = &mut *histograms.offset(
                        (306 as i32
                            * *data.offset(2) as i32
                            + 601 as i32
                                * *data.offset(1) as i32
                            + 117 as i32
                                * *data.offset(0) as i32
                            >> (shift + 10)) as isize,
                    );
                    *bin = (*bin).wrapping_add(1);
                    x = x.wrapping_add(1)
                }
                y = y.wrapping_add(1)
            }
        } else if histogram_count == 3 {
            let mut y: u32 = row;
            while y < h {
                let mut x: u32 = 0;
                while x < w {
                    let data: *const u8 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y) as isize)
                        .offset(x.wrapping_mul(ch) as isize);
                    // One histogram per channel: R, then G, then B.
                    let r_bin = &mut *histograms.offset(
                        (*data.offset(2) as i32 >> shift) as isize,
                    );
                    *r_bin = (*r_bin).wrapping_add(1);
                    let g_bin = &mut *histograms.offset(
                        ((*data.offset(1) as i32 >> shift) as u32)
                            .wrapping_add(histogram_size_per_channel)
                            as isize,
                    );
                    *g_bin = (*g_bin).wrapping_add(1);
                    let b_bin = &mut *histograms.offset(
                        ((*data.offset(0) as i32 >> shift) as u32)
                            .wrapping_add(
                                (2u32)
                                    .wrapping_mul(histogram_size_per_channel),
                            ) as isize,
                    );
                    *b_bin = (*b_bin).wrapping_add(1);
                    x = x.wrapping_add(1)
                }
                y = y.wrapping_add(1)
            }
        } else if histogram_count == 2 {
            let mut y_1: u32 = row;
            while y_1 < h {
                let mut x_1: u32 = 0 as i32 as u32;
                while x_1 < w {
                    let data_1: *const u8 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y_1) as isize)
                        .offset(x_1.wrapping_mul(ch) as isize);
                    // Calculate luminosity and saturation
                    // Luminosity bin: same weighting (and extra >> 10) as the
                    // single-histogram case above.
                    let luma_bin = &mut *histograms.offset(
                        (306 as i32
                            * *data_1.offset(2) as i32
                            + 601 as i32
                                * *data_1.offset(1) as i32
                            + 117 as i32
                                * *data_1.offset(0) as i32
                            >> (shift + 10)) as isize,
                    );
                    *luma_bin = (*luma_bin).wrapping_add(1);
                    // Saturation-like bin: the larger adjacent-channel
                    // difference, clamped to 255 with `min` (the previous
                    // `int_max(255, ..)` pinned every pixel to the last bin,
                    // since the differences never exceed 255).
                    let sat_bin =
                        &mut *histograms.offset(histogram_size_per_channel.wrapping_add(
                            (int_max(
                                (*data_1.offset(2) as i32
                                    - *data_1.offset(1) as i32).abs(),
                                (*data_1.offset(1) as i32
                                    - *data_1.offset(0) as i32).abs(),
                            )
                            .min(255 as i32)
                                >> shift) as u32,
                        ) as isize);
                    *sat_bin = (*sat_bin).wrapping_add(1);
                    x_1 = x_1.wrapping_add(1)
                }
                y_1 = y_1.wrapping_add(1)
            }
        } else {
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Invalid_internal_state,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1950 as i32,
                (*::std::mem::transmute::<&[u8; 36], &[libc::c_char; 36]>(
                    b"flow_bitmap_bgra_populate_histogram\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        *pixels_sampled = h.wrapping_sub(row).wrapping_mul(w) as u64
    } else {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Unsupported_pixel_format,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1956 as i32,
            (*::std::mem::transmute::<&[u8; 36], &[libc::c_char; 36]>(
                b"flow_bitmap_bgra_populate_histogram\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    return true;
}
// Gamma correction http://www.4p8.com/eric.brasseur/gamma.html#formulas
#[no_mangle]
pub unsafe extern "C" fn flow_colorcontext_init(
    _context: *mut flow_c,
    colorcontext: *mut flow_colorcontext_info,
    space: flow_working_floatspace,
    a: f32,
    _b: f32,
    _c: f32,
) {
    // Record which transfer curve this colorcontext applies when mapping
    // 8-bit sRGB samples into the working float space, then precompute the
    // byte -> float lookup table.
    {
        let ctx = &mut *colorcontext;
        ctx.floatspace = space;
        ctx.apply_srgb = space & flow_working_floatspace_linear > 0;
        ctx.apply_gamma = space & flow_working_floatspace_gamma > 0;
        /* Code guarded by #ifdef EXPOSE_SIGMOID not translated */
        if ctx.apply_gamma {
            // `a` carries the gamma exponent; cache its reciprocal for encoding.
            ctx.gamma = a;
            ctx.gamma_inverse = (1.0f64 / f64::from(a)) as f32;
        }
    }
    // Fill all 256 entries of the cache consulted by
    // flow_colorcontext_srgb_to_floatspace.
    for byte in 0u32..256 {
        (*colorcontext).byte_to_float[byte as usize] =
            flow_colorcontext_srgb_to_floatspace_uncached(colorcontext, byte as u8);
    }
}
// rustfmt graphics.rs
#![allow(
dead_code,
mutable_transmutes,
non_camel_case_types,
non_snake_case,
non_upper_case_globals,
unused_assignments
)]
#[cfg(target_arch = "x86")]
pub use std::arch::x86::{
__m128, _mm_add_ps, _mm_loadu_ps, _mm_movehl_ps, _mm_movelh_ps, _mm_mul_ps, _mm_set1_ps,
_mm_setr_ps, _mm_setzero_ps, _mm_storeu_ps, _mm_unpackhi_ps, _mm_unpacklo_ps,
};
#[cfg(target_arch = "x86_64")]
pub use std::arch::x86_64::{
__m128, _mm_add_ps, _mm_loadu_ps, _mm_movehl_ps, _mm_movelh_ps, _mm_mul_ps, _mm_set1_ps,
_mm_setr_ps, _mm_setzero_ps, _mm_storeu_ps, _mm_unpackhi_ps, _mm_unpacklo_ps,
};
use std::f64;
use std::ffi::CString;
// Dimensions and pixel layout of a single frame as reported by a decoder.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_decoder_frame_info {
    pub w: i32, // frame width in pixels
    pub h: i32, // frame height in pixels
    pub format: flow_pixel_format,
}
// FFI surface consumed by this module: helpers implemented elsewhere in the
// library, plus libm math routines left as extern shims by the C-to-Rust
// translation.
extern "C" {
    // Pixel-format introspection helpers.
    #[no_mangle]
    fn flow_pixel_format_bytes_per_pixel(format: flow_pixel_format) -> u32;
    #[no_mangle]
    fn flow_effective_pixel_format(b: *mut flow_bitmap_bgra) -> flow_pixel_format;
    #[no_mangle]
    fn flow_pixel_format_channels(format: flow_pixel_format) -> u32;
    #[no_mangle]
    fn flow_snprintf(s: *mut libc::c_char, n: usize, fmt: *const libc::c_char, _: ...) -> i32;
    // Context-tracked heap management (allocations are owned by a flow_c).
    #[no_mangle]
    fn flow_set_owner(c: *mut flow_c, thing: *mut libc::c_void, owner: *mut libc::c_void) -> bool;
    #[no_mangle]
    fn flow_context_calloc(
        c: *mut flow_c,
        instance_count: usize,
        instance_size: usize,
        destructor: flow_destructor_function,
        owner: *mut libc::c_void,
        file: *const libc::c_char,
        line: i32,
    ) -> *mut libc::c_void;
    #[no_mangle]
    fn flow_context_malloc(
        c: *mut flow_c,
        byte_count: usize,
        destructor: flow_destructor_function,
        owner: *mut libc::c_void,
        file: *const libc::c_char,
        line: i32,
    ) -> *mut libc::c_void;
    #[no_mangle]
    fn flow_deprecated_free(
        c: *mut flow_c,
        pointer: *mut libc::c_void,
        file: *const libc::c_char,
        line: i32,
    );
    #[no_mangle]
    fn flow_destroy(
        c: *mut flow_c,
        pointer: *mut libc::c_void,
        file: *const libc::c_char,
        line: i32,
    ) -> bool;
    // Error reporting / callstack recording on the context.
    #[no_mangle]
    fn flow_context_set_error_get_message_buffer(
        c: *mut flow_c,
        code: flow_status_code,
        file: *const libc::c_char,
        line: i32,
        function_name: *const libc::c_char,
    ) -> *mut libc::c_char;
    #[no_mangle]
    fn flow_context_add_to_callstack(
        c: *mut flow_c,
        file: *const libc::c_char,
        line: i32,
        function_name: *const libc::c_char,
    ) -> bool;
    // Profiling hooks.
    #[no_mangle]
    fn flow_context_profiler_start(
        c: *mut flow_c,
        name: *const libc::c_char,
        allow_recursion: bool,
    );
    #[no_mangle]
    fn flow_context_profiler_stop(
        c: *mut flow_c,
        name: *const libc::c_char,
        assert_started: bool,
        stop_children: bool,
    );
    // libm shims retained from the original C (j1 is the order-one Bessel
    // function of the first kind; the rest mirror <math.h>).
    #[no_mangle]
    fn pow(_: f64, _: f64) -> f64;
    #[no_mangle]
    fn flow_bitmap_bgra_create_header(c: *mut flow_c, sx: i32, sy: i32) -> *mut flow_bitmap_bgra;
    #[no_mangle]
    fn memcpy(_: *mut libc::c_void, _: *const libc::c_void, _: u64) -> *mut libc::c_void;
    #[no_mangle]
    fn fabs(_: f64) -> f64;
    #[no_mangle]
    fn j1(_: f64) -> f64;
    #[no_mangle]
    fn fmin(_: f64, _: f64) -> f64;
    #[no_mangle]
    fn ceil(_: f64) -> f64;
    #[no_mangle]
    fn floor(_: f64) -> f64;
    #[no_mangle]
    fn fmax(_: f64, _: f64) -> f64;
    #[no_mangle]
    fn sqrt(_: f64) -> f64;
    #[no_mangle]
    fn exp(_: f64) -> f64;
    #[no_mangle]
    fn memset(_: *mut libc::c_void, _: i32, _: u64) -> *mut libc::c_void;
    // Float-plane bitmap constructors.
    #[no_mangle]
    fn flow_bitmap_float_create(
        c: *mut flow_c,
        sx: i32,
        sy: i32,
        channels: i32,
        zeroed: bool,
    ) -> *mut flow_bitmap_float;
    #[no_mangle]
    fn flow_bitmap_float_create_header(
        c: *mut flow_c,
        sx: i32,
        sy: i32,
        channels: i32,
    ) -> *mut flow_bitmap_float;
}
// LittleCMS-style chromaticity types used to describe decoder color metadata.
pub type cmsFloat64Number = f64;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct cmsCIExyY {
    pub x: cmsFloat64Number,
    pub y: cmsFloat64Number,
    pub Y: cmsFloat64Number,
}
#[derive(Copy, Clone)]
#[repr(C)]
pub struct cmsCIExyYTRIPLE {
    pub Red: cmsCIExyY,
    pub Green: cmsCIExyY,
    pub Blue: cmsCIExyY,
}
// Whether a codec instance is used for decoding (input) or encoding (output).
pub type FLOW_DIRECTION = u32;
pub const FLOW_INPUT: FLOW_DIRECTION = 4;
pub const FLOW_OUTPUT: FLOW_DIRECTION = 8;
// Stable error codes shared with the C API surface.
#[repr(u32)]
#[derive(Copy, Clone)]
pub enum flow_status_code {
    No_Error = 0,
    Out_of_memory = 10,
    IO_error = 20,
    Invalid_internal_state = 30,
    Panic = 31,
    Not_implemented = 40,
    Invalid_argument = 50,
    Null_argument = 51,
    Invalid_dimensions = 52,
    Unsupported_pixel_format = 53,
    Item_does_not_exist = 54,
    Image_decoding_failed = 60,
    Image_encoding_failed = 61,
    ErrorReportingInconsistency = 90,
    First_rust_error = 200,
    Other_error = 1024,
    // ___Last_library_error,
    First_user_defined_error = 1025,
    Last_user_defined_error = 2147483647,
}
// Resampling filter identifiers; mapped to concrete kernels by
// InterpolationDetails_create_from_internal.
pub type flow_interpolation_filter = u32;
pub const flow_interpolation_filter_NCubicSharp: flow_interpolation_filter = 30;
pub const flow_interpolation_filter_NCubic: flow_interpolation_filter = 29;
pub const flow_interpolation_filter_MitchellFast: flow_interpolation_filter = 28;
pub const flow_interpolation_filter_Fastest: flow_interpolation_filter = 27;
pub const flow_interpolation_filter_CatmullRomFastSharp: flow_interpolation_filter = 26;
pub const flow_interpolation_filter_CatmullRomFast: flow_interpolation_filter = 25;
pub const flow_interpolation_filter_Box: flow_interpolation_filter = 24;
pub const flow_interpolation_filter_Linear: flow_interpolation_filter = 23;
pub const flow_interpolation_filter_Triangle: flow_interpolation_filter = 22;
pub const flow_interpolation_filter_RawLanczos2Sharp: flow_interpolation_filter = 21;
pub const flow_interpolation_filter_RawLanczos2: flow_interpolation_filter = 20;
pub const flow_interpolation_filter_RawLanczos3Sharp: flow_interpolation_filter = 19;
pub const flow_interpolation_filter_RawLanczos3: flow_interpolation_filter = 18;
pub const flow_interpolation_filter_Jinc: flow_interpolation_filter = 17;
pub const flow_interpolation_filter_Hermite: flow_interpolation_filter = 16;
pub const flow_interpolation_filter_CubicBSpline: flow_interpolation_filter = 15;
pub const flow_interpolation_filter_Mitchell: flow_interpolation_filter = 14;
pub const flow_interpolation_filter_CatmullRom: flow_interpolation_filter = 13;
pub const flow_interpolation_filter_CubicSharp: flow_interpolation_filter = 12;
pub const flow_interpolation_filter_Cubic: flow_interpolation_filter = 11;
pub const flow_interpolation_filter_CubicFast: flow_interpolation_filter = 10;
pub const flow_interpolation_filter_Lanczos2Sharp: flow_interpolation_filter = 9;
pub const flow_interpolation_filter_Lanczos2: flow_interpolation_filter = 8;
pub const flow_interpolation_filter_LanczosSharp: flow_interpolation_filter = 7;
pub const flow_interpolation_filter_Lanczos: flow_interpolation_filter = 6;
pub const flow_interpolation_filter_GinsengSharp: flow_interpolation_filter = 5;
pub const flow_interpolation_filter_Ginseng: flow_interpolation_filter = 4;
pub const flow_interpolation_filter_RobidouxSharp: flow_interpolation_filter = 3;
pub const flow_interpolation_filter_Robidoux: flow_interpolation_filter = 2;
pub const flow_interpolation_filter_RobidouxFast: flow_interpolation_filter = 1;
// Pixel storage formats (values match the C enum).
pub type flow_pixel_format = u32;
pub const flow_gray8: flow_pixel_format = 1;
pub const flow_bgr32: flow_pixel_format = 70;
pub const flow_bgra32: flow_pixel_format = 4;
pub const flow_bgr24: flow_pixel_format = 3;
// How a bitmap is composited onto its destination.
pub type flow_bitmap_compositing_mode = u32;
pub const flow_bitmap_compositing_blend_with_matte: flow_bitmap_compositing_mode = 2;
pub const flow_bitmap_compositing_blend_with_self: flow_bitmap_compositing_mode = 1;
pub const flow_bitmap_compositing_replace_self: flow_bitmap_compositing_mode = 0;
// Bit flags selecting the working float colorspace (linear and gamma are
// tested with `&` in flow_colorcontext_init).
pub type flow_working_floatspace = u32;
pub const flow_working_floatspace_gamma: flow_working_floatspace = 2;
pub const flow_working_floatspace_linear: flow_working_floatspace = 1;
pub const flow_working_floatspace_as_is: flow_working_floatspace = 0;
pub const flow_working_floatspace_srgb: flow_working_floatspace = 0;
// I/O capability flags (read=1, write=2, seek=4 combined).
pub type flow_io_mode = u32;
pub const flow_io_mode_read_write_seekable: flow_io_mode = 15;
pub const flow_io_mode_write_seekable: flow_io_mode = 6;
pub const flow_io_mode_read_seekable: flow_io_mode = 5;
pub const flow_io_mode_write_sequential: flow_io_mode = 2;
pub const flow_io_mode_read_sequential: flow_io_mode = 1;
pub const flow_io_mode_null: flow_io_mode = 0;
// Root library context: owns the heap, allocation tracking, profiling log and
// the current error state.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_context {
    pub codec_set: *mut flow_context_codec_set,
    pub underlying_heap: flow_heap,
    pub object_tracking: flow_objtracking_info,
    pub log: flow_profiling_log,
    pub error: flow_error_info,
}
// Error state recorded on a context: status code plus a bounded callstack and
// a fixed-size message buffer.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_error_info {
    pub reason: flow_status_code,
    pub callstack: [flow_error_callstack_line; 8],
    pub callstack_count: i32,
    pub callstack_capacity: i32,
    pub locked: bool,
    pub status_included_in_message: bool,
    pub message: [libc::c_char; 1024],
}
// One file/line/function frame of the recorded error callstack.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_error_callstack_line {
    pub file: *const libc::c_char,
    pub line: i32,
    pub function_name: *const libc::c_char,
}
// Growable log of profiling events.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_profiling_log {
    pub log: *mut flow_profiling_entry,
    pub count: u32,
    pub capacity: u32,
    pub ticks_per_second: i64,
}
// A single timestamped profiling event.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_profiling_entry {
    pub time: i64,
    pub name: *const libc::c_char,
    pub flags: flow_profiling_entry_flags,
}
// Bit flags describing a profiling event (start/stop plus modifiers).
pub type flow_profiling_entry_flags = u32;
pub const flow_profiling_entry_stop_children: flow_profiling_entry_flags = 56;
pub const flow_profiling_entry_stop_assert_started: flow_profiling_entry_flags = 24;
pub const flow_profiling_entry_stop: flow_profiling_entry_flags = 8;
pub const flow_profiling_entry_start_allow_recursion: flow_profiling_entry_flags = 6;
pub const flow_profiling_entry_start: flow_profiling_entry_flags = 2;
// Bookkeeping for every allocation made through the context heap.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_objtracking_info {
    pub allocs: *mut flow_heap_object_record,
    pub next_free_slot: usize,
    pub total_slots: usize,
    pub bytes_allocated_net: usize,
    pub bytes_allocated_gross: usize,
    pub allocations_net: usize,
    pub allocations_gross: usize,
    pub bytes_freed: usize,
    pub allocations_net_peak: usize,
    pub bytes_allocated_net_peak: usize,
}
// One tracked allocation: pointer, size, owner and optional destructor.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_heap_object_record {
    pub ptr: *mut libc::c_void,
    pub bytes: usize,
    pub owner: *mut libc::c_void,
    pub destructor: flow_destructor_function,
    pub destructor_called: bool,
    pub allocated_by: *const libc::c_char,
    pub allocated_by_line: i32,
    pub is_owner: bool,
}
// Optional finalizer invoked when a tracked allocation is destroyed.
pub type flow_destructor_function =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut libc::c_void) -> bool>;
pub type flow_c = flow_context;
// Pluggable heap: function pointers for the allocation primitives.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_heap {
    pub _calloc: flow_heap_calloc_function,
    pub _malloc: flow_heap_malloc_function,
    pub _realloc: flow_heap_realloc_function,
    pub _free: flow_heap_free_function,
    pub _context_terminate: flow_heap_terminate_function,
    pub _private_state: *mut libc::c_void,
}
pub type flow_heap_terminate_function =
    Option<unsafe extern "C" fn(_: *mut flow_context, _: *mut flow_heap) -> ()>;
pub type flow_heap_free_function = Option<
    unsafe extern "C" fn(
        _: *mut flow_context,
        _: *mut flow_heap,
        _: *mut libc::c_void,
        _: *const libc::c_char,
        _: i32,
    ) -> (),
>;
pub type flow_heap_realloc_function = Option<
    unsafe extern "C" fn(
        _: *mut flow_context,
        _: *mut flow_heap,
        _: *mut libc::c_void,
        _: usize,
        _: *const libc::c_char,
        _: i32,
    ) -> *mut libc::c_void,
>;
pub type flow_heap_malloc_function = Option<
    unsafe extern "C" fn(
        _: *mut flow_context,
        _: *mut flow_heap,
        _: usize,
        _: *const libc::c_char,
        _: i32,
    ) -> *mut libc::c_void,
>;
pub type flow_heap_calloc_function = Option<
    unsafe extern "C" fn(
        _: *mut flow_context,
        _: *mut flow_heap,
        _: usize,
        _: usize,
        _: *const libc::c_char,
        _: i32,
    ) -> *mut libc::c_void,
>;
// Set of codecs registered on a context.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_context_codec_set {
    pub codecs: *mut flow_codec_definition,
    pub codecs_count: usize,
}
// vtable describing one codec: identification plus its operation callbacks.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_codec_definition {
    pub codec_id: i64,
    pub initialize: codec_initialize,
    pub get_info: codec_get_info_fn,
    pub get_frame_info: codec_get_frame_info_fn,
    pub set_downscale_hints: codec_set_downscale_hints_fn,
    pub switch_frame: codec_switch_frame_fn,
    pub read_frame: codec_read_frame_fn,
    pub write_frame: codec_write_frame_fn,
    pub stringify: codec_stringify_fn,
    pub name: *const libc::c_char,
    pub preferred_mime_type: *const libc::c_char,
    pub preferred_extension: *const libc::c_char,
}
pub type codec_stringify_fn = Option<
    unsafe extern "C" fn(
        _: *mut flow_c,
        _: *mut libc::c_void,
        _: *mut libc::c_char,
        _: usize,
    ) -> bool,
>;
pub type codec_write_frame_fn = Option<
    unsafe extern "C" fn(
        _: *mut flow_c,
        _: *mut libc::c_void,
        _: *mut flow_bitmap_bgra,
        _: *mut flow_encoder_hints,
    ) -> bool,
>;
// Encoder tuning knobs passed to write_frame.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_encoder_hints {
    pub disable_png_alpha: bool,
    pub zlib_compression_level: i32,
}
// Interleaved 8-bit bitmap. `stride` is the row pitch in bytes (pixel rows are
// addressed as pixels + stride * y elsewhere in this file).
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_bitmap_bgra {
    pub w: u32,
    pub h: u32,
    pub stride: u32,
    pub pixels: *mut libc::c_uchar,
    pub fmt: flow_pixel_format,
    pub matte_color: [u8; 4],
    pub compositing_mode: flow_bitmap_compositing_mode,
}
pub type codec_read_frame_fn = Option<
    unsafe extern "C" fn(
        _: *mut flow_c,
        _: *mut libc::c_void,
        _: *mut flow_bitmap_bgra,
        _: *mut flow_decoder_color_info,
    ) -> bool,
>;
// Color-management metadata extracted from a decoded image (ICC profile bytes
// or gAMA/cHRM primaries, depending on `source`).
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_decoder_color_info {
    pub source: flow_codec_color_profile_source,
    pub profile_buf: *mut u8,
    pub buf_length: usize,
    pub white_point: cmsCIExyY,
    pub primaries: cmsCIExyYTRIPLE,
    pub gamma: f64,
}
pub type flow_codec_color_profile_source = u32;
pub const flow_codec_color_profile_source_sRGB: flow_codec_color_profile_source = 4;
pub const flow_codec_color_profile_source_GAMA_CHRM: flow_codec_color_profile_source = 3;
pub const flow_codec_color_profile_source_ICCP_GRAY: flow_codec_color_profile_source = 2;
pub const flow_codec_color_profile_source_ICCP: flow_codec_color_profile_source = 1;
pub const flow_codec_color_profile_source_null: flow_codec_color_profile_source = 0;
pub type codec_switch_frame_fn =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut libc::c_void, _: usize) -> bool>;
pub type codec_set_downscale_hints_fn = Option<
    unsafe extern "C" fn(
        _: *mut flow_c,
        _: *mut flow_codec_instance,
        _: *mut flow_decoder_downscale_hints,
    ) -> bool,
>;
// Request for in-decoder downscaling (e.g. JPEG IDCT scaling).
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_decoder_downscale_hints {
    pub downscale_if_wider_than: i64,
    pub or_if_taller_than: i64,
    pub downscaled_min_width: i64,
    pub downscaled_min_height: i64,
    pub scale_luma_spatially: bool,
    pub gamma_correct_for_srgb_during_spatial_luma_scaling: bool,
}
// A codec bound to a specific I/O stream, with its private state.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_codec_instance {
    pub io_id: i32,
    pub codec_id: i64,
    pub codec_state: *mut libc::c_void,
    pub io: *mut flow_io,
    pub direction: FLOW_DIRECTION,
}
// Abstract I/O stream: callbacks plus user state.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_io {
    pub context: *mut flow_c,
    pub mode: flow_io_mode,
    pub read_func: flow_io_read_function,
    pub write_func: flow_io_write_function,
    pub position_func: flow_io_position_function,
    pub seek_function: flow_io_seek_function,
    pub dispose_func: flow_destructor_function,
    pub user_data: *mut libc::c_void,
    pub optional_file_length: i64,
}
pub type flow_io_seek_function =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut flow_io, _: i64) -> bool>;
pub type flow_io_position_function =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut flow_io) -> i64>;
pub type flow_io_write_function =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut flow_io, _: *const u8, _: usize) -> i64>;
pub type flow_io_read_function =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut flow_io, _: *mut u8, _: usize) -> i64>;
pub type codec_get_frame_info_fn = Option<
    unsafe extern "C" fn(
        _: *mut flow_c,
        _: *mut libc::c_void,
        _: *mut flow_decoder_frame_info,
    ) -> bool,
>;
pub type codec_get_info_fn = Option<
    unsafe extern "C" fn(_: *mut flow_c, _: *mut libc::c_void, _: *mut flow_decoder_info) -> bool,
>;
// Summary of a decodable image: identity, dimensions and frame count.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_decoder_info {
    pub codec_id: i64,
    pub preferred_mime_type: *const libc::c_char,
    pub preferred_extension: *const libc::c_char,
    pub frame_count: usize,
    pub current_frame_index: i64,
    pub image_width: i32,
    pub image_height: i32,
    pub frame_decodes_into: flow_pixel_format,
}
pub type codec_initialize =
    Option<unsafe extern "C" fn(_: *mut flow_c, _: *mut flow_codec_instance) -> bool>;
// Planar float bitmap used as the working buffer during scaling/compositing.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_bitmap_float {
    pub w: u32,
    pub h: u32,
    pub channels: u32,
    pub pixels: *mut f32,
    pub pixels_borrowed: bool, // true when `pixels` is not owned by this struct
    pub float_count: u32,
    pub float_stride: u32, // floats per row
    pub alpha_premultiplied: bool,
    pub alpha_meaningful: bool,
}
// Parameters of an interpolation kernel: support window, the two cubic
// polynomial segments (p*/q*), blur scaling and the evaluation callback.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_interpolation_details {
    pub window: f64, // kernel support radius
    pub p1: f64,
    pub p2: f64,
    pub p3: f64,
    pub q1: f64,
    pub q2: f64,
    pub q3: f64,
    pub q4: f64,
    pub blur: f64, // horizontal scale factor applied to |t|
    pub filter: flow_detailed_interpolation_method,
    pub sharpen_percent_goal: f32,
}
pub type flow_detailed_interpolation_method =
    Option<unsafe extern "C" fn(_: *const flow_interpolation_details, _: f64) -> f64>;
// Weights contributed by source pixels [Left, Right] to one output pixel.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_interpolation_pixel_contributions {
    pub Weights: *mut f32,
    pub Left: i32,
    pub Right: i32,
}
// Per-output-line table of pixel contributions.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_interpolation_line_contributions {
    pub ContribRow: *mut flow_interpolation_pixel_contributions,
    pub WindowSize: u32,
    pub LineLength: u32,
    pub percent_negative: f64,
}
// Separable convolution kernel with change thresholds and a scratch buffer.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_convolution_kernel {
    pub kernel: *mut f32,
    pub width: u32,
    pub radius: u32,
    pub threshold_min_change: f32,
    pub threshold_max_change: f32,
    pub buffer: *mut f32,
}
// Cached color-transfer state built by flow_colorcontext_init.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_colorcontext_info {
    pub byte_to_float: [f32; 256], // sRGB byte -> working-space float LUT
    pub floatspace: flow_working_floatspace,
    pub apply_srgb: bool,
    pub apply_gamma: bool,
    pub gamma: f32,
    pub gamma_inverse: f32,
}
// f32 <-> u32 bit-punning unions produced by c2rust (used by the fast
// log/pow approximations).
#[derive(Copy, Clone)]
#[repr(C)]
pub union C2RustUnnamed {
    pub i: u32,
    pub f: f32,
}
#[derive(Copy, Clone)]
#[repr(C)]
pub union C2RustUnnamed_0 {
    pub i: u32,
    pub f: f32,
}
#[derive(Copy, Clone)]
#[repr(C)]
pub union C2RustUnnamed_1 {
    pub f: f32,
    pub i: u32,
}
// Parameters for a 2D scale rendered into a region of a 1D canvas.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct flow_nodeinfo_scale2d_render_to_canvas1d {
    pub x: u32,
    pub y: u32,
    pub w: u32,
    pub h: u32,
    pub sharpen_percent_goal: f32,
    pub interpolation_filter: flow_interpolation_filter,
    pub scale_in_colorspace: flow_working_floatspace,
}
/*
* Copyright (c) Imazen LLC.
* No part of this project, including this file, may be copied, modified,
* propagated, or distributed except as permitted in COPYRIGHT.txt.
* Licensed under the GNU Affero General Public License, Version 3.0.
* Commercial licenses available at http://imageresizing.net/
*/
// Alias for libm's j1: Bessel function of the first kind, order one.
pub const BESSEL_01: unsafe extern "C" fn(_: f64) -> f64 = j1;
#[inline]
unsafe extern "C" fn flow_colorcontext_srgb_to_floatspace_uncached(
    colorcontext: *mut flow_colorcontext_info,
    value: u8,
) -> f32 {
    // Scale the byte into [0, 1], then undo the encoding curve selected when
    // the colorcontext was initialized (sRGB, pure gamma, or none).
    let scaled: f32 = value as f32 * (1.0f32 / 255.0f32);
    if (*colorcontext).apply_srgb {
        srgb_to_linear(scaled)
    } else if (*colorcontext).apply_gamma {
        flow_colorcontext_remove_gamma(colorcontext, scaled)
    } else {
        scaled
    }
}
#[inline]
unsafe extern "C" fn flow_colorcontext_remove_gamma(
colorcontext: *mut flow_colorcontext_info,
value: f32,
) -> f32 {
return pow(value as f64, (*colorcontext).gamma as f64) as f32;
}
#[inline]
unsafe extern "C" fn srgb_to_linear(s: f32) -> f32 {
    // sRGB electro-optical transfer function (IEC 61966-2-1): a linear
    // segment near black, a 2.4-power curve elsewhere. Input in [0, 1].
    if s <= 0.04045f32 {
        s / 12.92f32
    } else {
        // f64::powf replaces the extern-C `pow` shim; the operand
        // expressions are kept identical to preserve exact results.
        (((s + 0.055f32) / (1 as i32 as f32 + 0.055f32)) as f64).powf(2.4f32 as f64) as f32
    }
}
pub const NULL: i32 = 0 as i32; // translated C NULL
pub const FLOW_ERROR_MESSAGE_SIZE: i32 = 1023 as i32; // message bytes, excluding NUL
pub const IR_PI: f64 = 3.1415926535897932384626433832795f64; // pi (rounds to f64 PI)
#[inline]
unsafe extern "C" fn int_max(a: i32, b: i32) -> i32 {
    // Larger of the two operands (either one when equal).
    i32::max(a, b)
}
#[inline]
unsafe extern "C" fn int_min(a: i32, b: i32) -> i32 {
    // Smaller of the two operands (either one when equal).
    i32::min(a, b)
}
#[inline]
unsafe extern "C" fn ir_gaussian(x: f64, stdDev: f64) -> f64 {
    // Normalized Gaussian PDF with mean 0 and standard deviation `stdDev`.
    // std f64 methods replace the extern-C exp/sqrt shims, and
    // std::f64::consts::PI replaces IR_PI (the literal rounds to the same
    // f64 value), making the function self-contained.
    (-x * x / (2 as i32 as f64 * stdDev * stdDev)).exp()
        / ((2 as i32 as f64 * std::f64::consts::PI).sqrt() * stdDev)
}
#[inline]
unsafe extern "C" fn uchar_clamp_ff(clr: f32) -> u8 {
    // Round `clr` to the nearest integer and clamp into [0, 255].
    let mut result: u16 = 0;
    // +0.5 then truncate = round-half-up. The i16 intermediate keeps the
    // sign, so negative inputs wrap to a large u16 and are caught below.
    result = (clr as f64 + 0.5f64) as i16 as u16;
    if result as i32 > 255 as i32 {
        // Out of range: negative inputs clamp to 0, large ones to 255.
        result = if clr < 0 as i32 as f32 {
            0 as i32
        } else {
            255 as i32
        } as u16
    }
    return result as u8;
}
#[inline]
unsafe extern "C" fn fastpow2(p: f32) -> f32 {
    // Fast approximate 2^p: construct the result's IEEE-754 bit pattern
    // directly from a rational correction of the fractional exponent
    // (Schraudolph-style trick).
    let offset: f32 = if p < 0 as i32 as f32 { 1.0f32 } else { 0.0f32 };
    // Clamp below -126 so the constructed exponent stays in normal range.
    let clipp: f32 = if p < -(126 as i32) as f32 {
        -126.0f32
    } else {
        p
    };
    let w: i32 = clipp as i32;
    // Fractional part of the exponent, shifted into [0, 1).
    let z: f32 = clipp - w as f32 + offset;
    let bits: u32 = (((1 as i32) << 23 as i32) as f32
        * (clipp + 121.2740575f32 + 27.7280233f32 / (4.84252568f32 - z) - 1.49012907f32 * z))
        as u32;
    // f32::from_bits replaces the c2rust union-based type pun.
    f32::from_bits(bits)
}
#[inline]
unsafe extern "C" fn fastlog2(x: f32) -> f32 {
    // Fast approximate log2 operating on the IEEE-754 bit pattern; only
    // meaningful for x > 0. to_bits/from_bits replace the c2rust union
    // type puns (C2RustUnnamed_0/_1).
    let xi: u32 = x.to_bits();
    // The mantissa of x, re-biased into [1, 2).
    let m: f32 = f32::from_bits(xi & 0x7fffff as i32 as u32 | 0x3f000000 as i32 as u32);
    // Raw bits scaled by 2^-23 give exponent + mantissa/2^23; the rational
    // terms in `m` correct the mantissa's contribution.
    let y: f32 = xi as f32 * 1.1920928955078125e-7f32;
    y - 124.22551499f32 - 1.498030302f32 * m - 1.72587999f32 / (0.3520887068f32 + m)
}
#[inline]
unsafe extern "C" fn fastpow(x: f32, p: f32) -> f32 {
    // Fast approximate x^p via 2^(p * log2 x); inherits the error of
    // fastpow2/fastlog2 and is only meaningful for x > 0.
    return fastpow2(p * fastlog2(x));
}
#[inline]
unsafe extern "C" fn linear_to_srgb(clr: f32) -> f32 {
    // Encode a linear value into sRGB scaled to [0, 255]. Uses the fast
    // approximate power function; -14.025 is the 0.055*255 offset of the
    // standard 1.055*x^(1/2.4) - 0.055 encoding, pre-scaled by 255.
    if clr <= 0.0031308f32 {
        return 12.92f32 * clr * 255.0f32;
    }
    return 1.055f32 * 255.0f32 * fastpow(clr, 0.41666666f32) - 14.025f32;
}
#[inline]
unsafe extern "C" fn flow_colorcontext_apply_gamma(
colorcontext: *mut flow_colorcontext_info,
value: f32,
) -> f32 {
return pow(value as f64, (*colorcontext).gamma_inverse as f64) as f32;
}
#[inline]
unsafe extern "C" fn flow_colorcontext_srgb_to_floatspace(
    colorcontext: *mut flow_colorcontext_info,
    value: u8,
) -> f32 {
    // Cached lookup of the byte -> float table built by flow_colorcontext_init.
    return (*colorcontext).byte_to_float[value as usize];
}
#[inline]
unsafe extern "C" fn flow_colorcontext_floatspace_to_srgb(
    color: *mut flow_colorcontext_info,
    space_value: f32,
) -> u8 {
    // Re-encode a working-space float back into an 8-bit sRGB sample,
    // checking gamma before sRGB to mirror the flags set at init time.
    if (*color).apply_gamma {
        uchar_clamp_ff(flow_colorcontext_apply_gamma(color, space_value) * 255.0f32)
    } else if (*color).apply_srgb {
        uchar_clamp_ff(linear_to_srgb(space_value))
    } else {
        uchar_clamp_ff(255.0f32 * space_value)
    }
}
#[inline]
unsafe extern "C" fn linear_to_luv(bgr: *mut f32) {
    // Convert one linear-RGB pixel (stored B,G,R at offsets 0,1,2) in place
    // to CIE L*u*v*, with u and v shifted by +100 so they stay non-negative.
    // White point chromaticity constants.
    let xn: f32 = 0.312713f32;
    let yn: f32 = 0.329016f32;
    let Yn: f32 = 1.0f32;
    let un: f32 = 4.0f32 * xn / (-2.0f32 * xn + 12.0f32 * yn + 3.0f32);
    let vn: f32 = 9.0f32 * yn / (-2.0f32 * xn + 12.0f32 * yn + 3.0f32);
    let y_split: f32 = 0.00885645f32; // threshold between the two L* segments
    let y_adjust: f32 = 903.3f32; // slope of the linear L* segment
    let R: f32 = *bgr.offset(2);
    let G: f32 = *bgr.offset(1);
    let B: f32 = *bgr.offset(0);
    if R == 0.0f32 && G == 0.0f32 && B == 0.0f32 {
        // Pure black: L = 0 with neutral (shifted) chroma.
        *bgr.offset(0) = 0.0f32;
        *bgr.offset(1) = 100.0f32;
        *bgr.offset(2) = 100.0f32;
        return;
    }
    // RGB -> XYZ (sRGB primaries).
    let X: f32 = 0.412453f32 * R + 0.35758f32 * G + 0.180423f32 * B;
    let Y: f32 = 0.212671f32 * R + 0.71516f32 * G + 0.072169f32 * B;
    let Z: f32 = 0.019334f32 * R + 0.119193f32 * G + 0.950227f32 * B;
    let Yd: f32 = Y / Yn;
    let u: f32 = 4.0f32 * X / (X + 15.0f32 * Y + 3.0f32 * Z);
    let v: f32 = 9.0f32 * Y / (X + 15.0f32 * Y + 3.0f32 * Z);
    // f64::powf replaces the extern-C `pow` shim; the cube-root exponent is
    // kept as the original `(1.0f32 / 3.0f32) as f64` widening to preserve
    // exact results.
    let L: f32 = if Yd > y_split {
        (116.0f32 * (Yd as f64).powf((1.0f32 / 3.0f32) as f64) as f32) - 16.0f32
    } else {
        y_adjust * Yd
    };
    *bgr.offset(0) = L;
    *bgr.offset(1) = 13.0f32 * L * (u - un) + 100.0f32;
    *bgr.offset(2) = 13.0f32 * L * (v - vn) + 100.0f32;
}
#[inline]
unsafe extern "C" fn luv_to_linear(luv: *mut f32) {
    // Inverse of linear_to_luv: convert an (L, u+100, v+100) pixel in place
    // back to linear RGB stored as B,G,R.
    let L: f32 = *luv.offset(0);
    let U: f32 = *luv.offset(1) - 100.0f32;
    let V: f32 = *luv.offset(2) - 100.0f32;
    if L == 0.0f32 {
        // Black round-trips to all zeroes.
        *luv.offset(2) = 0.0f32;
        *luv.offset(1) = 0.0f32;
        *luv.offset(0) = 0.0f32;
        return;
    }
    let xn: f32 = 0.312713f32;
    let yn: f32 = 0.329016f32;
    let Yn: f32 = 1.0f32;
    let un: f32 = 4.0f32 * xn / (-2.0f32 * xn + 12.0f32 * yn + 3.0f32);
    let vn: f32 = 9.0f32 * yn / (-2.0f32 * xn + 12.0f32 * yn + 3.0f32);
    let y_adjust_2: f32 = 0.00110705645f32; // reciprocal slope of the linear L* segment
    let u: f32 = U / (13.0f32 * L) + un;
    let v: f32 = V / (13.0f32 * L) + vn;
    // f64::powf replaces the extern-C `pow` shim (cube of the L* term).
    let Y: f32 = if L > 8.0f32 {
        Yn * (((L + 16.0f32) / 116.0f32) as f64).powf(3.0f64) as f32
    } else {
        (Yn * L) * y_adjust_2
    };
    let X: f32 = 9.0f32 / 4.0f32 * Y * u / v;
    let Z: f32 = (9.0f32 * Y - 15.0f32 * v * Y - v * X) / (3.0f32 * v);
    // XYZ -> linear RGB (sRGB primaries).
    let r: f32 = 3.240479f32 * X - 1.53715f32 * Y - 0.498535f32 * Z;
    let g: f32 = -0.969256f32 * X + 1.875991f32 * Y + 0.041556f32 * Z;
    let b: f32 = 0.055648f32 * X - 0.204043f32 * Y + 1.057311f32 * Z;
    *luv.offset(0) = b;
    *luv.offset(1) = g;
    *luv.offset(2) = r;
}
unsafe extern "C" fn derive_cubic_coefficients(
    B: f64,
    C: f64,
    out: *mut flow_interpolation_details,
) {
    // Fill the two polynomial segments of a cubic kernel from the classic
    // B/C (Mitchell-Netravali) parameterization.
    let dest = &mut *out;
    let two_b: f64 = B + B;
    // |t| < 1 segment: p1 + t^2 * (p2 + t * p3)
    dest.p1 = 1.0f64 - 1.0f64 / 3.0f64 * B;
    dest.p2 = -3.0f64 + two_b + C;
    dest.p3 = 2.0f64 - 1.5f64 * B - C;
    // 1 <= |t| < 2 segment: q1 + t * (q2 + t * (q3 + t * q4))
    dest.q1 = 4.0f64 / 3.0f64 * B + 4.0f64 * C;
    dest.q2 = -8.0f64 * C - two_b;
    dest.q3 = B + 5.0f64 * C;
    dest.q4 = -1.0f64 / 6.0f64 * B - C;
}
unsafe extern "C" fn filter_flex_cubic(d: *const flow_interpolation_details, x: f64) -> f64 {
let t: f64 = fabs(x) / (*d).blur;
if t < 1.0f64 {
return (*d).p1 + t * (t * ((*d).p2 + t * (*d).p3));
}
if t < 2.0f64 {
return (*d).q1 + t * ((*d).q2 + t * ((*d).q3 + t * (*d).q4));
}
return 0.0f64;
}
unsafe extern "C" fn filter_bicubic_fast(d: *const flow_interpolation_details, t: f64) -> f64 {
let abs_t: f64 = fabs(t) / (*d).blur;
let abs_t_sq: f64 = abs_t * abs_t;
if abs_t < 1 as i32 as f64 {
return 1 as i32 as f64 - 2 as i32 as f64 * abs_t_sq + abs_t_sq * abs_t;
}
if abs_t < 2 as i32 as f64 {
return 4 as i32 as f64 - 8 as i32 as f64 * abs_t + 5 as i32 as f64 * abs_t_sq
- abs_t_sq * abs_t;
}
return 0 as i32 as f64;
}
unsafe extern "C" fn filter_sinc(d: *const flow_interpolation_details, t: f64) -> f64 {
    // Truncated sinc kernel: sin(pi t)/(pi t) inside the window, 0 outside.
    // f64::abs replaces the extern-C fabs shim (consistent with the .sin()
    // method already used below).
    let abs_t: f64 = t.abs() / (*d).blur;
    if abs_t == 0 as i32 as f64 {
        return 1 as i32 as f64;
        // Avoid division by zero
    }
    if abs_t > (*d).window {
        return 0 as i32 as f64;
    }
    let a = abs_t * IR_PI;
    a.sin() / a
}
unsafe extern "C" fn filter_box(d: *const flow_interpolation_details, t: f64) -> f64 {
    // Box (area-averaging) kernel: 1 inside [-window, window), 0 outside.
    let x: f64 = t / (*d).blur;
    if x >= -(1 as i32) as f64 * (*d).window && x < (*d).window {
        1.0f64
    } else {
        0.0f64
    }
}
unsafe extern "C" fn filter_triangle(d: *const flow_interpolation_details, t: f64) -> f64 {
let x: f64 = fabs(t) / (*d).blur;
if x < 1.0f64 {
return 1.0f64 - x;
}
return 0.0f64;
}
unsafe extern "C" fn filter_sinc_windowed(d: *const flow_interpolation_details, t: f64) -> f64 {
    // Lanczos-style kernel: sinc(x) windowed by sinc(x / window).
    // f64::abs replaces the extern-C fabs shim.
    let x: f64 = t / (*d).blur;
    let abs_t: f64 = x.abs();
    if abs_t == 0 as i32 as f64 {
        return 1 as i32 as f64;
        // Avoid division by zero
    }
    if abs_t > (*d).window {
        return 0 as i32 as f64;
    }
    (*d).window * (IR_PI * x / (*d).window).sin() * (x * IR_PI).sin()
        / (IR_PI * IR_PI * x * x)
}
unsafe extern "C" fn filter_jinc(d: *const flow_interpolation_details, t: f64) -> f64 {
    // Jinc kernel (circular analogue of sinc): J1(pi x)/x with the x = 0
    // limit pi/2. f64::abs replaces the extern-C fabs shim; j1 stays extern
    // since std has no Bessel functions.
    let x: f64 = t.abs() / (*d).blur;
    if x == 0.0f64 {
        return 0.5f64 * IR_PI;
    }
    return j1(IR_PI * x) / x;
    // //x crossing #1 1.2196698912665045
}
/*
static inline double window_jinc (double x) {
double x_a = x * 1.2196698912665045;
if (x == 0.0)
return 1;
return (BesselOrderOne (IR_PI*x_a) / (x_a * IR_PI * 0.5));
// //x crossing #1 1.2196698912665045
}
static double filter_window_jinc (const struct flow_interpolation_details * d, double t) {
return window_jinc (t / (d->blur * d->window));
}
*/
unsafe extern "C" fn filter_ginseng(d: *const flow_interpolation_details, t: f64) -> f64 {
    // Sinc windowed by jinc
    // f64::abs replaces the extern-C fabs shim; j1 stays extern since std
    // has no Bessel functions.
    let abs_t: f64 = t.abs() / (*d).blur;
    let t_pi: f64 = abs_t * IR_PI;
    if abs_t == 0 as i32 as f64 {
        return 1 as i32 as f64;
        // Avoid division by zero
    }
    if abs_t > 3 as i32 as f64 {
        return 0 as i32 as f64;
    }
    // 1.2196... rescales so the jinc's first zero lands on the window edge.
    let jinc_input: f64 = 1.2196698912665045f64 * t_pi / (*d).window;
    let jinc_output: f64 = j1(jinc_input) / (jinc_input * 0.5f64);
    return jinc_output * (t_pi).sin() / t_pi;
}
// Small epsilon constant; presumably a float-comparison tolerance — confirm at use sites.
pub const TONY: f64 = 0.00001f64;
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_percent_negative_weight(
    details: *const flow_interpolation_details,
) -> f64 {
    // Integrate the filter over [0, window] (plus a small overshoot) with
    // the trapezoid rule, accumulating signed area separately so we can
    // report the fraction of the kernel's weight that is negative.
    let samples: i32 = 50;
    let step: f64 = (*details).window / samples as f64;
    let filter = (*details).filter.expect("non-null function pointer");
    let mut last_height: f64 = filter(details, -step);
    let mut positive_area: f64 = 0.0;
    let mut negative_area: f64 = 0.0;
    for i in 0..=(samples + 2) {
        let height: f64 = filter(details, i as f64 * step);
        // Trapezoid between the previous and current sample.
        let area: f64 = (height + last_height) / 2.0f64 * step;
        last_height = height;
        if area > 0.0 {
            positive_area += area;
        } else {
            negative_area -= area;
        }
    }
    negative_area / positive_area
}
// Allocates a flow_interpolation_details on the context heap and fills in
// neutral defaults (blur 1, window 2, p1 = q1 = 0, remaining coefficients 1).
// Returns NULL with an Out_of_memory error recorded on the context on failure.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_create(
    context: *mut flow_c,
) -> *mut flow_interpolation_details {
    let mut d: *mut flow_interpolation_details = flow_context_calloc(
        context,
        1 as i32 as usize,
        ::std::mem::size_of::<flow_interpolation_details>(),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        189 as i32,
    ) as *mut flow_interpolation_details;
    if d.is_null() {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            191 as i32,
            (*::std::mem::transmute::<&[u8; 34], &[libc::c_char; 34]>(
                b"flow_interpolation_details_create\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_interpolation_details;
    }
    (*d).blur = 1 as i32 as f64;
    (*d).window = 2 as i32 as f64;
    // Chained copies below set p1 = q1 = 0 ...
    (*d).q1 = 0 as i32 as f64;
    (*d).p1 = (*d).q1;
    // ... and p2, p3, q2, q3, q4 = 1.
    (*d).q4 = 1 as i32 as f64;
    (*d).q3 = (*d).q4;
    (*d).p3 = (*d).q3;
    (*d).q2 = (*d).p3;
    (*d).p2 = (*d).q2;
    (*d).sharpen_percent_goal = 0 as i32 as f32;
    return d;
}
// Creates a details struct configured as a B/C-parameterized cubic kernel
// (via derive_cubic_coefficients) with the given window and blur. On
// allocation failure, records a callstack frame and returns NULL.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_create_bicubic_custom(
    context: *mut flow_c,
    window: f64,
    blur: f64,
    B: f64,
    C: f64,
) -> *mut flow_interpolation_details {
    let mut d: *mut flow_interpolation_details = flow_interpolation_details_create(context);
    if !d.is_null() {
        (*d).blur = blur;
        derive_cubic_coefficients(B, C, d);
        (*d).filter = Some(
            filter_flex_cubic
                as unsafe extern "C" fn(_: *const flow_interpolation_details, _: f64) -> f64,
        );
        (*d).window = window
    } else {
        flow_context_add_to_callstack(
            context,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            212 as i32,
            (*::std::mem::transmute::<&[u8; 49], &[libc::c_char; 49]>(
                b"flow_interpolation_details_create_bicubic_custom\x00",
            ))
            .as_ptr(),
        );
    }
    return d;
}
// Creates a details struct using an arbitrary filter callback with the given
// window and blur. On allocation failure, records a callstack frame and
// returns NULL.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_create_custom(
    context: *mut flow_c,
    window: f64,
    blur: f64,
    filter: flow_detailed_interpolation_method,
) -> *mut flow_interpolation_details {
    let mut d: *mut flow_interpolation_details = flow_interpolation_details_create(context);
    if !d.is_null() {
        (*d).blur = blur;
        (*d).filter = filter;
        (*d).window = window
    } else {
        flow_context_add_to_callstack(
            context,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            226 as i32,
            (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                b"flow_interpolation_details_create_custom\x00",
            ))
            .as_ptr(),
        );
    }
    return d;
}
// Frees a details struct previously allocated on the context heap by
// flow_interpolation_details_create (or its _custom variants).
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_destroy(
    context: *mut flow_c,
    details: *mut flow_interpolation_details,
) {
    flow_deprecated_free(
        context,
        details as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        233 as i32,
    );
}
unsafe extern "C" fn InterpolationDetails_create_from_internal(
context: *mut flow_c,
filter: flow_interpolation_filter,
checkExistenceOnly: bool,
) -> *mut flow_interpolation_details {
let ex: bool = checkExistenceOnly;
let truePtr: *mut flow_interpolation_details = -(1 as i32) as *mut flow_interpolation_details;
match filter as u32 {
23 | 22 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
1 as i32 as f64,
1 as i32 as f64,
Some(
filter_triangle
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
20 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
2 as i32 as f64,
1 as i32 as f64,
Some(
filter_sinc
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
18 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
3 as i32 as f64,
1 as i32 as f64,
Some(
filter_sinc
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
21 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
2 as i32 as f64,
0.9549963639785485f64,
Some(
filter_sinc
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
19 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
3 as i32 as f64,
0.9812505644269356f64,
Some(
filter_sinc
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
15 => {
// Hermite and BSpline no negative weights
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
2 as i32 as f64,
1 as i32 as f64,
1 as i32 as f64,
0 as i32 as f64,
)
};
}
8 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
2 as i32 as f64,
1 as i32 as f64,
Some(
filter_sinc_windowed
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
6 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
3 as i32 as f64,
1 as i32 as f64,
Some(
filter_sinc_windowed
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
9 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
2 as i32 as f64,
0.9549963639785485f64,
Some(
filter_sinc_windowed
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
7 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
3 as i32 as f64,
0.9812505644269356f64,
Some(
filter_sinc_windowed
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
10 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
2 as i32 as f64,
1 as i32 as f64,
Some(
filter_bicubic_fast
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
11 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
2 as i32 as f64,
1 as i32 as f64,
0 as i32 as f64,
1 as i32 as f64,
)
}
}
12 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
2 as i32 as f64,
0.9549963639785485f64,
0 as i32 as f64,
1 as i32 as f64,
)
}
}
13 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
2 as i32 as f64,
1 as i32 as f64,
0 as i32 as f64,
0.5f64,
)
}
}
25 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
1 as i32 as f64,
1 as i32 as f64,
0 as i32 as f64,
0.5f64,
)
}
}
26 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
1 as i32 as f64,
13.0f64 / 16.0f64,
0 as i32 as f64,
0.5f64,
)
}
}
14 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
2 as i32 as f64,
1 as i32 as f64,
1.0f64 / 3.0f64,
1.0f64 / 3.0f64,
)
}
}
28 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
1 as i32 as f64,
1 as i32 as f64,
1.0f64 / 3.0f64,
1.0f64 / 3.0f64,
)
}
}
29 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
2.5f64,
1.0f64 / 1.1685777620836932f64,
0.37821575509399867f64,
0.31089212245300067f64,
)
}
}
30 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
2.5f64,
1.0f64 / 1.105822933719019f64,
0.2620145123990142f64,
0.3689927438004929f64,
)
}
}
2 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
2 as i32 as f64,
1 as i32 as f64,
0.37821575509399867f64,
0.31089212245300067f64,
)
}
}
27 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
0.74f64,
0.74f64,
0.37821575509399867f64,
0.31089212245300067f64,
)
}
}
1 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
1.05f64,
1 as i32 as f64,
0.37821575509399867f64,
0.31089212245300067f64,
)
}
}
3 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
2 as i32 as f64,
1 as i32 as f64,
0.2620145123990142f64,
0.3689927438004929f64,
)
}
}
16 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_bicubic_custom(
context,
1 as i32 as f64,
1 as i32 as f64,
0 as i32 as f64,
0 as i32 as f64,
)
}
}
24 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
0.5f64,
1 as i32 as f64,
Some(
filter_box
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
4 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
3 as i32 as f64,
1 as i32 as f64,
Some(
filter_ginseng
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
5 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
3 as i32 as f64,
0.9812505644269356f64,
Some(
filter_ginseng
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
17 => {
return if ex as i32 != 0 {
truePtr
} else {
flow_interpolation_details_create_custom(
context,
6 as i32 as f64,
1.0f64,
Some(
filter_jinc
as unsafe extern "C" fn(
_: *const flow_interpolation_details,
_: f64,
) -> f64,
),
)
}
}
_ => {}
}
if !checkExistenceOnly {
flow_snprintf(
flow_context_set_error_get_message_buffer(
context,
flow_status_code::Invalid_argument,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
323 as i32,
(*::std::mem::transmute::<&[u8; 42], &[libc::c_char; 42]>(
b"InterpolationDetails_create_from_internal\x00",
))
.as_ptr(),
),
FLOW_ERROR_MESSAGE_SIZE as usize,
b"Invalid interpolation filter %d\x00" as *const u8 as *const libc::c_char,
filter as i32,
);
}
return NULL as *mut flow_interpolation_details;
}
/// Allocates interpolation details for the given filter id.
/// Returns NULL on allocation failure or an unrecognized filter.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_details_create_from(
    context: *mut flow_c,
    filter: flow_interpolation_filter,
) -> *mut flow_interpolation_details {
    InterpolationDetails_create_from_internal(context, filter, false)
}
/// Reports whether `filter` is a recognized interpolation filter id.
/// Performs no allocation (existence-check mode of the internal helper).
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_filter_exists(
    filter: flow_interpolation_filter,
) -> bool {
    let marker = InterpolationDetails_create_from_internal(NULL as *mut flow_c, filter, true);
    !marker.is_null()
}
/// Allocates a `flow_interpolation_line_contributions` table with one
/// contribution record per output pixel (`line_length` records), each able to
/// hold up to `windows_size` weights.
///
/// Three separate allocations are made: the struct itself, the `ContribRow`
/// array, and a single contiguous zeroed weight buffer of
/// `windows_size * line_length` floats. Each row's `Weights` pointer is aimed
/// at its `windows_size`-sized slice of that shared buffer. On any allocation
/// failure the previously acquired allocations are released, an Out_of_memory
/// error is set on the context, and NULL is returned.
unsafe extern "C" fn LineContributions_alloc(
    context: *mut flow_c,
    line_length: u32,
    windows_size: u32,
) -> *mut flow_interpolation_line_contributions {
    let mut res: *mut flow_interpolation_line_contributions = flow_context_malloc(
        context,
        ::std::mem::size_of::<flow_interpolation_line_contributions>(),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        343 as i32,
    )
    as *mut flow_interpolation_line_contributions;
    if res.is_null() {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            345 as i32,
            (*::std::mem::transmute::<&[u8; 24], &[libc::c_char; 24]>(
                b"LineContributions_alloc\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_interpolation_line_contributions;
    }
    (*res).WindowSize = windows_size;
    (*res).LineLength = line_length;
    // Per-output-pixel contribution records (Left/Right bounds + Weights ptr).
    (*res).ContribRow = flow_context_malloc(
        context,
        (line_length as usize)
            .wrapping_mul(::std::mem::size_of::<flow_interpolation_pixel_contributions>()),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        351 as i32,
    ) as *mut flow_interpolation_pixel_contributions;
    if (*res).ContribRow.is_null() {
        // Roll back the struct allocation before reporting OOM.
        flow_deprecated_free(
            context,
            res as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            353 as i32,
        );
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            354 as i32,
            (*::std::mem::transmute::<&[u8; 24], &[libc::c_char; 24]>(
                b"LineContributions_alloc\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_interpolation_line_contributions;
    }
    // One zero-initialized buffer shared by all rows' weight slices.
    let allWeights: *mut f32 = flow_context_calloc(
        context,
        windows_size.wrapping_mul(line_length) as usize,
        ::std::mem::size_of::<f32>(),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        358 as i32,
    ) as *mut f32;
    if allWeights.is_null() {
        // Roll back both earlier allocations before reporting OOM.
        flow_deprecated_free(
            context,
            (*res).ContribRow as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            360 as i32,
        );
        flow_deprecated_free(
            context,
            res as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            361 as i32,
        );
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            362 as i32,
            (*::std::mem::transmute::<&[u8; 24], &[libc::c_char; 24]>(
                b"LineContributions_alloc\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_interpolation_line_contributions;
    }
    // Point each row's Weights at its slice of the shared buffer.
    let mut i: u32 = 0 as i32 as u32;
    while i < line_length {
        let ref mut fresh4 = (*(*res).ContribRow.offset(i as isize)).Weights;
        *fresh4 = allWeights.offset(i.wrapping_mul(windows_size) as isize);
        i = i.wrapping_add(1)
    }
    return res;
}
/// Frees a line-contributions table: the shared weight buffer (reached via
/// row 0's `Weights` pointer), the `ContribRow` array, and the struct itself.
///
/// NOTE(review): `flow_interpolation_line_contributions_create` may advance a
/// row's `Weights` pointer when trimming leading zero weights; if that ever
/// applies to row 0, the pointer freed here would be an interior pointer —
/// TODO confirm against the allocator's expectations.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_line_contributions_destroy(
    context: *mut flow_c,
    p: *mut flow_interpolation_line_contributions,
) {
    let source_file = b"lib/graphics.c\x00" as *const u8 as *const libc::c_char;
    if !p.is_null() {
        if !(*p).ContribRow.is_null() {
            // Row 0's Weights pointer owns the shared weight allocation.
            flow_deprecated_free(
                context,
                (*(*p).ContribRow.offset(0)).Weights as *mut libc::c_void,
                source_file,
                377 as i32,
            );
        }
        flow_deprecated_free(context, (*p).ContribRow as *mut libc::c_void, source_file, 379 as i32);
    }
    flow_deprecated_free(context, p as *mut libc::c_void, source_file, 381 as i32);
}
/// Builds the per-output-pixel weight table used to resample a line of
/// `input_line_size` pixels to `output_line_size` pixels with the given
/// interpolation `details`.
///
/// For each output pixel this evaluates the filter over its source window,
/// zeroes near-zero weights (for cross-platform reproducibility), normalizes
/// the weights (optionally rescaling positive/negative parts to hit
/// `sharpen_percent_goal`), and trims zero-weight entries from both edges of
/// the window. Returns NULL on allocation failure or if a window exceeds the
/// allocated size.
#[no_mangle]
pub unsafe extern "C" fn flow_interpolation_line_contributions_create(
    context: *mut flow_c,
    output_line_size: u32,
    input_line_size: u32,
    details: *const flow_interpolation_details,
) -> *mut flow_interpolation_line_contributions {
    // Fraction of filter weight that is negative (the filter's inherent sharpening).
    let sharpen_ratio: f64 = flow_interpolation_details_percent_negative_weight(details);
    // Requested sharpening: at least the filter's natural ratio, capped below 1.0.
    let desired_sharpen_ratio: f64 = fmin(
        0.999999999f32 as f64,
        fmax(
            sharpen_ratio,
            (*details).sharpen_percent_goal as f64 / 100.0f64,
        ),
    );
    let scale_factor: f64 = output_line_size as f64 / input_line_size as f64;
    // When downscaling, the source-side filter window widens by 1/scale.
    let downscale_factor: f64 = fmin(1.0f64, scale_factor);
    let half_source_window: f64 = ((*details).window + 0.5f64) / downscale_factor;
    let allocated_window_size: u32 =
        (ceil(2 as i32 as f64 * (half_source_window - TONY)) as i32 + 1 as i32) as u32;
    let mut u: u32 = 0;
    let mut ix: u32 = 0;
    let mut res: *mut flow_interpolation_line_contributions =
        LineContributions_alloc(context, output_line_size, allocated_window_size);
    if res.is_null() {
        flow_context_add_to_callstack(
            context,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            401 as i32,
            (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                b"flow_interpolation_line_contributions_create\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_interpolation_line_contributions;
    }
    // Aggregated across all output pixels, used for (*res).percent_negative.
    let mut negative_area: f64 = 0 as i32 as f64;
    let mut positive_area: f64 = 0 as i32 as f64;
    u = 0 as i32 as u32;
    while u < output_line_size {
        // Center of this output pixel mapped back into source coordinates.
        let center_src_pixel: f64 = (u as f64 + 0.5f64) / scale_factor - 0.5f64;
        let left_edge: i32 = (floor(center_src_pixel) as i32 as u32)
            .wrapping_sub(allocated_window_size.wrapping_sub(1u32).wrapping_div(2u32))
            as i32;
        let right_edge: i32 = (left_edge as u32)
            .wrapping_add(allocated_window_size)
            .wrapping_sub(1u32) as i32;
        // Clamp the window to the valid source range.
        let left_src_pixel: u32 = int_max(0 as i32, left_edge) as u32;
        let right_src_pixel: u32 = int_min(right_edge, input_line_size as i32 - 1 as i32) as u32;
        // Net weight
        let mut total_weight: f64 = 0.0f64;
        // Sum of negative and positive weights
        let mut total_negative_weight: f64 = 0.0f64;
        let mut total_positive_weight: f64 = 0.0f64;
        let source_pixel_count: u32 = right_src_pixel
            .wrapping_sub(left_src_pixel)
            .wrapping_add(1u32);
        if source_pixel_count > allocated_window_size {
            // Window math overflowed the allocation; abort rather than overrun.
            flow_interpolation_line_contributions_destroy(context, res);
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Invalid_internal_state,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                426 as i32,
                (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                    b"flow_interpolation_line_contributions_create\x00",
                ))
                .as_ptr(),
            );
            return NULL as *mut flow_interpolation_line_contributions;
        }
        (*(*res).ContribRow.offset(u as isize)).Left = left_src_pixel as i32;
        (*(*res).ContribRow.offset(u as isize)).Right = right_src_pixel as i32;
        let mut weights: *mut f32 = (*(*res).ContribRow.offset(u as isize)).Weights;
        // Evaluate the filter for each source pixel in the window.
        ix = left_src_pixel;
        while ix <= right_src_pixel {
            let tx: i32 = ix.wrapping_sub(left_src_pixel) as i32;
            let mut add: f64 = Some((*details).filter.expect("non-null function pointer"))
                .expect("non-null function pointer")(
                details,
                downscale_factor * (ix as f64 - center_src_pixel),
            );
            if fabs(add) <= 0.00000002f64 {
                add = 0.0f64
                // Weights below a certain threshold make consistent x-plat
                // integration test results impossible. pos/neg zero, etc.
                // They should be rounded down to zero at the threshold at which results are consistent.
            }
            *weights.offset(tx as isize) = add as f32;
            total_weight += add;
            total_negative_weight += fmin(0 as i32 as f64, add);
            total_positive_weight += fmax(0 as i32 as f64, add);
            ix = ix.wrapping_add(1)
        }
        // Normalization: default is uniform 1/total_weight scaling.
        let mut neg_factor: f32 = 0.;
        let mut pos_factor: f32 = 0.;
        pos_factor = (1.0f32 as f64 / total_weight) as f32;
        neg_factor = pos_factor;
        //printf("cur= %f cur+= %f cur-= %f desired_sharpen_ratio=%f sharpen_ratio-=%f\n", total_weight, total_positive_weight, total_negative_weight, desired_sharpen_ratio, sharpen_ratio);
        if total_weight <= 0.0f32 as f64 || desired_sharpen_ratio > sharpen_ratio {
            if total_negative_weight < 0.0f32 as f64 {
                if desired_sharpen_ratio < 1.0f32 as f64 {
                    // Rescale positive and negative weights separately so the
                    // negative fraction matches desired_sharpen_ratio while the
                    // net weight stays 1.
                    let target_positive_weight: f64 =
                        1.0f32 as f64 / (1.0f32 as f64 - desired_sharpen_ratio);
                    let target_negative_weight: f64 =
                        desired_sharpen_ratio * -target_positive_weight;
                    pos_factor = (target_positive_weight / total_positive_weight) as f32;
                    neg_factor = (target_negative_weight / total_negative_weight) as f32;
                    if total_negative_weight == 0 as i32 as f64 {
                        neg_factor = 1.0f32
                    }
                    //printf("target=%f target-=%f, pos_factor=%f neg_factor=%f\n", total_positive_weight - target_negative_weight, target_negative_weight, pos_factor, neg_factor);
                }
            } else if total_weight == 0.0 {
                // In this situation we have a problem to report
            }
        }
        //printf("\n");
        // Apply the chosen factors and accumulate the global pos/neg areas.
        ix = 0 as i32 as u32;
        while ix < source_pixel_count {
            if *weights.offset(ix as isize) < 0 as i32 as f32 {
                *weights.offset(ix as isize) *= neg_factor;
                negative_area -= *weights.offset(ix as isize) as f64
            } else {
                *weights.offset(ix as isize) *= pos_factor;
                positive_area += *weights.offset(ix as isize) as f64
            }
            ix = ix.wrapping_add(1)
        }
        // Shrink to improve perf & result consistency
        let mut iix: i32 = 0;
        // Shrink region from the right
        iix = source_pixel_count.wrapping_sub(1u32) as i32;
        while iix >= 0 as i32 {
            if *weights.offset(iix as isize) != 0 as i32 as f32 {
                break;
            }
            let ref mut fresh5 = (*(*res).ContribRow.offset(u as isize)).Right;
            *fresh5 -= 1;
            iix -= 1
        }
        // Shrink region from the left
        // (advances the row's Weights pointer past leading zero weights)
        iix = 0 as i32;
        while iix < source_pixel_count as i32 {
            if *weights.offset(0) != 0 as i32 as f32 {
                break;
            }
            let ref mut fresh6 = (*(*res).ContribRow.offset(u as isize)).Weights;
            *fresh6 = (*fresh6).offset(1);
            weights = weights.offset(1);
            let ref mut fresh7 = (*(*res).ContribRow.offset(u as isize)).Left;
            *fresh7 += 1;
            iix += 1
        }
        u = u.wrapping_add(1)
    }
    (*res).percent_negative = negative_area / positive_area;
    return res;
}
/// Horizontally convolves `row_count` rows of `from` (starting at `from_row`)
/// into `to` (starting at `to_row`), using one precomputed contribution
/// record per destination pixel from `weights`.
///
/// Three paths: a SIMD (`__m128`) path when both bitmaps have 4 channels, a
/// scalar 3-channel path, and a generic path that processes only
/// `min(from.channels, to.channels)` channels. Returns false (with an error
/// set) only if the channel count exceeds 4.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_scale_rows(
    context: *mut flow_c,
    from: *mut flow_bitmap_float,
    from_row: u32,
    to: *mut flow_bitmap_float,
    to_row: u32,
    row_count: u32,
    weights: *mut flow_interpolation_pixel_contributions,
) -> bool {
    let from_step: u32 = (*from).channels;
    let to_step: u32 = (*to).channels;
    let dest_buffer_count: u32 = (*to).w;
    let min_channels: u32 = from_step.min(to_step);
    let mut ndx: u32 = 0;
    if min_channels > 4 as i32 as u32 {
        // avg[] below holds at most 4 channels; more is unsupported.
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            520 as i32,
            (*::std::mem::transmute::<&[u8; 29], &[libc::c_char; 29]>(
                b"flow_bitmap_float_scale_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    let mut avg: [f32; 4] = [0.; 4];
    // if both have alpha, process it
    if from_step == 4 && to_step == 4 {
        // SIMD path: each pixel is one __m128 (4 f32 channels).
        let mut row: u32 = 0;
        while row < row_count {
            let source_offset = ((from_row + row) * (*from).float_stride) as isize;
            let source_buffer: *const __m128 =
                (*from).pixels.offset(source_offset) as *const __m128;
            let dest_offset = ((to_row + row) * (*to).float_stride) as isize;
            let dest_buffer: *mut __m128 = (*to).pixels.offset(dest_offset) as *mut __m128;
            let dest_buffer: &mut [__m128] =
                std::slice::from_raw_parts_mut(dest_buffer, dest_buffer_count as usize);
            ndx = 0;
            while ndx < dest_buffer_count {
                let mut sums: __m128 = _mm_set1_ps(0.0);
                let left: i32 = (*weights.offset(ndx as isize)).Left;
                let right: i32 = (*weights.offset(ndx as isize)).Right;
                let weightArray: *const f32 = (*weights.offset(ndx as isize)).Weights;
                let source_buffer: &[__m128] =
                    std::slice::from_raw_parts(source_buffer, (right + 1) as usize);
                /* Accumulate each channel */
                let mut i = left;
                while i <= right {
                    let factor: __m128 = _mm_set1_ps(*weightArray.offset((i - left) as isize));
                    // sums += factor * *source_buffer[i as usize];
                    let mid = _mm_mul_ps(factor, source_buffer[i as usize]);
                    sums = _mm_add_ps(sums, mid);
                    i += 1
                }
                dest_buffer[ndx as usize] = sums;
                ndx += 1
            }
            row += 1
        }
    } else if from_step == 3 as i32 as u32 && to_step == 3 as i32 as u32 {
        // Scalar path specialized for 3-channel (no alpha) bitmaps.
        let mut row_0: u32 = 0 as i32 as u32;
        while row_0 < row_count {
            let source_buffer_0: *const f32 = (*from).pixels.offset(
                from_row
                    .wrapping_add(row_0)
                    .wrapping_mul((*from).float_stride) as isize,
            );
            let dest_buffer_0: *mut f32 = (*to)
                .pixels
                .offset(to_row.wrapping_add(row_0).wrapping_mul((*to).float_stride) as isize);
            ndx = 0 as i32 as u32;
            while ndx < dest_buffer_count {
                let mut bgr: [f32; 3] = [0.0f32, 0.0f32, 0.0f32];
                let left_0: i32 = (*weights.offset(ndx as isize)).Left;
                let right_0: i32 = (*weights.offset(ndx as isize)).Right;
                let weightArray_0: *const f32 = (*weights.offset(ndx as isize)).Weights;
                let mut i_0: i32 = 0;
                /* Accumulate each channel */
                i_0 = left_0;
                while i_0 <= right_0 {
                    let weight: f32 = *weightArray_0.offset((i_0 - left_0) as isize);
                    bgr[0] += weight
                        * *source_buffer_0.offset((i_0 as u32).wrapping_mul(from_step) as isize);
                    bgr[1] += weight
                        * *source_buffer_0.offset(
                            (i_0 as u32).wrapping_mul(from_step).wrapping_add(1u32) as isize,
                        );
                    bgr[2] += weight
                        * *source_buffer_0.offset(
                            (i_0 as u32).wrapping_mul(from_step).wrapping_add(2u32) as isize,
                        );
                    i_0 += 1
                }
                *dest_buffer_0.offset(ndx.wrapping_mul(to_step) as isize) = bgr[0];
                *dest_buffer_0.offset(ndx.wrapping_mul(to_step).wrapping_add(1u32) as isize) =
                    bgr[1];
                *dest_buffer_0.offset(ndx.wrapping_mul(to_step).wrapping_add(2u32) as isize) =
                    bgr[2];
                ndx = ndx.wrapping_add(1)
            }
            row_0 = row_0.wrapping_add(1)
        }
    } else {
        // Generic path: channel counts differ; process min_channels of each pixel.
        let mut row_1: u32 = 0 as i32 as u32;
        while row_1 < row_count {
            let source_buffer_1: *const f32 = (*from).pixels.offset(
                from_row
                    .wrapping_add(row_1)
                    .wrapping_mul((*from).float_stride) as isize,
            );
            let dest_buffer_1: *mut f32 = (*to)
                .pixels
                .offset(to_row.wrapping_add(row_1).wrapping_mul((*to).float_stride) as isize);
            ndx = 0 as i32 as u32;
            while ndx < dest_buffer_count {
                avg[0] = 0 as i32 as f32;
                avg[1] = 0 as i32 as f32;
                avg[2] = 0 as i32 as f32;
                avg[3 as i32 as usize] = 0 as i32 as f32;
                let left_1: i32 = (*weights.offset(ndx as isize)).Left;
                let right_1: i32 = (*weights.offset(ndx as isize)).Right;
                let weightArray_1: *const f32 = (*weights.offset(ndx as isize)).Weights;
                /* Accumulate each channel */
                let mut i_1: i32 = left_1;
                while i_1 <= right_1 {
                    let weight_0: f32 = *weightArray_1.offset((i_1 - left_1) as isize);
                    let mut j: u32 = 0 as i32 as u32;
                    while j < min_channels {
                        avg[j as usize] += weight_0
                            * *source_buffer_1.offset(
                                (i_1 as u32).wrapping_mul(from_step).wrapping_add(j) as isize,
                            );
                        j = j.wrapping_add(1)
                    }
                    i_1 += 1
                }
                let mut j_0: u32 = 0 as i32 as u32;
                while j_0 < min_channels {
                    *dest_buffer_1.offset(ndx.wrapping_mul(to_step).wrapping_add(j_0) as isize) =
                        avg[j_0 as usize];
                    j_0 = j_0.wrapping_add(1)
                }
                ndx = ndx.wrapping_add(1)
            }
            row_1 = row_1.wrapping_add(1)
        }
    }
    return true;
}
/// Multiplies each of the first `length` floats at `row` by `coefficient`,
/// in place. `row` must point to at least `length` valid f32s.
unsafe extern "C" fn multiply_row(row: *mut f32, length: usize, coefficient: f32) {
    for i in 0..length {
        *row.add(i) *= coefficient;
    }
}
/// Adds the first `length` floats of `input_row` element-wise into
/// `mutate_row`. Both pointers must reference at least `length` valid f32s.
unsafe extern "C" fn add_row(mutate_row: *mut f32, input_row: *mut f32, length: usize) {
    for i in 0..length {
        *mutate_row.add(i) += *input_row.add(i);
    }
}
/// Creates a bitmap header describing the `w`x`h` sub-rectangle of `b` at
/// (`x`, `y`).
///
/// The returned header SHARES `b`'s pixel memory: its `pixels` pointer is
/// offset into `b`'s buffer and its stride is copied from `b`, so no pixels
/// are copied and `b` must outlive the returned header. Rectangles extending
/// past `b`'s bounds are rejected with Invalid_argument; returns NULL on
/// error.
unsafe extern "C" fn crop(
    c: *mut flow_c,
    b: *mut flow_bitmap_bgra,
    x: u32,
    y: u32,
    w: u32,
    h: u32,
) -> *mut flow_bitmap_bgra {
    // Validate that the requested rectangle fits inside the source bitmap.
    if h.wrapping_add(y) > (*b).h || w.wrapping_add(x) > (*b).w {
        flow_context_set_error_get_message_buffer(
            c,
            flow_status_code::Invalid_argument,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            632 as i32,
            (*::std::mem::transmute::<&[u8; 5], &[libc::c_char; 5]>(b"crop\x00")).as_ptr(),
        );
        return NULL as *mut flow_bitmap_bgra;
    }
    let mut cropped_canvas: *mut flow_bitmap_bgra =
        flow_bitmap_bgra_create_header(c, w as i32, h as i32);
    let bpp: u32 = flow_pixel_format_bytes_per_pixel((*b).fmt);
    if cropped_canvas.is_null() {
        flow_context_add_to_callstack(
            c,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            640 as i32,
            (*::std::mem::transmute::<&[u8; 5], &[libc::c_char; 5]>(b"crop\x00")).as_ptr(),
        );
        return NULL as *mut flow_bitmap_bgra;
    }
    // Copy format, matte color, and compositing mode from the source.
    (*cropped_canvas).fmt = (*b).fmt;
    memcpy(
        &mut *(*cropped_canvas).matte_color.as_mut_ptr().offset(0) as *mut u8 as *mut libc::c_void,
        &mut *(*b).matte_color.as_mut_ptr().offset(0) as *mut u8 as *const libc::c_void,
        ::std::mem::size_of::<[u8; 4]>() as u64,
    );
    (*cropped_canvas).compositing_mode = (*b).compositing_mode;
    // Alias into b's pixel buffer: skip y rows, then x pixels into the row.
    (*cropped_canvas).pixels = (*b)
        .pixels
        .offset(y.wrapping_mul((*b).stride) as isize)
        .offset(x.wrapping_mul(bpp) as isize);
    // Keep b's stride so row addressing within the shared buffer stays valid.
    (*cropped_canvas).stride = (*b).stride;
    return cropped_canvas;
}
/// Note: Rust version of `FLOW_error` takes the name of the caller as its third parameter since
/// there does not seem to be a way to get the name of the current or calling function in Rust.
///
/// Records `status_code` on the context and returns the message buffer
/// pointer supplied by `flow_context_set_error_get_message_buffer`.
fn FLOW_error(
    context: *mut flow_context,
    status_code: flow_status_code,
    caller: &str,
) -> *mut libc::c_char {
    // Bind the CStrings to locals so their buffers stay alive for the FFI
    // call. The previous code called `.as_ptr()` on CString *temporaries*,
    // which were dropped at the end of each `let` statement — the `file` and
    // `func` pointers were dangling before they were ever read.
    let file = CString::new(file!()).unwrap();
    let func = CString::new(caller).unwrap();
    unsafe {
        flow_context_set_error_get_message_buffer(
            context,
            status_code,
            file.as_ptr(),
            line!() as i32,
            func.as_ptr(), // was __func__ in C macro
        )
    }
    // `file` and `func` drop here, after the call has consumed the pointers.
}
#[no_mangle]
pub unsafe extern "C" fn flow_node_execute_scale2d_render1d(
c: *mut flow_c,
input: *mut flow_bitmap_bgra,
uncropped_canvas: *mut flow_bitmap_bgra,
info: *mut flow_nodeinfo_scale2d_render_to_canvas1d,
) -> bool {
if (*info).h.wrapping_add((*info).y) > (*uncropped_canvas).h
|| (*info).w.wrapping_add((*info).x) > (*uncropped_canvas).w
{
FLOW_error(
c,
flow_status_code::Invalid_argument,
"flow_node_execute_scale2d_render1d",
);
return false;
}
let cropped_canvas: *mut flow_bitmap_bgra = if (*info).x == 0
&& (*info).y == 0
&& (*info).w == (*uncropped_canvas).w
&& (*info).h == (*uncropped_canvas).h
{
uncropped_canvas
} else {
crop(
c,
uncropped_canvas,
(*info).x,
(*info).y,
(*info).w,
(*info).h,
)
};
if cropped_canvas.is_null() {
flow_context_add_to_callstack(
c,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
665 as i32,
(*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
b"flow_node_execute_scale2d_render1d\x00",
))
.as_ptr(),
);
return false;
}
let input_fmt: flow_pixel_format = flow_effective_pixel_format(input);
let canvas_fmt: flow_pixel_format = flow_effective_pixel_format(cropped_canvas);
if input_fmt as u32 != flow_bgra32 as i32 as u32 && input_fmt as u32 != flow_bgr32 as i32 as u32
{
FLOW_error(
c,
flow_status_code::Not_implemented,
"flow_node_execute_scale2d_render1d",
);
return false;
}
if canvas_fmt as u32 != flow_bgra32 as i32 as u32
&& canvas_fmt as u32 != flow_bgr32 as i32 as u32
{
FLOW_error(
c,
flow_status_code::Not_implemented,
"flow_node_execute_scale2d_render1d",
);
return false;
}
let mut colorcontext: flow_colorcontext_info = flow_colorcontext_info {
byte_to_float: [0.; 256],
floatspace: flow_working_floatspace_srgb,
apply_srgb: false,
apply_gamma: false,
gamma: 0.,
gamma_inverse: 0.,
};
flow_colorcontext_init(
c,
&mut colorcontext,
(*info).scale_in_colorspace,
0 as i32 as f32,
0 as i32 as f32,
0 as i32 as f32,
);
// Use details as a parent structure to ensure everything gets freed
let mut details: *mut flow_interpolation_details =
flow_interpolation_details_create_from(c, (*info).interpolation_filter);
if details.is_null() {
flow_context_add_to_callstack(
c,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
686 as i32,
(*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
b"flow_node_execute_scale2d_render1d\x00",
))
.as_ptr(),
);
return false;
}
(*details).sharpen_percent_goal = (*info).sharpen_percent_goal;
let mut contrib_v: *mut flow_interpolation_line_contributions =
NULL as *mut flow_interpolation_line_contributions;
let mut contrib_h: *mut flow_interpolation_line_contributions =
NULL as *mut flow_interpolation_line_contributions;
flow_context_profiler_start(
c,
b"contributions_calc\x00" as *const u8 as *const libc::c_char,
0 as i32 != 0,
);
contrib_v = flow_interpolation_line_contributions_create(c, (*info).h, (*input).h, details);
if contrib_v.is_null()
|| !flow_set_owner(
c,
contrib_v as *mut libc::c_void,
details as *mut libc::c_void,
)
{
flow_destroy(
c,
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
697 as i32,
);
flow_context_add_to_callstack(
c,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
698 as i32,
(*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
b"flow_node_execute_scale2d_render1d\x00",
))
.as_ptr(),
);
return false;
}
contrib_h = flow_interpolation_line_contributions_create(c, (*info).w, (*input).w, details);
if contrib_h.is_null()
|| !flow_set_owner(
c,
contrib_h as *mut libc::c_void,
details as *mut libc::c_void,
)
{
flow_destroy(
c,
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
702 as i32,
);
flow_context_add_to_callstack(
c,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
703 as i32,
(*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
b"flow_node_execute_scale2d_render1d\x00",
))
.as_ptr(),
);
return false;
}
flow_context_profiler_stop(
c,
b"contributions_calc\x00" as *const u8 as *const libc::c_char,
1 as i32 != 0,
0 as i32 != 0,
);
flow_context_profiler_start(
c,
b"create_bitmap_float (buffers)\x00" as *const u8 as *const libc::c_char,
0 as i32 != 0,
);
let mut source_buf: *mut flow_bitmap_float =
flow_bitmap_float_create_header(c, (*input).w as i32, 1 as i32, 4 as i32);
if source_buf.is_null()
|| !flow_set_owner(
c,
source_buf as *mut libc::c_void,
details as *mut libc::c_void,
)
{
flow_destroy(
c,
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
711 as i32,
);
flow_context_add_to_callstack(
c,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
712 as i32,
(*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
b"flow_node_execute_scale2d_render1d\x00",
))
.as_ptr(),
);
return false;
}
let mut dest_buf: *mut flow_bitmap_float =
flow_bitmap_float_create(c, (*info).w as i32, 1 as i32, 4 as i32, true);
if dest_buf.is_null()
|| !flow_set_owner(
c,
dest_buf as *mut libc::c_void,
details as *mut libc::c_void,
)
{
flow_destroy(
c,
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
716 as i32,
);
flow_context_add_to_callstack(
c,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
717 as i32,
(*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
b"flow_node_execute_scale2d_render1d\x00",
))
.as_ptr(),
);
return false;
}
(*source_buf).alpha_meaningful = input_fmt as u32 == flow_bgra32 as i32 as u32;
(*dest_buf).alpha_meaningful = (*source_buf).alpha_meaningful;
(*source_buf).alpha_premultiplied = (*source_buf).channels == 4 as i32 as u32;
(*dest_buf).alpha_premultiplied = (*source_buf).alpha_premultiplied;
flow_context_profiler_stop(
c,
b"create_bitmap_float (buffers)\x00" as *const u8 as *const libc::c_char,
1 as i32 != 0,
0 as i32 != 0,
);
// Determine how many rows we need to buffer
let mut max_input_rows: i32 = 0 as i32;
let mut i: u32 = 0 as i32 as u32;
while i < (*contrib_v).LineLength {
let inputs: i32 = (*(*contrib_v).ContribRow.offset(i as isize)).Right
- (*(*contrib_v).ContribRow.offset(i as isize)).Left
+ 1 as i32;
if inputs > max_input_rows {
max_input_rows = inputs
}
i = i.wrapping_add(1)
}
// Allocate space
let row_floats: usize = (4u32).wrapping_mul((*input).w) as usize;
let buf: *mut f32 = flow_context_malloc(
c,
::std::mem::size_of::<f32>()
.wrapping_mul(row_floats)
.wrapping_mul((max_input_rows + 1) as usize),
::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
737 as i32,
) as *mut f32;
let rows: *mut *mut f32 = flow_context_malloc(
c,
(::std::mem::size_of::<*mut f32>()).wrapping_mul(max_input_rows as usize),
::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
738 as i32,
) as *mut *mut f32;
let row_coefficients: *mut f32 = flow_context_malloc(
c,
::std::mem::size_of::<f32>().wrapping_mul(max_input_rows as usize),
::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
739 as i32,
) as *mut f32;
let row_indexes: *mut i32 = flow_context_malloc(
c,
::std::mem::size_of::<i32>().wrapping_mul(max_input_rows as usize),
::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
740 as i32,
) as *mut i32;
if buf.is_null() || rows.is_null() || row_coefficients.is_null() || row_indexes.is_null() {
flow_destroy(
c,
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
742 as i32,
);
flow_context_add_to_callstack(
c,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
743 as i32,
(*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
b"flow_node_execute_scale2d_render1d\x00",
))
.as_ptr(),
);
return false;
}
let output_address: *mut f32 =
&mut *buf.offset(row_floats.wrapping_mul(max_input_rows as usize) as isize) as *mut f32;
let mut i_0: i32 = 0 as i32;
while i_0 < max_input_rows {
let ref mut fresh8 = *rows.offset(i_0 as isize);
*fresh8 = &mut *buf
.offset((4u32).wrapping_mul((*input).w).wrapping_mul(i_0 as u32) as isize)
as *mut f32;
*row_coefficients.offset(i_0 as isize) = 1 as i32 as f32;
*row_indexes.offset(i_0 as isize) = -(1 as i32);
i_0 += 1
}
let mut out_row: u32 = 0 as i32 as u32;
while out_row < (*cropped_canvas).h {
let contrib: flow_interpolation_pixel_contributions =
*(*contrib_v).ContribRow.offset(out_row as isize);
// Clear output row
::libc::memset(
output_address as *mut libc::c_void,
0 as i32,
::std::mem::size_of::<f32>().wrapping_mul(row_floats),
);
let mut input_row: i32 = contrib.Left;
while input_row <= contrib.Right {
// Try to find row in buffer if already loaded
let mut loaded: bool = false;
let mut active_buf_ix: i32 = -(1 as i32);
let mut buf_row: i32 = 0 as i32;
while buf_row < max_input_rows {
if *row_indexes.offset(buf_row as isize) == input_row {
active_buf_ix = buf_row;
loaded = true;
break;
} else {
buf_row += 1
}
}
// Not loaded?
if !loaded {
let mut buf_row_0: i32 = 0 as i32; // Buffer too small!
while buf_row_0 < max_input_rows {
if *row_indexes.offset(buf_row_0 as isize) < contrib.Left {
active_buf_ix = buf_row_0;
loaded = false;
break;
} else {
buf_row_0 += 1
}
}
}
if active_buf_ix < 0 as i32 {
flow_destroy(
c,
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
779 as i32,
);
flow_context_set_error_get_message_buffer(
c,
flow_status_code::Invalid_internal_state,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
780 as i32,
(*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
b"flow_node_execute_scale2d_render1d\x00",
))
.as_ptr(),
);
return false;
}
if !loaded {
// Load row
(*source_buf).pixels = *rows.offset(active_buf_ix as isize);
flow_context_profiler_start(
c,
b"convert_srgb_to_linear\x00" as *const u8 as *const libc::c_char,
0 as i32 != 0,
);
if !flow_bitmap_float_convert_srgb_to_linear(
c,
&mut colorcontext,
input,
input_row as u32,
source_buf,
0 as i32 as u32,
1 as i32 as u32,
) {
flow_destroy(
c,
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
789 as i32,
);
flow_context_add_to_callstack(
c,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
790 as i32,
(*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
b"flow_node_execute_scale2d_render1d\x00",
))
.as_ptr(),
);
return false;
}
flow_context_profiler_stop(
c,
b"convert_srgb_to_linear\x00" as *const u8 as *const libc::c_char,
1 as i32 != 0,
0 as i32 != 0,
);
*row_coefficients.offset(active_buf_ix as isize) = 1 as i32 as f32;
*row_indexes.offset(active_buf_ix as isize) = input_row;
loaded = true
}
let weight: f32 = *contrib.Weights.offset((input_row - contrib.Left) as isize);
if fabs(weight as f64) > 0.00000002f64 {
// Apply coefficient, update tracking
let delta_coefficient: f32 =
weight / *row_coefficients.offset(active_buf_ix as isize);
multiply_row(
*rows.offset(active_buf_ix as isize),
row_floats,
delta_coefficient,
);
*row_coefficients.offset(active_buf_ix as isize) = weight;
// Add row
add_row(
output_address,
*rows.offset(active_buf_ix as isize),
row_floats,
);
}
input_row += 1
}
// The container now points to the row which has been vertically scaled
(*source_buf).pixels = output_address;
// Now scale horizontally!
flow_context_profiler_start(
c,
b"ScaleBgraFloatRows\x00" as *const u8 as *const libc::c_char,
0 as i32 != 0,
);
if !flow_bitmap_float_scale_rows(
c,
source_buf,
0 as i32 as u32,
dest_buf,
0 as i32 as u32,
1 as i32 as u32,
(*contrib_h).ContribRow,
) {
flow_destroy(
c,
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
816 as i32,
);
flow_context_add_to_callstack(
c,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
817 as i32,
(*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
b"flow_node_execute_scale2d_render1d\x00",
))
.as_ptr(),
);
return false;
}
flow_context_profiler_stop(
c,
b"ScaleBgraFloatRows\x00" as *const u8 as *const libc::c_char,
1 as i32 != 0,
0 as i32 != 0,
);
if !flow_bitmap_float_composite_linear_over_srgb(
c,
&mut colorcontext,
dest_buf,
0 as i32 as u32,
cropped_canvas,
out_row,
1 as i32 as u32,
false,
) {
flow_destroy(
c,
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
822 as i32,
);
flow_context_add_to_callstack(
c,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
823 as i32,
(*::std::mem::transmute::<&[u8; 35], &[libc::c_char; 35]>(
b"flow_node_execute_scale2d_render1d\x00",
))
.as_ptr(),
);
return false;
}
out_row = out_row.wrapping_add(1)
}
flow_destroy(
c,
if cropped_canvas == uncropped_canvas {
0 as *mut flow_bitmap_bgra
} else {
cropped_canvas
} as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
826 as i32,
);
flow_destroy(
c,
details as *mut libc::c_void,
b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
827 as i32,
);
return true;
}
/// Allocates a `flow_convolution_kernel` with `2 * radius + 1` float weights
/// and a scratch buffer sized for up to 4 channels of pixel data. On any
/// allocation failure the partial allocations are released, an
/// `Out_of_memory` error is recorded on `context`, and NULL is returned.
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_create(
    context: *mut flow_c,
    radius: u32,
) -> *mut flow_convolution_kernel {
    // The kernel struct itself (zero-initialized by calloc).
    let mut k: *mut flow_convolution_kernel = flow_context_calloc(
        context,
        1 as i32 as usize,
        ::std::mem::size_of::<flow_convolution_kernel>(),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        832 as i32,
    ) as *mut flow_convolution_kernel;
    // For the actual array;
    let a: *mut f32 = flow_context_calloc(
        context,
        radius.wrapping_mul(2u32).wrapping_add(1u32) as usize,
        ::std::mem::size_of::<f32>(),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        834 as i32,
    ) as *mut f32;
    // we assume a maximum of 4 channels are going to need buffering during convolution
    let buf: *mut f32 = flow_context_malloc(
        context,
        (radius as usize)
            .wrapping_add(2)
            .wrapping_mul(4)
            .wrapping_mul(::std::mem::size_of::<f32>()),
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        836 as i32,
    ) as *mut f32; // nothing to do here, zeroes are as normalized as you can get ;)
    if k.is_null() || a.is_null() || buf.is_null() {
        // Release whichever allocations succeeded (presumably
        // flow_deprecated_free tolerates NULL, as C free does — confirm).
        flow_deprecated_free(
            context,
            k as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            839 as i32,
        );
        flow_deprecated_free(
            context,
            a as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            840 as i32,
        );
        flow_deprecated_free(
            context,
            buf as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            841 as i32,
        );
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            842 as i32,
            (*::std::mem::transmute::<&[u8; 31], &[libc::c_char; 31]>(
                b"flow_convolution_kernel_create\x00",
            ))
            .as_ptr(),
        );
        return NULL as *mut flow_convolution_kernel;
    }
    // Wire the allocations into the struct before handing it to the caller.
    (*k).kernel = a;
    (*k).width = radius.wrapping_mul(2u32).wrapping_add(1u32);
    (*k).buffer = buf;
    (*k).radius = radius;
    return k;
}
/// Frees a kernel created by `flow_convolution_kernel_create`: first the
/// weight array and scratch buffer, then the struct itself. A NULL `kernel`
/// skips the member frees but still performs the final (no-op) struct free,
/// matching the original C code.
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_destroy(
    context: *mut flow_c,
    mut kernel: *mut flow_convolution_kernel,
) {
    if !kernel.is_null() {
        flow_deprecated_free(
            context,
            (*kernel).kernel as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            854 as i32,
        );
        flow_deprecated_free(
            context,
            (*kernel).buffer as *mut libc::c_void,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            855 as i32,
        );
        // Null the members so a stale pointer to the struct cannot double-free.
        (*kernel).kernel = NULL as *mut f32;
        (*kernel).buffer = NULL as *mut f32
    }
    // Finally release the struct allocation itself.
    flow_deprecated_free(
        context,
        kernel as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        859 as i32,
    );
}
/// Builds a gaussian convolution kernel of width `2 * radius + 1`, with each
/// tap weighted by the gaussian density at its distance from the center tap.
/// Returns NULL when the underlying allocation fails.
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_create_gaussian(
    context: *mut flow_c,
    stdDev: f64,
    radius: u32,
) -> *mut flow_convolution_kernel {
    let k: *mut flow_convolution_kernel = flow_convolution_kernel_create(context, radius);
    if !k.is_null() {
        for i in 0..(*k).width {
            // Tap weight = gaussian(|distance from center|, stdDev).
            let distance = (radius as i32 - i as i32).abs() as f64;
            *(*k).kernel.offset(i as isize) = ir_gaussian(distance, stdDev) as f32;
        }
    }
    k
}
/// Returns the sum of all kernel weights, accumulated in f64 to limit
/// rounding error.
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_sum(kernel: *mut flow_convolution_kernel) -> f64 {
    let mut total: f64 = 0.0;
    for idx in 0..(*kernel).width {
        total += *(*kernel).kernel.offset(idx as isize) as f64;
    }
    total
}
/// Rescales every kernel weight so the weights sum to `desiredSum`.
/// A kernel summing to exactly zero cannot be normalized and is left
/// untouched.
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_normalize(
    kernel: *mut flow_convolution_kernel,
    desiredSum: f32,
) {
    let current: f64 = flow_convolution_kernel_sum(kernel);
    if current == 0.0 {
        return;
    }
    let scale = (desiredSum as f64 / current) as f32;
    for idx in 0..(*kernel).width {
        *(*kernel).kernel.offset(idx as isize) *= scale;
    }
}
/// Convenience constructor: a gaussian kernel whose weights are rescaled to
/// sum to 1.0. Returns NULL when allocation fails.
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_create_gaussian_normalized(
    context: *mut flow_c,
    stdDev: f64,
    radius: u32,
) -> *mut flow_convolution_kernel {
    let kernel = flow_convolution_kernel_create_gaussian(context, stdDev, radius);
    if !kernel.is_null() {
        flow_convolution_kernel_normalize(kernel, 1.0f32);
    }
    kernel
}
/// Builds an unsharp-mask style sharpening kernel: every gaussian tap is
/// negated except the center tap, which becomes `2*sum - center`; the result
/// is then renormalized so the weights sum to 1.0. Returns NULL when
/// allocation fails.
#[no_mangle]
pub unsafe extern "C" fn flow_convolution_kernel_create_gaussian_sharpen(
    context: *mut flow_c,
    stdDev: f64,
    radius: u32,
) -> *mut flow_convolution_kernel {
    let kernel = flow_convolution_kernel_create_gaussian(context, stdDev, radius);
    if !kernel.is_null() {
        let sum: f64 = flow_convolution_kernel_sum(kernel);
        for i in 0..(*kernel).width {
            let tap = (*kernel).kernel.offset(i as isize);
            if i == radius {
                // Center tap: reflect around twice the total energy.
                *tap = (2.0f64 * sum - *tap as f64) as f32;
            } else {
                *tap = -*tap;
            }
        }
        flow_convolution_kernel_normalize(kernel, 1.0f32);
    }
    kernel
}
/// In-place horizontal convolution of rows `[from_row, from_row + row_count)`
/// of `buf` (a negative `row_count` means "through the last row") with the
/// weights in `kernel`, touching only the first `convolve_channels` of each
/// pixel. A ring buffer of `radius + 1` convolved pixels is kept in
/// `kernel->buffer` so results are written back to the source row only after
/// the input pixels they depend on have been consumed. Always returns true.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_convolve_rows(
    _context: *mut flow_c,
    buf: *mut flow_bitmap_float,
    kernel: *mut flow_convolution_kernel,
    convolve_channels: u32,
    from_row: u32,
    row_count: i32,
) -> bool {
    let radius: u32 = (*kernel).radius;
    // Optional guard band: convolved pixels whose total change falls outside
    // [threshold_min, threshold_max] are discarded and the source kept.
    let threshold_min: f32 = (*kernel).threshold_min_change;
    let threshold_max: f32 = (*kernel).threshold_max_change;
    // Do nothing unless the image is at least half as wide as the kernel.
    if (*buf).w < radius.wrapping_add(1u32) {
        return true;
    }
    let buffer_count: u32 = radius.wrapping_add(1u32);
    let w: u32 = (*buf).w;
    let int_w: i32 = (*buf).w as i32;
    let step: u32 = (*buf).channels;
    let until_row: u32 = if row_count < 0 as i32 {
        (*buf).h
    } else {
        from_row.wrapping_add(row_count as u32)
    };
    let ch_used: u32 = convolve_channels;
    // `buffer` is the ring of pending output pixels; `avg` is one extra
    // pixel's worth of accumulator space placed immediately after the ring.
    let buffer: *mut f32 = (*kernel).buffer;
    let avg: *mut f32 = &mut *(*kernel)
        .buffer
        .offset(buffer_count.wrapping_mul(ch_used) as isize) as *mut f32;
    let kern: *const f32 = (*kernel).kernel;
    // wrap_mode 0 = renormalize partial edge sums; 1 = extend edge pixel.
    // Hard-coded to 0 here, so the extend branch below is currently dead.
    let wrap_mode: i32 = 0 as i32;
    let mut row: u32 = from_row;
    while row < until_row {
        let source_buffer: *mut f32 = &mut *(*buf)
            .pixels
            .offset(row.wrapping_mul((*buf).float_stride) as isize)
            as *mut f32;
        let mut circular_idx: i32 = 0 as i32;
        // Iterate buffer_count past the row end so the ring fully drains.
        let mut ndx: u32 = 0 as i32 as u32;
        while ndx < w.wrapping_add(buffer_count) {
            // Flush old value
            if ndx >= buffer_count {
                memcpy(
                    &mut *source_buffer
                        .offset(ndx.wrapping_sub(buffer_count).wrapping_mul(step) as isize)
                        as *mut f32 as *mut libc::c_void,
                    &mut *buffer.offset((circular_idx as u32).wrapping_mul(ch_used) as isize)
                        as *mut f32 as *const libc::c_void,
                    (ch_used as u64).wrapping_mul(::std::mem::size_of::<f32>() as u64),
                );
            }
            // Calculate and enqueue new value
            if ndx < w {
                let left: i32 = ndx.wrapping_sub(radius) as i32;
                let right: i32 = ndx.wrapping_add(radius) as i32;
                let mut i: i32 = 0;
                memset(
                    avg as *mut libc::c_void,
                    0 as i32,
                    (::std::mem::size_of::<f32>() as u64).wrapping_mul(ch_used as u64),
                );
                if left < 0 as i32 || right >= w as i32 {
                    if wrap_mode == 0 as i32 {
                        // Only sample what's present, and fix the average later.
                        let mut total_weight: f32 = 0 as i32 as f32;
                        /* Accumulate each channel */
                        i = left;
                        while i <= right {
                            // NOTE(review): `i > 0` excludes column 0 from edge
                            // sampling; `i >= 0` looks intended — confirm
                            // against upstream C graphics.c before changing.
                            if i > 0 as i32 && i < int_w {
                                let weight: f32 = *kern.offset((i - left) as isize);
                                total_weight += weight;
                                let mut j: u32 = 0 as i32 as u32;
                                while j < ch_used {
                                    *avg.offset(j as isize) += weight
                                        * *source_buffer
                                            .offset((i as u32).wrapping_mul(step).wrapping_add(j)
                                                as isize);
                                    j = j.wrapping_add(1)
                                }
                            }
                            i += 1
                        }
                        // Renormalize by the weight actually sampled.
                        let mut j_0: u32 = 0 as i32 as u32;
                        while j_0 < ch_used {
                            *avg.offset(j_0 as isize) = *avg.offset(j_0 as isize) / total_weight;
                            j_0 = j_0.wrapping_add(1)
                        }
                    } else if wrap_mode == 1 as i32 {
                        // Extend last pixel to be used for all missing inputs
                        /* Accumulate each channel */
                        i = left;
                        while i <= right {
                            let weight_0: f32 = *kern.offset((i - left) as isize);
                            // Clamp the sample index into [0, w-1].
                            let ix: u32 = if i > int_w - 1 as i32 {
                                (int_w) - 1 as i32
                            } else if i < 0 as i32 {
                                0 as i32
                            } else {
                                i
                            } as u32;
                            let mut j_1: u32 = 0 as i32 as u32;
                            while j_1 < ch_used {
                                *avg.offset(j_1 as isize) += weight_0
                                    * *source_buffer
                                        .offset(ix.wrapping_mul(step).wrapping_add(j_1) as isize);
                                j_1 = j_1.wrapping_add(1)
                            }
                            i += 1
                        }
                    }
                } else {
                    // Interior pixel: the whole window is in-bounds.
                    /* Accumulate each channel */
                    i = left;
                    while i <= right {
                        let weight_1: f32 = *kern.offset((i - left) as isize);
                        let mut j_2: u32 = 0 as i32 as u32;
                        while j_2 < ch_used {
                            *avg.offset(j_2 as isize) += weight_1
                                * *source_buffer.offset(
                                    (i as u32).wrapping_mul(step).wrapping_add(j_2) as isize,
                                );
                            j_2 = j_2.wrapping_add(1)
                        }
                        i += 1
                    }
                }
                // Enqueue difference
                memcpy(
                    &mut *buffer.offset((circular_idx as u32).wrapping_mul(ch_used) as isize)
                        as *mut f32 as *mut libc::c_void,
                    avg as *const libc::c_void,
                    (ch_used as u64).wrapping_mul(::std::mem::size_of::<f32>() as u64),
                );
                // If thresholds are active and the summed per-channel change
                // is outside [threshold_min, threshold_max], overwrite the
                // enqueued value with the untouched source pixel.
                if threshold_min > 0 as i32 as f32 || threshold_max > 0 as i32 as f32 {
                    let mut change: f32 = 0 as i32 as f32;
                    let mut j_3: u32 = 0 as i32 as u32;
                    while j_3 < ch_used {
                        change += fabs(
                            (*source_buffer
                                .offset(ndx.wrapping_mul(step).wrapping_add(j_3) as isize)
                                - *avg.offset(j_3 as isize)) as f64,
                        ) as f32;
                        j_3 = j_3.wrapping_add(1)
                    }
                    if change < threshold_min || change > threshold_max {
                        memcpy(
                            &mut *buffer
                                .offset((circular_idx as u32).wrapping_mul(ch_used) as isize)
                                as *mut f32 as *mut libc::c_void,
                            &mut *source_buffer.offset(ndx.wrapping_mul(step) as isize) as *mut f32
                                as *const libc::c_void,
                            (ch_used as u64).wrapping_mul(::std::mem::size_of::<f32>() as u64),
                        );
                    }
                }
            }
            circular_idx = ((circular_idx + 1 as i32) as u32).wrapping_rem(buffer_count) as i32;
            ndx = ndx.wrapping_add(1)
        }
        row = row.wrapping_add(1)
    }
    return true;
}
/// In-place horizontal box blur over rows `[from_row, from_row + row_count)`
/// of `image` (a negative `row_count` means "through the last row"), repeated
/// `passes` times with an averaging window of `2*radius + 1` pixels.
/// `work_buffer` is a caller-supplied ring of `radius + 1` pending output
/// pixels. Only the first `convolve_channels` of each pixel are blurred.
/// Always returns true.
unsafe extern "C" fn BitmapFloat_boxblur_rows(
    _context: *mut flow_c,
    image: *mut flow_bitmap_float,
    radius: u32,
    passes: u32,
    convolve_channels: u32,
    work_buffer: *mut f32,
    from_row: u32,
    row_count: i32,
) -> bool {
    let buffer_count: u32 = radius.wrapping_add(1u32);
    let w: u32 = (*image).w;
    let step: u32 = (*image).channels;
    let until_row: u32 = if row_count < 0 as i32 {
        (*image).h
    } else {
        from_row.wrapping_add(row_count as u32)
    };
    let ch_used: u32 = (*image).channels;
    let buffer: *mut f32 = work_buffer;
    // Full window size and its reciprocal, valid once the window no longer
    // hangs off an edge of the row.
    let std_count: u32 = radius.wrapping_mul(2u32).wrapping_add(1u32);
    let std_factor: f32 = 1.0f32 / std_count as f32;
    let mut row: u32 = from_row;
    while row < until_row {
        let source_buffer: *mut f32 = &mut *(*image)
            .pixels
            .offset(row.wrapping_mul((*image).float_stride) as isize)
            as *mut f32;
        let mut pass_index: u32 = 0 as i32 as u32;
        while pass_index < passes {
            let mut circular_idx: i32 = 0 as i32;
            // Running per-channel sum of the pixels currently in the window.
            let mut sum: [f32; 4] = [
                0 as i32 as f32,
                0 as i32 as f32,
                0 as i32 as f32,
                0 as i32 as f32,
            ];
            let mut count: u32 = 0 as i32 as u32;
            // Prime the running sum with the first `radius` pixels.
            let mut ndx: u32 = 0 as i32 as u32;
            while ndx < radius {
                let mut ch: u32 = 0 as i32 as u32;
                while ch < convolve_channels {
                    sum[ch as usize] +=
                        *source_buffer.offset(ndx.wrapping_mul(step).wrapping_add(ch) as isize);
                    ch = ch.wrapping_add(1)
                }
                count = count.wrapping_add(1);
                ndx = ndx.wrapping_add(1)
            }
            // Iterate buffer_count past the row end so the ring fully drains.
            let mut ndx_0: u32 = 0 as i32 as u32;
            while ndx_0 < w.wrapping_add(buffer_count) {
                // Pixels
                if ndx_0 >= buffer_count {
                    // same as ndx > radius
                    // Remove trailing item from average
                    let mut ch_0: u32 = 0 as i32 as u32;
                    while ch_0 < convolve_channels {
                        sum[ch_0 as usize] -= *source_buffer.offset(
                            ndx_0
                                .wrapping_sub(radius)
                                .wrapping_sub(1u32)
                                .wrapping_mul(step)
                                .wrapping_add(ch_0) as isize,
                        );
                        ch_0 = ch_0.wrapping_add(1)
                    }
                    count = count.wrapping_sub(1);
                    // Flush old value
                    memcpy(
                        &mut *source_buffer
                            .offset(ndx_0.wrapping_sub(buffer_count).wrapping_mul(step) as isize)
                            as *mut f32 as *mut libc::c_void,
                        &mut *buffer.offset((circular_idx as u32).wrapping_mul(ch_used) as isize)
                            as *mut f32 as *const libc::c_void,
                        (ch_used as u64).wrapping_mul(::std::mem::size_of::<f32>() as u64),
                    );
                }
                // Calculate and enqueue new value
                if ndx_0 < w {
                    if ndx_0 < w.wrapping_sub(radius) {
                        // Leading edge of the window is still in-bounds: add it.
                        let mut ch_1: u32 = 0 as i32 as u32;
                        while ch_1 < convolve_channels {
                            sum[ch_1 as usize] += *source_buffer.offset(
                                ndx_0
                                    .wrapping_add(radius)
                                    .wrapping_mul(step)
                                    .wrapping_add(ch_1) as isize,
                            );
                            ch_1 = ch_1.wrapping_add(1)
                        }
                        count = count.wrapping_add(1)
                    }
                    // Enqueue averaged value
                    if count != std_count {
                        // Window is partial near an edge: divide by the actual
                        // number of samples in the sum.
                        let mut ch_2: u32 = 0 as i32 as u32;
                        while ch_2 < convolve_channels {
                            *buffer.offset(
                                (circular_idx as u32)
                                    .wrapping_mul(ch_used)
                                    .wrapping_add(ch_2) as isize,
                            ) = sum[ch_2 as usize] / count as f32;
                            ch_2 = ch_2.wrapping_add(1)
                            // Recompute factor
                        }
                    } else {
                        // Full window: multiply by the precomputed reciprocal.
                        let mut ch_3: u32 = 0 as i32 as u32;
                        while ch_3 < convolve_channels {
                            *buffer.offset(
                                (circular_idx as u32)
                                    .wrapping_mul(ch_used)
                                    .wrapping_add(ch_3) as isize,
                            ) = sum[ch_3 as usize] * std_factor;
                            ch_3 = ch_3.wrapping_add(1)
                        }
                    }
                }
                circular_idx = ((circular_idx + 1 as i32) as u32).wrapping_rem(buffer_count) as i32;
                ndx_0 = ndx_0.wrapping_add(1)
            }
            pass_index = pass_index.wrapping_add(1)
        }
        row = row.wrapping_add(1)
    }
    return true;
}
/// Box blur variant whose averaging window is offset half a pixel to the left
/// (`align == -1`) or right (`align == 1`): the two boundary samples carry a
/// weight of 0.5 each, so the effective window is centered on a pixel
/// boundary. Used to realize the even-diameter blurs required by the SVG
/// gaussian approximation. Returns false only for an invalid `align` value.
unsafe extern "C" fn BitmapFloat_boxblur_misaligned_rows(
    context: *mut flow_c,
    image: *mut flow_bitmap_float,
    radius: u32,
    align: i32,
    convolve_channels: u32,
    work_buffer: *mut f32,
    from_row: u32,
    row_count: i32,
) -> bool {
    if align != 1 as i32 && align != -(1 as i32) {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1088 as i32,
            (*::std::mem::transmute::<&[u8; 36], &[libc::c_char; 36]>(
                b"BitmapFloat_boxblur_misaligned_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // One extra slot versus the aligned variant: the half-pixel shift delays
    // write-back by up to radius + 2 pixels.
    let buffer_count: u32 = radius.wrapping_add(2u32);
    let w: u32 = (*image).w;
    let step: u32 = (*image).channels;
    let until_row: u32 = if row_count < 0 as i32 {
        (*image).h
    } else {
        from_row.wrapping_add(row_count as u32)
    };
    let ch_used: u32 = (*image).channels;
    let buffer: *mut f32 = work_buffer;
    // Right-shifted windows write back one pixel later than left-shifted ones.
    let write_offset: u32 = if align == -(1 as i32) {
        0 as i32
    } else {
        1 as i32
    } as u32;
    let mut row: u32 = from_row;
    while row < until_row {
        let source_buffer: *mut f32 = &mut *(*image)
            .pixels
            .offset(row.wrapping_mul((*image).float_stride) as isize)
            as *mut f32;
        let mut circular_idx: i32 = 0 as i32;
        // Running per-channel sum; `count` is fractional because boundary
        // samples contribute with weight 0.5.
        let mut sum: [f32; 4] = [
            0 as i32 as f32,
            0 as i32 as f32,
            0 as i32 as f32,
            0 as i32 as f32,
        ];
        let mut count: f32 = 0 as i32 as f32;
        // Prime the sum with the first `radius` pixels; the last of them is
        // the window boundary and gets half weight.
        let mut ndx: u32 = 0 as i32 as u32;
        while ndx < radius {
            let factor: f32 = if ndx == radius.wrapping_sub(1u32) {
                0.5f32
            } else {
                1 as i32 as f32
            };
            let mut ch: u32 = 0 as i32 as u32;
            while ch < convolve_channels {
                sum[ch as usize] += *source_buffer
                    .offset(ndx.wrapping_mul(step).wrapping_add(ch) as isize)
                    * factor;
                ch = ch.wrapping_add(1)
            }
            count += factor;
            ndx = ndx.wrapping_add(1)
        }
        let mut ndx_0: u32 = 0 as i32 as u32;
        while ndx_0 < w.wrapping_add(buffer_count).wrapping_sub(write_offset) {
            // Pixels
            // Calculate new value
            if ndx_0 < w {
                // Add the two half-weight samples entering the window…
                if ndx_0 < w.wrapping_sub(radius) {
                    let mut ch_0: u32 = 0 as i32 as u32;
                    while ch_0 < convolve_channels {
                        sum[ch_0 as usize] += *source_buffer.offset(
                            ndx_0
                                .wrapping_add(radius)
                                .wrapping_mul(step)
                                .wrapping_add(ch_0) as isize,
                        ) * 0.5f32;
                        ch_0 = ch_0.wrapping_add(1)
                    }
                    count += 0.5f32
                }
                if ndx_0 < w.wrapping_sub(radius).wrapping_add(1u32) {
                    let mut ch_1: u32 = 0 as i32 as u32;
                    while ch_1 < convolve_channels {
                        sum[ch_1 as usize] += *source_buffer.offset(
                            ndx_0
                                .wrapping_sub(1u32)
                                .wrapping_add(radius)
                                .wrapping_mul(step)
                                .wrapping_add(ch_1) as isize,
                        ) * 0.5f32;
                        ch_1 = ch_1.wrapping_add(1)
                    }
                    count += 0.5f32
                }
                // Remove trailing items from average
                if ndx_0 >= radius {
                    let mut ch_2: u32 = 0 as i32 as u32;
                    while ch_2 < convolve_channels {
                        sum[ch_2 as usize] -= *source_buffer.offset(
                            ndx_0
                                .wrapping_sub(radius)
                                .wrapping_mul(step)
                                .wrapping_add(ch_2) as isize,
                        ) * 0.5f32;
                        ch_2 = ch_2.wrapping_add(1)
                    }
                    count -= 0.5f32
                }
                if ndx_0 >= radius.wrapping_add(1u32) {
                    let mut ch_3: u32 = 0 as i32 as u32;
                    while ch_3 < convolve_channels {
                        sum[ch_3 as usize] -= *source_buffer.offset(
                            ndx_0
                                .wrapping_sub(1u32)
                                .wrapping_sub(radius)
                                .wrapping_mul(step)
                                .wrapping_add(ch_3) as isize,
                        ) * 0.5f32;
                        ch_3 = ch_3.wrapping_add(1)
                    }
                    count -= 0.5f32
                }
            }
            // Flush old value
            if ndx_0 >= buffer_count.wrapping_sub(write_offset) {
                memcpy(
                    &mut *source_buffer.offset(
                        ndx_0
                            .wrapping_add(write_offset)
                            .wrapping_sub(buffer_count)
                            .wrapping_mul(step) as isize,
                    ) as *mut f32 as *mut libc::c_void,
                    &mut *buffer.offset((circular_idx as u32).wrapping_mul(ch_used) as isize)
                        as *mut f32 as *const libc::c_void,
                    (ch_used as u64).wrapping_mul(::std::mem::size_of::<f32>() as u64),
                );
            }
            // enqueue new value
            if ndx_0 < w {
                let mut ch_4: u32 = 0 as i32 as u32; // Never exceed half the size of the buffer.
                while ch_4 < convolve_channels {
                    *buffer.offset(
                        (circular_idx as u32)
                            .wrapping_mul(ch_used)
                            .wrapping_add(ch_4) as isize,
                    ) = sum[ch_4 as usize] / count;
                    ch_4 = ch_4.wrapping_add(1)
                }
            }
            circular_idx = ((circular_idx + 1 as i32) as u32).wrapping_rem(buffer_count) as i32;
            ndx_0 = ndx_0.wrapping_add(1)
        }
        row = row.wrapping_add(1)
    }
    return true;
}
/// Computes the box-blur diameter `d` that approximates a gaussian of the
/// given `sigma` (per the SVG feGaussianBlur box-blur approximation:
/// d = floor(sigma * 3 * sqrt(2*pi) / 4 + 0.5)), capped so the blur window
/// never exceeds half the bitmap width.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_approx_gaussian_calculate_d(
    sigma: f32,
    bitmap_width: u32,
) -> u32 {
    // 3 * sqrt(2*pi) / 4 ≈ 1.88 — the SVG spec's box-size factor.
    let raw = (1.8799712059732503768118239636082839397552400554574537f32 * sigma + 0.5f32)
        .floor() as i32 as u32;
    let cap = bitmap_width.wrapping_sub(1u32).wrapping_div(2u32);
    if raw < cap {
        raw
    } else {
        cap
    }
}
/// Number of f32 elements (not bytes) the caller must supply to
/// `flow_bitmap_float_approx_gaussian_blur_rows` for this sigma and width:
/// twice the box diameter plus 12 elements of headroom (constant carried
/// over from the C source).
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_approx_gaussian_buffer_element_count_required(
    sigma: f32,
    bitmap_width: u32,
) -> u32 {
    let d = flow_bitmap_float_approx_gaussian_calculate_d(sigma, bitmap_width);
    d.wrapping_mul(2u32).wrapping_add(12u32)
    // * sizeof(float);
}
/// Approximates a gaussian blur of the selected rows using three successive
/// box blurs, per the SVG feGaussianBlur specification (accurate to roughly
/// 3% for sigma >= 2). `buffer` must provide at least
/// `flow_bitmap_float_approx_gaussian_buffer_element_count_required()`
/// floats. Returns false (with an error recorded on `context`) when sigma is
/// too small for the approximation or the buffer is undersized.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_approx_gaussian_blur_rows(
    context: *mut flow_c,
    image: *mut flow_bitmap_float,
    sigma: f32,
    buffer: *mut f32,
    buffer_element_count: usize,
    from_row: u32,
    row_count: i32,
) -> bool {
    // Ensure sigma is large enough for approximation to be accurate.
    if sigma < 2 as i32 as f32 {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1173 as i32,
            (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Ensure the buffer is large enough
    if flow_bitmap_float_approx_gaussian_buffer_element_count_required(sigma, (*image).w) as usize
        > buffer_element_count
    {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1179 as i32,
            (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // http://www.w3.org/TR/SVG11/filters.html#feGaussianBlur
    // For larger values of 's' (s >= 2.0), an approximation can be used :
    // Three successive box - blurs build a piece - wise quadratic convolution kernel, which approximates the Gaussian
    // kernel to within roughly 3 % .
    let d: u32 = flow_bitmap_float_approx_gaussian_calculate_d(sigma, (*image).w);
    //... if d is odd, use three box - blurs of size 'd', centered on the output pixel.
    if d.wrapping_rem(2u32) > 0 as i32 as u32 {
        // Odd diameter: three identical pixel-centered box blurs.
        if !BitmapFloat_boxblur_rows(
            context,
            image,
            d.wrapping_div(2u32),
            3 as i32 as u32,
            (*image).channels,
            buffer,
            from_row,
            row_count,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1191 as i32,
                (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                    b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
    } else {
        // ... if d is even, two box - blurs of size 'd'
        // (the first one centered on the pixel boundary between the output pixel and the one to the left,
        // the second one centered on the pixel boundary between the output pixel and the one to the right)
        // and one box blur of size 'd+1' centered on the output pixel.
        if !BitmapFloat_boxblur_misaligned_rows(
            context,
            image,
            d.wrapping_div(2u32),
            -(1 as i32),
            (*image).channels,
            buffer,
            from_row,
            row_count,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1200 as i32,
                (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                    b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        if !BitmapFloat_boxblur_misaligned_rows(
            context,
            image,
            d.wrapping_div(2u32),
            1 as i32,
            (*image).channels,
            buffer,
            from_row,
            row_count,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1204 as i32,
                (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                    b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        if !BitmapFloat_boxblur_rows(
            context,
            image,
            d.wrapping_div(2u32).wrapping_add(1u32),
            1 as i32 as u32,
            (*image).channels,
            buffer,
            from_row,
            row_count,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1207 as i32,
                (*::std::mem::transmute::<&[u8; 44], &[libc::c_char; 44]>(
                    b"flow_bitmap_float_approx_gaussian_blur_rows\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
    }
    return true;
}
/// Transposes a 4x4 block of f32s from `A` into `B` using the classic SSE
/// unpack/move shuffle sequence. `lda` / `ldb` are row strides in floats
/// (not bytes); loads and stores are unaligned.
#[inline]
unsafe extern "C" fn transpose4x4_SSE(A: *mut f32, B: *mut f32, lda: i32, ldb: i32) {
    // Load the four source rows.
    let r0: __m128 = _mm_loadu_ps(A);
    let r1: __m128 = _mm_loadu_ps(A.offset(lda as isize));
    let r2: __m128 = _mm_loadu_ps(A.offset((2 * lda) as isize));
    let r3: __m128 = _mm_loadu_ps(A.offset((3 * lda) as isize));
    // Interleave low and high halves of each row pair…
    let lo01 = _mm_unpacklo_ps(r0, r1);
    let lo23 = _mm_unpacklo_ps(r2, r3);
    let hi01 = _mm_unpackhi_ps(r0, r1);
    let hi23 = _mm_unpackhi_ps(r2, r3);
    // …then recombine the 64-bit halves to form the transposed rows.
    _mm_storeu_ps(B, _mm_movelh_ps(lo01, lo23));
    _mm_storeu_ps(B.offset(ldb as isize), _mm_movehl_ps(lo23, lo01));
    _mm_storeu_ps(B.offset((2 * ldb) as isize), _mm_movelh_ps(hi01, hi23));
    _mm_storeu_ps(B.offset((3 * ldb) as isize), _mm_movehl_ps(hi23, hi01));
}
/// Transposes an `n` x `m` matrix of f32s from `A` into `B` by tiling it into
/// `block_size` x `block_size` panels (for cache locality) and transposing
/// each panel in 4x4 SSE sub-tiles. `n`, `m`, and `block_size` must be
/// multiples of 4; `lda` / `ldb` are row strides in floats.
/// (The original C carried an `omp parallel for collapse(2)` pragma here.)
#[inline]
unsafe extern "C" fn transpose_block_SSE4x4(
    A: *mut f32,
    B: *mut f32,
    n: i32,
    m: i32,
    lda: i32,
    ldb: i32,
    block_size: i32,
) {
    let mut i = 0 as i32;
    while i < n {
        // Clip this panel's row range to the matrix edge.
        let i_end = if i + block_size < n { i + block_size } else { n };
        let mut j = 0 as i32;
        while j < m {
            // Clip this panel's column range to the matrix edge.
            let j_end = if j + block_size < m { j + block_size } else { m };
            let mut bi = i;
            while bi < i_end {
                let mut bj = j;
                while bj < j_end {
                    transpose4x4_SSE(
                        A.offset((bi * lda + bj) as isize),
                        B.offset((bj * ldb + bi) as isize),
                        lda,
                        ldb,
                    );
                    bj += 4;
                }
                bi += 4;
            }
            j += block_size;
        }
        i += block_size;
    }
}
/// Transposes `from` into `to`. Dimensions must be swapped
/// (`from.w == to.h`, `from.h == to.w`) and formats identical. For 4-byte
/// formats (bgra32 / bgr32) whose strides are multiples of 8 bytes, the bulk
/// is transposed with a blocked SSE kernel over the largest 8-aligned
/// sub-rectangle and the remaining edge pixels are copied one u32 at a time;
/// all other formats fall back to `flow_bitmap_bgra_transpose_slow`.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_transpose(
    c: *mut flow_c,
    from: *mut flow_bitmap_bgra,
    to: *mut flow_bitmap_bgra,
) -> bool {
    if (*from).w != (*to).h || (*from).h != (*to).w || (*from).fmt as u32 != (*to).fmt as u32 {
        flow_context_set_error_get_message_buffer(
            c,
            flow_status_code::Invalid_argument,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1252 as i32,
            (*::std::mem::transmute::<&[u8; 27], &[libc::c_char; 27]>(
                b"flow_bitmap_bgra_transpose\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Non-4-byte-per-pixel formats take the scalar path.
    if (*from).fmt as u32 != flow_bgra32 as i32 as u32
        && (*from).fmt as u32 != flow_bgr32 as i32 as u32
    {
        if !flow_bitmap_bgra_transpose_slow(c, from, to) {
            flow_context_add_to_callstack(
                c,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1258 as i32,
                (*::std::mem::transmute::<&[u8; 27], &[libc::c_char; 27]>(
                    b"flow_bitmap_bgra_transpose\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        return true;
    }
    // We require 8 when we only need 4 - in case we ever want to enable avx (like if we make it faster)
    let min_block_size: i32 = 8 as i32;
    // Strides must be multiple of required alignments
    if (*from).stride.wrapping_rem(min_block_size as u32) != 0 as i32 as u32
        || (*to).stride.wrapping_rem(min_block_size as u32) != 0 as i32 as u32
    {
        flow_context_set_error_get_message_buffer(
            c,
            flow_status_code::Invalid_argument,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1269 as i32,
            (*::std::mem::transmute::<&[u8; 27], &[libc::c_char; 27]>(
                b"flow_bitmap_bgra_transpose\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // 256 (1024x1024 bytes) at 18.18ms, 128 at 18.6ms, 64 at 20.4ms, 16 at 25.71ms
    let block_size: i32 = 128 as i32;
    // Largest sub-rectangle whose dimensions are multiples of min_block_size.
    let cropped_h: i32 = (*from)
        .h
        .wrapping_sub((*from).h.wrapping_rem(min_block_size as u32))
        as i32;
    let cropped_w: i32 = (*from)
        .w
        .wrapping_sub((*from).w.wrapping_rem(min_block_size as u32))
        as i32;
    // Pixel data is reinterpreted as f32 purely to reuse the SSE 4x4
    // transpose; no arithmetic is done on the values.
    transpose_block_SSE4x4(
        (*from).pixels as *mut f32,
        (*to).pixels as *mut f32,
        cropped_h,
        cropped_w,
        (*from).stride.wrapping_div(4u32) as i32,
        (*to).stride.wrapping_div(4u32) as i32,
        block_size,
    );
    // Copy missing bits
    // First the columns beyond the cropped height (full height of `to`)…
    let mut x: u32 = cropped_h as u32;
    while x < (*to).w {
        let mut y: u32 = 0 as i32 as u32;
        while y < (*to).h {
            *(&mut *(*to).pixels.offset(
                x.wrapping_mul(4u32)
                    .wrapping_add(y.wrapping_mul((*to).stride)) as isize,
            ) as *mut libc::c_uchar as *mut u32) = *(&mut *(*from).pixels.offset(
                x.wrapping_mul((*from).stride)
                    .wrapping_add(y.wrapping_mul(4u32)) as isize,
            ) as *mut libc::c_uchar
                as *mut u32);
            y = y.wrapping_add(1)
        }
        x = x.wrapping_add(1)
    }
    // …then the rows beyond the cropped width (within the cropped columns).
    let mut x_0: u32 = 0 as i32 as u32;
    while x_0 < cropped_h as u32 {
        let mut y_0: u32 = cropped_w as u32;
        while y_0 < (*to).h {
            *(&mut *(*to).pixels.offset(
                x_0.wrapping_mul(4u32)
                    .wrapping_add(y_0.wrapping_mul((*to).stride)) as isize,
            ) as *mut libc::c_uchar as *mut u32) = *(&mut *(*from).pixels.offset(
                x_0.wrapping_mul((*from).stride)
                    .wrapping_add(y_0.wrapping_mul(4u32)) as isize,
            ) as *mut libc::c_uchar
                as *mut u32);
            y_0 = y_0.wrapping_add(1)
        }
        x_0 = x_0.wrapping_add(1)
    }
    return true;
}
/// Scalar transpose fallback. Dimensions must be swapped and formats must
/// match. bgra32 / bgr32 pixels are moved as single u32 copies; bgr24 pixels
/// as three individual byte copies; any other format fails with
/// `Invalid_argument`.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_transpose_slow(
    c: *mut flow_c,
    from: *mut flow_bitmap_bgra,
    to: *mut flow_bitmap_bgra,
) -> bool {
    if (*from).w != (*to).h || (*from).h != (*to).w || (*from).fmt as u32 != (*to).fmt as u32 {
        flow_context_set_error_get_message_buffer(
            c,
            flow_status_code::Invalid_argument,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1300 as i32,
            (*::std::mem::transmute::<&[u8; 32], &[libc::c_char; 32]>(
                b"flow_bitmap_bgra_transpose_slow\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    if (*from).fmt as u32 == flow_bgra32 as i32 as u32
        || (*from).fmt as u32 == flow_bgr32 as i32 as u32
    {
        // 4-byte pixels: copy each as one u32.
        let mut x: u32 = 0 as i32 as u32;
        while x < (*to).w {
            let mut y: u32 = 0 as i32 as u32;
            while y < (*to).h {
                *(&mut *(*to).pixels.offset(
                    x.wrapping_mul(4u32)
                        .wrapping_add(y.wrapping_mul((*to).stride)) as isize,
                ) as *mut libc::c_uchar as *mut u32) = *(&mut *(*from).pixels.offset(
                    x.wrapping_mul((*from).stride)
                        .wrapping_add(y.wrapping_mul(4u32)) as isize,
                ) as *mut libc::c_uchar
                    as *mut u32);
                y = y.wrapping_add(1)
            }
            x = x.wrapping_add(1)
        }
        return true;
    } else if (*from).fmt as u32 == flow_bgr24 as i32 as u32 {
        let from_stride: i32 = (*from).stride as i32;
        let to_stride: i32 = (*to).stride as i32;
        // x_stride / x_3 and y_stride / y_3 track the byte offsets alongside
        // the pixel indices (stride-sized vs. 3-byte steps) to avoid
        // per-pixel multiplications.
        let mut x_0: u32 = 0 as i32 as u32;
        let mut x_stride: u32 = 0 as i32 as u32;
        let mut x_3: u32 = 0 as i32 as u32;
        while x_0 < (*to).w {
            let mut y_0: u32 = 0 as i32 as u32;
            let mut y_stride: u32 = 0 as i32 as u32;
            let mut y_3: u32 = 0 as i32 as u32;
            while y_0 < (*to).h {
                // Copy the three bytes of one bgr24 pixel.
                *(*to).pixels.offset(x_3.wrapping_add(y_stride) as isize) =
                    *(*from).pixels.offset(x_stride.wrapping_add(y_3) as isize);
                *(*to)
                    .pixels
                    .offset(x_3.wrapping_add(y_stride).wrapping_add(1u32) as isize) = *(*from)
                    .pixels
                    .offset(x_stride.wrapping_add(y_3).wrapping_add(1u32) as isize);
                *(*to)
                    .pixels
                    .offset(x_3.wrapping_add(y_stride).wrapping_add(2u32) as isize) = *(*from)
                    .pixels
                    .offset(x_stride.wrapping_add(y_3).wrapping_add(2u32) as isize);
                y_0 = y_0.wrapping_add(1);
                y_stride = (y_stride as u32).wrapping_add(to_stride as u32) as u32 as u32;
                y_3 = (y_3 as u32).wrapping_add(3u32) as u32 as u32
            }
            x_0 = x_0.wrapping_add(1);
            x_stride = (x_stride as u32).wrapping_add(from_stride as u32) as u32 as u32;
            x_3 = (x_3 as u32).wrapping_add(3u32) as u32 as u32
        }
        return true;
    } else {
        flow_context_set_error_get_message_buffer(
            c,
            flow_status_code::Invalid_argument,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1325 as i32,
            (*::std::mem::transmute::<&[u8; 32], &[libc::c_char; 32]>(
                b"flow_bitmap_bgra_transpose_slow\x00",
            ))
            .as_ptr(),
        );
        return false;
    };
}
/// Converts `row_count` rows of sRGB bytes from `src` (starting at `from_row`)
/// into linear-colorspace floats in `dest` (starting at `dest_row`).
///
/// When a full 4-channel copy is performed (`copy_step == 4`), the color
/// channels are premultiplied by alpha. Returns false with a context error if
/// the widths differ, the requested row ranges fall outside either bitmap, or
/// the channel combination is unsupported.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_convert_srgb_to_linear(
    context: *mut flow_c,
    colorcontext: *mut flow_colorcontext_info,
    src: *mut flow_bitmap_bgra,
    from_row: u32,
    dest: *mut flow_bitmap_float,
    dest_row: u32,
    row_count: u32,
) -> bool {
    // Widths must match exactly; rows are converted 1:1.
    if ((*src).w != (*dest).w) as i32 as libc::c_long != 0 {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1339 as i32,
            (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                b"flow_bitmap_float_convert_srgb_to_linear\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Both the source and destination row windows must lie within bounds.
    if !(from_row.wrapping_add(row_count) <= (*src).h
        && dest_row.wrapping_add(row_count) <= (*dest).h) as i32 as libc::c_long
        != 0
    {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1345 as i32,
            (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                b"flow_bitmap_float_convert_srgb_to_linear\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    let w = (*src).w;
    // Bytes of actual pixel data per source row (excludes stride padding).
    let units: u32 = w * flow_pixel_format_bytes_per_pixel((*src).fmt);
    // Bytes per source pixel.
    let from_step: u32 = flow_pixel_format_bytes_per_pixel((*src).fmt);
    // Meaningful channels in the source, per its effective pixel format.
    let from_copy: u32 = flow_pixel_format_channels(flow_effective_pixel_format(src));
    let to_step: u32 = (*dest).channels;
    // Channels actually transferred per pixel.
    let copy_step: u32 = from_copy.min(to_step);
    if copy_step != 3 && copy_step != 4 {
        flow_snprintf(
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Unsupported_pixel_format,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1361 as i32,
                (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                    b"flow_bitmap_float_convert_srgb_to_linear\x00",
                ))
                .as_ptr(),
            ),
            FLOW_ERROR_MESSAGE_SIZE as usize,
            b"copy_step=%d\x00" as *const u8 as *const libc::c_char,
            copy_step,
        );
        return false;
    }
    // A 4-channel copy requires at least one side to be 4 bytes/channels wide.
    if copy_step == 4 && from_step != 4 && to_step != 4 {
        flow_snprintf(
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Unsupported_pixel_format,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1368 as i32,
                (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                    b"flow_bitmap_float_convert_srgb_to_linear\x00",
                ))
                .as_ptr(),
            ),
            FLOW_ERROR_MESSAGE_SIZE as usize,
            b"copy_step=%d, from_step=%d, to_step=%d\x00" as *const u8 as *const libc::c_char,
            copy_step,
            from_step,
            to_step,
        );
        return false;
    }
    if copy_step == 4 {
        // BGRA -> premultiplied linear BGRA: each color byte is converted to
        // float space and multiplied by the normalized alpha; alpha itself is
        // stored in the 4th float.
        let mut row: u32 = 0 as i32 as u32;
        while row < row_count {
            let src_start: *mut u8 = (*src)
                .pixels
                .offset(from_row.wrapping_add(row).wrapping_mul((*src).stride) as isize);
            let buf: *mut f32 = (*dest).pixels.offset(
                (*dest)
                    .float_stride
                    .wrapping_mul(row.wrapping_add(dest_row)) as isize,
            );
            let mut to_x: u32 = 0 as i32 as u32;
            let mut bix: u32 = 0 as i32 as u32;
            while bix < units {
                let alpha: f32 =
                    *src_start.offset(bix.wrapping_add(3u32) as isize) as f32 / 255.0f32;
                *buf.offset(to_x as isize) = alpha
                    * flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start.offset(bix as isize),
                    );
                *buf.offset(to_x.wrapping_add(1u32) as isize) = alpha
                    * flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start.offset(bix.wrapping_add(1u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(2u32) as isize) = alpha
                    * flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start.offset(bix.wrapping_add(2u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(3u32) as isize) = alpha;
                to_x = (to_x as u32).wrapping_add(4u32) as u32 as u32;
                bix = (bix as u32).wrapping_add(4u32) as u32 as u32
            }
            row = row.wrapping_add(1)
        }
    } else if from_step == 3 && to_step == 3 {
        // 3-byte source -> 3-float destination: straight per-channel conversion.
        let mut row: u32 = 0 as i32 as u32;
        while row < row_count {
            let src_start_0: *mut u8 = (*src)
                .pixels
                .offset(from_row.wrapping_add(row).wrapping_mul((*src).stride) as isize);
            let buf: *mut f32 = (*dest).pixels.offset(
                (*dest)
                    .float_stride
                    .wrapping_mul(row.wrapping_add(dest_row)) as isize,
            );
            let mut to_x: u32 = 0 as i32 as u32;
            let mut bix: u32 = 0 as i32 as u32;
            while bix < units {
                *buf.offset(to_x as isize) = flow_colorcontext_srgb_to_floatspace(
                    colorcontext,
                    *src_start_0.offset(bix as isize),
                );
                *buf.offset(to_x.wrapping_add(1u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start_0.offset(bix.wrapping_add(1u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(2u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start_0.offset(bix.wrapping_add(2u32) as isize),
                    );
                to_x = (to_x as u32).wrapping_add(3u32) as u32 as u32;
                bix = (bix as u32).wrapping_add(3u32) as u32 as u32
            }
            row += 1
        }
    } else if from_step == 4 && to_step == 3 {
        // 4-byte source -> 3-float destination: the source's 4th byte is skipped.
        let mut row: u32 = 0 as i32 as u32;
        while row < row_count {
            let src_start: *mut u8 = (*src)
                .pixels
                .offset(from_row.wrapping_add(row).wrapping_mul((*src).stride) as isize);
            let buf: *mut f32 = (*dest).pixels.offset(
                (*dest)
                    .float_stride
                    .wrapping_mul(row.wrapping_add(dest_row)) as isize,
            );
            let mut to_x: u32 = 0 as i32 as u32;
            let mut bix: u32 = 0 as i32 as u32;
            while bix < units {
                *buf.offset(to_x as isize) = flow_colorcontext_srgb_to_floatspace(
                    colorcontext,
                    *src_start.offset(bix as isize),
                );
                *buf.offset(to_x.wrapping_add(1u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start.offset(bix.wrapping_add(1u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(2u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start.offset(bix.wrapping_add(2u32) as isize),
                    );
                to_x = (to_x as u32).wrapping_add(3u32) as u32 as u32;
                bix = (bix as u32).wrapping_add(4u32) as u32 as u32
            }
            row += 1
        }
    } else if from_step == 3 && to_step == 4 {
        // 3-byte source -> 4-float destination.
        // NOTE(review): the 4th destination float of each pixel is never
        // written here — presumably the buffer is pre-initialized or the
        // alpha slot is ignored downstream; verify against callers.
        let mut row: u32 = 0 as i32 as u32;
        while row < row_count {
            let src_start: *mut u8 = (*src)
                .pixels
                .offset(from_row.wrapping_add(row).wrapping_mul((*src).stride) as isize);
            let buf: *mut f32 = (*dest).pixels.offset(
                (*dest)
                    .float_stride
                    .wrapping_mul(row.wrapping_add(dest_row)) as isize,
            );
            let mut to_x: u32 = 0 as i32 as u32;
            let mut bix: u32 = 0 as i32 as u32;
            while bix < units {
                *buf.offset(to_x as isize) = flow_colorcontext_srgb_to_floatspace(
                    colorcontext,
                    *src_start.offset(bix as isize),
                );
                *buf.offset(to_x.wrapping_add(1u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start.offset(bix.wrapping_add(1u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(2u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start.offset(bix.wrapping_add(2u32) as isize),
                    );
                to_x = (to_x as u32).wrapping_add(4u32) as u32 as u32;
                bix = (bix as u32).wrapping_add(3u32) as u32 as u32
            }
            row += 1
        }
    } else if from_step == 4 && to_step == 4 {
        // 4-byte source with only 3 meaningful channels (copy_step == 3) into a
        // 4-float destination: both sides advance by 4, three channels copied.
        let mut row: u32 = 0 as i32 as u32;
        while row < row_count {
            let src_start: *mut u8 = (*src)
                .pixels
                .offset(from_row.wrapping_add(row).wrapping_mul((*src).stride) as isize);
            let buf: *mut f32 = (*dest).pixels.offset(
                (*dest)
                    .float_stride
                    .wrapping_mul(row.wrapping_add(dest_row)) as isize,
            );
            let mut to_x: u32 = 0 as i32 as u32;
            let mut bix: u32 = 0 as i32 as u32;
            while bix < units {
                *buf.offset(to_x as isize) = flow_colorcontext_srgb_to_floatspace(
                    colorcontext,
                    *src_start.offset(bix as isize),
                );
                *buf.offset(to_x.wrapping_add(1u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start.offset(bix.wrapping_add(1u32) as isize),
                    );
                *buf.offset(to_x.wrapping_add(2u32) as isize) =
                    flow_colorcontext_srgb_to_floatspace(
                        colorcontext,
                        *src_start.offset(bix.wrapping_add(2u32) as isize),
                    );
                to_x = (to_x as u32).wrapping_add(4u32) as u32 as u32;
                bix = (bix as u32).wrapping_add(4u32) as u32 as u32
            }
            row += 1
        }
    } else {
        // No matching step combination: report the unsupported layout.
        flow_snprintf(
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Unsupported_pixel_format,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1411 as i32,
                (*::std::mem::transmute::<&[u8; 41], &[libc::c_char; 41]>(
                    b"flow_bitmap_float_convert_srgb_to_linear\x00",
                ))
                .as_ptr(),
            ),
            FLOW_ERROR_MESSAGE_SIZE as usize,
            b"copy_step=%d, from_step=%d, to_step=%d\x00" as *const u8 as *const libc::c_char,
            copy_step,
            from_step,
            to_step,
        );
        return false;
    }
    return true;
}
/*
static void unpack24bitRow(u32 width, unsigned char* sourceLine, unsigned char* destArray){
for (u32 i = 0; i < width; i++){
memcpy(destArray + i * 4, sourceLine + i * 3, 3);
destArray[i * 4 + 3] = 255;
}
}
*/
/// Flips the bitmap `b` vertically, in place, by exchanging row pairs through
/// a single heap-allocated scratch row. Returns false (with an Out_of_memory
/// error set on `context`) if the scratch allocation fails.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_flip_vertical(
    context: *mut flow_c,
    b: *mut flow_bitmap_bgra,
) -> bool {
    // Scratch buffer that holds one full row while a top/bottom pair is swapped.
    let scratch: *mut libc::c_void = flow_context_malloc(
        context,
        (*b).stride as usize,
        ::std::mem::transmute::<libc::intptr_t, flow_destructor_function>(NULL as libc::intptr_t),
        context as *mut libc::c_void,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        1430 as i32,
    );
    if scratch.is_null() {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Out_of_memory,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1432 as i32,
            (*::std::mem::transmute::<&[u8; 31], &[libc::c_char; 31]>(
                b"flow_bitmap_bgra_flip_vertical\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Swap only the pixel data, never the stride padding — the bitmap may be
    // a window into a larger allocation.
    // Todo: try multiple swap rows? 5ms isn't bad, but could be better
    let bytes_per_row: u32 = (*b).stride.min(
        (*b).w
            .wrapping_mul(flow_pixel_format_bytes_per_pixel((*b).fmt)),
    );
    let half: u32 = (*b).h.wrapping_div(2u32);
    let mut row: u32 = 0u32;
    while row < half {
        let upper: *mut libc::c_void =
            (*b).pixels.offset(row.wrapping_mul((*b).stride) as isize) as *mut libc::c_void;
        let lower: *mut libc::c_void = (*b).pixels.offset(
            (*b).h
                .wrapping_sub(1u32)
                .wrapping_sub(row)
                .wrapping_mul((*b).stride) as isize,
        ) as *mut libc::c_void;
        // Three-way exchange through the scratch row.
        memcpy(scratch, upper, bytes_per_row as u64);
        memcpy(upper, lower, bytes_per_row as u64);
        memcpy(lower, scratch, bytes_per_row as u64);
        row = row.wrapping_add(1);
    }
    flow_deprecated_free(
        context,
        scratch,
        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
        1445 as i32,
    );
    true
}
/// Flips the bitmap `b` horizontally, in place, reversing each row.
/// Chooses one of three strategies by pixel format: whole-u32 swaps for
/// 4-byte pixels, 3-byte memcpy swaps for bgr24, and a generic
/// bytes-per-pixel memcpy swap for anything else. Always returns true.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_flip_horizontal(
    _context: *mut flow_c,
    b: *mut flow_bitmap_bgra,
) -> bool {
    if (*b).fmt as u32 == flow_bgra32 as i32 as u32 || (*b).fmt as u32 == flow_bgr32 as i32 as u32 {
        // 12ms simple
        // 4-byte pixels: swap u32 values from the two ends of each row,
        // walking the pointers toward each other.
        let mut y: u32 = 0 as i32 as u32;
        while y < (*b).h {
            let mut left: *mut u32 =
                (*b).pixels.offset(y.wrapping_mul((*b).stride) as isize) as *mut u32;
            let mut right: *mut u32 = (*b)
                .pixels
                .offset(y.wrapping_mul((*b).stride) as isize)
                .offset((4u32).wrapping_mul((*b).w.wrapping_sub(1u32)) as isize)
                as *mut u32;
            while left < right {
                let swap: u32 = *left;
                *left = *right;
                *right = swap;
                left = left.offset(1);
                right = right.offset(-1)
            }
            y = y.wrapping_add(1)
        }
    } else if (*b).fmt as u32 == flow_bgr24 as i32 as u32 {
        // 3-byte pixels: exchange 3-byte triplets via a small scratch buffer.
        let mut swap_0: [u32; 4] = [0; 4];
        // Don't copy the full stride (padding), it could be windowed!
        let mut y_0: u32 = 0 as i32 as u32;
        while y_0 < (*b).h {
            let mut left_0: *mut u8 = (*b).pixels.offset(y_0.wrapping_mul((*b).stride) as isize);
            let mut right_0: *mut u8 = (*b)
                .pixels
                .offset(y_0.wrapping_mul((*b).stride) as isize)
                .offset((3u32).wrapping_mul((*b).w.wrapping_sub(1u32)) as isize);
            while left_0 < right_0 {
                memcpy(
                    &mut swap_0 as *mut [u32; 4] as *mut libc::c_void,
                    left_0 as *const libc::c_void,
                    3 as i32 as u64,
                );
                memcpy(
                    left_0 as *mut libc::c_void,
                    right_0 as *const libc::c_void,
                    3 as i32 as u64,
                );
                memcpy(
                    right_0 as *mut libc::c_void,
                    &mut swap_0 as *mut [u32; 4] as *const libc::c_void,
                    3 as i32 as u64,
                );
                left_0 = left_0.offset(3 as i32 as isize);
                right_0 = right_0.offset(-(3 as i32 as isize))
            }
            y_0 = y_0.wrapping_add(1)
        }
    } else {
        // Generic fallback: same triplet-swap scheme but sized by the format's
        // bytes-per-pixel, queried on every use.
        let mut swap_1: [u32; 4] = [0; 4];
        // Don't copy the full stride (padding), it could be windowed!
        let mut y_1: u32 = 0 as i32 as u32;
        while y_1 < (*b).h {
            let mut left_1: *mut u8 = (*b).pixels.offset(y_1.wrapping_mul((*b).stride) as isize);
            let mut right_1: *mut u8 = (*b)
                .pixels
                .offset(y_1.wrapping_mul((*b).stride) as isize)
                .offset(
                    flow_pixel_format_bytes_per_pixel((*b).fmt)
                        .wrapping_mul((*b).w.wrapping_sub(1u32)) as isize,
                );
            while left_1 < right_1 {
                memcpy(
                    &mut swap_1 as *mut [u32; 4] as *mut libc::c_void,
                    left_1 as *const libc::c_void,
                    flow_pixel_format_bytes_per_pixel((*b).fmt) as u64,
                );
                memcpy(
                    left_1 as *mut libc::c_void,
                    right_1 as *const libc::c_void,
                    flow_pixel_format_bytes_per_pixel((*b).fmt) as u64,
                );
                memcpy(
                    right_1 as *mut libc::c_void,
                    &mut swap_1 as *mut [u32; 4] as *const libc::c_void,
                    flow_pixel_format_bytes_per_pixel((*b).fmt) as u64,
                );
                left_1 = left_1.offset(flow_pixel_format_bytes_per_pixel((*b).fmt) as isize);
                right_1 = right_1.offset(-(flow_pixel_format_bytes_per_pixel((*b).fmt) as isize))
            }
            y_1 = y_1.wrapping_add(1)
        }
    }
    return true;
}
/// Composites the premultiplied float bitmap `src` over a solid matte color
/// for `row_count` rows starting at `from_row`, writing the result back into
/// `src` in demultiplied (straight-alpha) form. `matte` is always read as
/// 4 bytes of BGRA, regardless of the bitmap's format.
unsafe extern "C" fn flow_bitmap_float_blend_matte(
    _context: *mut flow_c,
    colorcontext: *mut flow_colorcontext_info,
    src: *mut flow_bitmap_float,
    from_row: u32,
    row_count: u32,
    matte: *const u8,
) -> bool {
    // We assume that matte is BGRA, regardless.
    let matte_a: f32 = *matte.offset(3 as i32 as isize) as f32 / 255.0f32;
    // Matte color channels converted to linear float space once, up front.
    let b: f32 = flow_colorcontext_srgb_to_floatspace(colorcontext, *matte.offset(0));
    let g: f32 = flow_colorcontext_srgb_to_floatspace(colorcontext, *matte.offset(1));
    let r: f32 = flow_colorcontext_srgb_to_floatspace(colorcontext, *matte.offset(2));
    let mut row: u32 = from_row;
    while row < from_row.wrapping_add(row_count) {
        let start_ix: u32 = row.wrapping_mul((*src).float_stride);
        let end_ix: u32 = start_ix.wrapping_add((*src).w.wrapping_mul((*src).channels));
        let mut ix: u32 = start_ix;
        while ix < end_ix {
            // Standard "over" compositing: the matte contributes through the
            // source pixel's transparency (1 - src_a), scaled by the matte's
            // own alpha.
            let src_a: f32 = *(*src).pixels.offset(ix.wrapping_add(3u32) as isize);
            let a: f32 = (1.0f32 - src_a) * matte_a;
            let final_alpha: f32 = src_a + a;
            // NOTE(review): if both src_a and matte_a are 0, final_alpha is 0
            // and these divisions produce NaN/inf — confirm callers guarantee
            // a non-zero matte alpha.
            *(*src).pixels.offset(ix as isize) =
                (*(*src).pixels.offset(ix as isize) + b * a) / final_alpha;
            *(*src).pixels.offset(ix.wrapping_add(1u32) as isize) =
                (*(*src).pixels.offset(ix.wrapping_add(1u32) as isize) + g * a) / final_alpha;
            *(*src).pixels.offset(ix.wrapping_add(2u32) as isize) =
                (*(*src).pixels.offset(ix.wrapping_add(2u32) as isize) + r * a) / final_alpha;
            *(*src).pixels.offset(ix.wrapping_add(3u32) as isize) = final_alpha;
            ix = (ix as u32).wrapping_add(4u32) as u32 as u32
        }
        row = row.wrapping_add(1)
    }
    // Ensure alpha is demultiplied
    return true;
}
/// Converts `row_count` rows of `src` (starting at `from_row`) from
/// premultiplied to straight alpha by dividing B, G, and R by the pixel's
/// alpha. Pixels with alpha <= 0 are left untouched. Always returns true.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_demultiply_alpha(
    _context: *mut flow_c,
    src: *mut flow_bitmap_float,
    from_row: u32,
    row_count: u32,
) -> bool {
    let px: *mut f32 = (*src).pixels;
    let stride: u32 = (*src).float_stride;
    // Floats of real pixel data per row (excludes any stride padding).
    let items_per_row: u32 = (*src).w.wrapping_mul((*src).channels);
    let last_row: u32 = from_row.wrapping_add(row_count);
    let mut r: u32 = from_row;
    while r < last_row {
        let first: u32 = r.wrapping_mul(stride);
        let past_end: u32 = first.wrapping_add(items_per_row);
        let mut i: u32 = first;
        while i < past_end {
            let a: f32 = *px.offset(i.wrapping_add(3u32) as isize);
            // Skip fully transparent pixels to avoid dividing by zero.
            if a > 0.0f32 {
                *px.offset(i as isize) /= a;
                *px.offset(i.wrapping_add(1u32) as isize) /= a;
                *px.offset(i.wrapping_add(2u32) as isize) /= a;
            }
            i = i.wrapping_add(4u32);
        }
        r = r.wrapping_add(1);
    }
    true
}
/// Copies linear float pixels from `src` back into the sRGB byte bitmap
/// `dest`, overwriting the destination (no compositing).
///
/// The work is unrolled into specialized loops keyed on three axes:
/// * destination pixel stride (fast path when pixels are 4 bytes apart,
///   generic path otherwise — `transpose` swaps the row/pixel strides);
/// * source channel count (`ch` == 3 or 4);
/// * alpha handling: `copy_alpha` (source alpha written to byte 3),
///   `clean_alpha` (byte 3 forced to 0xFF), or neither (byte 3 untouched).
/// Always returns true.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_copy_linear_over_srgb(
    _context: *mut flow_c,
    colorcontext: *mut flow_colorcontext_info,
    src: *mut flow_bitmap_float,
    from_row: u32,
    dest: *mut flow_bitmap_bgra,
    dest_row: u32,
    row_count: u32,
    from_col: u32,
    col_count: u32,
    transpose: bool,
) -> bool {
    let dest_bytes_pp: u32 = flow_pixel_format_bytes_per_pixel((*dest).fmt);
    // One past the last source float index to copy (column range clamped to src->w).
    let srcitems: u32 = from_col
        .wrapping_add(col_count)
        .min((*src).w)
        .wrapping_mul((*src).channels);
    let dest_fmt: flow_pixel_format = flow_effective_pixel_format(dest);
    let ch: u32 = (*src).channels;
    // copy_alpha requires a 4-channel source; clean_alpha covers bgra32
    // destinations whose alpha must be forced opaque instead.
    let copy_alpha: bool = dest_fmt as u32 == flow_bgra32 as i32 as u32
        && ch == 4 as i32 as u32
        && (*src).alpha_meaningful as i32 != 0;
    let clean_alpha: bool = !copy_alpha && dest_fmt as u32 == flow_bgra32 as i32 as u32;
    // When transposing, rows advance by one pixel and pixels advance by one row.
    let dest_row_stride: u32 = if transpose as i32 != 0 {
        dest_bytes_pp
    } else {
        (*dest).stride
    };
    let dest_pixel_stride: u32 = if transpose as i32 != 0 {
        (*dest).stride
    } else {
        dest_bytes_pp
    };
    if dest_pixel_stride == 4 as i32 as u32 {
        // Fast path: destination pixels are exactly 4 bytes apart.
        if ch == 3 as i32 as u32 {
            // Unreachable in practice: copy_alpha requires ch == 4 (see above),
            // so this variant can never be selected when ch == 3.
            if copy_alpha && !clean_alpha {
                let mut row: u32 = 0 as i32 as u32;
                while row < row_count {
                    let src_row: *mut f32 =
                        (*src)
                            .pixels
                            .offset(row.wrapping_add(from_row).wrapping_mul((*src).float_stride)
                                as isize);
                    let mut dest_row_bytes: *mut u8 = (*dest)
                        .pixels
                        .offset(dest_row.wrapping_add(row).wrapping_mul(dest_row_stride) as isize)
                        .offset(from_col.wrapping_mul(4u32) as isize);
                    let mut ix: u32 = from_col.wrapping_mul(3u32);
                    while ix < srcitems {
                        *dest_row_bytes.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row.offset(ix as isize),
                        );
                        *dest_row_bytes.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row.offset(ix.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row.offset(ix.wrapping_add(2u32) as isize),
                        );
                        *dest_row_bytes.offset(3 as i32 as isize) = uchar_clamp_ff(
                            *src_row.offset(ix.wrapping_add(3u32) as isize) * 255.0f32,
                        );
                        dest_row_bytes = dest_row_bytes.offset(4 as i32 as isize);
                        ix = (ix as u32).wrapping_add(3u32) as u32 as u32
                    }
                    row = row.wrapping_add(1)
                }
            }
            // 3-channel source, alpha byte left untouched.
            if !copy_alpha && !clean_alpha {
                let mut row_0: u32 = 0 as i32 as u32;
                while row_0 < row_count {
                    let src_row_0: *mut f32 = (*src).pixels.offset(
                        row_0
                            .wrapping_add(from_row)
                            .wrapping_mul((*src).float_stride) as isize,
                    );
                    let mut dest_row_bytes_0: *mut u8 = (*dest)
                        .pixels
                        .offset(dest_row.wrapping_add(row_0).wrapping_mul(dest_row_stride) as isize)
                        .offset(from_col.wrapping_mul(4u32) as isize);
                    let mut ix_0: u32 = from_col.wrapping_mul(3u32);
                    while ix_0 < srcitems {
                        *dest_row_bytes_0.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_0.offset(ix_0 as isize),
                        );
                        *dest_row_bytes_0.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_0.offset(ix_0.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes_0.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_0.offset(ix_0.wrapping_add(2u32) as isize),
                        );
                        dest_row_bytes_0 = dest_row_bytes_0.offset(4 as i32 as isize);
                        ix_0 = (ix_0 as u32).wrapping_add(3u32) as u32 as u32
                    }
                    row_0 = row_0.wrapping_add(1)
                }
            }
            // 3-channel source, alpha byte forced to fully opaque.
            if !copy_alpha && clean_alpha {
                let mut row_1: u32 = 0 as i32 as u32;
                while row_1 < row_count {
                    let src_row_1: *mut f32 = (*src).pixels.offset(
                        row_1
                            .wrapping_add(from_row)
                            .wrapping_mul((*src).float_stride) as isize,
                    );
                    let mut dest_row_bytes_1: *mut u8 = (*dest)
                        .pixels
                        .offset(dest_row.wrapping_add(row_1).wrapping_mul(dest_row_stride) as isize)
                        .offset(from_col.wrapping_mul(4u32) as isize);
                    let mut ix_1: u32 = from_col.wrapping_mul(3u32);
                    while ix_1 < srcitems {
                        *dest_row_bytes_1.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_1.offset(ix_1 as isize),
                        );
                        *dest_row_bytes_1.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_1.offset(ix_1.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes_1.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_1.offset(ix_1.wrapping_add(2u32) as isize),
                        );
                        *dest_row_bytes_1.offset(3 as i32 as isize) = 0xff as i32 as u8;
                        dest_row_bytes_1 = dest_row_bytes_1.offset(4 as i32 as isize);
                        ix_1 = (ix_1 as u32).wrapping_add(3u32) as u32 as u32
                    }
                    row_1 = row_1.wrapping_add(1)
                }
            }
        }
        if ch == 4 as i32 as u32 {
            // 4-channel source, copying alpha scaled back to a byte.
            if copy_alpha && !clean_alpha {
                let mut row_2: u32 = 0 as i32 as u32;
                while row_2 < row_count {
                    let src_row_2: *mut f32 = (*src).pixels.offset(
                        row_2
                            .wrapping_add(from_row)
                            .wrapping_mul((*src).float_stride) as isize,
                    );
                    let mut dest_row_bytes_2: *mut u8 = (*dest)
                        .pixels
                        .offset(dest_row.wrapping_add(row_2).wrapping_mul(dest_row_stride) as isize)
                        .offset(from_col.wrapping_mul(4u32) as isize);
                    let mut ix_2: u32 = from_col.wrapping_mul(4u32);
                    while ix_2 < srcitems {
                        *dest_row_bytes_2.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_2.offset(ix_2 as isize),
                        );
                        *dest_row_bytes_2.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_2.offset(ix_2.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes_2.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_2.offset(ix_2.wrapping_add(2u32) as isize),
                        );
                        *dest_row_bytes_2.offset(3 as i32 as isize) = uchar_clamp_ff(
                            *src_row_2.offset(ix_2.wrapping_add(3u32) as isize) * 255.0f32,
                        );
                        dest_row_bytes_2 = dest_row_bytes_2.offset(4 as i32 as isize);
                        ix_2 = (ix_2 as u32).wrapping_add(4u32) as u32 as u32
                    }
                    row_2 = row_2.wrapping_add(1)
                }
            }
            // 4-channel source, alpha byte left untouched.
            if !copy_alpha && !clean_alpha {
                let mut row_3: u32 = 0 as i32 as u32;
                while row_3 < row_count {
                    let src_row_3: *mut f32 = (*src).pixels.offset(
                        row_3
                            .wrapping_add(from_row)
                            .wrapping_mul((*src).float_stride) as isize,
                    );
                    let mut dest_row_bytes_3: *mut u8 = (*dest)
                        .pixels
                        .offset(dest_row.wrapping_add(row_3).wrapping_mul(dest_row_stride) as isize)
                        .offset(from_col.wrapping_mul(4u32) as isize);
                    let mut ix_3: u32 = from_col.wrapping_mul(4u32);
                    while ix_3 < srcitems {
                        *dest_row_bytes_3.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_3.offset(ix_3 as isize),
                        );
                        *dest_row_bytes_3.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_3.offset(ix_3.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes_3.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_3.offset(ix_3.wrapping_add(2u32) as isize),
                        );
                        dest_row_bytes_3 = dest_row_bytes_3.offset(4 as i32 as isize);
                        ix_3 = (ix_3 as u32).wrapping_add(4u32) as u32 as u32
                    }
                    row_3 = row_3.wrapping_add(1)
                }
            }
            // 4-channel source, alpha byte forced to fully opaque.
            if !copy_alpha && clean_alpha {
                let mut row_4: u32 = 0 as i32 as u32;
                while row_4 < row_count {
                    let src_row_4: *mut f32 = (*src).pixels.offset(
                        row_4
                            .wrapping_add(from_row)
                            .wrapping_mul((*src).float_stride) as isize,
                    );
                    let mut dest_row_bytes_4: *mut u8 = (*dest)
                        .pixels
                        .offset(dest_row.wrapping_add(row_4).wrapping_mul(dest_row_stride) as isize)
                        .offset(from_col.wrapping_mul(4u32) as isize);
                    let mut ix_4: u32 = from_col.wrapping_mul(4u32);
                    while ix_4 < srcitems {
                        *dest_row_bytes_4.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_4.offset(ix_4 as isize),
                        );
                        *dest_row_bytes_4.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_4.offset(ix_4.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes_4.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_4.offset(ix_4.wrapping_add(2u32) as isize),
                        );
                        *dest_row_bytes_4.offset(3 as i32 as isize) = 0xff as i32 as u8;
                        dest_row_bytes_4 = dest_row_bytes_4.offset(4 as i32 as isize);
                        ix_4 = (ix_4 as u32).wrapping_add(4u32) as u32 as u32
                    }
                    row_4 = row_4.wrapping_add(1)
                }
            }
        }
    } else {
        // Generic path: destination pixels are dest_pixel_stride bytes apart
        // (e.g. transposed output or 3-byte destination formats).
        if ch == 3 as i32 as u32 {
            // Unreachable in practice: copy_alpha requires ch == 4 (see above).
            if copy_alpha && !clean_alpha {
                let mut row_5: u32 = 0 as i32 as u32;
                while row_5 < row_count {
                    let src_row_5: *mut f32 = (*src).pixels.offset(
                        row_5
                            .wrapping_add(from_row)
                            .wrapping_mul((*src).float_stride) as isize,
                    );
                    let mut dest_row_bytes_5: *mut u8 = (*dest)
                        .pixels
                        .offset(dest_row.wrapping_add(row_5).wrapping_mul(dest_row_stride) as isize)
                        .offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
                    let mut ix_5: u32 = from_col.wrapping_mul(3u32);
                    while ix_5 < srcitems {
                        *dest_row_bytes_5.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_5.offset(ix_5 as isize),
                        );
                        *dest_row_bytes_5.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_5.offset(ix_5.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes_5.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_5.offset(ix_5.wrapping_add(2u32) as isize),
                        );
                        *dest_row_bytes_5.offset(3 as i32 as isize) = uchar_clamp_ff(
                            *src_row_5.offset(ix_5.wrapping_add(3u32) as isize) * 255.0f32,
                        );
                        dest_row_bytes_5 = dest_row_bytes_5.offset(dest_pixel_stride as isize);
                        ix_5 = (ix_5 as u32).wrapping_add(3u32) as u32 as u32
                    }
                    row_5 = row_5.wrapping_add(1)
                }
            }
            // 3-channel source, alpha byte left untouched.
            if !copy_alpha && !clean_alpha {
                let mut row_6: u32 = 0 as i32 as u32;
                while row_6 < row_count {
                    let src_row_6: *mut f32 = (*src).pixels.offset(
                        row_6
                            .wrapping_add(from_row)
                            .wrapping_mul((*src).float_stride) as isize,
                    );
                    let mut dest_row_bytes_6: *mut u8 = (*dest)
                        .pixels
                        .offset(dest_row.wrapping_add(row_6).wrapping_mul(dest_row_stride) as isize)
                        .offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
                    let mut ix_6: u32 = from_col.wrapping_mul(3u32);
                    while ix_6 < srcitems {
                        *dest_row_bytes_6.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_6.offset(ix_6 as isize),
                        );
                        *dest_row_bytes_6.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_6.offset(ix_6.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes_6.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_6.offset(ix_6.wrapping_add(2u32) as isize),
                        );
                        dest_row_bytes_6 = dest_row_bytes_6.offset(dest_pixel_stride as isize);
                        ix_6 = (ix_6 as u32).wrapping_add(3u32) as u32 as u32
                    }
                    row_6 = row_6.wrapping_add(1)
                }
            }
            // 3-channel source, alpha byte forced to fully opaque.
            if !copy_alpha && clean_alpha {
                let mut row_7: u32 = 0 as i32 as u32;
                while row_7 < row_count {
                    let src_row_7: *mut f32 = (*src).pixels.offset(
                        row_7
                            .wrapping_add(from_row)
                            .wrapping_mul((*src).float_stride) as isize,
                    );
                    let mut dest_row_bytes_7: *mut u8 = (*dest)
                        .pixels
                        .offset(dest_row.wrapping_add(row_7).wrapping_mul(dest_row_stride) as isize)
                        .offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
                    let mut ix_7: u32 = from_col.wrapping_mul(3u32);
                    while ix_7 < srcitems {
                        *dest_row_bytes_7.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_7.offset(ix_7 as isize),
                        );
                        *dest_row_bytes_7.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_7.offset(ix_7.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes_7.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_7.offset(ix_7.wrapping_add(2u32) as isize),
                        );
                        *dest_row_bytes_7.offset(3 as i32 as isize) = 0xff as i32 as u8;
                        dest_row_bytes_7 = dest_row_bytes_7.offset(dest_pixel_stride as isize);
                        ix_7 = (ix_7 as u32).wrapping_add(3u32) as u32 as u32
                    }
                    row_7 = row_7.wrapping_add(1)
                }
            }
        }
        if ch == 4 as i32 as u32 {
            // 4-channel source, copying alpha scaled back to a byte.
            if copy_alpha && !clean_alpha {
                let mut row_8: u32 = 0 as i32 as u32;
                while row_8 < row_count {
                    let src_row_8: *mut f32 = (*src).pixels.offset(
                        row_8
                            .wrapping_add(from_row)
                            .wrapping_mul((*src).float_stride) as isize,
                    );
                    let mut dest_row_bytes_8: *mut u8 = (*dest)
                        .pixels
                        .offset(dest_row.wrapping_add(row_8).wrapping_mul(dest_row_stride) as isize)
                        .offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
                    let mut ix_8: u32 = from_col.wrapping_mul(4u32);
                    while ix_8 < srcitems {
                        *dest_row_bytes_8.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_8.offset(ix_8 as isize),
                        );
                        *dest_row_bytes_8.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_8.offset(ix_8.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes_8.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_8.offset(ix_8.wrapping_add(2u32) as isize),
                        );
                        *dest_row_bytes_8.offset(3 as i32 as isize) = uchar_clamp_ff(
                            *src_row_8.offset(ix_8.wrapping_add(3u32) as isize) * 255.0f32,
                        );
                        dest_row_bytes_8 = dest_row_bytes_8.offset(dest_pixel_stride as isize);
                        ix_8 = (ix_8 as u32).wrapping_add(4u32) as u32 as u32
                    }
                    row_8 = row_8.wrapping_add(1)
                }
            }
            // 4-channel source, alpha byte left untouched.
            if !copy_alpha && !clean_alpha {
                let mut row_9: u32 = 0 as i32 as u32;
                while row_9 < row_count {
                    let src_row_9: *mut f32 = (*src).pixels.offset(
                        row_9
                            .wrapping_add(from_row)
                            .wrapping_mul((*src).float_stride) as isize,
                    );
                    let mut dest_row_bytes_9: *mut u8 = (*dest)
                        .pixels
                        .offset(dest_row.wrapping_add(row_9).wrapping_mul(dest_row_stride) as isize)
                        .offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
                    let mut ix_9: u32 = from_col.wrapping_mul(4u32);
                    while ix_9 < srcitems {
                        *dest_row_bytes_9.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_9.offset(ix_9 as isize),
                        );
                        *dest_row_bytes_9.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_9.offset(ix_9.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes_9.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_9.offset(ix_9.wrapping_add(2u32) as isize),
                        );
                        dest_row_bytes_9 = dest_row_bytes_9.offset(dest_pixel_stride as isize);
                        ix_9 = (ix_9 as u32).wrapping_add(4u32) as u32 as u32
                    }
                    row_9 = row_9.wrapping_add(1)
                }
            }
            // 4-channel source, alpha byte forced to fully opaque.
            if !copy_alpha && clean_alpha {
                let mut row_10: u32 = 0 as i32 as u32;
                while row_10 < row_count {
                    let src_row_10: *mut f32 = (*src).pixels.offset(
                        row_10
                            .wrapping_add(from_row)
                            .wrapping_mul((*src).float_stride) as isize,
                    );
                    let mut dest_row_bytes_10: *mut u8 =
                        (*dest)
                            .pixels
                            .offset(dest_row.wrapping_add(row_10).wrapping_mul(dest_row_stride)
                                as isize)
                            .offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
                    let mut ix_10: u32 = from_col.wrapping_mul(4u32);
                    while ix_10 < srcitems {
                        *dest_row_bytes_10.offset(0) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_10.offset(ix_10 as isize),
                        );
                        *dest_row_bytes_10.offset(1) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_10.offset(ix_10.wrapping_add(1u32) as isize),
                        );
                        *dest_row_bytes_10.offset(2) = flow_colorcontext_floatspace_to_srgb(
                            colorcontext,
                            *src_row_10.offset(ix_10.wrapping_add(2u32) as isize),
                        );
                        *dest_row_bytes_10.offset(3 as i32 as isize) = 0xff as i32 as u8;
                        dest_row_bytes_10 = dest_row_bytes_10.offset(dest_pixel_stride as isize);
                        ix_10 = (ix_10 as u32).wrapping_add(4u32) as u32 as u32
                    }
                    row_10 = row_10.wrapping_add(1)
                }
            }
        }
    }
    return true;
}
// Alpha-composites the linear-light float bitmap `src` "over" the existing
// sRGB contents of `dest`, writing the blended result back as sRGB bytes.
// `src` must carry premultiplied alpha (the caller,
// flow_bitmap_float_composite_linear_over_srgb, verifies this before calling).
// `row_count` rows starting at `from_row` in `src` are blended onto rows
// starting at `dest_row` in `dest`; `from_col`/`col_count` select the column
// window. When `transpose` is set, source rows are written as destination
// columns. Always returns true.
unsafe extern "C" fn BitmapFloat_compose_linear_over_srgb(
    _context: *mut flow_c,
    colorcontext: *mut flow_colorcontext_info,
    src: *mut flow_bitmap_float,
    from_row: u32,
    dest: *mut flow_bitmap_bgra,
    dest_row: u32,
    row_count: u32,
    from_col: u32,
    col_count: u32,
    transpose: bool,
) -> bool {
    let dest_bytes_pp: u32 = flow_pixel_format_bytes_per_pixel((*dest).fmt);
    // When transposing, the roles of row stride and pixel stride swap:
    // stepping a "row" moves one pixel, stepping a "pixel" moves one row.
    let dest_row_stride: u32 = if transpose as i32 != 0 {
        dest_bytes_pp
    } else {
        (*dest).stride
    };
    let dest_pixel_stride: u32 = if transpose as i32 != 0 {
        (*dest).stride
    } else {
        dest_bytes_pp
    };
    // Exclusive end index (in floats) of the per-row span, with the column
    // window clamped to the source width.
    let srcitems: u32 = from_col
        .wrapping_add(col_count)
        .min((*src).w)
        .wrapping_mul((*src).channels);
    let ch: u32 = (*src).channels;
    let dest_effective_format: flow_pixel_format = flow_effective_pixel_format(dest);
    let dest_alpha: bool = dest_effective_format as u32 == flow_bgra32 as i32 as u32;
    // If `dest` has no alpha channel it is treated as fully opaque:
    // the alpha byte index falls back to 0 (unused, coeff is 0 below) and
    // coeff/offset are chosen so the computed dest alpha is a constant 1.0.
    let dest_alpha_index: u8 = if dest_alpha as i32 != 0 {
        3 as i32
    } else {
        0 as i32
    } as u8;
    let dest_alpha_to_float_coeff: f32 = if dest_alpha as i32 != 0 {
        (1.0f32) / 255.0f32
    } else {
        0.0f32
    };
    let dest_alpha_to_float_offset: f32 = if dest_alpha as i32 != 0 {
        0.0f32
    } else {
        1.0f32
    };
    let mut row: u32 = 0 as i32 as u32;
    while row < row_count {
        // const float * const __restrict src_row = src->pixels + (row + from_row) * src->float_stride;
        let src_row: *mut f32 = (*src)
            .pixels
            .offset(row.wrapping_add(from_row).wrapping_mul((*src).float_stride) as isize);
        let mut dest_row_bytes: *mut u8 = (*dest)
            .pixels
            .offset(dest_row.wrapping_add(row).wrapping_mul(dest_row_stride) as isize)
            .offset(from_col.wrapping_mul(dest_pixel_stride) as isize);
        let mut ix: u32 = from_col.wrapping_mul(ch);
        while ix < srcitems {
            // Destination pixel bytes are ordered B, G, R (, A).
            let dest_b: u8 = *dest_row_bytes.offset(0);
            let dest_g: u8 = *dest_row_bytes.offset(1);
            let dest_r: u8 = *dest_row_bytes.offset(2);
            let dest_a: u8 = *dest_row_bytes.offset(dest_alpha_index as isize);
            let src_b: f32 = *src_row.offset(ix.wrapping_add(0u32) as isize);
            let src_g: f32 = *src_row.offset(ix.wrapping_add(1u32) as isize);
            let src_r: f32 = *src_row.offset(ix.wrapping_add(2u32) as isize);
            let src_a: f32 = *src_row.offset(ix.wrapping_add(3u32) as isize);
            // Premultiplied "over": a = (1 - src_a) * dest_a_float is the
            // remaining weight of the destination.
            let a: f32 = (1.0f32 - src_a)
                * (dest_alpha_to_float_coeff * dest_a as i32 as f32 + dest_alpha_to_float_offset);
            // dest is decoded from sRGB into the working floatspace before
            // blending; src channels are already premultiplied, so they add in
            // directly: out = src + dest * a.
            let b: f32 = flow_colorcontext_srgb_to_floatspace(colorcontext, dest_b) * a + src_b;
            let g: f32 = flow_colorcontext_srgb_to_floatspace(colorcontext, dest_g) * a + src_g;
            let r: f32 = flow_colorcontext_srgb_to_floatspace(colorcontext, dest_r) * a + src_r;
            let final_alpha: f32 = src_a + a;
            // Un-premultiply (divide by final_alpha) before encoding to sRGB.
            *dest_row_bytes.offset(0) =
                flow_colorcontext_floatspace_to_srgb(colorcontext, b / final_alpha);
            *dest_row_bytes.offset(1) =
                flow_colorcontext_floatspace_to_srgb(colorcontext, g / final_alpha);
            *dest_row_bytes.offset(2) =
                flow_colorcontext_floatspace_to_srgb(colorcontext, r / final_alpha);
            if dest_alpha {
                *dest_row_bytes.offset(3 as i32 as isize) =
                    uchar_clamp_ff(final_alpha * 255 as i32 as f32)
            }
            // TODO: split out 4 and 3 so compiler can vectorize maybe?
            dest_row_bytes = dest_row_bytes.offset(dest_pixel_stride as isize);
            ix = (ix as u32).wrapping_add(ch) as u32 as u32
        }
        row = row.wrapping_add(1)
    }
    return true;
}
// Composites `row_count` rows of the linear float bitmap `src_mut` (starting
// at `from_row`) onto the sRGB bitmap `dest` (starting at `dest_row`),
// choosing between true alpha blending, matte blending, and plain overwrite
// based on `dest`'s compositing mode and `src_mut`'s alpha state.
// Returns false (with the error recorded on `context`) on invalid state.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_composite_linear_over_srgb(
    context: *mut flow_c,
    colorcontext: *mut flow_colorcontext_info,
    src_mut: *mut flow_bitmap_float,
    from_row: u32,
    dest: *mut flow_bitmap_bgra,
    dest_row: u32,
    row_count: u32,
    transpose: bool,
) -> bool {
    // Widths must line up. When transposing, src rows become dest columns,
    // so src width is checked against dest height instead of dest width.
    if if transpose as i32 != 0 {
        ((*src_mut).w != (*dest).h) as i32
    } else {
        ((*src_mut).w != (*dest).w) as i32
    } != 0
    {
        // TODO: Add more bounds checks
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1699 as i32,
            (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                b"flow_bitmap_float_composite_linear_over_srgb\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // True "over" blending applies only when dest composites with its own
    // contents and src carries a meaningful 4th (alpha) channel.
    if (*dest).compositing_mode as u32 == flow_bitmap_compositing_blend_with_self as i32 as u32
        && (*src_mut).alpha_meaningful as i32 != 0
        && (*src_mut).channels == 4 as i32 as u32
    {
        if !(*src_mut).alpha_premultiplied {
            // Something went wrong. We should always have alpha premultiplied.
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Invalid_internal_state,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1706 as i32,
                (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                    b"flow_bitmap_float_composite_linear_over_srgb\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        // Compose
        if !BitmapFloat_compose_linear_over_srgb(
            context,
            colorcontext,
            src_mut,
            from_row,
            dest,
            dest_row,
            row_count,
            0 as i32 as u32,
            (*src_mut).w,
            transpose,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1712 as i32,
                (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                    b"flow_bitmap_float_composite_linear_over_srgb\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
    } else {
        // Not blending with self: either blend against a matte color first,
        // or simply overwrite the destination.
        if (*src_mut).channels == 4 as i32 as u32 && (*src_mut).alpha_meaningful as i32 != 0 {
            let mut demultiply: bool = (*src_mut).alpha_premultiplied;
            if (*dest).compositing_mode as u32
                == flow_bitmap_compositing_blend_with_matte as i32 as u32
            {
                if !flow_bitmap_float_blend_matte(
                    context,
                    colorcontext,
                    src_mut,
                    from_row,
                    row_count,
                    (*dest).matte_color.as_mut_ptr(),
                ) {
                    flow_context_add_to_callstack(
                        context,
                        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                        1722 as i32,
                        (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                            b"flow_bitmap_float_composite_linear_over_srgb\x00",
                        ))
                        .as_ptr(),
                    );
                    return false;
                }
                // Matte blending already resolved alpha; no demultiply needed.
                demultiply = false
            }
            if demultiply {
                // Demultiply before copy
                if !flow_bitmap_float_demultiply_alpha(context, src_mut, from_row, row_count) {
                    flow_context_add_to_callstack(
                        context,
                        b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                        1730 as i32,
                        (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                            b"flow_bitmap_float_composite_linear_over_srgb\x00",
                        ))
                        .as_ptr(),
                    );
                    return false;
                }
            }
        }
        // Copy/overwrite
        if !flow_bitmap_float_copy_linear_over_srgb(
            context,
            colorcontext,
            src_mut,
            from_row,
            dest,
            dest_row,
            row_count,
            0 as i32 as u32,
            (*src_mut).w,
            transpose,
        ) {
            flow_context_add_to_callstack(
                context,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1738 as i32,
                (*::std::mem::transmute::<&[u8; 45], &[libc::c_char; 45]>(
                    b"flow_bitmap_float_composite_linear_over_srgb\x00",
                ))
                .as_ptr(),
            ); // Don't access rows past the end of the bitmap
            return false;
        }
    } // This algorithm can't handle padding, if present
    return true;
}
// Converts `row_count` rows of `bit`, starting at `start_row`, from linear
// RGB to CIE LUV in place, invoking `linear_to_luv` at every float offset in
// the selected window. Requires the rows to be tightly packed (no padding),
// because the window is walked as one contiguous run of floats.
// Returns false (recording an error on `context`) if the row window exceeds
// the bitmap height or the bitmap has row padding.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_linear_to_luv_rows(
    context: *mut flow_c,
    bit: *mut flow_bitmap_float,
    start_row: u32,
    row_count: u32,
) -> bool {
    // The requested window must end on or before the last row.
    if !(start_row.wrapping_add(row_count) <= (*bit).h) {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1751 as i32,
            (*::std::mem::transmute::<&[u8; 37], &[libc::c_char; 37]>(
                b"flow_bitmap_float_linear_to_luv_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Any gap between rows would be corrupted by the contiguous walk below.
    if (*bit).float_stride != (*bit).w.wrapping_mul((*bit).channels) {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1755 as i32,
            (*::std::mem::transmute::<&[u8; 37], &[libc::c_char; 37]>(
                b"flow_bitmap_float_linear_to_luv_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Convert the half-open float range [first_index, stop_index).
    let first_index: u32 = (*bit).float_stride.wrapping_mul(start_row);
    let stop_index: u32 = (*bit)
        .float_stride
        .wrapping_mul(start_row.wrapping_add(row_count));
    let mut cursor: *mut f32 = (*bit).pixels.offset(first_index as isize);
    let stop: *mut f32 = (*bit).pixels.offset(stop_index as isize);
    while cursor < stop {
        linear_to_luv(cursor);
        cursor = cursor.offset(1);
    }
    true
}
// Inverse of flow_bitmap_float_linear_to_luv_rows: converts `row_count` rows
// of `bit`, starting at `start_row`, from CIE LUV back to linear RGB in
// place, invoking `luv_to_linear` at every float offset in the window.
// Requires tightly packed rows (no padding); returns false with an error on
// `context` if the window exceeds the bitmap or padding is present.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_luv_to_linear_rows(
    context: *mut flow_c,
    bit: *mut flow_bitmap_float,
    start_row: u32,
    row_count: u32,
) -> bool {
    // The requested window must end on or before the last row.
    if !(start_row.wrapping_add(row_count) <= (*bit).h) {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1772 as i32,
            (*::std::mem::transmute::<&[u8; 37], &[libc::c_char; 37]>(
                b"flow_bitmap_float_luv_to_linear_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Any gap between rows would be corrupted by the contiguous walk below.
    if (*bit).float_stride != (*bit).w.wrapping_mul((*bit).channels) {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Invalid_internal_state,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1776 as i32,
            (*::std::mem::transmute::<&[u8; 37], &[libc::c_char; 37]>(
                b"flow_bitmap_float_luv_to_linear_rows\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    // Convert the half-open float range [first_index, stop_index).
    let first_index: u32 = (*bit).float_stride.wrapping_mul(start_row);
    let stop_index: u32 = (*bit)
        .float_stride
        .wrapping_mul(start_row.wrapping_add(row_count));
    let mut cursor: *mut f32 = (*bit).pixels.offset(first_index as isize);
    let stop: *mut f32 = (*bit).pixels.offset(stop_index as isize);
    while cursor < stop {
        luv_to_linear(cursor);
        cursor = cursor.offset(1);
    }
    true
}
// Applies a 5x4 color matrix in place to `count` rows of `bmp` starting at
// `row`. `m` points at five coefficient rows of four columns each:
//   m[0], m[1], m[2] weight the source R, G, B values,
//   m[3] weights the source alpha (used only on the 4-channel path),
//   m[4] is a translation row, scaled here by 255 to match the byte range.
// The four columns of each row produce the output R, G, B, A respectively.
// Pixel bytes are ordered B, G, R (, A); results are clamped to 0..=255 via
// uchar_clamp_ff. Returns false for unsupported pixel formats.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_apply_color_matrix(
    context: *mut flow_c,
    bmp: *mut flow_bitmap_bgra,
    row: u32,
    count: u32,
    m: *const *mut f32,
) -> bool {
    let stride: u32 = (*bmp).stride;
    let ch: u32 = flow_pixel_format_bytes_per_pixel((*bmp).fmt);
    let w: u32 = (*bmp).w;
    // Clamp the end row so we never read past the bitmap.
    let h: u32 = row.wrapping_add(count).min((*bmp).h);
    // Translation terms, pre-scaled from the 0..1 matrix domain to bytes.
    let m40: f32 = *(*m.offset(4 as i32 as isize)).offset(0) * 255.0f32;
    let m41: f32 = *(*m.offset(4 as i32 as isize)).offset(1) * 255.0f32;
    let m42: f32 = *(*m.offset(4 as i32 as isize)).offset(2) * 255.0f32;
    let m43: f32 = *(*m.offset(4 as i32 as isize)).offset(3 as i32 as isize) * 255.0f32;
    if ch == 4 as i32 as u32 {
        // 4-channel (BGRA) path: alpha row m[3] participates and output
        // alpha is computed from column 3.
        let mut y: u32 = row;
        while y < h {
            let mut x: u32 = 0 as i32 as u32;
            while x < w {
                let data: *mut u8 = (*bmp)
                    .pixels
                    .offset(stride.wrapping_mul(y) as isize)
                    .offset(x.wrapping_mul(ch) as isize);
                // data[2]=R, data[1]=G, data[0]=B, data[3]=A
                let r: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(0) * *data.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(0) * *data.offset(1) as i32 as f32
                        + *(*m.offset(2)).offset(0) * *data.offset(0) as i32 as f32
                        + *(*m.offset(3 as i32 as isize)).offset(0)
                            * *data.offset(3 as i32 as isize) as i32 as f32
                        + m40,
                );
                let g: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(1) * *data.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(1) * *data.offset(1) as i32 as f32
                        + *(*m.offset(2)).offset(1) * *data.offset(0) as i32 as f32
                        + *(*m.offset(3 as i32 as isize)).offset(1)
                            * *data.offset(3 as i32 as isize) as i32 as f32
                        + m41,
                );
                let b: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(2) * *data.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(2) * *data.offset(1) as i32 as f32
                        + *(*m.offset(2)).offset(2) * *data.offset(0) as i32 as f32
                        + *(*m.offset(3 as i32 as isize)).offset(2)
                            * *data.offset(3 as i32 as isize) as i32 as f32
                        + m42,
                );
                let a: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(3 as i32 as isize) * *data.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(3 as i32 as isize) * *data.offset(1) as i32 as f32
                        + *(*m.offset(2)).offset(3 as i32 as isize) * *data.offset(0) as i32 as f32
                        + *(*m.offset(3 as i32 as isize)).offset(3 as i32 as isize)
                            * *data.offset(3 as i32 as isize) as i32 as f32
                        + m43,
                );
                // All four outputs are computed from the original bytes before
                // any are written back.
                let newdata: *mut u8 = (*bmp)
                    .pixels
                    .offset(stride.wrapping_mul(y) as isize)
                    .offset(x.wrapping_mul(ch) as isize);
                *newdata.offset(0) = b;
                *newdata.offset(1) = g;
                *newdata.offset(2) = r;
                *newdata.offset(3 as i32 as isize) = a;
                x = x.wrapping_add(1)
            }
            y = y.wrapping_add(1)
        }
    } else if ch == 3 as i32 as u32 {
        // 3-channel (BGR) path: no alpha byte, so the m[3] products and the
        // output-alpha column are omitted entirely.
        let mut y_0: u32 = row;
        while y_0 < h {
            let mut x_0: u32 = 0 as i32 as u32;
            while x_0 < w {
                let data_0: *mut libc::c_uchar = (*bmp)
                    .pixels
                    .offset(stride.wrapping_mul(y_0) as isize)
                    .offset(x_0.wrapping_mul(ch) as isize);
                let r_0: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(0) * *data_0.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(0) * *data_0.offset(1) as i32 as f32
                        + *(*m.offset(2)).offset(0) * *data_0.offset(0) as i32 as f32
                        + m40,
                );
                let g_0: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(1) * *data_0.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(1) * *data_0.offset(1) as i32 as f32
                        + *(*m.offset(2)).offset(1) * *data_0.offset(0) as i32 as f32
                        + m41,
                );
                let b_0: u8 = uchar_clamp_ff(
                    *(*m.offset(0)).offset(2) * *data_0.offset(2) as i32 as f32
                        + *(*m.offset(1)).offset(2) * *data_0.offset(1) as i32 as f32
                        + *(*m.offset(2)).offset(2) * *data_0.offset(0) as i32 as f32
                        + m42,
                );
                let newdata_0: *mut u8 = (*bmp)
                    .pixels
                    .offset(stride.wrapping_mul(y_0) as isize)
                    .offset(x_0.wrapping_mul(ch) as isize);
                *newdata_0.offset(0) = b_0;
                *newdata_0.offset(1) = g_0;
                *newdata_0.offset(2) = r_0;
                x_0 = x_0.wrapping_add(1)
            }
            y_0 = y_0.wrapping_add(1)
        }
    } else {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Unsupported_pixel_format,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1838 as i32,
            (*::std::mem::transmute::<&[u8; 36], &[libc::c_char; 36]>(
                b"flow_bitmap_bgra_apply_color_matrix\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    return true;
}
// note: this file isn't exercised by test suite
// Float variant of flow_bitmap_bgra_apply_color_matrix: applies a 5x4 color
// matrix in place to `count` rows of the float bitmap `bmp` starting at
// `row`. Same matrix layout (m[0..3] weight R, G, B, A; m[4] translates),
// but the translation row is used unscaled and no clamping is performed.
// Channel order in memory is B, G, R (, A). Returns false for unsupported
// channel counts.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_float_apply_color_matrix(
    context: *mut flow_c,
    bmp: *mut flow_bitmap_float,
    row: u32,
    count: u32,
    m: *mut *mut f32,
) -> bool {
    let stride: u32 = (*bmp).float_stride;
    let ch: u32 = (*bmp).channels;
    let w: u32 = (*bmp).w;
    // Clamp the end row so we never read past the bitmap.
    let h: u32 = row.wrapping_add(count).min((*bmp).h);
    match ch {
        4 => {
            // 4-channel path: alpha row m[3] participates, and an output
            // alpha is produced from column 3.
            let mut y: u32 = row;
            while y < h {
                let mut x: u32 = 0 as i32 as u32;
                while x < w {
                    // data[2]=R, data[1]=G, data[0]=B, data[3]=A
                    let data: *mut f32 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y) as isize)
                        .offset(x.wrapping_mul(ch) as isize);
                    let r: f32 = *(*m.offset(0)).offset(0) * *data.offset(2)
                        + *(*m.offset(1)).offset(0) * *data.offset(1)
                        + *(*m.offset(2)).offset(0) * *data.offset(0)
                        + *(*m.offset(3 as i32 as isize)).offset(0)
                            * *data.offset(3 as i32 as isize)
                        + *(*m.offset(4 as i32 as isize)).offset(0);
                    let g: f32 = *(*m.offset(0)).offset(1) * *data.offset(2)
                        + *(*m.offset(1)).offset(1) * *data.offset(1)
                        + *(*m.offset(2)).offset(1) * *data.offset(0)
                        + *(*m.offset(3 as i32 as isize)).offset(1)
                            * *data.offset(3 as i32 as isize)
                        + *(*m.offset(4 as i32 as isize)).offset(1);
                    let b: f32 = *(*m.offset(0)).offset(2) * *data.offset(2)
                        + *(*m.offset(1)).offset(2) * *data.offset(1)
                        + *(*m.offset(2)).offset(2) * *data.offset(0)
                        + *(*m.offset(3 as i32 as isize)).offset(2)
                            * *data.offset(3 as i32 as isize)
                        + *(*m.offset(4 as i32 as isize)).offset(2);
                    let a: f32 = *(*m.offset(0)).offset(3 as i32 as isize) * *data.offset(2)
                        + *(*m.offset(1)).offset(3 as i32 as isize) * *data.offset(1)
                        + *(*m.offset(2)).offset(3 as i32 as isize) * *data.offset(0)
                        + *(*m.offset(3 as i32 as isize)).offset(3 as i32 as isize)
                            * *data.offset(3 as i32 as isize)
                        + *(*m.offset(4 as i32 as isize)).offset(3 as i32 as isize);
                    // All outputs are computed before any are stored back.
                    let newdata: *mut f32 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y) as isize)
                        .offset(x.wrapping_mul(ch) as isize);
                    *newdata.offset(0) = b;
                    *newdata.offset(1) = g;
                    *newdata.offset(2) = r;
                    *newdata.offset(3 as i32 as isize) = a;
                    x = x.wrapping_add(1)
                }
                y = y.wrapping_add(1)
            }
            return true;
        }
        3 => {
            // 3-channel path: no alpha, so the m[3] products and the output
            // alpha column are omitted.
            let mut y_0: u32 = row;
            while y_0 < h {
                let mut x_0: u32 = 0 as i32 as u32;
                while x_0 < w {
                    let data_0: *mut f32 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y_0) as isize)
                        .offset(x_0.wrapping_mul(ch) as isize);
                    let r_0: f32 = *(*m.offset(0)).offset(0) * *data_0.offset(2)
                        + *(*m.offset(1)).offset(0) * *data_0.offset(1)
                        + *(*m.offset(2)).offset(0) * *data_0.offset(0)
                        + *(*m.offset(4 as i32 as isize)).offset(0);
                    let g_0: f32 = *(*m.offset(0)).offset(1) * *data_0.offset(2)
                        + *(*m.offset(1)).offset(1) * *data_0.offset(1)
                        + *(*m.offset(2)).offset(1) * *data_0.offset(0)
                        + *(*m.offset(4 as i32 as isize)).offset(1);
                    let b_0: f32 = *(*m.offset(0)).offset(2) * *data_0.offset(2)
                        + *(*m.offset(1)).offset(2) * *data_0.offset(1)
                        + *(*m.offset(2)).offset(2) * *data_0.offset(0)
                        + *(*m.offset(4 as i32 as isize)).offset(2);
                    let newdata_0: *mut f32 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y_0) as isize)
                        .offset(x_0.wrapping_mul(ch) as isize);
                    *newdata_0.offset(0) = b_0;
                    *newdata_0.offset(1) = g_0;
                    *newdata_0.offset(2) = r_0;
                    x_0 = x_0.wrapping_add(1)
                }
                y_0 = y_0.wrapping_add(1)
            }
            return true;
        }
        _ => {
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Unsupported_pixel_format,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1893 as i32,
                (*::std::mem::transmute::<&[u8; 37], &[libc::c_char; 37]>(
                    b"flow_bitmap_float_apply_color_matrix\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
    };
}
// Fills `histograms` from the pixels of `bmp`.
//
// `histogram_size_per_channel` must be 256, so `histograms` must point at
// `256 * histogram_count` u64 bins (zeroed by the caller). Layouts:
//   histogram_count == 1: a single 256-bin luma histogram (Rec. 601 weights).
//   histogram_count == 2: luma histogram, then a saturation histogram
//                         (largest absolute adjacent-channel difference).
//   histogram_count == 3: separate R, G, B histograms, in that order.
// Pixel bytes are ordered B, G, R (, A); alpha is ignored. On success,
// `*pixels_sampled` receives the number of pixels visited.
// Returns false (recording an error on `context`) for an unsupported bin
// count or pixel format.
#[no_mangle]
pub unsafe extern "C" fn flow_bitmap_bgra_populate_histogram(
    context: *mut flow_c,
    bmp: *const flow_bitmap_bgra,
    histograms: *mut u64,
    histogram_size_per_channel: u32,
    histogram_count: u32,
    pixels_sampled: *mut u64,
) -> bool {
    let row: u32 = 0;
    let count: u32 = (*bmp).h;
    let stride: u32 = (*bmp).stride;
    let ch: u32 = flow_pixel_format_bytes_per_pixel((*bmp).fmt);
    let w: u32 = (*bmp).w;
    let h: u32 = (row.wrapping_add(count)).min((*bmp).h);
    if histogram_size_per_channel != 256 {
        // We're restricting it to this for speed
        FLOW_error(
            context,
            flow_status_code::Invalid_argument,
            "flow_bitmap_bgra_populate_histogram",
        );
        return false;
    }
    // Extra right-shift that would map 0..=255 channel values onto smaller
    // histograms; zero while the bin count is fixed at 256.
    let shift = 0; // 8 - intlog2(histogram_size_per_channel)
    if ch == 4 || ch == 3 {
        if histogram_count == 1 {
            let mut y: u32 = row;
            while y < h {
                let mut x: u32 = 0;
                while x < w {
                    let data: *const u8 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y) as isize)
                        .offset(x.wrapping_mul(ch) as isize);
                    // Rec. 601 luma in fixed point: the weights sum to 1024,
                    // so the weighted sum must be divided by 1024 (>> 10) to
                    // yield a 0..=255 bin index. (Previously only `shift` was
                    // applied, indexing far past the 256-bin histogram.)
                    let ref mut fresh9 = *histograms.offset(
                        ((306 as i32 * *data.offset(2) as i32
                            + 601 as i32 * *data.offset(1) as i32
                            + 117 as i32 * *data.offset(0) as i32)
                            >> (shift + 10)) as isize,
                    );
                    *fresh9 = (*fresh9).wrapping_add(1);
                    x = x.wrapping_add(1)
                }
                y = y.wrapping_add(1)
            }
        } else if histogram_count == 3 {
            // One histogram per channel: R at offset 0, G at 256, B at 512.
            let mut y: u32 = row;
            while y < h {
                let mut x: u32 = 0;
                while x < w {
                    let data: *const u8 = (*bmp)
                        .pixels
                        .offset((stride * y) as isize)
                        .offset((x * ch) as isize);
                    let ref mut fresh10 =
                        *histograms.offset((*data.offset(2) as i32 >> shift) as isize);
                    *fresh10 = (*fresh10).wrapping_add(1);
                    let ref mut fresh11 = *histograms.offset(
                        ((*data.offset(1) as i32 >> shift) as u32)
                            .wrapping_add(histogram_size_per_channel)
                            as isize,
                    );
                    *fresh11 = (*fresh11).wrapping_add(1);
                    let ref mut fresh12 = *histograms.offset(
                        ((*data.offset(0) as i32 >> shift) as u32)
                            .wrapping_add((2u32).wrapping_mul(histogram_size_per_channel))
                            as isize,
                    );
                    *fresh12 = (*fresh12).wrapping_add(1);
                    x = x.wrapping_add(1)
                }
                y = y.wrapping_add(1)
            }
        } else if histogram_count == 2 {
            let mut y_1: u32 = row;
            while y_1 < h {
                let mut x_1: u32 = 0 as i32 as u32;
                while x_1 < w {
                    let data_1: *const u8 = (*bmp)
                        .pixels
                        .offset(stride.wrapping_mul(y_1) as isize)
                        .offset(x_1.wrapping_mul(ch) as isize);
                    // Calculate luminosity and saturation
                    // Luma histogram (same fixed-point scale as above).
                    let ref mut fresh13 = *histograms.offset(
                        ((306 as i32 * *data_1.offset(2) as i32
                            + 601 as i32 * *data_1.offset(1) as i32
                            + 117 as i32 * *data_1.offset(0) as i32)
                            >> (shift + 10)) as isize,
                    );
                    *fresh13 = (*fresh13).wrapping_add(1);
                    // Saturation proxy: the larger absolute difference between
                    // adjacent channels, clamped to 255. (Previously this used
                    // int_max(255, ..), which always evaluated to 255 and
                    // pinned every sample into the last bin.)
                    let ref mut fresh14 =
                        *histograms.offset(histogram_size_per_channel.wrapping_add(
                            (int_max(
                                (*data_1.offset(2) as i32 - *data_1.offset(1) as i32).abs(),
                                (*data_1.offset(1) as i32 - *data_1.offset(0) as i32).abs(),
                            )
                            .min(255 as i32)
                                >> shift) as u32,
                        ) as isize);
                    *fresh14 = (*fresh14).wrapping_add(1);
                    x_1 = x_1.wrapping_add(1)
                }
                y_1 = y_1.wrapping_add(1)
            }
        } else {
            flow_context_set_error_get_message_buffer(
                context,
                flow_status_code::Invalid_internal_state,
                b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
                1950 as i32,
                (*::std::mem::transmute::<&[u8; 36], &[libc::c_char; 36]>(
                    b"flow_bitmap_bgra_populate_histogram\x00",
                ))
                .as_ptr(),
            );
            return false;
        }
        *pixels_sampled = h.wrapping_sub(row).wrapping_mul(w) as u64
    } else {
        flow_context_set_error_get_message_buffer(
            context,
            flow_status_code::Unsupported_pixel_format,
            b"lib/graphics.c\x00" as *const u8 as *const libc::c_char,
            1956 as i32,
            (*::std::mem::transmute::<&[u8; 36], &[libc::c_char; 36]>(
                b"flow_bitmap_bgra_populate_histogram\x00",
            ))
            .as_ptr(),
        );
        return false;
    }
    return true;
}
// Gamma correction http://www.4p8.com/eric.brasseur/gamma.html#formulas
// Initializes a color context for the requested working floatspace.
// `a` supplies the gamma exponent when the gamma floatspace is selected;
// `_b` and `_c` are accepted for ABI compatibility but unused.
#[no_mangle]
pub unsafe extern "C" fn flow_colorcontext_init(
    _context: *mut flow_c,
    colorcontext: *mut flow_colorcontext_info,
    space: flow_working_floatspace,
    a: f32,
    _b: f32,
    _c: f32,
) {
    // Record the floatspace and derive which transfer functions apply when
    // converting between 8-bit sRGB and the working space.
    (*colorcontext).floatspace = space;
    (*colorcontext).apply_gamma = (space & flow_working_floatspace_gamma) > 0;
    (*colorcontext).apply_srgb = (space & flow_working_floatspace_linear) > 0;
    /* Code guarded by #ifdef EXPOSE_SIGMOID not translated */
    if (*colorcontext).apply_gamma {
        // Cache both the exponent and its reciprocal for decode/encode.
        (*colorcontext).gamma = a;
        (*colorcontext).gamma_inverse = (1.0f64 / a as f64) as f32;
    }
    // Precompute the 256-entry sRGB-byte -> float lookup table.
    let mut idx: usize = 0;
    while idx < 256 {
        (*colorcontext).byte_to_float[idx] =
            flow_colorcontext_srgb_to_floatspace_uncached(colorcontext, idx as u8);
        idx += 1;
    }
}
|
//! Cranelift compilation context and main entry point.
//!
//! When compiling many small functions, it is important to avoid repeatedly allocating and
//! deallocating the data structures needed for compilation. The `Context` struct is used to hold
//! on to memory allocations between function compilations.
//!
//! The context does not hold a `TargetIsa` instance which has to be provided as an argument
//! instead. This is because an ISA instance is immutable and can be used by multiple compilation
//! contexts concurrently. Typically, you would have one context per compilation thread and only a
//! single ISA instance.
use crate::binemit::{
relax_branches, shrink_instructions, CodeInfo, MemoryCodeSink, RelocSink, StackmapSink,
TrapSink,
};
use crate::dce::do_dce;
use crate::dominator_tree::DominatorTree;
use crate::flowgraph::ControlFlowGraph;
use crate::ir::Function;
use crate::isa::TargetIsa;
use crate::legalize_function;
use crate::licm::do_licm;
use crate::loop_analysis::LoopAnalysis;
use crate::nan_canonicalization::do_nan_canonicalization;
use crate::postopt::do_postopt;
use crate::redundant_reload_remover::RedundantReloadRemover;
use crate::regalloc;
use crate::result::CodegenResult;
use crate::settings::{FlagsOrIsa, OptLevel};
use crate::simple_gvn::do_simple_gvn;
use crate::simple_preopt::do_preopt;
use crate::timing;
use crate::unreachable_code::eliminate_unreachable_code;
use crate::value_label::{build_value_labels_ranges, ComparableSourceLoc, ValueLabelsRanges};
use crate::verifier::{verify_context, verify_locations, VerifierErrors, VerifierResult};
use std::vec::Vec;
/// Persistent data structures and compilation pipeline.
///
/// A `Context` owns every allocation-heavy data structure used while
/// compiling a single function, so those allocations can be reused across
/// many compilations (see [`Context::clear`]). A `TargetIsa` is deliberately
/// not stored here; it is passed to each pass instead (see the module docs).
pub struct Context {
    /// The function we're compiling.
    pub func: Function,
    /// The control flow graph of `func`.
    pub cfg: ControlFlowGraph,
    /// Dominator tree for `func`.
    pub domtree: DominatorTree,
    /// Register allocation context.
    pub regalloc: regalloc::Context,
    /// Loop analysis of `func`.
    pub loop_analysis: LoopAnalysis,
    /// Redundant-reload remover context.
    pub redundant_reload_remover: RedundantReloadRemover,
}
impl Context {
    /// Allocate a new compilation context.
    ///
    /// The returned instance should be reused for compiling multiple functions in order to avoid
    /// needless allocator thrashing.
    pub fn new() -> Self {
        Self::for_function(Function::new())
    }
    /// Allocate a new compilation context with an existing Function.
    ///
    /// The returned instance should be reused for compiling multiple functions in order to avoid
    /// needless allocator thrashing.
    pub fn for_function(func: Function) -> Self {
        Self {
            func,
            cfg: ControlFlowGraph::new(),
            domtree: DominatorTree::new(),
            regalloc: regalloc::Context::new(),
            loop_analysis: LoopAnalysis::new(),
            redundant_reload_remover: RedundantReloadRemover::new(),
        }
    }
    /// Clear all data structures in this context.
    ///
    /// Each member is cleared in place rather than replaced, preserving the
    /// backing allocations for the next compilation.
    pub fn clear(&mut self) {
        self.func.clear();
        self.cfg.clear();
        self.domtree.clear();
        self.regalloc.clear();
        self.loop_analysis.clear();
        self.redundant_reload_remover.clear();
    }
    /// Compile the function, and emit machine code into a `Vec<u8>`.
    ///
    /// Run the function through all the passes necessary to generate code for the target ISA
    /// represented by `isa`, as well as the final step of emitting machine code into a
    /// `Vec<u8>`. The machine code is not relocated. Instead, any relocations are emitted
    /// into `relocs`.
    ///
    /// This function calls `compile` and `emit_to_memory`, taking care to resize `mem` as
    /// needed, so it provides a safe interface.
    ///
    /// Returns information about the function's code and read-only data.
    pub fn compile_and_emit(
        &mut self,
        isa: &dyn TargetIsa,
        mem: &mut Vec<u8>,
        relocs: &mut dyn RelocSink,
        traps: &mut dyn TrapSink,
        stackmaps: &mut dyn StackmapSink,
    ) -> CodegenResult<CodeInfo> {
        let info = self.compile(isa)?;
        // Grow `mem` by exactly the code size reported by `compile`, then emit
        // into the newly-appended region.
        let old_len = mem.len();
        mem.resize(old_len + info.total_size as usize, 0);
        let new_info = unsafe {
            self.emit_to_memory(isa, mem.as_mut_ptr().add(old_len), relocs, traps, stackmaps)
        };
        debug_assert!(new_info == info);
        Ok(info)
    }
    /// Compile the function.
    ///
    /// Run the function through all the passes necessary to generate code for the target ISA
    /// represented by `isa`. This does not include the final step of emitting machine code into a
    /// code sink.
    ///
    /// Returns information about the function's code and read-only data.
    pub fn compile(&mut self, isa: &dyn TargetIsa) -> CodegenResult<CodeInfo> {
        let _tt = timing::compile();
        self.verify_if(isa)?;
        self.compute_cfg();
        // Optional optimizations are skipped at the fastest opt level.
        if isa.flags().opt_level() != OptLevel::Fastest {
            self.preopt(isa)?;
        }
        if isa.flags().enable_nan_canonicalization() {
            self.canonicalize_nans(isa)?;
        }
        self.legalize(isa)?;
        if isa.flags().opt_level() != OptLevel::Fastest {
            self.postopt(isa)?;
        }
        if isa.flags().opt_level() == OptLevel::Best {
            self.compute_domtree();
            self.compute_loop_analysis();
            self.licm(isa)?;
            self.simple_gvn(isa)?;
        }
        // Recompute the domtree: the passes above (legalization in
        // particular) may have invalidated it.
        self.compute_domtree();
        self.eliminate_unreachable_code(isa)?;
        if isa.flags().opt_level() != OptLevel::Fastest {
            self.dce(isa)?;
        }
        self.regalloc(isa)?;
        self.prologue_epilogue(isa)?;
        if isa.flags().opt_level() == OptLevel::Best {
            self.redundant_reload_remover(isa)?;
            self.shrink_instructions(isa)?;
        }
        self.relax_branches(isa)
    }
    /// Emit machine code directly into raw memory.
    ///
    /// Write all of the function's machine code to the memory at `mem`. The size of the machine
    /// code is returned by `compile` above.
    ///
    /// The machine code is not relocated. Instead, any relocations are emitted into `relocs`.
    ///
    /// This function is unsafe since it does not perform bounds checking on the memory buffer,
    /// and it can't guarantee that the `mem` pointer is valid.
    ///
    /// Returns information about the emitted code and data.
    pub unsafe fn emit_to_memory(
        &self,
        isa: &dyn TargetIsa,
        mem: *mut u8,
        relocs: &mut dyn RelocSink,
        traps: &mut dyn TrapSink,
        stackmaps: &mut dyn StackmapSink,
    ) -> CodeInfo {
        let _tt = timing::binemit();
        let mut sink = MemoryCodeSink::new(mem, relocs, traps, stackmaps);
        isa.emit_function_to_memory(&self.func, &mut sink);
        sink.info
    }
    /// Run the verifier on the function.
    ///
    /// Also check that the dominator tree and control flow graph are consistent with the function.
    pub fn verify<'a, FOI: Into<FlagsOrIsa<'a>>>(&self, fisa: FOI) -> VerifierResult<()> {
        let mut errors = VerifierErrors::default();
        let _ = verify_context(&self.func, &self.cfg, &self.domtree, fisa, &mut errors);
        if errors.is_empty() {
            Ok(())
        } else {
            Err(errors)
        }
    }
    /// Run the verifier only if the `enable_verifier` setting is true.
    pub fn verify_if<'a, FOI: Into<FlagsOrIsa<'a>>>(&self, fisa: FOI) -> CodegenResult<()> {
        let fisa = fisa.into();
        if fisa.flags.enable_verifier() {
            self.verify(fisa)?;
        }
        Ok(())
    }
    /// Run the locations verifier on the function.
    pub fn verify_locations(&self, isa: &dyn TargetIsa) -> VerifierResult<()> {
        let mut errors = VerifierErrors::default();
        let _ = verify_locations(isa, &self.func, &self.cfg, None, &mut errors);
        if errors.is_empty() {
            Ok(())
        } else {
            Err(errors)
        }
    }
    /// Run the locations verifier only if the `enable_verifier` setting is true.
    pub fn verify_locations_if(&self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        if isa.flags().enable_verifier() {
            self.verify_locations(isa)?;
        }
        Ok(())
    }
    /// Perform dead-code elimination on the function.
    pub fn dce<'a, FOI: Into<FlagsOrIsa<'a>>>(&mut self, fisa: FOI) -> CodegenResult<()> {
        do_dce(&mut self.func, &mut self.domtree);
        self.verify_if(fisa)
    }
    /// Perform pre-legalization rewrites on the function.
    pub fn preopt(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        do_preopt(&mut self.func, &mut self.cfg);
        self.verify_if(isa)
    }
    /// Perform NaN canonicalizing rewrites on the function.
    pub fn canonicalize_nans(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        do_nan_canonicalization(&mut self.func);
        self.verify_if(isa)
    }
    /// Run the legalizer for `isa` on the function.
    pub fn legalize(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        // Legalization invalidates the domtree and loop_analysis by mutating the CFG.
        // TODO: Avoid doing this when legalization doesn't actually mutate the CFG.
        self.domtree.clear();
        self.loop_analysis.clear();
        legalize_function(&mut self.func, &mut self.cfg, isa);
        self.verify_if(isa)
    }
    /// Perform post-legalization rewrites on the function.
    pub fn postopt(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        do_postopt(&mut self.func, isa);
        self.verify_if(isa)
    }
    /// Compute the control flow graph.
    pub fn compute_cfg(&mut self) {
        self.cfg.compute(&self.func)
    }
    /// Compute dominator tree.
    pub fn compute_domtree(&mut self) {
        self.domtree.compute(&self.func, &self.cfg)
    }
    /// Compute the loop analysis.
    pub fn compute_loop_analysis(&mut self) {
        self.loop_analysis
            .compute(&self.func, &self.cfg, &self.domtree)
    }
    /// Compute the control flow graph and dominator tree.
    pub fn flowgraph(&mut self) {
        self.compute_cfg();
        self.compute_domtree()
    }
    /// Perform simple GVN on the function.
    pub fn simple_gvn<'a, FOI: Into<FlagsOrIsa<'a>>>(&mut self, fisa: FOI) -> CodegenResult<()> {
        do_simple_gvn(&mut self.func, &mut self.domtree);
        self.verify_if(fisa)
    }
    /// Perform LICM on the function.
    pub fn licm(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        do_licm(
            isa,
            &mut self.func,
            &mut self.cfg,
            &mut self.domtree,
            &mut self.loop_analysis,
        );
        self.verify_if(isa)
    }
    /// Perform unreachable code elimination.
    pub fn eliminate_unreachable_code<'a, FOI>(&mut self, fisa: FOI) -> CodegenResult<()>
    where
        FOI: Into<FlagsOrIsa<'a>>,
    {
        eliminate_unreachable_code(&mut self.func, &mut self.cfg, &self.domtree);
        self.verify_if(fisa)
    }
    /// Run the register allocator.
    pub fn regalloc(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        self.regalloc
            .run(isa, &mut self.func, &mut self.cfg, &mut self.domtree)
    }
    /// Insert prologue and epilogues after computing the stack frame layout.
    pub fn prologue_epilogue(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        isa.prologue_epilogue(&mut self.func)?;
        self.verify_if(isa)?;
        self.verify_locations_if(isa)
    }
    /// Do redundant-reload removal after allocation of both registers and stack slots.
    pub fn redundant_reload_remover(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        self.redundant_reload_remover
            .run(isa, &mut self.func, &self.cfg);
        self.verify_if(isa)
    }
    /// Run the instruction shrinking pass.
    pub fn shrink_instructions(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        shrink_instructions(&mut self.func, isa);
        self.verify_if(isa)?;
        self.verify_locations_if(isa)
    }
    /// Run the branch relaxation pass and return information about the function's code and
    /// read-only data.
    pub fn relax_branches(&mut self, isa: &dyn TargetIsa) -> CodegenResult<CodeInfo> {
        let info = relax_branches(&mut self.func, &mut self.cfg, &mut self.domtree, isa)?;
        self.verify_if(isa)?;
        self.verify_locations_if(isa)?;
        Ok(info)
    }
    /// Builds ranges and location for specified value labels.
    pub fn build_value_labels_ranges(
        &self,
        isa: &dyn TargetIsa,
    ) -> CodegenResult<ValueLabelsRanges> {
        Ok(build_value_labels_ranges::<ComparableSourceLoc>(
            &self.func,
            &self.regalloc,
            isa,
        ))
    }
}
Log compiled and legalized functions
//! Cranelift compilation context and main entry point.
//!
//! When compiling many small functions, it is important to avoid repeatedly allocating and
//! deallocating the data structures needed for compilation. The `Context` struct is used to hold
//! on to memory allocations between function compilations.
//!
//! The context does not hold a `TargetIsa` instance which has to be provided as an argument
//! instead. This is because an ISA instance is immutable and can be used by multiple compilation
//! contexts concurrently. Typically, you would have one context per compilation thread and only a
//! single ISA instance.
use crate::binemit::{
relax_branches, shrink_instructions, CodeInfo, MemoryCodeSink, RelocSink, StackmapSink,
TrapSink,
};
use crate::dce::do_dce;
use crate::dominator_tree::DominatorTree;
use crate::flowgraph::ControlFlowGraph;
use crate::ir::Function;
use crate::isa::TargetIsa;
use crate::legalize_function;
use crate::licm::do_licm;
use crate::loop_analysis::LoopAnalysis;
use crate::nan_canonicalization::do_nan_canonicalization;
use crate::postopt::do_postopt;
use crate::redundant_reload_remover::RedundantReloadRemover;
use crate::regalloc;
use crate::result::CodegenResult;
use crate::settings::{FlagsOrIsa, OptLevel};
use crate::simple_gvn::do_simple_gvn;
use crate::simple_preopt::do_preopt;
use crate::timing;
use crate::unreachable_code::eliminate_unreachable_code;
use crate::value_label::{build_value_labels_ranges, ComparableSourceLoc, ValueLabelsRanges};
use crate::verifier::{verify_context, verify_locations, VerifierErrors, VerifierResult};
use log::debug;
use std::vec::Vec;
/// Persistent data structures and compilation pipeline.
///
/// Bundles the function being compiled together with the analyses and pass
/// contexts (CFG, dominator tree, register allocator, loop analysis, reload
/// remover) so their allocations can be reused across compilations.
pub struct Context {
    /// The function we're compiling.
    pub func: Function,
    /// The control flow graph of `func`.
    pub cfg: ControlFlowGraph,
    /// Dominator tree for `func`.
    pub domtree: DominatorTree,
    /// Register allocation context.
    pub regalloc: regalloc::Context,
    /// Loop analysis of `func`.
    pub loop_analysis: LoopAnalysis,
    /// Redundant-reload remover context.
    pub redundant_reload_remover: RedundantReloadRemover,
}
impl Context {
    /// Allocate a new compilation context.
    ///
    /// The returned instance should be reused for compiling multiple functions in order to avoid
    /// needless allocator thrashing.
    pub fn new() -> Self {
        Self::for_function(Function::new())
    }

    /// Allocate a new compilation context with an existing Function.
    ///
    /// The returned instance should be reused for compiling multiple functions in order to avoid
    /// needless allocator thrashing.
    pub fn for_function(func: Function) -> Self {
        Self {
            func,
            cfg: ControlFlowGraph::new(),
            domtree: DominatorTree::new(),
            regalloc: regalloc::Context::new(),
            loop_analysis: LoopAnalysis::new(),
            redundant_reload_remover: RedundantReloadRemover::new(),
        }
    }

    /// Clear all data structures in this context.
    pub fn clear(&mut self) {
        self.func.clear();
        self.cfg.clear();
        self.domtree.clear();
        self.regalloc.clear();
        self.loop_analysis.clear();
        self.redundant_reload_remover.clear();
    }

    /// Compile the function, and emit machine code into a `Vec<u8>`.
    ///
    /// Run the function through all the passes necessary to generate code for the target ISA
    /// represented by `isa`, as well as the final step of emitting machine code into a
    /// `Vec<u8>`. The machine code is not relocated. Instead, any relocations are emitted
    /// into `relocs`.
    ///
    /// This function calls `compile` and `emit_to_memory`, taking care to resize `mem` as
    /// needed, so it provides a safe interface.
    ///
    /// Returns information about the function's code and read-only data.
    pub fn compile_and_emit(
        &mut self,
        isa: &dyn TargetIsa,
        mem: &mut Vec<u8>,
        relocs: &mut dyn RelocSink,
        traps: &mut dyn TrapSink,
        stackmaps: &mut dyn StackmapSink,
    ) -> CodegenResult<CodeInfo> {
        let info = self.compile(isa)?;
        let old_len = mem.len();
        // Reserve exactly `total_size` zeroed bytes so the raw emission below
        // writes entirely within the buffer.
        mem.resize(old_len + info.total_size as usize, 0);
        let new_info = unsafe {
            self.emit_to_memory(isa, mem.as_mut_ptr().add(old_len), relocs, traps, stackmaps)
        };
        // Emission should report the same code layout that `compile` computed.
        debug_assert!(new_info == info);
        Ok(info)
    }

    /// Compile the function.
    ///
    /// Run the function through all the passes necessary to generate code for the target ISA
    /// represented by `isa`. This does not include the final step of emitting machine code into a
    /// code sink.
    ///
    /// Returns information about the function's code and read-only data.
    pub fn compile(&mut self, isa: &dyn TargetIsa) -> CodegenResult<CodeInfo> {
        let _tt = timing::compile();
        self.verify_if(isa)?;
        debug!("Compiling:\n{}", self.func.display(isa));
        self.compute_cfg();
        // Optional optimizations are gated on the ISA's opt level.
        if isa.flags().opt_level() != OptLevel::Fastest {
            self.preopt(isa)?;
        }
        if isa.flags().enable_nan_canonicalization() {
            self.canonicalize_nans(isa)?;
        }
        self.legalize(isa)?;
        if isa.flags().opt_level() != OptLevel::Fastest {
            self.postopt(isa)?;
        }
        if isa.flags().opt_level() == OptLevel::Best {
            self.compute_domtree();
            self.compute_loop_analysis();
            self.licm(isa)?;
            self.simple_gvn(isa)?;
        }
        // Recompute the domtree: legalization/optimization may have changed the CFG.
        self.compute_domtree();
        self.eliminate_unreachable_code(isa)?;
        if isa.flags().opt_level() != OptLevel::Fastest {
            self.dce(isa)?;
        }
        self.regalloc(isa)?;
        self.prologue_epilogue(isa)?;
        if isa.flags().opt_level() == OptLevel::Best {
            self.redundant_reload_remover(isa)?;
            self.shrink_instructions(isa)?;
        }
        let result = self.relax_branches(isa);
        debug!("Compiled:\n{}", self.func.display(isa));
        result
    }

    /// Emit machine code directly into raw memory.
    ///
    /// Write all of the function's machine code to the memory at `mem`. The size of the machine
    /// code is returned by `compile` above.
    ///
    /// The machine code is not relocated. Instead, any relocations are emitted into `relocs`.
    ///
    /// This function is unsafe since it does not perform bounds checking on the memory buffer,
    /// and it can't guarantee that the `mem` pointer is valid.
    ///
    /// Returns information about the emitted code and data.
    pub unsafe fn emit_to_memory(
        &self,
        isa: &dyn TargetIsa,
        mem: *mut u8,
        relocs: &mut dyn RelocSink,
        traps: &mut dyn TrapSink,
        stackmaps: &mut dyn StackmapSink,
    ) -> CodeInfo {
        let _tt = timing::binemit();
        let mut sink = MemoryCodeSink::new(mem, relocs, traps, stackmaps);
        isa.emit_function_to_memory(&self.func, &mut sink);
        sink.info
    }

    /// Run the verifier on the function.
    ///
    /// Also check that the dominator tree and control flow graph are consistent with the function.
    pub fn verify<'a, FOI: Into<FlagsOrIsa<'a>>>(&self, fisa: FOI) -> VerifierResult<()> {
        let mut errors = VerifierErrors::default();
        // The return value is ignored: failures accumulate in `errors`.
        let _ = verify_context(&self.func, &self.cfg, &self.domtree, fisa, &mut errors);

        if errors.is_empty() {
            Ok(())
        } else {
            Err(errors)
        }
    }

    /// Run the verifier only if the `enable_verifier` setting is true.
    pub fn verify_if<'a, FOI: Into<FlagsOrIsa<'a>>>(&self, fisa: FOI) -> CodegenResult<()> {
        let fisa = fisa.into();
        if fisa.flags.enable_verifier() {
            self.verify(fisa)?;
        }
        Ok(())
    }

    /// Run the locations verifier on the function.
    pub fn verify_locations(&self, isa: &dyn TargetIsa) -> VerifierResult<()> {
        let mut errors = VerifierErrors::default();
        let _ = verify_locations(isa, &self.func, &self.cfg, None, &mut errors);

        if errors.is_empty() {
            Ok(())
        } else {
            Err(errors)
        }
    }

    /// Run the locations verifier only if the `enable_verifier` setting is true.
    pub fn verify_locations_if(&self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        if isa.flags().enable_verifier() {
            self.verify_locations(isa)?;
        }
        Ok(())
    }

    /// Perform dead-code elimination on the function.
    pub fn dce<'a, FOI: Into<FlagsOrIsa<'a>>>(&mut self, fisa: FOI) -> CodegenResult<()> {
        do_dce(&mut self.func, &mut self.domtree);
        self.verify_if(fisa)?;
        Ok(())
    }

    /// Perform pre-legalization rewrites on the function.
    pub fn preopt(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        do_preopt(&mut self.func, &mut self.cfg);
        self.verify_if(isa)?;
        Ok(())
    }

    /// Perform NaN canonicalizing rewrites on the function.
    pub fn canonicalize_nans(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        do_nan_canonicalization(&mut self.func);
        self.verify_if(isa)
    }

    /// Run the legalizer for `isa` on the function.
    pub fn legalize(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        // Legalization invalidates the domtree and loop_analysis by mutating the CFG.
        // TODO: Avoid doing this when legalization doesn't actually mutate the CFG.
        self.domtree.clear();
        self.loop_analysis.clear();
        legalize_function(&mut self.func, &mut self.cfg, isa);
        debug!("Legalized:\n{}", self.func.display(isa));
        self.verify_if(isa)
    }

    /// Perform post-legalization rewrites on the function.
    pub fn postopt(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        do_postopt(&mut self.func, isa);
        self.verify_if(isa)?;
        Ok(())
    }

    /// Compute the control flow graph.
    pub fn compute_cfg(&mut self) {
        self.cfg.compute(&self.func)
    }

    /// Compute dominator tree.
    pub fn compute_domtree(&mut self) {
        self.domtree.compute(&self.func, &self.cfg)
    }

    /// Compute the loop analysis.
    pub fn compute_loop_analysis(&mut self) {
        self.loop_analysis
            .compute(&self.func, &self.cfg, &self.domtree)
    }

    /// Compute the control flow graph and dominator tree.
    pub fn flowgraph(&mut self) {
        self.compute_cfg();
        self.compute_domtree()
    }

    /// Perform simple GVN on the function.
    pub fn simple_gvn<'a, FOI: Into<FlagsOrIsa<'a>>>(&mut self, fisa: FOI) -> CodegenResult<()> {
        do_simple_gvn(&mut self.func, &mut self.domtree);
        self.verify_if(fisa)
    }

    /// Perform LICM on the function.
    pub fn licm(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        do_licm(
            isa,
            &mut self.func,
            &mut self.cfg,
            &mut self.domtree,
            &mut self.loop_analysis,
        );
        self.verify_if(isa)
    }

    /// Perform unreachable code elimination.
    pub fn eliminate_unreachable_code<'a, FOI>(&mut self, fisa: FOI) -> CodegenResult<()>
    where
        FOI: Into<FlagsOrIsa<'a>>,
    {
        eliminate_unreachable_code(&mut self.func, &mut self.cfg, &self.domtree);
        self.verify_if(fisa)
    }

    /// Run the register allocator.
    pub fn regalloc(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        self.regalloc
            .run(isa, &mut self.func, &mut self.cfg, &mut self.domtree)
    }

    /// Insert prologue and epilogues after computing the stack frame layout.
    pub fn prologue_epilogue(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        isa.prologue_epilogue(&mut self.func)?;
        self.verify_if(isa)?;
        self.verify_locations_if(isa)?;
        Ok(())
    }

    /// Do redundant-reload removal after allocation of both registers and stack slots.
    pub fn redundant_reload_remover(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        self.redundant_reload_remover
            .run(isa, &mut self.func, &self.cfg);
        self.verify_if(isa)?;
        Ok(())
    }

    /// Run the instruction shrinking pass.
    pub fn shrink_instructions(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> {
        shrink_instructions(&mut self.func, isa);
        self.verify_if(isa)?;
        self.verify_locations_if(isa)?;
        Ok(())
    }

    /// Run the branch relaxation pass and return information about the function's code and
    /// read-only data.
    pub fn relax_branches(&mut self, isa: &dyn TargetIsa) -> CodegenResult<CodeInfo> {
        let info = relax_branches(&mut self.func, &mut self.cfg, &mut self.domtree, isa)?;
        self.verify_if(isa)?;
        self.verify_locations_if(isa)?;
        Ok(info)
    }

    /// Builds ranges and location for specified value labels.
    pub fn build_value_labels_ranges(
        &self,
        isa: &dyn TargetIsa,
    ) -> CodegenResult<ValueLabelsRanges> {
        Ok(build_value_labels_ranges::<ComparableSourceLoc>(
            &self.func,
            &self.regalloc,
            isa,
        ))
    }
}
|
//! Unification and canonicalization logic.
use std::{fmt, mem, sync::Arc};
use chalk_ir::{
cast::Cast, fold::Fold, interner::HasInterner, zip::Zip, FloatTy, IntTy, NoSolution,
TyVariableKind, UniverseIndex,
};
use chalk_solve::infer::ParameterEnaVariableExt;
use ena::unify::UnifyKey;
use hir_expand::name;
use stdx::never;
use super::{InferOk, InferResult, InferenceContext, TypeError};
use crate::{
db::HirDatabase, fold_tys, static_lifetime, traits::FnTrait, AliasEq, AliasTy, BoundVar,
Canonical, Const, DebruijnIndex, GenericArg, GenericArgData, Goal, Guidance, InEnvironment,
InferenceVar, Interner, Lifetime, ParamKind, ProjectionTy, ProjectionTyExt, Scalar, Solution,
Substitution, TraitEnvironment, Ty, TyBuilder, TyExt, TyKind, VariableKind,
};
impl<'a> InferenceContext<'a> {
    /// Canonicalize `t`, replacing the inference variables it contains with
    /// canonical bound variables. Simply delegates to the inference table.
    pub(super) fn canonicalize<T: Fold<Interner> + HasInterner<Interner = Interner>>(
        &mut self,
        t: T,
    ) -> Canonicalized<T::Result>
    where
        T::Result: HasInterner<Interner = Interner>,
    {
        self.table.canonicalize(t)
    }
}
/// The result of canonicalization: the canonical value plus the free
/// variables that were replaced, indexed by canonical binder position so a
/// solution can later be mapped back onto the inference table.
#[derive(Debug, Clone)]
pub(crate) struct Canonicalized<T>
where
    T: HasInterner<Interner = Interner>,
{
    pub(crate) value: Canonical<T>,
    // The original free variables, one per canonical binder.
    free_vars: Vec<GenericArg>,
}
impl<T: HasInterner<Interner = Interner>> Canonicalized<T> {
    /// Apply a solver solution back to the inference table: every binder in
    /// the solution is instantiated with a fresh inference variable, then each
    /// of our original free variables is unified with the corresponding
    /// solution value.
    pub(super) fn apply_solution(
        &self,
        ctx: &mut InferenceTable,
        solution: Canonical<Substitution>,
    ) {
        // the solution may contain new variables, which we need to convert to new inference vars
        let new_vars = Substitution::from_iter(
            Interner,
            solution.binders.iter(Interner).map(|k| match &k.kind {
                VariableKind::Ty(TyVariableKind::General) => ctx.new_type_var().cast(Interner),
                VariableKind::Ty(TyVariableKind::Integer) => ctx.new_integer_var().cast(Interner),
                VariableKind::Ty(TyVariableKind::Float) => ctx.new_float_var().cast(Interner),
                // Chalk can sometimes return new lifetime variables. We just use the static lifetime everywhere
                VariableKind::Lifetime => static_lifetime().cast(Interner),
                VariableKind::Const(ty) => ctx.new_const_var(ty.clone()).cast(Interner),
            }),
        );
        // `free_vars[i]` corresponds to the i-th binder of the canonical value.
        for (i, v) in solution.value.iter(Interner).enumerate() {
            let var = self.free_vars[i].clone();
            if let Some(ty) = v.ty(Interner) {
                // eagerly replace projections in the type; we may be getting types
                // e.g. from where clauses where this hasn't happened yet
                let ty = ctx.normalize_associated_types_in(new_vars.apply(ty.clone(), Interner));
                ctx.unify(var.assert_ty_ref(Interner), &ty);
            } else {
                // Non-type arguments (consts, lifetimes): best-effort unification.
                let _ = ctx.try_unify(&var, &new_vars.apply(v.clone(), Interner));
            }
        }
    }
}
/// Returns whether the two canonicalized types can be unified in the given
/// trait environment.
pub fn could_unify(
    db: &dyn HirDatabase,
    env: Arc<TraitEnvironment>,
    tys: &Canonical<(Ty, Ty)>,
) -> bool {
    // Unification succeeding (i.e. producing a substitution) is exactly the
    // "could unify" condition.
    let substitution = unify(db, env, tys);
    substitution.is_some()
}
/// Try to unify the two types of `tys` inside `env`.
///
/// On success, returns a substitution over the canonical binders of `tys`;
/// variables left unconstrained by unification are mapped back to their
/// original bound variables.
pub(crate) fn unify(
    db: &dyn HirDatabase,
    env: Arc<TraitEnvironment>,
    tys: &Canonical<(Ty, Ty)>,
) -> Option<Substitution> {
    let mut table = InferenceTable::new(db, env);
    // Instantiate every canonical binder with a fresh inference variable.
    let vars = Substitution::from_iter(
        Interner,
        tys.binders.iter(Interner).map(|x| match &x.kind {
            chalk_ir::VariableKind::Ty(_) => {
                GenericArgData::Ty(table.new_type_var()).intern(Interner)
            }
            chalk_ir::VariableKind::Lifetime => {
                GenericArgData::Ty(table.new_type_var()).intern(Interner)
            } // FIXME: maybe wrong?
            chalk_ir::VariableKind::Const(ty) => {
                GenericArgData::Const(table.new_const_var(ty.clone())).intern(Interner)
            }
        }),
    );
    let ty1_with_vars = vars.apply(tys.value.0.clone(), Interner);
    let ty2_with_vars = vars.apply(tys.value.1.clone(), Interner);
    if !table.unify(&ty1_with_vars, &ty2_with_vars) {
        return None;
    }
    // default any type vars that weren't unified back to their original bound vars
    // (kind of hacky)
    // Maps an inference var back to its position in `vars`, if it is one of ours.
    let find_var = |iv| {
        vars.iter(Interner).position(|v| match v.interned() {
            chalk_ir::GenericArgData::Ty(ty) => ty.inference_var(Interner),
            chalk_ir::GenericArgData::Lifetime(lt) => lt.inference_var(Interner),
            chalk_ir::GenericArgData::Const(c) => c.inference_var(Interner),
        } == Some(iv))
    };
    // Unresolved vars that came from `tys` become bound vars again; anything
    // else keeps the provided default.
    let fallback = |iv, kind, default, binder| match kind {
        chalk_ir::VariableKind::Ty(_ty_kind) => find_var(iv)
            .map_or(default, |i| BoundVar::new(binder, i).to_ty(Interner).cast(Interner)),
        chalk_ir::VariableKind::Lifetime => find_var(iv)
            .map_or(default, |i| BoundVar::new(binder, i).to_lifetime(Interner).cast(Interner)),
        chalk_ir::VariableKind::Const(ty) => find_var(iv)
            .map_or(default, |i| BoundVar::new(binder, i).to_const(Interner, ty).cast(Interner)),
    };
    Some(Substitution::from_iter(
        Interner,
        vars.iter(Interner).map(|v| table.resolve_with_fallback(v.clone(), &fallback)),
    ))
}
/// Per-type-variable bookkeeping kept alongside Chalk's unification table.
#[derive(Copy, Clone, Debug)]
pub(crate) struct TypeVariableData {
    // Whether this variable should fall back to `!`-like behavior (`Never`)
    // instead of `Error` when it remains unresolved.
    diverging: bool,
}
/// Shorthand for Chalk's unification table instantiated with our interner.
type ChalkInferenceTable = chalk_solve::infer::InferenceTable<Interner>;

/// The inference table: wraps Chalk's unification table and additionally
/// tracks per-variable data and not-yet-solved obligations.
#[derive(Clone)]
pub(crate) struct InferenceTable<'a> {
    pub(crate) db: &'a dyn HirDatabase,
    pub(crate) trait_env: Arc<TraitEnvironment>,
    // Chalk's unification table proper.
    var_unification_table: ChalkInferenceTable,
    // Extra data per type variable, indexed by the variable's index.
    type_variable_table: Vec<TypeVariableData>,
    // Canonicalized goals we could not prove yet; retried when variables change.
    pending_obligations: Vec<Canonicalized<InEnvironment<Goal>>>,
}
/// Snapshot of the inference table state, for speculative inference that can
/// be undone via `InferenceTable::rollback_to`.
pub(crate) struct InferenceTableSnapshot {
    var_table_snapshot: chalk_solve::infer::InferenceSnapshot<Interner>,
    // FIXME: snapshot pending_obligations?
    type_variable_table_snapshot: Vec<TypeVariableData>,
}
impl<'a> InferenceTable<'a> {
    /// Create an empty table for the given database and trait environment.
    pub(crate) fn new(db: &'a dyn HirDatabase, trait_env: Arc<TraitEnvironment>) -> Self {
        InferenceTable {
            db,
            trait_env,
            var_unification_table: ChalkInferenceTable::new(),
            type_variable_table: Vec::new(),
            pending_obligations: Vec::new(),
        }
    }

    /// Chalk doesn't know about the `diverging` flag, so when it unifies two
    /// type variables of which one is diverging, the chosen root might not be
    /// diverging and we have no way of marking it as such at that time. This
    /// function goes through all type variables and make sure their root is
    /// marked as diverging if necessary, so that resolving them gives the right
    /// result.
    pub(super) fn propagate_diverging_flag(&mut self) {
        for i in 0..self.type_variable_table.len() {
            if !self.type_variable_table[i].diverging {
                continue;
            }
            let v = InferenceVar::from(i as u32);
            // Copy the flag onto the current unification root of `v`.
            let root = self.var_unification_table.inference_var_root(v);
            if let Some(data) = self.type_variable_table.get_mut(root.index() as usize) {
                data.diverging = true;
            }
        }
    }

    /// Set the `diverging` flag for an existing type variable.
    pub(super) fn set_diverging(&mut self, iv: InferenceVar, diverging: bool) {
        self.type_variable_table[iv.index() as usize].diverging = diverging;
    }

    /// The type an unresolved variable falls back to: `Never` for diverging
    /// variables, `Error` for general ones, and the default integer/float
    /// types for literal variables.
    fn fallback_value(&self, iv: InferenceVar, kind: TyVariableKind) -> Ty {
        match kind {
            _ if self
                .type_variable_table
                .get(iv.index() as usize)
                .map_or(false, |data| data.diverging) =>
            {
                TyKind::Never
            }
            TyVariableKind::General => TyKind::Error,
            TyVariableKind::Integer => TyKind::Scalar(Scalar::Int(IntTy::I32)),
            TyVariableKind::Float => TyKind::Scalar(Scalar::Float(FloatTy::F64)),
        }
        .intern(Interner)
    }

    /// Canonicalize `t`, recording which free variables were replaced so a
    /// solution can be applied back later.
    pub(crate) fn canonicalize<T: Fold<Interner> + HasInterner<Interner = Interner>>(
        &mut self,
        t: T,
    ) -> Canonicalized<T::Result>
    where
        T::Result: HasInterner<Interner = Interner>,
    {
        // try to resolve obligations before canonicalizing, since this might
        // result in new knowledge about variables
        self.resolve_obligations_as_possible();
        let result = self.var_unification_table.canonicalize(Interner, t);
        let free_vars = result
            .free_vars
            .into_iter()
            .map(|free_var| free_var.to_generic_arg(Interner))
            .collect();
        Canonicalized { value: result.quantified, free_vars }
    }

    /// Recurses through the given type, normalizing associated types mentioned
    /// in it by replacing them by type variables and registering obligations to
    /// resolve later. This should be done once for every type we get from some
    /// type annotation (e.g. from a let type annotation, field type or function
    /// call). `make_ty` handles this already, but e.g. for field types we need
    /// to do it as well.
    pub(crate) fn normalize_associated_types_in(&mut self, ty: Ty) -> Ty {
        fold_tys(
            ty,
            |ty, _| match ty.kind(Interner) {
                TyKind::Alias(AliasTy::Projection(proj_ty)) => {
                    self.normalize_projection_ty(proj_ty.clone())
                }
                _ => ty,
            },
            DebruijnIndex::INNERMOST,
        )
    }

    /// Replace a projection type by a fresh variable and register an
    /// `AliasEq` obligation tying the two together.
    pub(crate) fn normalize_projection_ty(&mut self, proj_ty: ProjectionTy) -> Ty {
        let var = self.new_type_var();
        let alias_eq = AliasEq { alias: AliasTy::Projection(proj_ty), ty: var.clone() };
        let obligation = alias_eq.cast(Interner);
        self.register_obligation(obligation);
        var
    }

    /// Grow `type_variable_table` so that index `to_index` is valid.
    fn extend_type_variable_table(&mut self, to_index: usize) {
        self.type_variable_table.extend(
            (0..1 + to_index - self.type_variable_table.len())
                .map(|_| TypeVariableData { diverging: false }),
        );
    }

    /// Create a fresh type variable of the given kind and record its flags.
    fn new_var(&mut self, kind: TyVariableKind, diverging: bool) -> Ty {
        let var = self.var_unification_table.new_variable(UniverseIndex::ROOT);
        // Chalk might have created some type variables for its own purposes that we don't know about...
        self.extend_type_variable_table(var.index() as usize);
        assert_eq!(var.index() as usize, self.type_variable_table.len() - 1);
        self.type_variable_table[var.index() as usize].diverging = diverging;
        var.to_ty_with_kind(Interner, kind)
    }

    /// Fresh general type variable.
    pub(crate) fn new_type_var(&mut self) -> Ty {
        self.new_var(TyVariableKind::General, false)
    }

    /// Fresh integer-literal type variable.
    pub(crate) fn new_integer_var(&mut self) -> Ty {
        self.new_var(TyVariableKind::Integer, false)
    }

    /// Fresh float-literal type variable.
    pub(crate) fn new_float_var(&mut self) -> Ty {
        self.new_var(TyVariableKind::Float, false)
    }

    /// Fresh general type variable marked as diverging.
    pub(crate) fn new_maybe_never_var(&mut self) -> Ty {
        self.new_var(TyVariableKind::General, true)
    }

    /// Fresh const variable of the given type.
    pub(crate) fn new_const_var(&mut self, ty: Ty) -> Const {
        let var = self.var_unification_table.new_variable(UniverseIndex::ROOT);
        var.to_const(Interner, ty)
    }

    /// Fresh lifetime variable.
    pub(crate) fn new_lifetime_var(&mut self) -> Lifetime {
        let var = self.var_unification_table.new_variable(UniverseIndex::ROOT);
        var.to_lifetime(Interner)
    }

    /// Resolve all inference variables in `t`, using `fallback` for any
    /// variable that has no known value.
    pub(crate) fn resolve_with_fallback<T>(
        &mut self,
        t: T,
        fallback: &dyn Fn(InferenceVar, VariableKind, GenericArg, DebruijnIndex) -> GenericArg,
    ) -> T::Result
    where
        T: HasInterner<Interner = Interner> + Fold<Interner>,
    {
        self.resolve_with_fallback_inner(&mut Vec::new(), t, &fallback)
    }

    /// Instantiate the binders of a canonical value with fresh variables.
    pub(crate) fn instantiate_canonical<T>(&mut self, canonical: Canonical<T>) -> T::Result
    where
        T: HasInterner<Interner = Interner> + Fold<Interner> + std::fmt::Debug,
    {
        self.var_unification_table.instantiate_canonical(Interner, canonical)
    }

    // `var_stack` tracks the variables currently being resolved so recursive
    // types don't cause infinite recursion (see `resolve::Resolver`).
    fn resolve_with_fallback_inner<T>(
        &mut self,
        var_stack: &mut Vec<InferenceVar>,
        t: T,
        fallback: &dyn Fn(InferenceVar, VariableKind, GenericArg, DebruijnIndex) -> GenericArg,
    ) -> T::Result
    where
        T: HasInterner<Interner = Interner> + Fold<Interner>,
    {
        t.fold_with(
            &mut resolve::Resolver { table: self, var_stack, fallback },
            DebruijnIndex::INNERMOST,
        )
        .expect("fold failed unexpectedly")
    }

    /// Resolve all variables in `t`, defaulting unresolved ones (no fallback
    /// beyond the built-in defaults).
    pub(crate) fn resolve_completely<T>(&mut self, t: T) -> T::Result
    where
        T: HasInterner<Interner = Interner> + Fold<Interner>,
    {
        self.resolve_with_fallback(t, &|_, _, d, _| d)
    }

    /// Unify two types and register new trait goals that arise from that.
    pub(crate) fn unify(&mut self, ty1: &Ty, ty2: &Ty) -> bool {
        let result = match self.try_unify(ty1, ty2) {
            Ok(r) => r,
            Err(_) => return false,
        };
        self.register_infer_ok(result);
        true
    }

    /// Unify two types and return new trait goals arising from it, so the
    /// caller needs to deal with them.
    pub(crate) fn try_unify<T: Zip<Interner>>(&mut self, t1: &T, t2: &T) -> InferResult<()> {
        match self.var_unification_table.relate(
            Interner,
            &self.db,
            &self.trait_env.env,
            chalk_ir::Variance::Invariant,
            t1,
            t2,
        ) {
            Ok(result) => Ok(InferOk { goals: result.goals, value: () }),
            Err(chalk_ir::NoSolution) => Err(TypeError),
        }
    }

    /// If `ty` is a type variable with known type, returns that type;
    /// otherwise, return ty.
    pub(crate) fn resolve_ty_shallow(&mut self, ty: &Ty) -> Ty {
        self.resolve_obligations_as_possible();
        self.var_unification_table.normalize_ty_shallow(Interner, ty).unwrap_or_else(|| ty.clone())
    }

    /// Take a snapshot of the current table state (see `rollback_to`).
    pub(crate) fn snapshot(&mut self) -> InferenceTableSnapshot {
        let var_table_snapshot = self.var_unification_table.snapshot();
        let type_variable_table_snapshot = self.type_variable_table.clone();
        InferenceTableSnapshot { var_table_snapshot, type_variable_table_snapshot }
    }

    /// Undo all changes made since `snapshot` was taken.
    pub(crate) fn rollback_to(&mut self, snapshot: InferenceTableSnapshot) {
        self.var_unification_table.rollback_to(snapshot.var_table_snapshot);
        self.type_variable_table = snapshot.type_variable_table_snapshot;
    }

    /// Checks an obligation without registering it. Useful mostly to check
    /// whether a trait *might* be implemented before deciding to 'lock in' the
    /// choice (during e.g. method resolution or deref).
    pub(crate) fn try_obligation(&mut self, goal: Goal) -> Option<Solution> {
        let in_env = InEnvironment::new(&self.trait_env.env, goal);
        let canonicalized = self.canonicalize(in_env);
        let solution = self.db.trait_solve(self.trait_env.krate, canonicalized.value);
        solution
    }

    /// Register a goal in the current trait environment as an obligation.
    pub(crate) fn register_obligation(&mut self, goal: Goal) {
        let in_env = InEnvironment::new(&self.trait_env.env, goal);
        self.register_obligation_in_env(in_env)
    }

    // Try to solve the goal immediately; keep it pending if that fails.
    fn register_obligation_in_env(&mut self, goal: InEnvironment<Goal>) {
        let canonicalized = self.canonicalize(goal);
        if !self.try_resolve_obligation(&canonicalized) {
            self.pending_obligations.push(canonicalized);
        }
    }

    /// Register all goals carried by a successful unification result.
    pub(crate) fn register_infer_ok<T>(&mut self, infer_ok: InferOk<T>) {
        infer_ok.goals.into_iter().for_each(|goal| self.register_obligation_in_env(goal));
    }

    /// Retry pending obligations until a fixed point: an obligation is only
    /// retried if one of its free variables has changed since it was queued.
    pub(crate) fn resolve_obligations_as_possible(&mut self) {
        let _span = profile::span("resolve_obligations_as_possible");
        let mut changed = true;
        let mut obligations = Vec::new();
        while changed {
            changed = false;
            mem::swap(&mut self.pending_obligations, &mut obligations);
            for canonicalized in obligations.drain(..) {
                if !self.check_changed(&canonicalized) {
                    // Nothing about this goal changed; keep it pending.
                    self.pending_obligations.push(canonicalized);
                    continue;
                }
                changed = true;
                // Re-substitute the original free variables and re-register.
                let uncanonical = chalk_ir::Substitute::apply(
                    &canonicalized.free_vars,
                    canonicalized.value.value,
                    Interner,
                );
                self.register_obligation_in_env(uncanonical);
            }
        }
    }

    /// Run `f` speculatively: any inference variables it creates are rolled
    /// back afterwards, and occurrences of them in the result are replaced by
    /// fresh variables ("fudging").
    pub(crate) fn fudge_inference<T: Fold<Interner>>(
        &mut self,
        f: impl FnOnce(&mut Self) -> T,
    ) -> T::Result {
        use chalk_ir::fold::Folder;
        // Replaces variables newer than `highest_known_var` with fresh ones.
        struct VarFudger<'a, 'b> {
            table: &'a mut InferenceTable<'b>,
            highest_known_var: InferenceVar,
        }
        impl<'a, 'b> Folder<Interner> for VarFudger<'a, 'b> {
            type Error = NoSolution;

            fn as_dyn(&mut self) -> &mut dyn Folder<Interner, Error = Self::Error> {
                self
            }

            fn interner(&self) -> Interner {
                Interner
            }

            fn fold_inference_ty(
                &mut self,
                var: chalk_ir::InferenceVar,
                kind: TyVariableKind,
                _outer_binder: chalk_ir::DebruijnIndex,
            ) -> chalk_ir::Fallible<chalk_ir::Ty<Interner>> {
                Ok(if var < self.highest_known_var {
                    var.to_ty(Interner, kind)
                } else {
                    self.table.new_type_var()
                })
            }

            fn fold_inference_lifetime(
                &mut self,
                var: chalk_ir::InferenceVar,
                _outer_binder: chalk_ir::DebruijnIndex,
            ) -> chalk_ir::Fallible<chalk_ir::Lifetime<Interner>> {
                Ok(if var < self.highest_known_var {
                    var.to_lifetime(Interner)
                } else {
                    self.table.new_lifetime_var()
                })
            }

            fn fold_inference_const(
                &mut self,
                ty: chalk_ir::Ty<Interner>,
                var: chalk_ir::InferenceVar,
                _outer_binder: chalk_ir::DebruijnIndex,
            ) -> chalk_ir::Fallible<chalk_ir::Const<Interner>> {
                Ok(if var < self.highest_known_var {
                    var.to_const(Interner, ty)
                } else {
                    self.table.new_const_var(ty)
                })
            }
        }

        // Take a snapshot, run `f`, roll back, then fudge the result.
        let snapshot = self.snapshot();
        let highest_known_var = self.new_type_var().inference_var(Interner).expect("inference_var");
        let result = f(self);
        self.rollback_to(snapshot);
        result
            .fold_with(&mut VarFudger { table: self, highest_known_var }, DebruijnIndex::INNERMOST)
            .expect("fold_with with VarFudger")
    }

    /// This checks whether any of the free variables in the `canonicalized`
    /// have changed (either been unified with another variable, or with a
    /// value). If this is not the case, we don't need to try to solve the goal
    /// again -- it'll give the same result as last time.
    fn check_changed(&mut self, canonicalized: &Canonicalized<InEnvironment<Goal>>) -> bool {
        canonicalized.free_vars.iter().any(|var| {
            let iv = match var.data(Interner) {
                chalk_ir::GenericArgData::Ty(ty) => ty.inference_var(Interner),
                chalk_ir::GenericArgData::Lifetime(lt) => lt.inference_var(Interner),
                chalk_ir::GenericArgData::Const(c) => c.inference_var(Interner),
            }
            .expect("free var is not inference var");
            if self.var_unification_table.probe_var(iv).is_some() {
                return true;
            }
            // Variable changed if it was unified into a different root.
            let root = self.var_unification_table.inference_var_root(iv);
            iv != root
        })
    }

    // Ask the trait solver about the goal; on a definite or guided solution,
    // apply it back to the table. Returns whether the obligation is done
    // (either solved or known unfulfillable).
    fn try_resolve_obligation(
        &mut self,
        canonicalized: &Canonicalized<InEnvironment<Goal>>,
    ) -> bool {
        let solution = self.db.trait_solve(self.trait_env.krate, canonicalized.value.clone());

        match solution {
            Some(Solution::Unique(canonical_subst)) => {
                canonicalized.apply_solution(
                    self,
                    Canonical {
                        binders: canonical_subst.binders,
                        // FIXME: handle constraints
                        value: canonical_subst.value.subst,
                    },
                );
                true
            }
            Some(Solution::Ambig(Guidance::Definite(substs))) => {
                canonicalized.apply_solution(self, substs);
                false
            }
            Some(_) => {
                // FIXME use this when trying to resolve everything at the end
                false
            }
            None => {
                // FIXME obligation cannot be fulfilled => diagnostic
                true
            }
        }
    }

    /// The callable signature (params, return type) of `ty`, either directly
    /// or via an `FnOnce` trait implementation.
    pub(crate) fn callable_sig(&mut self, ty: &Ty, num_args: usize) -> Option<(Vec<Ty>, Ty)> {
        match ty.callable_sig(self.db) {
            Some(sig) => Some((sig.params().to_vec(), sig.ret().clone())),
            None => self.callable_sig_from_fn_trait(ty, num_args),
        }
    }

    // Build `<ty as FnOnce<(v0, .., vn)>>::Output` with fresh argument
    // variables and ask the solver whether it holds.
    fn callable_sig_from_fn_trait(&mut self, ty: &Ty, num_args: usize) -> Option<(Vec<Ty>, Ty)> {
        let krate = self.trait_env.krate;
        let fn_once_trait = FnTrait::FnOnce.get_id(self.db, krate)?;
        let output_assoc_type =
            self.db.trait_data(fn_once_trait).associated_type_by_name(&name![Output])?;

        let mut arg_tys = vec![];
        // The argument tuple: one fresh type variable per expected argument.
        let arg_ty = TyBuilder::tuple(num_args)
            .fill(|x| {
                let arg = match x {
                    ParamKind::Type => self.new_type_var(),
                    ParamKind::Const(ty) => {
                        never!("Tuple with const parameter");
                        return GenericArgData::Const(self.new_const_var(ty.clone()))
                            .intern(Interner);
                    }
                };
                arg_tys.push(arg.clone());
                GenericArgData::Ty(arg).intern(Interner)
            })
            .build();

        let projection = {
            let b = TyBuilder::assoc_type_projection(self.db, output_assoc_type);
            if b.remaining() != 2 {
                return None;
            }
            b.push(ty.clone()).push(arg_ty).build()
        };

        let trait_env = self.trait_env.env.clone();
        let obligation = InEnvironment {
            goal: projection.trait_ref(self.db).cast(Interner),
            environment: trait_env,
        };
        let canonical = self.canonicalize(obligation.clone());
        if self.db.trait_solve(krate, canonical.value.cast(Interner)).is_some() {
            self.register_obligation(obligation.goal);
            let return_ty = self.normalize_projection_ty(projection);
            Some((arg_tys, return_ty))
        } else {
            None
        }
    }
}
impl<'a> fmt::Debug for InferenceTable<'a> {
    // Render a compact summary; only the variable count is shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let num_vars = self.type_variable_table.len();
        f.debug_struct("InferenceTable")
            .field("num_vars", &num_vars)
            .finish()
    }
}
/// Folder-based resolution of inference variables, used by
/// `InferenceTable::resolve_with_fallback`.
mod resolve {
    use super::InferenceTable;
    use crate::{
        ConcreteConst, Const, ConstData, ConstValue, DebruijnIndex, GenericArg, InferenceVar,
        Interner, Lifetime, Ty, TyVariableKind, VariableKind,
    };
    use chalk_ir::{
        cast::Cast,
        fold::{Fold, Folder},
        Fallible, NoSolution,
    };
    use hir_def::type_ref::ConstScalar;

    /// Replaces inference variables by their known values, applying
    /// `fallback` when a variable is unknown or recursively defined.
    pub(super) struct Resolver<'a, 'b, F> {
        pub(super) table: &'a mut InferenceTable<'b>,
        // Variables currently being resolved, to detect recursive types.
        pub(super) var_stack: &'a mut Vec<InferenceVar>,
        pub(super) fallback: F,
    }
    impl<'a, 'b, 'i, F> Folder<Interner> for Resolver<'a, 'b, F>
    where
        F: Fn(InferenceVar, VariableKind, GenericArg, DebruijnIndex) -> GenericArg + 'i,
    {
        type Error = NoSolution;

        fn as_dyn(&mut self) -> &mut dyn Folder<Interner, Error = Self::Error> {
            self
        }

        fn interner(&self) -> Interner {
            Interner
        }

        fn fold_inference_ty(
            &mut self,
            var: InferenceVar,
            kind: TyVariableKind,
            outer_binder: DebruijnIndex,
        ) -> Fallible<Ty> {
            // Always work with the unification root of the variable.
            let var = self.table.var_unification_table.inference_var_root(var);
            if self.var_stack.contains(&var) {
                // recursive type
                let default = self.table.fallback_value(var, kind).cast(Interner);
                return Ok((self.fallback)(var, VariableKind::Ty(kind), default, outer_binder)
                    .assert_ty_ref(Interner)
                    .clone());
            }
            let result = if let Some(known_ty) = self.table.var_unification_table.probe_var(var) {
                // known_ty may contain other variables that are known by now
                self.var_stack.push(var);
                let result =
                    known_ty.fold_with(self, outer_binder).expect("fold failed unexpectedly");
                self.var_stack.pop();
                result.assert_ty_ref(Interner).clone()
            } else {
                let default = self.table.fallback_value(var, kind).cast(Interner);
                (self.fallback)(var, VariableKind::Ty(kind), default, outer_binder)
                    .assert_ty_ref(Interner)
                    .clone()
            };
            Ok(result)
        }

        fn fold_inference_const(
            &mut self,
            ty: Ty,
            var: InferenceVar,
            outer_binder: DebruijnIndex,
        ) -> Fallible<Const> {
            let var = self.table.var_unification_table.inference_var_root(var);
            // Default for an unknown const: an "unknown" concrete const of `ty`.
            let default = ConstData {
                ty: ty.clone(),
                value: ConstValue::Concrete(ConcreteConst { interned: ConstScalar::Unknown }),
            }
            .intern(Interner)
            .cast(Interner);
            if self.var_stack.contains(&var) {
                // recursive
                return Ok((self.fallback)(var, VariableKind::Const(ty), default, outer_binder)
                    .assert_const_ref(Interner)
                    .clone());
            }
            let result = if let Some(known_ty) = self.table.var_unification_table.probe_var(var) {
                // known_ty may contain other variables that are known by now
                self.var_stack.push(var);
                let result =
                    known_ty.fold_with(self, outer_binder).expect("fold failed unexpectedly");
                self.var_stack.pop();
                result.assert_const_ref(Interner).clone()
            } else {
                (self.fallback)(var, VariableKind::Const(ty), default, outer_binder)
                    .assert_const_ref(Interner)
                    .clone()
            };
            Ok(result)
        }

        fn fold_inference_lifetime(
            &mut self,
            _var: InferenceVar,
            _outer_binder: DebruijnIndex,
        ) -> Fallible<Lifetime> {
            // fall back all lifetimes to 'static -- currently we don't deal
            // with any lifetimes, but we can sometimes get some lifetime
            // variables through Chalk's unification, and this at least makes
            // sure we don't leak them outside of inference
            Ok(crate::static_lifetime())
        }
    }
}
Snapshot obligations
//! Unification and canonicalization logic.
use std::{fmt, mem, sync::Arc};
use chalk_ir::{
cast::Cast, fold::Fold, interner::HasInterner, zip::Zip, FloatTy, IntTy, NoSolution,
TyVariableKind, UniverseIndex,
};
use chalk_solve::infer::ParameterEnaVariableExt;
use ena::unify::UnifyKey;
use hir_expand::name;
use stdx::never;
use super::{InferOk, InferResult, InferenceContext, TypeError};
use crate::{
db::HirDatabase, fold_tys, static_lifetime, traits::FnTrait, AliasEq, AliasTy, BoundVar,
Canonical, Const, DebruijnIndex, GenericArg, GenericArgData, Goal, Guidance, InEnvironment,
InferenceVar, Interner, Lifetime, ParamKind, ProjectionTy, ProjectionTyExt, Scalar, Solution,
Substitution, TraitEnvironment, Ty, TyBuilder, TyExt, TyKind, VariableKind,
};
impl<'a> InferenceContext<'a> {
    /// Convenience wrapper forwarding to the inner table's `canonicalize`.
    pub(super) fn canonicalize<T: Fold<Interner> + HasInterner<Interner = Interner>>(
        &mut self,
        t: T,
    ) -> Canonicalized<T::Result>
    where
        T::Result: HasInterner<Interner = Interner>,
    {
        self.table.canonicalize(t)
    }
}
/// A canonicalized value paired with the free variables that were
/// canonicalized away, so that a solution found for the canonical form can
/// later be mapped back onto the table's inference variables.
#[derive(Debug, Clone)]
pub(crate) struct Canonicalized<T>
where
    T: HasInterner<Interner = Interner>,
{
    pub(crate) value: Canonical<T>,
    // The original inference variables, in binder order of `value`.
    free_vars: Vec<GenericArg>,
}
impl<T: HasInterner<Interner = Interner>> Canonicalized<T> {
    /// Applies a solver solution back onto the inference table: each free
    /// variable of this canonicalized value is unified with the corresponding
    /// entry of the solution substitution.
    pub(super) fn apply_solution(
        &self,
        ctx: &mut InferenceTable,
        solution: Canonical<Substitution>,
    ) {
        // the solution may contain new variables, which we need to convert to new inference vars
        let new_vars = Substitution::from_iter(
            Interner,
            solution.binders.iter(Interner).map(|k| match &k.kind {
                VariableKind::Ty(TyVariableKind::General) => ctx.new_type_var().cast(Interner),
                VariableKind::Ty(TyVariableKind::Integer) => ctx.new_integer_var().cast(Interner),
                VariableKind::Ty(TyVariableKind::Float) => ctx.new_float_var().cast(Interner),
                // Chalk can sometimes return new lifetime variables. We just use the static lifetime everywhere
                VariableKind::Lifetime => static_lifetime().cast(Interner),
                VariableKind::Const(ty) => ctx.new_const_var(ty.clone()).cast(Interner),
            }),
        );
        // Unify each original free variable with its solved counterpart.
        for (i, v) in solution.value.iter(Interner).enumerate() {
            let var = self.free_vars[i].clone();
            if let Some(ty) = v.ty(Interner) {
                // eagerly replace projections in the type; we may be getting types
                // e.g. from where clauses where this hasn't happened yet
                let ty = ctx.normalize_associated_types_in(new_vars.apply(ty.clone(), Interner));
                ctx.unify(var.assert_ty_ref(Interner), &ty);
            } else {
                // Non-type args (lifetimes, consts) go through generic unification.
                let _ = ctx.try_unify(&var, &new_vars.apply(v.clone(), Interner));
            }
        }
    }
}
/// Returns whether the two canonicalized types can be unified in the given
/// trait environment. The substitution computed by [`unify`] is discarded;
/// only success or failure is reported.
pub fn could_unify(
    db: &dyn HirDatabase,
    env: Arc<TraitEnvironment>,
    tys: &Canonical<(Ty, Ty)>,
) -> bool {
    // Unification succeeds exactly when a substitution exists.
    matches!(unify(db, env, tys), Some(_))
}
/// Attempts to unify the two canonicalized types, returning on success a
/// substitution for the canonical binders in terms of bound variables.
pub(crate) fn unify(
    db: &dyn HirDatabase,
    env: Arc<TraitEnvironment>,
    tys: &Canonical<(Ty, Ty)>,
) -> Option<Substitution> {
    let mut table = InferenceTable::new(db, env);
    // Instantiate each canonical binder with a fresh inference variable.
    let vars = Substitution::from_iter(
        Interner,
        tys.binders.iter(Interner).map(|x| match &x.kind {
            chalk_ir::VariableKind::Ty(_) => {
                GenericArgData::Ty(table.new_type_var()).intern(Interner)
            }
            chalk_ir::VariableKind::Lifetime => {
                GenericArgData::Ty(table.new_type_var()).intern(Interner)
            } // FIXME: maybe wrong?
            chalk_ir::VariableKind::Const(ty) => {
                GenericArgData::Const(table.new_const_var(ty.clone())).intern(Interner)
            }
        }),
    );
    let ty1_with_vars = vars.apply(tys.value.0.clone(), Interner);
    let ty2_with_vars = vars.apply(tys.value.1.clone(), Interner);
    if !table.unify(&ty1_with_vars, &ty2_with_vars) {
        return None;
    }
    // default any type vars that weren't unified back to their original bound vars
    // (kind of hacky)
    // Maps an inference variable back to its position among the fresh `vars`.
    let find_var = |iv| {
        vars.iter(Interner).position(|v| match v.interned() {
            chalk_ir::GenericArgData::Ty(ty) => ty.inference_var(Interner),
            chalk_ir::GenericArgData::Lifetime(lt) => lt.inference_var(Interner),
            chalk_ir::GenericArgData::Const(c) => c.inference_var(Interner),
        } == Some(iv))
    };
    // Unresolved variables that stem from the canonical binders become bound
    // variables again; anything else keeps the default fallback value.
    let fallback = |iv, kind, default, binder| match kind {
        chalk_ir::VariableKind::Ty(_ty_kind) => find_var(iv)
            .map_or(default, |i| BoundVar::new(binder, i).to_ty(Interner).cast(Interner)),
        chalk_ir::VariableKind::Lifetime => find_var(iv)
            .map_or(default, |i| BoundVar::new(binder, i).to_lifetime(Interner).cast(Interner)),
        chalk_ir::VariableKind::Const(ty) => find_var(iv)
            .map_or(default, |i| BoundVar::new(binder, i).to_const(Interner, ty).cast(Interner)),
    };
    Some(Substitution::from_iter(
        Interner,
        vars.iter(Interner).map(|v| table.resolve_with_fallback(v.clone(), &fallback)),
    ))
}
/// Per-inference-variable bookkeeping that Chalk doesn't track for us.
#[derive(Copy, Clone, Debug)]
pub(crate) struct TypeVariableData {
    // Variables marked diverging fall back to `!` instead of `{error}`
    // (see `InferenceTable::fallback_value`).
    diverging: bool,
}
// Shorthand for Chalk's unification table instantiated with our interner.
type ChalkInferenceTable = chalk_solve::infer::InferenceTable<Interner>;
/// The core type-inference state: wraps Chalk's unification table and adds
/// rust-analyzer-specific data (diverging flags, pending trait obligations).
#[derive(Clone)]
pub(crate) struct InferenceTable<'a> {
    pub(crate) db: &'a dyn HirDatabase,
    pub(crate) trait_env: Arc<TraitEnvironment>,
    var_unification_table: ChalkInferenceTable,
    // Indexed by inference-variable index; grown lazily in
    // `extend_type_variable_table`.
    type_variable_table: Vec<TypeVariableData>,
    // Obligations that couldn't be solved yet, kept canonicalized so we can
    // cheaply detect whether their variables changed (`check_changed`).
    pending_obligations: Vec<Canonicalized<InEnvironment<Goal>>>,
}
/// Captured state of an [`InferenceTable`], produced by
/// `InferenceTable::snapshot` and consumed by `InferenceTable::rollback_to`.
pub(crate) struct InferenceTableSnapshot {
    var_table_snapshot: chalk_solve::infer::InferenceSnapshot<Interner>,
    pending_obligations: Vec<Canonicalized<InEnvironment<Goal>>>,
    type_variable_table_snapshot: Vec<TypeVariableData>,
}
impl<'a> InferenceTable<'a> {
    /// Creates a fresh, empty inference table for the given trait environment.
    pub(crate) fn new(db: &'a dyn HirDatabase, trait_env: Arc<TraitEnvironment>) -> Self {
        InferenceTable {
            db,
            trait_env,
            var_unification_table: ChalkInferenceTable::new(),
            type_variable_table: Vec::new(),
            pending_obligations: Vec::new(),
        }
    }
    /// Chalk doesn't know about the `diverging` flag, so when it unifies two
    /// type variables of which one is diverging, the chosen root might not be
    /// diverging and we have no way of marking it as such at that time. This
    /// function goes through all type variables and make sure their root is
    /// marked as diverging if necessary, so that resolving them gives the right
    /// result.
    pub(super) fn propagate_diverging_flag(&mut self) {
        for i in 0..self.type_variable_table.len() {
            if !self.type_variable_table[i].diverging {
                continue;
            }
            let v = InferenceVar::from(i as u32);
            let root = self.var_unification_table.inference_var_root(v);
            if let Some(data) = self.type_variable_table.get_mut(root.index() as usize) {
                data.diverging = true;
            }
        }
    }
    /// Sets the diverging flag of an inference variable.
    pub(super) fn set_diverging(&mut self, iv: InferenceVar, diverging: bool) {
        self.type_variable_table[iv.index() as usize].diverging = diverging;
    }
    /// The type an unresolved variable resolves to: `!` for diverging
    /// variables, `{error}` for general ones, and the language defaults
    /// `i32` / `f64` for integer and float variables.
    fn fallback_value(&self, iv: InferenceVar, kind: TyVariableKind) -> Ty {
        match kind {
            // The diverging flag wins over the variable kind.
            _ if self
                .type_variable_table
                .get(iv.index() as usize)
                .map_or(false, |data| data.diverging) =>
            {
                TyKind::Never
            }
            TyVariableKind::General => TyKind::Error,
            TyVariableKind::Integer => TyKind::Scalar(Scalar::Int(IntTy::I32)),
            TyVariableKind::Float => TyKind::Scalar(Scalar::Float(FloatTy::F64)),
        }
        .intern(Interner)
    }
    /// Canonicalizes `t`, remembering its free variables so a solver solution
    /// can later be applied back via `Canonicalized::apply_solution`.
    pub(crate) fn canonicalize<T: Fold<Interner> + HasInterner<Interner = Interner>>(
        &mut self,
        t: T,
    ) -> Canonicalized<T::Result>
    where
        T::Result: HasInterner<Interner = Interner>,
    {
        // try to resolve obligations before canonicalizing, since this might
        // result in new knowledge about variables
        self.resolve_obligations_as_possible();
        let result = self.var_unification_table.canonicalize(Interner, t);
        let free_vars = result
            .free_vars
            .into_iter()
            .map(|free_var| free_var.to_generic_arg(Interner))
            .collect();
        Canonicalized { value: result.quantified, free_vars }
    }
    /// Recurses through the given type, normalizing associated types mentioned
    /// in it by replacing them by type variables and registering obligations to
    /// resolve later. This should be done once for every type we get from some
    /// type annotation (e.g. from a let type annotation, field type or function
    /// call). `make_ty` handles this already, but e.g. for field types we need
    /// to do it as well.
    pub(crate) fn normalize_associated_types_in(&mut self, ty: Ty) -> Ty {
        fold_tys(
            ty,
            |ty, _| match ty.kind(Interner) {
                TyKind::Alias(AliasTy::Projection(proj_ty)) => {
                    self.normalize_projection_ty(proj_ty.clone())
                }
                _ => ty,
            },
            DebruijnIndex::INNERMOST,
        )
    }
    /// Replaces a projection type with a fresh type variable and registers an
    /// `AliasEq` obligation tying the two together.
    pub(crate) fn normalize_projection_ty(&mut self, proj_ty: ProjectionTy) -> Ty {
        let var = self.new_type_var();
        let alias_eq = AliasEq { alias: AliasTy::Projection(proj_ty), ty: var.clone() };
        let obligation = alias_eq.cast(Interner);
        self.register_obligation(obligation);
        var
    }
    /// Grows `type_variable_table` (with non-diverging defaults) so that
    /// index `to_index` is valid.
    fn extend_type_variable_table(&mut self, to_index: usize) {
        self.type_variable_table.extend(
            (0..1 + to_index - self.type_variable_table.len())
                .map(|_| TypeVariableData { diverging: false }),
        );
    }
    /// Allocates a new type inference variable of the given kind and records
    /// its diverging flag.
    fn new_var(&mut self, kind: TyVariableKind, diverging: bool) -> Ty {
        let var = self.var_unification_table.new_variable(UniverseIndex::ROOT);
        // Chalk might have created some type variables for its own purposes that we don't know about...
        self.extend_type_variable_table(var.index() as usize);
        assert_eq!(var.index() as usize, self.type_variable_table.len() - 1);
        self.type_variable_table[var.index() as usize].diverging = diverging;
        var.to_ty_with_kind(Interner, kind)
    }
    /// Fresh general type variable.
    pub(crate) fn new_type_var(&mut self) -> Ty {
        self.new_var(TyVariableKind::General, false)
    }
    /// Fresh integer-kinded type variable (`{integer}`).
    pub(crate) fn new_integer_var(&mut self) -> Ty {
        self.new_var(TyVariableKind::Integer, false)
    }
    /// Fresh float-kinded type variable (`{float}`).
    pub(crate) fn new_float_var(&mut self) -> Ty {
        self.new_var(TyVariableKind::Float, false)
    }
    /// Fresh general type variable marked diverging (falls back to `!`).
    pub(crate) fn new_maybe_never_var(&mut self) -> Ty {
        self.new_var(TyVariableKind::General, true)
    }
    /// Fresh const inference variable of the given type.
    pub(crate) fn new_const_var(&mut self, ty: Ty) -> Const {
        let var = self.var_unification_table.new_variable(UniverseIndex::ROOT);
        var.to_const(Interner, ty)
    }
    /// Fresh lifetime inference variable.
    pub(crate) fn new_lifetime_var(&mut self) -> Lifetime {
        let var = self.var_unification_table.new_variable(UniverseIndex::ROOT);
        var.to_lifetime(Interner)
    }
    /// Resolves all inference variables in `t`; any variable that is still
    /// unresolved (or recursive) is passed to `fallback` together with a
    /// default replacement value.
    pub(crate) fn resolve_with_fallback<T>(
        &mut self,
        t: T,
        fallback: &dyn Fn(InferenceVar, VariableKind, GenericArg, DebruijnIndex) -> GenericArg,
    ) -> T::Result
    where
        T: HasInterner<Interner = Interner> + Fold<Interner>,
    {
        self.resolve_with_fallback_inner(&mut Vec::new(), t, &fallback)
    }
    /// Replaces the bound variables of `canonical` with fresh inference
    /// variables from this table.
    pub(crate) fn instantiate_canonical<T>(&mut self, canonical: Canonical<T>) -> T::Result
    where
        T: HasInterner<Interner = Interner> + Fold<Interner> + std::fmt::Debug,
    {
        self.var_unification_table.instantiate_canonical(Interner, canonical)
    }
    // Worker for `resolve_with_fallback`; `var_stack` is threaded through the
    // folder to detect cyclic variable definitions.
    fn resolve_with_fallback_inner<T>(
        &mut self,
        var_stack: &mut Vec<InferenceVar>,
        t: T,
        fallback: &dyn Fn(InferenceVar, VariableKind, GenericArg, DebruijnIndex) -> GenericArg,
    ) -> T::Result
    where
        T: HasInterner<Interner = Interner> + Fold<Interner>,
    {
        t.fold_with(
            &mut resolve::Resolver { table: self, var_stack, fallback },
            DebruijnIndex::INNERMOST,
        )
        .expect("fold failed unexpectedly")
    }
    /// Resolves all variables in `t`, keeping the default replacement for
    /// anything unresolved (no custom fallback).
    pub(crate) fn resolve_completely<T>(&mut self, t: T) -> T::Result
    where
        T: HasInterner<Interner = Interner> + Fold<Interner>,
    {
        self.resolve_with_fallback(t, &|_, _, d, _| d)
    }
    /// Unify two types and register new trait goals that arise from that.
    pub(crate) fn unify(&mut self, ty1: &Ty, ty2: &Ty) -> bool {
        let result = match self.try_unify(ty1, ty2) {
            Ok(r) => r,
            Err(_) => return false,
        };
        self.register_infer_ok(result);
        true
    }
    /// Unify two types and return new trait goals arising from it, so the
    /// caller needs to deal with them.
    pub(crate) fn try_unify<T: Zip<Interner>>(&mut self, t1: &T, t2: &T) -> InferResult<()> {
        match self.var_unification_table.relate(
            Interner,
            &self.db,
            &self.trait_env.env,
            chalk_ir::Variance::Invariant,
            t1,
            t2,
        ) {
            Ok(result) => Ok(InferOk { goals: result.goals, value: () }),
            Err(chalk_ir::NoSolution) => Err(TypeError),
        }
    }
    /// If `ty` is a type variable with known type, returns that type;
    /// otherwise, return ty.
    pub(crate) fn resolve_ty_shallow(&mut self, ty: &Ty) -> Ty {
        self.resolve_obligations_as_possible();
        self.var_unification_table.normalize_ty_shallow(Interner, ty).unwrap_or_else(|| ty.clone())
    }
    /// Captures the full table state (unification table, variable flags and
    /// pending obligations) for later `rollback_to`.
    pub(crate) fn snapshot(&mut self) -> InferenceTableSnapshot {
        let var_table_snapshot = self.var_unification_table.snapshot();
        let type_variable_table_snapshot = self.type_variable_table.clone();
        let pending_obligations = self.pending_obligations.clone();
        InferenceTableSnapshot {
            var_table_snapshot,
            pending_obligations,
            type_variable_table_snapshot,
        }
    }
    /// Restores the state captured by a matching call to `snapshot`.
    pub(crate) fn rollback_to(&mut self, snapshot: InferenceTableSnapshot) {
        self.var_unification_table.rollback_to(snapshot.var_table_snapshot);
        self.type_variable_table = snapshot.type_variable_table_snapshot;
        self.pending_obligations = snapshot.pending_obligations;
    }
    /// Checks an obligation without registering it. Useful mostly to check
    /// whether a trait *might* be implemented before deciding to 'lock in' the
    /// choice (during e.g. method resolution or deref).
    pub(crate) fn try_obligation(&mut self, goal: Goal) -> Option<Solution> {
        let in_env = InEnvironment::new(&self.trait_env.env, goal);
        let canonicalized = self.canonicalize(in_env);
        let solution = self.db.trait_solve(self.trait_env.krate, canonicalized.value);
        solution
    }
    /// Registers `goal`, wrapped in the table's trait environment, for solving.
    pub(crate) fn register_obligation(&mut self, goal: Goal) {
        let in_env = InEnvironment::new(&self.trait_env.env, goal);
        self.register_obligation_in_env(in_env)
    }
    fn register_obligation_in_env(&mut self, goal: InEnvironment<Goal>) {
        let canonicalized = self.canonicalize(goal);
        // Only queue the obligation if it couldn't be solved outright.
        if !self.try_resolve_obligation(&canonicalized) {
            self.pending_obligations.push(canonicalized);
        }
    }
    /// Registers all goals produced by a successful unification.
    pub(crate) fn register_infer_ok<T>(&mut self, infer_ok: InferOk<T>) {
        infer_ok.goals.into_iter().for_each(|goal| self.register_obligation_in_env(goal));
    }
    /// Tries to solve the pending obligations, looping until a fixpoint is
    /// reached (an iteration in which no obligation's variables changed).
    pub(crate) fn resolve_obligations_as_possible(&mut self) {
        let _span = profile::span("resolve_obligations_as_possible");
        let mut changed = true;
        let mut obligations = Vec::new();
        while changed {
            changed = false;
            mem::swap(&mut self.pending_obligations, &mut obligations);
            for canonicalized in obligations.drain(..) {
                if !self.check_changed(&canonicalized) {
                    // Nothing relevant changed since last attempt; the solver
                    // would give the same answer, so keep it pending.
                    self.pending_obligations.push(canonicalized);
                    continue;
                }
                changed = true;
                // Re-substitute the free variables and try solving again.
                let uncanonical = chalk_ir::Substitute::apply(
                    &canonicalized.free_vars,
                    canonicalized.value.value,
                    Interner,
                );
                self.register_obligation_in_env(uncanonical);
            }
        }
    }
    /// Runs `f`, rolls the table back to the state before the call, and then
    /// replaces any inference variables `f` created in its result with fresh
    /// ones ("fudging"), so the result stays valid after the rollback.
    pub(crate) fn fudge_inference<T: Fold<Interner>>(
        &mut self,
        f: impl FnOnce(&mut Self) -> T,
    ) -> T::Result {
        use chalk_ir::fold::Folder;
        // Folder that keeps variables that existed before the fudged
        // computation and swaps newer ones for fresh variables.
        struct VarFudger<'a, 'b> {
            table: &'a mut InferenceTable<'b>,
            highest_known_var: InferenceVar,
        }
        impl<'a, 'b> Folder<Interner> for VarFudger<'a, 'b> {
            type Error = NoSolution;
            fn as_dyn(&mut self) -> &mut dyn Folder<Interner, Error = Self::Error> {
                self
            }
            fn interner(&self) -> Interner {
                Interner
            }
            fn fold_inference_ty(
                &mut self,
                var: chalk_ir::InferenceVar,
                kind: TyVariableKind,
                _outer_binder: chalk_ir::DebruijnIndex,
            ) -> chalk_ir::Fallible<chalk_ir::Ty<Interner>> {
                Ok(if var < self.highest_known_var {
                    var.to_ty(Interner, kind)
                } else {
                    self.table.new_type_var()
                })
            }
            fn fold_inference_lifetime(
                &mut self,
                var: chalk_ir::InferenceVar,
                _outer_binder: chalk_ir::DebruijnIndex,
            ) -> chalk_ir::Fallible<chalk_ir::Lifetime<Interner>> {
                Ok(if var < self.highest_known_var {
                    var.to_lifetime(Interner)
                } else {
                    self.table.new_lifetime_var()
                })
            }
            fn fold_inference_const(
                &mut self,
                ty: chalk_ir::Ty<Interner>,
                var: chalk_ir::InferenceVar,
                _outer_binder: chalk_ir::DebruijnIndex,
            ) -> chalk_ir::Fallible<chalk_ir::Const<Interner>> {
                Ok(if var < self.highest_known_var {
                    var.to_const(Interner, ty)
                } else {
                    self.table.new_const_var(ty)
                })
            }
        }
        let snapshot = self.snapshot();
        // A fresh variable marks the boundary between "old" and "new" vars.
        let highest_known_var = self.new_type_var().inference_var(Interner).expect("inference_var");
        let result = f(self);
        self.rollback_to(snapshot);
        result
            .fold_with(&mut VarFudger { table: self, highest_known_var }, DebruijnIndex::INNERMOST)
            .expect("fold_with with VarFudger")
    }
    /// This checks whether any of the free variables in the `canonicalized`
    /// have changed (either been unified with another variable, or with a
    /// value). If this is not the case, we don't need to try to solve the goal
    /// again -- it'll give the same result as last time.
    fn check_changed(&mut self, canonicalized: &Canonicalized<InEnvironment<Goal>>) -> bool {
        canonicalized.free_vars.iter().any(|var| {
            let iv = match var.data(Interner) {
                chalk_ir::GenericArgData::Ty(ty) => ty.inference_var(Interner),
                chalk_ir::GenericArgData::Lifetime(lt) => lt.inference_var(Interner),
                chalk_ir::GenericArgData::Const(c) => c.inference_var(Interner),
            }
            .expect("free var is not inference var");
            // Known value: definitely changed.
            if self.var_unification_table.probe_var(iv).is_some() {
                return true;
            }
            // Unified with another variable: root differs from the variable.
            let root = self.var_unification_table.inference_var_root(iv);
            iv != root
        })
    }
    /// Asks the trait solver about `canonicalized`; returns `true` if the
    /// obligation is considered handled (and thus should not stay pending).
    fn try_resolve_obligation(
        &mut self,
        canonicalized: &Canonicalized<InEnvironment<Goal>>,
    ) -> bool {
        let solution = self.db.trait_solve(self.trait_env.krate, canonicalized.value.clone());
        match solution {
            Some(Solution::Unique(canonical_subst)) => {
                canonicalized.apply_solution(
                    self,
                    Canonical {
                        binders: canonical_subst.binders,
                        // FIXME: handle constraints
                        value: canonical_subst.value.subst,
                    },
                );
                true
            }
            Some(Solution::Ambig(Guidance::Definite(substs))) => {
                // Ambiguous but with definite guidance: apply it, keep pending.
                canonicalized.apply_solution(self, substs);
                false
            }
            Some(_) => {
                // FIXME use this when trying to resolve everything at the end
                false
            }
            None => {
                // FIXME obligation cannot be fulfilled => diagnostic
                true
            }
        }
    }
    /// Returns the parameter types and return type with which `ty` is callable
    /// with `num_args` arguments — from an inherent signature if there is one,
    /// otherwise via the `FnOnce` trait.
    pub(crate) fn callable_sig(&mut self, ty: &Ty, num_args: usize) -> Option<(Vec<Ty>, Ty)> {
        match ty.callable_sig(self.db) {
            Some(sig) => Some((sig.params().to_vec(), sig.ret().clone())),
            None => self.callable_sig_from_fn_trait(ty, num_args),
        }
    }
    /// Checks whether `ty` implements `FnOnce` for a `num_args`-tuple of fresh
    /// type variables; if so, returns those argument variables together with
    /// the normalized `Output` type.
    fn callable_sig_from_fn_trait(&mut self, ty: &Ty, num_args: usize) -> Option<(Vec<Ty>, Ty)> {
        let krate = self.trait_env.krate;
        let fn_once_trait = FnTrait::FnOnce.get_id(self.db, krate)?;
        let output_assoc_type =
            self.db.trait_data(fn_once_trait).associated_type_by_name(&name![Output])?;
        let mut arg_tys = vec![];
        // Build the argument tuple: one fresh type variable per argument.
        let arg_ty = TyBuilder::tuple(num_args)
            .fill(|x| {
                let arg = match x {
                    ParamKind::Type => self.new_type_var(),
                    ParamKind::Const(ty) => {
                        never!("Tuple with const parameter");
                        return GenericArgData::Const(self.new_const_var(ty.clone()))
                            .intern(Interner);
                    }
                };
                arg_tys.push(arg.clone());
                GenericArgData::Ty(arg).intern(Interner)
            })
            .build();
        // Project `<ty as FnOnce<(args,)>>::Output`.
        let projection = {
            let b = TyBuilder::assoc_type_projection(self.db, output_assoc_type);
            if b.remaining() != 2 {
                return None;
            }
            b.push(ty.clone()).push(arg_ty).build()
        };
        let trait_env = self.trait_env.env.clone();
        let obligation = InEnvironment {
            goal: projection.trait_ref(self.db).cast(Interner),
            environment: trait_env,
        };
        let canonical = self.canonicalize(obligation.clone());
        if self.db.trait_solve(krate, canonical.value.cast(Interner)).is_some() {
            self.register_obligation(obligation.goal);
            let return_ty = self.normalize_projection_ty(projection);
            Some((arg_tys, return_ty))
        } else {
            None
        }
    }
}
impl<'a> fmt::Debug for InferenceTable<'a> {
    /// Compact debug representation: dumping the full unification state would
    /// be overwhelming, so only the number of tracked variables is shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let num_vars = self.type_variable_table.len();
        f.debug_struct("InferenceTable").field("num_vars", &num_vars).finish()
    }
}
mod resolve {
    //! Folder that substitutes known inference variables with their values
    //! and routes unresolved ones through a caller-supplied fallback.
    use super::InferenceTable;
    use crate::{
        ConcreteConst, Const, ConstData, ConstValue, DebruijnIndex, GenericArg, InferenceVar,
        Interner, Lifetime, Ty, TyVariableKind, VariableKind,
    };
    use chalk_ir::{
        cast::Cast,
        fold::{Fold, Folder},
        Fallible, NoSolution,
    };
    use hir_def::type_ref::ConstScalar;
    /// Folder driving `InferenceTable::resolve_with_fallback`. `var_stack`
    /// holds the variables currently being expanded, to detect cycles.
    pub(super) struct Resolver<'a, 'b, F> {
        pub(super) table: &'a mut InferenceTable<'b>,
        pub(super) var_stack: &'a mut Vec<InferenceVar>,
        pub(super) fallback: F,
    }
    impl<'a, 'b, 'i, F> Folder<Interner> for Resolver<'a, 'b, F>
    where
        F: Fn(InferenceVar, VariableKind, GenericArg, DebruijnIndex) -> GenericArg + 'i,
    {
        type Error = NoSolution;
        fn as_dyn(&mut self) -> &mut dyn Folder<Interner, Error = Self::Error> {
            self
        }
        fn interner(&self) -> Interner {
            Interner
        }
        /// Resolves a type inference variable: if its value is known, folds
        /// that value recursively; otherwise, or on a cycle, hands it to
        /// `fallback` with the table's fallback type as the default.
        fn fold_inference_ty(
            &mut self,
            var: InferenceVar,
            kind: TyVariableKind,
            outer_binder: DebruijnIndex,
        ) -> Fallible<Ty> {
            // Normalize to the union-find root so cycle detection works for
            // any variable in the equivalence class.
            let var = self.table.var_unification_table.inference_var_root(var);
            if self.var_stack.contains(&var) {
                // recursive type
                let default = self.table.fallback_value(var, kind).cast(Interner);
                return Ok((self.fallback)(var, VariableKind::Ty(kind), default, outer_binder)
                    .assert_ty_ref(Interner)
                    .clone());
            }
            let result = if let Some(known_ty) = self.table.var_unification_table.probe_var(var) {
                // known_ty may contain other variables that are known by now
                self.var_stack.push(var);
                let result =
                    known_ty.fold_with(self, outer_binder).expect("fold failed unexpectedly");
                self.var_stack.pop();
                result.assert_ty_ref(Interner).clone()
            } else {
                let default = self.table.fallback_value(var, kind).cast(Interner);
                (self.fallback)(var, VariableKind::Ty(kind), default, outer_binder)
                    .assert_ty_ref(Interner)
                    .clone()
            };
            Ok(result)
        }
        /// Resolves a const inference variable, analogously to
        /// `fold_inference_ty`; the default is an `Unknown` concrete const.
        fn fold_inference_const(
            &mut self,
            ty: Ty,
            var: InferenceVar,
            outer_binder: DebruijnIndex,
        ) -> Fallible<Const> {
            let var = self.table.var_unification_table.inference_var_root(var);
            let default = ConstData {
                ty: ty.clone(),
                value: ConstValue::Concrete(ConcreteConst { interned: ConstScalar::Unknown }),
            }
            .intern(Interner)
            .cast(Interner);
            if self.var_stack.contains(&var) {
                // recursive
                return Ok((self.fallback)(var, VariableKind::Const(ty), default, outer_binder)
                    .assert_const_ref(Interner)
                    .clone());
            }
            let result = if let Some(known_ty) = self.table.var_unification_table.probe_var(var) {
                // known_ty may contain other variables that are known by now
                self.var_stack.push(var);
                let result =
                    known_ty.fold_with(self, outer_binder).expect("fold failed unexpectedly");
                self.var_stack.pop();
                result.assert_const_ref(Interner).clone()
            } else {
                (self.fallback)(var, VariableKind::Const(ty), default, outer_binder)
                    .assert_const_ref(Interner)
                    .clone()
            };
            Ok(result)
        }
        /// Replaces every lifetime inference variable with `'static`.
        fn fold_inference_lifetime(
            &mut self,
            _var: InferenceVar,
            _outer_binder: DebruijnIndex,
        ) -> Fallible<Lifetime> {
            // fall back all lifetimes to 'static -- currently we don't deal
            // with any lifetimes, but we can sometimes get some lifetime
            // variables through Chalk's unification, and this at least makes
            // sure we don't leak them outside of inference
            Ok(crate::static_lifetime())
        }
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.