CombinedText stringlengths 4 3.42M |
|---|
use collections::string::ToString;
use core::ops::*;
use bounded::Bounded;
use to_primitive::ToPrimitive;
use from_primitive::FromPrimitive;
use round::Round;
use one::One;
use sqrt::Sqrt;
use trig::Trig;
use zero::Zero;
pub trait Num:
Copy + One + Zero
+ Bounded
+ ToPrimitive
+ FromPrimitive
+ Trig
+ Sqrt
+ Round
+ PartialEq
+ PartialOrd
+ ToString
+ Add<Self, Output = Self>
+ Mul<Self, Output = Self>
+ Sub<Self, Output = Self>
+ Div<Self, Output = Self>
+ Rem<Self, Output = Self>
+ AddAssign<Self>
+ MulAssign<Self>
+ SubAssign<Self>
+ DivAssign<Self>
+ RemAssign<Self>
{
/// # Examples
/// ~~~
/// use num::Num;
///
/// assert_eq!((-50).clamp(&0, &100), 0);
/// assert_eq!(50.clamp(&0, &100), 50);
/// assert_eq!(150.clamp(&0, &100), 100);
/// ~~~
#[inline(always)]
fn clamp(&self, min: &Self, max: &Self) -> Self {
if self < min {
*min
} else if self > max {
*max
} else {
*self
}
}
/// # Examples
/// ~~~
/// use num::Num;
///
/// assert_eq!((-0.5).clamp01(), 0.0);
/// assert_eq!(0.5.clamp01(), 0.5);
/// assert_eq!(1.50.clamp01(), 1.0);
/// ~~~
#[inline(always)]
fn clamp01(&self) -> Self {
self.clamp(&Zero::zero(), &One::one())
}
}
// Blanket impl: every type that already satisfies all of `Num`'s supertrait
// bounds automatically implements `Num` (there is nothing extra to provide —
// `clamp`/`clamp01` have default bodies).
impl<T> Num for T where T:
Copy + One + Zero
+ Bounded
+ ToPrimitive
+ FromPrimitive
+ Trig
+ Sqrt
+ Round
+ PartialEq
+ PartialOrd
+ ToString
+ Add<T, Output = T>
+ Mul<T, Output = T>
+ Sub<T, Output = T>
+ Div<T, Output = T>
+ Rem<T, Output = T>
+ AddAssign<T>
+ MulAssign<T>
+ SubAssign<T>
+ DivAssign<T>
+ RemAssign<T> {}
clean up
use collections::string::ToString;
use core::ops::*;
use bounded::Bounded;
use to_primitive::ToPrimitive;
use from_primitive::FromPrimitive;
use round::Round;
use one::One;
use sqrt::Sqrt;
use trig::Trig;
use zero::Zero;
pub trait Num:
Copy + One + Zero
+ Bounded
+ ToPrimitive
+ FromPrimitive
+ Trig
+ Sqrt
+ Round
+ PartialEq
+ PartialOrd
+ ToString
+ Add<Self, Output = Self>
+ Mul<Self, Output = Self>
+ Sub<Self, Output = Self>
+ Div<Self, Output = Self>
+ Rem<Self, Output = Self>
+ AddAssign<Self>
+ MulAssign<Self>
+ SubAssign<Self>
+ DivAssign<Self>
+ RemAssign<Self>
{
/// # Examples
/// ~~~
/// use num::Num;
///
/// assert_eq!(50.min(&100), 50);
/// assert_eq!(100.min(&50), 50);
/// ~~~
#[inline]
fn min(&self, other: &Self) -> Self {
if self < other {
*self
} else {
*other
}
}
/// # Examples
/// ~~~
/// use num::Num;
///
/// assert_eq!(50.max(&100), 100);
/// assert_eq!(100.max(&50), 100);
/// ~~~
#[inline]
fn max(&self, other: &Self) -> Self {
if self > other {
*self
} else {
*other
}
}
/// # Examples
/// ~~~
/// use num::Num;
///
/// assert_eq!((-50).clamp(&0, &100), 0);
/// assert_eq!(50.clamp(&0, &100), 50);
/// assert_eq!(150.clamp(&0, &100), 100);
/// ~~~
#[inline]
fn clamp(&self, min: &Self, max: &Self) -> Self {
self.min(max).max(min)
}
/// # Examples
/// ~~~
/// use num::Num;
///
/// assert_eq!((-0.5).clamp01(), 0.0);
/// assert_eq!(0.5.clamp01(), 0.5);
/// assert_eq!(1.50.clamp01(), 1.0);
/// ~~~
#[inline(always)]
fn clamp01(&self) -> Self {
self.clamp(&Zero::zero(), &One::one())
}
}
// Blanket impl: every type that already satisfies all of `Num`'s supertrait
// bounds automatically implements `Num` (all methods have default bodies).
impl<T> Num for T where T:
Copy + One + Zero
+ Bounded
+ ToPrimitive
+ FromPrimitive
+ Trig
+ Sqrt
+ Round
+ PartialEq
+ PartialOrd
+ ToString
+ Add<T, Output = T>
+ Mul<T, Output = T>
+ Sub<T, Output = T>
+ Div<T, Output = T>
+ Rem<T, Output = T>
+ AddAssign<T>
+ MulAssign<T>
+ SubAssign<T>
+ DivAssign<T>
+ RemAssign<T> {}
|
use libc;
use std::{fmt, io, error, result};
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use byteorder::{ByteOrder, BigEndian, LittleEndian};
use crypto::digest::Digest;
use crypto::md5::Md5;
use rand::{Rng, OsRng};
use rustc_serialize::hex::{self, FromHex, ToHex};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use time;
const TIMESTAMP_SIZE: usize = 4;
const MACHINE_ID_SIZE: usize = 3;
const PROCESS_ID_SIZE: usize = 2;
const COUNTER_SIZE: usize = 3;
const TIMESTAMP_OFFSET: usize = 0;
const MACHINE_ID_OFFSET: usize = TIMESTAMP_OFFSET + TIMESTAMP_SIZE;
const PROCESS_ID_OFFSET: usize = MACHINE_ID_OFFSET + MACHINE_ID_SIZE;
const COUNTER_OFFSET: usize = PROCESS_ID_OFFSET + PROCESS_ID_SIZE;
const MAX_U24: usize = 0xFFFFFF;
static OID_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
static mut MACHINE_BYTES: Option<[u8; 3]> = None;
extern {
fn gethostname(name: *mut libc::c_char, size: libc::size_t) -> libc::c_int;
}
/// Errors that can occur during OID construction and generation.
#[derive(Debug)]
pub enum Error {
/// Caller-supplied input was malformed (e.g. wrong length for an ObjectId string).
ArgumentError(String),
/// Hex decoding of an ObjectId string failed.
FromHexError(hex::FromHexError),
/// Underlying I/O failure (e.g. while opening the OS RNG to seed the counter).
IoError(io::Error),
/// `gethostname` failed, so no machine id could be derived.
HostnameError,
}
impl From<hex::FromHexError> for Error {
fn from(err: hex::FromHexError) -> Error {
Error::FromHexError(err)
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::IoError(err)
}
}
/// Alias for Result<T, oid::Error>.
pub type Result<T> = result::Result<T, Error>;
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self {
&Error::ArgumentError(ref inner) => inner.fmt(fmt),
&Error::FromHexError(ref inner) => inner.fmt(fmt),
&Error::IoError(ref inner) => inner.fmt(fmt),
&Error::HostnameError => write!(fmt, "Failed to retrieve hostname for OID generation."),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match self {
&Error::ArgumentError(ref inner) => &inner,
&Error::FromHexError(ref inner) => inner.description(),
&Error::IoError(ref inner) => inner.description(),
&Error::HostnameError => "Failed to retrieve hostname for OID generation.",
}
}
fn cause(&self) -> Option<&error::Error> {
match self {
&Error::ArgumentError(_) => None,
&Error::FromHexError(ref inner) => Some(inner),
&Error::IoError(ref inner) => Some(inner),
&Error::HostnameError => None,
}
}
}
/// A wrapper around raw 12-byte ObjectId representations.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
pub struct ObjectId {
id: [u8; 12],
}
impl ObjectId {
    /// Generates a new ObjectID, represented in bytes.
    /// See the [docs](http://docs.mongodb.org/manual/reference/object-id/)
    /// for more information.
    pub fn new() -> Result<ObjectId> {
        let timestamp = ObjectId::gen_timestamp();
        let machine_id = try!(ObjectId::gen_machine_id());
        let process_id = ObjectId::gen_process_id();
        let counter = try!(ObjectId::gen_count());
        // Assemble the four fields at their fixed offsets.
        let mut buf: [u8; 12] = [0; 12];
        for i in 0..TIMESTAMP_SIZE { buf[TIMESTAMP_OFFSET + i] = timestamp[i]; }
        for i in 0..MACHINE_ID_SIZE { buf[MACHINE_ID_OFFSET + i] = machine_id[i]; }
        for i in 0..PROCESS_ID_SIZE { buf[PROCESS_ID_OFFSET + i] = process_id[i]; }
        for i in 0..COUNTER_SIZE { buf[COUNTER_OFFSET + i] = counter[i]; }
        Ok(ObjectId::with_bytes(buf))
    }
    /// Constructs a new ObjectId wrapper around the raw byte representation.
    pub fn with_bytes(bytes: [u8; 12]) -> ObjectId {
        ObjectId { id: bytes }
    }
    /// Creates an ObjectID using a 12-byte (24-char) hexadecimal string.
    pub fn with_string(s: &str) -> Result<ObjectId> {
        let bytes = try!(s.from_hex());
        if bytes.len() != 12 {
            Err(Error::ArgumentError("Provided string must be a 12-byte hexadecimal string.".to_owned()))
        } else {
            let mut byte_array: [u8; 12] = [0; 12];
            for i in 0..12 {
                byte_array[i] = bytes[i];
            }
            Ok(ObjectId::with_bytes(byte_array))
        }
    }
    /// Creates a dummy ObjectId with a specific generation time.
    /// This method should only be used to do range queries on a field
    /// containing ObjectId instances.
    pub fn with_timestamp(time: u32) -> ObjectId {
        let mut buf: [u8; 12] = [0; 12];
        // Only the leading 4 timestamp bytes are set; the rest stay zero.
        BigEndian::write_u32(&mut buf, time);
        ObjectId::with_bytes(buf)
    }
    /// Returns the raw byte representation of an ObjectId.
    pub fn bytes(&self) -> [u8; 12] {
        self.id
    }
    /// Retrieves the timestamp (seconds since epoch) from an ObjectId.
    pub fn timestamp(&self) -> u32 {
        BigEndian::read_u32(&self.id)
    }
    /// Retrieves the machine id associated with an ObjectId.
    pub fn machine_id(&self) -> u32 {
        // 3 little-endian bytes zero-extended into a u32.
        let mut buf: [u8; 4] = [0; 4];
        for i in 0..MACHINE_ID_SIZE {
            buf[i] = self.id[MACHINE_ID_OFFSET + i];
        }
        LittleEndian::read_u32(&buf)
    }
    /// Retrieves the process id associated with an ObjectId.
    pub fn process_id(&self) -> u16 {
        LittleEndian::read_u16(&self.id[PROCESS_ID_OFFSET..])
    }
    /// Retrieves the increment counter from an ObjectId.
    pub fn counter(&self) -> u32 {
        // 3 big-endian bytes left-padded with one zero byte.
        let mut buf: [u8; 4] = [0; 4];
        for i in 0..COUNTER_SIZE {
            buf[i + 1] = self.id[COUNTER_OFFSET + i];
        }
        BigEndian::read_u32(&buf)
    }
    // Generates a new timestamp representing the current seconds since epoch.
    // Represented in Big Endian.
    fn gen_timestamp() -> [u8; 4] {
        let timespec = time::get_time();
        let timestamp = timespec.sec as u32;
        let mut buf: [u8; 4] = [0; 4];
        BigEndian::write_u32(&mut buf, timestamp);
        buf
    }
    // Generates a new machine id represented as an MD5-hashed 3-byte-encoded hostname string.
    // Represented in Little Endian.
    fn gen_machine_id() -> Result<[u8; 3]> {
        // Short-circuit if machine id has already been calculated.
        // Since the generated machine id is not variable, arising race conditions
        // will have the same MACHINE_BYTES result.
        unsafe {
            if let Some(bytes) = MACHINE_BYTES.as_ref() {
                return Ok(bytes.clone());
            }
        }
        // Retrieve hostname through libc.
        // BUGFIX: previously `Vec::with_capacity(len)` was used, whose *length*
        // stays 0 — so `&buf` was an empty slice and every machine hashed the
        // empty string. Use a zero-initialized buffer and truncate at the NUL
        // terminator written by gethostname instead.
        let mut buf = vec![0u8; 256];
        let buf_len = buf.len();
        let err = unsafe { gethostname(buf.as_mut_ptr() as *mut libc::c_char, buf_len as libc::size_t) } as i32;
        if err != 0 {
            return Err(Error::HostnameError);
        }
        let end = buf.iter().position(|&b| b == 0).unwrap_or(buf_len);
        // Convert bytes into string (lossily, in case of non-UTF-8 hostnames).
        let s = String::from_utf8_lossy(&buf[..end]);
        // Hash hostname string.
        let mut md5 = Md5::new();
        md5.input_str(&s.into_owned()[..]);
        let hash = md5.result_str();
        // Re-convert string to bytes and grab first three.
        let mut bytes = hash.bytes();
        let mut vec: [u8; 3] = [0; 3];
        for i in 0..MACHINE_ID_SIZE {
            match bytes.next() {
                Some(b) => vec[i] = b,
                None => break,
            }
        }
        unsafe { MACHINE_BYTES = Some(vec) };
        Ok(vec)
    }
    // Gets the process ID and returns it as a 2-byte array.
    // Represented in Little Endian.
    fn gen_process_id() -> [u8; 2] {
        let pid = unsafe { libc::getpid() as u16 };
        let mut buf: [u8; 2] = [0; 2];
        LittleEndian::write_u16(&mut buf, pid);
        buf
    }
    // Gets an incremental 3-byte count.
    // Represented in Big Endian.
    fn gen_count() -> Result<[u8; 3]> {
        // Lazily seed the counter with a random 24-bit starting point.
        if OID_COUNTER.load(Ordering::SeqCst) == 0 {
            let mut rng = try!(OsRng::new());
            let start = rng.gen_range(0, MAX_U24 + 1);
            OID_COUNTER.store(start, Ordering::SeqCst);
        }
        let u_counter = OID_COUNTER.fetch_add(1, Ordering::SeqCst);
        // Mod result instead of OID_COUNTER to prevent threading issues.
        // Static mutexes are currently unstable; once they have been
        // stabilized, one should be used to access OID_COUNTER and
        // perform multiple operations atomically.
        // BUGFIX: wrap over the full 24-bit range [0, MAX_U24]; the previous
        // `% MAX_U24` made the value MAX_U24 itself unreachable even though
        // the random seed above can produce it.
        let u = u_counter % (MAX_U24 + 1);
        // Convert usize to writable u64, then extract the low three bytes.
        let u_int = u as u64;
        let mut buf: [u8; 8] = [0; 8];
        BigEndian::write_u64(&mut buf, u_int);
        let buf_u24: [u8; 3] = [buf[5], buf[6], buf[7]];
        Ok(buf_u24)
    }
}
impl ToHex for ObjectId {
/// Hex-encodes the 12 raw bytes into a 24-character string.
fn to_hex(&self) -> String {
self.id.to_hex()
}
}
impl fmt::Display for ObjectId {
    /// Displays the id as its 24-character hex representation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.to_hex())
    }
}
impl Decodable for ObjectId {
fn decode<D: Decoder>(d: &mut D) -> result::Result<Self, D::Error> {
let str = try!(d.read_str());
Ok(ObjectId::with_string(&str).unwrap())
}
}
impl Encodable for ObjectId {
    /// Encodes the id as its 24-character hex string (via `Display`).
    fn encode<S: Encoder>(&self, s: &mut S) -> result::Result<(), S::Error> {
        let hex_repr = self.to_string();
        s.emit_str(&hex_repr)
    }
}
// Verifies gen_process_id round-trips the current pid through its
// little-endian 2-byte representation.
#[test]
fn pid_generation() {
let pid = unsafe { libc::getpid() as u16 };
let generated = ObjectId::gen_process_id();
assert_eq!(pid, LittleEndian::read_u16(&generated));
}
// Verifies gen_count returns exactly the seeded OID_COUNTER value
// (reassembled from its 3 big-endian bytes).
#[test]
fn count_generation() {
let start = 52222;
OID_COUNTER.store(start, Ordering::SeqCst);
let count_res = ObjectId::gen_count();
assert!(count_res.is_ok());
let count_bytes = count_res.unwrap();
let mut buf: [u8; 4] = [0; 4];
for i in 0..COUNTER_SIZE {
buf[i+1] = count_bytes[i];
}
let count = BigEndian::read_u32(&buf);
assert_eq!(start as u32, count);
}
// 1122867 == 0x112233: checks the counter bytes land most-significant-first.
#[test]
fn count_is_big_endian() {
let start = 1122867;
OID_COUNTER.store(start, Ordering::SeqCst);
let oid_res = ObjectId::new();
assert!(oid_res.is_ok());
let oid = oid_res.unwrap();
assert_eq!(0x11u8, oid.bytes()[COUNTER_OFFSET]);
assert_eq!(0x22u8, oid.bytes()[COUNTER_OFFSET + 1]);
assert_eq!(0x33u8, oid.bytes()[COUNTER_OFFSET + 2]);
}
Show object id formatted as string in debug trait
use libc;
use std::{fmt, io, error, result};
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use byteorder::{ByteOrder, BigEndian, LittleEndian};
use crypto::digest::Digest;
use crypto::md5::Md5;
use rand::{Rng, OsRng};
use rustc_serialize::hex::{self, FromHex, ToHex};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use time;
const TIMESTAMP_SIZE: usize = 4;
const MACHINE_ID_SIZE: usize = 3;
const PROCESS_ID_SIZE: usize = 2;
const COUNTER_SIZE: usize = 3;
const TIMESTAMP_OFFSET: usize = 0;
const MACHINE_ID_OFFSET: usize = TIMESTAMP_OFFSET + TIMESTAMP_SIZE;
const PROCESS_ID_OFFSET: usize = MACHINE_ID_OFFSET + MACHINE_ID_SIZE;
const COUNTER_OFFSET: usize = PROCESS_ID_OFFSET + PROCESS_ID_SIZE;
const MAX_U24: usize = 0xFFFFFF;
static OID_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
static mut MACHINE_BYTES: Option<[u8; 3]> = None;
extern {
fn gethostname(name: *mut libc::c_char, size: libc::size_t) -> libc::c_int;
}
/// Errors that can occur during OID construction and generation.
#[derive(Debug)]
pub enum Error {
/// Caller-supplied input was malformed (e.g. wrong length for an ObjectId string).
ArgumentError(String),
/// Hex decoding of an ObjectId string failed.
FromHexError(hex::FromHexError),
/// Underlying I/O failure (e.g. while opening the OS RNG to seed the counter).
IoError(io::Error),
/// `gethostname` failed, so no machine id could be derived.
HostnameError,
}
impl From<hex::FromHexError> for Error {
fn from(err: hex::FromHexError) -> Error {
Error::FromHexError(err)
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::IoError(err)
}
}
/// Alias for Result<T, oid::Error>.
pub type Result<T> = result::Result<T, Error>;
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self {
&Error::ArgumentError(ref inner) => inner.fmt(fmt),
&Error::FromHexError(ref inner) => inner.fmt(fmt),
&Error::IoError(ref inner) => inner.fmt(fmt),
&Error::HostnameError => write!(fmt, "Failed to retrieve hostname for OID generation."),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match self {
&Error::ArgumentError(ref inner) => &inner,
&Error::FromHexError(ref inner) => inner.description(),
&Error::IoError(ref inner) => inner.description(),
&Error::HostnameError => "Failed to retrieve hostname for OID generation.",
}
}
fn cause(&self) -> Option<&error::Error> {
match self {
&Error::ArgumentError(_) => None,
&Error::FromHexError(ref inner) => Some(inner),
&Error::IoError(ref inner) => Some(inner),
&Error::HostnameError => None,
}
}
}
/// A wrapper around raw 12-byte ObjectId representations.
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord)]
pub struct ObjectId {
id: [u8; 12],
}
impl ObjectId {
    /// Generates a new ObjectID, represented in bytes.
    /// See the [docs](http://docs.mongodb.org/manual/reference/object-id/)
    /// for more information.
    pub fn new() -> Result<ObjectId> {
        let timestamp = ObjectId::gen_timestamp();
        let machine_id = try!(ObjectId::gen_machine_id());
        let process_id = ObjectId::gen_process_id();
        let counter = try!(ObjectId::gen_count());
        // Assemble the four fields at their fixed offsets.
        let mut buf: [u8; 12] = [0; 12];
        for i in 0..TIMESTAMP_SIZE { buf[TIMESTAMP_OFFSET + i] = timestamp[i]; }
        for i in 0..MACHINE_ID_SIZE { buf[MACHINE_ID_OFFSET + i] = machine_id[i]; }
        for i in 0..PROCESS_ID_SIZE { buf[PROCESS_ID_OFFSET + i] = process_id[i]; }
        for i in 0..COUNTER_SIZE { buf[COUNTER_OFFSET + i] = counter[i]; }
        Ok(ObjectId::with_bytes(buf))
    }
    /// Constructs a new ObjectId wrapper around the raw byte representation.
    pub fn with_bytes(bytes: [u8; 12]) -> ObjectId {
        ObjectId { id: bytes }
    }
    /// Creates an ObjectID using a 12-byte (24-char) hexadecimal string.
    pub fn with_string(s: &str) -> Result<ObjectId> {
        let bytes = try!(s.from_hex());
        if bytes.len() != 12 {
            Err(Error::ArgumentError("Provided string must be a 12-byte hexadecimal string.".to_owned()))
        } else {
            let mut byte_array: [u8; 12] = [0; 12];
            for i in 0..12 {
                byte_array[i] = bytes[i];
            }
            Ok(ObjectId::with_bytes(byte_array))
        }
    }
    /// Creates a dummy ObjectId with a specific generation time.
    /// This method should only be used to do range queries on a field
    /// containing ObjectId instances.
    pub fn with_timestamp(time: u32) -> ObjectId {
        let mut buf: [u8; 12] = [0; 12];
        // Only the leading 4 timestamp bytes are set; the rest stay zero.
        BigEndian::write_u32(&mut buf, time);
        ObjectId::with_bytes(buf)
    }
    /// Returns the raw byte representation of an ObjectId.
    pub fn bytes(&self) -> [u8; 12] {
        self.id
    }
    /// Retrieves the timestamp (seconds since epoch) from an ObjectId.
    pub fn timestamp(&self) -> u32 {
        BigEndian::read_u32(&self.id)
    }
    /// Retrieves the machine id associated with an ObjectId.
    pub fn machine_id(&self) -> u32 {
        // 3 little-endian bytes zero-extended into a u32.
        let mut buf: [u8; 4] = [0; 4];
        for i in 0..MACHINE_ID_SIZE {
            buf[i] = self.id[MACHINE_ID_OFFSET + i];
        }
        LittleEndian::read_u32(&buf)
    }
    /// Retrieves the process id associated with an ObjectId.
    pub fn process_id(&self) -> u16 {
        LittleEndian::read_u16(&self.id[PROCESS_ID_OFFSET..])
    }
    /// Retrieves the increment counter from an ObjectId.
    pub fn counter(&self) -> u32 {
        // 3 big-endian bytes left-padded with one zero byte.
        let mut buf: [u8; 4] = [0; 4];
        for i in 0..COUNTER_SIZE {
            buf[i + 1] = self.id[COUNTER_OFFSET + i];
        }
        BigEndian::read_u32(&buf)
    }
    // Generates a new timestamp representing the current seconds since epoch.
    // Represented in Big Endian.
    fn gen_timestamp() -> [u8; 4] {
        let timespec = time::get_time();
        let timestamp = timespec.sec as u32;
        let mut buf: [u8; 4] = [0; 4];
        BigEndian::write_u32(&mut buf, timestamp);
        buf
    }
    // Generates a new machine id represented as an MD5-hashed 3-byte-encoded hostname string.
    // Represented in Little Endian.
    fn gen_machine_id() -> Result<[u8; 3]> {
        // Short-circuit if machine id has already been calculated.
        // Since the generated machine id is not variable, arising race conditions
        // will have the same MACHINE_BYTES result.
        unsafe {
            if let Some(bytes) = MACHINE_BYTES.as_ref() {
                return Ok(bytes.clone());
            }
        }
        // Retrieve hostname through libc.
        // BUGFIX: previously `Vec::with_capacity(len)` was used, whose *length*
        // stays 0 — so `&buf` was an empty slice and every machine hashed the
        // empty string. Use a zero-initialized buffer and truncate at the NUL
        // terminator written by gethostname instead.
        let mut buf = vec![0u8; 256];
        let buf_len = buf.len();
        let err = unsafe { gethostname(buf.as_mut_ptr() as *mut libc::c_char, buf_len as libc::size_t) } as i32;
        if err != 0 {
            return Err(Error::HostnameError);
        }
        let end = buf.iter().position(|&b| b == 0).unwrap_or(buf_len);
        // Convert bytes into string (lossily, in case of non-UTF-8 hostnames).
        let s = String::from_utf8_lossy(&buf[..end]);
        // Hash hostname string.
        let mut md5 = Md5::new();
        md5.input_str(&s.into_owned()[..]);
        let hash = md5.result_str();
        // Re-convert string to bytes and grab first three.
        let mut bytes = hash.bytes();
        let mut vec: [u8; 3] = [0; 3];
        for i in 0..MACHINE_ID_SIZE {
            match bytes.next() {
                Some(b) => vec[i] = b,
                None => break,
            }
        }
        unsafe { MACHINE_BYTES = Some(vec) };
        Ok(vec)
    }
    // Gets the process ID and returns it as a 2-byte array.
    // Represented in Little Endian.
    fn gen_process_id() -> [u8; 2] {
        let pid = unsafe { libc::getpid() as u16 };
        let mut buf: [u8; 2] = [0; 2];
        LittleEndian::write_u16(&mut buf, pid);
        buf
    }
    // Gets an incremental 3-byte count.
    // Represented in Big Endian.
    fn gen_count() -> Result<[u8; 3]> {
        // Lazily seed the counter with a random 24-bit starting point.
        if OID_COUNTER.load(Ordering::SeqCst) == 0 {
            let mut rng = try!(OsRng::new());
            let start = rng.gen_range(0, MAX_U24 + 1);
            OID_COUNTER.store(start, Ordering::SeqCst);
        }
        let u_counter = OID_COUNTER.fetch_add(1, Ordering::SeqCst);
        // Mod result instead of OID_COUNTER to prevent threading issues.
        // Static mutexes are currently unstable; once they have been
        // stabilized, one should be used to access OID_COUNTER and
        // perform multiple operations atomically.
        // BUGFIX: wrap over the full 24-bit range [0, MAX_U24]; the previous
        // `% MAX_U24` made the value MAX_U24 itself unreachable even though
        // the random seed above can produce it.
        let u = u_counter % (MAX_U24 + 1);
        // Convert usize to writable u64, then extract the low three bytes.
        let u_int = u as u64;
        let mut buf: [u8; 8] = [0; 8];
        BigEndian::write_u64(&mut buf, u_int);
        let buf_u24: [u8; 3] = [buf[5], buf[6], buf[7]];
        Ok(buf_u24)
    }
}
impl ToHex for ObjectId {
/// Hex-encodes the 12 raw bytes into a 24-character string.
fn to_hex(&self) -> String {
self.id.to_hex()
}
}
impl fmt::Display for ObjectId {
    /// Displays the id as its 24-character hex representation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.to_hex())
    }
}
impl fmt::Debug for ObjectId {
    /// Debug form: `ObjectId(<24-char hex>)` — written straight to the
    /// formatter rather than through an intermediate `format!` String.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ObjectId({})", self.to_hex())
    }
}
impl Decodable for ObjectId {
fn decode<D: Decoder>(d: &mut D) -> result::Result<Self, D::Error> {
let str = try!(d.read_str());
Ok(ObjectId::with_string(&str).unwrap())
}
}
impl Encodable for ObjectId {
    /// Encodes the id as its 24-character hex string (via `Display`).
    fn encode<S: Encoder>(&self, s: &mut S) -> result::Result<(), S::Error> {
        let hex_repr = self.to_string();
        s.emit_str(&hex_repr)
    }
}
// Verifies gen_process_id round-trips the current pid through its
// little-endian 2-byte representation.
#[test]
fn pid_generation() {
let pid = unsafe { libc::getpid() as u16 };
let generated = ObjectId::gen_process_id();
assert_eq!(pid, LittleEndian::read_u16(&generated));
}
// Verifies gen_count returns exactly the seeded OID_COUNTER value
// (reassembled from its 3 big-endian bytes).
#[test]
fn count_generation() {
let start = 52222;
OID_COUNTER.store(start, Ordering::SeqCst);
let count_res = ObjectId::gen_count();
assert!(count_res.is_ok());
let count_bytes = count_res.unwrap();
let mut buf: [u8; 4] = [0; 4];
for i in 0..COUNTER_SIZE {
buf[i+1] = count_bytes[i];
}
let count = BigEndian::read_u32(&buf);
assert_eq!(start as u32, count);
}
// 1122867 == 0x112233: checks the counter bytes land most-significant-first.
#[test]
fn count_is_big_endian() {
let start = 1122867;
OID_COUNTER.store(start, Ordering::SeqCst);
let oid_res = ObjectId::new();
assert!(oid_res.is_ok());
let oid = oid_res.unwrap();
assert_eq!(0x11u8, oid.bytes()[COUNTER_OFFSET]);
assert_eq!(0x22u8, oid.bytes()[COUNTER_OFFSET + 1]);
assert_eq!(0x33u8, oid.bytes()[COUNTER_OFFSET + 2]);
}
// Display should be the bare 24-char hex string.
#[test]
fn test_display() {
let id = ObjectId::with_string("53e37d08776f724e42000000").unwrap();
assert_eq!(
format!("{}", id),
"53e37d08776f724e42000000"
)
}
// Debug should wrap the hex string in "ObjectId(...)".
#[test]
fn test_debug() {
let id = ObjectId::with_string("53e37d08776f724e42000000").unwrap();
assert_eq!(
format!("{:?}", id),
"ObjectId(53e37d08776f724e42000000)"
)
}
|
use ast::{Atom, Expr};
use error::*;
fn eval_args(args: &[Expr]) -> Result<Vec<Expr>> {
args.iter().map(Expr::eval).collect()
}
/// Sums the evaluated arguments. Any float operand promotes the whole sum
/// to f64; otherwise i64 addition is performed.
pub fn add(args: &[Expr]) -> Result<Expr> {
    // Evaluate all arguments up front.
    let ok_args = match eval_args(args) {
        Ok(exprs) => exprs,
        Err(err) => return Err(format!("could not eval: {}", err).into()),
    };
    // Every operand must reduce to an atom...
    let atoms: Option<Vec<&Atom>> = ok_args.iter().map(Expr::atom).collect();
    let atoms = match atoms {
        Some(atoms) => atoms,
        None => return Err("could not add non-atom".into()),
    };
    // ...and every atom must be numeric.
    if !atoms.iter().all(|x| x.is_flt() || x.is_int()) {
        return Err("could not add non-numeric atom".into());
    }
    if atoms.iter().any(|x| x.is_flt()) {
        // Mixed or float operands: promote ints and sum as f64.
        let total = atoms.iter()
            .map(|x| x.flt().unwrap_or_else(|| x.int().unwrap() as f64))
            .sum::<f64>();
        Ok(Expr::from(total))
    } else {
        // Pure integer addition.
        let total = atoms.iter()
            .map(|x| x.int().unwrap())
            .sum::<i64>();
        Ok(Expr::from(total))
    }
}
// The remaining builtins are placeholders; each will panic if called.
/// `-` builtin — not yet implemented.
pub fn sub(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `*` builtin — not yet implemented.
pub fn mul(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `/` builtin — not yet implemented.
pub fn div(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `=` builtin — not yet implemented.
pub fn equal(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `<` builtin — not yet implemented.
pub fn less(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `<=` builtin — not yet implemented.
pub fn less_eq(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `>` builtin — not yet implemented.
pub fn greater(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `>=` builtin — not yet implemented.
pub fn greater_eq(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `not` builtin — not yet implemented.
pub fn not(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `and` builtin — not yet implemented.
pub fn and(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `or` builtin — not yet implemented.
pub fn or(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `print` builtin — not yet implemented.
pub fn print(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
Stop complaining about unused vars in unimplemented ops
#![allow(unused_variables)]
use ast::{Atom, Expr};
use error::*;
fn eval_args(args: &[Expr]) -> Result<Vec<Expr>> {
args.iter().map(Expr::eval).collect()
}
/// Sums the evaluated arguments. Any float operand promotes the whole sum
/// to f64; otherwise i64 addition is performed.
pub fn add(args: &[Expr]) -> Result<Expr> {
    // Evaluate all arguments up front.
    let ok_args = match eval_args(args) {
        Ok(exprs) => exprs,
        Err(err) => return Err(format!("could not eval: {}", err).into()),
    };
    // Every operand must reduce to an atom...
    let atoms: Option<Vec<&Atom>> = ok_args.iter().map(Expr::atom).collect();
    let atoms = match atoms {
        Some(atoms) => atoms,
        None => return Err("could not add non-atom".into()),
    };
    // ...and every atom must be numeric.
    if !atoms.iter().all(|x| x.is_flt() || x.is_int()) {
        return Err("could not add non-numeric atom".into());
    }
    if atoms.iter().any(|x| x.is_flt()) {
        // Mixed or float operands: promote ints and sum as f64.
        let total = atoms.iter()
            .map(|x| x.flt().unwrap_or_else(|| x.int().unwrap() as f64))
            .sum::<f64>();
        Ok(Expr::from(total))
    } else {
        // Pure integer addition.
        let total = atoms.iter()
            .map(|x| x.int().unwrap())
            .sum::<i64>();
        Ok(Expr::from(total))
    }
}
// The remaining builtins are placeholders; each will panic if called.
// (Unused-variable warnings are silenced file-wide by #![allow(unused_variables)].)
/// `-` builtin — not yet implemented.
pub fn sub(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `*` builtin — not yet implemented.
pub fn mul(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `/` builtin — not yet implemented.
pub fn div(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `=` builtin — not yet implemented.
pub fn equal(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `<` builtin — not yet implemented.
pub fn less(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `<=` builtin — not yet implemented.
pub fn less_eq(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `>` builtin — not yet implemented.
pub fn greater(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `>=` builtin — not yet implemented.
pub fn greater_eq(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `not` builtin — not yet implemented.
pub fn not(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `and` builtin — not yet implemented.
pub fn and(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `or` builtin — not yet implemented.
pub fn or(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
/// `print` builtin — not yet implemented.
pub fn print(args: &[Expr]) -> Result<Expr> {
unimplemented!()
}
|
use md6;
use std::iter;
use time::now;
use unicase::UniCase;
use iron::mime::Mime;
use std::sync::RwLock;
use lazysort::SortedBy;
use std::path::PathBuf;
use std::fs::{self, File};
use std::default::Default;
use iron::modifiers::Header;
use std::collections::HashMap;
use self::super::{Options, Error};
use mime_guess::guess_mime_type_opt;
use std::io::{self, Read, Seek, SeekFrom};
use trivial_colours::{Reset as CReset, Colour as C};
use iron::{headers, status, method, mime, IronResult, Listening, Response, TypeMap, Request, Handler, Iron};
use self::super::util::{url_path, file_hash, is_symlink, encode_str, encode_file, hash_string, html_response, file_binary, client_mobile, percent_decode,
file_icon_suffix, response_encoding, detect_file_as_dir, encoding_extension, file_time_modified, human_readable_size, USER_AGENT,
ERROR_HTML, INDEX_EXTENSIONS, MIN_ENCODING_GAIN, MAX_ENCODING_SIZE, MIN_ENCODING_SIZE, DIRECTORY_LISTING_HTML,
MOBILE_DIRECTORY_LISTING_HTML, BLACKLISTED_ENCODING_EXTENSIONS};
// Timestamped coloured logger: prints a cyan "[%F %T] " prefix, then the
// caller's format string. The trailing `{colour:.0}` arguments are printed
// with zero width, so the named colour/reset parameters are always available
// inside `$fmt` without the call site having to pass them.
macro_rules! log {
($fmt:expr) => {
print!("{}[{}]{} ", C::Cyan, now().strftime("%F %T").unwrap(), CReset);
println!(concat!($fmt, "{black:.0}{red:.0}{green:.0}{yellow:.0}{blue:.0}{magenta:.0}{cyan:.0}{white:.0}{reset:.0}"),
black = C::Black,
red = C::Red,
green = C::Green,
yellow = C::Yellow,
blue = C::Blue,
magenta = C::Magenta,
cyan = C::Cyan,
white = C::White,
reset = CReset);
};
// Variant with positional/named arguments forwarded ahead of the colours.
($fmt:expr, $($arg:tt)*) => {
print!("{}[{}]{} ", C::Cyan, now().strftime("%F %T").unwrap(), CReset);
println!(concat!($fmt, "{black:.0}{red:.0}{green:.0}{yellow:.0}{blue:.0}{magenta:.0}{cyan:.0}{white:.0}{reset:.0}"),
$($arg)*,
black = C::Black,
red = C::Red,
green = C::Green,
yellow = C::Yellow,
blue = C::Blue,
magenta = C::Magenta,
cyan = C::Cyan,
white = C::White,
reset = CReset);
};
}
// TODO: ideally this String here would be Encoding instead but hyper is bad
type CacheT<Cnt> = HashMap<([u8; 32], String), Cnt>;
/// Iron handler serving a directory over HTTP (GET/HEAD/OPTIONS/PUT/DELETE/TRACE).
pub struct HttpHandler {
// (display name, root path) of the directory being served.
pub hosted_directory: (String, PathBuf),
pub follow_symlinks: bool,
pub check_indices: bool,
// Temp subdir for writes; Some only when opts.allow_writes is set.
pub writes_temp_dir: Option<(String, PathBuf)>,
// Temp subdir for encoded output; Some only when opts.encode_fs is set.
pub encoded_temp_dir: Option<(String, PathBuf)>,
// Caches keyed by (content hash, encoding name); presumably generated
// in-memory bodies vs. on-disk encoded files — TODO confirm against users.
cache_gen: RwLock<CacheT<Vec<u8>>>,
cache_fs: RwLock<CacheT<(PathBuf, bool)>>,
}
impl HttpHandler {
    /// Builds a handler from the parsed command-line `Options`.
    pub fn new(opts: &Options) -> HttpHandler {
        HttpHandler {
            hosted_directory: opts.hosted_directory.clone(),
            follow_symlinks: opts.follow_symlinks,
            check_indices: opts.check_indices,
            writes_temp_dir: HttpHandler::temp_subdir(&opts.temp_directory, opts.allow_writes, "writes"),
            encoded_temp_dir: HttpHandler::temp_subdir(&opts.temp_directory, opts.encode_fs, "encoded"),
            cache_gen: Default::default(),
            cache_fs: Default::default(),
        }
    }
    /// Derives a named subdirectory of the configured temp dir, or `None`
    /// when the corresponding feature `flag` is off or no temp dir is set.
    fn temp_subdir(td: &Option<(String, PathBuf)>, flag: bool, name: &str) -> Option<(String, PathBuf)> {
        if !flag {
            return None;
        }
        td.as_ref().map(|&(ref temp_name, ref temp_dir)| {
            // Avoid doubling the separator when the display name already ends with one.
            let sep = if temp_name.ends_with("/") || temp_name.ends_with(r"\") {
                ""
            } else {
                "/"
            };
            (format!("{}{}{}", temp_name, sep, name), temp_dir.join(name))
        })
    }
}
impl Handler for HttpHandler {
/// Dispatches on the HTTP method; HEAD reuses the GET path with the body
/// stripped, and anything unrecognised falls through to handle_bad_method.
fn handle(&self, req: &mut Request) -> IronResult<Response> {
match req.method {
method::Options => self.handle_options(req),
method::Get => self.handle_get(req),
method::Put => self.handle_put(req),
method::Delete => self.handle_delete(req),
method::Head => {
// HEAD == GET minus the body; headers (incl. Content-Length behaviour)
// are whatever the GET handler produced.
self.handle_get(req).map(|mut r| {
r.body = None;
r
})
}
method::Trace => self.handle_trace(req),
_ => self.handle_bad_method(req),
}
}
}
impl HttpHandler {
/// Responds to OPTIONS with 204 No Content and the full set of supported methods.
fn handle_options(&self, req: &mut Request) -> IronResult<Response> {
log!("{green}{}{reset} asked for {red}OPTIONS{reset}", req.remote_addr);
Ok(Response::with((status::NoContent,
Header(headers::Server(USER_AGENT.to_string())),
Header(headers::Allow(vec![method::Options, method::Get, method::Put, method::Delete, method::Head, method::Trace])))))
}
/// Routes a GET: bad URL -> 400, missing path (or disallowed symlink) -> 404,
/// file with a Range header -> partial content, plain file or directory otherwise.
fn handle_get(&self, req: &mut Request) -> IronResult<Response> {
let (req_p, symlink, url_err) = self.parse_requested_path(req);
let file = req_p.is_file();
// Clone the Range header (if any) so `req` can be reborrowed below.
let range = req.headers.get().map(|ref r: &headers::Range| (*r).clone());
if url_err {
self.handle_invalid_url(req, "<p>Percent-encoding decoded to invalid UTF-8.</p>")
} else if !req_p.exists() || (symlink && !self.follow_symlinks) {
self.handle_nonexistant(req, req_p)
} else if file && range.is_some() {
self.handle_get_file_range(req, req_p, range.unwrap())
} else if file {
self.handle_get_file(req, req_p)
} else {
self.handle_get_dir(req, req_p)
}
}
/// Logs and returns a 400 page; `cause` is HTML for the error page, so the
/// <p> tags are stripped for the plain-text log line.
fn handle_invalid_url(&self, req: &mut Request, cause: &str) -> IronResult<Response> {
log!("{green}{}{reset} requested to {red}{}{reset} {yellow}{}{reset} with invalid URL -- {}",
req.remote_addr,
req.method,
req.url,
cause.replace("<p>", "").replace("</p>", ""));
self.handle_generated_response_encoding(req,
status::BadRequest,
html_response(ERROR_HTML, &["400 Bad Request", "The request URL was invalid.", cause]))
}
/// Logs and returns a 404 page for a path that doesn't exist (or is a
/// symlink while symlink-following is disabled).
fn handle_nonexistant(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
log!("{green}{}{reset} requested to {red}{}{reset} nonexistant entity {magenta}{}{reset}",
req.remote_addr,
req.method,
req_p.display());
let url_p = url_path(&req.url);
self.handle_generated_response_encoding(req,
status::NotFound,
html_response(ERROR_HTML,
&["404 Not Found", &format!("The requested entity \"{}\" doesn't exist.", url_p), ""]))
}
/// Dispatches a ranged GET to the closed/right-open/left-open handlers.
/// Only a single bytes-range is supported; anything else is rejected.
fn handle_get_file_range(&self, req: &mut Request, req_p: PathBuf, range: headers::Range) -> IronResult<Response> {
match range {
headers::Range::Bytes(ref brs) => {
if brs.len() == 1 {
let flen = req_p.metadata().expect("Failed to get requested file metadata").len();
match brs[0] {
// Cases where from is bigger than to are filtered out by iron so can never happen
headers::ByteRangeSpec::FromTo(from, to) => self.handle_get_file_closed_range(req, req_p, from, to),
headers::ByteRangeSpec::AllFrom(from) => {
// "bytes=from-": empty if the start lies beyond EOF.
if flen < from {
self.handle_get_file_empty_range(req, req_p, from, flen)
} else {
self.handle_get_file_right_opened_range(req, req_p, from)
}
}
headers::ByteRangeSpec::Last(from) => {
// "bytes=-from" (suffix): empty if the suffix is longer than the file.
if flen < from {
self.handle_get_file_empty_range(req, req_p, from, flen)
} else {
self.handle_get_file_left_opened_range(req, req_p, from)
}
}
}
} else {
self.handle_invalid_range(req, req_p, &range, "More than one range is unsupported.")
}
}
headers::Range::Unregistered(..) => self.handle_invalid_range(req, req_p, &range, "Custom ranges are unsupported."),
}
}
/// Serves the inclusive byte range `[from, to]` of a file as 206 Partial Content.
fn handle_get_file_closed_range(&self, req: &mut Request, req_p: PathBuf, from: u64, to: u64) -> IronResult<Response> {
    // Guess the MIME type; fall back on binary-sniffing the file.
    let mime_type = guess_mime_type_opt(&req_p).unwrap_or_else(|| if file_binary(&req_p) {
        "application/octet-stream".parse().unwrap()
    } else {
        "text/plain".parse().unwrap()
    });
    log!("{green}{}{reset} was served byte range {}-{} of file {magenta}{}{reset} as {blue}{}{reset}",
         req.remote_addr,
         from,
         to,
         req_p.display(),
         mime_type);
    // Inclusive range: [from, to] is (to + 1 - from) bytes.
    let mut buf = vec![0; (to + 1 - from) as usize];
    let mut f = File::open(&req_p).expect("Failed to open requested file");
    f.seek(SeekFrom::Start(from)).expect("Failed to seek requested file");
    // BUGFIX: a single read() may legally return fewer bytes than requested;
    // keep reading until the buffer is full or EOF (any remainder stays zeroed,
    // matching the previous behaviour for ranges past EOF).
    let mut filled = 0;
    while filled < buf.len() {
        match f.read(&mut buf[filled..]).expect("Failed to read requested file") {
            0 => break,
            n => filled += n,
        }
    }
    Ok(Response::with((status::PartialContent,
                       (Header(headers::Server(USER_AGENT.to_string())),
                        Header(headers::LastModified(headers::HttpDate(file_time_modified(&req_p)))),
                        Header(headers::ContentRange(headers::ContentRangeSpec::Bytes {
                            range: Some((from, to)),
                            instance_length: Some(f.metadata().expect("Failed to get requested file metadata").len()),
                        })),
                        Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes]))),
                      buf,
                      mime_type)))
}
/// Serve a file from absolute byte `from` to its end ("bytes=from-") as 206.
fn handle_get_file_right_opened_range(&self, req: &mut Request, req_p: PathBuf, from: u64) -> IronResult<Response> {
    // Guess from the extension; only sniff for binary content if that fails.
    let mime_type = match guess_mime_type_opt(&req_p) {
        Some(mt) => mt,
        None if file_binary(&req_p) => "application/octet-stream".parse().unwrap(),
        None => "text/plain".parse().unwrap(),
    };
    log!("{green}{}{reset} was served file {magenta}{}{reset} from byte {} as {blue}{}{reset}",
         req.remote_addr,
         req_p.display(),
         from,
         mime_type);
    let total_len = req_p.metadata().expect("Failed to get requested file metadata").len();
    let remaining = total_len - from;
    self.handle_get_file_opened_range(req_p, SeekFrom::Start(from), from, remaining, mime_type)
}
/// Serve the last `from` bytes of a file ("bytes=-N" suffix range) as 206.
/// NOTE(review): the parameter is a byte *count* here despite the name `from`.
fn handle_get_file_left_opened_range(&self, req: &mut Request, req_p: PathBuf, from: u64) -> IronResult<Response> {
let mime_type = guess_mime_type_opt(&req_p).unwrap_or_else(|| if file_binary(&req_p) {
"application/octet-stream".parse().unwrap()
} else {
"text/plain".parse().unwrap()
});
log!("{green}{}{reset} was served last {} bytes of file {magenta}{}{reset} as {blue}{}{reset}",
req.remote_addr,
from,
req_p.display(),
mime_type);
let flen = req_p.metadata().expect("Failed to get requested file metadata").len();
// Seek backwards from the end; first served byte is at offset flen - from.
self.handle_get_file_opened_range(req_p, SeekFrom::End(-(from as i64)), flen - from, from, mime_type)
}
/// Shared tail for open-ended ranges: seek to `s` and stream the rest of the file.
///
/// `b_from` is the absolute offset of the first served byte (for Content-Range),
/// `clen` the number of bytes served (Content-Length), `mt` the pre-guessed MIME type.
fn handle_get_file_opened_range(&self, req_p: PathBuf, s: SeekFrom, b_from: u64, clen: u64, mt: Mime) -> IronResult<Response> {
let mut f = File::open(&req_p).expect("Failed to open requested file");
let flen = f.metadata().expect("Failed to get requested file metadata").len();
f.seek(s).expect("Failed to seek requested file");
// The seeked File itself is the response body, so the tail is streamed, not buffered.
Ok(Response::with((status::PartialContent,
f,
(Header(headers::Server(USER_AGENT.to_string())),
Header(headers::LastModified(headers::HttpDate(file_time_modified(&req_p)))),
Header(headers::ContentRange(headers::ContentRangeSpec::Bytes {
range: Some((b_from, flen - 1)),
instance_length: Some(flen),
})),
Header(headers::ContentLength(clen)),
Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes]))),
mt)))
}
/// Respond 416 Range Not Satisfiable, explaining in `reason` why `range` was rejected.
fn handle_invalid_range(&self, req: &mut Request, req_p: PathBuf, range: &headers::Range, reason: &str) -> IronResult<Response> {
    self.handle_generated_response_encoding(req,
                                            status::RangeNotSatisfiable,
                                            html_response(ERROR_HTML,
                                                          &["416 Range Not Satisfiable",
                                                            // Fixed user-facing typo: "fullfilled" -> "fulfilled".
                                                            &format!("Requested range <samp>{}</samp> could not be fulfilled for file {}.",
                                                                     range,
                                                                     req_p.display()),
                                                            reason]))
}
/// Respond to a syntactically valid range that starts past EOF.
/// NOTE(review): replies 204 No Content with a Content-Range header; RFC 7233
/// prescribes 416 for unsatisfiable ranges -- confirm the 204 is intentional.
fn handle_get_file_empty_range(&self, req: &mut Request, req_p: PathBuf, from: u64, to: u64) -> IronResult<Response> {
let mime_type = guess_mime_type_opt(&req_p).unwrap_or_else(|| if file_binary(&req_p) {
"application/octet-stream".parse().unwrap()
} else {
"text/plain".parse().unwrap()
});
log!("{green}{}{reset} was served an empty range from file {magenta}{}{reset} as {blue}{}{reset}",
req.remote_addr,
req_p.display(),
mime_type);
Ok(Response::with((status::NoContent,
Header(headers::Server(USER_AGENT.to_string())),
Header(headers::LastModified(headers::HttpDate(file_time_modified(&req_p)))),
Header(headers::ContentRange(headers::ContentRangeSpec::Bytes {
range: Some((from, to)),
instance_length: Some(req_p.metadata().expect("Failed to get requested file metadata").len()),
})),
Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
mime_type)))
}
/// Serve an entire file (200 OK), streaming the path as the response body.
/// Routes through the on-disk encoding cache when encoding is enabled, the
/// size is within (MIN_ENCODING_SIZE, MAX_ENCODING_SIZE), and the extension
/// isn't blacklisted (already-compressed formats).
fn handle_get_file(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
let mime_type = guess_mime_type_opt(&req_p).unwrap_or_else(|| if file_binary(&req_p) {
"application/octet-stream".parse().unwrap()
} else {
"text/plain".parse().unwrap()
});
log!("{green}{}{reset} was served file {magenta}{}{reset} as {blue}{}{reset}",
req.remote_addr,
req_p.display(),
mime_type);
let flen = req_p.metadata().expect("Failed to get requested file metadata").len();
// Extensionless files default to being encoding candidates (unwrap_or(true)).
if self.encoded_temp_dir.is_some() && flen > MIN_ENCODING_SIZE && flen < MAX_ENCODING_SIZE &&
req_p.extension().and_then(|s| s.to_str()).map(|s| !BLACKLISTED_ENCODING_EXTENSIONS.contains(&UniCase(s))).unwrap_or(true) {
self.handle_get_file_encoded(req, req_p, mime_type)
} else {
Ok(Response::with((status::Ok,
Header(headers::Server(USER_AGENT.to_string())),
Header(headers::LastModified(headers::HttpDate(file_time_modified(&req_p)))),
Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
req_p,
mime_type)))
}
}
/// Serve `req_p` with Content-Encoding negotiated from Accept-Encoding,
/// caching encoded copies in the encoded temp dir keyed by (file hash, encoding).
/// Cache entries flagged `false` mean "encoding wasn't worth it -- serve identity".
/// Falls through to an unencoded response when negotiation or encoding fails.
fn handle_get_file_encoded(&self, req: &mut Request, req_p: PathBuf, mt: Mime) -> IronResult<Response> {
if let Some(encoding) = req.headers.get_mut::<headers::AcceptEncoding>().and_then(|es| response_encoding(&mut **es)) {
self.create_temp_dir(&self.encoded_temp_dir);
let cache_key = (file_hash(&req_p), encoding.to_string());
// Scope the read lock so it's released before we take the write lock below.
{
match self.cache_fs.read().expect("Filesystem cache read lock poisoned").get(&cache_key) {
// Cached and worthwhile: serve the previously encoded copy.
Some(&(ref resp_p, true)) => {
log!("{} encoded as {} for {:.1}% ratio (cached)",
iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
encoding,
((req_p.metadata().expect("Failed to get requested file metadata").len() as f64) /
(resp_p.metadata().expect("Failed to get encoded file metadata").len() as f64)) * 100f64);
return Ok(Response::with((status::Ok,
Header(headers::Server(USER_AGENT.to_string())),
Header(headers::ContentEncoding(vec![encoding])),
Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
resp_p.as_path(),
mt)));
}
// Cached as "not worth encoding": the stored path is the original file.
Some(&(ref resp_p, false)) => {
return Ok(Response::with((status::Ok,
Header(headers::Server(USER_AGENT.to_string())),
Header(headers::LastModified(headers::HttpDate(file_time_modified(&resp_p)))),
Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
resp_p.as_path(),
mt)));
}
None => (),
}
}
// Not cached yet: build a temp path like <hash>.<orig ext>.<encoding ext>.
let mut resp_p = self.encoded_temp_dir.as_ref().unwrap().1.join(hash_string(&cache_key.0));
match (req_p.extension(), encoding_extension(&encoding)) {
(Some(ext), Some(enc)) => resp_p.set_extension(format!("{}.{}", ext.to_str().unwrap_or("ext"), enc)),
(Some(ext), None) => resp_p.set_extension(format!("{}.{}", ext.to_str().unwrap_or("ext"), encoding)),
(None, Some(enc)) => resp_p.set_extension(enc),
(None, None) => resp_p.set_extension(format!("{}", encoding)),
};
if encode_file(&req_p, &resp_p, &encoding) {
// gain > 1 means the encoded copy is smaller than the original.
let gain = (req_p.metadata().expect("Failed to get requested file metadata").len() as f64) /
(resp_p.metadata().expect("Failed to get encoded file metadata").len() as f64);
if gain < MIN_ENCODING_GAIN {
// Too little benefit: remember that (false flag) and discard the copy.
let mut cache = self.cache_fs.write().expect("Filesystem cache write lock poisoned");
cache.insert(cache_key, (req_p.clone(), false));
fs::remove_file(resp_p).expect("Failed to remove too big encoded file");
} else {
log!("{} encoded as {} for {:.1}% ratio",
iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
encoding,
gain * 100f64);
let mut cache = self.cache_fs.write().expect("Filesystem cache write lock poisoned");
cache.insert(cache_key, (resp_p.clone(), true));
return Ok(Response::with((status::Ok,
Header(headers::Server(USER_AGENT.to_string())),
Header(headers::ContentEncoding(vec![encoding])),
Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
resp_p.as_path(),
mt)));
}
} else {
log!("{} failed to encode as {}, sending identity",
iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
encoding);
}
}
// Identity fallback: no acceptable encoding, or encoding not worthwhile/failed.
Ok(Response::with((status::Ok,
Header(headers::Server(USER_AGENT.to_string())),
Header(headers::LastModified(headers::HttpDate(file_time_modified(&req_p)))),
Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
req_p,
mt)))
}
/// Serve a directory GET: with index checking on, try index.<ext> for each
/// known extension first (redirecting to the slash-terminated URL if needed);
/// otherwise emit a listing, using the mobile layout for mobile user agents.
fn handle_get_dir(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
if self.check_indices {
let mut idx = req_p.join("index");
if let Some(e) = INDEX_EXTENSIONS.iter()
.find(|e| {
idx.set_extension(e);
idx.exists()
}) {
// An empty final path segment means the URL already ends with '/'.
if req.url.path().pop() == Some("") {
let r = self.handle_get_file(req, idx);
log!("{} found index file for directory {magenta}{}{reset}",
iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
req_p.display());
return r;
} else {
return self.handle_get_dir_index_no_slash(req, e);
}
}
}
if client_mobile(&req.headers) {
self.handle_get_mobile_dir_listing(req, req_p)
} else {
self.handle_get_dir_listing(req, req_p)
}
}
/// 301-redirect a slashless directory URL to its slash-terminated form
/// before serving its index file.
fn handle_get_dir_index_no_slash(&self, req: &mut Request, idx_ext: &str) -> IronResult<Response> {
let new_url = req.url.to_string() + "/";
log!("Redirecting {green}{}{reset} to {yellow}{}{reset} - found index file {magenta}index.{}{reset}",
req.remote_addr,
new_url,
idx_ext);
// We redirect here because if we don't and serve the index right away funky shit happens.
// Example:
// - Without following slash:
// https://cloud.githubusercontent.com/assets/6709544/21442017/9eb20d64-c89b-11e6-8c7b-888b5f70a403.png
// - With following slash:
// https://cloud.githubusercontent.com/assets/6709544/21442028/a50918c4-c89b-11e6-8936-c29896947f6a.png
Ok(Response::with((status::MovedPermanently, Header(headers::Server(USER_AGENT.to_string())), Header(headers::Location(new_url)))))
}
/// Render the mobile (list-style) HTML directory listing for `req_p`:
/// a parent-directory link (unless at the root), one entry per child
/// (directories first, then case-insensitive name order), and an upload
/// control when writes are enabled.
fn handle_get_mobile_dir_listing(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
let relpath = (url_path(&req.url) + "/").replace("//", "/");
let is_root = &req.url.path() == &[""];
log!("{green}{}{reset} was served mobile directory listing for {magenta}{}{reset}",
req.remote_addr,
req_p.display());
let parent_s = if is_root {
String::new()
} else {
let rel_noslash = &relpath[0..relpath.len() - 1];
let slash_idx = rel_noslash.rfind('/');
format!("<a href=\"/{up_path}{up_path_slash}\" class=\"list entry top\"><span class=\"back_arrow_icon\">Parent directory</span></a> \
<a href=\"/{up_path}{up_path_slash}\" class=\"list entry bottom\"><span class=\"marker\">@</span>\
<span class=\"datetime\">{} UTC</span></a>",
file_time_modified(req_p.parent()
.expect("Failed to get requested directory's parent directory"))
.strftime("%F %T")
.unwrap(),
up_path = slash_idx.map(|i| &rel_noslash[0..i]).unwrap_or(""),
up_path_slash = if slash_idx.is_some() { "/" } else { "" })
};
let list_s = req_p.read_dir()
.expect("Failed to read requested directory")
// NOTE(review): "trequested" is a typo in this expect message.
.map(|p| p.expect("Failed to iterate over trequested directory"))
.filter(|f| self.follow_symlinks || !is_symlink(f.path()))
// Directories sort before files, then case-insensitively by name.
.sorted_by(|lhs, rhs| {
(lhs.file_type().expect("Failed to get file type").is_file(), lhs.file_name().to_str().expect("Failed to get file name").to_lowercase())
.cmp(&(rhs.file_type().expect("Failed to get file type").is_file(),
rhs.file_name().to_str().expect("Failed to get file name").to_lowercase()))
})
.fold("".to_string(), |cur, f| {
let is_file = f.file_type().expect("Failed to get file type").is_file();
let fname = f.file_name().into_string().expect("Failed to get file name");
let path = f.path();
format!("{}<a href=\"{path}{fname}\" class=\"list entry top\"><span class=\"{}{}_icon\" id=\"{}\">{fname}{}</span></a> \
<a href=\"{path}{fname}\" class=\"list entry bottom\"><span class=\"marker\">@</span><span class=\"datetime\">{} UTC</span> \
{}{}{}</a>\n",
cur,
if is_file { "file" } else { "dir" },
file_icon_suffix(&path, is_file),
// HTML id: the filename with dots replaced, since '.' isn't id-safe for CSS selectors.
path.file_name().map(|p| p.to_str().expect("Filename not UTF-8").replace('.', "_")).as_ref().unwrap_or(&fname),
if is_file { "" } else { "/" },
file_time_modified(&path).strftime("%F %T").unwrap(),
if is_file { "<span class=\"size\">" } else { "" },
if is_file {
human_readable_size(f.metadata().expect("Failed to get file metadata").len())
} else {
String::new()
},
if is_file { "</span>" } else { "" },
path = format!("/{}", relpath).replace("//", "/"),
fname = fname)
});
self.handle_generated_response_encoding(req,
status::Ok,
html_response(MOBILE_DIRECTORY_LISTING_HTML,
&[&relpath[..],
if is_root { "" } else { "/" },
&if self.writes_temp_dir.is_some() {
r#"<script type="text/javascript">{upload}</script>"#
} else {
""
},
&parent_s[..],
&list_s[..],
&if self.writes_temp_dir.is_some() {
"<span class=\"list heading top top-border bottom\"> \
Upload files: <input id=\"file_upload\" type=\"file\" multiple /> \
</span>"
} else {
""
}]))
}
/// Render the desktop (table-style) HTML directory listing for `req_p`:
/// a parent-directory row (unless at the hosted root), one row per child
/// (directories first, then case-insensitive name order), and an upload
/// form when writes are enabled.
fn handle_get_dir_listing(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
    let relpath = (url_path(&req.url) + "/").replace("//", "/");
    let is_root = &req.url.path() == &[""];
    log!("{green}{}{reset} was served directory listing for {magenta}{}{reset}",
         req.remote_addr,
         req_p.display());
    let parent_s = if is_root {
        String::new()
    } else {
        let rel_noslash = &relpath[0..relpath.len() - 1];
        let slash_idx = rel_noslash.rfind('/');
        // Fixed malformed markup: the icon span was both self-closed ("/>") and
        // explicitly closed ("</span>"), producing a stray close tag.
        format!("<tr><td><a href=\"/{up_path}{up_path_slash}\"><span id=\"parent_dir\" class=\"back_arrow_icon\"></span></a></td> \
                 <td><a href=\"/{up_path}{up_path_slash}\">Parent directory</a></td> \
                 <td><a href=\"/{up_path}{up_path_slash}\" class=\"datetime\">{}</a></td> \
                 <td><a href=\"/{up_path}{up_path_slash}\"> </a></td></tr>",
                file_time_modified(req_p.parent()
                        .expect("Failed to get requested directory's parent directory"))
                    .strftime("%F %T")
                    .unwrap(),
                up_path = slash_idx.map(|i| &rel_noslash[0..i]).unwrap_or(""),
                up_path_slash = if slash_idx.is_some() { "/" } else { "" })
    };
    let list_s = req_p.read_dir()
        .expect("Failed to read requested directory")
        // Fixed typo in the expect message ("trequested").
        .map(|p| p.expect("Failed to iterate over requested directory"))
        .filter(|f| self.follow_symlinks || !is_symlink(f.path()))
        // Directories sort before files, then case-insensitively by name.
        .sorted_by(|lhs, rhs| {
            (lhs.file_type().expect("Failed to get file type").is_file(), lhs.file_name().to_str().expect("Failed to get file name").to_lowercase())
                .cmp(&(rhs.file_type().expect("Failed to get file type").is_file(),
                       rhs.file_name().to_str().expect("Failed to get file name").to_lowercase()))
        })
        .fold("".to_string(), |cur, f| {
            let is_file = f.file_type().expect("Failed to get file type").is_file();
            let fname = f.file_name().into_string().expect("Failed to get file name");
            let path = f.path();
            let len = f.metadata().expect("Failed to get file metadata").len();
            let abspath = format!("/{}", relpath).replace("//", "/");
            format!("{}<tr><td><a href=\"{path}{fname}\"><span id=\"{}\" class=\"{}{}_icon\"></span></a></td> \
                     <td><a href=\"{path}{fname}\">{fname}{}</a></td> <td><a href=\"{path}{fname}\" class=\"datetime\">{}</a></td> \
                     <td><a href=\"{path}{fname}\">{}{}{}{}{}</a></td></tr>\n",
                    cur,
                    // HTML id: the filename with dots replaced, since '.' isn't id-safe.
                    path.file_name().map(|p| p.to_str().expect("Filename not UTF-8").replace('.', "_")).as_ref().unwrap_or(&fname),
                    if is_file { "file" } else { "dir" },
                    file_icon_suffix(&path, is_file),
                    if is_file { "" } else { "/" },
                    file_time_modified(&path).strftime("%F %T").unwrap(),
                    if is_file { "<abbr title=\"" } else { " " },
                    if is_file {
                        len.to_string()
                    } else {
                        String::new()
                    },
                    if is_file { " B\">" } else { "" },
                    if is_file {
                        human_readable_size(len)
                    } else {
                        String::new()
                    },
                    if is_file { "</abbr>" } else { "" },
                    path = abspath,
                    fname = fname)
        });
    self.handle_generated_response_encoding(req,
                                            status::Ok,
                                            html_response(DIRECTORY_LISTING_HTML,
                                                          &[&relpath[..],
                                                            &if self.writes_temp_dir.is_some() {
                                                                r#"<script type="text/javascript">{upload}</script>"#
                                                            } else {
                                                                ""
                                                            },
                                                            &parent_s[..],
                                                            &list_s[..],
                                                            &if self.writes_temp_dir.is_some() {
                                                                "<hr /> \
                                                                 <p> \
                                                                 Drag&Drop to upload or <input id=\"file_upload\" type=\"file\" multiple />. \
                                                                 </p>"
                                                            } else {
                                                                ""
                                                            }]))
}
/// Entry point for PUT: requires writes to be enabled, rejects directories,
/// file-used-as-directory URLs, and partial-content uploads, then stores the body.
fn handle_put(&self, req: &mut Request) -> IronResult<Response> {
if self.writes_temp_dir.is_none() {
return self.handle_forbidden_method(req, "-w", "write requests");
}
let (req_p, _, url_err) = self.parse_requested_path(req);
if url_err {
self.handle_invalid_url(req, "<p>Percent-encoding decoded to invalid UTF-8.</p>")
} else if req_p.is_dir() {
self.handle_disallowed_method(req, &[method::Options, method::Get, method::Delete, method::Head, method::Trace], "directory")
} else if detect_file_as_dir(&req_p) {
self.handle_invalid_url(req, "<p>Attempted to use file as directory.</p>")
} else if req.headers.has::<headers::ContentRange>() {
// Content-Range on a PUT means a partial upload, which is rejected outright.
self.handle_put_partial_content(req)
} else {
self.create_temp_dir(&self.writes_temp_dir);
self.handle_put_file(req, req_p)
}
}
/// Respond 405 Method Not Allowed for `req.method` on this `tpe` of resource,
/// naming the `allowed` methods both in the body and in an `Allow` header.
fn handle_disallowed_method(&self, req: &mut Request, allowed: &[method::Method], tpe: &str) -> IronResult<Response> {
    // Build an "A, B, and C"-style English list of the allowed methods.
    // (The fold already yields a String; the old trailing .to_string() was a
    // needless extra allocation and has been dropped.)
    let allowed_s = allowed.iter()
        .enumerate()
        .fold("".to_string(), |cur, (i, m)| {
            cur + &m.to_string() +
            if i == allowed.len() - 2 {
                ", and "
            } else if i == allowed.len() - 1 {
                ""
            } else {
                ", "
            }
        });
    log!("{green}{}{reset} tried to {red}{}{reset} on {magenta}{}{reset} ({blue}{}{reset}) but only {red}{}{reset} are allowed",
         req.remote_addr,
         req.method,
         url_path(&req.url),
         tpe,
         allowed_s);
    let resp_text =
        html_response(ERROR_HTML,
                      &["405 Method Not Allowed", &format!("Can't {} on a {}.", req.method, tpe), &format!("<p>Allowed methods: {}</p>", allowed_s)]);
    self.handle_generated_response_encoding(req, status::MethodNotAllowed, resp_text)
        .map(|mut r| {
            r.headers.set(headers::Allow(allowed.to_vec()));
            r
        })
}
/// Reject a PUT that carries Content-Range with 400, citing RFC7231 section 4.3.3.
fn handle_put_partial_content(&self, req: &mut Request) -> IronResult<Response> {
log!("{green}{}{reset} tried to {red}PUT{reset} partial content to {yellow}{}{reset}",
req.remote_addr,
url_path(&req.url));
self.handle_generated_response_encoding(req,
status::BadRequest,
html_response(ERROR_HTML,
&["400 Bad Request",
"<a href=\"https://tools.ietf.org/html/rfc7231#section-4.3.3\">RFC7231 forbids \
partial-content PUT requests.</a>",
""]))
}
/// Store a PUT body at `req_p`: 204 when replacing an existing file, 201 when
/// creating one. The body is streamed into a temp file first, then copied over
/// the destination.
/// NOTE(review): concurrent PUTs of the same filename share one temp path --
/// confirm whether racing uploads are a supported scenario.
fn handle_put_file(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
let existant = req_p.exists();
log!("{green}{}{reset} {} {magenta}{}{reset}, size: {}B",
req.remote_addr,
if existant { "replaced" } else { "created" },
req_p.display(),
*req.headers.get::<headers::ContentLength>().expect("No Content-Length header"));
let &(_, ref temp_dir) = self.writes_temp_dir.as_ref().unwrap();
let temp_file_p = temp_dir.join(req_p.file_name().expect("Failed to get requested file's filename"));
io::copy(&mut req.body, &mut File::create(&temp_file_p).expect("Failed to create temp file"))
.expect("Failed to write requested data to requested file");
// Best-effort: parent dirs may already exist, so the result is ignored.
let _ = fs::create_dir_all(req_p.parent().expect("Failed to get requested file's parent directory"));
fs::copy(&temp_file_p, req_p).expect("Failed to copy temp file to requested file");
Ok(Response::with((if existant {
status::NoContent
} else {
status::Created
},
Header(headers::Server(USER_AGENT.to_string())))))
}
/// Entry point for DELETE: requires writes to be enabled; missing paths (or
/// symlinks while not following them) get a 404.
fn handle_delete(&self, req: &mut Request) -> IronResult<Response> {
if self.writes_temp_dir.is_none() {
return self.handle_forbidden_method(req, "-w", "write requests");
}
let (req_p, symlink, url_err) = self.parse_requested_path(req);
if url_err {
self.handle_invalid_url(req, "<p>Percent-encoding decoded to invalid UTF-8.</p>")
} else if !req_p.exists() || (symlink && !self.follow_symlinks) {
self.handle_nonexistant(req, req_p)
} else {
self.handle_delete_path(req, req_p)
}
}
/// Remove the file, or recursively remove the directory, at `req_p`; reply 204.
fn handle_delete_path(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
    log!("{green}{}{reset} deleted {blue}{} {magenta}{}{reset}",
         req.remote_addr,
         if req_p.is_file() { "file" } else { "directory" },
         req_p.display());
    if !req_p.is_file() {
        fs::remove_dir_all(req_p).expect("Failed to remove requested directory");
    } else {
        fs::remove_file(req_p).expect("Failed to remove requested file");
    }
    Ok(Response::with((status::NoContent, Header(headers::Server(USER_AGENT.to_string())))))
}
/// Answer TRACE by echoing the received request headers back in a body-less
/// 200 response tagged message/http.
fn handle_trace(&self, req: &mut Request) -> IronResult<Response> {
    log!("{green}{}{reset} requested {red}TRACE{reset} for {magenta}{}{reset}",
         req.remote_addr,
         url_path(&req.url));
    let mut echoed = req.headers.clone();
    echoed.set(headers::ContentType("message/http".parse().unwrap()));
    Ok(Response {
        status: Some(status::Ok),
        headers: echoed,
        extensions: TypeMap::new(),
        body: None,
    })
}
/// Respond 403 for a method group disabled on the commandline, telling the
/// user which `switch` re-enables the `desc` feature group.
fn handle_forbidden_method(&self, req: &mut Request, switch: &str, desc: &str) -> IronResult<Response> {
log!("{green}{}{reset} used disabled request method {red}{}{reset} grouped under {}",
req.remote_addr,
req.method,
desc);
self.handle_generated_response_encoding(req,
status::Forbidden,
html_response(ERROR_HTML,
&["403 Forbidden",
"This feature is currently disabled.",
&format!("<p>Ask the server administrator to pass <samp>{}</samp> to the executable to \
enable support for {}.</p>",
switch,
desc)]))
}
/// Respond 501 Not Implemented to request methods outside the supported set.
fn handle_bad_method(&self, req: &mut Request) -> IronResult<Response> {
log!("{green}{}{reset} used invalid request method {red}{}{reset}", req.remote_addr, req.method);
let last_p = format!("<p>Unsupported request method: {}.<br />\nSupported methods: OPTIONS, GET, PUT, DELETE, HEAD and TRACE.</p>",
req.method);
self.handle_generated_response_encoding(req,
status::NotImplemented,
html_response(ERROR_HTML, &["501 Not Implemented", "This operation was not implemented.", &last_p]))
}
fn handle_generated_response_encoding(&self, req: &mut Request, st: status::Status, resp: String) -> IronResult<Response> {
if let Some(encoding) = req.headers.get_mut::<headers::AcceptEncoding>().and_then(|es| response_encoding(&mut **es)) {
let mut cache_key = ([0u8; 32], encoding.to_string());
md6::hash(256, resp.as_bytes(), &mut cache_key.0).expect("Failed to hash generated response");
{
if let Some(enc_resp) = self.cache_gen.read().expect("Generated file cache read lock poisoned").get(&cache_key) {
log!("{} encoded as {} for {:.1}% ratio (cached)",
iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
encoding,
((resp.len() as f64) / (enc_resp.len() as f64)) * 100f64);
return Ok(Response::with((st,
Header(headers::Server(USER_AGENT.to_string())),
Header(headers::ContentEncoding(vec![encoding])),
"text/html;charset=utf-8".parse::<mime::Mime>().unwrap(),
&enc_resp[..])));
}
}
if let Some(enc_resp) = encode_str(&resp, &encoding) {
log!("{} encoded as {} for {:.1}% ratio",
iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
encoding,
((resp.len() as f64) / (enc_resp.len() as f64)) * 100f64);
let mut cache = self.cache_gen.write().expect("Generated file cache read lock poisoned");
cache.insert(cache_key.clone(), enc_resp);
return Ok(Response::with((st,
Header(headers::Server(USER_AGENT.to_string())),
Header(headers::ContentEncoding(vec![encoding])),
"text/html;charset=utf-8".parse::<mime::Mime>().unwrap(),
&cache[&cache_key][..])));
} else {
log!("{} failed to encode as {}, sending identity",
iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
encoding);
}
}
Ok(Response::with((st, Header(headers::Server(USER_AGENT.to_string())), "text/html;charset=utf-8".parse::<mime::Mime>().unwrap(), resp)))
}
/// Resolve the request URL to a filesystem path under the hosted directory.
///
/// Returns `(path, traversed_symlink, percent_decode_error)`.
fn parse_requested_path(&self, req: &Request) -> (PathBuf, bool, bool) {
    req.url.path().into_iter().filter(|p| !p.is_empty()).fold((self.hosted_directory.1.clone(), false, false), |(mut cur, mut sk, mut err), pp| {
        if let Some(pp) = percent_decode(pp) {
            cur.push(&*pp);
        } else {
            err = true;
        }
        // Chase symlinks, but bounded: a self-referential or cyclic link would
        // otherwise spin this loop forever. 40 mirrors the kernel's ELOOP cap.
        let mut depth = 0;
        while let Ok(newlink) = cur.read_link() {
            cur = newlink;
            sk = true;
            depth += 1;
            if depth >= 40 {
                break;
            }
        }
        (cur, sk, err)
    })
}
/// Ensure the given temp subdirectory exists, logging on first creation.
fn create_temp_dir(&self, td: &Option<(String, PathBuf)>) {
    let &(ref name, ref dir) = td.as_ref().unwrap();
    if dir.exists() {
        return;
    }
    if fs::create_dir_all(dir).is_ok() {
        log!("Created temp dir {magenta}{}{reset}", name);
    }
}
}
impl Clone for HttpHandler {
/// Clone the handler's configuration; both caches start out empty
/// (`Default::default()`) in the clone instead of copying the originals'
/// contents.
fn clone(&self) -> HttpHandler {
HttpHandler {
hosted_directory: self.hosted_directory.clone(),
follow_symlinks: self.follow_symlinks,
check_indices: self.check_indices,
writes_temp_dir: self.writes_temp_dir.clone(),
encoded_temp_dir: self.encoded_temp_dir.clone(),
cache_gen: Default::default(),
cache_fs: Default::default(),
}
}
}
/// Attempt to start a server on ports from `from` to `up_to`, inclusive, with the specified handler.
///
/// If an error other than the port being full is encountered it is returned.
///
/// If all ports from the range are not free an error is returned.
///
/// # Examples
///
/// ```
/// # extern crate https;
/// # extern crate iron;
/// # use https::ops::try_ports;
/// # use iron::{status, Response};
/// let server = try_ports(|req| Ok(Response::with((status::Ok, "Abolish the burgeoisie!"))), 8000, 8100).unwrap();
/// ```
pub fn try_ports<H: Handler + Clone>(hndlr: H, from: u16, up_to: u16) -> Result<Listening, Error> {
    // Manual loop instead of `from..up_to + 1`: the exclusive upper bound
    // overflows when `up_to == u16::MAX`.
    let mut port = from;
    while port <= up_to {
        match Iron::new(hndlr.clone()).http(("0.0.0.0", port)) {
            Ok(server) => return Ok(server),
            Err(error) => {
                // Heuristic: bind failures mention the port; anything else is fatal.
                if !error.to_string().contains("port") {
                    return Err(Error::Io {
                        desc: "server",
                        op: "start",
                        more: None,
                    });
                }
            }
        }
        if port == up_to {
            break;
        }
        port += 1;
    }
    Err(Error::Io {
        desc: "server",
        op: "start",
        more: Some("no free ports"),
    })
}
// Simplify desktop dir listing
use md6;
use std::iter;
use time::now;
use unicase::UniCase;
use iron::mime::Mime;
use std::sync::RwLock;
use lazysort::SortedBy;
use std::path::PathBuf;
use std::fs::{self, File};
use std::default::Default;
use iron::modifiers::Header;
use std::collections::HashMap;
use self::super::{Options, Error};
use mime_guess::guess_mime_type_opt;
use std::io::{self, Read, Seek, SeekFrom};
use trivial_colours::{Reset as CReset, Colour as C};
use iron::{headers, status, method, mime, IronResult, Listening, Response, TypeMap, Request, Handler, Iron};
use self::super::util::{url_path, file_hash, is_symlink, encode_str, encode_file, hash_string, html_response, file_binary, client_mobile, percent_decode,
file_icon_suffix, response_encoding, detect_file_as_dir, encoding_extension, file_time_modified, human_readable_size, USER_AGENT,
ERROR_HTML, INDEX_EXTENSIONS, MIN_ENCODING_GAIN, MAX_ENCODING_SIZE, MIN_ENCODING_SIZE, DIRECTORY_LISTING_HTML,
MOBILE_DIRECTORY_LISTING_HTML, BLACKLISTED_ENCODING_EXTENSIONS};
/// Log a line to stdout prefixed with a cyan `[timestamp]`.
///
/// Each format string may reference any of the named colour arguments
/// ({black}..{white}, {reset}); the `{name:.0}` suffixes appended below pass
/// every colour with zero-width (precision-0) formatting, so formats that
/// don't use a given colour still compile.
macro_rules! log {
($fmt:expr) => {
print!("{}[{}]{} ", C::Cyan, now().strftime("%F %T").unwrap(), CReset);
println!(concat!($fmt, "{black:.0}{red:.0}{green:.0}{yellow:.0}{blue:.0}{magenta:.0}{cyan:.0}{white:.0}{reset:.0}"),
black = C::Black,
red = C::Red,
green = C::Green,
yellow = C::Yellow,
blue = C::Blue,
magenta = C::Magenta,
cyan = C::Cyan,
white = C::White,
reset = CReset);
};
// Variant taking format arguments in addition to the colour names.
($fmt:expr, $($arg:tt)*) => {
print!("{}[{}]{} ", C::Cyan, now().strftime("%F %T").unwrap(), CReset);
println!(concat!($fmt, "{black:.0}{red:.0}{green:.0}{yellow:.0}{blue:.0}{magenta:.0}{cyan:.0}{white:.0}{reset:.0}"),
$($arg)*,
black = C::Black,
red = C::Red,
green = C::Green,
yellow = C::Yellow,
blue = C::Blue,
magenta = C::Magenta,
cyan = C::Cyan,
white = C::White,
reset = CReset);
};
}
// TODO: ideally this String here would be Encoding instead but hyper is bad
/// Cache map keyed by (content hash, encoding name).
type CacheT<Cnt> = HashMap<([u8; 32], String), Cnt>;
/// The iron request handler: serving configuration plus per-instance caches.
pub struct HttpHandler {
/// (display name, canonical path) of the directory being served.
pub hosted_directory: (String, PathBuf),
pub follow_symlinks: bool,
/// Whether to look for index.* files in directories.
pub check_indices: bool,
/// (display name, path) for PUT temp files; `None` disables writes.
pub writes_temp_dir: Option<(String, PathBuf)>,
/// (display name, path) for encoded copies; `None` disables encoding.
pub encoded_temp_dir: Option<(String, PathBuf)>,
/// In-memory cache of encoded generated responses.
cache_gen: RwLock<CacheT<Vec<u8>>>,
/// On-disk cache index: path plus whether the encoded copy was kept.
cache_fs: RwLock<CacheT<(PathBuf, bool)>>,
}
impl HttpHandler {
    /// Build a handler from parsed commandline `Options`, with empty caches.
    pub fn new(opts: &Options) -> HttpHandler {
        HttpHandler {
            hosted_directory: opts.hosted_directory.clone(),
            follow_symlinks: opts.follow_symlinks,
            check_indices: opts.check_indices,
            writes_temp_dir: HttpHandler::temp_subdir(&opts.temp_directory, opts.allow_writes, "writes"),
            encoded_temp_dir: HttpHandler::temp_subdir(&opts.temp_directory, opts.encode_fs, "encoded"),
            cache_gen: Default::default(),
            cache_fs: Default::default(),
        }
    }

    /// Derive the `(display name, path)` of the `name`d subdirectory of the
    /// temp dir, or `None` when the feature `flag` is off or no temp dir is set.
    fn temp_subdir(td: &Option<(String, PathBuf)>, flag: bool, name: &str) -> Option<(String, PathBuf)> {
        if !flag {
            return None;
        }
        // map() replaces the old is_some()-then-unwrap() pattern.
        td.as_ref().map(|&(ref temp_name, ref temp_dir)| {
            // Only insert a separator if the display name doesn't already end in one.
            let sep = if temp_name.ends_with('/') || temp_name.ends_with('\\') {
                ""
            } else {
                "/"
            };
            (format!("{}{}{}", temp_name, sep, name), temp_dir.join(name))
        })
    }
}
impl Handler for HttpHandler {
/// Route each request by HTTP method; HEAD reuses the GET path and strips
/// the body; anything unrecognised gets a 501.
fn handle(&self, req: &mut Request) -> IronResult<Response> {
match req.method {
method::Options => self.handle_options(req),
method::Get => self.handle_get(req),
method::Put => self.handle_put(req),
method::Delete => self.handle_delete(req),
method::Head => {
self.handle_get(req).map(|mut r| {
r.body = None;
r
})
}
method::Trace => self.handle_trace(req),
_ => self.handle_bad_method(req),
}
}
}
impl HttpHandler {
/// Answer OPTIONS with 204 and an Allow header listing every supported method.
fn handle_options(&self, req: &mut Request) -> IronResult<Response> {
    log!("{green}{}{reset} asked for {red}OPTIONS{reset}", req.remote_addr);
    let supported = vec![method::Options, method::Get, method::Put, method::Delete, method::Head, method::Trace];
    Ok(Response::with((status::NoContent,
                       Header(headers::Server(USER_AGENT.to_string())),
                       Header(headers::Allow(supported)))))
}
/// Dispatch a GET (or HEAD): bad-URL and nonexistent checks first, then file
/// (optionally ranged) or directory handling.
fn handle_get(&self, req: &mut Request) -> IronResult<Response> {
    let (req_p, symlink, url_err) = self.parse_requested_path(req);
    // Clone the Range header out up front so `req` isn't borrowed later;
    // .cloned() replaces the old map(|ref r| (*r).clone()) contortion.
    let range = req.headers.get::<headers::Range>().cloned();
    if url_err {
        self.handle_invalid_url(req, "<p>Percent-encoding decoded to invalid UTF-8.</p>")
    } else if !req_p.exists() || (symlink && !self.follow_symlinks) {
        self.handle_nonexistant(req, req_p)
    } else if req_p.is_file() {
        // match instead of is_some()/unwrap().
        match range {
            Some(range) => self.handle_get_file_range(req, req_p, range),
            None => self.handle_get_file(req, req_p),
        }
    } else {
        self.handle_get_dir(req, req_p)
    }
}
/// Respond 400 with `cause` (an HTML snippet) embedded in the error page body.
fn handle_invalid_url(&self, req: &mut Request, cause: &str) -> IronResult<Response> {
// Strip the paragraph tags from `cause` for the plain-text log line.
log!("{green}{}{reset} requested to {red}{}{reset} {yellow}{}{reset} with invalid URL -- {}",
req.remote_addr,
req.method,
req.url,
cause.replace("<p>", "").replace("</p>", ""));
self.handle_generated_response_encoding(req,
status::BadRequest,
html_response(ERROR_HTML, &["400 Bad Request", "The request URL was invalid.", cause]))
}
/// Respond 404 with an HTML error page for an entity that doesn't exist
/// (or is a symlink while symlink-following is disabled).
fn handle_nonexistant(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
log!("{green}{}{reset} requested to {red}{}{reset} nonexistant entity {magenta}{}{reset}",
req.remote_addr,
req.method,
req_p.display());
let url_p = url_path(&req.url);
self.handle_generated_response_encoding(req,
status::NotFound,
html_response(ERROR_HTML,
&["404 Not Found", &format!("The requested entity \"{}\" doesn't exist.", url_p), ""]))
}
/// Dispatch a ranged GET on file `req_p` according to the parsed `Range` header.
///
/// Only single byte-ranges are supported; multi-range and custom range units
/// get a 416 via `handle_invalid_range`.
fn handle_get_file_range(&self, req: &mut Request, req_p: PathBuf, range: headers::Range) -> IronResult<Response> {
match range {
headers::Range::Bytes(ref brs) => {
if brs.len() == 1 {
let flen = req_p.metadata().expect("Failed to get requested file metadata").len();
match brs[0] {
// Cases where from is bigger than to are filtered out by iron so can never happen
headers::ByteRangeSpec::FromTo(from, to) => self.handle_get_file_closed_range(req, req_p, from, to),
// "bytes=from-": open-ended; empty when the start lies past EOF
headers::ByteRangeSpec::AllFrom(from) => {
if flen < from {
self.handle_get_file_empty_range(req, req_p, from, flen)
} else {
self.handle_get_file_right_opened_range(req, req_p, from)
}
}
// "bytes=-N": suffix of the last N bytes; empty when N exceeds the file length
headers::ByteRangeSpec::Last(from) => {
if flen < from {
self.handle_get_file_empty_range(req, req_p, from, flen)
} else {
self.handle_get_file_left_opened_range(req, req_p, from)
}
}
}
} else {
self.handle_invalid_range(req, req_p, &range, "More than one range is unsupported.")
}
}
headers::Range::Unregistered(..) => self.handle_invalid_range(req, req_p, &range, "Custom ranges are unsupported."),
}
}
/// Serve a fully-specified `from`-`to` byte range of a file as 206 Partial Content.
///
/// The whole range is buffered in memory before being sent.
fn handle_get_file_closed_range(&self, req: &mut Request, req_p: PathBuf, from: u64, to: u64) -> IronResult<Response> {
    // Extension-based MIME guess, falling back on a binary-content sniff.
    let mime_type = guess_mime_type_opt(&req_p).unwrap_or_else(|| if file_binary(&req_p) {
        "application/octet-stream".parse().unwrap()
    } else {
        "text/plain".parse().unwrap()
    });
    log!("{green}{}{reset} was served byte range {}-{} of file {magenta}{}{reset} as {blue}{}{reset}",
         req.remote_addr,
         from,
         to,
         req_p.display(),
         mime_type);
    let mut buf = vec![0; (to + 1 - from) as usize];
    let mut f = File::open(&req_p).expect("Failed to open requested file");
    f.seek(SeekFrom::Start(from)).expect("Failed to seek requested file");
    // A single read() call is not guaranteed to fill the buffer -- loop until it
    // is full or EOF, then drop any unfilled tail instead of sending zero bytes.
    let mut filled = 0;
    while filled < buf.len() {
        match f.read(&mut buf[filled..]).expect("Failed to read requested file") {
            0 => break,
            n => filled += n,
        }
    }
    buf.truncate(filled);
    Ok(Response::with((status::PartialContent,
                       (Header(headers::Server(USER_AGENT.to_string())),
                        Header(headers::LastModified(headers::HttpDate(file_time_modified(&req_p)))),
                        Header(headers::ContentRange(headers::ContentRangeSpec::Bytes {
                            range: Some((from, to)),
                            instance_length: Some(f.metadata().expect("Failed to get requested file metadata").len()),
                        })),
                        Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes]))),
                       buf,
                       mime_type)))
}
/// Serve a right-open byte range `bytes=from-` (from `from` to EOF).
///
/// Computes the remaining length and delegates the actual streaming to
/// `handle_get_file_opened_range`; caller guarantees `from <= file length`.
fn handle_get_file_right_opened_range(&self, req: &mut Request, req_p: PathBuf, from: u64) -> IronResult<Response> {
    let mime_type = guess_mime_type_opt(&req_p).unwrap_or_else(|| if file_binary(&req_p) {
        "application/octet-stream".parse().unwrap()
    } else {
        "text/plain".parse().unwrap()
    });
    log!("{green}{}{reset} was served file {magenta}{}{reset} from byte {} as {blue}{}{reset}",
         req.remote_addr,
         req_p.display(),
         from,
         mime_type);
    let flen = req_p.metadata().expect("Failed to get requested file metadata").len();
    // Body starts at `from` and spans the remaining `flen - from` bytes.
    self.handle_get_file_opened_range(req_p, SeekFrom::Start(from), from, flen - from, mime_type)
}
/// Serve a suffix byte range `bytes=-from` (the last `from` bytes).
///
/// Seeks `from` bytes back from EOF and delegates the streaming to
/// `handle_get_file_opened_range`; caller guarantees `from <= file length`,
/// so the negated seek offset cannot jump before the start of the file.
fn handle_get_file_left_opened_range(&self, req: &mut Request, req_p: PathBuf, from: u64) -> IronResult<Response> {
    let mime_type = guess_mime_type_opt(&req_p).unwrap_or_else(|| if file_binary(&req_p) {
        "application/octet-stream".parse().unwrap()
    } else {
        "text/plain".parse().unwrap()
    });
    log!("{green}{}{reset} was served last {} bytes of file {magenta}{}{reset} as {blue}{}{reset}",
         req.remote_addr,
         from,
         req_p.display(),
         mime_type);
    let flen = req_p.metadata().expect("Failed to get requested file metadata").len();
    // First served byte is at offset `flen - from`; body length is `from`.
    self.handle_get_file_opened_range(req_p, SeekFrom::End(-(from as i64)), flen - from, from, mime_type)
}
/// Stream an open-ended range of a file as `206 Partial Content`.
///
/// `s` is the starting seek position, `b_from` the absolute offset of the
/// first served byte (for the `Content-Range` header), `clen` the number of
/// bytes to declare in `Content-Length`, and `mt` the response MIME type.
/// The opened, pre-seeked `File` itself is handed to iron as the body.
fn handle_get_file_opened_range(&self, req_p: PathBuf, s: SeekFrom, b_from: u64, clen: u64, mt: Mime) -> IronResult<Response> {
    let mut f = File::open(&req_p).expect("Failed to open requested file");
    let flen = f.metadata().expect("Failed to get requested file metadata").len();
    f.seek(s).expect("Failed to seek requested file");
    Ok(Response::with((status::PartialContent,
                       f,
                       (Header(headers::Server(USER_AGENT.to_string())),
                        Header(headers::LastModified(headers::HttpDate(file_time_modified(&req_p)))),
                        Header(headers::ContentRange(headers::ContentRangeSpec::Bytes {
                            // Open-ended ranges always run to the last byte.
                            range: Some((b_from, flen - 1)),
                            instance_length: Some(flen),
                        })),
                        Header(headers::ContentLength(clen)),
                        Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes]))),
                       mt)))
}
/// Reject an unsupported or malformed `Range` header with
/// `416 Range Not Satisfiable`, echoing the range and `reason` in the page.
fn handle_invalid_range(&self, req: &mut Request, req_p: PathBuf, range: &headers::Range, reason: &str) -> IronResult<Response> {
    self.handle_generated_response_encoding(req,
                                            status::RangeNotSatisfiable,
                                            html_response(ERROR_HTML,
                                                          &["416 Range Not Satisfiable",
                                                            // Typo fix: "fullfilled" -> "fulfilled" (user-visible text).
                                                            &format!("Requested range <samp>{}</samp> could not be fulfilled for file {}.",
                                                                     range,
                                                                     req_p.display()),
                                                            reason]))
}
/// Answer a syntactically valid range that selects no bytes (start past EOF
/// or suffix longer than the file) with `204 No Content`.
///
/// Still reports the requested `(from, to)` pair and the real file length in
/// `Content-Range` so the client can retry with a satisfiable range.
fn handle_get_file_empty_range(&self, req: &mut Request, req_p: PathBuf, from: u64, to: u64) -> IronResult<Response> {
    let mime_type = guess_mime_type_opt(&req_p).unwrap_or_else(|| if file_binary(&req_p) {
        "application/octet-stream".parse().unwrap()
    } else {
        "text/plain".parse().unwrap()
    });
    log!("{green}{}{reset} was served an empty range from file {magenta}{}{reset} as {blue}{}{reset}",
         req.remote_addr,
         req_p.display(),
         mime_type);
    Ok(Response::with((status::NoContent,
                       Header(headers::Server(USER_AGENT.to_string())),
                       Header(headers::LastModified(headers::HttpDate(file_time_modified(&req_p)))),
                       Header(headers::ContentRange(headers::ContentRangeSpec::Bytes {
                           range: Some((from, to)),
                           instance_length: Some(req_p.metadata().expect("Failed to get requested file metadata").len()),
                       })),
                       Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
                       mime_type)))
}
/// Serve a whole file (no `Range` header involved).
///
/// Guesses the MIME type from the extension, falling back to
/// `application/octet-stream` or `text/plain` depending on a binary sniff.
/// Files inside the (MIN_ENCODING_SIZE, MAX_ENCODING_SIZE) window whose
/// extension isn't blacklisted are routed through the content-encoding
/// path when an encoded temp dir is configured.
fn handle_get_file(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
    let mime_type = guess_mime_type_opt(&req_p).unwrap_or_else(|| if file_binary(&req_p) {
        "application/octet-stream".parse().unwrap()
    } else {
        "text/plain".parse().unwrap()
    });
    log!("{green}{}{reset} was served file {magenta}{}{reset} as {blue}{}{reset}",
         req.remote_addr,
         req_p.display(),
         mime_type);
    let flen = req_p.metadata().expect("Failed to get requested file metadata").len();
    // A missing/undecodable extension defaults to "encodable" (unwrap_or(true)).
    if self.encoded_temp_dir.is_some() && flen > MIN_ENCODING_SIZE && flen < MAX_ENCODING_SIZE &&
       req_p.extension().and_then(|s| s.to_str()).map(|s| !BLACKLISTED_ENCODING_EXTENSIONS.contains(&UniCase(s))).unwrap_or(true) {
        self.handle_get_file_encoded(req, req_p, mime_type)
    } else {
        Ok(Response::with((status::Ok,
                           Header(headers::Server(USER_AGENT.to_string())),
                           Header(headers::LastModified(headers::HttpDate(file_time_modified(&req_p)))),
                           Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
                           req_p,
                           mime_type)))
    }
}
/// Serve a file through the content-encoding cache.
///
/// When the client accepts a supported encoding, the (file hash, encoding)
/// pair is looked up in `cache_fs`: a `(path, true)` entry points at a
/// cached encoded file, a `(path, false)` entry marks a file previously
/// judged not worth encoding (served as identity). On a miss the file is
/// encoded into the encoded temp dir and the outcome cached. Falls through
/// to a plain identity response if nothing applies or encoding fails.
fn handle_get_file_encoded(&self, req: &mut Request, req_p: PathBuf, mt: Mime) -> IronResult<Response> {
    if let Some(encoding) = req.headers.get_mut::<headers::AcceptEncoding>().and_then(|es| response_encoding(&mut **es)) {
        self.create_temp_dir(&self.encoded_temp_dir);
        let cache_key = (file_hash(&req_p), encoding.to_string());
        {
            match self.cache_fs.read().expect("Filesystem cache read lock poisoned").get(&cache_key) {
                // Cached encoded copy -- serve it directly.
                Some(&(ref resp_p, true)) => {
                    log!("{} encoded as {} for {:.1}% ratio (cached)",
                         iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
                         encoding,
                         ((req_p.metadata().expect("Failed to get requested file metadata").len() as f64) /
                          (resp_p.metadata().expect("Failed to get encoded file metadata").len() as f64)) * 100f64);
                    return Ok(Response::with((status::Ok,
                                              Header(headers::Server(USER_AGENT.to_string())),
                                              Header(headers::ContentEncoding(vec![encoding])),
                                              Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
                                              resp_p.as_path(),
                                              mt)));
                }
                // Cached "not worth encoding" marker -- serve stored path as identity.
                Some(&(ref resp_p, false)) => {
                    return Ok(Response::with((status::Ok,
                                              Header(headers::Server(USER_AGENT.to_string())),
                                              Header(headers::LastModified(headers::HttpDate(file_time_modified(&resp_p)))),
                                              Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
                                              resp_p.as_path(),
                                              mt)));
                }
                None => (),
            }
        }
        // Cache miss: target path is "<temp>/<hash>.<orig ext>.<encoding ext>".
        let mut resp_p = self.encoded_temp_dir.as_ref().unwrap().1.join(hash_string(&cache_key.0));
        match (req_p.extension(), encoding_extension(&encoding)) {
            (Some(ext), Some(enc)) => resp_p.set_extension(format!("{}.{}", ext.to_str().unwrap_or("ext"), enc)),
            (Some(ext), None) => resp_p.set_extension(format!("{}.{}", ext.to_str().unwrap_or("ext"), encoding)),
            (None, Some(enc)) => resp_p.set_extension(enc),
            (None, None) => resp_p.set_extension(format!("{}", encoding)),
        };
        if encode_file(&req_p, &resp_p, &encoding) {
            // gain = original size / encoded size; >1 means the encoding shrank it.
            let gain = (req_p.metadata().expect("Failed to get requested file metadata").len() as f64) /
                       (resp_p.metadata().expect("Failed to get encoded file metadata").len() as f64);
            if gain < MIN_ENCODING_GAIN {
                // Not worth it: remember that decision and drop the encoded copy.
                let mut cache = self.cache_fs.write().expect("Filesystem cache write lock poisoned");
                cache.insert(cache_key, (req_p.clone(), false));
                fs::remove_file(resp_p).expect("Failed to remove too big encoded file");
            } else {
                log!("{} encoded as {} for {:.1}% ratio",
                     iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
                     encoding,
                     gain * 100f64);
                let mut cache = self.cache_fs.write().expect("Filesystem cache write lock poisoned");
                cache.insert(cache_key, (resp_p.clone(), true));
                return Ok(Response::with((status::Ok,
                                          Header(headers::Server(USER_AGENT.to_string())),
                                          Header(headers::ContentEncoding(vec![encoding])),
                                          Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
                                          resp_p.as_path(),
                                          mt)));
            }
        } else {
            log!("{} failed to encode as {}, sending identity",
                 iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
                 encoding);
        }
    }
    // No acceptable encoding, encoding declined, or encoding failed: identity.
    Ok(Response::with((status::Ok,
                       Header(headers::Server(USER_AGENT.to_string())),
                       Header(headers::LastModified(headers::HttpDate(file_time_modified(&req_p)))),
                       Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])),
                       req_p,
                       mt)))
}
/// Serve a directory: prefer an `index.*` file when index checking is
/// enabled, otherwise render a mobile or desktop directory listing.
fn handle_get_dir(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
    if self.check_indices {
        let mut idx = req_p.join("index");
        if let Some(e) = INDEX_EXTENSIONS.iter()
            .find(|e| {
                // Probe "index.<ext>" for each candidate extension in place.
                idx.set_extension(e);
                idx.exists()
            }) {
            // A trailing slash (empty last URL segment) means relative links
            // inside the index resolve correctly -- serve it directly;
            // otherwise redirect to the slash-terminated URL first.
            if req.url.path().pop() == Some("") {
                let r = self.handle_get_file(req, idx);
                log!("{} found index file for directory {magenta}{}{reset}",
                     iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
                     req_p.display());
                return r;
            } else {
                return self.handle_get_dir_index_no_slash(req, e);
            }
        }
    }
    if client_mobile(&req.headers) {
        self.handle_get_mobile_dir_listing(req, req_p)
    } else {
        self.handle_get_dir_listing(req, req_p)
    }
}
/// Permanently redirect a slash-less directory URL to its slash-terminated
/// form when that directory holds an index file (`index.<idx_ext>`).
fn handle_get_dir_index_no_slash(&self, req: &mut Request, idx_ext: &str) -> IronResult<Response> {
    let new_url = format!("{}/", req.url);
    log!("Redirecting {green}{}{reset} to {yellow}{}{reset} - found index file {magenta}index.{}{reset}",
         req.remote_addr,
         new_url,
         idx_ext);
    // Serving the index without the trailing slash breaks the page's
    // relative links, so redirect instead. Compare:
    // - Without following slash:
    //   https://cloud.githubusercontent.com/assets/6709544/21442017/9eb20d64-c89b-11e6-8c7b-888b5f70a403.png
    // - With following slash:
    //   https://cloud.githubusercontent.com/assets/6709544/21442028/a50918c4-c89b-11e6-8936-c29896947f6a.png
    let server_header = Header(headers::Server(USER_AGENT.to_string()));
    let location_header = Header(headers::Location(new_url));
    Ok(Response::with((status::MovedPermanently, server_header, location_header)))
}
/// Render the mobile HTML directory listing for `req_p`.
///
/// Emits a parent-directory link (unless at the server root), then one
/// two-row entry per child -- directories first, then files, each group
/// sorted case-insensitively -- and an upload control when writes are on.
fn handle_get_mobile_dir_listing(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
    let relpath = (url_path(&req.url) + "/").replace("//", "/");
    let is_root = &req.url.path() == &[""];
    log!("{green}{}{reset} was served mobile directory listing for {magenta}{}{reset}",
         req.remote_addr,
         req_p.display());
    let parent_s = if is_root {
        String::new()
    } else {
        let rel_noslash = &relpath[0..relpath.len() - 1];
        let slash_idx = rel_noslash.rfind('/');
        format!("<a href=\"/{up_path}{up_path_slash}\" class=\"list entry top\"><span class=\"back_arrow_icon\">Parent directory</span></a> \
                 <a href=\"/{up_path}{up_path_slash}\" class=\"list entry bottom\"><span class=\"marker\">@</span>\
                 <span class=\"datetime\">{} UTC</span></a>",
                file_time_modified(req_p.parent()
                        .expect("Failed to get requested directory's parent directory"))
                    .strftime("%F %T")
                    .unwrap(),
                up_path = slash_idx.map(|i| &rel_noslash[0..i]).unwrap_or(""),
                up_path_slash = if slash_idx.is_some() { "/" } else { "" })
    };
    // Directories before files; names compared case-insensitively per group.
    let list_s = req_p.read_dir()
        .expect("Failed to read requested directory")
        // Typo fix in panic message: "trequested" -> "requested".
        .map(|p| p.expect("Failed to iterate over requested directory"))
        .filter(|f| self.follow_symlinks || !is_symlink(f.path()))
        .sorted_by(|lhs, rhs| {
            (lhs.file_type().expect("Failed to get file type").is_file(), lhs.file_name().to_str().expect("Failed to get file name").to_lowercase())
                .cmp(&(rhs.file_type().expect("Failed to get file type").is_file(),
                       rhs.file_name().to_str().expect("Failed to get file name").to_lowercase()))
        })
        .fold("".to_string(), |cur, f| {
            let is_file = f.file_type().expect("Failed to get file type").is_file();
            let fname = f.file_name().into_string().expect("Failed to get file name");
            let path = f.path();
            format!("{}<a href=\"{path}{fname}\" class=\"list entry top\"><span class=\"{}{}_icon\" id=\"{}\">{fname}{}</span></a> \
                     <a href=\"{path}{fname}\" class=\"list entry bottom\"><span class=\"marker\">@</span><span class=\"datetime\">{} UTC</span> \
                     {}{}{}</a>\n",
                    cur,
                    if is_file { "file" } else { "dir" },
                    file_icon_suffix(&path, is_file),
                    path.file_name().map(|p| p.to_str().expect("Filename not UTF-8").replace('.', "_")).as_ref().unwrap_or(&fname),
                    if is_file { "" } else { "/" },
                    file_time_modified(&path).strftime("%F %T").unwrap(),
                    if is_file { "<span class=\"size\">" } else { "" },
                    if is_file {
                        human_readable_size(f.metadata().expect("Failed to get file metadata").len())
                    } else {
                        String::new()
                    },
                    if is_file { "</span>" } else { "" },
                    path = format!("/{}", relpath).replace("//", "/"),
                    fname = fname)
        });
    self.handle_generated_response_encoding(req,
                                            status::Ok,
                                            html_response(MOBILE_DIRECTORY_LISTING_HTML,
                                                          &[&relpath[..],
                                                            if is_root { "" } else { "/" },
                                                            &if self.writes_temp_dir.is_some() {
                                                                r#"<script type="text/javascript">{upload}</script>"#
                                                            } else {
                                                                ""
                                                            },
                                                            &parent_s[..],
                                                            &list_s[..],
                                                            &if self.writes_temp_dir.is_some() {
                                                                "<span class=\"list heading top top-border bottom\"> \
                                                                 Upload files: <input id=\"file_upload\" type=\"file\" multiple /> \
                                                                 </span>"
                                                            } else {
                                                                ""
                                                            }]))
}
/// Render the desktop HTML directory listing for `req_p`.
///
/// Builds a table with a parent-directory row (unless at the server root),
/// one row per child -- directories first, then files, each group sorted
/// case-insensitively -- plus a drag&drop upload form when writes are on.
fn handle_get_dir_listing(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
    let relpath = (url_path(&req.url) + "/").replace("//", "/");
    let is_root = &req.url.path() == &[""];
    log!("{green}{}{reset} was served directory listing for {magenta}{}{reset}",
         req.remote_addr,
         req_p.display());
    let parent_s = if is_root {
        String::new()
    } else {
        let rel_noslash = &relpath[0..relpath.len() - 1];
        let slash_idx = rel_noslash.rfind('/');
        format!("<tr><td><a href=\"/{up_path}{up_path_slash}\" id=\"parent_dir\" class=\"back_arrow_icon\"></a></td> \
                 <td><a href=\"/{up_path}{up_path_slash}\">Parent directory</a></td> \
                 <td><a href=\"/{up_path}{up_path_slash}\" class=\"datetime\">{}</a></td> \
                 <td><a href=\"/{up_path}{up_path_slash}\"> </a></td></tr>",
                file_time_modified(req_p.parent()
                        .expect("Failed to get requested directory's parent directory"))
                    .strftime("%F %T")
                    .unwrap(),
                up_path = slash_idx.map(|i| &rel_noslash[0..i]).unwrap_or(""),
                up_path_slash = if slash_idx.is_some() { "/" } else { "" })
    };
    // Directories before files; names compared case-insensitively per group.
    let list_s = req_p.read_dir()
        .expect("Failed to read requested directory")
        // Typo fix in panic message: "trequested" -> "requested".
        .map(|p| p.expect("Failed to iterate over requested directory"))
        .filter(|f| self.follow_symlinks || !is_symlink(f.path()))
        .sorted_by(|lhs, rhs| {
            (lhs.file_type().expect("Failed to get file type").is_file(), lhs.file_name().to_str().expect("Failed to get file name").to_lowercase())
                .cmp(&(rhs.file_type().expect("Failed to get file type").is_file(),
                       rhs.file_name().to_str().expect("Failed to get file name").to_lowercase()))
        })
        .fold("".to_string(), |cur, f| {
            let is_file = f.file_type().expect("Failed to get file type").is_file();
            let fname = f.file_name().into_string().expect("Failed to get file name");
            let path = f.path();
            let len = f.metadata().expect("Failed to get file metadata").len();
            let abspath = format!("/{}", relpath).replace("//", "/");
            format!("{}<tr><td><a href=\"{path}{fname}\" id=\"{}\" class=\"{}{}_icon\"></a></td> \
                     <td><a href=\"{path}{fname}\">{fname}{}</a></td> <td><a href=\"{path}{fname}\" class=\"datetime\">{}</a></td> \
                     <td><a href=\"{path}{fname}\">{}{}{}{}{}</a></td></tr>\n",
                    cur,
                    path.file_name().map(|p| p.to_str().expect("Filename not UTF-8").replace('.', "_")).as_ref().unwrap_or(&fname),
                    if is_file { "file" } else { "dir" },
                    file_icon_suffix(&path, is_file),
                    if is_file { "" } else { "/" },
                    file_time_modified(&path).strftime("%F %T").unwrap(),
                    // File sizes render as <abbr title="N B">human-readable</abbr>.
                    if is_file { "<abbr title=\"" } else { " " },
                    if is_file {
                        len.to_string()
                    } else {
                        String::new()
                    },
                    if is_file { " B\">" } else { "" },
                    if is_file {
                        human_readable_size(len)
                    } else {
                        String::new()
                    },
                    if is_file { "</abbr>" } else { "" },
                    path = abspath,
                    fname = fname)
        });
    self.handle_generated_response_encoding(req,
                                            status::Ok,
                                            html_response(DIRECTORY_LISTING_HTML,
                                                          &[&relpath[..],
                                                            &if self.writes_temp_dir.is_some() {
                                                                r#"<script type="text/javascript">{upload}</script>"#
                                                            } else {
                                                                ""
                                                            },
                                                            &parent_s[..],
                                                            &list_s[..],
                                                            &if self.writes_temp_dir.is_some() {
                                                                "<hr /> \
                                                                 <p> \
                                                                 Drag&Drop to upload or <input id=\"file_upload\" type=\"file\" multiple />. \
                                                                 </p>"
                                                            } else {
                                                                ""
                                                            }]))
}
/// Entry point for PUT requests.
///
/// Rejects: writes when `-w` wasn't passed (403), invalid percent-encoding
/// (400), PUT to a directory (405 with the allowed methods), a file path
/// that syntactically looks like a directory (400), and partial-content
/// PUTs (400, per RFC 7231). Otherwise stores the body via a temp file.
fn handle_put(&self, req: &mut Request) -> IronResult<Response> {
    if self.writes_temp_dir.is_none() {
        return self.handle_forbidden_method(req, "-w", "write requests");
    }
    let (req_p, _, url_err) = self.parse_requested_path(req);
    if url_err {
        self.handle_invalid_url(req, "<p>Percent-encoding decoded to invalid UTF-8.</p>")
    } else if req_p.is_dir() {
        self.handle_disallowed_method(req, &[method::Options, method::Get, method::Delete, method::Head, method::Trace], "directory")
    } else if detect_file_as_dir(&req_p) {
        self.handle_invalid_url(req, "<p>Attempted to use file as directory.</p>")
    } else if req.headers.has::<headers::ContentRange>() {
        self.handle_put_partial_content(req)
    } else {
        self.create_temp_dir(&self.writes_temp_dir);
        self.handle_put_file(req, req_p)
    }
}
/// Answer `405 Method Not Allowed`, listing `allowed` both in the `Allow`
/// header and in the error page as a human-readable "A, B, and C" string.
fn handle_disallowed_method(&self, req: &mut Request, allowed: &[method::Method], tpe: &str) -> IronResult<Response> {
    // Join as "A, B, and C". Comparing `i + 2 == allowed.len()` (instead of
    // the original `i == allowed.len() - 2`) avoids usize underflow -- and a
    // debug-build panic -- when only a single method is allowed. The
    // trailing `.to_string()` on an already-owned String was dropped too.
    let allowed_s = allowed.iter()
        .enumerate()
        .fold("".to_string(), |cur, (i, m)| {
            cur + &m.to_string() +
            if i + 2 == allowed.len() {
                ", and "
            } else if i + 1 == allowed.len() {
                ""
            } else {
                ", "
            }
        });
    log!("{green}{}{reset} tried to {red}{}{reset} on {magenta}{}{reset} ({blue}{}{reset}) but only {red}{}{reset} are allowed",
         req.remote_addr,
         req.method,
         url_path(&req.url),
         tpe,
         allowed_s);
    let resp_text =
        html_response(ERROR_HTML,
                      &["405 Method Not Allowed", &format!("Can't {} on a {}.", req.method, tpe), &format!("<p>Allowed methods: {}</p>", allowed_s)]);
    self.handle_generated_response_encoding(req, status::MethodNotAllowed, resp_text)
        .map(|mut r| {
            r.headers.set(headers::Allow(allowed.to_vec()));
            r
        })
}
/// Reject a PUT carrying `Content-Range` with `400 Bad Request`.
///
/// RFC 7231 §4.3.3 forbids partial-content PUTs; the error page links to
/// the relevant section.
fn handle_put_partial_content(&self, req: &mut Request) -> IronResult<Response> {
    log!("{green}{}{reset} tried to {red}PUT{reset} partial content to {yellow}{}{reset}",
         req.remote_addr,
         url_path(&req.url));
    self.handle_generated_response_encoding(req,
                                            status::BadRequest,
                                            html_response(ERROR_HTML,
                                                          &["400 Bad Request",
                                                            "<a href=\"https://tools.ietf.org/html/rfc7231#section-4.3.3\">RFC7231 forbids \
                                                             partial-content PUT requests.</a>",
                                                            ""]))
}
/// Store an uploaded file body at `req_p`.
///
/// The body is first written to a file in the writes temp dir and then
/// copied into place, answering `201 Created` for new files and
/// `204 No Content` for replacements.
/// NOTE(review): a missing Content-Length header panics here — confirm
/// whether iron guarantees the header for bodied requests.
fn handle_put_file(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
    let existed = req_p.exists();
    log!("{green}{}{reset} {} {magenta}{}{reset}, size: {}B",
         req.remote_addr,
         if existed { "replaced" } else { "created" },
         req_p.display(),
         *req.headers.get::<headers::ContentLength>().expect("No Content-Length header"));
    let &(_, ref temp_dir) = self.writes_temp_dir.as_ref().unwrap();
    let temp_file_p = temp_dir.join(req_p.file_name().expect("Failed to get requested file's filename"));
    let mut temp_file = File::create(&temp_file_p).expect("Failed to create temp file");
    io::copy(&mut req.body, &mut temp_file).expect("Failed to write requested data to requested file");
    let _ = fs::create_dir_all(req_p.parent().expect("Failed to get requested file's parent directory"));
    fs::copy(&temp_file_p, req_p).expect("Failed to copy temp file to requested file");
    let resp_status = if existed { status::NoContent } else { status::Created };
    Ok(Response::with((resp_status, Header(headers::Server(USER_AGENT.to_string())))))
}
/// Entry point for DELETE requests.
///
/// Rejects writes when `-w` wasn't passed (403) and invalid percent-encoding
/// (400); answers 404 for missing paths or symlinks when symlink-following
/// is disabled; otherwise removes the file or directory tree.
fn handle_delete(&self, req: &mut Request) -> IronResult<Response> {
    if self.writes_temp_dir.is_none() {
        return self.handle_forbidden_method(req, "-w", "write requests");
    }
    let (req_p, symlink, url_err) = self.parse_requested_path(req);
    if url_err {
        self.handle_invalid_url(req, "<p>Percent-encoding decoded to invalid UTF-8.</p>")
    } else if !req_p.exists() || (symlink && !self.follow_symlinks) {
        self.handle_nonexistant(req, req_p)
    } else {
        self.handle_delete_path(req, req_p)
    }
}
/// Delete the file or directory at `req_p` and answer `204 No Content`.
///
/// Files are removed directly; anything else is removed recursively.
fn handle_delete_path(&self, req: &mut Request, req_p: PathBuf) -> IronResult<Response> {
    // Stat once so the logged kind and the removal routine can't disagree
    // if the entity changes between the two original `is_file()` calls.
    let is_file = req_p.is_file();
    log!("{green}{}{reset} deleted {blue}{} {magenta}{}{reset}",
         req.remote_addr,
         if is_file { "file" } else { "directory" },
         req_p.display());
    if is_file {
        fs::remove_file(req_p).expect("Failed to remove requested file");
    } else {
        fs::remove_dir_all(req_p).expect("Failed to remove requested directory");
    }
    Ok(Response::with((status::NoContent, Header(headers::Server(USER_AGENT.to_string())))))
}
/// Answer a TRACE request by echoing the request headers back as a
/// `message/http` response with no body, per RFC 7231 §4.3.8.
fn handle_trace(&self, req: &mut Request) -> IronResult<Response> {
    log!("{green}{}{reset} requested {red}TRACE{reset} for {magenta}{}{reset}",
         req.remote_addr,
         url_path(&req.url));
    // Clone the incoming headers and override the content type for the echo.
    let mut hdr = req.headers.clone();
    hdr.set(headers::ContentType("message/http".parse().unwrap()));
    Ok(Response {
        status: Some(status::Ok),
        headers: hdr,
        extensions: TypeMap::new(),
        body: None,
    })
}
/// Answer `403 Forbidden` for a feature that is disabled by configuration.
///
/// `switch` is the CLI flag (e.g. "-w") that would enable the feature and
/// `desc` the human-readable feature group (e.g. "write requests"); both
/// are echoed into the error page.
fn handle_forbidden_method(&self, req: &mut Request, switch: &str, desc: &str) -> IronResult<Response> {
    log!("{green}{}{reset} used disabled request method {red}{}{reset} grouped under {}",
         req.remote_addr,
         req.method,
         desc);
    self.handle_generated_response_encoding(req,
                                            status::Forbidden,
                                            html_response(ERROR_HTML,
                                                          &["403 Forbidden",
                                                            "This feature is currently disabled.",
                                                            &format!("<p>Ask the server administrator to pass <samp>{}</samp> to the executable to \
                                                                      enable support for {}.</p>",
                                                                     switch,
                                                                     desc)]))
}
/// Answer `501 Not Implemented` for any HTTP method this server doesn't
/// handle, naming the offending method and listing the supported ones.
fn handle_bad_method(&self, req: &mut Request) -> IronResult<Response> {
    log!("{green}{}{reset} used invalid request method {red}{}{reset}", req.remote_addr, req.method);
    let last_p = format!("<p>Unsupported request method: {}.<br />\nSupported methods: OPTIONS, GET, PUT, DELETE, HEAD and TRACE.</p>",
                         req.method);
    let page = html_response(ERROR_HTML, &["501 Not Implemented", "This operation was not implemented.", &last_p]);
    self.handle_generated_response_encoding(req, status::NotImplemented, page)
}
fn handle_generated_response_encoding(&self, req: &mut Request, st: status::Status, resp: String) -> IronResult<Response> {
if let Some(encoding) = req.headers.get_mut::<headers::AcceptEncoding>().and_then(|es| response_encoding(&mut **es)) {
let mut cache_key = ([0u8; 32], encoding.to_string());
md6::hash(256, resp.as_bytes(), &mut cache_key.0).expect("Failed to hash generated response");
{
if let Some(enc_resp) = self.cache_gen.read().expect("Generated file cache read lock poisoned").get(&cache_key) {
log!("{} encoded as {} for {:.1}% ratio (cached)",
iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
encoding,
((resp.len() as f64) / (enc_resp.len() as f64)) * 100f64);
return Ok(Response::with((st,
Header(headers::Server(USER_AGENT.to_string())),
Header(headers::ContentEncoding(vec![encoding])),
"text/html;charset=utf-8".parse::<mime::Mime>().unwrap(),
&enc_resp[..])));
}
}
if let Some(enc_resp) = encode_str(&resp, &encoding) {
log!("{} encoded as {} for {:.1}% ratio",
iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
encoding,
((resp.len() as f64) / (enc_resp.len() as f64)) * 100f64);
let mut cache = self.cache_gen.write().expect("Generated file cache read lock poisoned");
cache.insert(cache_key.clone(), enc_resp);
return Ok(Response::with((st,
Header(headers::Server(USER_AGENT.to_string())),
Header(headers::ContentEncoding(vec![encoding])),
"text/html;charset=utf-8".parse::<mime::Mime>().unwrap(),
&cache[&cache_key][..])));
} else {
log!("{} failed to encode as {}, sending identity",
iter::repeat(' ').take(req.remote_addr.to_string().len()).collect::<String>(),
encoding);
}
}
Ok(Response::with((st, Header(headers::Server(USER_AGENT.to_string())), "text/html;charset=utf-8".parse::<mime::Mime>().unwrap(), resp)))
}
/// Resolve the request URL into `(filesystem path, followed a symlink,
/// percent-decoding failed)`.
///
/// Each URL segment is percent-decoded and pushed onto the hosted root;
/// symlinks are chased eagerly so the `follow_symlinks` policy can be
/// enforced by the callers.
fn parse_requested_path(&self, req: &Request) -> (PathBuf, bool, bool) {
    req.url.path().into_iter().filter(|p| !p.is_empty()).fold((self.hosted_directory.1.clone(), false, false), |(mut cur, mut sk, mut err), pp| {
        if let Some(pp) = percent_decode(pp) {
            cur.push(&*pp);
        } else {
            err = true;
        }
        // Bound symlink resolution so a cyclic (e.g. self-referential)
        // link can't spin this loop forever; 40 matches Linux's MAXSYMLINKS.
        let mut depth = 0;
        while let Ok(newlink) = cur.read_link() {
            cur = newlink;
            sk = true;
            depth += 1;
            if depth >= 40 {
                break;
            }
        }
        (cur, sk, err)
    })
}
/// Ensure the temp directory described by `td` exists, logging on the
/// first successful creation. `td` must be `Some` (callers check this).
fn create_temp_dir(&self, td: &Option<(String, PathBuf)>) {
    let dir_info = td.as_ref().unwrap();
    // Short-circuit: only attempt creation when the directory is missing.
    let freshly_created = !dir_info.1.exists() && fs::create_dir_all(&dir_info.1).is_ok();
    if freshly_created {
        log!("Created temp dir {magenta}{}{reset}", dir_info.0);
    }
}
}
impl Clone for HttpHandler {
    /// Hand-written clone: configuration fields are copied, but both caches
    /// start out empty (`Default::default()`) in the clone rather than
    /// sharing or copying the originals.
    /// NOTE(review): presumably intentional so per-handler caches aren't
    /// shared across threads -- confirm against how iron clones handlers.
    fn clone(&self) -> HttpHandler {
        HttpHandler {
            hosted_directory: self.hosted_directory.clone(),
            follow_symlinks: self.follow_symlinks,
            check_indices: self.check_indices,
            writes_temp_dir: self.writes_temp_dir.clone(),
            encoded_temp_dir: self.encoded_temp_dir.clone(),
            cache_gen: Default::default(),
            cache_fs: Default::default(),
        }
    }
}
/// Attempt to start a server on ports from `from` to `up_to`, inclusive, with the specified handler.
///
/// If an error other than the port being full is encountered it is returned.
///
/// If all ports from the range are not free an error is returned.
///
/// # Examples
///
/// ```
/// # extern crate https;
/// # extern crate iron;
/// # use https::ops::try_ports;
/// # use iron::{status, Response};
/// let server = try_ports(|req| Ok(Response::with((status::Ok, "Abolish the burgeoisie!"))), 8000, 8100).unwrap();
/// ```
pub fn try_ports<H: Handler + Clone>(hndlr: H, from: u16, up_to: u16) -> Result<Listening, Error> {
    // Iterate in u32 so the inclusive upper bound can't overflow when
    // `up_to == u16::MAX` (the original `from..up_to + 1` would panic/wrap).
    for port in from as u32..up_to as u32 + 1 {
        let port = port as u16;
        match Iron::new(hndlr.clone()).http(("0.0.0.0", port)) {
            Ok(server) => return Ok(server),
            Err(error) => {
                // Iron exposes no structured cause; an error mentioning
                // "port" is treated as "address in use" and probing continues.
                if !error.to_string().contains("port") {
                    return Err(Error::Io {
                        desc: "server",
                        op: "start",
                        more: None,
                    });
                }
            }
        }
    }
    Err(Error::Io {
        desc: "server",
        op: "start",
        more: Some("no free ports"),
    })
}
|
use crate::server::ServerData;
use actix_http::http::header;
use actix_web::{error::ResponseError, get, http::StatusCode, web, HttpRequest, HttpResponse};
use mongodb::oid::ObjectId;
use serde::Deserialize;
use serde_qs::actix::QsQuery;
/// `GET thumbnail/{course_id}` -- serve a course's thumbnail as JPEG.
///
/// The path segment is parsed as a MongoDB `ObjectId` (malformed ids map to
/// `400 Bad Request` via `GetCourse2ThumbnailError`); the `size` query
/// parameter selects the thumbnail variant.
#[get("thumbnail/{course_id}")]
pub fn get_thumbnail(
    data: web::Data<ServerData>,
    path: web::Path<String>,
    query: QsQuery<GetThumbnail2>,
    // Unused, but kept (underscored) so the extractor signature is unchanged.
    _req: HttpRequest,
) -> Result<HttpResponse, GetCourse2ThumbnailError> {
    let course_id = path.into_inner();
    let course_id = ObjectId::with_string(&course_id)?;
    let thumb = data.get_course2_thumbnail(course_id, query.into_inner())?;
    Ok(HttpResponse::Ok()
        .set_header(header::CONTENT_TYPE, "image/jpeg")
        .body(thumb))
}
/// Query-string parameters accepted by the thumbnail endpoint.
#[derive(Debug, Deserialize)]
pub struct GetThumbnail2 {
    // Requested variant; an absent `size` falls back to `Size2::default()`.
    #[serde(default)]
    pub size: Size2,
}
/// Thumbnail size variants; pixel dimensions live in `get_dimensions`.
#[derive(Clone, Debug, Deserialize, PartialEq)]
pub enum Size2 {
    /// Small (160x90).
    S,
    /// Medium (320x180).
    M,
    /// Large (480x270).
    L,
    /// Full resolution (640x360).
    ORIGINAL,
}
impl Size2 {
    /// Pixel dimensions `(width, height)` of this variant.
    ///
    /// All variants share a 16:9 aspect ratio, so the height is derived
    /// from the width.
    pub fn get_dimensions(&self) -> (u32, u32) {
        let width = match *self {
            Size2::S => 160,
            Size2::M => 320,
            Size2::L => 480,
            Size2::ORIGINAL => 640,
        };
        (width, width * 9 / 16)
    }
}
impl Default for Size2 {
    /// An unspecified `size` query parameter selects the full resolution.
    fn default() -> Self {
        Size2::ORIGINAL
    }
}
/// Storage field name for each thumbnail variant.
///
/// Implemented as `From<Size2> for String` rather than a hand-written
/// `Into` impl: the standard blanket impl derives `Size2: Into<String>`
/// from this, so existing `.into()` call sites keep working.
impl From<Size2> for String {
    fn from(size: Size2) -> String {
        match size {
            Size2::S => "thumb_s".to_string(),
            Size2::M => "thumb_m".to_string(),
            Size2::L => "thumb_l".to_string(),
            Size2::ORIGINAL => "thumb".to_string(),
        }
    }
}
/// Failure modes of the thumbnail endpoint; HTTP statuses are assigned in
/// the `ResponseError` impl.
#[derive(Debug, Fail)]
pub enum GetCourse2ThumbnailError {
    // Empty display: the 404 response carries no descriptive body.
    // NOTE(review): presumably intentional -- confirm.
    #[fail(display = "")]
    CourseNotFound(ObjectId),
    #[fail(display = "Object id invalid.\nReason: {}", _0)]
    MongoOid(mongodb::oid::Error),
    #[fail(display = "[GetCourse2ThumbnailError::Mongo]: {}", _0)]
    Mongo(mongodb::Error),
    #[fail(display = "[GetCourse2ThumbnailError::Image]: {}", _0)]
    Image(image::ImageError),
}
// `From` conversions so `?` can propagate the underlying library errors
// directly out of the handler.
impl From<mongodb::oid::Error> for GetCourse2ThumbnailError {
    fn from(err: mongodb::oid::Error) -> Self {
        GetCourse2ThumbnailError::MongoOid(err)
    }
}
impl From<mongodb::Error> for GetCourse2ThumbnailError {
    fn from(err: mongodb::Error) -> Self {
        GetCourse2ThumbnailError::Mongo(err)
    }
}
impl From<image::ImageError> for GetCourse2ThumbnailError {
    fn from(err: image::ImageError) -> Self {
        GetCourse2ThumbnailError::Image(err)
    }
}
impl ResponseError for GetCourse2ThumbnailError {
    /// Map each failure to an HTTP status: unknown course -> 404, any
    /// malformed/invalid ObjectId -> 400, database or image-processing
    /// failures -> 500.
    fn error_response(&self) -> HttpResponse {
        match *self {
            GetCourse2ThumbnailError::CourseNotFound(_) => HttpResponse::new(StatusCode::NOT_FOUND),
            // Every ObjectId parse failure is the client's fault: answer 400
            // for ALL `MongoOid` errors, not only `FromHexError` (the
            // previous catch-all wrongly returned 500 for the rest).
            GetCourse2ThumbnailError::MongoOid(_) => HttpResponse::new(StatusCode::BAD_REQUEST),
            GetCourse2ThumbnailError::Mongo(_) => {
                HttpResponse::new(StatusCode::INTERNAL_SERVER_ERROR)
            }
            GetCourse2ThumbnailError::Image(_) => {
                HttpResponse::new(StatusCode::INTERNAL_SERVER_ERROR)
            }
        }
    }
}
Fix thumbnail2 ObjectId error status code: return 400 Bad Request for every invalid-ObjectId error, not only hex-decoding failures (previously others returned 500).
use crate::server::ServerData;
use actix_http::http::header;
use actix_web::{error::ResponseError, get, http::StatusCode, web, HttpRequest, HttpResponse};
use mongodb::oid::ObjectId;
use serde::Deserialize;
use serde_qs::actix::QsQuery;
/// `GET thumbnail/{course_id}` -- serve a course's thumbnail as JPEG.
///
/// The path segment is parsed as a MongoDB `ObjectId` (malformed ids map to
/// `400 Bad Request` via `GetCourse2ThumbnailError`); the `size` query
/// parameter selects the thumbnail variant.
#[get("thumbnail/{course_id}")]
pub fn get_thumbnail(
    data: web::Data<ServerData>,
    path: web::Path<String>,
    query: QsQuery<GetThumbnail2>,
    // Unused, but kept (underscored) so the extractor signature is unchanged.
    _req: HttpRequest,
) -> Result<HttpResponse, GetCourse2ThumbnailError> {
    let course_id = path.into_inner();
    let course_id = ObjectId::with_string(&course_id)?;
    let thumb = data.get_course2_thumbnail(course_id, query.into_inner())?;
    Ok(HttpResponse::Ok()
        .set_header(header::CONTENT_TYPE, "image/jpeg")
        .body(thumb))
}
/// Query-string parameters accepted by the thumbnail endpoint.
#[derive(Debug, Deserialize)]
pub struct GetThumbnail2 {
    // Requested variant; an absent `size` falls back to `Size2::default()`.
    #[serde(default)]
    pub size: Size2,
}
/// Thumbnail size variants; pixel dimensions live in `get_dimensions`.
#[derive(Clone, Debug, Deserialize, PartialEq)]
pub enum Size2 {
    /// Small (160x90).
    S,
    /// Medium (320x180).
    M,
    /// Large (480x270).
    L,
    /// Full resolution (640x360).
    ORIGINAL,
}
impl Size2 {
    /// Pixel dimensions `(width, height)` of this variant.
    ///
    /// All variants share a 16:9 aspect ratio, so the height is derived
    /// from the width.
    pub fn get_dimensions(&self) -> (u32, u32) {
        let width = match *self {
            Size2::S => 160,
            Size2::M => 320,
            Size2::L => 480,
            Size2::ORIGINAL => 640,
        };
        (width, width * 9 / 16)
    }
}
impl Default for Size2 {
    /// An unspecified `size` query parameter selects the full resolution.
    fn default() -> Self {
        Size2::ORIGINAL
    }
}
/// Storage field name for each thumbnail variant.
///
/// Implemented as `From<Size2> for String` rather than a hand-written
/// `Into` impl: the standard blanket impl derives `Size2: Into<String>`
/// from this, so existing `.into()` call sites keep working.
impl From<Size2> for String {
    fn from(size: Size2) -> String {
        match size {
            Size2::S => "thumb_s".to_string(),
            Size2::M => "thumb_m".to_string(),
            Size2::L => "thumb_l".to_string(),
            Size2::ORIGINAL => "thumb".to_string(),
        }
    }
}
/// Failure modes of the thumbnail endpoint; HTTP statuses are assigned in
/// the `ResponseError` impl.
#[derive(Debug, Fail)]
pub enum GetCourse2ThumbnailError {
    // Empty display: the 404 response carries no descriptive body.
    // NOTE(review): presumably intentional -- confirm.
    #[fail(display = "")]
    CourseNotFound(ObjectId),
    #[fail(display = "Object id invalid.\nReason: {}", _0)]
    MongoOid(mongodb::oid::Error),
    #[fail(display = "[GetCourse2ThumbnailError::Mongo]: {}", _0)]
    Mongo(mongodb::Error),
    #[fail(display = "[GetCourse2ThumbnailError::Image]: {}", _0)]
    Image(image::ImageError),
}
// `From` conversions so `?` can propagate the underlying library errors
// directly out of the handler.
impl From<mongodb::oid::Error> for GetCourse2ThumbnailError {
    fn from(err: mongodb::oid::Error) -> Self {
        GetCourse2ThumbnailError::MongoOid(err)
    }
}
impl From<mongodb::Error> for GetCourse2ThumbnailError {
    fn from(err: mongodb::Error) -> Self {
        GetCourse2ThumbnailError::Mongo(err)
    }
}
impl From<image::ImageError> for GetCourse2ThumbnailError {
    fn from(err: image::ImageError) -> Self {
        GetCourse2ThumbnailError::Image(err)
    }
}
impl ResponseError for GetCourse2ThumbnailError {
    /// Map each failure to an HTTP status: unknown course -> 404, any
    /// malformed/invalid ObjectId -> 400, database or image-processing
    /// failures -> 500.
    fn error_response(&self) -> HttpResponse {
        match *self {
            GetCourse2ThumbnailError::CourseNotFound(_) => HttpResponse::new(StatusCode::NOT_FOUND),
            // The explicit `FromHexError` arm was redundant: the catch-all
            // `MongoOid` arm below it returned the same 400, so one arm
            // covers all ObjectId errors.
            GetCourse2ThumbnailError::MongoOid(_) => HttpResponse::new(StatusCode::BAD_REQUEST),
            GetCourse2ThumbnailError::Mongo(_) => {
                HttpResponse::new(StatusCode::INTERNAL_SERVER_ERROR)
            }
            GetCourse2ThumbnailError::Image(_) => {
                HttpResponse::new(StatusCode::INTERNAL_SERVER_ERROR)
            }
        }
    }
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! [Length values][length].
//!
//! [length]: https://drafts.csswg.org/css-values/#lengths
use app_units::Au;
use cssparser::{Parser, Token, BasicParseError};
use euclid::Size2D;
use font_metrics::FontMetricsQueryResult;
use parser::{Parse, ParserContext};
use std::{cmp, fmt, mem};
use std::ascii::AsciiExt;
use std::ops::{Add, Mul};
use style_traits::{ToCss, ParseError, StyleParseError};
use style_traits::values::specified::AllowedNumericType;
use stylesheets::CssRuleType;
use super::{AllowQuirks, Number, ToComputedValue, Percentage};
use values::{Auto, CSSFloat, Either, FONT_MEDIUM_PX, None_, Normal};
use values::{ExtremumLength, serialize_dimension};
use values::computed::{self, CSSPixelLength, Context};
use values::generics::NonNegative;
use values::specified::NonNegativeNumber;
use values::specified::calc::CalcNode;
pub use values::specified::calc::CalcLengthOrPercentage;
pub use super::image::{ColorStop, EndingShape as GradientEndingShape, Gradient};
pub use super::image::{GradientKind, Image};
/// Number of app units per pixel (Gecko's fixed-point layout unit: 60 au = 1 CSS px)
pub const AU_PER_PX: CSSFloat = 60.;
/// Number of app units per inch (1in = 96 CSS px)
pub const AU_PER_IN: CSSFloat = AU_PER_PX * 96.;
/// Number of app units per centimeter (1in = 2.54cm)
pub const AU_PER_CM: CSSFloat = AU_PER_IN / 2.54;
/// Number of app units per millimeter (1in = 25.4mm)
pub const AU_PER_MM: CSSFloat = AU_PER_IN / 25.4;
/// Number of app units per quarter (1q = 1/4 of a millimeter)
pub const AU_PER_Q: CSSFloat = AU_PER_MM / 4.;
/// Number of app units per point (1pt = 1/72 of an inch)
pub const AU_PER_PT: CSSFloat = AU_PER_IN / 72.;
/// Number of app units per pica (1pc = 12pt)
pub const AU_PER_PC: CSSFloat = AU_PER_PT * 12.;
/// Same as Gecko's AppUnitsToIntCSSPixels.
///
/// Converts a raw app-unit count to an integer number of CSS pixels,
/// rounding to the nearest pixel during the conversion.
pub fn au_to_int_px(au: f32) -> i32 {
    let pixels = au / AU_PER_PX;
    pixels.round() as i32
}
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// A font-relative length, resolved against a reference font size
/// (see `FontBaseSize`).
pub enum FontRelativeLength {
    /// An "em" value: https://drafts.csswg.org/css-values/#em
    Em(CSSFloat),
    /// An "ex" value: https://drafts.csswg.org/css-values/#ex
    Ex(CSSFloat),
    /// A "ch" value: https://drafts.csswg.org/css-values/#ch
    Ch(CSSFloat),
    /// A "rem" value: https://drafts.csswg.org/css-values/#rem
    Rem(CSSFloat)
}
impl ToCss for FontRelativeLength {
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result
        where W: fmt::Write
    {
        // Split the magnitude from its unit suffix, then serialize once.
        let (value, unit) = match *self {
            FontRelativeLength::Em(v) => (v, "em"),
            FontRelativeLength::Ex(v) => (v, "ex"),
            FontRelativeLength::Ch(v) => (v, "ch"),
            FontRelativeLength::Rem(v) => (v, "rem"),
        };
        serialize_dimension(value, unit, dest)
    }
}
/// A source to resolve font-relative units against.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FontBaseSize {
    /// Use the font-size of the current element.
    CurrentStyle,
    /// Use the inherited (parent) font-size.
    InheritedStyle,
    /// Use a custom base size, in app units.
    Custom(Au),
}
impl FontBaseSize {
    /// Resolve this base size to an app-unit value in the given context.
    pub fn resolve(&self, context: &Context) -> Au {
        match *self {
            FontBaseSize::Custom(size) => size,
            FontBaseSize::CurrentStyle => {
                let size = context.style().get_font().clone_font_size();
                Au::from(size)
            }
            FontBaseSize::InheritedStyle => {
                let size = context.style().get_parent_font().clone_font_size();
                Au::from(size)
            }
        }
    }
}
impl FontRelativeLength {
    /// Computes the font-relative length, resolved against `base_size`.
    pub fn to_computed_value(&self, context: &Context, base_size: FontBaseSize) -> CSSPixelLength {
        use std::f32;
        let (reference_size, length) = self.reference_font_size_and_length(context, base_size);
        // Clamp the product into the finite f32 range before building the
        // computed value.
        let pixel = (length * reference_size.to_f32_px()).min(f32::MAX).max(f32::MIN);
        CSSPixelLength::new(pixel)
    }

    /// Return reference font size. We use the base_size flag to pass a different size
    /// for computing font-size and unconstrained font-size.
    ///
    /// This returns a pair: the reference font size (in app units) and the
    /// unpacked relative length (the raw unit count, e.g. the `2.` of `2em`).
    fn reference_font_size_and_length(
        &self,
        context: &Context,
        base_size: FontBaseSize,
    ) -> (Au, CSSFloat) {
        // Helper that queries the platform font system for metrics of the
        // current font at `reference_font_size`.
        fn query_font_metrics(context: &Context, reference_font_size: Au) -> FontMetricsQueryResult {
            context.font_metrics_provider.query(context.style().get_font(),
                                                reference_font_size,
                                                context.style().writing_mode,
                                                context.in_media_query,
                                                context.device())
        }

        let reference_font_size = base_size.resolve(context);
        match *self {
            FontRelativeLength::Em(length) => {
                // An `em` resolves against the font-size, so record that
                // dependency for the rule cache. No dependency is recorded
                // when resolving against the inherited style.
                if !matches!(base_size, FontBaseSize::InheritedStyle) {
                    context.rule_cache_conditions.borrow_mut()
                        .set_font_size_dependency(
                            reference_font_size.into()
                        );
                }
                (reference_font_size, length)
            },
            FontRelativeLength::Ex(length) => {
                // The resolved value depends on font metrics, which the rule
                // cache cannot key on, so mark the result uncacheable when
                // resolving a non-inherited property.
                if context.for_non_inherited_property.is_some() {
                    context.rule_cache_conditions.borrow_mut().set_uncacheable();
                }
                let reference_size = match query_font_metrics(context, reference_font_size) {
                    FontMetricsQueryResult::Available(metrics) => {
                        metrics.x_height
                    },
                    // https://drafts.csswg.org/css-values/#ex
                    //
                    // In the cases where it is impossible or impractical to
                    // determine the x-height, a value of 0.5em must be
                    // assumed.
                    //
                    FontMetricsQueryResult::NotAvailable => {
                        reference_font_size.scale_by(0.5)
                    },
                };
                (reference_size, length)
            },
            FontRelativeLength::Ch(length) => {
                // Same situation as `ex`: metrics-dependent, so uncacheable.
                if context.for_non_inherited_property.is_some() {
                    context.rule_cache_conditions.borrow_mut().set_uncacheable();
                }
                let reference_size = match query_font_metrics(context, reference_font_size) {
                    FontMetricsQueryResult::Available(metrics) => {
                        metrics.zero_advance_measure
                    },
                    // https://drafts.csswg.org/css-values/#ch
                    //
                    // In the cases where it is impossible or impractical to
                    // determine the measure of the "0" glyph, it must be
                    // assumed to be 0.5em wide by 1em tall. Thus, the ch
                    // unit falls back to 0.5em in the general case, and to
                    // 1em when it would be typeset upright (i.e.
                    // writing-mode is vertical-rl or vertical-lr and
                    // text-orientation is upright).
                    //
                    FontMetricsQueryResult::NotAvailable => {
                        if context.style().writing_mode.is_vertical() {
                            reference_font_size
                        } else {
                            reference_font_size.scale_by(0.5)
                        }
                    }
                };
                (reference_size, length)
            }
            FontRelativeLength::Rem(length) => {
                // https://drafts.csswg.org/css-values/#rem:
                //
                //     When specified on the font-size property of the root
                //     element, the rem units refer to the property's initial
                //     value.
                //
                let reference_size = if context.is_root_element {
                    reference_font_size
                } else {
                    context.device().root_font_size()
                };
                (reference_size, length)
            }
        }
    }
}
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// A viewport-relative length, a percentage of the viewport's size on
/// some axis.
///
/// https://drafts.csswg.org/css-values/#viewport-relative-lengths
pub enum ViewportPercentageLength {
    /// A vw unit (1% of viewport width): https://drafts.csswg.org/css-values/#vw
    Vw(CSSFloat),
    /// A vh unit (1% of viewport height): https://drafts.csswg.org/css-values/#vh
    Vh(CSSFloat),
    /// 1% of the smaller viewport dimension: https://drafts.csswg.org/css-values/#vmin
    Vmin(CSSFloat),
    /// 1% of the larger viewport dimension: https://drafts.csswg.org/css-values/#vmax
    Vmax(CSSFloat)
}
impl ToCss for ViewportPercentageLength {
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        // Split the magnitude from its unit suffix, then serialize once.
        let (value, unit) = match *self {
            ViewportPercentageLength::Vw(v) => (v, "vw"),
            ViewportPercentageLength::Vh(v) => (v, "vh"),
            ViewportPercentageLength::Vmin(v) => (v, "vmin"),
            ViewportPercentageLength::Vmax(v) => (v, "vmax"),
        };
        serialize_dimension(value, unit, dest)
    }
}
impl ViewportPercentageLength {
    /// Computes the given viewport-relative length for the given viewport size.
    pub fn to_computed_value(&self, viewport_size: Size2D<Au>) -> CSSPixelLength {
        // Pick the axis this unit refers to, and unpack the percentage count.
        let (percent, axis) = match *self {
            ViewportPercentageLength::Vw(p) => (p, viewport_size.width),
            ViewportPercentageLength::Vh(p) => (p, viewport_size.height),
            ViewportPercentageLength::Vmin(p) =>
                (p, cmp::min(viewport_size.width, viewport_size.height)),
            ViewportPercentageLength::Vmax(p) =>
                (p, cmp::max(viewport_size.width, viewport_size.height)),
        };
        // FIXME: Bug 1396535, we need to fix the extremely small viewport length for transform.
        // See bug 989802. We truncate so that adding multiple viewport units
        // that add up to 100 does not overflow due to rounding differences.
        let trunc_scaled = ((axis.0 as f64) * percent as f64 / 100.).trunc();
        Au::from_f64_au(trunc_scaled).into()
    }
}
/// HTML5 "character width", as defined in HTML5 § 14.5.4.
///
/// The wrapped value is the number of characters.
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct CharacterWidth(pub i32);

impl CharacterWidth {
    /// Computes the given character width against a reference font size.
    pub fn to_computed_value(&self, reference_font_size: Au) -> CSSPixelLength {
        // This applies the *converting a character width to pixels* algorithm as specified
        // in HTML5 § 14.5.4: (chars - 1) * average_advance + max_advance.
        //
        // TODO(pcwalton): Find these from the font.
        let average_advance = reference_font_size.scale_by(0.5);
        let max_advance = reference_font_size;
        let au = average_advance.scale_by(self.0 as CSSFloat - 1.0) + max_advance;
        au.into()
    }
}
/// Represents an absolute length together with the unit it was
/// specified in. The unit is preserved so serialization round-trips.
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum AbsoluteLength {
    /// An absolute length in pixels (px)
    Px(CSSFloat),
    /// An absolute length in inches (in)
    In(CSSFloat),
    /// An absolute length in centimeters (cm)
    Cm(CSSFloat),
    /// An absolute length in millimeters (mm)
    Mm(CSSFloat),
    /// An absolute length in quarter-millimeters (q)
    Q(CSSFloat),
    /// An absolute length in points (pt)
    Pt(CSSFloat),
    /// An absolute length in pica (pc)
    Pc(CSSFloat),
}
impl AbsoluteLength {
    /// Whether this length has a zero magnitude, regardless of unit.
    fn is_zero(&self) -> bool {
        match *self {
            AbsoluteLength::Px(v) |
            AbsoluteLength::In(v) |
            AbsoluteLength::Cm(v) |
            AbsoluteLength::Mm(v) |
            AbsoluteLength::Q(v) |
            AbsoluteLength::Pt(v) |
            AbsoluteLength::Pc(v) => v == 0.,
        }
    }

    /// Convert this into a pixel value.
    #[inline]
    pub fn to_px(&self) -> CSSFloat {
        use std::f32;
        // Unpack the magnitude together with its unit's pixels-per-unit
        // ratio, expressed via the app-unit constants.
        let (value, px_per_unit) = match *self {
            AbsoluteLength::Px(v) => (v, 1.),
            AbsoluteLength::In(v) => (v, AU_PER_IN / AU_PER_PX),
            AbsoluteLength::Cm(v) => (v, AU_PER_CM / AU_PER_PX),
            AbsoluteLength::Mm(v) => (v, AU_PER_MM / AU_PER_PX),
            AbsoluteLength::Q(v) => (v, AU_PER_Q / AU_PER_PX),
            AbsoluteLength::Pt(v) => (v, AU_PER_PT / AU_PER_PX),
            AbsoluteLength::Pc(v) => (v, AU_PER_PC / AU_PER_PX),
        };
        // Clamp into the finite f32 range.
        (value * px_per_unit).min(f32::MAX).max(f32::MIN)
    }
}
impl ToComputedValue for AbsoluteLength {
    type ComputedValue = CSSPixelLength;

    /// Absolute lengths compute by converting to pixels; the context is unused.
    fn to_computed_value(&self, _context: &Context) -> Self::ComputedValue {
        CSSPixelLength::new(self.to_px())
    }

    /// The reverse mapping always produces the `Px` variant.
    fn from_computed_value(computed: &Self::ComputedValue) -> Self {
        AbsoluteLength::Px(computed.px())
    }
}
impl ToCss for AbsoluteLength {
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        // Split the magnitude from its unit suffix, then serialize once.
        let (value, unit) = match *self {
            AbsoluteLength::Px(v) => (v, "px"),
            AbsoluteLength::In(v) => (v, "in"),
            AbsoluteLength::Cm(v) => (v, "cm"),
            AbsoluteLength::Mm(v) => (v, "mm"),
            AbsoluteLength::Q(v) => (v, "q"),
            AbsoluteLength::Pt(v) => (v, "pt"),
            AbsoluteLength::Pc(v) => (v, "pc"),
        };
        serialize_dimension(value, unit, dest)
    }
}
impl Mul<CSSFloat> for AbsoluteLength {
type Output = AbsoluteLength;
#[inline]
fn mul(self, scalar: CSSFloat) -> AbsoluteLength {
match self {
AbsoluteLength::Px(v) => AbsoluteLength::Px(v * scalar),
AbsoluteLength::In(v) => AbsoluteLength::In(v * scalar),
AbsoluteLength::Cm(v) => AbsoluteLength::Cm(v * scalar),
AbsoluteLength::Mm(v) => AbsoluteLength::Mm(v * scalar),
AbsoluteLength::Q(v) => AbsoluteLength::Q(v * scalar),
AbsoluteLength::Pt(v) => AbsoluteLength::Pt(v * scalar),
AbsoluteLength::Pc(v) => AbsoluteLength::Pc(v * scalar),
}
}
}
impl Add<AbsoluteLength> for AbsoluteLength {
type Output = Self;
#[inline]
fn add(self, rhs: Self) -> Self {
match (self, rhs) {
(AbsoluteLength::Px(x), AbsoluteLength::Px(y)) => AbsoluteLength::Px(x + y),
(AbsoluteLength::In(x), AbsoluteLength::In(y)) => AbsoluteLength::In(x + y),
(AbsoluteLength::Cm(x), AbsoluteLength::Cm(y)) => AbsoluteLength::Cm(x + y),
(AbsoluteLength::Mm(x), AbsoluteLength::Mm(y)) => AbsoluteLength::Mm(x + y),
(AbsoluteLength::Q(x), AbsoluteLength::Q(y)) => AbsoluteLength::Q(x + y),
(AbsoluteLength::Pt(x), AbsoluteLength::Pt(y)) => AbsoluteLength::Pt(x + y),
(AbsoluteLength::Pc(x), AbsoluteLength::Pc(y)) => AbsoluteLength::Pc(x + y),
_ => AbsoluteLength::Px(self.to_px() + rhs.to_px()),
}
}
}
/// Represents a physical length (the Gecko-specific `mozmm` unit),
/// resolved against the device's real DPI rather than CSS pixels.
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg(feature = "gecko")]
#[derive(MallocSizeOf)]
pub struct PhysicalLength(pub CSSFloat);
#[cfg(feature = "gecko")]
impl PhysicalLength {
    /// Whether this is a zero-magnitude physical length.
    fn is_zero(&self) -> bool {
        self.0 == 0.
    }

    /// Computes this physical length (in mm) to CSS pixels, using the
    /// device's app-units-per-physical-inch ratio from Gecko.
    pub fn to_computed_value(&self, context: &Context) -> CSSPixelLength {
        use gecko_bindings::bindings;
        use std::f32;
        // Same as Gecko
        const INCH_PER_MM: f32 = 1. / 25.4;
        let au_per_physical_inch = unsafe {
            bindings::Gecko_GetAppUnitsPerPhysicalInch(context.device().pres_context()) as f32
        };
        let px_per_physical_inch = au_per_physical_inch / AU_PER_PX;
        // mm -> inches -> CSS px, clamped into the finite f32 range.
        let pixel = self.0 * px_per_physical_inch * INCH_PER_MM;
        CSSPixelLength::new(pixel.min(f32::MAX).max(f32::MIN))
    }
}
#[cfg(feature = "gecko")]
impl ToCss for PhysicalLength {
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result
        where W: fmt::Write,
    {
        // Physical lengths always serialize with the `mozmm` unit.
        serialize_dimension(self.0, "mozmm", dest)
    }
}
#[cfg(feature = "gecko")]
impl Mul<CSSFloat> for PhysicalLength {
    type Output = PhysicalLength;

    /// Scale the physical magnitude by `scalar`.
    #[inline]
    fn mul(self, scalar: CSSFloat) -> PhysicalLength {
        let PhysicalLength(value) = self;
        PhysicalLength(value * scalar)
    }
}
/// A `<length>` without taking `calc` expressions into account
///
/// https://drafts.csswg.org/css-values/#lengths
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum NoCalcLength {
    /// An absolute length
    ///
    /// https://drafts.csswg.org/css-values/#absolute-length
    Absolute(AbsoluteLength),
    /// A font-relative length:
    ///
    /// https://drafts.csswg.org/css-values/#font-relative-lengths
    FontRelative(FontRelativeLength),
    /// A viewport-relative length.
    ///
    /// https://drafts.csswg.org/css-values/#viewport-relative-lengths
    ViewportPercentage(ViewportPercentageLength),
    /// HTML5 "character width", as defined in HTML5 § 14.5.4.
    ///
    /// This cannot be specified by the user directly and is only generated by
    /// `Stylist::synthesize_rules_for_legacy_attributes()`.
    ServoCharacterWidth(CharacterWidth),
    /// A physical length (mozmm) based on DPI; Gecko-only.
    #[cfg(feature = "gecko")]
    Physical(PhysicalLength),
}
impl ToCss for NoCalcLength {
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        match *self {
            NoCalcLength::Absolute(length) => length.to_css(dest),
            NoCalcLength::FontRelative(length) => length.to_css(dest),
            NoCalcLength::ViewportPercentage(length) => length.to_css(dest),
            /* This should only be reached from style dumping code, since
             * ServoCharacterWidth is never parsed from author CSS; the
             * "CharWidth(n)" output is not valid CSS. */
            NoCalcLength::ServoCharacterWidth(CharacterWidth(i)) => {
                dest.write_str("CharWidth(")?;
                i.to_css(dest)?;
                dest.write_char(')')
            }
            #[cfg(feature = "gecko")]
            NoCalcLength::Physical(length) => length.to_css(dest),
        }
    }
}
impl Mul<CSSFloat> for NoCalcLength {
type Output = NoCalcLength;
#[inline]
fn mul(self, scalar: CSSFloat) -> NoCalcLength {
match self {
NoCalcLength::Absolute(v) => NoCalcLength::Absolute(v * scalar),
NoCalcLength::FontRelative(v) => NoCalcLength::FontRelative(v * scalar),
NoCalcLength::ViewportPercentage(v) => NoCalcLength::ViewportPercentage(v * scalar),
NoCalcLength::ServoCharacterWidth(_) => panic!("Can't multiply ServoCharacterWidth!"),
#[cfg(feature = "gecko")]
NoCalcLength::Physical(v) => NoCalcLength::Physical(v * scalar),
}
}
}
impl NoCalcLength {
    /// Parse a given absolute or relative dimension.
    ///
    /// Viewport-relative units are rejected inside `@page` rules, where no
    /// viewport exists to resolve them against.
    pub fn parse_dimension(context: &ParserContext, value: CSSFloat, unit: &str)
                           -> Result<NoCalcLength, ()> {
        let in_page_rule = context.rule_type.map_or(false, |rule_type| rule_type == CssRuleType::Page);
        match_ignore_ascii_case! { unit,
            // absolute units
            "px" => Ok(NoCalcLength::Absolute(AbsoluteLength::Px(value))),
            "in" => Ok(NoCalcLength::Absolute(AbsoluteLength::In(value))),
            "cm" => Ok(NoCalcLength::Absolute(AbsoluteLength::Cm(value))),
            "mm" => Ok(NoCalcLength::Absolute(AbsoluteLength::Mm(value))),
            "q" => Ok(NoCalcLength::Absolute(AbsoluteLength::Q(value))),
            "pt" => Ok(NoCalcLength::Absolute(AbsoluteLength::Pt(value))),
            "pc" => Ok(NoCalcLength::Absolute(AbsoluteLength::Pc(value))),
            // font-relative
            "em" => Ok(NoCalcLength::FontRelative(FontRelativeLength::Em(value))),
            "ex" => Ok(NoCalcLength::FontRelative(FontRelativeLength::Ex(value))),
            "ch" => Ok(NoCalcLength::FontRelative(FontRelativeLength::Ch(value))),
            "rem" => Ok(NoCalcLength::FontRelative(FontRelativeLength::Rem(value))),
            // viewport percentages (disallowed in @page)
            "vw" => {
                if in_page_rule {
                    return Err(())
                }
                Ok(NoCalcLength::ViewportPercentage(ViewportPercentageLength::Vw(value)))
            },
            "vh" => {
                if in_page_rule {
                    return Err(())
                }
                Ok(NoCalcLength::ViewportPercentage(ViewportPercentageLength::Vh(value)))
            },
            "vmin" => {
                if in_page_rule {
                    return Err(())
                }
                Ok(NoCalcLength::ViewportPercentage(ViewportPercentageLength::Vmin(value)))
            },
            "vmax" => {
                if in_page_rule {
                    return Err(())
                }
                Ok(NoCalcLength::ViewportPercentage(ViewportPercentageLength::Vmax(value)))
            },
            // Gecko-only physical unit
            #[cfg(feature = "gecko")]
            "mozmm" => Ok(NoCalcLength::Physical(PhysicalLength(value))),
            _ => Err(())
        }
    }

    #[inline]
    /// Returns a `zero` length (represented as `0px`).
    pub fn zero() -> NoCalcLength {
        NoCalcLength::Absolute(AbsoluteLength::Px(0.))
    }

    #[inline]
    /// Checks whether the length value is zero.
    ///
    /// Note that font- and viewport-relative lengths report `false` even for
    /// a zero magnitude.
    pub fn is_zero(&self) -> bool {
        match *self {
            NoCalcLength::Absolute(length) => length.is_zero(),
            #[cfg(feature = "gecko")]
            NoCalcLength::Physical(length) => length.is_zero(),
            _ => false
        }
    }

    #[inline]
    /// Returns a `medium` length (the initial font-size, in px).
    pub fn medium() -> NoCalcLength {
        NoCalcLength::Absolute(AbsoluteLength::Px(FONT_MEDIUM_PX as f32))
    }

    /// Get an absolute length from a px value.
    #[inline]
    pub fn from_px(px_value: CSSFloat) -> NoCalcLength {
        NoCalcLength::Absolute(AbsoluteLength::Px(px_value))
    }
}
/// An extension to `NoCalcLength` to parse `calc` expressions.
/// This is commonly used for the `<length>` values.
///
/// https://drafts.csswg.org/css-values/#lengths
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub enum Length {
    /// The internal length type that cannot parse `calc`
    NoCalc(NoCalcLength),
    /// A calc expression (boxed, since it is much larger than the
    /// `NoCalc` variant).
    ///
    /// https://drafts.csswg.org/css-values/#calc-notation
    Calc(Box<CalcLengthOrPercentage>),
}
impl From<NoCalcLength> for Length {
    /// Wrap a non-calc length as the `NoCalc` variant.
    #[inline]
    fn from(value: NoCalcLength) -> Self {
        Length::NoCalc(value)
    }
}
impl Mul<CSSFloat> for Length {
    type Output = Length;

    /// Scale a non-calc length. Panics for calc expressions, which cannot be
    /// scaled after parsing.
    #[inline]
    fn mul(self, scalar: CSSFloat) -> Length {
        if let Length::NoCalc(inner) = self {
            Length::NoCalc(inner * scalar)
        } else {
            panic!("Can't multiply Calc!")
        }
    }
}
impl Mul<CSSFloat> for FontRelativeLength {
type Output = FontRelativeLength;
#[inline]
fn mul(self, scalar: CSSFloat) -> FontRelativeLength {
match self {
FontRelativeLength::Em(v) => FontRelativeLength::Em(v * scalar),
FontRelativeLength::Ex(v) => FontRelativeLength::Ex(v * scalar),
FontRelativeLength::Ch(v) => FontRelativeLength::Ch(v * scalar),
FontRelativeLength::Rem(v) => FontRelativeLength::Rem(v * scalar),
}
}
}
impl Mul<CSSFloat> for ViewportPercentageLength {
type Output = ViewportPercentageLength;
#[inline]
fn mul(self, scalar: CSSFloat) -> ViewportPercentageLength {
match self {
ViewportPercentageLength::Vw(v) => ViewportPercentageLength::Vw(v * scalar),
ViewportPercentageLength::Vh(v) => ViewportPercentageLength::Vh(v * scalar),
ViewportPercentageLength::Vmin(v) => ViewportPercentageLength::Vmin(v * scalar),
ViewportPercentageLength::Vmax(v) => ViewportPercentageLength::Vmax(v * scalar),
}
}
}
impl Length {
    #[inline]
    /// Returns a `zero` length.
    pub fn zero() -> Length {
        Length::NoCalc(NoCalcLength::zero())
    }

    /// Parse a given absolute or relative dimension.
    pub fn parse_dimension(context: &ParserContext, value: CSSFloat, unit: &str)
                           -> Result<Length, ()> {
        NoCalcLength::parse_dimension(context, value, unit).map(Length::NoCalc)
    }

    /// Shared parsing logic for `<length>` values.
    ///
    /// `num_context` restricts which numeric values are acceptable (e.g.
    /// non-negative only), and `allow_quirks` controls whether the unitless
    /// length quirk is honored.
    #[inline]
    fn parse_internal<'i, 't>(context: &ParserContext,
                              input: &mut Parser<'i, 't>,
                              num_context: AllowedNumericType,
                              allow_quirks: AllowQuirks)
                              -> Result<Length, ParseError<'i>> {
        // FIXME: remove early returns when lifetimes are non-lexical
        {
            let token = input.next()?;
            match *token {
                Token::Dimension { value, ref unit, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    return Length::parse_dimension(context, value, unit)
                        .map_err(|()| BasicParseError::UnexpectedToken(token.clone()).into())
                }
                Token::Number { value, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    // A unitless non-zero number is only a valid length when
                    // the parsing mode allows it (e.g. SVG) or under the
                    // quirks-mode unitless length quirk; zero always parses.
                    if value != 0. &&
                       !context.parsing_mode.allows_unitless_lengths() &&
                       !allow_quirks.allowed(context.quirks_mode) {
                        return Err(StyleParseError::UnspecifiedError.into())
                    }
                    return Ok(Length::NoCalc(NoCalcLength::Absolute(AbsoluteLength::Px(value))))
                },
                // A `calc(` function falls through to the nested-block parse
                // below; anything else is an error.
                Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {}
                ref token => return Err(BasicParseError::UnexpectedToken(token.clone()).into())
            }
        }
        input.parse_nested_block(|input| {
            CalcNode::parse_length(context, input, num_context).map(|calc| Length::Calc(Box::new(calc)))
        })
    }

    /// Parse a non-negative length
    #[inline]
    pub fn parse_non_negative<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                      -> Result<Length, ParseError<'i>> {
        Self::parse_non_negative_quirky(context, input, AllowQuirks::No)
    }

    /// Parse a non-negative length, allowing quirks.
    #[inline]
    pub fn parse_non_negative_quirky<'i, 't>(context: &ParserContext,
                                             input: &mut Parser<'i, 't>,
                                             allow_quirks: AllowQuirks)
                                             -> Result<Length, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::NonNegative, allow_quirks)
    }

    /// Get an absolute length from a px value.
    #[inline]
    pub fn from_px(px_value: CSSFloat) -> Length {
        Length::NoCalc(NoCalcLength::from_px(px_value))
    }

    /// Extract inner length without a clone, replacing it with a 0 Au
    ///
    /// Use when you need to move out of a length array without cloning
    #[inline]
    pub fn take(&mut self) -> Self {
        mem::replace(self, Length::zero())
    }
}
impl Parse for Length {
    /// Parse a `<length>`, with the unitless quirk disabled.
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        Self::parse_quirky(context, input, AllowQuirks::No)
    }
}

impl Length {
    /// Parses a length, with quirks.
    pub fn parse_quirky<'i, 't>(context: &ParserContext,
                                input: &mut Parser<'i, 't>,
                                allow_quirks: AllowQuirks)
                                -> Result<Self, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::All, allow_quirks)
    }
}
impl<T: Parse> Either<Length, T> {
    /// Parse a non-negative length, preferring the alternative type `T`.
    #[inline]
    pub fn parse_non_negative_length<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                             -> Result<Self, ParseError<'i>> {
        // `try` rewinds the parser on failure, so falling through to the
        // length parse is safe.
        match input.try(|input| T::parse(context, input)) {
            Ok(second) => Ok(Either::Second(second)),
            Err(_) => {
                Length::parse_internal(context, input, AllowedNumericType::NonNegative, AllowQuirks::No)
                    .map(Either::First)
            }
        }
    }
}
/// A wrapper of Length, whose value must be >= 0.
pub type NonNegativeLength = NonNegative<Length>;

impl From<NoCalcLength> for NonNegativeLength {
    #[inline]
    fn from(len: NoCalcLength) -> Self {
        // NOTE: no clamping is performed; the caller is responsible for the
        // non-negativity invariant.
        NonNegative::<Length>(Length::NoCalc(len))
    }
}

impl From<Length> for NonNegativeLength {
    #[inline]
    fn from(len: Length) -> Self {
        // NOTE: no clamping is performed here either.
        NonNegative::<Length>(len)
    }
}
impl<T: Parse> Parse for Either<NonNegativeLength, T> {
    /// Try the alternative type first; otherwise parse a non-negative length.
    #[inline]
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        match input.try(|input| T::parse(context, input)) {
            Ok(second) => Ok(Either::Second(second)),
            Err(_) => {
                Length::parse_internal(context, input, AllowedNumericType::NonNegative, AllowQuirks::No)
                    .map(NonNegative::<Length>)
                    .map(Either::First)
            }
        }
    }
}
impl NonNegativeLength {
/// Returns a `zero` length.
#[inline]
pub fn zero() -> Self {
Length::zero().into()
}
/// Get an absolute length from a px value.
#[inline]
pub fn from_px(px_value: CSSFloat) -> Self {
Length::from_px(px_value.max(0.)).into()
}
}
/// Either a NonNegativeLength or the `normal` keyword.
pub type NonNegativeLengthOrNormal = Either<NonNegativeLength, Normal>;

/// Either a NonNegativeLength or the `auto` keyword.
pub type NonNegativeLengthOrAuto = Either<NonNegativeLength, Auto>;

/// Either a NonNegativeLength or a NonNegativeNumber value.
pub type NonNegativeLengthOrNumber = Either<NonNegativeLength, NonNegativeNumber>;
/// A length or a percentage value, including `calc()` combinations of both.
#[allow(missing_docs)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub enum LengthOrPercentage {
    Length(NoCalcLength),
    Percentage(computed::Percentage),
    Calc(Box<CalcLengthOrPercentage>),
}
impl From<Length> for LengthOrPercentage {
    /// A specified length maps straight onto the corresponding
    /// `LengthOrPercentage` variant.
    fn from(len: Length) -> LengthOrPercentage {
        match len {
            Length::Calc(calc) => LengthOrPercentage::Calc(calc),
            Length::NoCalc(no_calc) => LengthOrPercentage::Length(no_calc),
        }
    }
}

impl From<NoCalcLength> for LengthOrPercentage {
    /// Wrap a non-calc length as the `Length` variant.
    #[inline]
    fn from(length: NoCalcLength) -> Self {
        LengthOrPercentage::Length(length)
    }
}
impl From<Percentage> for LengthOrPercentage {
    #[inline]
    fn from(pc: Percentage) -> Self {
        // Plain percentages map onto the `Percentage` variant directly.
        if !pc.is_calc() {
            return LengthOrPercentage::Percentage(computed::Percentage(pc.get()));
        }
        // A calc() percentage becomes a calc expression carrying only the
        // percentage component.
        LengthOrPercentage::Calc(Box::new(CalcLengthOrPercentage {
            percentage: Some(computed::Percentage(pc.get())),
            .. Default::default()
        }))
    }
}

impl From<computed::Percentage> for LengthOrPercentage {
    /// Wrap a computed percentage as the `Percentage` variant.
    #[inline]
    fn from(percentage: computed::Percentage) -> Self {
        LengthOrPercentage::Percentage(percentage)
    }
}
impl LengthOrPercentage {
    #[inline]
    /// Returns a `zero` length (`0px`, not `0%`).
    pub fn zero() -> LengthOrPercentage {
        LengthOrPercentage::Length(NoCalcLength::zero())
    }

    /// Shared parsing logic: a dimension, a percentage, a (possibly quirky)
    /// unitless number, or a `calc()` expression.
    fn parse_internal<'i, 't>(context: &ParserContext,
                              input: &mut Parser<'i, 't>,
                              num_context: AllowedNumericType,
                              allow_quirks: AllowQuirks)
                              -> Result<LengthOrPercentage, ParseError<'i>>
    {
        // FIXME: remove early returns when lifetimes are non-lexical
        {
            let token = input.next()?;
            match *token {
                Token::Dimension { value, ref unit, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    return NoCalcLength::parse_dimension(context, value, unit)
                        .map(LengthOrPercentage::Length)
                        .map_err(|()| BasicParseError::UnexpectedToken(token.clone()).into())
                }
                Token::Percentage { unit_value, .. } if num_context.is_ok(context.parsing_mode, unit_value) => {
                    return Ok(LengthOrPercentage::Percentage(computed::Percentage(unit_value)))
                }
                Token::Number { value, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    // Unitless non-zero numbers are only valid under the
                    // quirks-mode unitless length quirk or a parsing mode
                    // that allows them; zero always parses.
                    //
                    // NOTE(review): on rejection this yields UnexpectedToken,
                    // whereas Length::parse_internal yields
                    // UnspecifiedError — confirm the asymmetry is intended.
                    if value != 0. &&
                       !context.parsing_mode.allows_unitless_lengths() &&
                       !allow_quirks.allowed(context.quirks_mode) {
                        return Err(BasicParseError::UnexpectedToken(token.clone()).into())
                    } else {
                        return Ok(LengthOrPercentage::Length(NoCalcLength::from_px(value)))
                    }
                }
                // A `calc(` function falls through to the nested-block parse
                // below; anything else is an error.
                Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {}
                _ => return Err(BasicParseError::UnexpectedToken(token.clone()).into())
            }
        }
        let calc = input.parse_nested_block(|i| {
            CalcNode::parse_length_or_percentage(context, i, num_context)
        })?;
        Ok(LengthOrPercentage::Calc(Box::new(calc)))
    }

    /// Parse a non-negative length.
    #[inline]
    pub fn parse_non_negative<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                      -> Result<LengthOrPercentage, ParseError<'i>> {
        Self::parse_non_negative_quirky(context, input, AllowQuirks::No)
    }

    /// Parse a non-negative length, with quirks.
    #[inline]
    pub fn parse_non_negative_quirky<'i, 't>(context: &ParserContext,
                                             input: &mut Parser<'i, 't>,
                                             allow_quirks: AllowQuirks)
                                             -> Result<LengthOrPercentage, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::NonNegative, allow_quirks)
    }

    /// Parse a length, treating dimensionless numbers as pixels
    ///
    /// https://www.w3.org/TR/SVG2/types.html#presentation-attribute-css-value
    pub fn parse_numbers_are_pixels<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                            -> Result<LengthOrPercentage, ParseError<'i>> {
        if let Ok(lop) = input.try(|i| Self::parse(context, i)) {
            return Ok(lop)
        }
        // TODO(emilio): Probably should use Number::parse_non_negative to
        // handle calc()?
        let num = input.expect_number()?;
        Ok(LengthOrPercentage::Length(NoCalcLength::Absolute(AbsoluteLength::Px(num))))
    }

    /// Parse a non-negative length, treating dimensionless numbers as pixels
    ///
    /// This is nonstandard behavior used by Firefox for SVG
    pub fn parse_numbers_are_pixels_non_negative<'i, 't>(context: &ParserContext,
                                                         input: &mut Parser<'i, 't>)
                                                         -> Result<LengthOrPercentage, ParseError<'i>> {
        if let Ok(lop) = input.try(|i| Self::parse_non_negative(context, i)) {
            return Ok(lop)
        }
        // TODO(emilio): Probably should use Number::parse_non_negative to
        // handle calc()?
        let num = input.expect_number()?;
        if num >= 0. {
            Ok(LengthOrPercentage::Length(NoCalcLength::Absolute(AbsoluteLength::Px(num))))
        } else {
            Err(StyleParseError::UnspecifiedError.into())
        }
    }

    /// Extract value from ref without a clone, replacing it with a 0 Au
    ///
    /// Use when you need to move out of a length array without cloning
    #[inline]
    pub fn take(&mut self) -> Self {
        mem::replace(self, LengthOrPercentage::zero())
    }
}
impl Parse for LengthOrPercentage {
    /// Parse a `<length-percentage>`, with the unitless quirk disabled.
    #[inline]
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        Self::parse_quirky(context, input, AllowQuirks::No)
    }
}

impl LengthOrPercentage {
    /// Parses a length or a percentage, allowing the unitless length quirk.
    /// https://quirks.spec.whatwg.org/#the-unitless-length-quirk
    #[inline]
    pub fn parse_quirky<'i, 't>(context: &ParserContext,
                                input: &mut Parser<'i, 't>,
                                allow_quirks: AllowQuirks) -> Result<Self, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::All, allow_quirks)
    }
}
/// Either a `<length>`, a `<percentage>`, or the `auto` keyword.
#[allow(missing_docs)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub enum LengthOrPercentageOrAuto {
    Length(NoCalcLength),
    Percentage(computed::Percentage),
    Auto,
    Calc(Box<CalcLengthOrPercentage>),
}
impl From<NoCalcLength> for LengthOrPercentageOrAuto {
    /// Wrap a non-calc length as the `Length` variant.
    #[inline]
    fn from(length: NoCalcLength) -> Self {
        LengthOrPercentageOrAuto::Length(length)
    }
}

impl From<computed::Percentage> for LengthOrPercentageOrAuto {
    /// Wrap a computed percentage as the `Percentage` variant.
    #[inline]
    fn from(percentage: computed::Percentage) -> Self {
        LengthOrPercentageOrAuto::Percentage(percentage)
    }
}
impl LengthOrPercentageOrAuto {
    /// Shared parsing logic: a dimension, a percentage, a (possibly quirky)
    /// unitless number, the `auto` keyword, or a `calc()` expression.
    fn parse_internal<'i, 't>(context: &ParserContext,
                              input: &mut Parser<'i, 't>,
                              num_context: AllowedNumericType,
                              allow_quirks: AllowQuirks)
                              -> Result<Self, ParseError<'i>> {
        // FIXME: remove early returns when lifetimes are non-lexical
        {
            let token = input.next()?;
            match *token {
                Token::Dimension { value, ref unit, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    return NoCalcLength::parse_dimension(context, value, unit)
                        .map(LengthOrPercentageOrAuto::Length)
                        .map_err(|()| BasicParseError::UnexpectedToken(token.clone()).into())
                }
                Token::Percentage { unit_value, .. } if num_context.is_ok(context.parsing_mode, unit_value) => {
                    return Ok(LengthOrPercentageOrAuto::Percentage(computed::Percentage(unit_value)))
                }
                Token::Number { value, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    // Unitless non-zero numbers are only valid under the
                    // quirks-mode unitless length quirk or a parsing mode
                    // that allows them; zero always parses.
                    if value != 0. &&
                       !context.parsing_mode.allows_unitless_lengths() &&
                       !allow_quirks.allowed(context.quirks_mode) {
                        return Err(StyleParseError::UnspecifiedError.into())
                    }
                    return Ok(LengthOrPercentageOrAuto::Length(
                        NoCalcLength::Absolute(AbsoluteLength::Px(value))
                    ))
                }
                Token::Ident(ref value) if value.eq_ignore_ascii_case("auto") => {
                    return Ok(LengthOrPercentageOrAuto::Auto)
                }
                // A `calc(` function falls through to the nested-block parse
                // below; anything else is an error.
                Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {}
                _ => return Err(BasicParseError::UnexpectedToken(token.clone()).into())
            }
        }
        let calc = input.parse_nested_block(|i| {
            CalcNode::parse_length_or_percentage(context, i, num_context)
        })?;
        Ok(LengthOrPercentageOrAuto::Calc(Box::new(calc)))
    }

    /// Parse a non-negative length, percentage, or auto.
    #[inline]
    pub fn parse_non_negative<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                      -> Result<LengthOrPercentageOrAuto, ParseError<'i>> {
        Self::parse_non_negative_quirky(context, input, AllowQuirks::No)
    }

    /// Parse a non-negative length, percentage, or auto, with quirks.
    #[inline]
    pub fn parse_non_negative_quirky<'i, 't>(context: &ParserContext,
                                             input: &mut Parser<'i, 't>,
                                             allow_quirks: AllowQuirks)
                                             -> Result<Self, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::NonNegative, allow_quirks)
    }

    /// Returns the `auto` value.
    pub fn auto() -> Self {
        LengthOrPercentageOrAuto::Auto
    }

    /// Returns a value representing a `0` length.
    pub fn zero() -> Self {
        LengthOrPercentageOrAuto::Length(NoCalcLength::zero())
    }

    /// Returns a value representing `0%`.
    pub fn zero_percent() -> Self {
        LengthOrPercentageOrAuto::Percentage(computed::Percentage::zero())
    }
}
impl Parse for LengthOrPercentageOrAuto {
    /// Parse with the unitless quirk disabled.
    #[inline]
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        Self::parse_quirky(context, input, AllowQuirks::No)
    }
}

impl LengthOrPercentageOrAuto {
    /// Parses, with quirks.
    #[inline]
    pub fn parse_quirky<'i, 't>(context: &ParserContext,
                                input: &mut Parser<'i, 't>,
                                allow_quirks: AllowQuirks)
                                -> Result<Self, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::All, allow_quirks)
    }
}
/// Either a `<length>`, a `<percentage>`, or the `none` keyword.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
#[allow(missing_docs)]
pub enum LengthOrPercentageOrNone {
    /// A non-calc length.
    Length(NoCalcLength),
    /// A percentage.
    Percentage(computed::Percentage),
    /// A `calc()` expression, boxed to keep the enum small.
    Calc(Box<CalcLengthOrPercentage>),
    /// The `none` keyword.
    None,
}
impl LengthOrPercentageOrNone {
    /// Parses `<length> | <percentage> | none`, restricted by `num_context`
    /// (e.g. non-negative values only) and honoring the unitless-length
    /// quirk when `allow_quirks` permits it. `calc()` is handled by
    /// delegating to `CalcNode` after the simple-token cases.
    fn parse_internal<'i, 't>(context: &ParserContext,
                              input: &mut Parser<'i, 't>,
                              num_context: AllowedNumericType,
                              allow_quirks: AllowQuirks)
                              -> Result<LengthOrPercentageOrNone, ParseError<'i>>
    {
        // FIXME: remove early returns when lifetimes are non-lexical
        {
            let token = input.next()?;
            match *token {
                // A dimension such as `1px`, as long as the numeric value
                // passes the `num_context` restriction.
                Token::Dimension { value, ref unit, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    return NoCalcLength::parse_dimension(context, value, unit)
                        .map(LengthOrPercentageOrNone::Length)
                        // parse_dimension reports failure as `()`; surface the
                        // offending token to the caller instead.
                        .map_err(|()| BasicParseError::UnexpectedToken(token.clone()).into())
                }
                Token::Percentage { unit_value, .. } if num_context.is_ok(context.parsing_mode, unit_value) => {
                    return Ok(LengthOrPercentageOrNone::Percentage(computed::Percentage(unit_value)))
                }
                // A bare number: `0` is always a valid length; nonzero numbers
                // need either a parsing mode that allows unitless lengths or
                // the quirks-mode quirk.
                Token::Number { value, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    if value != 0. && !context.parsing_mode.allows_unitless_lengths() &&
                       !allow_quirks.allowed(context.quirks_mode) {
                        return Err(StyleParseError::UnspecifiedError.into())
                    }
                    return Ok(LengthOrPercentageOrNone::Length(
                        NoCalcLength::Absolute(AbsoluteLength::Px(value))
                    ))
                }
                // Fall through to parse the calc() arguments below.
                Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {}
                Token::Ident(ref value) if value.eq_ignore_ascii_case("none") => {
                    return Ok(LengthOrPercentageOrNone::None)
                }
                _ => return Err(BasicParseError::UnexpectedToken(token.clone()).into())
            }
        }
        // Only reachable for `calc(`: parse the expression inside the
        // function's parenthesized block.
        let calc = input.parse_nested_block(|i| {
            CalcNode::parse_length_or_percentage(context, i, num_context)
        })?;
        Ok(LengthOrPercentageOrNone::Calc(Box::new(calc)))
    }

    /// Parse a non-negative LengthOrPercentageOrNone.
    #[inline]
    pub fn parse_non_negative<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                      -> Result<Self, ParseError<'i>> {
        Self::parse_non_negative_quirky(context, input, AllowQuirks::No)
    }

    /// Parse a non-negative LengthOrPercentageOrNone, with quirks.
    #[inline]
    pub fn parse_non_negative_quirky<'i, 't>(context: &ParserContext,
                                             input: &mut Parser<'i, 't>,
                                             allow_quirks: AllowQuirks)
                                             -> Result<Self, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::NonNegative, allow_quirks)
    }
}
impl Parse for LengthOrPercentageOrNone {
    /// Parses any-sign values, with quirks disallowed.
    #[inline]
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::All, AllowQuirks::No)
    }
}
/// A wrapper of LengthOrPercentage, whose value must be >= 0.
/// The invariant is enforced at parse time (see the `Parse` impl below,
/// which goes through `parse_non_negative`).
pub type NonNegativeLengthOrPercentage = NonNegative<LengthOrPercentage>;
impl From<NoCalcLength> for NonNegativeLengthOrPercentage {
    /// Wraps a raw length; the caller is responsible for the value being
    /// non-negative.
    #[inline]
    fn from(len: NoCalcLength) -> Self {
        NonNegative(len.into())
    }
}
impl Parse for NonNegativeLengthOrPercentage {
#[inline]
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
LengthOrPercentage::parse_non_negative(context, input).map(NonNegative::<LengthOrPercentage>)
}
}
impl NonNegativeLengthOrPercentage {
    /// Returns a `zero` length.
    #[inline]
    pub fn zero() -> Self {
        NonNegative(LengthOrPercentage::zero())
    }

    /// Parses a length or a percentage, allowing the unitless length quirk.
    /// https://quirks.spec.whatwg.org/#the-unitless-length-quirk
    #[inline]
    pub fn parse_quirky<'i, 't>(context: &ParserContext,
                                input: &mut Parser<'i, 't>,
                                allow_quirks: AllowQuirks) -> Result<Self, ParseError<'i>> {
        let inner =
            LengthOrPercentage::parse_non_negative_quirky(context, input, allow_quirks)?;
        Ok(NonNegative(inner))
    }
}
/// Either a `<length>` or the `none` keyword.
pub type LengthOrNone = Either<Length, None_>;
/// Either a `<length>` or the `normal` keyword.
pub type LengthOrNormal = Either<Length, Normal>;
/// Either a `<length>` or the `auto` keyword.
pub type LengthOrAuto = Either<Length, Auto>;
/// Either a `<length>` or a `<number>`; see the inherent impl below for
/// the number-first parsing order.
pub type LengthOrNumber = Either<Length, Number>;
impl LengthOrNumber {
    /// Parse a non-negative LengthOrNumber.
    pub fn parse_non_negative<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                      -> Result<Self, ParseError<'i>> {
        // We try to parse as a Number first because, for cases like
        // LengthOrNumber, we want "0" to be parsed as a plain Number rather
        // than a Length (0px); this matches the behaviour of all major browsers
        match input.try(|i| Number::parse_non_negative(context, i)) {
            Ok(number) => Ok(Either::Second(number)),
            Err(_) => Length::parse_non_negative(context, input).map(Either::First),
        }
    }

    /// Returns `0` (as a plain number, matching the parsing preference above).
    #[inline]
    pub fn zero() -> Self {
        Either::Second(Number::new(0.))
    }
}
/// A value suitable for a `min-width` or `min-height` property.
/// Unlike `max-width` or `max-height` properties, a MozLength can be
/// `auto`, and cannot be `none`.
#[allow(missing_docs)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub enum MozLength {
    /// A length, percentage, or the `auto` keyword.
    LengthOrPercentageOrAuto(LengthOrPercentageOrAuto),
    /// An extremum sizing keyword (see `ExtremumLength`).
    ExtremumLength(ExtremumLength),
}
impl Parse for MozLength {
    /// Parses with quirks disallowed; see `parse_quirky`.
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        MozLength::parse_quirky(context, input, AllowQuirks::No)
    }
}
impl MozLength {
    /// Parses, with quirks.
    pub fn parse_quirky<'i, 't>(context: &ParserContext,
                                input: &mut Parser<'i, 't>,
                                allow_quirks: AllowQuirks) -> Result<Self, ParseError<'i>> {
        // Extremum keywords are tried first; they never overlap with lengths.
        if let Ok(extremum) = input.try(ExtremumLength::parse) {
            return Ok(MozLength::ExtremumLength(extremum));
        }
        let length = input.try(|i| {
            LengthOrPercentageOrAuto::parse_non_negative_quirky(context, i, allow_quirks)
        })?;
        Ok(MozLength::LengthOrPercentageOrAuto(length))
    }
}
/// A value suitable for a `max-width` or `max-height` property.
#[allow(missing_docs)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub enum MaxLength {
    /// A length, percentage, or the `none` keyword.
    LengthOrPercentageOrNone(LengthOrPercentageOrNone),
    /// An extremum sizing keyword (see `ExtremumLength`).
    ExtremumLength(ExtremumLength),
}
impl Parse for MaxLength {
    /// Parses with quirks disallowed; see `parse_quirky`.
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        MaxLength::parse_quirky(context, input, AllowQuirks::No)
    }
}
impl MaxLength {
    /// Parses, with quirks.
    pub fn parse_quirky<'i, 't>(context: &ParserContext,
                                input: &mut Parser<'i, 't>,
                                allow_quirks: AllowQuirks) -> Result<Self, ParseError<'i>> {
        // Extremum keywords are tried first; they never overlap with lengths.
        if let Ok(extremum) = input.try(ExtremumLength::parse) {
            return Ok(MaxLength::ExtremumLength(extremum));
        }
        let length = input.try(|i| {
            LengthOrPercentageOrNone::parse_non_negative_quirky(context, i, allow_quirks)
        })?;
        Ok(MaxLength::LengthOrPercentageOrNone(length))
    }
}
Auto merge of #18538 - emilio:font-size-dep-correct, r=heycam
stylo: Don't add a font-size dependency to the rule cache if the reference size is not our current style.
We enter the Custom(..) code path from other random places, like to remove the
relative lengths from a calc expression while zooming, or whatever craziness
MathML font-size uses, and we don't want to set the dependency on those cases.
<!-- Reviewable:start -->
---
This change is [<img src="https://reviewable.io/review_button.svg" height="34" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/servo/servo/18538)
<!-- Reviewable:end -->
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! [Length values][length].
//!
//! [length]: https://drafts.csswg.org/css-values/#lengths
use app_units::Au;
use cssparser::{Parser, Token, BasicParseError};
use euclid::Size2D;
use font_metrics::FontMetricsQueryResult;
use parser::{Parse, ParserContext};
use std::{cmp, fmt, mem};
use std::ascii::AsciiExt;
use std::ops::{Add, Mul};
use style_traits::{ToCss, ParseError, StyleParseError};
use style_traits::values::specified::AllowedNumericType;
use stylesheets::CssRuleType;
use super::{AllowQuirks, Number, ToComputedValue, Percentage};
use values::{Auto, CSSFloat, Either, FONT_MEDIUM_PX, None_, Normal};
use values::{ExtremumLength, serialize_dimension};
use values::computed::{self, CSSPixelLength, Context};
use values::generics::NonNegative;
use values::specified::NonNegativeNumber;
use values::specified::calc::CalcNode;
pub use values::specified::calc::CalcLengthOrPercentage;
pub use super::image::{ColorStop, EndingShape as GradientEndingShape, Gradient};
pub use super::image::{GradientKind, Image};
/// Number of app units per pixel
pub const AU_PER_PX: CSSFloat = 60.;
/// Number of app units per inch
pub const AU_PER_IN: CSSFloat = AU_PER_PX * 96.;
/// Number of app units per centimeter
pub const AU_PER_CM: CSSFloat = AU_PER_IN / 2.54;
/// Number of app units per millimeter
pub const AU_PER_MM: CSSFloat = AU_PER_IN / 25.4;
/// Number of app units per quarter
pub const AU_PER_Q: CSSFloat = AU_PER_MM / 4.;
/// Number of app units per point
pub const AU_PER_PT: CSSFloat = AU_PER_IN / 72.;
/// Number of app units per pica
pub const AU_PER_PC: CSSFloat = AU_PER_PT * 12.;
/// Same as Gecko's AppUnitsToIntCSSPixels
///
/// Converts app units to integer pixel values,
/// rounding during the conversion
pub fn au_to_int_px(au: f32) -> i32 {
    let px = au / AU_PER_PX;
    px.round() as i32
}
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// A font relative length. The payload is the number of units; the unit's
/// reference size is resolved at computed-value time (see
/// `reference_font_size_and_length`).
pub enum FontRelativeLength {
    /// A "em" value: https://drafts.csswg.org/css-values/#em
    Em(CSSFloat),
    /// A "ex" value: https://drafts.csswg.org/css-values/#ex
    Ex(CSSFloat),
    /// A "ch" value: https://drafts.csswg.org/css-values/#ch
    Ch(CSSFloat),
    /// A "rem" value: https://drafts.csswg.org/css-values/#rem
    Rem(CSSFloat)
}
impl ToCss for FontRelativeLength {
    /// Serializes the numeric value followed by its unit, e.g. `1.5em`.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result
        where W: fmt::Write
    {
        let (value, unit) = match *self {
            FontRelativeLength::Em(v) => (v, "em"),
            FontRelativeLength::Ex(v) => (v, "ex"),
            FontRelativeLength::Ch(v) => (v, "ch"),
            FontRelativeLength::Rem(v) => (v, "rem"),
        };
        serialize_dimension(value, unit, dest)
    }
}
/// A source to resolve font-relative units against
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FontBaseSize {
    /// Use the font-size of the current element
    CurrentStyle,
    /// Use the inherited (parent) font-size
    InheritedStyle,
    /// Use a custom base size, in app units
    Custom(Au),
}
impl FontBaseSize {
    /// Calculate the actual size for a given context, reading the current or
    /// parent font-size from the style as needed.
    pub fn resolve(&self, context: &Context) -> Au {
        match *self {
            FontBaseSize::Custom(size) => size,
            FontBaseSize::CurrentStyle => Au::from(context.style().get_font().clone_font_size()),
            FontBaseSize::InheritedStyle => Au::from(context.style().get_parent_font().clone_font_size()),
        }
    }
}
impl FontRelativeLength {
    /// Computes the font-relative length: the unit count times the unit's
    /// reference size, clamped to the finite f32 range.
    pub fn to_computed_value(&self, context: &Context, base_size: FontBaseSize) -> CSSPixelLength {
        use std::f32;
        let (reference_size, length) = self.reference_font_size_and_length(context, base_size);
        let pixel = (length * reference_size.to_f32_px()).min(f32::MAX).max(f32::MIN);
        CSSPixelLength::new(pixel)
    }

    /// Return reference font size. We use the base_size flag to pass a different size
    /// for computing font-size and unconstrained font-size.
    /// This returns a pair, the first one is the reference font size, and the second one is the
    /// unpacked relative length.
    fn reference_font_size_and_length(
        &self,
        context: &Context,
        base_size: FontBaseSize,
    ) -> (Au, CSSFloat) {
        // Queries the platform font metrics for the font at
        // `reference_font_size`; used by the `ex` and `ch` arms below.
        fn query_font_metrics(context: &Context, reference_font_size: Au) -> FontMetricsQueryResult {
            context.font_metrics_provider.query(context.style().get_font(),
                                                reference_font_size,
                                                context.style().writing_mode,
                                                context.in_media_query,
                                                context.device())
        }

        let reference_font_size = base_size.resolve(context);
        match *self {
            FontRelativeLength::Em(length) => {
                if context.for_non_inherited_property.is_some() {
                    // Only record a font-size dependency on the rule cache
                    // when the reference size is this element's own
                    // font-size; other base sizes (inherited / custom) do
                    // not come from the current style.
                    if matches!(base_size, FontBaseSize::CurrentStyle) {
                        context.rule_cache_conditions.borrow_mut()
                            .set_font_size_dependency(
                                reference_font_size.into()
                            );
                    }
                }

                (reference_font_size, length)
            },
            FontRelativeLength::Ex(length) => {
                // The result depends on font metrics, which the rule cache
                // can't key on, so mark the rule uncacheable.
                if context.for_non_inherited_property.is_some() {
                    context.rule_cache_conditions.borrow_mut().set_uncacheable();
                }
                let reference_size = match query_font_metrics(context, reference_font_size) {
                    FontMetricsQueryResult::Available(metrics) => {
                        metrics.x_height
                    },
                    // https://drafts.csswg.org/css-values/#ex
                    //
                    //     In the cases where it is impossible or impractical to
                    //     determine the x-height, a value of 0.5em must be
                    //     assumed.
                    //
                    FontMetricsQueryResult::NotAvailable => {
                        reference_font_size.scale_by(0.5)
                    },
                };
                (reference_size, length)
            },
            FontRelativeLength::Ch(length) => {
                // Same as `ex`: depends on font metrics, so uncacheable.
                if context.for_non_inherited_property.is_some() {
                    context.rule_cache_conditions.borrow_mut().set_uncacheable();
                }
                let reference_size = match query_font_metrics(context, reference_font_size) {
                    FontMetricsQueryResult::Available(metrics) => {
                        metrics.zero_advance_measure
                    },
                    // https://drafts.csswg.org/css-values/#ch
                    //
                    //     In the cases where it is impossible or impractical to
                    //     determine the measure of the “0” glyph, it must be
                    //     assumed to be 0.5em wide by 1em tall. Thus, the ch
                    //     unit falls back to 0.5em in the general case, and to
                    //     1em when it would be typeset upright (i.e.
                    //     writing-mode is vertical-rl or vertical-lr and
                    //     text-orientation is upright).
                    //
                    FontMetricsQueryResult::NotAvailable => {
                        if context.style().writing_mode.is_vertical() {
                            reference_font_size
                        } else {
                            reference_font_size.scale_by(0.5)
                        }
                    }
                };
                (reference_size, length)
            }
            FontRelativeLength::Rem(length) => {
                // https://drafts.csswg.org/css-values/#rem:
                //
                //     When specified on the font-size property of the root
                //     element, the rem units refer to the property’s initial
                //     value.
                //
                let reference_size = if context.is_root_element {
                    reference_font_size
                } else {
                    context.device().root_font_size()
                };
                (reference_size, length)
            }
        }
    }
}
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// A viewport-relative length. The payload is the percentage of the
/// relevant viewport dimension.
///
/// https://drafts.csswg.org/css-values/#viewport-relative-lengths
pub enum ViewportPercentageLength {
    /// A vw unit: https://drafts.csswg.org/css-values/#vw
    Vw(CSSFloat),
    /// A vh unit: https://drafts.csswg.org/css-values/#vh
    Vh(CSSFloat),
    /// https://drafts.csswg.org/css-values/#vmin
    Vmin(CSSFloat),
    /// https://drafts.csswg.org/css-values/#vmax
    Vmax(CSSFloat)
}
impl ToCss for ViewportPercentageLength {
    /// Serializes the numeric value followed by its unit, e.g. `50vw`.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        let (value, unit) = match *self {
            ViewportPercentageLength::Vw(v) => (v, "vw"),
            ViewportPercentageLength::Vh(v) => (v, "vh"),
            ViewportPercentageLength::Vmin(v) => (v, "vmin"),
            ViewportPercentageLength::Vmax(v) => (v, "vmax"),
        };
        serialize_dimension(value, unit, dest)
    }
}
impl ViewportPercentageLength {
    /// Computes the given viewport-relative length for the given viewport size.
    pub fn to_computed_value(&self, viewport_size: Size2D<Au>) -> CSSPixelLength {
        // Pick the percentage and the viewport dimension it is relative to.
        let (percent, basis) = match *self {
            ViewportPercentageLength::Vw(p) => (p, viewport_size.width),
            ViewportPercentageLength::Vh(p) => (p, viewport_size.height),
            ViewportPercentageLength::Vmin(p) => {
                (p, cmp::min(viewport_size.width, viewport_size.height))
            }
            ViewportPercentageLength::Vmax(p) => {
                (p, cmp::max(viewport_size.width, viewport_size.height))
            }
        };
        // FIXME: Bug 1396535, we need to fix the extremely small viewport length for transform.
        // See bug 989802. We truncate so that adding multiple viewport units
        // that add up to 100 does not overflow due to rounding differences
        let truncated = ((basis.0 as f64) * percent as f64 / 100.).trunc();
        Au::from_f64_au(truncated).into()
    }
}
/// HTML5 "character width", as defined in HTML5 § 14.5.4.
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct CharacterWidth(pub i32);

impl CharacterWidth {
    /// Computes the given character width, as
    /// `(chars - 1) * average_advance + max_advance` app units.
    pub fn to_computed_value(&self, reference_font_size: Au) -> CSSPixelLength {
        // This applies the *converting a character width to pixels* algorithm as specified
        // in HTML5 § 14.5.4.
        //
        // TODO(pcwalton): Find these from the font.
        let average_advance = reference_font_size.scale_by(0.5);
        let max_advance = reference_font_size;
        let au = average_advance.scale_by(self.0 as CSSFloat - 1.0) + max_advance;
        au.into()
    }
}
/// Represents an absolute length with its unit
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum AbsoluteLength {
    /// An absolute length in pixels (px)
    Px(CSSFloat),
    /// An absolute length in inches (in)
    In(CSSFloat),
    /// An absolute length in centimeters (cm)
    Cm(CSSFloat),
    /// An absolute length in millimeters (mm)
    Mm(CSSFloat),
    /// An absolute length in quarter-millimeters (q)
    Q(CSSFloat),
    /// An absolute length in points (pt)
    Pt(CSSFloat),
    /// An absolute length in pica (pc)
    Pc(CSSFloat),
}
impl AbsoluteLength {
    /// Whether the underlying numeric value is exactly zero, regardless of
    /// unit.
    fn is_zero(&self) -> bool {
        match *self {
            AbsoluteLength::Px(v)
            | AbsoluteLength::In(v)
            | AbsoluteLength::Cm(v)
            | AbsoluteLength::Mm(v)
            | AbsoluteLength::Q(v)
            | AbsoluteLength::Pt(v)
            | AbsoluteLength::Pc(v) => v == 0.,
        }
    }

    /// Convert this into a pixel value.
    #[inline]
    pub fn to_px(&self) -> CSSFloat {
        use std::f32;
        // Each unit converts through its constant ratio of app units per
        // unit over app units per pixel (px scales by exactly 1).
        let (value, scale) = match *self {
            AbsoluteLength::Px(v) => (v, 1.),
            AbsoluteLength::In(v) => (v, AU_PER_IN / AU_PER_PX),
            AbsoluteLength::Cm(v) => (v, AU_PER_CM / AU_PER_PX),
            AbsoluteLength::Mm(v) => (v, AU_PER_MM / AU_PER_PX),
            AbsoluteLength::Q(v) => (v, AU_PER_Q / AU_PER_PX),
            AbsoluteLength::Pt(v) => (v, AU_PER_PT / AU_PER_PX),
            AbsoluteLength::Pc(v) => (v, AU_PER_PC / AU_PER_PX),
        };
        // Clamp to the finite f32 range.
        (value * scale).min(f32::MAX).max(f32::MIN)
    }
}
impl ToComputedValue for AbsoluteLength {
    type ComputedValue = CSSPixelLength;

    /// Absolute lengths need no style context: they convert directly to px.
    fn to_computed_value(&self, _: &Context) -> Self::ComputedValue {
        CSSPixelLength::new(self.to_px())
    }

    /// Round-trips through px, so the original unit is not preserved.
    fn from_computed_value(computed: &Self::ComputedValue) -> Self {
        AbsoluteLength::Px(computed.px())
    }
}
impl ToCss for AbsoluteLength {
    /// Serializes the numeric value followed by its unit, e.g. `12pt`.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        let (value, unit) = match *self {
            AbsoluteLength::Px(v) => (v, "px"),
            AbsoluteLength::In(v) => (v, "in"),
            AbsoluteLength::Cm(v) => (v, "cm"),
            AbsoluteLength::Mm(v) => (v, "mm"),
            AbsoluteLength::Q(v) => (v, "q"),
            AbsoluteLength::Pt(v) => (v, "pt"),
            AbsoluteLength::Pc(v) => (v, "pc"),
        };
        serialize_dimension(value, unit, dest)
    }
}
impl Mul<CSSFloat> for AbsoluteLength {
    type Output = AbsoluteLength;

    /// Scales the value, preserving the unit.
    #[inline]
    fn mul(self, scalar: CSSFloat) -> AbsoluteLength {
        match self {
            AbsoluteLength::Px(v) => AbsoluteLength::Px(v * scalar),
            AbsoluteLength::In(v) => AbsoluteLength::In(v * scalar),
            AbsoluteLength::Cm(v) => AbsoluteLength::Cm(v * scalar),
            AbsoluteLength::Mm(v) => AbsoluteLength::Mm(v * scalar),
            AbsoluteLength::Q(v) => AbsoluteLength::Q(v * scalar),
            AbsoluteLength::Pt(v) => AbsoluteLength::Pt(v * scalar),
            AbsoluteLength::Pc(v) => AbsoluteLength::Pc(v * scalar),
        }
    }
}
impl Add<AbsoluteLength> for AbsoluteLength {
    type Output = Self;

    /// Adds two absolute lengths. Same-unit pairs keep their unit;
    /// mixed-unit pairs fall back to adding in px.
    #[inline]
    fn add(self, rhs: Self) -> Self {
        match (self, rhs) {
            (AbsoluteLength::Px(x), AbsoluteLength::Px(y)) => AbsoluteLength::Px(x + y),
            (AbsoluteLength::In(x), AbsoluteLength::In(y)) => AbsoluteLength::In(x + y),
            (AbsoluteLength::Cm(x), AbsoluteLength::Cm(y)) => AbsoluteLength::Cm(x + y),
            (AbsoluteLength::Mm(x), AbsoluteLength::Mm(y)) => AbsoluteLength::Mm(x + y),
            (AbsoluteLength::Q(x), AbsoluteLength::Q(y)) => AbsoluteLength::Q(x + y),
            (AbsoluteLength::Pt(x), AbsoluteLength::Pt(y)) => AbsoluteLength::Pt(x + y),
            (AbsoluteLength::Pc(x), AbsoluteLength::Pc(y)) => AbsoluteLength::Pc(x + y),
            _ => AbsoluteLength::Px(self.to_px() + rhs.to_px()),
        }
    }
}
/// Represents a physical length (mozmm) based on DPI
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg(feature = "gecko")]
#[derive(MallocSizeOf)]
pub struct PhysicalLength(pub CSSFloat);
#[cfg(feature = "gecko")]
impl PhysicalLength {
    /// Whether the value is exactly zero millimeters.
    fn is_zero(&self) -> bool {
        self.0 == 0.
    }

    /// Computes the pixel length for this physical (mozmm) length, using the
    /// device's physical DPI.
    pub fn to_computed_value(&self, context: &Context) -> CSSPixelLength {
        use gecko_bindings::bindings;
        use std::f32;

        // Same as Gecko
        const INCH_PER_MM: f32 = 1. / 25.4;

        // SAFETY assumption (review): the pres context pointer obtained from
        // the device is expected to be valid for this FFI call — confirm
        // against the Gecko bindings' contract.
        let au_per_physical_inch = unsafe {
            bindings::Gecko_GetAppUnitsPerPhysicalInch(context.device().pres_context()) as f32
        };
        let px_per_physical_inch = au_per_physical_inch / AU_PER_PX;
        let pixel = self.0 * px_per_physical_inch * INCH_PER_MM;
        // Clamp to the finite f32 range.
        CSSPixelLength::new(pixel.min(f32::MAX).max(f32::MIN))
    }
}
#[cfg(feature = "gecko")]
impl ToCss for PhysicalLength {
    /// Serializes as a `mozmm` dimension.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        serialize_dimension(self.0, "mozmm", dest)
    }
}
#[cfg(feature = "gecko")]
impl Mul<CSSFloat> for PhysicalLength {
    type Output = PhysicalLength;

    /// Scales the millimeter value.
    #[inline]
    fn mul(self, scalar: CSSFloat) -> PhysicalLength {
        PhysicalLength(self.0 * scalar)
    }
}
/// A `<length>` without taking `calc` expressions into account
///
/// https://drafts.csswg.org/css-values/#lengths
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum NoCalcLength {
    /// An absolute length
    ///
    /// https://drafts.csswg.org/css-values/#absolute-length
    Absolute(AbsoluteLength),
    /// A font-relative length:
    ///
    /// https://drafts.csswg.org/css-values/#font-relative-lengths
    FontRelative(FontRelativeLength),
    /// A viewport-relative length.
    ///
    /// https://drafts.csswg.org/css-values/#viewport-relative-lengths
    ViewportPercentage(ViewportPercentageLength),
    /// HTML5 "character width", as defined in HTML5 § 14.5.4.
    ///
    /// This cannot be specified by the user directly and is only generated by
    /// `Stylist::synthesize_rules_for_legacy_attributes()`.
    ServoCharacterWidth(CharacterWidth),
    /// A physical length (mozmm) based on DPI; Gecko-only.
    #[cfg(feature = "gecko")]
    Physical(PhysicalLength),
}
impl ToCss for NoCalcLength {
    /// Delegates serialization to the wrapped length kind.
    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
        match *self {
            NoCalcLength::Absolute(length) => length.to_css(dest),
            NoCalcLength::FontRelative(length) => length.to_css(dest),
            NoCalcLength::ViewportPercentage(length) => length.to_css(dest),
            /* This should only be reached from style dumping code */
            NoCalcLength::ServoCharacterWidth(CharacterWidth(i)) => {
                dest.write_str("CharWidth(")?;
                i.to_css(dest)?;
                dest.write_char(')')
            }
            #[cfg(feature = "gecko")]
            NoCalcLength::Physical(length) => length.to_css(dest),
        }
    }
}
impl Mul<CSSFloat> for NoCalcLength {
    type Output = NoCalcLength;

    /// Scales the inner value, preserving the length kind.
    ///
    /// # Panics
    ///
    /// Panics on `ServoCharacterWidth`, which holds an integer character
    /// count and has no meaningful scalar multiplication.
    #[inline]
    fn mul(self, scalar: CSSFloat) -> NoCalcLength {
        match self {
            NoCalcLength::Absolute(v) => NoCalcLength::Absolute(v * scalar),
            NoCalcLength::FontRelative(v) => NoCalcLength::FontRelative(v * scalar),
            NoCalcLength::ViewportPercentage(v) => NoCalcLength::ViewportPercentage(v * scalar),
            NoCalcLength::ServoCharacterWidth(_) => panic!("Can't multiply ServoCharacterWidth!"),
            #[cfg(feature = "gecko")]
            NoCalcLength::Physical(v) => NoCalcLength::Physical(v * scalar),
        }
    }
}
impl NoCalcLength {
    /// Parse a given absolute or relative dimension.
    ///
    /// Returns `Err(())` for unknown units, and for viewport units inside
    /// `@page` rules, where they are rejected.
    pub fn parse_dimension(context: &ParserContext, value: CSSFloat, unit: &str)
                           -> Result<NoCalcLength, ()> {
        let in_page_rule = context.rule_type.map_or(false, |rule_type| rule_type == CssRuleType::Page);
        match_ignore_ascii_case! { unit,
            "px" => Ok(NoCalcLength::Absolute(AbsoluteLength::Px(value))),
            "in" => Ok(NoCalcLength::Absolute(AbsoluteLength::In(value))),
            "cm" => Ok(NoCalcLength::Absolute(AbsoluteLength::Cm(value))),
            "mm" => Ok(NoCalcLength::Absolute(AbsoluteLength::Mm(value))),
            "q" => Ok(NoCalcLength::Absolute(AbsoluteLength::Q(value))),
            "pt" => Ok(NoCalcLength::Absolute(AbsoluteLength::Pt(value))),
            "pc" => Ok(NoCalcLength::Absolute(AbsoluteLength::Pc(value))),
            // font-relative
            "em" => Ok(NoCalcLength::FontRelative(FontRelativeLength::Em(value))),
            "ex" => Ok(NoCalcLength::FontRelative(FontRelativeLength::Ex(value))),
            "ch" => Ok(NoCalcLength::FontRelative(FontRelativeLength::Ch(value))),
            "rem" => Ok(NoCalcLength::FontRelative(FontRelativeLength::Rem(value))),
            // viewport percentages; not allowed within @page rules, since the
            // page box's size is what defines the viewport there.
            "vw" => {
                if in_page_rule {
                    return Err(())
                }
                Ok(NoCalcLength::ViewportPercentage(ViewportPercentageLength::Vw(value)))
            },
            "vh" => {
                if in_page_rule {
                    return Err(())
                }
                Ok(NoCalcLength::ViewportPercentage(ViewportPercentageLength::Vh(value)))
            },
            "vmin" => {
                if in_page_rule {
                    return Err(())
                }
                Ok(NoCalcLength::ViewportPercentage(ViewportPercentageLength::Vmin(value)))
            },
            "vmax" => {
                if in_page_rule {
                    return Err(())
                }
                Ok(NoCalcLength::ViewportPercentage(ViewportPercentageLength::Vmax(value)))
            },
            #[cfg(feature = "gecko")]
            "mozmm" => Ok(NoCalcLength::Physical(PhysicalLength(value))),
            _ => Err(())
        }
    }

    #[inline]
    /// Returns a `zero` (0px) length.
    pub fn zero() -> NoCalcLength {
        NoCalcLength::Absolute(AbsoluteLength::Px(0.))
    }

    #[inline]
    /// Checks whether the length value is zero. Only absolute (and, for
    /// Gecko, physical) lengths can be checked statically; relative lengths
    /// conservatively report `false`.
    pub fn is_zero(&self) -> bool {
        match *self {
            NoCalcLength::Absolute(length) => length.is_zero(),
            #[cfg(feature = "gecko")]
            NoCalcLength::Physical(length) => length.is_zero(),
            _ => false
        }
    }

    #[inline]
    /// Returns a `medium` font-size length, in px.
    pub fn medium() -> NoCalcLength {
        NoCalcLength::Absolute(AbsoluteLength::Px(FONT_MEDIUM_PX as f32))
    }

    /// Get an absolute length from a px value.
    #[inline]
    pub fn from_px(px_value: CSSFloat) -> NoCalcLength {
        NoCalcLength::Absolute(AbsoluteLength::Px(px_value))
    }
}
/// An extension to `NoCalcLength` to parse `calc` expressions.
/// This is commonly used for the `<length>` values.
///
/// https://drafts.csswg.org/css-values/#lengths
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub enum Length {
    /// The internal length type that cannot parse `calc`
    NoCalc(NoCalcLength),
    /// A calc expression, boxed to keep the enum small.
    ///
    /// https://drafts.csswg.org/css-values/#calc-notation
    Calc(Box<CalcLengthOrPercentage>),
}
impl From<NoCalcLength> for Length {
    /// Wraps a non-calc length.
    #[inline]
    fn from(len: NoCalcLength) -> Self {
        Length::NoCalc(len)
    }
}
impl Mul<CSSFloat> for Length {
    type Output = Length;

    /// Scales a non-calc length.
    ///
    /// # Panics
    ///
    /// Panics on `Calc` variants, which cannot be scaled by a plain scalar
    /// here.
    #[inline]
    fn mul(self, scalar: CSSFloat) -> Length {
        match self {
            Length::NoCalc(inner) => Length::NoCalc(inner * scalar),
            Length::Calc(..) => panic!("Can't multiply Calc!"),
        }
    }
}
impl Mul<CSSFloat> for FontRelativeLength {
    type Output = FontRelativeLength;

    /// Scales the value, preserving the unit.
    #[inline]
    fn mul(self, scalar: CSSFloat) -> FontRelativeLength {
        match self {
            FontRelativeLength::Em(v) => FontRelativeLength::Em(v * scalar),
            FontRelativeLength::Ex(v) => FontRelativeLength::Ex(v * scalar),
            FontRelativeLength::Ch(v) => FontRelativeLength::Ch(v * scalar),
            FontRelativeLength::Rem(v) => FontRelativeLength::Rem(v * scalar),
        }
    }
}
impl Mul<CSSFloat> for ViewportPercentageLength {
    type Output = ViewportPercentageLength;

    /// Scales the value, preserving the unit.
    #[inline]
    fn mul(self, scalar: CSSFloat) -> ViewportPercentageLength {
        match self {
            ViewportPercentageLength::Vw(v) => ViewportPercentageLength::Vw(v * scalar),
            ViewportPercentageLength::Vh(v) => ViewportPercentageLength::Vh(v * scalar),
            ViewportPercentageLength::Vmin(v) => ViewportPercentageLength::Vmin(v * scalar),
            ViewportPercentageLength::Vmax(v) => ViewportPercentageLength::Vmax(v * scalar),
        }
    }
}
impl Length {
    #[inline]
    /// Returns a `zero` (0px) length.
    pub fn zero() -> Length {
        Length::NoCalc(NoCalcLength::zero())
    }

    /// Parse a given absolute or relative dimension.
    pub fn parse_dimension(context: &ParserContext, value: CSSFloat, unit: &str)
                           -> Result<Length, ()> {
        NoCalcLength::parse_dimension(context, value, unit).map(Length::NoCalc)
    }

    /// Parses a `<length>`, restricted by `num_context` and honoring the
    /// unitless-length quirk when `allow_quirks` permits it. `calc()` is
    /// handled by delegating to `CalcNode`.
    #[inline]
    fn parse_internal<'i, 't>(context: &ParserContext,
                              input: &mut Parser<'i, 't>,
                              num_context: AllowedNumericType,
                              allow_quirks: AllowQuirks)
                              -> Result<Length, ParseError<'i>> {
        // FIXME: remove early returns when lifetimes are non-lexical
        {
            let token = input.next()?;
            match *token {
                // A dimension such as `1px`, as long as the numeric value
                // passes the `num_context` restriction.
                Token::Dimension { value, ref unit, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    return Length::parse_dimension(context, value, unit)
                        // parse_dimension errors with `()`; report the
                        // offending token instead.
                        .map_err(|()| BasicParseError::UnexpectedToken(token.clone()).into())
                }
                // A bare number: `0` is always a valid length; nonzero
                // numbers need either a parsing mode that allows unitless
                // lengths or the quirks-mode quirk.
                Token::Number { value, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    if value != 0. &&
                       !context.parsing_mode.allows_unitless_lengths() &&
                       !allow_quirks.allowed(context.quirks_mode) {
                        return Err(StyleParseError::UnspecifiedError.into())
                    }
                    return Ok(Length::NoCalc(NoCalcLength::Absolute(AbsoluteLength::Px(value))))
                },
                // Fall through to parse the calc() arguments below.
                Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {}
                ref token => return Err(BasicParseError::UnexpectedToken(token.clone()).into())
            }
        }
        // Only reachable for `calc(`: parse the expression inside the
        // function's parenthesized block.
        input.parse_nested_block(|input| {
            CalcNode::parse_length(context, input, num_context).map(|calc| Length::Calc(Box::new(calc)))
        })
    }

    /// Parse a non-negative length
    #[inline]
    pub fn parse_non_negative<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                      -> Result<Length, ParseError<'i>> {
        Self::parse_non_negative_quirky(context, input, AllowQuirks::No)
    }

    /// Parse a non-negative length, allowing quirks.
    #[inline]
    pub fn parse_non_negative_quirky<'i, 't>(context: &ParserContext,
                                             input: &mut Parser<'i, 't>,
                                             allow_quirks: AllowQuirks)
                                             -> Result<Length, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::NonNegative, allow_quirks)
    }

    /// Get an absolute length from a px value.
    #[inline]
    pub fn from_px(px_value: CSSFloat) -> Length {
        Length::NoCalc(NoCalcLength::from_px(px_value))
    }

    /// Extract inner length without a clone, replacing it with a 0 Au
    ///
    /// Use when you need to move out of a length array without cloning
    #[inline]
    pub fn take(&mut self) -> Self {
        mem::replace(self, Length::zero())
    }
}
impl Parse for Length {
    /// Parses with quirks disallowed; see `parse_quirky`.
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        Self::parse_quirky(context, input, AllowQuirks::No)
    }
}
impl Length {
    /// Parses a length, with quirks. Accepts any numeric value; use the
    /// non-negative variants for properties that reject negatives.
    pub fn parse_quirky<'i, 't>(context: &ParserContext,
                                input: &mut Parser<'i, 't>,
                                allow_quirks: AllowQuirks)
                                -> Result<Self, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::All, allow_quirks)
    }
}
impl<T: Parse> Either<Length, T> {
    /// Parse a non-negative length, after first giving `T` a chance to
    /// match.
    #[inline]
    pub fn parse_non_negative_length<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                             -> Result<Self, ParseError<'i>> {
        match input.try(|input| T::parse(context, input)) {
            Ok(second) => Ok(Either::Second(second)),
            Err(_) => {
                Length::parse_internal(context, input, AllowedNumericType::NonNegative, AllowQuirks::No)
                    .map(Either::First)
            }
        }
    }
}
/// A wrapper of Length, whose value must be >= 0.
/// The invariant is enforced at construction time (see `from_px` below,
/// which clamps, and the non-negative parse paths).
pub type NonNegativeLength = NonNegative<Length>;
impl From<NoCalcLength> for NonNegativeLength {
    /// Wraps a raw length; the caller is responsible for the value being
    /// non-negative.
    #[inline]
    fn from(len: NoCalcLength) -> Self {
        NonNegative(Length::from(len))
    }
}
impl From<Length> for NonNegativeLength {
    /// Wraps a length; the caller is responsible for the value being
    /// non-negative.
    #[inline]
    fn from(len: Length) -> Self {
        NonNegative(len)
    }
}
impl<T: Parse> Parse for Either<NonNegativeLength, T> {
#[inline]
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
if let Ok(v) = input.try(|input| T::parse(context, input)) {
return Ok(Either::Second(v));
}
Length::parse_internal(context, input, AllowedNumericType::NonNegative, AllowQuirks::No)
.map(NonNegative::<Length>).map(Either::First)
}
}
impl NonNegativeLength {
/// Returns a `zero` length.
#[inline]
pub fn zero() -> Self {
Length::zero().into()
}
/// Get an absolute length from a px value.
#[inline]
pub fn from_px(px_value: CSSFloat) -> Self {
Length::from_px(px_value.max(0.)).into()
}
}
/// Either a NonNegativeLength or the `normal` keyword.
pub type NonNegativeLengthOrNormal = Either<NonNegativeLength, Normal>;
/// Either a NonNegativeLength or the `auto` keyword.
pub type NonNegativeLengthOrAuto = Either<NonNegativeLength, Auto>;
/// Either a NonNegativeLength or a NonNegativeNumber value.
pub type NonNegativeLengthOrNumber = Either<NonNegativeLength, NonNegativeNumber>;
/// A length or a percentage value.
#[allow(missing_docs)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub enum LengthOrPercentage {
    /// A plain `<length>` (no calc()).
    Length(NoCalcLength),
    /// A `<percentage>`.
    Percentage(computed::Percentage),
    /// A `calc()` expression; boxed to keep the enum small.
    Calc(Box<CalcLengthOrPercentage>),
}
impl From<Length> for LengthOrPercentage {
    fn from(len: Length) -> LengthOrPercentage {
        // Unwrap each Length representation into the matching variant.
        match len {
            Length::NoCalc(inner) => LengthOrPercentage::Length(inner),
            Length::Calc(calc) => LengthOrPercentage::Calc(calc),
        }
    }
}
impl From<NoCalcLength> for LengthOrPercentage {
    #[inline]
    fn from(len: NoCalcLength) -> Self {
        LengthOrPercentage::Length(len)
    }
}
impl From<Percentage> for LengthOrPercentage {
    #[inline]
    fn from(pc: Percentage) -> Self {
        // Plain percentages map straight to the Percentage variant; a calc()
        // percentage must be wrapped in a calc expression.
        if !pc.is_calc() {
            return LengthOrPercentage::Percentage(computed::Percentage(pc.get()));
        }
        LengthOrPercentage::Calc(Box::new(CalcLengthOrPercentage {
            percentage: Some(computed::Percentage(pc.get())),
            .. Default::default()
        }))
    }
}
impl From<computed::Percentage> for LengthOrPercentage {
    #[inline]
    fn from(pc: computed::Percentage) -> Self {
        LengthOrPercentage::Percentage(pc)
    }
}
impl LengthOrPercentage {
    #[inline]
    /// Returns a `zero` length.
    pub fn zero() -> LengthOrPercentage {
        LengthOrPercentage::Length(NoCalcLength::zero())
    }

    /// Parses a `<length> | <percentage>`, restricted by `num_context`
    /// (e.g. non-negative only), optionally honoring the unitless-length quirk.
    fn parse_internal<'i, 't>(context: &ParserContext,
                              input: &mut Parser<'i, 't>,
                              num_context: AllowedNumericType,
                              allow_quirks: AllowQuirks)
                              -> Result<LengthOrPercentage, ParseError<'i>>
    {
        // FIXME: remove early returns when lifetimes are non-lexical
        {
            let token = input.next()?;
            match *token {
                // A dimension token, e.g. `12px`: unit handling is delegated.
                Token::Dimension { value, ref unit, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    return NoCalcLength::parse_dimension(context, value, unit)
                        .map(LengthOrPercentage::Length)
                        .map_err(|()| BasicParseError::UnexpectedToken(token.clone()).into())
                }
                Token::Percentage { unit_value, .. } if num_context.is_ok(context.parsing_mode, unit_value) => {
                    return Ok(LengthOrPercentage::Percentage(computed::Percentage(unit_value)))
                }
                Token::Number { value, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    // A unitless nonzero number is only valid in parsing modes
                    // that allow unitless lengths, or under the quirks quirk.
                    if value != 0. &&
                       !context.parsing_mode.allows_unitless_lengths() &&
                       !allow_quirks.allowed(context.quirks_mode) {
                        return Err(BasicParseError::UnexpectedToken(token.clone()).into())
                    } else {
                        return Ok(LengthOrPercentage::Length(NoCalcLength::from_px(value)))
                    }
                }
                // Fall through to the calc() parsing below.
                Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {}
                _ => return Err(BasicParseError::UnexpectedToken(token.clone()).into())
            }
        }
        let calc = input.parse_nested_block(|i| {
            CalcNode::parse_length_or_percentage(context, i, num_context)
        })?;
        Ok(LengthOrPercentage::Calc(Box::new(calc)))
    }

    /// Parse a non-negative length.
    #[inline]
    pub fn parse_non_negative<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                      -> Result<LengthOrPercentage, ParseError<'i>> {
        Self::parse_non_negative_quirky(context, input, AllowQuirks::No)
    }

    /// Parse a non-negative length, with quirks.
    #[inline]
    pub fn parse_non_negative_quirky<'i, 't>(context: &ParserContext,
                                             input: &mut Parser<'i, 't>,
                                             allow_quirks: AllowQuirks)
                                             -> Result<LengthOrPercentage, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::NonNegative, allow_quirks)
    }

    /// Parse a length, treating dimensionless numbers as pixels
    ///
    /// https://www.w3.org/TR/SVG2/types.html#presentation-attribute-css-value
    pub fn parse_numbers_are_pixels<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                            -> Result<LengthOrPercentage, ParseError<'i>> {
        if let Ok(lop) = input.try(|i| Self::parse(context, i)) {
            return Ok(lop)
        }
        // TODO(emilio): Probably should use Number::parse_non_negative to
        // handle calc()?
        let num = input.expect_number()?;
        Ok(LengthOrPercentage::Length(NoCalcLength::Absolute(AbsoluteLength::Px(num))))
    }

    /// Parse a non-negative length, treating dimensionless numbers as pixels
    ///
    /// This is nonstandard behavior used by Firefox for SVG
    pub fn parse_numbers_are_pixels_non_negative<'i, 't>(context: &ParserContext,
                                                         input: &mut Parser<'i, 't>)
                                                         -> Result<LengthOrPercentage, ParseError<'i>> {
        if let Ok(lop) = input.try(|i| Self::parse_non_negative(context, i)) {
            return Ok(lop)
        }
        // TODO(emilio): Probably should use Number::parse_non_negative to
        // handle calc()?
        let num = input.expect_number()?;
        if num >= 0. {
            Ok(LengthOrPercentage::Length(NoCalcLength::Absolute(AbsoluteLength::Px(num))))
        } else {
            Err(StyleParseError::UnspecifiedError.into())
        }
    }

    /// Extract value from ref without a clone, replacing it with a 0 Au
    ///
    /// Use when you need to move out of a length array without cloning
    #[inline]
    pub fn take(&mut self) -> Self {
        mem::replace(self, LengthOrPercentage::zero())
    }
}
impl Parse for LengthOrPercentage {
#[inline]
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
Self::parse_quirky(context, input, AllowQuirks::No)
}
}
impl LengthOrPercentage {
/// Parses a length or a percentage, allowing the unitless length quirk.
/// https://quirks.spec.whatwg.org/#the-unitless-length-quirk
#[inline]
pub fn parse_quirky<'i, 't>(context: &ParserContext,
input: &mut Parser<'i, 't>,
allow_quirks: AllowQuirks) -> Result<Self, ParseError<'i>> {
Self::parse_internal(context, input, AllowedNumericType::All, allow_quirks)
}
}
/// Either a `<length>`, a `<percentage>`, or the `auto` keyword.
#[allow(missing_docs)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub enum LengthOrPercentageOrAuto {
    /// A plain `<length>` (no calc()).
    Length(NoCalcLength),
    /// A `<percentage>`.
    Percentage(computed::Percentage),
    /// The `auto` keyword.
    Auto,
    /// A `calc()` expression; boxed to keep the enum small.
    Calc(Box<CalcLengthOrPercentage>),
}
impl From<NoCalcLength> for LengthOrPercentageOrAuto {
    #[inline]
    fn from(len: NoCalcLength) -> Self {
        LengthOrPercentageOrAuto::Length(len)
    }
}
impl From<computed::Percentage> for LengthOrPercentageOrAuto {
    #[inline]
    fn from(pc: computed::Percentage) -> Self {
        LengthOrPercentageOrAuto::Percentage(pc)
    }
}
impl LengthOrPercentageOrAuto {
    /// Parses a `<length> | <percentage> | auto`, restricted by `num_context`,
    /// optionally honoring the unitless-length quirk.
    fn parse_internal<'i, 't>(context: &ParserContext,
                              input: &mut Parser<'i, 't>,
                              num_context: AllowedNumericType,
                              allow_quirks: AllowQuirks)
                              -> Result<Self, ParseError<'i>> {
        // FIXME: remove early returns when lifetimes are non-lexical
        {
            let token = input.next()?;
            match *token {
                // A dimension token, e.g. `12px`: unit handling is delegated.
                Token::Dimension { value, ref unit, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    return NoCalcLength::parse_dimension(context, value, unit)
                        .map(LengthOrPercentageOrAuto::Length)
                        .map_err(|()| BasicParseError::UnexpectedToken(token.clone()).into())
                }
                Token::Percentage { unit_value, .. } if num_context.is_ok(context.parsing_mode, unit_value) => {
                    return Ok(LengthOrPercentageOrAuto::Percentage(computed::Percentage(unit_value)))
                }
                Token::Number { value, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    // A unitless nonzero number is only valid in parsing modes
                    // that allow unitless lengths, or under the quirks quirk.
                    if value != 0. &&
                       !context.parsing_mode.allows_unitless_lengths() &&
                       !allow_quirks.allowed(context.quirks_mode) {
                        return Err(StyleParseError::UnspecifiedError.into())
                    }
                    return Ok(LengthOrPercentageOrAuto::Length(
                        NoCalcLength::Absolute(AbsoluteLength::Px(value))
                    ))
                }
                Token::Ident(ref value) if value.eq_ignore_ascii_case("auto") => {
                    return Ok(LengthOrPercentageOrAuto::Auto)
                }
                // Fall through to the calc() parsing below.
                Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {}
                _ => return Err(BasicParseError::UnexpectedToken(token.clone()).into())
            }
        }
        let calc = input.parse_nested_block(|i| {
            CalcNode::parse_length_or_percentage(context, i, num_context)
        })?;
        Ok(LengthOrPercentageOrAuto::Calc(Box::new(calc)))
    }

    /// Parse a non-negative length, percentage, or auto.
    #[inline]
    pub fn parse_non_negative<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                      -> Result<LengthOrPercentageOrAuto, ParseError<'i>> {
        Self::parse_non_negative_quirky(context, input, AllowQuirks::No)
    }

    /// Parse a non-negative length, percentage, or auto, with quirks.
    #[inline]
    pub fn parse_non_negative_quirky<'i, 't>(context: &ParserContext,
                                             input: &mut Parser<'i, 't>,
                                             allow_quirks: AllowQuirks)
                                             -> Result<Self, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::NonNegative, allow_quirks)
    }

    /// Returns the `auto` value.
    pub fn auto() -> Self {
        LengthOrPercentageOrAuto::Auto
    }

    /// Returns a value representing a `0` length.
    pub fn zero() -> Self {
        LengthOrPercentageOrAuto::Length(NoCalcLength::zero())
    }

    /// Returns a value representing `0%`.
    pub fn zero_percent() -> Self {
        LengthOrPercentageOrAuto::Percentage(computed::Percentage::zero())
    }
}
impl Parse for LengthOrPercentageOrAuto {
#[inline]
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
Self::parse_quirky(context, input, AllowQuirks::No)
}
}
impl LengthOrPercentageOrAuto {
/// Parses, with quirks.
#[inline]
pub fn parse_quirky<'i, 't>(context: &ParserContext,
input: &mut Parser<'i, 't>,
allow_quirks: AllowQuirks)
-> Result<Self, ParseError<'i>> {
Self::parse_internal(context, input, AllowedNumericType::All, allow_quirks)
}
}
/// Either a `<length>`, a `<percentage>`, or the `none` keyword.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
#[allow(missing_docs)]
pub enum LengthOrPercentageOrNone {
    /// A plain `<length>` (no calc()).
    Length(NoCalcLength),
    /// A `<percentage>`.
    Percentage(computed::Percentage),
    /// A `calc()` expression; boxed to keep the enum small.
    Calc(Box<CalcLengthOrPercentage>),
    /// The `none` keyword.
    None,
}
impl LengthOrPercentageOrNone {
    /// Parses a `<length> | <percentage> | none`, restricted by `num_context`,
    /// optionally honoring the unitless-length quirk.
    fn parse_internal<'i, 't>(context: &ParserContext,
                              input: &mut Parser<'i, 't>,
                              num_context: AllowedNumericType,
                              allow_quirks: AllowQuirks)
                              -> Result<LengthOrPercentageOrNone, ParseError<'i>>
    {
        // FIXME: remove early returns when lifetimes are non-lexical
        {
            let token = input.next()?;
            match *token {
                // A dimension token, e.g. `12px`: unit handling is delegated.
                Token::Dimension { value, ref unit, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    return NoCalcLength::parse_dimension(context, value, unit)
                        .map(LengthOrPercentageOrNone::Length)
                        .map_err(|()| BasicParseError::UnexpectedToken(token.clone()).into())
                }
                Token::Percentage { unit_value, .. } if num_context.is_ok(context.parsing_mode, unit_value) => {
                    return Ok(LengthOrPercentageOrNone::Percentage(computed::Percentage(unit_value)))
                }
                Token::Number { value, .. } if num_context.is_ok(context.parsing_mode, value) => {
                    // A unitless nonzero number is only valid in parsing modes
                    // that allow unitless lengths, or under the quirks quirk.
                    if value != 0. && !context.parsing_mode.allows_unitless_lengths() &&
                       !allow_quirks.allowed(context.quirks_mode) {
                        return Err(StyleParseError::UnspecifiedError.into())
                    }
                    return Ok(LengthOrPercentageOrNone::Length(
                        NoCalcLength::Absolute(AbsoluteLength::Px(value))
                    ))
                }
                // Fall through to the calc() parsing below.
                Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {}
                Token::Ident(ref value) if value.eq_ignore_ascii_case("none") => {
                    return Ok(LengthOrPercentageOrNone::None)
                }
                _ => return Err(BasicParseError::UnexpectedToken(token.clone()).into())
            }
        }
        let calc = input.parse_nested_block(|i| {
            CalcNode::parse_length_or_percentage(context, i, num_context)
        })?;
        Ok(LengthOrPercentageOrNone::Calc(Box::new(calc)))
    }

    /// Parse a non-negative LengthOrPercentageOrNone.
    #[inline]
    pub fn parse_non_negative<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                      -> Result<Self, ParseError<'i>> {
        Self::parse_non_negative_quirky(context, input, AllowQuirks::No)
    }

    /// Parse a non-negative LengthOrPercentageOrNone, with quirks.
    #[inline]
    pub fn parse_non_negative_quirky<'i, 't>(context: &ParserContext,
                                             input: &mut Parser<'i, 't>,
                                             allow_quirks: AllowQuirks)
                                             -> Result<Self, ParseError<'i>> {
        Self::parse_internal(context, input, AllowedNumericType::NonNegative, allow_quirks)
    }
}
impl Parse for LengthOrPercentageOrNone {
    #[inline]
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        // Any numeric value is acceptable here; quirks are not.
        Self::parse_internal(context, input, AllowedNumericType::All, AllowQuirks::No)
    }
}
/// A wrapper of LengthOrPercentage, whose value must be >= 0.
pub type NonNegativeLengthOrPercentage = NonNegative<LengthOrPercentage>;
impl From<NoCalcLength> for NonNegativeLengthOrPercentage {
    #[inline]
    fn from(len: NoCalcLength) -> Self {
        NonNegative(LengthOrPercentage::from(len))
    }
}
impl Parse for NonNegativeLengthOrPercentage {
    #[inline]
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        LengthOrPercentage::parse_non_negative(context, input).map(NonNegative)
    }
}
impl NonNegativeLengthOrPercentage {
    #[inline]
    /// Returns a `zero` length.
    pub fn zero() -> Self {
        NonNegative(LengthOrPercentage::zero())
    }

    /// Parses a length or a percentage, allowing the unitless length quirk.
    /// https://quirks.spec.whatwg.org/#the-unitless-length-quirk
    #[inline]
    pub fn parse_quirky<'i, 't>(context: &ParserContext,
                                input: &mut Parser<'i, 't>,
                                allow_quirks: AllowQuirks) -> Result<Self, ParseError<'i>> {
        LengthOrPercentage::parse_non_negative_quirky(context, input, allow_quirks)
            .map(NonNegative)
    }
}
/// Either a `<length>` or the `none` keyword.
pub type LengthOrNone = Either<Length, None_>;
/// Either a `<length>` or the `normal` keyword.
pub type LengthOrNormal = Either<Length, Normal>;
/// Either a `<length>` or the `auto` keyword.
pub type LengthOrAuto = Either<Length, Auto>;
/// Either a `<length>` or a `<number>`.
pub type LengthOrNumber = Either<Length, Number>;
impl LengthOrNumber {
    /// Parse a non-negative LengthOrNumber.
    pub fn parse_non_negative<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
                                      -> Result<Self, ParseError<'i>> {
        // Numbers are tried first so that "0" parses as a plain Number rather
        // than a zero Length (0px); this matches the behaviour of all major
        // browsers.
        match input.try(|i| Number::parse_non_negative(context, i)) {
            Ok(num) => Ok(Either::Second(num)),
            Err(_) => Length::parse_non_negative(context, input).map(Either::First),
        }
    }

    /// Returns `0`, represented as a plain Number.
    #[inline]
    pub fn zero() -> Self {
        Either::Second(Number::new(0.))
    }
}
/// A value suitable for a `min-width` or `min-height` property.
/// Unlike `max-width` or `max-height` properties, a MozLength can be
/// `auto`, and cannot be `none`.
#[allow(missing_docs)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub enum MozLength {
    /// A `<length>`, a `<percentage>`, or `auto`.
    LengthOrPercentageOrAuto(LengthOrPercentageOrAuto),
    /// An extremum length keyword.
    ExtremumLength(ExtremumLength),
}
impl Parse for MozLength {
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        // Default entry point: no quirks.
        MozLength::parse_quirky(context, input, AllowQuirks::No)
    }
}
impl MozLength {
    /// Parses, with quirks: an extremum keyword is tried first, then a
    /// non-negative `<length> | <percentage> | auto`.
    pub fn parse_quirky<'i, 't>(context: &ParserContext,
                                input: &mut Parser<'i, 't>,
                                allow_quirks: AllowQuirks) -> Result<Self, ParseError<'i>> {
        if let Ok(extremum) = input.try(ExtremumLength::parse) {
            return Ok(MozLength::ExtremumLength(extremum));
        }
        input.try(|i| LengthOrPercentageOrAuto::parse_non_negative_quirky(context, i, allow_quirks))
            .map(MozLength::LengthOrPercentageOrAuto)
    }
}
/// A value suitable for a `max-width` or `max-height` property.
#[allow(missing_docs)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub enum MaxLength {
    /// A `<length>`, a `<percentage>`, or `none`.
    LengthOrPercentageOrNone(LengthOrPercentageOrNone),
    /// An extremum length keyword.
    ExtremumLength(ExtremumLength),
}
impl Parse for MaxLength {
    fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        // Default entry point: no quirks.
        MaxLength::parse_quirky(context, input, AllowQuirks::No)
    }
}
impl MaxLength {
    /// Parses, with quirks: an extremum keyword is tried first, then a
    /// non-negative `<length> | <percentage> | none`.
    pub fn parse_quirky<'i, 't>(context: &ParserContext,
                                input: &mut Parser<'i, 't>,
                                allow_quirks: AllowQuirks) -> Result<Self, ParseError<'i>> {
        if let Ok(extremum) = input.try(ExtremumLength::parse) {
            return Ok(MaxLength::ExtremumLength(extremum));
        }
        input.try(|i| LengthOrPercentageOrNone::parse_non_negative_quirky(context, i, allow_quirks))
            .map(MaxLength::LengthOrPercentageOrNone)
    }
}
|
use std::io;
use std::io::{Read, Cursor};
use std::result::Result;
use std::str::{Utf8Error, from_utf8};
use byteorder;
use byteorder::ReadBytesExt;
use super::Marker;
/// Represents an error that can occur when attempting to read bytes from the reader.
///
/// This is a thin wrapper over the standard `io::Error` type. Namely, it adds one additional error
/// case: an unexpected EOF, which `byteorder` reports separately from other I/O failures.
#[derive(Debug)]
pub enum ReadError {
    /// Unexpected end of file reached while reading bytes.
    UnexpectedEOF,
    /// I/O error occurred while reading bytes.
    Io(io::Error),
}
impl From<io::Error> for ReadError {
fn from(err: io::Error) -> ReadError {
ReadError::Io(err)
}
}
impl From<byteorder::Error> for ReadError {
fn from(err: byteorder::Error) -> ReadError {
match err {
byteorder::Error::UnexpectedEOF => ReadError::UnexpectedEOF,
byteorder::Error::Io(err) => ReadError::Io(err),
}
}
}
/// Represents an error that can occur when attempting to read a MessagePack marker from the reader.
///
/// This is a thin wrapper over the standard `io::Error` type. Namely, it adds one additional error
/// case: an unexpected EOF, which `byteorder` reports separately from other I/O failures.
#[derive(Debug)]
pub enum MarkerReadError {
    /// Unexpected end of file reached while reading the marker.
    UnexpectedEOF,
    /// I/O error occurred while reading the marker.
    Io(io::Error),
}
impl From<byteorder::Error> for MarkerReadError {
fn from(err: byteorder::Error) -> MarkerReadError {
match err {
byteorder::Error::UnexpectedEOF => MarkerReadError::UnexpectedEOF,
byteorder::Error::Io(err) => MarkerReadError::Io(err),
}
}
}
impl From<MarkerReadError> for ReadError {
fn from(err: MarkerReadError) -> ReadError {
match err {
MarkerReadError::UnexpectedEOF => ReadError::UnexpectedEOF,
MarkerReadError::Io(err) => ReadError::Io(err),
}
}
}
/// Represents an error that can occur when attempting to read a MessagePack'ed single-byte value
/// from the reader.
#[derive(Debug)]
pub enum FixedValueReadError {
    /// Unexpected end of file reached while reading the value.
    UnexpectedEOF,
    /// I/O error occurred while reading the value.
    Io(io::Error),
    /// The decoded type does not match the expected one; carries the actual marker.
    TypeMismatch(Marker),
}
impl From<MarkerReadError> for FixedValueReadError {
    fn from(err: MarkerReadError) -> FixedValueReadError {
        // A fixed value is fully encoded in its marker byte, so a marker-read
        // failure maps directly onto the corresponding case here.
        match err {
            MarkerReadError::UnexpectedEOF => FixedValueReadError::UnexpectedEOF,
            MarkerReadError::Io(inner) => FixedValueReadError::Io(inner),
        }
    }
}
/// Represents an error that can occur when attempting to read a MessagePack'ed complex value from
/// the reader.
#[derive(Debug)]
pub enum ValueReadError {
    /// Failed to read the marker.
    InvalidMarkerRead(ReadError),
    /// Failed to read the data.
    InvalidDataRead(ReadError),
    /// The decoded type does not match the expected one; carries the actual marker.
    TypeMismatch(Marker),
}
impl From<MarkerReadError> for ValueReadError {
fn from(err: MarkerReadError) -> ValueReadError {
ValueReadError::InvalidMarkerRead(From::from(err))
}
}
/// Represents an error that can occur when attempting to decode a MessagePack'ed string.
#[derive(Debug)]
pub enum DecodeStringError<'a> {
    /// Failed to read the marker.
    InvalidMarkerRead(ReadError),
    /// Failed to read the data.
    InvalidDataRead(ReadError),
    /// The decoded type does not match the expected one; carries the actual marker.
    TypeMismatch(Marker),
    /// The given buffer is not large enough to accumulate the specified amount of bytes.
    BufferSizeTooSmall(u32),
    /// Failed while copying the data into the buffer; carries the bytes read so far.
    InvalidDataCopy(&'a [u8], ReadError),
    /// The decoded bytes are not valid UTF-8; carries the raw bytes and the UTF-8 error.
    InvalidUtf8(&'a [u8], Utf8Error),
}
impl<'a> From<ValueReadError> for DecodeStringError<'a> {
    fn from(err: ValueReadError) -> DecodeStringError<'a> {
        // Every ValueReadError case has a direct counterpart here.
        match err {
            ValueReadError::InvalidMarkerRead(inner) => DecodeStringError::InvalidMarkerRead(inner),
            ValueReadError::InvalidDataRead(inner) => DecodeStringError::InvalidDataRead(inner),
            ValueReadError::TypeMismatch(marker) => DecodeStringError::TypeMismatch(marker),
        }
    }
}
/// Attempts to read a single byte from the given reader and decodes it as a MessagePack marker.
///
/// Any read failure is converted into a `MarkerReadError`.
fn read_marker<R>(rd: &mut R) -> Result<Marker, MarkerReadError>
    where R: Read
{
    rd.read_u8()
        .map(Marker::from_u8)
        .map_err(From::from)
}
/// Attempts to read a single byte from the given reader and to decode it as a nil value.
///
/// According to the MessagePack specification, a nil value is represented as a single `0xc0` byte.
///
/// # Errors
///
/// Returns `FixedValueReadError` on any I/O error while reading the marker, and
/// `FixedValueReadError::TypeMismatch` (carrying the actual marker) when the value is not nil.
pub fn read_nil<R>(rd: &mut R) -> Result<(), FixedValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::Null = marker {
        Ok(())
    } else {
        Err(FixedValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read a single byte from the given reader and to decode it as a boolean value.
///
/// According to the MessagePack specification, an encoded boolean value is represented as a single
/// byte.
///
/// # Errors
///
/// Returns `FixedValueReadError` on any I/O error while reading the marker, and
/// `FixedValueReadError::TypeMismatch` (carrying the actual marker) when the value is not a bool.
pub fn read_bool<R>(rd: &mut R) -> Result<bool, FixedValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    match marker {
        Marker::True => Ok(true),
        Marker::False => Ok(false),
        other => Err(FixedValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read a single byte from the given reader and to decode it as a positive fixnum
/// value.
///
/// According to the MessagePack specification, a positive fixed integer value is represented using
/// a single byte in `[0x00; 0x7f]` range inclusively, prepended with a special marker mask.
///
/// # Errors
///
/// Returns `FixedValueReadError` on any I/O error while reading the marker, and
/// `FixedValueReadError::TypeMismatch` (carrying the actual marker) for any other type.
pub fn read_pfix<R>(rd: &mut R) -> Result<u8, FixedValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::PositiveFixnum(val) = marker {
        Ok(val)
    } else {
        Err(FixedValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read a single byte from the given reader and to decode it as a negative fixnum
/// value.
///
/// According to the MessagePack specification, a negative fixed integer value is represented using
/// a single byte in `[0xe0; 0xff]` range inclusively, prepended with a special marker mask.
///
/// # Errors
///
/// Returns `FixedValueReadError` on any I/O error while reading the marker, and
/// `FixedValueReadError::TypeMismatch` (carrying the actual marker) for any other type.
pub fn read_nfix<R>(rd: &mut R) -> Result<i8, FixedValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::NegativeFixnum(val) = marker {
        Ok(val)
    } else {
        Err(FixedValueReadError::TypeMismatch(marker))
    }
}
// Generates `read_data_*` helpers that read one raw numeric value from a
// reader, wrapping any failure into `ValueReadError::InvalidDataRead`.
//
// The `$d` selector picks the `byteorder` call shape: `0` for single-byte
// reads (no endianness parameter), `1` for multi-byte big-endian reads.
macro_rules! make_read_data_fn {
    // Single-byte decoders (u8/i8) take no endianness type parameter.
    (deduce, $reader:ident, $decoder:ident, 0)
        => ($reader.$decoder(););
    // Multi-byte decoders read big-endian, as the MessagePack format requires.
    (deduce, $reader:ident, $decoder:ident, 1)
        => ($reader.$decoder::<byteorder::BigEndian>(););
    // Common generator: emits the helper function for type `$t`.
    (gen, $t:ty, $d:tt, $name:ident, $decoder:ident) => {
        fn $name<R>(rd: &mut R) -> Result<$t, ValueReadError>
            where R: Read
        {
            match make_read_data_fn!(deduce, rd, $decoder, $d) {
                Ok(data) => Ok(data),
                Err(err) => Err(ValueReadError::InvalidDataRead(From::from(err))),
            }
        }
    };
    (u8, $name:ident, $decoder:ident) => (make_read_data_fn!(gen, u8, 0, $name, $decoder););
    (i8, $name:ident, $decoder:ident) => (make_read_data_fn!(gen, i8, 0, $name, $decoder););
    ($t:ty, $name:ident, $decoder:ident) => (make_read_data_fn!(gen, $t, 1, $name, $decoder););
}
// Instantiate a read_data_* helper for every numeric type the value readers
// below need; multi-byte variants read big-endian per the MessagePack format.
make_read_data_fn!(u8, read_data_u8, read_u8);
make_read_data_fn!(u16, read_data_u16, read_u16);
make_read_data_fn!(u32, read_data_u32, read_u32);
make_read_data_fn!(u64, read_data_u64, read_u64);
make_read_data_fn!(i8, read_data_i8, read_i8);
make_read_data_fn!(i16, read_data_i16, read_i16);
make_read_data_fn!(i32, read_data_i32, read_i32);
make_read_data_fn!(i64, read_data_i64, read_i64);
make_read_data_fn!(f32, read_data_f32, read_f32);
make_read_data_fn!(f64, read_data_f64, read_f64);
/// Attempts to read exactly 2 bytes from the given reader and to decode them as a `u8` value:
/// one marker byte followed by the data byte.
///
/// # Errors
///
/// Returns `ValueReadError` on any I/O error while reading either the marker or the data, and
/// `ValueReadError::TypeMismatch` (carrying the actual marker) when the encoded type is not `u8`.
pub fn read_u8<R>(rd: &mut R) -> Result<u8, ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::U8 = marker {
        Ok(try!(read_data_u8(rd)))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 3 bytes from the given reader and to decode them as a `u16` value:
/// one marker byte followed by the big-endian data.
///
/// # Errors
///
/// Returns `ValueReadError` on any I/O error while reading either the marker or the data, and
/// `ValueReadError::TypeMismatch` (carrying the actual marker) when the encoded type is not `u16`.
pub fn read_u16<R>(rd: &mut R) -> Result<u16, ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::U16 = marker {
        Ok(try!(read_data_u16(rd)))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 5 bytes from the given reader and to decode them as a `u32` value:
/// one marker byte followed by the big-endian data.
///
/// # Errors
///
/// Returns `ValueReadError` on any I/O error while reading either the marker or the data, and
/// `ValueReadError::TypeMismatch` (carrying the actual marker) when the encoded type is not `u32`.
pub fn read_u32<R>(rd: &mut R) -> Result<u32, ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::U32 = marker {
        Ok(try!(read_data_u32(rd)))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 9 bytes from the given reader and to decode them as a `u64` value:
/// one marker byte followed by the big-endian data.
///
/// # Errors
///
/// Returns `ValueReadError` on any I/O error while reading either the marker or the data, and
/// `ValueReadError::TypeMismatch` (carrying the actual marker) when the encoded type is not `u64`.
pub fn read_u64<R>(rd: &mut R) -> Result<u64, ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::U64 = marker {
        Ok(try!(read_data_u64(rd)))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 2 bytes from the given reader and to decode them as an `i8` value:
/// one marker byte followed by the data byte.
///
/// # Errors
///
/// Returns `ValueReadError` on any I/O error while reading either the marker or the data, and
/// `ValueReadError::TypeMismatch` (carrying the actual marker) when the encoded type is not `i8`.
pub fn read_i8<R>(rd: &mut R) -> Result<i8, ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::I8 = marker {
        Ok(try!(read_data_i8(rd)))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 3 bytes from the given reader and to decode them as an `i16` value:
/// one marker byte followed by the big-endian data.
///
/// # Errors
///
/// Returns `ValueReadError` on any I/O error while reading either the marker or the data, and
/// `ValueReadError::TypeMismatch` (carrying the actual marker) when the encoded type is not `i16`.
pub fn read_i16<R>(rd: &mut R) -> Result<i16, ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::I16 = marker {
        Ok(try!(read_data_i16(rd)))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 5 bytes from the given reader and to decode them as an `i32` value:
/// one marker byte followed by the big-endian data.
///
/// # Errors
///
/// Returns `ValueReadError` on any I/O error while reading either the marker or the data, and
/// `ValueReadError::TypeMismatch` (carrying the actual marker) when the encoded type is not `i32`.
pub fn read_i32<R>(rd: &mut R) -> Result<i32, ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::I32 = marker {
        Ok(try!(read_data_i32(rd)))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 9 bytes from the given reader and to decode them as an `i64` value:
/// one marker byte followed by the big-endian data.
///
/// # Errors
///
/// Returns `ValueReadError` on any I/O error while reading either the marker or the data, and
/// `ValueReadError::TypeMismatch` (carrying the actual marker) when the encoded type is not `i64`.
pub fn read_i64<R>(rd: &mut R) -> Result<i64, ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::I64 = marker {
        Ok(try!(read_data_i64(rd)))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read up to 2 bytes from the given reader and to decode them as a `u8` value.
///
/// Unlike `read_u8`, this accepts any unsigned encoding that fits into `u8`: a positive fixnum
/// or an explicit `u8`. Signed encodings still produce `TypeMismatch`, even when the value
/// would fit in `u8`.
///
/// # Errors
///
/// Returns `ValueReadError` on any I/O error while reading either the marker or the data, and
/// `ValueReadError::TypeMismatch` (carrying the actual marker) for any other encoded type.
pub fn read_u8_loosely<R>(rd: &mut R) -> Result<u8, ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    match marker {
        Marker::PositiveFixnum(n) => Ok(n),
        Marker::U8 => Ok(try!(read_data_u8(rd))),
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read up to 3 bytes from the given reader and to decode them as a `u16` value.
///
/// Unlike `read_u16`, this accepts any unsigned encoding that fits into `u16` (positive fixnum,
/// `u8`, or `u16`) and widens it. Signed encodings still produce `TypeMismatch`, even when the
/// value would fit in `u16`.
///
/// # Errors
///
/// Returns `ValueReadError` on any I/O error while reading either the marker or the data, and
/// `ValueReadError::TypeMismatch` (carrying the actual marker) for any other encoded type.
pub fn read_u16_loosely<R>(rd: &mut R) -> Result<u16, ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    match marker {
        Marker::PositiveFixnum(n) => Ok(n as u16),
        Marker::U8 => Ok(try!(read_data_u8(rd)) as u16),
        Marker::U16 => Ok(try!(read_data_u16(rd))),
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read up to 5 bytes from the given reader and to decode them as `u32` value.
///
/// Unlike the `read_u32`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode signed integers will result in `TypeMismatch` error even if the
/// value fits in `u32`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_u32_loosely<R>(rd: &mut R) -> Result<u32, ValueReadError>
    where R: Read
{
    // Accept any unsigned encoding that fits into a u32: fixnum, u8, u16 or u32.
    let marker = try!(read_marker(rd));
    match marker {
        Marker::PositiveFixnum(val) => Ok(val as u32),
        Marker::U8 => {
            let val = try!(read_data_u8(rd));
            Ok(val as u32)
        }
        Marker::U16 => {
            let val = try!(read_data_u16(rd));
            Ok(val as u32)
        }
        Marker::U32 => {
            let val = try!(read_data_u32(rd));
            Ok(val)
        }
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read up to 9 bytes from the given reader and to decode them as `u64` value.
///
/// This function will try to read up to 9 bytes from the reader (1 for marker and up to 8 for data)
/// and interpret them as a big-endian u64.
///
/// Unlike the `read_u64`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode signed integers will result in `TypeMismatch` error even if the
/// value fits in `u64`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_u64_loosely<R>(rd: &mut R) -> Result<u64, ValueReadError>
    where R: Read
{
    // Accept any unsigned encoding: fixnum, u8, u16, u32 or u64; everything
    // widens losslessly into a u64.
    let marker = try!(read_marker(rd));
    match marker {
        Marker::PositiveFixnum(val) => Ok(val as u64),
        Marker::U8 => {
            let val = try!(read_data_u8(rd));
            Ok(val as u64)
        }
        Marker::U16 => {
            let val = try!(read_data_u16(rd));
            Ok(val as u64)
        }
        Marker::U32 => {
            let val = try!(read_data_u32(rd));
            Ok(val as u64)
        }
        Marker::U64 => {
            let val = try!(read_data_u64(rd));
            Ok(val)
        }
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read up to 2 bytes from the given reader and to decode them as `i8` value.
///
/// Unlike the `read_i8`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode unsigned integers will result in `TypeMismatch` error even if the
/// value fits in `i8`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i8_loosely<R>(rd: &mut R) -> Result<i8, ValueReadError>
    where R: Read
{
    // A negative fixnum carries its value inside the marker byte itself;
    // an `i8` marker is followed by exactly one data byte.
    let marker = try!(read_marker(rd));
    match marker {
        Marker::NegativeFixnum(val) => Ok(val),
        Marker::I8 => {
            let val = try!(read_data_i8(rd));
            Ok(val)
        }
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read up to 3 bytes from the given reader and to decode them as `i16` value.
///
/// Unlike the `read_i16`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode unsigned integers will result in `TypeMismatch` error even if the
/// value fits in `i16`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i16_loosely<R>(rd: &mut R) -> Result<i16, ValueReadError>
    where R: Read
{
    // Accept any signed encoding that fits into an i16: fixnum, i8 or i16.
    let marker = try!(read_marker(rd));
    match marker {
        Marker::NegativeFixnum(val) => Ok(val as i16),
        Marker::I8 => {
            let val = try!(read_data_i8(rd));
            Ok(val as i16)
        }
        Marker::I16 => {
            let val = try!(read_data_i16(rd));
            Ok(val)
        }
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read up to 5 bytes from the given reader and to decode them as `i32` value.
///
/// Unlike the `read_i32`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode unsigned integers will result in `TypeMismatch` error even if the
/// value fits in `i32`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i32_loosely<R>(rd: &mut R) -> Result<i32, ValueReadError>
    where R: Read
{
    // Accept any signed encoding that fits into an i32: fixnum, i8, i16 or i32.
    let marker = try!(read_marker(rd));
    match marker {
        Marker::NegativeFixnum(val) => Ok(val as i32),
        Marker::I8 => {
            let val = try!(read_data_i8(rd));
            Ok(val as i32)
        }
        Marker::I16 => {
            let val = try!(read_data_i16(rd));
            Ok(val as i32)
        }
        Marker::I32 => {
            let val = try!(read_data_i32(rd));
            Ok(val)
        }
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read up to 9 bytes from the given reader and to decode them as `i64` value.
///
/// This function will try to read up to 9 bytes from the reader (1 for marker and up to 8 for data)
/// and interpret them as a big-endian i64.
///
/// Unlike the `read_i64`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode unsigned integers will result in `TypeMismatch` error even if the
/// value fits in `i64`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i64_loosely<R>(rd: &mut R) -> Result<i64, ValueReadError>
    where R: Read
{
    // Accept any signed encoding: fixnum, i8, i16, i32 or i64; everything
    // widens losslessly into an i64.
    let marker = try!(read_marker(rd));
    match marker {
        Marker::NegativeFixnum(val) => Ok(val as i64),
        Marker::I8 => {
            let val = try!(read_data_i8(rd));
            Ok(val as i64)
        }
        Marker::I16 => {
            let val = try!(read_data_i16(rd));
            Ok(val as i64)
        }
        Marker::I32 => {
            let val = try!(read_data_i32(rd));
            Ok(val as i64)
        }
        Marker::I64 => {
            let val = try!(read_data_i64(rd));
            Ok(val)
        }
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read exactly 5 bytes from the given reader and to decode them as `f32` value.
///
/// The first byte should be the marker and the others should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_f32<R>(rd: &mut R) -> Result<f32, ValueReadError>
    where R: Read
{
    // Only the exact `f32` marker is accepted here; no numeric coercion is done.
    let marker = try!(read_marker(rd));
    if let Marker::F32 = marker {
        let val = try!(read_data_f32(rd));
        Ok(val)
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 9 bytes from the given reader and to decode them as `f64` value.
///
/// The first byte should be the marker and the others should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_f64<R>(rd: &mut R) -> Result<f64, ValueReadError>
    where R: Read
{
    // Only the exact `f64` marker is accepted here; no numeric coercion is done.
    let marker = try!(read_marker(rd));
    if let Marker::F64 = marker {
        let val = try!(read_data_f64(rd));
        Ok(val)
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read up to 9 bytes from the given reader and to decode them as a string `u32` size
/// value.
///
/// According to the MessagePack specification, the string format family stores a byte array with
/// 1, 2, 3, or 5 extra bytes of length information in addition to the byte array itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_str_len<R>(rd: &mut R) -> Result<u32, ValueReadError>
    where R: Read
{
    // The string family encodes its length either inside the fixstr marker or
    // as a 1-, 2- or 4-byte integer following a str8/str16/str32 marker.
    let marker = try!(read_marker(rd));
    match marker {
        Marker::FixedString(size) => Ok(size as u32),
        Marker::Str8 => {
            let size = try!(read_data_u8(rd));
            Ok(size as u32)
        }
        Marker::Str16 => {
            let size = try!(read_data_u16(rd));
            Ok(size as u32)
        }
        Marker::Str32 => {
            let size = try!(read_data_u32(rd));
            Ok(size)
        }
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read a string data from the given reader and copy it to the buffer provided.
///
/// On success returns a borrowed string type, allowing the copied bytes to be viewed as a proper
/// utf-8 string.
/// According to the spec, the string's data must be encoded using utf-8.
///
/// # Errors
///
/// Returns `Err` in the following cases:
///
/// - if any IO error (including unexpected EOF) occurs, while reading an `rd`.
/// - if the `out` buffer is not large enough to hold all the copied data.
/// - if the data is not utf-8, with a description as to why the provided data is not utf-8 and
///   with the number of bytes actually copied, so they can be retrieved from `out`.
///
/// # Examples
/// ```
/// use rmp::decode::read_str;
///
/// let buf = [0xaa, 0x6c, 0x65, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65];
/// let mut out = [0u8; 16];
///
/// assert_eq!("le message", read_str(&mut &buf[..], &mut &mut out[..]).unwrap());
/// ```
///
/// # Unstable
///
/// This function is **unstable**, because it needs review.
pub fn read_str<'r, R>(rd: &mut R, mut buf: &'r mut [u8]) -> Result<&'r str, DecodeStringError<'r>>
    where R: Read
{
    // Learn how many bytes the string occupies, then make sure the
    // caller-provided buffer can hold all of them before copying anything.
    let len = try!(read_str_len(rd));
    let required = len as usize;

    if required > buf.len() {
        return Err(DecodeStringError::BufferSizeTooSmall(len));
    }

    read_str_data(rd, len, &mut buf[..required])
}
/// Copies exactly `len` bytes of string payload from `rd` into `buf` and validates them as UTF-8.
///
/// `buf` must be exactly `len` bytes long (enforced by a debug assertion). On a short read the
/// bytes copied so far are reported through `DecodeStringError::InvalidDataCopy`.
fn read_str_data<'r, R>(rd: &mut R, len: u32, buf: &'r mut[u8]) -> Result<&'r str, DecodeStringError<'r>>
    where R: Read
{
    debug_assert_eq!(len as usize, buf.len());

    // We need cursor here, because in the common case we cannot guarantee, that copying will be
    // performed in a single step.
    let mut cur = Cursor::new(buf);

    // Trying to copy exact `len` bytes.
    match io::copy(&mut rd.take(len as u64), &mut cur) {
        Ok(size) if size == len as u64 => {
            // Release buffer owning from cursor.
            let buf = cur.into_inner();

            match from_utf8(buf) {
                Ok(decoded) => Ok(decoded),
                Err(err) => Err(DecodeStringError::InvalidUtf8(buf, err)),
            }
        }
        Ok(size) => {
            // Fewer bytes than announced arrived: the reader hit EOF mid-string.
            // Hand back the partially filled prefix so the caller can inspect it.
            let buf = cur.into_inner();
            Err(DecodeStringError::InvalidDataCopy(&buf[..size as usize], ReadError::UnexpectedEOF))
        }
        Err(err) => Err(DecodeStringError::InvalidDataRead(From::from(err))),
    }
}
/// Attempts to read and decode a string value from the reader, returning a borrowed slice from it.
///
// TODO: it is better to return &str; may panic on len mismatch; extend documentation.
pub fn read_str_ref(rd: &[u8]) -> Result<&[u8], DecodeStringError> {
let mut cur = io::Cursor::new(rd);
let len = try!(read_str_len(&mut cur));
let start = cur.position() as usize;
Ok(&rd[start .. start + len as usize])
}
/// Attempts to read up to 5 bytes from the given reader and to decode them as a big-endian u32
/// array size.
///
/// Array format family stores a sequence of elements in 1, 3, or 5 bytes of extra bytes in addition
/// to the elements.
// TODO: Docs.
pub fn read_array_size<R>(rd: &mut R) -> Result<u32, ValueReadError>
    where R: Read
{
    // A fixarray marker embeds the element count; array16/array32 spell it out
    // in the following 2 or 4 bytes.
    let marker = try!(read_marker(rd));
    match marker {
        Marker::FixedArray(size) => Ok(size as u32),
        Marker::Array16 => {
            let size = try!(read_data_u16(rd));
            Ok(size as u32)
        }
        Marker::Array32 => {
            let size = try!(read_data_u32(rd));
            Ok(size)
        }
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read up to 5 bytes from the given reader and to decode them as a big-endian u32
/// map size.
///
/// Map format family stores a sequence of elements in 1, 3, or 5 bytes of extra bytes in addition
/// to the elements.
// TODO: Docs.
pub fn read_map_size<R>(rd: &mut R) -> Result<u32, ValueReadError>
    where R: Read
{
    // A fixmap marker embeds the pair count; map16/map32 spell it out in the
    // following 2 or 4 bytes.
    let marker = try!(read_marker(rd));
    match marker {
        Marker::FixedMap(size) => Ok(size as u32),
        Marker::Map16 => {
            let size = try!(read_data_u16(rd));
            Ok(size as u32)
        }
        Marker::Map32 => {
            let size = try!(read_data_u32(rd));
            Ok(size)
        }
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
// TODO: Docs.
pub fn read_bin_len<R>(rd: &mut R) -> Result<u32, ValueReadError>
    where R: Read
{
    // Binary payload lengths follow the bin8/bin16/bin32 marker as a 1-, 2- or
    // 4-byte unsigned integer.
    let marker = try!(read_marker(rd));
    match marker {
        Marker::Bin8 => {
            let len = try!(read_data_u8(rd));
            Ok(len as u32)
        }
        Marker::Bin16 => {
            let len = try!(read_data_u16(rd));
            Ok(len as u32)
        }
        Marker::Bin32 => {
            let len = try!(read_data_u32(rd));
            Ok(len)
        }
        other => Err(ValueReadError::TypeMismatch(other)),
    }
}
// TODO: Docs; not sure about naming.
// TODO: Docs; not sure about naming.
pub fn read_bin_borrow(rd: &[u8]) -> Result<&[u8], ValueReadError> {
    let mut cur = io::Cursor::new(rd);
    let len = try!(read_bin_len(&mut cur)) as usize;
    let start = cur.position() as usize;

    // Refuse to slice past the end of the input instead of panicking.
    if start + len <= rd.len() {
        Ok(&rd[start .. start + len])
    } else {
        Err(ValueReadError::InvalidDataRead(ReadError::UnexpectedEOF))
    }
}
/// Attempts to read exactly 3 bytes from the given reader and interpret them as a fixext1 type
/// with data attached.
///
/// According to the MessagePack specification, a fixext1 stores an integer and a byte array whose
/// length is 1 byte. Its marker byte is `0xd4`.
///
/// Note, that this function copies a byte array from the reader to the output `u8` variable.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
pub fn read_fixext1<R>(rd: &mut R) -> Result<(i8, u8), ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::FixExt1 = marker {
        // One byte of type id followed by exactly one byte of payload.
        let ty = try!(read_data_i8(rd));
        let val = try!(read_data_u8(rd));
        Ok((ty, val))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 4 bytes from the given reader and interpret them as a fixext2 type
/// with data attached.
///
/// According to the MessagePack specification, a fixext2 stores an integer and a byte array whose
/// length is 2 bytes. Its marker byte is `0xd5`.
///
/// Note, that this function copies a byte array from the reader to the output buffer, which is
/// unlikely if you want zero-copy functionality.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
pub fn read_fixext2<R>(rd: &mut R) -> Result<(i8, [u8; 2]), ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::FixExt2 = marker {
        // Type id plus a fixed 2-byte payload, copied into a stack array.
        let mut data = [0u8; 2];
        let ty = try!(read_fixext_data(rd, &mut data));
        Ok((ty, data))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 6 bytes from the given reader and interpret them as a fixext4 type
/// with data attached.
///
/// According to the MessagePack specification, a fixext4 stores an integer and a byte array whose
/// length is 4 bytes. Its marker byte is `0xd6`.
///
/// Note, that this function copies a byte array from the reader to the output buffer, which is
/// unlikely if you want zero-copy functionality.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
pub fn read_fixext4<R>(rd: &mut R) -> Result<(i8, [u8; 4]), ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::FixExt4 = marker {
        // Type id plus a fixed 4-byte payload, copied into a stack array.
        let mut data = [0u8; 4];
        let ty = try!(read_fixext_data(rd, &mut data));
        Ok((ty, data))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 10 bytes from the given reader and interpret them as a fixext8 type
/// with data attached.
///
/// According to the MessagePack specification, a fixext8 stores an integer and a byte array whose
/// length is 8 bytes. Its marker byte is `0xd7`.
///
/// Note, that this function copies a byte array from the reader to the output buffer, which is
/// unlikely if you want zero-copy functionality.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
pub fn read_fixext8<R>(rd: &mut R) -> Result<(i8, [u8; 8]), ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::FixExt8 = marker {
        // Type id plus a fixed 8-byte payload, copied into a stack array.
        let mut data = [0u8; 8];
        let ty = try!(read_fixext_data(rd, &mut data));
        Ok((ty, data))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 18 bytes from the given reader and interpret them as a fixext16 type
/// with data attached.
///
/// According to the MessagePack specification, a fixext16 stores an integer and a byte array whose
/// length is 16 bytes. Its marker byte is `0xd8`.
///
/// Note, that this function copies a byte array from the reader to the output buffer, which is
/// unlikely if you want zero-copy functionality.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
pub fn read_fixext16<R>(rd: &mut R) -> Result<(i8, [u8; 16]), ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::FixExt16 = marker {
        // Type id plus a fixed 16-byte payload, copied into a stack array.
        let mut data = [0u8; 16];
        let ty = try!(read_fixext_data(rd, &mut data));
        Ok((ty, data))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
fn read_fixext_data<R>(rd: &mut R, buf: &mut [u8]) -> Result<i8, ValueReadError>
    where R: Read
{
    // The extension type id always precedes the payload bytes.
    let id = try!(read_data_i8(rd));

    read_full(rd, buf)
        .map(|_| id)
        .map_err(ValueReadError::InvalidDataRead)
}
/// Reads bytes from `rd` until `buf` is completely filled.
///
/// Retries transparently on `ErrorKind::Interrupted`. Returns `ReadError::UnexpectedEOF` if the
/// reader is exhausted before the buffer is full, and propagates any other I/O error.
fn read_full<R: Read>(rd: &mut R, buf: &mut [u8]) -> Result<(), ReadError> {
    // Total number of bytes read so far.
    let mut nread = 0usize;

    while nread < buf.len() {
        match rd.read(&mut buf[nread..]) {
            // A zero-length read before the buffer is full means EOF.
            Ok(0) => return Err(ReadError::UnexpectedEOF),
            Ok(n) => nread += n,
            // Interrupted reads are harmless; just retry.
            Err(ref err) if err.kind() == io::ErrorKind::Interrupted => {},
            Err(err) => return Err(From::from(err))
        }
    }

    Ok(())
}
/// Metadata of an encoded MessagePack extension value: its application-defined type id together
/// with the payload size in bytes.
#[derive(Debug, PartialEq)]
pub struct ExtMeta {
    /// Application-specific extension type id.
    pub typeid: i8,
    /// Payload length in bytes.
    pub size: u32,
}
/// Unstable: docs, errors
pub fn read_ext_meta<R>(rd: &mut R) -> Result<ExtMeta, ValueReadError>
    where R: Read
{
    // Fixed-size ext markers imply their payload length; ext8/ext16/ext32
    // spell it out in the following 1, 2 or 4 bytes.
    let size = match try!(read_marker(rd)) {
        Marker::FixExt1 => 1,
        Marker::FixExt2 => 2,
        Marker::FixExt4 => 4,
        Marker::FixExt8 => 8,
        Marker::FixExt16 => 16,
        Marker::Ext8 => try!(read_data_u8(rd)) as u32,
        Marker::Ext16 => try!(read_data_u16(rd)) as u32,
        Marker::Ext32 => try!(read_data_u32(rd)),
        other => return Err(ValueReadError::TypeMismatch(other)),
    };

    // The type id trails the size information.
    let typeid = try!(read_data_i8(rd));

    Ok(ExtMeta { typeid: typeid, size: size })
}
/// Contains: owned value decoding, owned error; owned result.
// TODO: docs.
mod value {
    //use std::convert::From;
    use std::io::Read;
    use std::result::Result;

    use super::super::Marker;
    pub use super::super::value::{Integer, Value};
    use super::{
        ReadError,
        MarkerReadError,
        ValueReadError,
        read_marker,
        read_data_u8,
    };

    /// Errors that may occur while decoding an owned `Value` from a reader.
    #[derive(Debug)]
    pub enum Error<'r> {
        /// Failed to read the marker.
        InvalidMarkerRead(ReadError),
        /// Failed to read the data following a marker.
        InvalidDataRead(ReadError),
        /// Failed to read a nested array element; borrows the inner error.
        InvalidArrayRead(&'r Error<'r>),
    }

    impl<'r> From<MarkerReadError> for Error<'r> {
        fn from(err: MarkerReadError) -> Error<'r> {
            Error::InvalidMarkerRead(From::from(err))
        }
    }

    impl<'r> From<ValueReadError> for Error<'r> {
        fn from(err: ValueReadError) -> Error<'r> {
            match err {
                ValueReadError::InvalidMarkerRead(err) => Error::InvalidMarkerRead(err),
                ValueReadError::InvalidDataRead(err) => Error::InvalidDataRead(err),
                // NOTE(review): there is no variant to represent a type mismatch yet, so this
                // conversion aborts; a dedicated variant is needed before relying on it.
                ValueReadError::TypeMismatch(..) => unimplemented!()
            }
        }
    }

    // TODO: docs; examples; incomplete.
    pub fn read_value<R>(rd: &mut R) -> Result<Value, Error>
        where R: Read
    {
        let val = match try!(read_marker(rd)) {
            Marker::Null => Value::Nil,
            Marker::True => Value::Boolean(true),
            Marker::False => Value::Boolean(false),
            Marker::PositiveFixnum(val) => Value::Integer(Integer::U64(val as u64)),
            Marker::U8 => {
                Value::Integer(Integer::U64(try!(read_data_u8(rd)) as u64))
            }
            // Marker::U16
            // Marker::U32
            // Marker::U64
            // Marker::I32 => Ok(Value::Integer(Integer::I64(try!(read_data_i32(rd)) as i64))),
            // // TODO: Other integers.
            // // TODO: Floats.
            // Marker::Str8 => {
            //     let len = try!(read_data_u8(rd)) as u64;
            //     let mut buf: Vec<u8> = (0..len).map(|_| 0u8).collect();
            //     Ok(Value::String(try!(read_str_data(rd, len as u32, &mut buf[..])).to_string()))
            // }
            // // TODO: Other strings.
            // Marker::FixedArray(len) => {
            //     let mut vec = Vec::with_capacity(len as usize);
            //     for _ in 0..len {
            //         vec.push(try!(read_value(rd)));
            //     }
            //     Ok(Value::Array(vec))
            // }
            // // TODO: Map/Bin/Ext.
            _ => unimplemented!()
        };

        Ok(val)
    }

    #[cfg(test)]
    mod tests {
        use std::io::Cursor;

        use super::*;

        #[test]
        fn from_null_decode_value() {
            let buf = [0xc0, 0x00];
            let mut cur = Cursor::new(&buf[..]);

            assert_eq!(Value::Nil, read_value(&mut cur).unwrap());
            // Only the marker byte must have been consumed.
            assert_eq!(1, cur.position());
        }

        #[test]
        fn from_pfix_decode_value() {
            let buf: &[u8] = &[0x1f];
            let mut cur = Cursor::new(buf);

            assert_eq!(Value::Integer(Integer::U64(31)), read_value(&mut cur).unwrap());
            assert_eq!(1, cur.position());
        }

        #[test]
        fn from_i32_decode_value() {
            let buf: &[u8] = &[0xd2, 0xff, 0xff, 0xff, 0xff];
            let mut cur = Cursor::new(buf);

            assert_eq!(Value::Integer(Integer::I64(-1)), read_value(&mut cur).unwrap());
            assert_eq!(5, cur.position());
        }
    } // mod tests
} // mod value
/// Adapter between the MessagePack wire decoders above and the rustc-serialize `Decoder` trait.
pub mod serialize {
    use std::convert::From;
    use std::io::Read;
    use std::result;

    use serialize;

    use super::super::Marker;
    use super::{
        ReadError,
        FixedValueReadError,
        ValueReadError,
        DecodeStringError,
        read_nil,
        read_bool,
        read_u8_loosely,
        read_u16_loosely,
        read_u32_loosely,
        read_u64_loosely,
        read_i8_loosely,
        read_i16_loosely,
        read_i32_loosely,
        read_i64_loosely,
        read_f32,
        read_f64,
        read_str_len,
        read_str_data,
        read_array_size,
        read_map_size,
    };

    /// Unstable: docs; incomplete
    #[derive(Debug)]
    pub enum Error {
        /// The actual value type isn't equal with the expected one.
        TypeMismatch(Marker),
        /// Failed to read the marker.
        InvalidMarkerRead(ReadError),
        /// Failed to read the data following a marker.
        InvalidDataRead(ReadError),
        /// The decoded sequence length differs from the expected one.
        LengthMismatch(u32),
        /// Uncategorized error.
        Uncategorized(String),
    }

    impl From<FixedValueReadError> for Error {
        fn from(err: FixedValueReadError) -> Error {
            match err {
                FixedValueReadError::UnexpectedEOF => Error::InvalidMarkerRead(ReadError::UnexpectedEOF),
                FixedValueReadError::Io(err) => Error::InvalidMarkerRead(ReadError::Io(err)),
                FixedValueReadError::TypeMismatch(marker) => Error::TypeMismatch(marker),
            }
        }
    }

    impl From<ValueReadError> for Error {
        fn from(err: ValueReadError) -> Error {
            match err {
                ValueReadError::TypeMismatch(marker) => Error::TypeMismatch(marker),
                ValueReadError::InvalidMarkerRead(err) => Error::InvalidMarkerRead(err),
                ValueReadError::InvalidDataRead(err) => Error::InvalidDataRead(err),
            }
        }
    }

    /// Unstable: docs; incomplete
    impl<'a> From<DecodeStringError<'a>> for Error {
        fn from(err: DecodeStringError) -> Error {
            match err {
                DecodeStringError::InvalidMarkerRead(err) => Error::InvalidMarkerRead(err),
                // NOTE(review): the borrowed payloads of these variants cannot be stored in the
                // owned `Error`, so they collapse into textual uncategorized errors.
                DecodeStringError::InvalidDataRead(..) => Error::Uncategorized("InvalidDataRead".to_string()),
                DecodeStringError::TypeMismatch(..) => Error::Uncategorized("TypeMismatch".to_string()),
                DecodeStringError::BufferSizeTooSmall(..) => Error::Uncategorized("BufferSizeTooSmall".to_string()),
                DecodeStringError::InvalidDataCopy(..) => Error::Uncategorized("InvalidDataCopy".to_string()),
                DecodeStringError::InvalidUtf8(..) => Error::Uncategorized("InvalidUtf8".to_string()),
            }
        }
    }

    pub type Result<T> = result::Result<T, Error>;

    /// A rustc-serialize decoder that reads MessagePack values from the wrapped reader.
    pub struct Decoder<R: Read> {
        rd: R,
    }

    impl<R: Read> Decoder<R> {
        /// Wraps the given reader in a new decoder.
        pub fn new(rd: R) -> Decoder<R> {
            Decoder {
                rd: rd
            }
        }
    }

    /// Unstable: docs; examples; incomplete
    impl<R: Read> serialize::Decoder for Decoder<R> {
        type Error = Error;

        fn read_nil(&mut self) -> Result<()> {
            Ok(try!(read_nil(&mut self.rd)))
        }

        fn read_bool(&mut self) -> Result<bool> {
            Ok(try!(read_bool(&mut self.rd)))
        }

        // Integers are decoded "loosely": any narrower encoding of the same
        // signedness is accepted and widened.
        fn read_u8(&mut self) -> Result<u8> {
            Ok(try!(read_u8_loosely(&mut self.rd)))
        }

        fn read_u16(&mut self) -> Result<u16> {
            Ok(try!(read_u16_loosely(&mut self.rd)))
        }

        fn read_u32(&mut self) -> Result<u32> {
            Ok(try!(read_u32_loosely(&mut self.rd)))
        }

        fn read_u64(&mut self) -> Result<u64> {
            Ok(try!(read_u64_loosely(&mut self.rd)))
        }

        /// TODO: Doesn't look safe.
        fn read_usize(&mut self) -> Result<usize> {
            // NOTE(review): `u64 as usize` may truncate on 32-bit targets.
            let v = try!(self.read_u64());
            Ok(v as usize)
        }

        fn read_i8(&mut self) -> Result<i8> {
            Ok(try!(read_i8_loosely(&mut self.rd)))
        }

        fn read_i16(&mut self) -> Result<i16> {
            Ok(try!(read_i16_loosely(&mut self.rd)))
        }

        fn read_i32(&mut self) -> Result<i32> {
            Ok(try!(read_i32_loosely(&mut self.rd)))
        }

        fn read_i64(&mut self) -> Result<i64> {
            Ok(try!(read_i64_loosely(&mut self.rd)))
        }

        /// TODO: Doesn't look safe.
        fn read_isize(&mut self) -> Result<isize> {
            // NOTE(review): `i64 as isize` may truncate on 32-bit targets.
            Ok(try!(self.read_i64()) as isize)
        }

        fn read_f32(&mut self) -> Result<f32> {
            Ok(try!(read_f32(&mut self.rd)))
        }

        fn read_f64(&mut self) -> Result<f64> {
            Ok(try!(read_f64(&mut self.rd)))
        }

        // A char is encoded as a one-character string.
        fn read_char(&mut self) -> Result<char> {
            let mut res = try!(self.read_str());
            if res.len() == 1 {
                Ok(res.pop().unwrap())
            } else {
                Err(self.error("length mismatch"))
            }
        }

        fn read_str(&mut self) -> Result<String> {
            let len = try!(read_str_len(&mut self.rd));

            let mut buf: Vec<u8> = (0..len).map(|_| 0u8).collect();

            Ok(try!(read_str_data(&mut self.rd, len, &mut buf[..])).to_string())
        }

        fn read_enum<T, F>(&mut self, _name: &str, _f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T> { unimplemented!() }

        fn read_enum_variant<T, F>(&mut self, _names: &[&str], _f: F) -> Result<T>
            where F: FnMut(&mut Self, usize) -> Result<T> { unimplemented!() }

        fn read_enum_variant_arg<T, F>(&mut self, _idx: usize, _f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T> { unimplemented!() }

        fn read_enum_struct_variant<T, F>(&mut self, _names: &[&str], _f: F) -> Result<T>
            where F: FnMut(&mut Self, usize) -> Result<T> { unimplemented!() }

        fn read_enum_struct_variant_field<T, F>(&mut self, _name: &str, _idx: usize, _f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T> { unimplemented!() }

        // Structs are encoded as fixed-length arrays of their fields.
        fn read_struct<T, F>(&mut self, _name: &str, len: usize, f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T>
        {
            self.read_tuple(len, f)
        }

        fn read_struct_field<T, F>(&mut self, _name: &str, _idx: usize, f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T>
        {
            f(self)
        }

        fn read_tuple<T, F>(&mut self, len: usize, f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T>
        {
            let actual = try!(read_array_size(&mut self.rd));

            if len == actual as usize {
                f(self)
            } else {
                Err(Error::LengthMismatch(actual))
            }
        }

        // In case of MessagePack don't care about argument indexing.
        fn read_tuple_arg<T, F>(&mut self, _idx: usize, f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T>
        {
            f(self)
        }

        fn read_tuple_struct<T, F>(&mut self, _name: &str, _len: usize, _f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T> { unimplemented!() }

        fn read_tuple_struct_arg<T, F>(&mut self, _idx: usize, _f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T> { unimplemented!() }

        /// We treat Value::Null as None.
        fn read_option<T, F>(&mut self, mut f: F) -> Result<T>
            where F: FnMut(&mut Self, bool) -> Result<T>
        {
            // Primarily try to read optimistically; fall back to `None` only
            // when the value turned out to be nil.
            match f(self, true) {
                Ok(val) => Ok(val),
                Err(Error::TypeMismatch(Marker::Null)) => f(self, false),
                Err(err) => Err(err)
            }
        }

        fn read_seq<T, F>(&mut self, f: F) -> Result<T>
            where F: FnOnce(&mut Self, usize) -> Result<T>
        {
            let len = try!(read_array_size(&mut self.rd)) as usize;

            f(self, len)
        }

        fn read_seq_elt<T, F>(&mut self, _idx: usize, f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T>
        {
            f(self)
        }

        fn read_map<T, F>(&mut self, f: F) -> Result<T>
            where F: FnOnce(&mut Self, usize) -> Result<T>
        {
            let len = try!(read_map_size(&mut self.rd)) as usize;

            f(self, len)
        }

        fn read_map_elt_key<T, F>(&mut self, _idx: usize, f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T>
        {
            f(self)
        }

        fn read_map_elt_val<T, F>(&mut self, _idx: usize, f: F) -> Result<T>
            where F: FnOnce(&mut Self) -> Result<T>
        {
            f(self)
        }

        fn error(&mut self, err: &str) -> Error {
            Error::Uncategorized(err.to_string())
        }
    }
}
// [Decode] Integer decoding into a value.
use std::io;
use std::io::{Read, Cursor};
use std::result::Result;
use std::str::{Utf8Error, from_utf8};
use byteorder;
use byteorder::ReadBytesExt;
use super::Marker;
/// Represents an error that can occur when attempting to read bytes from the reader.
///
/// This is a thin wrapper over the standard `io::Error` type. Namely, it adds one additional error
/// case: an unexpected EOF.
///
/// See also `MarkerReadError` for the marker-specific counterpart.
#[derive(Debug)]
pub enum ReadError {
    /// Unexpected end of file reached while reading bytes.
    UnexpectedEOF,
    /// I/O error occurred while reading bytes.
    Io(io::Error),
}
/// Wraps a raw `io::Error` into the `Io` case.
impl From<io::Error> for ReadError {
    fn from(err: io::Error) -> ReadError {
        ReadError::Io(err)
    }
}
/// Maps `byteorder` errors onto this type variant-for-variant.
impl From<byteorder::Error> for ReadError {
    fn from(err: byteorder::Error) -> ReadError {
        match err {
            byteorder::Error::UnexpectedEOF => ReadError::UnexpectedEOF,
            byteorder::Error::Io(err) => ReadError::Io(err),
        }
    }
}
/// Represents an error that can occur when attempting to read a MessagePack marker from the reader.
///
/// This is a thin wrapper over the standard `io::Error` type. Namely, it adds one additional error
/// case: an unexpected EOF.
#[derive(Debug)]
pub enum MarkerReadError {
    /// Unexpected end of file reached while reading the marker.
    UnexpectedEOF,
    /// I/O error occurred while reading the marker.
    Io(io::Error),
}
/// Maps `byteorder` errors onto this type variant-for-variant.
impl From<byteorder::Error> for MarkerReadError {
    fn from(err: byteorder::Error) -> MarkerReadError {
        match err {
            byteorder::Error::UnexpectedEOF => MarkerReadError::UnexpectedEOF,
            byteorder::Error::Io(err) => MarkerReadError::Io(err),
        }
    }
}
/// A marker-read failure is a plain read failure once the context is dropped.
impl From<MarkerReadError> for ReadError {
    fn from(err: MarkerReadError) -> ReadError {
        match err {
            MarkerReadError::UnexpectedEOF => ReadError::UnexpectedEOF,
            MarkerReadError::Io(err) => ReadError::Io(err),
        }
    }
}
/// Represents an error that can occur when attempting to read a MessagePack'ed single-byte value
/// from the reader.
#[derive(Debug)]
pub enum FixedValueReadError {
    /// Unexpected end of file reached while reading the value.
    UnexpectedEOF,
    /// I/O error occurred while reading the value.
    Io(io::Error),
    /// The decoded type does not match the expected one.
    TypeMismatch(Marker),
}
/// For single-byte values reading a marker IS reading the value, so the cases map directly.
impl From<MarkerReadError> for FixedValueReadError {
    fn from(err: MarkerReadError) -> FixedValueReadError {
        match err {
            MarkerReadError::UnexpectedEOF => FixedValueReadError::UnexpectedEOF,
            MarkerReadError::Io(err) => FixedValueReadError::Io(err),
        }
    }
}
/// Represents an error that can occur when attempting to read a MessagePack'ed complex value from
/// the reader.
#[derive(Debug)]
pub enum ValueReadError {
    /// Failed to read the marker.
    InvalidMarkerRead(ReadError),
    /// Failed to read the data.
    InvalidDataRead(ReadError),
    /// The decoded type does not match the expected one.
    TypeMismatch(Marker),
}
/// A failure to read the marker byte is classified as `InvalidMarkerRead`.
impl From<MarkerReadError> for ValueReadError {
    fn from(err: MarkerReadError) -> ValueReadError {
        ValueReadError::InvalidMarkerRead(From::from(err))
    }
}
/// Represents an error that can occur when attempting to decode a string from the reader.
#[derive(Debug)]
pub enum DecodeStringError<'a> {
    /// Failed to read the marker.
    InvalidMarkerRead(ReadError),
    /// Failed to read the data.
    InvalidDataRead(ReadError),
    /// The decoded type does not match a string type.
    TypeMismatch(Marker),
    /// The given buffer is not large enough to accumulate the specified amount of bytes.
    BufferSizeTooSmall(u32),
    /// Only a part of the data could be copied; holds the bytes read so far and the cause.
    InvalidDataCopy(&'a [u8], ReadError),
    /// The copied bytes are not valid UTF-8; holds the raw bytes and the UTF-8 error.
    InvalidUtf8(&'a [u8], Utf8Error),
}
/// Lifts the generic value-read cases into the string-decoding error.
impl<'a> From<ValueReadError> for DecodeStringError<'a> {
    fn from(err: ValueReadError) -> DecodeStringError<'a> {
        match err {
            ValueReadError::InvalidMarkerRead(err) => DecodeStringError::InvalidMarkerRead(err),
            ValueReadError::InvalidDataRead(err) => DecodeStringError::InvalidDataRead(err),
            ValueReadError::TypeMismatch(marker) => DecodeStringError::TypeMismatch(marker),
        }
    }
}
/// Reads one byte from the reader and interprets it as a MessagePack marker.
fn read_marker<R>(rd: &mut R) -> Result<Marker, MarkerReadError>
    where R: Read
{
    // A marker is always a single byte; any read failure is converted into
    // the dedicated marker-read error type.
    rd.read_u8()
        .map(Marker::from_u8)
        .map_err(From::from)
}
/// Attempts to read a single byte from the given reader and to decode it as a nil value.
///
/// According to the MessagePack specification, a nil value is represented as a single `0xc0` byte.
///
/// # Errors
///
/// This function will return `FixedValueReadError` on any I/O error while reading the nil marker.
///
/// It also returns `FixedValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_nil<R>(rd: &mut R) -> Result<(), FixedValueReadError>
    where R: Read
{
    // Nil is encoded entirely in the marker byte; nothing else to read.
    let marker = try!(read_marker(rd));
    if let Marker::Null = marker {
        Ok(())
    } else {
        Err(FixedValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read a single byte from the given reader and to decode it as a boolean value.
///
/// According to the MessagePack specification, an encoded boolean value is represented as a single
/// byte: `0xc3` for true and `0xc2` for false.
///
/// # Errors
///
/// This function will return `FixedValueReadError` on any I/O error while reading the bool marker.
///
/// It also returns `FixedValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_bool<R>(rd: &mut R) -> Result<bool, FixedValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    match marker {
        Marker::True => Ok(true),
        Marker::False => Ok(false),
        other => Err(FixedValueReadError::TypeMismatch(other)),
    }
}
/// Attempts to read a single byte from the given reader and to decode it as a positive fixnum
/// value.
///
/// According to the MessagePack specification, a positive fixed integer value is represented using
/// a single byte in `[0x00; 0x7f]` range inclusively, prepended with a special marker mask.
///
/// # Errors
///
/// This function will return `FixedValueReadError` on any I/O error while reading the marker.
///
/// It also returns `FixedValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_pfix<R>(rd: &mut R) -> Result<u8, FixedValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::PositiveFixnum(val) = marker {
        Ok(val)
    } else {
        Err(FixedValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read a single byte from the given reader and to decode it as a negative fixnum
/// value.
///
/// According to the MessagePack specification, a negative fixed integer value is represented using
/// a single byte in `[0xe0; 0xff]` range inclusively, prepended with a special marker mask.
///
/// # Errors
///
/// This function will return `FixedValueReadError` on any I/O error while reading the marker.
///
/// It also returns `FixedValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_nfix<R>(rd: &mut R) -> Result<i8, FixedValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::NegativeFixnum(val) = marker {
        Ok(val)
    } else {
        Err(FixedValueReadError::TypeMismatch(marker))
    }
}
// Generates the internal `read_data_*` helpers, which read the raw payload bytes that follow a
// marker and wrap any I/O failure into `ValueReadError::InvalidDataRead`.
macro_rules! make_read_data_fn {
    // Single-byte reads take no byte-order parameter ...
    (deduce, $reader:ident, $decoder:ident, 0)
        => ($reader.$decoder(););
    // ... while multi-byte reads are always big-endian, per the MessagePack spec.
    (deduce, $reader:ident, $decoder:ident, 1)
        => ($reader.$decoder::<byteorder::BigEndian>(););
    // Emits the actual helper function for type `$t`; `$d` selects the `deduce` arm above.
    (gen, $t:ty, $d:tt, $name:ident, $decoder:ident) => {
        fn $name<R>(rd: &mut R) -> Result<$t, ValueReadError>
            where R: Read
        {
            match make_read_data_fn!(deduce, rd, $decoder, $d) {
                Ok(data) => Ok(data),
                Err(err) => Err(ValueReadError::InvalidDataRead(From::from(err))),
            }
        }
    };
    // Entry points: u8/i8 are matched literally (single-byte); everything else is multi-byte.
    (u8, $name:ident, $decoder:ident) => (make_read_data_fn!(gen, u8, 0, $name, $decoder););
    (i8, $name:ident, $decoder:ident) => (make_read_data_fn!(gen, i8, 0, $name, $decoder););
    ($t:ty, $name:ident, $decoder:ident) => (make_read_data_fn!(gen, $t, 1, $name, $decoder););
}
// Instantiate the raw-data readers for every numeric type the decoders below need.
make_read_data_fn!(u8, read_data_u8, read_u8);
make_read_data_fn!(u16, read_data_u16, read_u16);
make_read_data_fn!(u32, read_data_u32, read_u32);
make_read_data_fn!(u64, read_data_u64, read_u64);
make_read_data_fn!(i8, read_data_i8, read_i8);
make_read_data_fn!(i16, read_data_i16, read_i16);
make_read_data_fn!(i32, read_data_i32, read_i32);
make_read_data_fn!(i64, read_data_i64, read_i64);
make_read_data_fn!(f32, read_data_f32, read_f32);
make_read_data_fn!(f64, read_data_f64, read_f64);
/// Attempts to read exactly 2 bytes from the given reader and to decode them as `u8` value.
///
/// The first byte should be the marker and the second one should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_u8<R>(rd: &mut R) -> Result<u8, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::U8 => Ok(try!(read_data_u8(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read exactly 3 bytes from the given reader and to decode them as `u16` value.
///
/// The first byte should be the marker and the others should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_u16<R>(rd: &mut R) -> Result<u16, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::U16 => Ok(try!(read_data_u16(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read exactly 5 bytes from the given reader and to decode them as `u32` value.
///
/// The first byte should be the marker and the others should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_u32<R>(rd: &mut R) -> Result<u32, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::U32 => Ok(try!(read_data_u32(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read exactly 9 bytes from the given reader and to decode them as `u64` value.
///
/// The first byte should be the marker and the others should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_u64<R>(rd: &mut R) -> Result<u64, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::U64 => Ok(try!(read_data_u64(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read exactly 2 bytes from the given reader and to decode them as `i8` value.
///
/// The first byte should be the marker and the second one should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i8<R>(rd: &mut R) -> Result<i8, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::I8 => Ok(try!(read_data_i8(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read exactly 3 bytes from the given reader and to decode them as `i16` value.
///
/// The first byte should be the marker and the others should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i16<R>(rd: &mut R) -> Result<i16, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::I16 => Ok(try!(read_data_i16(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read exactly 5 bytes from the given reader and to decode them as `i32` value.
///
/// The first byte should be the marker and the others should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i32<R>(rd: &mut R) -> Result<i32, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::I32 => Ok(try!(read_data_i32(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read exactly 9 bytes from the given reader and to decode them as `i64` value.
///
/// The first byte should be the marker and the others should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i64<R>(rd: &mut R) -> Result<i64, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::I64 => Ok(try!(read_data_i64(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read up to 2 bytes from the given reader and to decode them as `u8` value.
///
/// Unlike the `read_u8`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode signed integers will result in `TypeMismatch` error even if the
/// value fits in `u8`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_u8_loosely<R>(rd: &mut R) -> Result<u8, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::PositiveFixnum(val) => Ok(val),
Marker::U8 => Ok(try!(read_data_u8(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read up to 3 bytes from the given reader and to decode them as `u16` value.
///
/// Unlike the `read_u16`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode signed integers will result in `TypeMismatch` error even if the
/// value fits in `u16`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_u16_loosely<R>(rd: &mut R) -> Result<u16, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::PositiveFixnum(val) => Ok(val as u16),
Marker::U8 => Ok(try!(read_data_u8(rd)) as u16),
Marker::U16 => Ok(try!(read_data_u16(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read up to 5 bytes from the given reader and to decode them as `u32` value.
///
/// Unlike the `read_u32`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode signed integers will result in `TypeMismatch` error even if the
/// value fits in `u32`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_u32_loosely<R>(rd: &mut R) -> Result<u32, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::PositiveFixnum(val) => Ok(val as u32),
Marker::U8 => Ok(try!(read_data_u8(rd)) as u32),
Marker::U16 => Ok(try!(read_data_u16(rd)) as u32),
Marker::U32 => Ok(try!(read_data_u32(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read up to 9 bytes from the given reader and to decode them as `u64` value.
///
/// This function will try to read up to 9 bytes from the reader (1 for marker and up to 8 for data)
/// and interpret them as a big-endian u64.
///
/// Unlike the `read_u64`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode signed integers will result in `TypeMismatch` error even if the
/// value fits in `u64`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_u64_loosely<R>(rd: &mut R) -> Result<u64, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::PositiveFixnum(val) => Ok(val as u64),
Marker::U8 => Ok(try!(read_data_u8(rd)) as u64),
Marker::U16 => Ok(try!(read_data_u16(rd)) as u64),
Marker::U32 => Ok(try!(read_data_u32(rd)) as u64),
Marker::U64 => Ok(try!(read_data_u64(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read up to 2 bytes from the given reader and to decode them as `i8` value.
///
/// Unlike the `read_i8`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode unsigned integers will result in `TypeMismatch` error even if the
/// value fits in `i8`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i8_loosely<R>(rd: &mut R) -> Result<i8, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::NegativeFixnum(val) => Ok(val),
Marker::I8 => Ok(try!(read_data_i8(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read up to 3 bytes from the given reader and to decode them as `i16` value.
///
/// Unlike the `read_i16`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode unsigned integers will result in `TypeMismatch` error even if the
/// value fits in `i16`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i16_loosely<R>(rd: &mut R) -> Result<i16, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::NegativeFixnum(val) => Ok(val as i16),
Marker::I8 => Ok(try!(read_data_i8(rd)) as i16),
Marker::I16 => Ok(try!(read_data_i16(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read up to 5 bytes from the given reader and to decode them as `i32` value.
///
/// Unlike the `read_i32`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode unsigned integers will result in `TypeMismatch` error even if the
/// value fits in `i32`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i32_loosely<R>(rd: &mut R) -> Result<i32, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::NegativeFixnum(val) => Ok(val as i32),
Marker::I8 => Ok(try!(read_data_i8(rd)) as i32),
Marker::I16 => Ok(try!(read_data_i16(rd)) as i32),
Marker::I32 => Ok(try!(read_data_i32(rd))),
marker => Err(ValueReadError::TypeMismatch(marker)),
}
}
/// Attempts to read up to 9 bytes from the given reader and to decode them as `i64` value.
///
/// This function will try to read up to 9 bytes from the reader (1 for marker and up to 8 for data)
/// and interpret them as a big-endian i64.
///
/// Unlike the `read_i64`, this function weakens type restrictions, allowing you to safely decode
/// packed values even if you aren't sure about the actual type.
///
/// Note, that trying to decode unsigned integers will result in `TypeMismatch` error even if the
/// value fits in `i64`.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_i64_loosely<R>(rd: &mut R) -> Result<i64, ValueReadError>
    where R: Read
{
    match try!(read_marker(rd)) {
        Marker::NegativeFixnum(val) => Ok(val as i64),
        Marker::I8 => Ok(try!(read_data_i8(rd)) as i64),
        Marker::I16 => Ok(try!(read_data_i16(rd)) as i64),
        Marker::I32 => Ok(try!(read_data_i32(rd)) as i64),
        Marker::I64 => Ok(try!(read_data_i64(rd))),
        marker => Err(ValueReadError::TypeMismatch(marker)),
    }
}
/// Attempts to read exactly 5 bytes from the given reader and to decode them as `f32` value.
///
/// The first byte should be the marker and the others should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_f32<R>(rd: &mut R) -> Result<f32, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::F32 => Ok(try!(read_data_f32(rd))),
marker => Err(ValueReadError::TypeMismatch(marker))
}
}
/// Attempts to read exactly 9 bytes from the given reader and to decode them as `f64` value.
///
/// The first byte should be the marker and the others should represent the data itself.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_f64<R>(rd: &mut R) -> Result<f64, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::F64 => Ok(try!(read_data_f64(rd))),
marker => Err(ValueReadError::TypeMismatch(marker))
}
}
/// Attempts to read up to 9 bytes from the given reader and to decode them as a string `u32` size
/// value.
///
/// According to the MessagePack specification, the string format family stores an byte array in 1,
/// 2, 3, or 5 bytes of extra bytes in addition to the size of the byte array.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
///
/// It also returns `ValueReadError::TypeMismatch` if the actual type is not equal with the
/// expected one, indicating you with the actual type.
pub fn read_str_len<R>(rd: &mut R) -> Result<u32, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::FixedString(size) => Ok(size as u32),
Marker::Str8 => Ok(try!(read_data_u8(rd)) as u32),
Marker::Str16 => Ok(try!(read_data_u16(rd)) as u32),
Marker::Str32 => Ok(try!(read_data_u32(rd))),
marker => Err(ValueReadError::TypeMismatch(marker))
}
}
/// Attempts to read a string data from the given reader and copy it to the buffer provided.
///
/// On success returns a borrowed string type, allowing to view the copied bytes as properly utf-8
/// string.
/// According to the spec, the string's data must be encoded using utf-8.
///
/// # Errors
///
/// Returns `Err` in the following cases:
///
/// - if any IO error (including unexpected EOF) occurs, while reading an `rd`.
/// - if the `out` buffer size is not large enough to keep all the data copied.
/// - if the data is not utf-8, with a description as to why the provided data is not utf-8 and
/// with a size of bytes actually copied to be able to get them from `out`.
///
/// # Examples
/// ```
/// use rmp::decode::read_str;
///
/// let buf = [0xaa, 0x6c, 0x65, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65];
/// let mut out = [0u8; 16];
///
/// assert_eq!("le message", read_str(&mut &buf[..], &mut &mut out[..]).unwrap());
/// ```
///
/// # Unstable
///
/// This function is **unstable**, because it needs review.
pub fn read_str<'r, R>(rd: &mut R, mut buf: &'r mut [u8]) -> Result<&'r str, DecodeStringError<'r>>
    where R: Read
{
    let len = try!(read_str_len(rd));
    let ulen = len as usize;

    // Check the destination capacity up front so we never copy a partial string into it.
    if buf.len() < ulen {
        return Err(DecodeStringError::BufferSizeTooSmall(len))
    }

    // Delegate the actual copy + utf-8 validation, giving it exactly `len` bytes of buffer.
    read_str_data(rd, len, &mut buf[0..ulen])
}
/// Copies exactly `len` bytes of string payload from `rd` into `buf` and validates it as utf-8.
///
/// The caller must pass a buffer whose length equals `len` (checked via `debug_assert_eq!`).
/// On a short read the bytes copied so far are reported through `InvalidDataCopy`.
fn read_str_data<'r, R>(rd: &mut R, len: u32, buf: &'r mut[u8]) -> Result<&'r str, DecodeStringError<'r>>
    where R: Read
{
    debug_assert_eq!(len as usize, buf.len());

    // We need cursor here, because in the common case we cannot guarantee, that copying will be
    // performed in a single step.
    let mut cur = Cursor::new(buf);

    // Trying to copy exact `len` bytes.
    match io::copy(&mut rd.take(len as u64), &mut cur) {
        Ok(size) if size == len as u64 => {
            // Release buffer owning from cursor.
            let buf = cur.into_inner();

            // The copy is complete; the only remaining failure mode is invalid utf-8.
            match from_utf8(buf) {
                Ok(decoded) => Ok(decoded),
                Err(err) => Err(DecodeStringError::InvalidUtf8(buf, err)),
            }
        }
        Ok(size) => {
            // Fewer bytes than requested: the reader hit EOF mid-string. Hand back the
            // partially-filled prefix so the caller can inspect what was copied.
            let buf = cur.into_inner();
            Err(DecodeStringError::InvalidDataCopy(&buf[..size as usize], ReadError::UnexpectedEOF))
        }
        Err(err) => Err(DecodeStringError::InvalidDataRead(From::from(err))),
    }
}
/// Attempts to read and decode a string value from the reader, returning a borrowed slice from it.
///
// TODO: it is better to return &str; may panic on len mismatch; extend documentation.
pub fn read_str_ref(rd: &[u8]) -> Result<&[u8], DecodeStringError> {
let mut cur = io::Cursor::new(rd);
let len = try!(read_str_len(&mut cur));
let start = cur.position() as usize;
Ok(&rd[start .. start + len as usize])
}
/// Attempts to read up to 5 bytes from the given reader and to decode them as a big-endian u32
/// array size.
///
/// Array format family stores a sequence of elements in 1, 3, or 5 bytes of extra bytes in addition
/// to the elements.
// TODO: Docs.
pub fn read_array_size<R>(rd: &mut R) -> Result<u32, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::FixedArray(size) => Ok(size as u32),
Marker::Array16 => Ok(try!(read_data_u16(rd)) as u32),
Marker::Array32 => Ok(try!(read_data_u32(rd))),
marker => Err(ValueReadError::TypeMismatch(marker))
}
}
/// Attempts to read up to 5 bytes from the given reader and to decode them as a big-endian u32
/// map size.
///
/// Map format family stores a sequence of elements in 1, 3, or 5 bytes of extra bytes in addition
/// to the elements.
// TODO: Docs.
pub fn read_map_size<R>(rd: &mut R) -> Result<u32, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::FixedMap(size) => Ok(size as u32),
Marker::Map16 => Ok(try!(read_data_u16(rd)) as u32),
Marker::Map32 => Ok(try!(read_data_u32(rd))),
marker => Err(ValueReadError::TypeMismatch(marker))
}
}
// TODO: Docs.
pub fn read_bin_len<R>(rd: &mut R) -> Result<u32, ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::Bin8 => Ok(try!(read_data_u8(rd)) as u32),
Marker::Bin16 => Ok(try!(read_data_u16(rd)) as u32),
Marker::Bin32 => Ok(try!(read_data_u32(rd))),
marker => Err(ValueReadError::TypeMismatch(marker))
}
}
/// Reads a binary blob header from `rd` and returns the payload as a borrowed slice.
///
/// Returns `InvalidDataRead(UnexpectedEOF)` when the buffer is shorter than the declared length.
// TODO: Docs; not sure about naming.
pub fn read_bin_borrow(rd: &[u8]) -> Result<&[u8], ValueReadError> {
    let mut cur = io::Cursor::new(rd);
    let len = try!(read_bin_len(&mut cur)) as usize;
    let pos = cur.position() as usize;

    // `get` returns `None` when the requested range runs past the end of the slice,
    // which is exactly the truncated-input case.
    match rd.get(pos .. pos + len) {
        Some(payload) => Ok(payload),
        None => Err(ValueReadError::InvalidDataRead(ReadError::UnexpectedEOF)),
    }
}
/// Attempts to read exactly 3 bytes from the given reader and interpret them as a fixext1 type
/// with data attached.
///
/// According to the MessagePack specification, a fixext1 stores an integer and a byte array whose
/// length is 1 byte. Its marker byte is `0xd4`.
///
/// Note, that this function copies a byte array from the reader to the output `u8` variable.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
pub fn read_fixext1<R>(rd: &mut R) -> Result<(i8, u8), ValueReadError>
where R: Read
{
match try!(read_marker(rd)) {
Marker::FixExt1 => {
let ty = try!(read_data_i8(rd));
let data = try!(read_data_u8(rd));
Ok((ty, data))
}
marker => Err(ValueReadError::TypeMismatch(marker))
}
}
/// Attempts to read exactly 4 bytes from the given reader and interpret them as a fixext2 type
/// with data attached.
///
/// According to the MessagePack specification, a fixext2 stores an integer and a byte array whose
/// length is 2 bytes. Its marker byte is `0xd5`.
///
/// Note, that this function copies a byte array from the reader to the output buffer, which is
/// unlikely if you want zero-copy functionality.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
pub fn read_fixext2<R>(rd: &mut R) -> Result<(i8, [u8; 2]), ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::FixExt2 = marker {
        let mut data = [0u8; 2];
        let ty = try!(read_fixext_data(rd, &mut data));
        Ok((ty, data))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 6 bytes from the given reader and interpret them as a fixext4 type
/// with data attached.
///
/// According to the MessagePack specification, a fixext4 stores an integer and a byte array whose
/// length is 4 bytes. Its marker byte is `0xd6`.
///
/// Note, that this function copies a byte array from the reader to the output buffer, which is
/// unlikely if you want zero-copy functionality.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
pub fn read_fixext4<R>(rd: &mut R) -> Result<(i8, [u8; 4]), ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::FixExt4 = marker {
        let mut data = [0u8; 4];
        let ty = try!(read_fixext_data(rd, &mut data));
        Ok((ty, data))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 10 bytes from the given reader and interpret them as a fixext8 type
/// with data attached.
///
/// According to the MessagePack specification, a fixext8 stores an integer and a byte array whose
/// length is 8 bytes. Its marker byte is `0xd7`.
///
/// Note, that this function copies a byte array from the reader to the output buffer, which is
/// unlikely if you want zero-copy functionality.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
pub fn read_fixext8<R>(rd: &mut R) -> Result<(i8, [u8; 8]), ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::FixExt8 = marker {
        let mut data = [0u8; 8];
        let ty = try!(read_fixext_data(rd, &mut data));
        Ok((ty, data))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Attempts to read exactly 18 bytes from the given reader and interpret them as a fixext16 type
/// with data attached.
///
/// According to the MessagePack specification, a fixext16 stores an integer and a byte array whose
/// length is 16 bytes. Its marker byte is `0xd8`.
///
/// Note, that this function copies a byte array from the reader to the output buffer, which is
/// unlikely if you want zero-copy functionality.
///
/// # Errors
///
/// This function will return `ValueReadError` on any I/O error while reading either the marker or
/// the data.
pub fn read_fixext16<R>(rd: &mut R) -> Result<(i8, [u8; 16]), ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));
    if let Marker::FixExt16 = marker {
        let mut data = [0u8; 16];
        let ty = try!(read_fixext_data(rd, &mut data));
        Ok((ty, data))
    } else {
        Err(ValueReadError::TypeMismatch(marker))
    }
}
/// Reads a fixext body: the `i8` type id followed by exactly `buf.len()` payload bytes.
///
/// Returns the type id on success; any short read or I/O failure while filling `buf` is
/// reported as `ValueReadError::InvalidDataRead`.
fn read_fixext_data<R>(rd: &mut R, buf: &mut [u8]) -> Result<i8, ValueReadError>
    where R: Read
{
    let id = try!(read_data_i8(rd));

    read_full(rd, buf)
        .map(|_| id)
        .map_err(ValueReadError::InvalidDataRead)
}
/// Reads from `rd` until `buf` is completely filled, retrying on `Interrupted`.
///
/// Returns `ReadError::UnexpectedEOF` if the stream ends before `buf.len()` bytes are read.
fn read_full<R: Read>(rd: &mut R, buf: &mut [u8]) -> Result<(), ReadError> {
    let mut nread = 0usize;

    while nread < buf.len() {
        match rd.read(&mut buf[nread..]) {
            // A zero-byte read signals EOF: the stream ended before the buffer was full.
            Ok(0) => return Err(ReadError::UnexpectedEOF),
            Ok(n) => nread += n,
            // Interrupted reads are transient — retry without counting any progress.
            Err(ref err) if err.kind() == io::ErrorKind::Interrupted => {},
            Err(err) => return Err(From::from(err))
        }
    }

    Ok(())
}
/// Metadata of an ext value: its application-defined type id and the payload size in bytes.
#[derive(Debug, PartialEq)]
pub struct ExtMeta {
    /// Application-specific type identifier.
    pub typeid: i8,
    /// Size of the attached data portion, in bytes.
    pub size: u32,
}
/// Reads an ext header (fixext1..16 or ext8/16/32) and returns its metadata: the payload
/// size followed by the application type id. The payload itself is left unread.
///
/// Unstable: docs, errors
pub fn read_ext_meta<R>(rd: &mut R) -> Result<ExtMeta, ValueReadError>
    where R: Read
{
    let marker = try!(read_marker(rd));

    // Fixext markers encode the size implicitly; ext8/16/32 carry an explicit length field.
    let size = match marker {
        Marker::FixExt1 => 1,
        Marker::FixExt2 => 2,
        Marker::FixExt4 => 4,
        Marker::FixExt8 => 8,
        Marker::FixExt16 => 16,
        Marker::Ext8 => try!(read_data_u8(rd)) as u32,
        Marker::Ext16 => try!(read_data_u16(rd)) as u32,
        Marker::Ext32 => try!(read_data_u32(rd)),
        other => return Err(ValueReadError::TypeMismatch(other)),
    };

    // The type id byte follows the size in all ext encodings.
    let typeid = try!(read_data_i8(rd));

    Ok(ExtMeta { typeid: typeid, size: size })
}
/// Contains: owned value decoding, owned error; owned result.
// TODO: docs.
mod value {
    //use std::convert::From;
    use std::io::Read;
    use std::result::Result;

    use super::super::Marker;
    pub use super::super::value::{Integer, Value};
    use super::{
        ReadError,
        MarkerReadError,
        ValueReadError,
        read_marker,
        read_data_u8,
        read_data_u16,
        read_data_u32,
        read_data_u64,
        read_data_i8,
        read_data_i16,
        read_data_i32,
        read_data_i64,
    };

    /// Errors that may occur while decoding an owned `Value`.
    #[derive(Debug)]
    pub enum Error<'r> {
        /// Failed to read the marker byte.
        InvalidMarkerRead(ReadError),
        /// Failed to read the payload that follows a marker.
        InvalidDataRead(ReadError),
        /// Failed to decode one of the elements of an array.
        InvalidArrayRead(&'r Error<'r>),
    }

    impl<'r> From<MarkerReadError> for Error<'r> {
        fn from(err: MarkerReadError) -> Error<'r> {
            Error::InvalidMarkerRead(From::from(err))
        }
    }

    impl<'r> From<ValueReadError> for Error<'r> {
        fn from(err: ValueReadError) -> Error<'r> {
            match err {
                ValueReadError::InvalidMarkerRead(err) => Error::InvalidMarkerRead(err),
                ValueReadError::InvalidDataRead(err) => Error::InvalidDataRead(err),
                // NOTE(review): `read_value` dispatches on the marker itself and never calls
                // the typed readers, so a `TypeMismatch` is believed unreachable here.
                ValueReadError::TypeMismatch(..) => unimplemented!()
            }
        }
    }

    /// Decodes a single MessagePack value from `rd` into an owned `Value`.
    ///
    /// Only nil, booleans and integers are implemented so far; every other marker panics
    /// via `unimplemented!()`.
    // TODO: docs; examples; incomplete.
    pub fn read_value<R>(rd: &mut R) -> Result<Value, Error>
        where R: Read
    {
        let val = match try!(read_marker(rd)) {
            Marker::Null => Value::Nil,
            Marker::True => Value::Boolean(true),
            Marker::False => Value::Boolean(false),
            Marker::PositiveFixnum(val) => Value::Integer(Integer::U64(val as u64)),
            Marker::NegativeFixnum(val) => Value::Integer(Integer::I64(val as i64)),
            Marker::U8 => Value::Integer(Integer::U64(try!(read_data_u8(rd)) as u64)),
            Marker::U16 => Value::Integer(Integer::U64(try!(read_data_u16(rd)) as u64)),
            Marker::U32 => Value::Integer(Integer::U64(try!(read_data_u32(rd)) as u64)),
            Marker::U64 => Value::Integer(Integer::U64(try!(read_data_u64(rd)))),
            Marker::I8 => Value::Integer(Integer::I64(try!(read_data_i8(rd)) as i64)),
            Marker::I16 => Value::Integer(Integer::I64(try!(read_data_i16(rd)) as i64)),
            Marker::I32 => Value::Integer(Integer::I64(try!(read_data_i32(rd)) as i64)),
            Marker::I64 => Value::Integer(Integer::I64(try!(read_data_i64(rd)))),
            // // TODO: Other integers.
            // // TODO: Floats.
            // Marker::Str8 => {
            //     let len = try!(read_data_u8(rd)) as u64;
            //     let mut buf: Vec<u8> = (0..len).map(|_| 0u8).collect();
            //     Ok(Value::String(try!(read_str_data(rd, len as u32, &mut buf[..])).to_string()))
            // }
            // // TODO: Other strings.
            // Marker::FixedArray(len) => {
            //     let mut vec = Vec::with_capacity(len as usize);
            //     for _ in 0..len {
            //         vec.push(try!(read_value(rd)));
            //     }
            //     Ok(Value::Array(vec))
            // }
            // // TODO: Map/Bin/Ext.
            _ => unimplemented!()
        };

        Ok(val)
    }

    #[cfg(test)]
    mod tests {
        use std::io::Cursor;

        use super::*;

        #[test]
        fn from_null_decode_value() {
            let buf = [0xc0, 0x00];
            let mut cur = Cursor::new(&buf[..]);

            assert_eq!(Value::Nil, read_value(&mut cur).unwrap());
            assert_eq!(1, cur.position());
        }

        #[test]
        fn from_pfix_decode_value() {
            let buf: &[u8] = &[0x1f];
            let mut cur = Cursor::new(buf);

            assert_eq!(Value::Integer(Integer::U64(31)), read_value(&mut cur).unwrap());
            assert_eq!(1, cur.position());
        }

        #[test]
        fn from_i32_decode_value() {
            let buf: &[u8] = &[0xd2, 0xff, 0xff, 0xff, 0xff];
            let mut cur = Cursor::new(buf);

            assert_eq!(Value::Integer(Integer::I64(-1)), read_value(&mut cur).unwrap());
            assert_eq!(5, cur.position());
        }
    } // mod tests
} // mod value
pub mod serialize {
use std::convert::From;
use std::io::Read;
use std::result;
use serialize;
use super::super::Marker;
use super::{
ReadError,
FixedValueReadError,
ValueReadError,
DecodeStringError,
read_nil,
read_bool,
read_u8_loosely,
read_u16_loosely,
read_u32_loosely,
read_u64_loosely,
read_i8_loosely,
read_i16_loosely,
read_i32_loosely,
read_i64_loosely,
read_f32,
read_f64,
read_str_len,
read_str_data,
read_array_size,
read_map_size,
};
/// Unstable: docs; incomplete
#[derive(Debug)]
pub enum Error {
    /// The actual value type isn't equal with the expected one.
    TypeMismatch(Marker),
    /// Reading the type marker itself failed.
    InvalidMarkerRead(ReadError),
    /// The marker was read, but reading the payload failed.
    InvalidDataRead(ReadError),
    /// A container's encoded length differed from the expected one
    /// (carries the actual length found in the stream).
    LengthMismatch(u32),
    /// Uncategorized error.
    Uncategorized(String),
}
impl From<FixedValueReadError> for Error {
fn from(err: FixedValueReadError) -> Error {
match err {
FixedValueReadError::UnexpectedEOF => Error::InvalidMarkerRead(ReadError::UnexpectedEOF),
FixedValueReadError::Io(err) => Error::InvalidMarkerRead(ReadError::Io(err)),
FixedValueReadError::TypeMismatch(marker) => Error::TypeMismatch(marker),
}
}
}
impl From<ValueReadError> for Error {
fn from(err: ValueReadError) -> Error {
match err {
ValueReadError::TypeMismatch(marker) => Error::TypeMismatch(marker),
ValueReadError::InvalidMarkerRead(err) => Error::InvalidMarkerRead(err),
ValueReadError::InvalidDataRead(err) => Error::InvalidDataRead(err),
}
}
}
/// Unstable: docs; incomplete
impl<'a> From<DecodeStringError<'a>> for Error {
fn from(err: DecodeStringError) -> Error {
match err {
DecodeStringError::InvalidMarkerRead(err) => Error::InvalidMarkerRead(err),
DecodeStringError::InvalidDataRead(..) => Error::Uncategorized("InvalidDataRead".to_string()),
DecodeStringError::TypeMismatch(..) => Error::Uncategorized("TypeMismatch".to_string()),
DecodeStringError::BufferSizeTooSmall(..) => Error::Uncategorized("BufferSizeTooSmall".to_string()),
DecodeStringError::InvalidDataCopy(..) => Error::Uncategorized("InvalidDataCopy".to_string()),
DecodeStringError::InvalidUtf8(..) => Error::Uncategorized("InvalidUtf8".to_string()),
}
}
}
/// Convenience alias: every decoder operation fails with [`Error`].
pub type Result<T> = result::Result<T, Error>;

/// Deserializes MessagePack from an underlying reader.
pub struct Decoder<R: Read> {
    // Source of encoded bytes; consumed as values are decoded.
    rd: R,
}

impl<R: Read> Decoder<R> {
    /// Wraps a reader; no bytes are consumed until a `read_*` call is made.
    pub fn new(rd: R) -> Decoder<R> {
        Decoder {
            rd: rd
        }
    }
}
/// Unstable: docs; examples; incomplete
///
/// Integer reads delegate to the `_loosely` readers — presumably these accept
/// any integer encoding whose value fits the requested type (TODO confirm in
/// the read module). Enum and tuple-struct hooks are still unimplemented.
impl<R: Read> serialize::Decoder for Decoder<R> {
    type Error = Error;
    fn read_nil(&mut self) -> Result<()> {
        Ok(try!(read_nil(&mut self.rd)))
    }
    fn read_bool(&mut self) -> Result<bool> {
        Ok(try!(read_bool(&mut self.rd)))
    }
    fn read_u8(&mut self) -> Result<u8> {
        Ok(try!(read_u8_loosely(&mut self.rd)))
    }
    fn read_u16(&mut self) -> Result<u16> {
        Ok(try!(read_u16_loosely(&mut self.rd)))
    }
    fn read_u32(&mut self) -> Result<u32> {
        Ok(try!(read_u32_loosely(&mut self.rd)))
    }
    fn read_u64(&mut self) -> Result<u64> {
        Ok(try!(read_u64_loosely(&mut self.rd)))
    }
    /// TODO: Doesn't look safe: the `as usize` cast silently truncates a
    /// wire u64 on 32-bit targets.
    fn read_usize(&mut self) -> Result<usize> {
        let v = try!(self.read_u64());
        Ok(v as usize)
    }
    fn read_i8(&mut self) -> Result<i8> {
        Ok(try!(read_i8_loosely(&mut self.rd)))
    }
    fn read_i16(&mut self) -> Result<i16> {
        Ok(try!(read_i16_loosely(&mut self.rd)))
    }
    fn read_i32(&mut self) -> Result<i32> {
        Ok(try!(read_i32_loosely(&mut self.rd)))
    }
    fn read_i64(&mut self) -> Result<i64> {
        Ok(try!(read_i64_loosely(&mut self.rd)))
    }
    /// TODO: Doesn't look safe: same truncation concern as `read_usize`.
    fn read_isize(&mut self) -> Result<isize> {
        Ok(try!(self.read_i64()) as isize)
    }
    fn read_f32(&mut self) -> Result<f32> {
        Ok(try!(read_f32(&mut self.rd)))
    }
    fn read_f64(&mut self) -> Result<f64> {
        Ok(try!(read_f64(&mut self.rd)))
    }
    // A char is encoded as a one-character string.
    // NOTE(review): `len()` is the *byte* length, so a single multi-byte
    // (non-ASCII) character is rejected here — confirm that is intended.
    fn read_char(&mut self) -> Result<char> {
        let mut res = try!(self.read_str());
        if res.len() == 1 {
            Ok(res.pop().unwrap())
        } else {
            Err(self.error("length mismatch"))
        }
    }
    fn read_str(&mut self) -> Result<String> {
        let len = try!(read_str_len(&mut self.rd));
        // Zero-filled scratch buffer of exactly `len` bytes.
        let mut buf: Vec<u8> = (0..len).map(|_| 0u8).collect();
        Ok(try!(read_str_data(&mut self.rd, len, &mut buf[..])).to_string())
    }
    fn read_enum<T, F>(&mut self, _name: &str, _f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T> { unimplemented!() }
    fn read_enum_variant<T, F>(&mut self, _names: &[&str], _f: F) -> Result<T>
        where F: FnMut(&mut Self, usize) -> Result<T> { unimplemented!() }
    fn read_enum_variant_arg<T, F>(&mut self, _idx: usize, _f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T> { unimplemented!() }
    fn read_enum_struct_variant<T, F>(&mut self, _names: &[&str], _f: F) -> Result<T>
        where F: FnMut(&mut Self, usize) -> Result<T> { unimplemented!() }
    fn read_enum_struct_variant_field<T, F>(&mut self, _name: &str, _idx: usize, _f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T> { unimplemented!() }
    // Structs are encoded as fixed-length arrays of their fields.
    fn read_struct<T, F>(&mut self, _name: &str, len: usize, f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T>
    {
        self.read_tuple(len, f)
    }
    fn read_struct_field<T, F>(&mut self, _name: &str, _idx: usize, f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T>
    {
        f(self)
    }
    // Verifies the encoded array length matches the expected tuple arity.
    fn read_tuple<T, F>(&mut self, len: usize, f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T>
    {
        let actual = try!(read_array_size(&mut self.rd));
        if len == actual as usize {
            f(self)
        } else {
            Err(Error::LengthMismatch(actual))
        }
    }
    // In case of MessagePack don't care about argument indexing.
    fn read_tuple_arg<T, F>(&mut self, _idx: usize, f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T>
    {
        f(self)
    }
    fn read_tuple_struct<T, F>(&mut self, _name: &str, _len: usize, _f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T> { unimplemented!() }
    fn read_tuple_struct_arg<T, F>(&mut self, _idx: usize, _f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T> { unimplemented!() }
    /// We treat Value::Null as None.
    fn read_option<T, F>(&mut self, mut f: F) -> Result<T>
        where F: FnMut(&mut Self, bool) -> Result<T>
    {
        // Primarily try to read optimistically; fall back to `None` only on a
        // nil marker. NOTE(review): the failed attempt has already consumed
        // the marker byte — confirm the fixed-value readers make this safe.
        match f(self, true) {
            Ok(val) => Ok(val),
            Err(Error::TypeMismatch(Marker::Null)) => f(self, false),
            Err(err) => Err(err)
        }
    }
    fn read_seq<T, F>(&mut self, f: F) -> Result<T>
        where F: FnOnce(&mut Self, usize) -> Result<T>
    {
        let len = try!(read_array_size(&mut self.rd)) as usize;
        f(self, len)
    }
    fn read_seq_elt<T, F>(&mut self, _idx: usize, f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T>
    {
        f(self)
    }
    fn read_map<T, F>(&mut self, f: F) -> Result<T>
        where F: FnOnce(&mut Self, usize) -> Result<T>
    {
        let len = try!(read_map_size(&mut self.rd)) as usize;
        f(self, len)
    }
    fn read_map_elt_key<T, F>(&mut self, _idx: usize, f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T>
    {
        f(self)
    }
    fn read_map_elt_val<T, F>(&mut self, _idx: usize, f: F) -> Result<T>
        where F: FnOnce(&mut Self) -> Result<T>
    {
        f(self)
    }
    // Ad-hoc error constructor required by the trait.
    fn error(&mut self, err: &str) -> Error {
        Error::Uncategorized(err.to_string())
    }
}
}
|
use alloc::raw_vec::RawVec;
use num::{self, PrimInt};
use std::cmp;
use std::ops::*;
use std::fmt::{self, Debug, Display};
use std::mem;
use std::ptr;
use std::marker::PhantomData;
/// Describes how many bits a single stored element occupies.
pub trait Nbits {
    /// Number of bits per element.
    fn bits() -> usize;

    /// A `usize` with the low `Self::bits()` bits set — the all-ones mask
    /// for one element. Built bit-by-bit so `bits() == 0` yields `0` and a
    /// full word width yields all ones without a shift overflow.
    #[inline]
    fn mask() -> usize {
        (0..Self::bits()).fold(0, |acc, _| (acc << 1) | 1)
    }
}
/// A compact vector whose elements each occupy `T::bits()` bits, packed
/// into a raw buffer of `B` words (defaults to `usize`).
pub struct NbitsVec<T: Nbits, B = usize> {
    // Raw storage; its capacity is counted in `B` words, not in elements.
    buf: RawVec<B>,
    // Number of logical `T`-sized elements currently stored.
    len: usize,
    // Ties the element width `T` to the type without storing any `T`.
    _marker: PhantomData<T>,
}
impl<
    T: Nbits,
    B: PrimInt,
> Default for NbitsVec<T, B> {
    /// Returns an empty vector; identical to [`NbitsVec::new`].
    fn default() -> Self {
        // DRY: delegate to `new()` instead of repeating the struct literal.
        Self::new()
    }
}
impl<T: Nbits, B: PrimInt + fmt::LowerHex> Debug for NbitsVec<T, B> {
    /// Dumps the length, buffer capacity, and every raw storage word in hex.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        try!(write!(f,
        "NbitsVec<{}> {{ len: {}, buf: RawVec {{ cap: {}, [",
        T::bits(),
        self.len,
        self.buf.cap()));
        let ptr = self.buf.ptr();
        for i in 0..self.buf.cap() {
            unsafe {
                // NOTE(review): this reads every allocated word, including
                // words past `len` — uninitialized tails may print garbage.
                try!(write!(f, "{:#x}, ", ptr::read(ptr.offset(i as isize))));
            }
        }
        write!(f, "] }}")
    }
}
impl<
T: Nbits,
B: PrimInt
> NbitsVec<T, B> {
/// Constructs a new, empty NbitsVec<T>
///
/// The vector will not allocate until elements are pushed onto it.
///
/// # Examples
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::new();
/// # }
/// ```
#[inline]
pub fn new() -> Self {
    NbitsVec {
        buf: RawVec::new(),
        len: 0,
        _marker: PhantomData,
    }
}
/// Constructs a new, empty Vec<T> with the specified capacity.
///
/// The vector will be able to hold exactly capacity elements without reallocating. If capacity
/// is 0, the vector will not allocate.
///
/// It is important to note that this function does not specify the length of the returned
/// vector, but only the capacity.
///
/// # Examples
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
/// assert!(vec.capacity() >= 10);
/// # }
/// ```
pub fn with_capacity(capacity: usize) -> Self {
    NbitsVec {
        // Element capacity is converted to the number of `B` words needed.
        buf: RawVec::with_capacity(Self::capacity_to_buf(capacity)),
        len: 0,
        _marker: PhantomData,
    }
}
/// Builds a vector from a raw pointer, a length and a capacity, both
/// counted in *elements*.
///
/// # Safety
///
/// `ptr` must point to an allocation of at least
/// `Self::capacity_to_buf(capacity)` `B` words, allocated the way `RawVec`
/// expects, with the first `length` elements initialized — presumably
/// mirroring `Vec::from_raw_parts`' contract (TODO confirm).
pub unsafe fn from_raw_parts(ptr: *mut B, length: usize, capacity: usize) -> Self {
    NbitsVec {
        buf: RawVec::from_raw_parts(ptr, Self::capacity_to_buf(capacity)),
        len: length,
        _marker: PhantomData,
    }
}
/// Returns the number of elements the vector can hold without reallocating.
///
/// # Examples
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::{NbitsVec, As1bits};
/// # fn main() {
/// let v: NbitsVec<As1bits> = NbitsVec::with_capacity(10);
/// assert!(v.capacity() >= 10);
/// assert_eq!(v.capacity(), std::mem::size_of::<usize>() * 8);
/// # }
/// ```
#[inline(always)]
pub fn capacity(&self) -> usize {
    Self::capacity_from_buf(self.buf.cap())
}
/// Reserves capacity for at least additional more elements to be inserted in the given
/// NbitsVec<T>.
/// The collection may reserve more space to avoid frequent reallocations.
///
/// # Panics
///
/// Panics if the new capacity overflows usize.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut v: NbitsVec<As2bits> = NbitsVec::new();
/// assert!(v.capacity() == 0);
/// v.reserve(100);
/// assert!(v.capacity() >= 100);
/// # }
/// ```
pub fn reserve(&mut self, additional: usize) {
    let required_cap = self.len().checked_add(additional).expect("capacity overflow");
    let used_cap = Self::capacity_to_buf(self.len());
    // NOTE(review): `need_extra_cap` is the buf size for the *total*
    // required capacity, not just the extra part; if `RawVec::reserve`
    // treats its second argument as "additional", this over-reserves for
    // non-empty vectors. Harmless for an "at least" guarantee — confirm
    // against RawVec's contract.
    let need_extra_cap = Self::capacity_to_buf(required_cap);
    self.buf.reserve(used_cap, need_extra_cap);
}
/// Reserves the minimum capacity for exactly additional more elements to be inserted in the
/// given `NbitsVec<T>`. Does nothing if the capacity is already sufficient.
///
/// # Panics
///
/// Panics if the new capacity overflows usize.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// use bits_vec::*;
/// # fn main() {
/// let mut v: NbitsVec<As2bits> = NbitsVec::new();
/// assert!(v.capacity() == 0);
/// v.reserve_exact(64);
/// assert_eq!(v.capacity(), 64);
/// v.reserve_exact(127);
/// assert!(v.capacity() >= 127);
/// v.reserve_exact(128);
/// assert_eq!(v.capacity(), 128);
/// # }
/// ```
pub fn reserve_exact(&mut self, additional: usize) {
    let required_cap = self.len().checked_add(additional).expect("capacity overflow");
    let used_cap = Self::capacity_to_buf(self.len());
    // NOTE(review): as in `reserve`, the second argument is the buf size of
    // the *total* required capacity. If `RawVec::reserve_exact` adds it to
    // `used_cap`, the result over-allocates for `len > 0`, breaking the
    // "exact" promise — verify.
    let need_extra_cap = Self::capacity_to_buf(required_cap);
    self.buf.reserve_exact(used_cap, need_extra_cap);
}
/// Shrinks the capacity of the vector as much as possible.
///
/// It will drop down as close as possible to the length but the allocator may still inform the
/// vector that there is space for a few more elements.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
/// vec.shrink_to_fit();
/// assert_eq!(vec.capacity(), 0);
/// # }
/// ```
///
pub fn shrink_to_fit(&mut self) {
    // The smallest buffer (in `B` words) still holding every live element.
    let needed = Self::capacity_to_buf(self.len());
    self.buf.shrink_to_fit(needed);
}
/// Expands the length of the vector as much as possible with current capacity.
///
/// Be sure not to use the method if the capacity was not set by yourself — i.e. you didn't
/// control the capacity, and hence cannot predict the resulting length.
pub fn expand_to_fit(&mut self) {
    // TODO: `fit_len` is computed but unused until this is implemented.
    let fit_len = Self::capacity_to_buf(self.len());
    unimplemented!();
}
/// Not yet implemented: would hand ownership of the storage out as a boxed slice.
pub fn into_boxed_slice(self) -> Box<[T]> {
    unimplemented!();
}
/// Shorten a vector to be `len` elements long, dropping excess elements.
///
/// If `len` is greater than the vector's current length, this has no effect.
/// Unlike `Vec::truncate`, this also shrinks the backing buffer.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(2);
/// unsafe { vec.set_len(2) }
/// vec.truncate(3);
/// assert_eq!(vec.len(), 2);
/// vec.truncate(1);
/// assert_eq!(vec.len(), 1);
/// # }
/// ```
pub fn truncate(&mut self, len: usize) {
    // Guard clause: growing is a no-op.
    if len >= self.len() {
        return;
    }
    self.len = len;
    self.shrink_to_fit();
}
/// Not yet implemented — packed sub-word elements are not individually
/// addressable, so a `&[T]` view may never be directly representable.
pub fn as_slice(&self) -> &[T] {
    unimplemented!();
}
/// Not yet implemented; see `as_slice`.
pub fn as_mut_slice(&mut self) -> &mut [T] {
    unimplemented!();
}
/// Sets the length of a vector.
///
/// This will explicitly set the size of the vector, without actually modifying its buffers or
/// reserving additional capacity as needed, so it is up to the caller to ensure that the vector
/// is actually the specified size.
///
/// Recommend to use [resize()](#method.resize) when you actually want to `resize` the vector.
///
/// # Safety
///
/// No checks are performed: callers must guarantee the first `len` elements
/// are backed by (and, for reads, initialized in) the buffer.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut v: NbitsVec<As2bits> = NbitsVec::new();
/// unsafe {
/// v.set_len(3);
/// }
/// assert_eq!(v.len(), 3);
/// assert_eq!(v.capacity(), 0); // as documented, the capacity will not change
/// unsafe {
/// v.set_len(1)
/// }
/// assert_eq!(v.len(), 1);
/// # }
/// ```
#[inline]
pub unsafe fn set_len(&mut self, len: usize) {
    self.len = len;
}
/// Not yet implemented (would mirror `Vec::swap_remove`).
pub fn swap_remove(&mut self, index: usize) -> T {
    unimplemented!();
}
/// Not yet implemented (would mirror `Vec::insert`).
pub fn insert(&mut self, index: usize, element: T) {
    unimplemented!();
}
/// Not yet implemented (would mirror `Vec::remove`).
pub fn remove(&mut self, index: usize) {
    unimplemented!();
}
/// Not yet implemented (would mirror `Vec::retain`).
pub fn retain<F>(&mut self, f: F)
    where F: FnMut(&T) -> bool
{
    unimplemented!();
}
/// Not yet implemented (would mirror `Vec::push`).
pub fn push(&mut self, value: T) {
    unimplemented!();
}
/// Not yet implemented (would mirror `Vec::pop`).
pub fn pop(&mut self) -> Option<T> {
    unimplemented!();
}
/// Not yet implemented.
/// NOTE(review): `other` uses the default `B = usize` word type, so this
/// signature can't accept another `NbitsVec<T, B>` when `B != usize` —
/// probably should be `&mut NbitsVec<T, B>`; confirm before implementing.
pub fn append(&mut self, other: &mut NbitsVec<T>) {
    unimplemented!();
}
/// Logically empties the vector; storage and capacity are left untouched.
#[inline]
pub fn clear(&mut self) {
    self.len = 0;
}
/// Number of elements currently stored.
#[inline]
pub fn len(&self) -> usize {
    self.len
}
/// Returns the number of bits in current length.
///
/// It is related to the element numbers - not the capacity.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
/// assert_eq!(vec.bits(), 0);
/// # }
/// ```
#[inline]
pub fn bits(&self) -> usize {
    self.len() * Self::unit_bits()
}
/// Total bits in buf.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
/// assert_eq!(vec.buf_bits(), std::mem::size_of::<usize>() * 8);
/// # }
/// ```
pub fn buf_bits(&self) -> usize {
    self.buf.cap() * Self::buf_unit_bits()
}
/// Returns whether or not the vector is empty.
///
/// Alias to `len() == 0`.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
/// assert!(vec.is_empty());
/// # }
/// ```
#[inline]
pub fn is_empty(&self) -> bool {
    self.len() == 0
}
/// Not yet implemented (would mirror `Vec::split_off`).
pub fn split_off(&mut self, at: usize) -> Self {
    unimplemented!();
}
/// Not yet implemented (would mirror the old `Vec::push_all`).
pub fn push_all(&mut self, other: &[T]) {
    unimplemented!();
}
// And any lost functions from `dedup` to the end.
/// Not yet implemented.
/// NOTE(review): takes `&self` and returns `()` despite the `get_mut` name —
/// the signature looks unfinished; revisit when implementing.
pub fn get_mut(&self, index: usize) {
    unimplemented!();
}
/// Resizes the Vec in-place so that len() is equal to new_len.
///
/// If new_len is greater than len(), the Vec is extended by the difference, with each
/// additional slot filled with value. If new_len is less than len(), the Vec is simply
/// truncated. Note that `resize` expand memeory will use `reserve_exact` method to
/// fit size.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::new();
/// vec.resize(10, 0);
/// assert_eq!(vec.capacity(), std::mem::size_of::<usize>() * 8 / 2);
/// # }
/// ```
#[inline]
pub fn resize(&mut self, new_len: usize, value: B) {
    let len = self.len();
    if len < new_len {
        // Grow: reserve exactly, fill the new tail, then publish the length.
        let n = new_len - len;
        self.reserve_exact(n);
        unsafe {
            // `value` is the raw bit pattern written into each new element.
            self.fill_buf(len, n, value);
            self.len = new_len;
        }
    } else {
        self.truncate(new_len);
    }
}
/// Moves the element run starting at `offset` so that it starts at `to`
/// instead: `to < offset` shifts left (shrinking the vector), `to > offset`
/// shifts right (growing it and zero-filling the gap).
///
/// ## Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits, u8> = NbitsVec::new();
/// vec.resize(24, 0);
/// unsafe {
/// vec.fill_buf(0, 12, 1);
/// vec.fill_buf(12, 12, 2);
/// }
/// println!("{:?}", vec);
/// // Left align will reduce the length.
/// vec.align(1, 0);
/// assert_eq!(vec.len(), 23);
/// assert!((0..).take(11).all(|x| vec.get(x) == 1));
/// assert!((11..).take(12).all(|x| vec.get(x) == 2));
///
/// vec.align(11, 3);
/// assert_eq!(vec.len(), 23 - 8);
/// assert!((0..).take(3).all(|x| vec.get(x) == 1));
/// assert!((3..vec.len()).all(|x| vec.get(x) == 2));
/// // Right align will expand the length.
/// vec.align(6, 7);
/// assert_eq!(vec.len(), 23 - 8 + 1);
/// assert!((6..7).all(|x| vec.get(x) == 0));
/// assert!((7..vec.len()).all(|x| vec.get(x) == 2));
///
/// vec.align(13, 33);
/// assert_eq!(vec.len(), 23 - 8 + 1 + 33 - 13);
/// assert!((13..33).all(|x| vec.get(x) == 0));
/// assert!((33..vec.len()).all(|x| vec.get(x) == 2));
/// println!("{:?}", vec);
/// # }
/// ```
pub fn align(&mut self, offset: usize, to: usize) {
    let unit = Self::unit_bits();
    let buf_unit = Self::buf_unit_bits();
    // Elements per storage word.
    let unit_cap = buf_unit / unit;
    if offset > to {
        // Reduce `interval` length.
        let interval = offset - to;
        // e.g. N = 2, B = u8, interval = 4
        // Fast path: a whole-word `ptr::copy` when elements pack evenly and
        // the shift distance is a whole number of words.
        if buf_unit % unit == 0 && interval % unit_cap == 0 {
            // Copy previous offset * unit % buf_unit values.
            // Move the word-unaligned head element-by-element first.
            let extra = offset % unit_cap;
            let (offset, to) = (0..extra).fold((offset, to), |(offset, to), _i| {
                let value = self.get(offset);
                self.set(to, value);
                (offset + 1, to + 1)
            });
            unsafe {
                let ptr = self.buf.ptr();
                let src = offset / unit_cap;
                let dst = to / unit_cap;
                let count = self.len() / unit_cap - src + 1;
                // Overlapping regions: `ptr::copy` (memmove semantics) is required.
                ptr::copy(ptr.offset(src as isize), ptr.offset(dst as isize), count);
            }
        } else {
            // Slow path: move each element individually, front to back.
            for offset in offset..self.len() {
                let value = self.get(offset);
                self.set(offset - interval, value);
            }
        }
        self.len = self.len - interval;
    } else {
        // Expand with `interval` length values.
        let interval = to - offset;
        let len = self.len();
        self.reserve_exact(interval);
        if buf_unit % unit == 0 && interval % unit_cap == 0 {
            unsafe {
                let ptr = self.buf.ptr();
                let src = offset / unit_cap;
                let dst = to / unit_cap;
                let count = len / unit_cap - src + 1;
                ptr::copy(ptr.offset(src as isize), ptr.offset(dst as isize), count);
                // Zero the newly opened gap, then publish the new length.
                self.fill_buf(offset, interval, B::zero());
                self.len = self.len() + interval;
            }
        } else {
            // Slow path: extend first, then move elements back to front so
            // nothing is overwritten before it is copied.
            self.len = len + interval;
            for offset in (offset..len).rev() {
                let value = self.get(offset);
                self.set(offset + interval, value);
            }
            unsafe {
                self.fill_buf(offset, interval, B::zero());
            }
        }
    }
}
/// Fill vector buf as `value` from `index` with size `length`.
///
/// ## Unsafety
///
/// The method does not check the index validation of the vector.
///
/// ## Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits, u8> = NbitsVec::new();
/// vec.resize(24, 0);
/// println!("{:?}", vec);
/// unsafe {
/// vec.fill_buf(1, 2, 2); // length < buf_unit
/// assert!((1..).take(2).all(|x| vec.get(x) == 2));
/// vec.fill_buf(0, 8, 1); // offset: 0, 0
/// assert!((0..).take(8).all(|x| vec.get(x) == 1));
/// vec.fill_buf(7, 10, 2); // offset: n, n
/// assert!((7..).take(10).all(|x| vec.get(x) == 2));
/// vec.fill_buf(8, 11, 1); // offset: 0,n
/// assert!((8..).take(11).all(|x| vec.get(x) == 1));
/// }
/// # }
/// ```
#[inline]
pub unsafe fn fill_buf(&mut self, index: usize, length: usize, value: B) {
    let unit = Self::unit_bits();
    // Single element: one masked write is enough.
    if length == 1 {
        return self.set_buf_bits(index * unit, unit, value);
    }
    let buf_unit = Self::buf_unit_bits();
    // Short runs (at most one storage word), or layouts where an element
    // does not evenly divide a word, are written element by element.
    if (length <= buf_unit / unit) || buf_unit % unit != 0 {
        for i in (index..).take(length) {
            self.set_buf_bits(i * unit, unit, value);
        }
        // BUG FIX: return here. Previously control fell through into the
        // whole-word path below, which assumes `buf_unit % unit == 0` and
        // would clobber neighbouring elements (a leftover debug `println!`
        // has also been removed).
        return;
    }
    // Whole-word path: build one word of `value` repeated `buf_unit / unit`
    // times, then blast it across the covered range.
    let mul = buf_unit / unit;
    let item = (0..mul).fold(B::zero(), |v, _x| v << unit | value);
    let ptr = self.buf.ptr();
    // Writes the pattern word into buf word positions [start, end).
    let write_buf = |start: usize, end: usize| {
        (start..end).fold(ptr.offset(start as isize), |ptr, _x| {
            ptr::write(ptr, item);
            ptr.offset(1)
        });
    };
    match Self::index_range_to_buf(index, length) {
        // Both ends word-aligned: pure word writes.
        ((start_idx, start_offset), (end_idx, end_offset)) if start_offset == 0 &&
        end_offset == 0 => {
            write_buf(start_idx, end_idx)
        }
        // Aligned head, partial tail word.
        ((start_idx, start_offset), (end_idx, end_offset)) if start_offset == 0 => {
            write_buf(start_idx, end_idx);
            self.set_buf_unit_bits(end_idx * buf_unit, end_offset, item);
        }
        // Partial head word, aligned tail.
        ((start_idx, start_offset), (end_idx, end_offset)) if end_offset == 0 => {
            self.set_buf_unit_bits(index * unit, buf_unit - start_offset, item);
            write_buf(start_idx + 1, end_idx);
        }
        // Partial words on both ends.
        ((start_idx, start_offset), (end_idx, end_offset)) => {
            self.set_buf_unit_bits(index * unit, buf_unit - start_offset, item);
            self.set_buf_unit_bits(end_idx * buf_unit, end_offset, item);
            write_buf(start_idx + 1, end_idx);
        }
    }
}
/// Stores the low `N` bits of `value` as element `index`.
///
/// Panics when `index` is out of bounds.
///
/// ## Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
/// unsafe { vec.set_len(2) }
/// vec.set(0, 0b11);
/// # }
/// ```
#[inline]
pub fn set(&mut self, index: usize, value: B) {
    if index >= self.len {
        panic!("attempt to set at {} but only {}", index, self.len);
    }
    let unit = Self::unit_bits();
    // Element `index` occupies bits [index * N, index * N + N).
    unsafe { self.set_buf_bits(index * unit, unit, value) }
}
/// Set `bit` at `index`.
///
/// Panics when `index` is at or past the last *used* bit (`self.bits()`).
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
/// vec.reserve(10);
/// unsafe { vec.set_len(7) };
/// vec.set_bit(0, true);
/// # }
/// ```
///
#[inline]
pub fn set_bit(&mut self, index: usize, bit: bool) {
    if index >= self.bits() {
        panic!("attempt to set bit out of bounds");
    }
    unsafe { self.set_buf_unit_bit(index, bit) }
}
/// Get `bit` at some bit index.
///
/// Returns `None` if required index is out of bounds, else return `bool` for bit value.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
/// vec.reserve(10);
/// assert!(vec.get_bit(0).is_none());
/// vec.resize(10, 0);
/// println!("{:?}", vec);
/// for i in 0..8 {
/// vec.set_bit(i, true);
/// println!("Set at {} as true", i);
/// println!("{:?}", vec);
/// assert_eq!(vec.get_bit(i), Some(true));
/// }
/// for i in 0..8 {
/// vec.set_bit(i, false);
/// assert_eq!(vec.get_bit(i), Some(false));
/// }
/// # }
/// ```
#[inline]
pub fn get_bit(&self, at: usize) -> Option<bool> {
    if at < self.bits() {
        unsafe { Some(self.get_buf_unit_bit(at) == B::one()) }
    } else {
        None
    }
}
/// Set `length` bits of buf at `offset`th bit as `value`.
///
/// ## Unsafety
///
/// `set_buf_bits` will not check the `offset`. Users should ensure to do this manually.
///
/// ## Panics
///
/// This method should panic while required `length` is longer than the buf unit bits size.
///
/// ## Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
///
/// unsafe {
/// println!("Set buf 0 as 1");
/// vec.set_buf_bits(0, 1, 1);
/// println!("Set buf bits [1, 2] as `10`");
/// vec.set_buf_bits(1, 2, 2);
/// println!("Set buf bits [3, 6] as `1010`");
/// vec.set_buf_bits(3, 4, 0b1010);
/// }
/// println!("{:?}", vec);
/// unsafe {
/// assert_eq!(vec.get_buf_bits(0, 1), 1);
/// assert_eq!(vec.get_buf_bits(1, 2), 2);
/// assert_eq!(vec.get_buf_bits(3, 4), 0b1010);
/// }
/// # }
/// ```
#[inline]
pub unsafe fn set_buf_bits(&mut self, offset: usize, length: usize, value: B) {
    let buf_unit = Self::buf_unit_bits();
    if length > buf_unit {
        panic!("set {} buf bits longer than buf unit bits {}",
        length,
        buf_unit);
    }
    // Single bit: cheap dedicated path.
    if length == 1 {
        return self.set_buf_unit_bit(offset, value & B::one() == B::one());
    }
    match Self::unit_bits() {
        unit if unit == buf_unit => {
            // NOTE: maybe unreachable!() is better.
            self.set_buf_unit_bits(offset, length, value);
        }
        // Evenly packed elements never straddle a word boundary, so a
        // single masked word write suffices.
        unit if unit < buf_unit && buf_unit % unit == 0 => {
            self.set_buf_unit_bits(offset, length, value);
        }
        _ => {
            // The range may straddle words: write bit by bit, LSB first,
            // clamped to the end of the allocated buffer.
            let mut v = value;
            for x in offset..cmp::min(offset + length, self.buf_bits()) {
                self.set_buf_unit_bit(x, v & B::one() == B::one());
                v = v >> 1;
            }
        }
    }
}
/// Zeroes `length` bits starting at buffer bit `offset` (must lie within
/// one storage word).
#[inline]
unsafe fn zero_buf_unit_bits(&mut self, offset: usize, length: usize) {
    self.set_buf_unit_bits(offset, length, B::zero())
}
/// Writes `length` bits of `value` at buffer bit `offset`; the whole range
/// must fall inside a single storage word.
#[inline]
unsafe fn set_buf_unit_bits(&mut self, offset: usize, length: usize, value: B) {
    let (index, offset) = Self::bit_index_to_buf(offset);
    // `length` ones shifted into position; the fold ignores the iterator's
    // values and merely repeats `length` times.
    let mask = (offset..)
    .take(length)
    .fold(B::zero(), |mask, _x| mask << 1 | B::one()) <<
    offset;
    let ptr = self.buf.ptr().offset(index as isize);
    let cur = ptr::read(ptr);
    let new = mask & (value << offset);
    let old = mask & cur;
    // Skip the write when the masked bits are already correct.
    if old != new {
        ptr::write(ptr, cur & !mask | new);
    }
}
/// Sets a single bit at buffer bit `offset`.
#[inline]
unsafe fn set_buf_unit_bit(&mut self, offset: usize, bit: bool) {
    let (index, offset) = Self::bit_index_to_buf(offset);
    let mask = B::one() << offset;
    let ptr = self.buf.ptr().offset(index as isize);
    let cur = ptr::read(ptr);
    let old = cur >> offset & B::one();
    // Write only when the stored bit actually changes.
    match (old == B::one(), bit) {
        (lhs, rhs) if lhs == rhs => (),
        (_, true) => ptr::write(ptr, cur | mask),
        (_, false) => ptr::write(ptr, cur & mask.not()),
    }
}
/// Get `N` bits value as `B`.
///
/// Panics when `index` is out of bounds.
///
/// ## TODO
///
/// ?? Is a `Nbits` object is better than `B` ??
///
/// ## Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
/// unsafe { vec.set_len(2) }
/// vec.set(0, 0b11);
/// assert_eq!(vec.get(0), 0b11);
/// # }
/// ```
pub fn get(&self, index: usize) -> B {
    if index >= self.len {
        panic!("attempt to get at {} but only {}", index, self.len);
    }
    // Element `index` occupies bits [index * N, index * N + N).
    unsafe { self.get_buf_bits(index * Self::unit_bits(), Self::unit_bits()) }
}
/// Get `length` bits of buf at `offset`th bit.
///
/// # Unsafety
///
/// `get_buf_bits` will not check the `offset`. Users should ensure to do this manually.
///
/// # Panics
///
/// This method should panic while required `length` is longer than the buf unit bits size.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::new();
/// vec.resize(10, 0);
/// println!("{:?}", vec);
/// for i in 0..8 {
/// vec.set_bit(i, if i % 2 == 0 { true } else { false });
/// }
/// println!("{:?}", vec);
/// unsafe {
/// println!("Get buf bits at 0 with length 1");
/// assert_eq!(vec.get_buf_bits(0, 1), 1);
/// println!("Get buf bits at 1 with length 2");
/// assert_eq!(vec.get_buf_bits(1, 2), 2);
/// println!("Get buf bits at 3 with length 4");
/// assert_eq!(vec.get_buf_bits(3, 4), 0b1010);
/// }
/// # }
/// ```
#[inline]
pub unsafe fn get_buf_bits(&self, offset: usize, length: usize) -> B {
    let buf_unit = Self::buf_unit_bits();
    if length > buf_unit {
        panic!("get {} buf bits longer than buf unit bits {}",
        length,
        buf_unit);
    }
    // Single bit: cheap dedicated path.
    if length == 1 {
        return self.get_buf_unit_bit(offset);
    }
    match (Self::unit_bits(), Self::buf_unit_bits()) {
        (unit, buf_unit) if unit == buf_unit => {
            // NOTE: maybe unreachable!() is better
            self.get_buf_unit_bits(offset, length)
        }
        // Evenly packed elements never straddle a word boundary: one read.
        (unit, buf_unit) if unit < buf_unit && buf_unit % unit == 0 => {
            self.get_buf_unit_bits(offset, length)
        }
        (_, _) => {
            // May straddle words: gather bit by bit (clamped to the buffer),
            // folding MSB-first to mirror the bit-by-bit writer.
            (offset..cmp::min(offset + length, self.buf_bits()))
            .map(|x| self.get_buf_unit_bit(x))
            .fold(B::zero(), |v, x| v << 1 | x)
        }
    }
}
/// Reads the single bit at buffer bit `offset`, returned as `B` (0 or 1).
#[inline]
unsafe fn get_buf_unit_bit(&self, offset: usize) -> B {
    let (index, offset) = Self::bit_index_to_buf(offset);
    let ptr = self.buf.ptr().offset(index as isize);
    ptr::read(ptr) >> offset & B::one()
}
/// Reads `length` bits at buffer bit `offset`; the whole range must fall
/// inside a single storage word.
#[inline]
unsafe fn get_buf_unit_bits(&self, offset: usize, length: usize) -> B {
    let offset = Self::bit_index_to_buf(offset);
    let ptr = self.buf.ptr().offset(offset.0 as isize);
    let unit = Self::buf_unit_bits();
    // Shift left to drop the high bits above the field, then right to drop
    // the low bits below it, leaving the field right-aligned.
    (ptr::read(ptr) << (unit - offset.1 - length)) >> (unit - length)
}
/// Converts an element capacity into the number of `B` storage words
/// needed to hold it (ceiling division over the word size).
#[inline]
fn capacity_to_buf(capacity: usize) -> usize {
    if capacity == 0 {
        return 0;
    }
    (capacity * Self::unit_bits() - 1) / (Self::buf_unit_bits()) + 1
}
/// Converts the storage size to capacity.
/// (How many whole elements fit in `buf_cap` storage words.)
#[inline]
fn capacity_from_buf(buf_cap: usize) -> usize {
    buf_cap * Self::buf_unit_bits() / Self::unit_bits()
}
/// Converts the vector index to buf `(index, offset)` tuple.
/// Offsets are measured in bits within the storage word.
#[inline]
fn index_to_buf(index: usize) -> (usize, usize) {
    let elem_bits = Self::buf_unit_bits();
    let bits_index = index * Self::unit_bits();
    (bits_index / elem_bits, bits_index % elem_bits)
}
/// Converts the vector index range to buf `(index, offset)` range tuple.
/// The second tuple is the (exclusive) end position of the range.
#[inline]
fn index_range_to_buf(index: usize, length: usize) -> ((usize, usize), (usize, usize)) {
    (Self::index_to_buf(index),
    Self::index_to_buf(index + length))
}
/// Converts bit index to buf `(index, offset)` tuple.
#[inline]
fn bit_index_to_buf(index: usize) -> (usize, usize) {
    let unit = Self::buf_unit_bits();
    (index / unit, index % unit)
}
/// Returns size of `B`.
/// (In bits: `size_of::<B>() * 8`.)
#[inline]
fn buf_unit_bits() -> usize {
    mem::size_of::<B>() * 8
}
/// Returns unit of bits - that is `NbitsVec`'s `N`.
#[inline]
fn unit_bits() -> usize {
    T::bits()
}
}
Implement `push` and `pop`.
use alloc::raw_vec::RawVec;
use num::{self, PrimInt};
use std::cmp;
use std::ops::*;
use std::fmt::{self, Debug, Display};
use std::mem;
use std::ptr;
use std::marker::PhantomData;
/// Describes how many bits a single stored element occupies.
pub trait Nbits {
    // Number of bits per element.
    fn bits() -> usize;
    /// A `usize` with the low `Self::bits()` bits set — the all-ones mask
    /// for one element, built bit-by-bit to avoid shift overflow.
    #[inline]
    fn mask() -> usize {
        (0..).take(Self::bits()).fold(0, |mask, _x| mask << 1 | 1)
    }
}
/// A compact vector whose elements each occupy `T::bits()` bits, packed
/// into a raw buffer of `B` words (defaults to `usize`).
pub struct NbitsVec<T: Nbits, B = usize> {
    // Raw storage; its capacity is counted in `B` words, not in elements.
    buf: RawVec<B>,
    // Number of logical `T`-sized elements currently stored.
    len: usize,
    // Ties the element width `T` to the type without storing any `T`.
    _marker: PhantomData<T>,
}
impl<
    T: Nbits,
    B: PrimInt,
> Default for NbitsVec<T, B> {
    /// Returns an empty vector; identical to [`NbitsVec::new`].
    fn default() -> Self {
        // DRY: delegate to `new()` instead of repeating the struct literal.
        Self::new()
    }
}
impl<T: Nbits, B: PrimInt + fmt::LowerHex> Debug for NbitsVec<T, B> {
    /// Dumps the length, buffer capacity, and every raw storage word in hex.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        try!(write!(f,
        "NbitsVec<{}> {{ len: {}, buf: RawVec {{ cap: {}, [",
        T::bits(),
        self.len,
        self.buf.cap()));
        let ptr = self.buf.ptr();
        for i in 0..self.buf.cap() {
            unsafe {
                // NOTE(review): reads every allocated word, including words
                // past `len` — uninitialized tails may print garbage.
                try!(write!(f, "{:#x}, ", ptr::read(ptr.offset(i as isize))));
            }
        }
        write!(f, "] }}")
    }
}
impl<
T: Nbits,
B: PrimInt
> NbitsVec<T, B> {
/// Constructs a new, empty NbitsVec<T>
///
/// The vector will not allocate until elements are pushed onto it.
///
/// # Examples
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::new();
/// # }
/// ```
#[inline]
pub fn new() -> Self {
    NbitsVec {
        buf: RawVec::new(),
        len: 0,
        _marker: PhantomData,
    }
}
/// Constructs a new, empty Vec<T> with the specified capacity.
///
/// The vector will be able to hold exactly capacity elements without reallocating. If capacity
/// is 0, the vector will not allocate.
///
/// It is important to note that this function does not specify the length of the returned
/// vector, but only the capacity.
///
/// # Examples
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
/// assert!(vec.capacity() >= 10);
/// # }
/// ```
pub fn with_capacity(capacity: usize) -> Self {
    NbitsVec {
        // Element capacity is converted to the number of `B` words needed.
        buf: RawVec::with_capacity(Self::capacity_to_buf(capacity)),
        len: 0,
        _marker: PhantomData,
    }
}
/// Builds a vector from a raw pointer, a length and a capacity, both
/// counted in *elements*.
///
/// # Safety
///
/// `ptr` must point to an allocation of at least
/// `Self::capacity_to_buf(capacity)` `B` words, allocated the way `RawVec`
/// expects, with the first `length` elements initialized — presumably
/// mirroring `Vec::from_raw_parts`' contract (TODO confirm).
pub unsafe fn from_raw_parts(ptr: *mut B, length: usize, capacity: usize) -> Self {
    NbitsVec {
        buf: RawVec::from_raw_parts(ptr, Self::capacity_to_buf(capacity)),
        len: length,
        _marker: PhantomData,
    }
}
    /// Returns the number of elements the vector can hold without reallocating.
    ///
    /// # Examples
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::{NbitsVec, As1bits};
    /// # fn main() {
    /// let v: NbitsVec<As1bits> = NbitsVec::with_capacity(10);
    /// assert!(v.capacity() >= 10);
    /// assert_eq!(v.capacity(), std::mem::size_of::<usize>() * 8);
    /// # }
    /// ```
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        // Word capacity converted back to element capacity; this rounds to
        // whole words, which is why it can exceed the requested capacity.
        Self::capacity_from_buf(self.buf.cap())
    }
    /// Reserves capacity for at least additional more elements to be inserted in the given
    /// NbitsVec<T>.
    /// The collection may reserve more space to avoid frequent reallocations.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows usize.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut v: NbitsVec<As2bits> = NbitsVec::new();
    /// assert!(v.capacity() == 0);
    /// v.reserve(100);
    /// assert!(v.capacity() >= 100);
    /// # }
    /// ```
    pub fn reserve(&mut self, additional: usize) {
        let required_cap = self.len().checked_add(additional).expect("capacity overflow");
        // RawVec::reserve takes (words in use, total words wanted).
        let used_cap = Self::capacity_to_buf(self.len());
        let need_extra_cap = Self::capacity_to_buf(required_cap);
        self.buf.reserve(used_cap, need_extra_cap);
    }
    /// Reserves the minimum capacity for exactly additional more elements to be inserted in the
    /// given `NbitsVec<T>`. Does nothing if the capacity is already sufficient.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows usize.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// use bits_vec::*;
    /// # fn main() {
    /// let mut v: NbitsVec<As2bits> = NbitsVec::new();
    /// assert!(v.capacity() == 0);
    /// v.reserve_exact(64);
    /// assert_eq!(v.capacity(), 64);
    /// v.reserve_exact(127);
    /// assert!(v.capacity() >= 127);
    /// v.reserve_exact(128);
    /// assert_eq!(v.capacity(), 128);
    /// # }
    /// ```
    pub fn reserve_exact(&mut self, additional: usize) {
        let required_cap = self.len().checked_add(additional).expect("capacity overflow");
        let used_cap = Self::capacity_to_buf(self.len());
        let need_extra_cap = Self::capacity_to_buf(required_cap);
        self.buf.reserve_exact(used_cap, need_extra_cap);
    }
    /// Shrinks the capacity of the vector as much as possible.
    ///
    /// It will drop down as close as possible to the length but the allocator may still inform the
    /// vector that there is space for a few more elements.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
    /// vec.shrink_to_fit();
    /// assert_eq!(vec.capacity(), 0);
    /// # }
    /// ```
    ///
    pub fn shrink_to_fit(&mut self) {
        // Shrink to the number of whole `B` words the current length needs.
        let fit_len = Self::capacity_to_buf(self.len());
        self.buf.shrink_to_fit(fit_len);
    }
/// Expands the length of the vector as much as possible with current capacity.
///
/// Be sure not to use the method if the capacity is not setted by yourself - means you didn't
/// expect the capacity so as the length.
pub fn expand_to_fit(&mut self) {
let fit_len = Self::capacity_to_buf(self.len());
unimplemented!();
}
    /// Converts the vector into `Box<[T]>`. Not implemented yet.
    pub fn into_boxed_slice(self) -> Box<[T]> {
        unimplemented!();
    }
    /// Shorten a vector to be `len` elements long, dropping excess elements.
    ///
    /// If `len` is greater than the vector's current length, this has no effect.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(2);
    /// unsafe { vec.set_len(2) }
    /// vec.truncate(3);
    /// assert_eq!(vec.len(), 2);
    /// vec.truncate(1);
    /// assert_eq!(vec.len(), 1);
    /// # }
    /// ```
    pub fn truncate(&mut self, len: usize) {
        if self.len() > len {
            self.len = len;
            // NOTE(review): unlike `Vec::truncate`, this also releases excess
            // capacity — confirm callers expect the deallocation.
            self.shrink_to_fit();
        }
    }
    /// Borrows the contents as a slice of `T`. Not implemented yet.
    pub fn as_slice(&self) -> &[T] {
        unimplemented!();
    }
    /// Mutably borrows the contents as a slice of `T`. Not implemented yet.
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        unimplemented!();
    }
    /// Sets the length of a vector.
    ///
    /// This will explicitly set the size of the vector, without actually modifying its buffers or
    /// reserving additional capacity as needed, so it is up to the caller to ensure that the vector
    /// is actually the specified size.
    ///
    /// Recommend to use [resize()](#method.resize) when you actually want to `resize` the vector.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut v: NbitsVec<As2bits> = NbitsVec::new();
    /// unsafe {
    ///     v.set_len(3);
    /// }
    /// assert_eq!(v.len(), 3);
    /// assert_eq!(v.capacity(), 0); // as documented, the capacity will not change
    /// unsafe {
    ///     v.set_len(1)
    /// }
    /// assert_eq!(v.len(), 1);
    /// # }
    /// ```
    #[inline]
    pub unsafe fn set_len(&mut self, len: usize) {
        self.len = len;
    }
    /// Removes `index` by swapping in the last element. Not implemented yet.
    pub fn swap_remove(&mut self, index: usize) -> T {
        unimplemented!();
    }
    /// Inserts `element` at `index`, shifting the rest. Not implemented yet.
    pub fn insert(&mut self, index: usize, element: T) {
        unimplemented!();
    }
    /// Removes the element at `index`. Not implemented yet.
    pub fn remove(&mut self, index: usize) {
        unimplemented!();
    }
    /// Retains only elements for which `f` returns true. Not implemented yet.
    pub fn retain<F>(&mut self, f: F)
        where F: FnMut(&T) -> bool
    {
        unimplemented!();
    }
    /// Moves all elements of `other` into `self`. Not implemented yet.
    pub fn append(&mut self, other: &mut NbitsVec<T>) {
        unimplemented!();
    }
    /// Clears the vector; like `Vec::clear`, capacity is left untouched.
    #[inline]
    pub fn clear(&mut self) {
        self.len = 0;
    }
    /// Returns the number of elements currently in the vector.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }
    /// Returns the number of bits in current length.
    ///
    /// It is related to the element numbers - not the capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
    /// assert_eq!(vec.bits(), 0);
    /// # }
    /// ```
    #[inline]
    pub fn bits(&self) -> usize {
        self.len() * Self::unit_bits()
    }
    /// Total bits in buf.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
    /// assert_eq!(vec.buf_bits(), std::mem::size_of::<usize>() * 8);
    /// # }
    /// ```
    pub fn buf_bits(&self) -> usize {
        // Allocated bits, including bits beyond `len`.
        self.buf.cap() * Self::buf_unit_bits()
    }
    /// Returns whether or not the vector is empty.
    ///
    /// Alias to `len() == 0`.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
    /// assert!(vec.is_empty());
    /// # }
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Splits the vector at `at`, returning the tail. Not implemented yet.
    pub fn split_off(&mut self, at: usize) -> Self {
        unimplemented!();
    }
    /// Appends all elements of a slice. Not implemented yet.
    pub fn push_all(&mut self, other: &[T]) {
        unimplemented!();
    }
    // And any lost functions from `dedup` to the end.
    // NOTE(review): `get_mut` takes `&self` and returns `()` — signature looks
    // like a placeholder; confirm the intended API before implementing.
    pub fn get_mut(&self, index: usize) {
        unimplemented!();
    }
    /// Appends an element to the back of a collection.
    ///
    /// # Panics
    ///
    /// Panics if the number of elements in the vector overflows a `usize`.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut vec: NbitsVec<As2bits> = NbitsVec::new();
    /// vec.push(0b10);
    /// vec.push(0b01);
    /// assert_eq!(vec.len(), 2);
    /// # }
    /// ```
    pub fn push(&mut self, value: B) {
        let len = self.len();
        let new_len = len.checked_add(1).expect("usize added overflows");
        self.reserve(1);
        // Grow the length first: `set` bounds-checks against `self.len`.
        self.len = new_len;
        self.set(len, value);
    }
/// Removes the last element from a vector and returns it, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::new();
/// vec.push(0b11);
/// assert_eq!(vec.pop(), Some(0b11));
/// assert_eq!(vec.len(), 0);
/// # }
/// ```
pub fn pop(&mut self) -> Option<B> {
if self.len() == 0 {
return None;
}
let first = self.get(0);
self.align(1, 0);
Some(first)
}
    /// Resizes the Vec in-place so that len() is equal to new_len.
    ///
    /// If new_len is greater than len(), the Vec is extended by the difference, with each
    /// additional slot filled with value. If new_len is less than len(), the Vec is simply
    /// truncated. Note that `resize` expand memeory will use `reserve_exact` method to
    /// fit size.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut vec: NbitsVec<As2bits> = NbitsVec::new();
    /// vec.resize(10, 0);
    /// assert_eq!(vec.capacity(), std::mem::size_of::<usize>() * 8 / 2);
    /// # }
    /// ```
    #[inline]
    pub fn resize(&mut self, new_len: usize, value: B) {
        let len = self.len();
        if len < new_len {
            let n = new_len - len;
            self.reserve_exact(n);
            unsafe {
                // Write the fill values into the newly reserved region, then
                // publish them by growing `len`.
                self.fill_buf(len, n, value);
                self.len = new_len;
            }
        } else {
            self.truncate(new_len);
        }
    }
    /// Moves the elements starting at `offset` so they start at `to` instead:
    /// `offset > to` shifts left (shrinking `len` by the difference),
    /// `offset < to` shifts right (growing `len` and zero-filling the gap).
    ///
    /// ## Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut vec: NbitsVec<As2bits, u8> = NbitsVec::new();
    /// vec.resize(24, 0);
    /// unsafe {
    ///     vec.fill_buf(0, 12, 1);
    ///     vec.fill_buf(12, 12, 2);
    /// }
    /// println!("{:?}", vec);
    /// // Left align will reduce the length.
    /// vec.align(1, 0);
    /// assert_eq!(vec.len(), 23);
    /// assert!((0..).take(11).all(|x| vec.get(x) == 1));
    /// assert!((11..).take(12).all(|x| vec.get(x) == 2));
    ///
    /// vec.align(11, 3);
    /// assert_eq!(vec.len(), 23 - 8);
    /// assert!((0..).take(3).all(|x| vec.get(x) == 1));
    /// assert!((3..vec.len()).all(|x| vec.get(x) == 2));
    /// // Right align will expand the length.
    /// vec.align(6, 7);
    /// assert_eq!(vec.len(), 23 - 8 + 1);
    /// assert!((6..7).all(|x| vec.get(x) == 0));
    /// assert!((7..vec.len()).all(|x| vec.get(x) == 2));
    ///
    /// vec.align(13, 33);
    /// assert_eq!(vec.len(), 23 - 8 + 1 + 33 - 13);
    /// assert!((13..33).all(|x| vec.get(x) == 0));
    /// assert!((33..vec.len()).all(|x| vec.get(x) == 2));
    /// println!("{:?}", vec);
    /// # }
    /// ```
    pub fn align(&mut self, offset: usize, to: usize) {
        let unit = Self::unit_bits();
        let buf_unit = Self::buf_unit_bits();
        // Elements per backing word.
        let unit_cap = buf_unit / unit;
        if offset > to {
            // Reduce `interval` length.
            let interval = offset - to;
            // e.g. N = 2, B = u8, interval = 4
            if buf_unit % unit == 0 && interval % unit_cap == 0 {
                // Fast path: the shift distance is a whole number of words, so
                // copy the leading unaligned elements one by one, then move
                // the remaining words with a single overlapping memmove.
                // Copy previous offset * unit % buf_unit values.
                let extra = offset % unit_cap;
                let (offset, to) = (0..extra).fold((offset, to), |(offset, to), _i| {
                    let value = self.get(offset);
                    self.set(to, value);
                    (offset + 1, to + 1)
                });
                unsafe {
                    let ptr = self.buf.ptr();
                    let src = offset / unit_cap;
                    let dst = to / unit_cap;
                    let count = self.len() / unit_cap - src + 1;
                    ptr::copy(ptr.offset(src as isize), ptr.offset(dst as isize), count);
                }
            } else {
                // Slow path: element-by-element left shift.
                for offset in offset..self.len() {
                    let value = self.get(offset);
                    self.set(offset - interval, value);
                }
            }
            self.len = self.len - interval;
        } else {
            // Expand with `interval` length values.
            let interval = to - offset;
            let len = self.len();
            self.reserve_exact(interval);
            if buf_unit % unit == 0 && interval % unit_cap == 0 {
                unsafe {
                    let ptr = self.buf.ptr();
                    let src = offset / unit_cap;
                    let dst = to / unit_cap;
                    let count = len / unit_cap - src + 1;
                    ptr::copy(ptr.offset(src as isize), ptr.offset(dst as isize), count);
                    // Zero the vacated gap, then publish the new length.
                    self.fill_buf(offset, interval, B::zero());
                    self.len = self.len() + interval;
                }
            } else {
                // Slow path: shift right from the tail so nothing is overwritten
                // before it is read; length must grow first for `set` bounds.
                self.len = len + interval;
                for offset in (offset..len).rev() {
                    let value = self.get(offset);
                    self.set(offset + interval, value);
                }
                unsafe {
                    self.fill_buf(offset, interval, B::zero());
                }
            }
        }
    }
/// Fill vector buf as `value` from `index` with size `length`.
///
/// ## Unsafety
///
/// The method doesnot check the index validation of the vector.
///
/// ## Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits, u8> = NbitsVec::new();
/// vec.resize(24, 0);
/// println!("{:?}", vec);
/// unsafe {
/// vec.fill_buf(1, 2, 2); // length < buf_unit
/// assert!((1..).take(2).all(|x| vec.get(x) == 2));
/// vec.fill_buf(0, 8, 1); // offset: 0, 0
/// assert!((0..).take(8).all(|x| vec.get(x) == 1));
/// vec.fill_buf(7, 10, 2); // offset: n, n
/// assert!((7..).take(10).all(|x| vec.get(x) == 2));
/// vec.fill_buf(8, 11, 1); // offset: 0,n
/// assert!((8..).take(11).all(|x| vec.get(x) == 1));
/// }
/// # }
/// ```
#[inline]
pub unsafe fn fill_buf(&mut self, index: usize, length: usize, value: B) {
let unit = Self::unit_bits();
if length == 1 {
return self.set_buf_bits(index * unit, unit, value);
}
let buf_unit = Self::buf_unit_bits();
if (length <= buf_unit / unit) || buf_unit % unit != 0 {
println!("length is short");
for i in (index..).take(length) {
self.set_buf_bits(i * unit, unit, value);
}
}
let mul = buf_unit / unit;
let item = (0..mul).fold(B::zero(), |v, _x| v << unit | value);
let ptr = self.buf.ptr();
let write_buf = |start: usize, end: usize| {
(start..end).fold(ptr.offset(start as isize), |ptr, _x| {
ptr::write(ptr, item);
ptr.offset(1)
});
};
match Self::index_range_to_buf(index, length) {
((start_idx, start_offset), (end_idx, end_offset)) if start_offset == 0 &&
end_offset == 0 => {
write_buf(start_idx, end_idx)
}
((start_idx, start_offset), (end_idx, end_offset)) if start_offset == 0 => {
write_buf(start_idx, end_idx);
self.set_buf_unit_bits(end_idx * buf_unit, end_offset, item);
}
((start_idx, start_offset), (end_idx, end_offset)) if end_offset == 0 => {
self.set_buf_unit_bits(index * unit, buf_unit - start_offset, item);
write_buf(start_idx + 1, end_idx);
}
((start_idx, start_offset), (end_idx, end_offset)) => {
self.set_buf_unit_bits(index * unit, buf_unit - start_offset, item);
self.set_buf_unit_bits(end_idx * buf_unit, end_offset, item);
write_buf(start_idx + 1, end_idx);
}
}
}
    /// Sets the element at `index` to the low `N` bits of `value`.
    ///
    /// ## Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
    /// unsafe { vec.set_len(2) }
    /// vec.set(0, 0b11);
    /// # }
    /// ```
    #[inline]
    pub fn set(&mut self, index: usize, value: B) {
        // Bounds check against the logical length, not the capacity.
        if index >= self.len {
            panic!("attempt to set at {} but only {}", index, self.len);
        }
        unsafe {
            let unit = Self::unit_bits();
            self.set_buf_bits(index * unit, unit, value);
        }
    }
    /// Set `bit` at `index`.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
    /// vec.reserve(10);
    /// unsafe { vec.set_len(7) };
    /// vec.set_bit(0, true);
    /// # }
    /// ```
    ///
    #[inline]
    pub fn set_bit(&mut self, index: usize, bit: bool) {
        // `index` is a raw *bit* position, bounded by len * N bits.
        let bits = self.bits();
        if index >= bits {
            panic!("attempt to set bit out of bounds");
        }
        unsafe {
            self.set_buf_unit_bit(index, bit);
        }
    }
/// Get `bit` at some bit index.
///
/// Returns `None` if required index is out of bounds, else return `bool` for bit value.
///
/// # Examples
///
/// ```
/// # extern crate bits_vec;
/// # use bits_vec::*;
/// # fn main() {
/// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
/// vec.reserve(10);
/// assert!(vec.get_bit(0).is_none());
/// vec.resize(10, 0);
/// println!("{:?}", vec);
/// for i in 0..8 {
/// vec.set_bit(i, true);
/// println!("Set at {} as true", i);
/// println!("{:?}", vec);
/// assert_eq!(vec.get_bit(i), Some(true));
/// }
/// for i in 0..8 {
/// vec.set_bit(i, false);
/// assert_eq!(vec.get_bit(i), Some(false));
/// }
/// # }
/// ```
#[inline]
pub fn get_bit(&self, at: usize) -> Option<bool> {
let bits = self.bits();
if at >= bits {
return None;
} else {
unsafe { Some(self.get_buf_unit_bit(at) == B::one()) }
}
}
    /// Set `length` bits of buf at `offset`th bit as `value`.
    ///
    /// ## Unsafety
    ///
    /// `set_buf_bits` will not check the `offset`. Users should ensure to do this manually.
    ///
    /// ## Panics
    ///
    /// This method should panic while required `length` is longer than the buf unit bits size.
    ///
    /// ## Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
    ///
    /// unsafe {
    ///     println!("Set buf 0 as 1");
    ///     vec.set_buf_bits(0, 1, 1);
    ///     println!("Set buf bits [1, 2] as `10`");
    ///     vec.set_buf_bits(1, 2, 2);
    ///     println!("Set buf bits [3, 6] as `1010`");
    ///     vec.set_buf_bits(3, 4, 0b1010);
    /// }
    /// println!("{:?}", vec);
    /// unsafe {
    ///     assert_eq!(vec.get_buf_bits(0, 1), 1);
    ///     assert_eq!(vec.get_buf_bits(1, 2), 2);
    ///     assert_eq!(vec.get_buf_bits(3, 4), 0b1010);
    /// }
    /// # }
    /// ```
    #[inline]
    pub unsafe fn set_buf_bits(&mut self, offset: usize, length: usize, value: B) {
        let buf_unit = Self::buf_unit_bits();
        if length > buf_unit {
            panic!("set {} buf bits longer than buf unit bits {}",
                   length,
                   buf_unit);
        }
        if length == 1 {
            return self.set_buf_unit_bit(offset, value & B::one() == B::one());
        }
        match Self::unit_bits() {
            unit if unit == buf_unit => {
                // NOTE: maybe unreachable!() is better.
                self.set_buf_unit_bits(offset, length, value);
            }
            unit if unit < buf_unit && buf_unit % unit == 0 => {
                // Element width divides the word: the run cannot straddle a
                // word boundary, so a single masked word write suffices.
                self.set_buf_unit_bits(offset, length, value);
            }
            _ => {
                // May straddle a word boundary: write bit by bit, LSB first.
                let mut v = value;
                for x in offset..cmp::min(offset + length, self.buf_bits()) {
                    self.set_buf_unit_bit(x, v & B::one() == B::one());
                    v = v >> 1;
                }
            }
        }
    }
    /// Mask buf element of `index` at offset `(from, to)` as zero.
    #[inline]
    unsafe fn zero_buf_unit_bits(&mut self, offset: usize, length: usize) {
        self.set_buf_unit_bits(offset, length, B::zero());
    }
    /// Set buf element of `index` at offset `from` to `to` as `value`.
    ///
    /// The run must lie within a single `B` word; the caller guarantees this.
    #[inline]
    unsafe fn set_buf_unit_bits(&mut self, offset: usize, length: usize, value: B) {
        let (index, offset) = Self::bit_index_to_buf(offset);
        // `length` ones shifted into position within the word.
        let mask = (offset..)
            .take(length)
            .fold(B::zero(), |mask, _x| mask << 1 | B::one()) << offset;
        let ptr = self.buf.ptr().offset(index as isize);
        let cur = ptr::read(ptr);
        let new = mask & (value << offset);
        let old = mask & cur;
        // Skip the store when the masked bits already match.
        if old != new {
            ptr::write(ptr, cur & !mask | new);
        }
    }
    /// Set buf unit bit at `index`th unit of `offset`bit.
    #[inline]
    unsafe fn set_buf_unit_bit(&mut self, offset: usize, bit: bool) {
        let (index, offset) = Self::bit_index_to_buf(offset);
        let mask = B::one() << offset;
        let ptr = self.buf.ptr().offset(index as isize);
        let cur = ptr::read(ptr);
        let old = cur >> offset & B::one();
        // Only write when the stored bit actually changes.
        match (old == B::one(), bit) {
            (lhs, rhs) if lhs == rhs => (),
            (_, true) => ptr::write(ptr, cur | mask),
            (_, false) => ptr::write(ptr, cur & mask.not()),
        }
    }
    /// Get `N` bits value as `B`.
    ///
    /// ## TODO
    ///
    /// ?? Is a `Nbits` object is better than `B` ??
    ///
    /// ## Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut vec: NbitsVec<As2bits> = NbitsVec::with_capacity(10);
    /// unsafe { vec.set_len(2) }
    /// vec.set(0, 0b11);
    /// assert_eq!(vec.get(0), 0b11);
    /// # }
    /// ```
    pub fn get(&self, index: usize) -> B {
        // Bounds check against the logical length, mirroring `set`.
        if index >= self.len {
            panic!("attempt to get at {} but only {}", index, self.len);
        }
        let unit = Self::unit_bits();
        unsafe { self.get_buf_bits(index * unit, unit) }
    }
    /// Get `length` bits of buf at `offset`th bit.
    ///
    /// # Unsafety
    ///
    /// `get_buf_bits` will not check the `offset`. Users should ensure to do this manually.
    ///
    /// # Panics
    ///
    /// This method should panic while required `length` is longer than the buf unit bits size.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate bits_vec;
    /// # use bits_vec::*;
    /// # fn main() {
    /// let mut vec: NbitsVec<As2bits> = NbitsVec::new();
    /// vec.resize(10, 0);
    /// println!("{:?}", vec);
    /// for i in 0..8 {
    ///     vec.set_bit(i, if i % 2 == 0 { true } else { false });
    /// }
    /// println!("{:?}", vec);
    /// unsafe {
    ///     println!("Get buf bits at 0 with length 1");
    ///     assert_eq!(vec.get_buf_bits(0, 1), 1);
    ///     println!("Get buf bits at 1 with length 2");
    ///     assert_eq!(vec.get_buf_bits(1, 2), 2);
    ///     println!("Get buf bits at 3 with length 4");
    ///     assert_eq!(vec.get_buf_bits(3, 4), 0b1010);
    /// }
    /// # }
    /// ```
    #[inline]
    pub unsafe fn get_buf_bits(&self, offset: usize, length: usize) -> B {
        let buf_unit = Self::buf_unit_bits();
        if length > buf_unit {
            panic!("get {} buf bits longer than buf unit bits {}",
                   length,
                   buf_unit);
        }
        if length == 1 {
            return self.get_buf_unit_bit(offset);
        }
        match (Self::unit_bits(), Self::buf_unit_bits()) {
            (unit, buf_unit) if unit == buf_unit => {
                // NOTE: maybe unreachable!() is better
                self.get_buf_unit_bits(offset, length)
            }
            (unit, buf_unit) if unit < buf_unit && buf_unit % unit == 0 => {
                // Run is confined to a single word: one masked read suffices.
                self.get_buf_unit_bits(offset, length)
            }
            (_, _) => {
                // May straddle a word boundary: gather bit by bit.
                (offset..cmp::min(offset + length, self.buf_bits()))
                    .map(|x| self.get_buf_unit_bit(x))
                    .fold(B::zero(), |v, x| v << 1 | x)
            }
        }
    }
    /// Get buf unit bit at `index`th unit of `offset`bit.
    #[inline]
    unsafe fn get_buf_unit_bit(&self, offset: usize) -> B {
        let (index, offset) = Self::bit_index_to_buf(offset);
        let ptr = self.buf.ptr().offset(index as isize);
        ptr::read(ptr) >> offset & B::one()
    }
    /// Get buf `length` bits of unit at `index`th unit's `offset`th bit
    #[inline]
    unsafe fn get_buf_unit_bits(&self, offset: usize, length: usize) -> B {
        let offset = Self::bit_index_to_buf(offset);
        let ptr = self.buf.ptr().offset(offset.0 as isize);
        let unit = Self::buf_unit_bits();
        // Shift left to discard high bits, then right to discard low bits,
        // leaving the `length`-bit field right-aligned.
        (ptr::read(ptr) << (unit - offset.1 - length)) >> (unit - length)
    }
    /// Converts capacity to storage size
    ///
    /// Number of `B` words needed for `capacity` elements, rounded up.
    #[inline]
    fn capacity_to_buf(capacity: usize) -> usize {
        if capacity == 0 {
            0
        } else {
            // Ceiling division in bits: ceil(capacity * N / word_bits).
            (capacity * Self::unit_bits() - 1) / (Self::buf_unit_bits()) + 1
        }
    }
    /// Converts the storage size to capacity.
    #[inline]
    fn capacity_from_buf(buf_cap: usize) -> usize {
        buf_cap * Self::buf_unit_bits() / Self::unit_bits()
    }
    /// Converts the vector index to buf `(index, offset)` tuple.
    #[inline]
    fn index_to_buf(index: usize) -> (usize, usize) {
        let elem_bits = Self::buf_unit_bits();
        let bits_index = index * Self::unit_bits();
        (bits_index / elem_bits, bits_index % elem_bits)
    }
    /// Converts the vector index range to buf `(index, offset)` range tuple.
    #[inline]
    fn index_range_to_buf(index: usize, length: usize) -> ((usize, usize), (usize, usize)) {
        (Self::index_to_buf(index),
         Self::index_to_buf(index + length))
    }
    /// Converts bit index to buf `(index, offset)` tuple.
    #[inline]
    fn bit_index_to_buf(index: usize) -> (usize, usize) {
        let unit = Self::buf_unit_bits();
        (index / unit, index % unit)
    }
    /// Returns size of `B`.
    #[inline]
    fn buf_unit_bits() -> usize {
        mem::size_of::<B>() * 8
    }
    /// Returns unit of bits - that is `NbitsVec`'s `N`.
    #[inline]
    fn unit_bits() -> usize {
        T::bits()
    }
}
|
// Copyright (c) 2001-2016, Alliance for Open Media. All rights reserved
// Copyright (c) 2017-2019, The rav1e contributors. All rights reserved
//
// This source code is subject to the terms of the BSD 2 Clause License and
// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
// was not distributed with this source code in the LICENSE file, you can
// obtain it at www.aomedia.org/license/software. If the Alliance for Open
// Media Patent License 1.0 was not distributed with this source code in the
// PATENTS file, you can obtain it at www.aomedia.org/license/patent.
#![allow(non_camel_case_types)]
use crate::api::*;
use crate::cdef::*;
use crate::context::*;
use crate::deblock::*;
use crate::dist::*;
use crate::ec::{Writer, WriterCounter, OD_BITRES};
use crate::encode_block_with_modes;
use crate::encoder::{FrameInvariants, IMPORTANCE_BLOCK_SIZE};
use crate::frame::Frame;
use crate::frame::*;
use crate::header::ReferenceMode;
use crate::lrf::*;
use crate::luma_ac;
use crate::mc::MotionVector;
use crate::me::*;
use crate::motion_compensate;
use crate::partition::RefType::*;
use crate::partition::*;
use crate::predict::{
AngleDelta, IntraEdgeFilterParameters, IntraParam, PredictionMode,
RAV1E_INTER_COMPOUND_MODES, RAV1E_INTER_MODES_MINIMAL, RAV1E_INTRA_MODES,
};
use crate::rdo_tables::*;
use crate::tiling::*;
use crate::transform::{TxSet, TxSize, TxType, RAV1E_TX_TYPES};
use crate::util::{Aligned, CastFromPrimitive, Pixel};
use crate::write_tx_blocks;
use crate::write_tx_tree;
use crate::Tune;
use crate::{encode_block_post_cdef, encode_block_pre_cdef};
use crate::partition::PartitionType::*;
use arrayvec::*;
use itertools::izip;
use std::fmt;
/// Strategy used for measuring distortion and rate during RDO decisions.
#[derive(Copy, Clone, PartialEq)]
pub enum RDOType {
  /// Pixel-domain distortion with the exact entropy-coder rate.
  PixelDistRealRate,
  /// Transform-domain distortion with the exact entropy-coder rate.
  TxDistRealRate,
  /// Transform-domain distortion with a rate estimated from tx-distortion.
  TxDistEstRate,
}
impl RDOType {
  /// Whether this mode measures distortion in the transform domain.
  pub fn needs_tx_dist(self) -> bool {
    match self {
      // Pixel-domain distortion and exact ec rate
      RDOType::PixelDistRealRate => false,
      // Tx-domain distortion, whether the rate is exact or estimated
      RDOType::TxDistRealRate | RDOType::TxDistEstRate => true,
    }
  }
  /// Whether this mode requires the exact entropy-coder rate for the
  /// coefficients (as opposed to a tx-distortion based estimate).
  pub fn needs_coeff_rate(self) -> bool {
    match self {
      RDOType::PixelDistRealRate | RDOType::TxDistRealRate => true,
      RDOType::TxDistEstRate => false,
    }
  }
}
/// Winning result of a partition search: the chosen split type together with
/// the per-partition mode decisions that produced `rd_cost`.
#[derive(Clone)]
pub struct PartitionGroupParameters {
  /// Total rate-distortion cost of this partitioning choice.
  pub rd_cost: f64,
  /// Chosen partition split type.
  pub part_type: PartitionType,
  /// Mode decision for each sub-partition (at most four).
  pub part_modes: ArrayVec<[PartitionParameters; 4]>,
}
/// Full mode decision for a single partition, as produced by RDO.
#[derive(Clone, Debug)]
pub struct PartitionParameters {
  /// Rate-distortion cost of this decision.
  pub rd_cost: f64,
  /// Block offset of the partition within the tile.
  pub bo: TileBlockOffset,
  /// Size of the partition's block.
  pub bsize: BlockSize,
  /// Chosen luma prediction mode.
  pub pred_mode_luma: PredictionMode,
  /// Chosen chroma prediction mode.
  pub pred_mode_chroma: PredictionMode,
  /// Chroma-from-luma parameters for the chroma prediction.
  pub pred_cfl_params: CFLParams,
  /// Directional-mode angle adjustment.
  pub angle_delta: AngleDelta,
  /// Reference frames for the two prediction slots.
  pub ref_frames: [RefType; 2],
  /// Motion vectors for the two prediction slots.
  pub mvs: [MotionVector; 2],
  /// Whether the block is coded as skip.
  pub skip: bool,
  /// Whether any non-zero coefficients were produced.
  pub has_coeff: bool,
  /// Chosen transform size.
  pub tx_size: TxSize,
  /// Chosen transform type.
  pub tx_type: TxType,
  /// Segment index.
  pub sidx: u8,
}
impl Default for PartitionParameters {
  /// Sentinel "no decision yet" value: `rd_cost` is `f64::MAX` so any real
  /// candidate compares better, and `bsize` is `BLOCK_INVALID`.
  fn default() -> Self {
    PartitionParameters {
      rd_cost: std::f64::MAX,
      bo: TileBlockOffset::default(),
      bsize: BlockSize::BLOCK_INVALID,
      pred_mode_luma: PredictionMode::default(),
      pred_mode_chroma: PredictionMode::default(),
      pred_cfl_params: CFLParams::default(),
      angle_delta: AngleDelta::default(),
      ref_frames: [RefType::INTRA_FRAME, RefType::NONE_FRAME],
      mvs: [MotionVector::default(); 2],
      skip: false,
      has_coeff: true,
      tx_size: TxSize::TX_4X4,
      tx_type: TxType::DCT_DCT,
      sidx: 0,
    }
  }
}
/// Estimates the coefficient rate for a transform block by linearly
/// interpolating the precomputed `RDO_RATE_TABLE` between the two distortion
/// bins bracketing `fast_distortion`.
pub fn estimate_rate(qindex: u8, ts: TxSize, fast_distortion: u64) -> u64 {
  let bs_index = ts as usize;
  let q_bin_idx = (qindex as usize) / RDO_QUANT_DIV;
  // Clamp to the table so the upper bin (down + 1) is always valid.
  let bin_idx_down =
    ((fast_distortion) / RATE_EST_BIN_SIZE).min((RDO_NUM_BINS - 2) as u64);
  let bin_idx_up = (bin_idx_down + 1).min((RDO_NUM_BINS - 1) as u64);
  let x0 = (bin_idx_down * RATE_EST_BIN_SIZE) as i64;
  let x1 = (bin_idx_up * RATE_EST_BIN_SIZE) as i64;
  let y0 = RDO_RATE_TABLE[q_bin_idx][bs_index][bin_idx_down as usize] as i64;
  let y1 = RDO_RATE_TABLE[q_bin_idx][bs_index][bin_idx_up as usize] as i64;
  // Fixed-point lerp with 8 fractional bits; clamp negative results to 0.
  let slope = ((y1 - y0) << 8) / (x1 - x0);
  (y0 + (((fast_distortion as i64 - x0) * slope) >> 8)).max(0) as u64
}
// The microbenchmarks perform better with inlining turned off
//
// Computes the CDEF distortion of one 8x8 block: SSE between the two regions
// scaled by an activity-dependent "ssim boost" derived from the variances.
#[inline(never)]
fn cdef_dist_wxh_8x8<T: Pixel>(
  src1: &PlaneRegion<'_, T>, src2: &PlaneRegion<'_, T>, bit_depth: usize,
) -> RawDistortion {
  // Only valid on non-subsampled planes.
  debug_assert!(src1.plane_cfg.xdec == 0);
  debug_assert!(src1.plane_cfg.ydec == 0);
  debug_assert!(src2.plane_cfg.xdec == 0);
  debug_assert!(src2.plane_cfg.ydec == 0);
  let coeff_shift = bit_depth - 8;
  // Sum into columns to improve auto-vectorization
  let mut sum_s_cols: [u16; 8] = [0; 8];
  let mut sum_d_cols: [u16; 8] = [0; 8];
  let mut sum_s2_cols: [u32; 8] = [0; 8];
  let mut sum_d2_cols: [u32; 8] = [0; 8];
  let mut sum_sd_cols: [u32; 8] = [0; 8];
  for j in 0..8 {
    let row1 = &src1[j][0..8];
    let row2 = &src2[j][0..8];
    for (sum_s, sum_d, sum_s2, sum_d2, sum_sd, s, d) in izip!(
      &mut sum_s_cols,
      &mut sum_d_cols,
      &mut sum_s2_cols,
      &mut sum_d2_cols,
      &mut sum_sd_cols,
      row1,
      row2
    ) {
      // Don't convert directly to u32 to allow better vectorization
      let s: u16 = u16::cast_from(*s);
      let d: u16 = u16::cast_from(*d);
      *sum_s += s;
      *sum_d += d;
      // Convert to u32 to avoid overflows when multiplying
      let s: u32 = s as u32;
      let d: u32 = d as u32;
      *sum_s2 += s * s;
      *sum_d2 += d * d;
      *sum_sd += s * d;
    }
  }
  // Sum together the sum of columns
  let sum_s: i64 =
    sum_s_cols.iter().map(|&a| u32::cast_from(a)).sum::<u32>() as i64;
  let sum_d: i64 =
    sum_d_cols.iter().map(|&a| u32::cast_from(a)).sum::<u32>() as i64;
  let sum_s2: i64 = sum_s2_cols.iter().sum::<u32>() as i64;
  let sum_d2: i64 = sum_d2_cols.iter().sum::<u32>() as i64;
  let sum_sd: i64 = sum_sd_cols.iter().sum::<u32>() as i64;
  // Use sums to calculate distortion
  // Variances over the 64 samples (>>6), with rounding (+32).
  let svar = sum_s2 - ((sum_s * sum_s + 32) >> 6);
  let dvar = sum_d2 - ((sum_d * sum_d + 32) >> 6);
  let sse = (sum_d2 + sum_s2 - 2 * sum_sd) as f64;
  //The two constants were tuned for CDEF, but can probably be better tuned for use in general RDO
  let ssim_boost = (4033_f64 / 16_384_f64)
    * (svar + dvar + (16_384 << (2 * coeff_shift))) as f64
    / f64::sqrt(((16_265_089i64 << (4 * coeff_shift)) + svar * dvar) as f64);
  RawDistortion::new((sse * ssim_boost + 0.5_f64) as u64)
}
// CDEF distortion of a w x h region, accumulated over 8x8 sub-blocks, each
// weighted by the caller-supplied bias for its position.
#[allow(unused)]
pub fn cdef_dist_wxh<T: Pixel, F: Fn(Area, BlockSize) -> DistortionScale>(
  src1: &PlaneRegion<'_, T>, src2: &PlaneRegion<'_, T>, w: usize, h: usize,
  bit_depth: usize, compute_bias: F,
) -> Distortion {
  // Dimensions must tile exactly into 8x8 blocks.
  assert!(w & 0x7 == 0);
  assert!(h & 0x7 == 0);
  debug_assert!(src1.plane_cfg.xdec == 0);
  debug_assert!(src1.plane_cfg.ydec == 0);
  debug_assert!(src2.plane_cfg.xdec == 0);
  debug_assert!(src2.plane_cfg.ydec == 0);
  let mut sum = Distortion::zero();
  for j in 0isize..h as isize / 8 {
    for i in 0isize..w as isize / 8 {
      let area = Area::StartingAt { x: i * 8, y: j * 8 };
      let value = cdef_dist_wxh_8x8(
        &src1.subregion(area),
        &src2.subregion(area),
        bit_depth,
      );
      // cdef is always called on non-subsampled planes, so BLOCK_8X8 is
      // correct here.
      sum += value * compute_bias(area, BlockSize::BLOCK_8X8);
    }
  }
  sum
}
// Sum of Squared Error for a wxh block
//
// Accumulated per importance block so each sub-block can be weighted by the
// caller-supplied bias for its position.
pub fn sse_wxh<T: Pixel, F: Fn(Area, BlockSize) -> DistortionScale>(
  src1: &PlaneRegion<'_, T>, src2: &PlaneRegion<'_, T>, w: usize, h: usize,
  compute_bias: F,
) -> Distortion {
  assert!(w & (MI_SIZE - 1) == 0);
  assert!(h & (MI_SIZE - 1) == 0);
  // To bias the distortion correctly, compute it in blocks up to the size
  // importance block size in a non-subsampled plane.
  let imp_block_w = IMPORTANCE_BLOCK_SIZE.min(w);
  let imp_block_h = IMPORTANCE_BLOCK_SIZE.min(h);
  let imp_bsize = BlockSize::from_width_and_height(imp_block_w, imp_block_h);
  // Block dimensions in this (possibly subsampled) plane's coordinates.
  let block_w = imp_block_w >> src1.plane_cfg.xdec;
  let block_h = imp_block_h >> src1.plane_cfg.ydec;
  let mut sse = Distortion::zero();
  for block_y in 0..h / block_h {
    for block_x in 0..w / block_w {
      let mut value = 0;
      for j in 0..block_h {
        let s1 = &src1[block_y * block_h + j]
          [block_x * block_w..(block_x + 1) * block_w];
        let s2 = &src2[block_y * block_h + j]
          [block_x * block_w..(block_x + 1) * block_w];
        let row_sse = s1
          .iter()
          .zip(s2)
          .map(|(&a, &b)| {
            let c = (i16::cast_from(a) - i16::cast_from(b)) as i32;
            (c * c) as u32
          })
          .sum::<u32>();
        value += row_sse as u64;
      }
      let bias = compute_bias(
        // StartingAt gives the correct block offset.
        Area::StartingAt {
          x: (block_x * block_w) as isize,
          y: (block_y * block_h) as isize,
        },
        imp_bsize,
      );
      sse += RawDistortion::new(value) * bias;
    }
  }
  sse
}
// Compute the pixel-domain distortion for an encode
//
// Luma uses CDEF distortion for psychovisual tuning (when the block is at
// least 8x8) and plain SSE otherwise; chroma, when present and requested,
// always uses SSE. Each plane's distortion is scaled by `fi.dist_scale`.
fn compute_distortion<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &TileStateMut<'_, T>, bsize: BlockSize,
  is_chroma_block: bool, tile_bo: TileBlockOffset, luma_only: bool,
) -> ScaledDistortion {
  let area = Area::BlockStartingAt { bo: tile_bo.0 };
  let input_region = ts.input_tile.planes[0].subregion(area);
  let rec_region = ts.rec.planes[0].subregion(area);
  let mut distortion = match fi.config.tune {
    Tune::Psychovisual if bsize.width() >= 8 && bsize.height() >= 8 => {
      cdef_dist_wxh(
        &input_region,
        &rec_region,
        bsize.width(),
        bsize.height(),
        fi.sequence.bit_depth,
        |bias_area, bsize| {
          distortion_scale(
            fi,
            input_region.subregion(bias_area).frame_block_offset(),
            bsize,
          )
        },
      )
    }
    Tune::Psnr | Tune::Psychovisual => sse_wxh(
      &input_region,
      &rec_region,
      bsize.width(),
      bsize.height(),
      |bias_area, bsize| {
        distortion_scale(
          fi,
          input_region.subregion(bias_area).frame_block_offset(),
          bsize,
        )
      },
    ),
  } * fi.dist_scale[0];
  if !luma_only {
    let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
    let mask = !(MI_SIZE - 1);
    // Chroma dimensions rounded down to whole MI units.
    let mut w_uv = (bsize.width() >> xdec) & mask;
    let mut h_uv = (bsize.height() >> ydec) & mask;
    if (w_uv == 0 || h_uv == 0) && is_chroma_block {
      w_uv = MI_SIZE;
      h_uv = MI_SIZE;
    }
    // Add chroma distortion only when it is available
    if fi.config.chroma_sampling != ChromaSampling::Cs400
      && w_uv > 0
      && h_uv > 0
    {
      for p in 1..3 {
        let input_region = ts.input_tile.planes[p].subregion(area);
        let rec_region = ts.rec.planes[p].subregion(area);
        distortion += sse_wxh(
          &input_region,
          &rec_region,
          w_uv,
          h_uv,
          |bias_area, bsize| {
            distortion_scale(
              fi,
              input_region.subregion(bias_area).frame_block_offset(),
              bsize,
            )
          },
        ) * fi.dist_scale[p];
      }
    };
  }
  distortion
}
/// Compute the transform-domain distortion for an encode.
///
/// `tx_dist` is the distortion already estimated during coefficient coding.
/// Skip blocks have no coded coefficients, so their distortion (luma, and
/// chroma unless `luma_only`) is recomputed in the pixel domain instead.
fn compute_tx_distortion<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &TileStateMut<'_, T>, bsize: BlockSize,
  is_chroma_block: bool, tile_bo: TileBlockOffset, tx_dist: ScaledDistortion,
  skip: bool, luma_only: bool,
) -> ScaledDistortion {
  // Tx-domain distortion is only used with PSNR tuning.
  assert!(fi.config.tune == Tune::Psnr);
  let area = Area::BlockStartingAt { bo: tile_bo.0 };
  let input_region = ts.input_tile.planes[0].subregion(area);
  let rec_region = ts.rec.planes[0].subregion(area);
  let mut distortion = if skip {
    sse_wxh(
      &input_region,
      &rec_region,
      bsize.width(),
      bsize.height(),
      |bias_area, bsize| {
        distortion_scale(
          fi,
          input_region.subregion(bias_area).frame_block_offset(),
          bsize,
        )
      },
    ) * fi.dist_scale[0]
  } else {
    tx_dist
  };
  // For non-skip blocks chroma distortion is already folded into `tx_dist`;
  // only skip blocks need it recomputed here.
  if !luma_only && skip {
    let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
    // Round the subsampled chroma dimensions down to a multiple of MI_SIZE.
    let mask = !(MI_SIZE - 1);
    let mut w_uv = (bsize.width() >> xdec) & mask;
    let mut h_uv = (bsize.height() >> ydec) & mask;
    if (w_uv == 0 || h_uv == 0) && is_chroma_block {
      w_uv = MI_SIZE;
      h_uv = MI_SIZE;
    }
    // Add chroma distortion only when it is available
    if fi.config.chroma_sampling != ChromaSampling::Cs400
      && w_uv > 0
      && h_uv > 0
    {
      for p in 1..3 {
        let input_region = ts.input_tile.planes[p].subregion(area);
        let rec_region = ts.rec.planes[p].subregion(area);
        distortion += sse_wxh(
          &input_region,
          &rec_region,
          w_uv,
          h_uv,
          |bias_area, bsize| {
            distortion_scale(
              fi,
              input_region.subregion(bias_area).frame_block_offset(),
              bsize,
            )
          },
        ) * fi.dist_scale[p];
      }
    }
  }
  distortion
}
/// Compute a scaling factor to multiply the distortion of a block by,
/// this factor is determined using temporal RDO.
///
/// Without temporal RDO every block gets the identity scale; otherwise the
/// scale is looked up from the per-frame importance map.
pub fn distortion_scale<T: Pixel>(
  fi: &FrameInvariants<T>, frame_bo: PlaneBlockOffset, bsize: BlockSize,
) -> DistortionScale {
  if !fi.config.temporal_rdo() {
    return DistortionScale::default();
  }
  // EncoderConfig::temporal_rdo() should always return false in situations
  // where distortion is computed on > 8x8 blocks, so we should never hit
  // this assert.
  assert!(bsize <= BlockSize::BLOCK_8X8);
  // Convert the block offset to importance-block (8x8) coordinates and
  // index the precomputed row-major scale map.
  let imp_x = frame_bo.0.x >> IMPORTANCE_BLOCK_TO_BLOCK_SHIFT;
  let imp_y = frame_bo.0.y >> IMPORTANCE_BLOCK_TO_BLOCK_SHIFT;
  fi.distortion_scales[imp_y * fi.w_in_imp_b + imp_x]
}
/// Derive a distortion scale from mb-tree style propagation costs.
///
/// The mbtree paper \cite{mbtree} applies a quantizer offset
///
///   QP_delta = -strength * log2(1 + (propagate_cost / intra_cost))
///
/// which in H.264 corresponds to Q' = Q * 2^(QP_delta/6). Since lambda is
/// proportional to Q^2, minimizing D + lambda' * R with the adjusted
/// quantizer is equivalent to keeping lambda fixed and minimizing
/// D * scale + lambda * R, where
///
///   scale = 2^(QP_delta / -3)
///         = (1 + (propagate_cost / intra_cost))^(strength / 3)
///
/// The paper empirically chooses strength = 2.0, but strength = 1.0 works
/// best in rav1e currently; this may relate to their 16x16 blocks versus our
/// 8x8 importance blocks, though everything should be scale invariant here,
/// so that's weird.
///
/// @article{mbtree,
///   title={A novel macroblock-tree algorithm for high-performance
///          optimization of dependent video coding in H.264/AVC},
///   author={Garrett-Glaser, Jason},
///   journal={Tech. Rep.},
///   year={2009},
///   url={https://pdfs.semanticscholar.org/032f/1ab7d9db385780a02eb2d579af8303b266d2.pdf}
/// }
pub fn distortion_scale_for(
  propagate_cost: f64, intra_cost: f64,
) -> DistortionScale {
  // A zero intra cost would divide by zero; fall back to no scaling.
  if intra_cost == 0. {
    return DistortionScale::default();
  }
  let ratio = (intra_cost + propagate_cost) / intra_cost;
  // strength = 1.0 (empirical, see above), so the exponent is simply 1/3.
  DistortionScale::new(ratio.powf(1.0 / 3.0))
}
/// Fixed point arithmetic version of distortion scale
#[repr(transparent)]
#[derive(Copy, Clone)]
pub struct DistortionScale(u32);

/// Distortion as produced by a metric, before any bias is applied.
#[repr(transparent)]
pub struct RawDistortion(u64);

/// Distortion after the per-block `DistortionScale` bias has been applied.
#[repr(transparent)]
pub struct Distortion(u64);

/// Distortion after scaling by the per-plane `dist_scale` factor.
#[repr(transparent)]
pub struct ScaledDistortion(u64);

impl DistortionScale {
  /// Bits past the radix point
  const SHIFT: u32 = 12;
  /// Number of bits used. Determines the max value.
  /// 24 bits is likely excessive.
  const BITS: u32 = 24;

  /// Convert a floating-point scale to fixed point, rounding to nearest
  /// and saturating at the largest representable value.
  pub fn new(scale: f64) -> Self {
    let max = ((1u64 << Self::BITS) - 1) as f64;
    let fixed = scale * (1u32 << Self::SHIFT) as f64 + 0.5;
    Self(fixed.min(max) as u32)
  }

  /// Multiply, round and shift
  /// Internal implementation, so don't use multiply trait.
  fn mul_u64(self, dist: u64) -> u64 {
    // Add half an LSB before shifting so the result rounds to nearest.
    let round = 1u64 << (Self::SHIFT - 1);
    (u64::from(self.0) * dist + round) >> Self::SHIFT
  }
}

// Default value for DistortionScale is a fixed point 1
impl Default for DistortionScale {
  fn default() -> Self {
    Self(1 << Self::SHIFT)
  }
}

impl fmt::Debug for DistortionScale {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    // Show the equivalent floating-point value for readability.
    write!(f, "{}", f64::from(*self))
  }
}

impl From<DistortionScale> for f64 {
  fn from(scale: DistortionScale) -> Self {
    let one = (1u32 << DistortionScale::SHIFT) as f64;
    scale.0 as f64 / one
  }
}

impl RawDistortion {
  /// Wrap a raw distortion value.
  pub fn new(dist: u64) -> Self {
    Self(dist)
  }
}

impl std::ops::Mul<DistortionScale> for RawDistortion {
  type Output = Distortion;
  // Applying a bias converts a raw distortion into a biased one.
  fn mul(self, rhs: DistortionScale) -> Distortion {
    Distortion(rhs.mul_u64(self.0))
  }
}

impl Distortion {
  /// The additive identity, for use as an accumulator seed.
  pub const fn zero() -> Self {
    Self(0)
  }
}

impl std::ops::Mul<f64> for Distortion {
  type Output = ScaledDistortion;
  // Scaling by a per-plane factor yields the final RD-ready distortion.
  fn mul(self, rhs: f64) -> ScaledDistortion {
    ScaledDistortion((self.0 as f64 * rhs) as u64)
  }
}

impl std::ops::AddAssign for Distortion {
  fn add_assign(&mut self, rhs: Self) {
    self.0 += rhs.0;
  }
}

impl ScaledDistortion {
  /// The additive identity, for use as an accumulator seed.
  pub const fn zero() -> Self {
    Self(0)
  }
}

impl std::ops::AddAssign for ScaledDistortion {
  fn add_assign(&mut self, rhs: Self) {
    self.0 += rhs.0;
  }
}
/// Combine distortion and rate into a Lagrangian RD cost: `D + lambda * R`.
///
/// `rate` is expressed in 1/2^OD_BITRES fractional-bit units as produced by
/// the entropy-coder rate counters.
pub fn compute_rd_cost<T: Pixel>(
  fi: &FrameInvariants<T>, rate: u32, distortion: ScaledDistortion,
) -> f64 {
  // Convert the fractional-bit counter into whole bits.
  let bits = f64::from(rate) / ((1 << OD_BITRES) as f64);
  distortion.0 as f64 + fi.lambda * bits
}
/// Choose a transform size and type for a block via RDO.
///
/// Starts from the largest rectangular transform for `bsize` (one level
/// split for inter blocks when inter tx splitting is enabled) and, when tx
/// size search is enabled, walks down the split hierarchy keeping the
/// best (size, type) pair by RD cost. Returns early with the defaults when
/// neither tx size nor tx type search applies.
pub fn rdo_tx_size_type<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, bsize: BlockSize, tile_bo: TileBlockOffset,
  luma_mode: PredictionMode, ref_frames: [RefType; 2], mvs: [MotionVector; 2],
  skip: bool,
) -> (TxSize, TxType) {
  let is_inter = !luma_mode.is_intra();
  let mut tx_size = max_txsize_rect_lookup[bsize as usize];
  if fi.enable_inter_txfm_split && is_inter && !skip {
    tx_size = sub_tx_size_map[tx_size as usize]; // Always choose one level split size
  }
  let mut best_tx_type = TxType::DCT_DCT;
  let mut best_tx_size = tx_size;
  let mut best_rd = std::f64::MAX;
  // Tx size search is intra-only and gated on the speed settings.
  let do_rdo_tx_size =
    fi.tx_mode_select && fi.config.speed_settings.rdo_tx_decision && !is_inter;
  // Search up to two split levels below the initial size, or none at all.
  let rdo_tx_depth = if do_rdo_tx_size { 2 } else { 0 };
  let mut cw_checkpoint = None;
  for _ in 0..=rdo_tx_depth {
    let tx_set = get_tx_set(tx_size, is_inter, fi.use_reduced_tx_set);
    // Tx type search only makes sense when the set has more than DCT_DCT.
    let do_rdo_tx_type = tx_set > TxSet::TX_SET_DCTONLY
      && fi.config.speed_settings.rdo_tx_decision
      && !is_inter
      && !skip;
    if !do_rdo_tx_size && !do_rdo_tx_type {
      return (best_tx_size, best_tx_type);
    };
    if cw_checkpoint.is_none() {
      // Only runs on the first iteration of the loop.
      // Avoids creating the checkpoint if we early exit above.
      cw_checkpoint = Some(cw.checkpoint());
    }
    let tx_types =
      if do_rdo_tx_type { RAV1E_TX_TYPES } else { &[TxType::DCT_DCT] };
    // Luma plane transform type decision
    let (tx_type, rd_cost) = rdo_tx_type_decision(
      fi, ts, cw, luma_mode, ref_frames, mvs, bsize, tile_bo, tx_size, tx_set,
      tx_types,
    );
    if rd_cost < best_rd {
      best_tx_size = tx_size;
      best_tx_type = tx_type;
      best_rd = rd_cost;
    }
    debug_assert!(tx_size.width_log2() <= bsize.width_log2());
    debug_assert!(tx_size.height_log2() <= bsize.height_log2());
    debug_assert!(
      tx_size.sqr() <= TxSize::TX_32X32 || tx_type == TxType::DCT_DCT
    );
    let next_tx_size = sub_tx_size_map[tx_size as usize];
    // Undo the trial encode before evaluating the next candidate size.
    cw.rollback(cw_checkpoint.as_ref().unwrap());
    if next_tx_size == tx_size {
      // The split map has converged; no smaller size exists.
      break;
    } else {
      tx_size = next_tx_size;
    };
  }
  (best_tx_size, best_tx_type)
}
#[inline]
/// Evaluate one luma mode together with a set of candidate chroma modes,
/// updating `best` whenever a trial encode beats the current best RD cost.
///
/// Runs trial encodes for skip=true (inter modes only) and, unless the skip
/// pass achieved zero distortion, for skip=false; each pass also searches
/// the applicable segmentation indices.
fn luma_chroma_mode_rdo<T: Pixel>(
  luma_mode: PredictionMode, fi: &FrameInvariants<T>, bsize: BlockSize,
  tile_bo: TileBlockOffset, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, rdo_type: RDOType,
  cw_checkpoint: &ContextWriterCheckpoint, best: &mut PartitionParameters,
  mvs: [MotionVector; 2], ref_frames: [RefType; 2],
  mode_set_chroma: &[PredictionMode], luma_mode_is_intra: bool,
  mode_context: usize, mv_stack: &ArrayVec<[CandidateMV; 9]>,
  angle_delta: AngleDelta,
) {
  let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
  let is_chroma_block =
    has_chroma(tile_bo, bsize, xdec, ydec, fi.sequence.chroma_sampling);
  // Find the best chroma prediction mode for the current luma prediction mode
  let mut chroma_rdo = |skip: bool| -> bool {
    let mut zero_distortion = false;
    // If skip is true or segmentation is turned off, sidx is not coded.
    let sidx_range = if skip || !fi.enable_segmentation {
      0..=0
    } else if fi.base_q_idx as i16
      + ts.segmentation.data[2][SegLvl::SEG_LVL_ALT_Q as usize]
      < 1
    {
      // Segment 2's qidx delta would underflow the quantizer; exclude it.
      0..=1
    } else {
      0..=2
    };
    for sidx in sidx_range {
      cw.bc.blocks.set_segmentation_idx(tile_bo, bsize, sidx);
      let (tx_size, tx_type) = rdo_tx_size_type(
        fi, ts, cw, bsize, tile_bo, luma_mode, ref_frames, mvs, skip,
      );
      for &chroma_mode in mode_set_chroma.iter() {
        // Count bits with a no-output writer to measure the rate.
        let wr = &mut WriterCounter::new();
        let tell = wr.tell_frac();
        if bsize >= BlockSize::BLOCK_8X8 && bsize.is_sqr() {
          cw.write_partition(
            wr,
            tile_bo,
            PartitionType::PARTITION_NONE,
            bsize,
          );
        }
        // TODO(yushin): luma and chroma would have different decision based on chroma format
        let need_recon_pixel =
          luma_mode_is_intra && tx_size.block_size() != bsize;
        encode_block_pre_cdef(&fi.sequence, ts, cw, wr, bsize, tile_bo, skip);
        let (has_coeff, tx_dist) = encode_block_post_cdef(
          fi,
          ts,
          cw,
          wr,
          luma_mode,
          chroma_mode,
          angle_delta,
          ref_frames,
          mvs,
          bsize,
          tile_bo,
          skip,
          CFLParams::default(),
          tx_size,
          tx_type,
          mode_context,
          mv_stack,
          rdo_type,
          need_recon_pixel,
          false,
        );
        let rate = wr.tell_frac() - tell;
        // Prefer the cheaper tx-domain estimate when it is valid for this
        // trial; otherwise measure distortion on reconstructed pixels.
        let distortion = if fi.use_tx_domain_distortion && !need_recon_pixel {
          compute_tx_distortion(
            fi,
            ts,
            bsize,
            is_chroma_block,
            tile_bo,
            tx_dist,
            skip,
            false,
          )
        } else {
          compute_distortion(fi, ts, bsize, is_chroma_block, tile_bo, false)
        };
        let is_zero_dist = distortion.0 == 0;
        let rd = compute_rd_cost(fi, rate, distortion);
        if rd < best.rd_cost {
          //if rd < best.rd_cost || luma_mode == PredictionMode::NEW_NEWMV {
          best.rd_cost = rd;
          best.pred_mode_luma = luma_mode;
          best.pred_mode_chroma = chroma_mode;
          best.angle_delta = angle_delta;
          best.ref_frames = ref_frames;
          best.mvs = mvs;
          best.skip = skip;
          best.has_coeff = has_coeff;
          best.tx_size = tx_size;
          best.tx_type = tx_type;
          best.sidx = sidx;
          zero_distortion = is_zero_dist;
        }
        // Undo the trial encode so the next candidate starts clean.
        cw.rollback(cw_checkpoint);
      }
    }
    zero_distortion
  };
  // Don't skip when using intra modes
  let zero_distortion =
    if !luma_mode_is_intra { chroma_rdo(true) } else { false };
  // early skip
  if !zero_distortion {
    chroma_rdo(false);
  }
}
/// RDO-based mode decision for a single block.
///
/// Searches inter candidates (on inter frames), then intra candidates
/// unless the best inter result is a zero-distortion skip, and finally
/// tries the CFL chroma mode when applicable. Writes the winning mode,
/// reference frames and motion vectors into the block context and returns
/// the chosen parameters.
pub fn rdo_mode_decision<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, bsize: BlockSize, tile_bo: TileBlockOffset,
  pmv_idxs: (usize, usize), inter_cfg: &InterConfig,
) -> PartitionParameters {
  let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
  let cw_checkpoint = cw.checkpoint();
  // Pick the cheapest valid distortion/rate estimation strategy.
  let rdo_type = if fi.use_tx_domain_rate {
    RDOType::TxDistEstRate
  } else if fi.use_tx_domain_distortion {
    RDOType::TxDistRealRate
  } else {
    RDOType::PixelDistRealRate
  };
  let mut best = if fi.frame_type.has_inter() {
    inter_frame_rdo_mode_decision(
      fi,
      ts,
      cw,
      bsize,
      tile_bo,
      pmv_idxs,
      inter_cfg,
      &cw_checkpoint,
      rdo_type,
    )
  } else {
    PartitionParameters::default()
  };
  let is_chroma_block =
    has_chroma(tile_bo, bsize, xdec, ydec, fi.sequence.chroma_sampling);
  // A winning inter skip means intra cannot improve the result; otherwise
  // let the intra search try to beat the inter best.
  if !best.skip {
    best = intra_frame_rdo_mode_decision(
      fi,
      ts,
      cw,
      bsize,
      tile_bo,
      &cw_checkpoint,
      rdo_type,
      best,
      is_chroma_block,
    );
  }
  // Chroma-from-luma refinement: only valid for intra luma on blocks where
  // CFL is allowed.
  if best.pred_mode_luma.is_intra() && is_chroma_block && bsize.cfl_allowed() {
    cw.bc.blocks.set_segmentation_idx(tile_bo, bsize, best.sidx);
    let chroma_mode = PredictionMode::UV_CFL_PRED;
    let cw_checkpoint = cw.checkpoint();
    let wr: &mut dyn Writer = &mut WriterCounter::new();
    let angle_delta = AngleDelta { y: best.angle_delta.y, uv: 0 };
    // Reconstruct luma first; CFL predicts chroma from the luma AC values.
    write_tx_blocks(
      fi,
      ts,
      cw,
      wr,
      best.pred_mode_luma,
      best.pred_mode_luma,
      angle_delta,
      tile_bo,
      bsize,
      best.tx_size,
      best.tx_type,
      false,
      CFLParams::default(),
      true,
      rdo_type,
      true,
    );
    cw.rollback(&cw_checkpoint);
    if fi.sequence.chroma_sampling != ChromaSampling::Cs400 {
      if let Some(cfl) = rdo_cfl_alpha(ts, tile_bo, bsize, fi) {
        let wr: &mut dyn Writer = &mut WriterCounter::new();
        let tell = wr.tell_frac();
        encode_block_pre_cdef(
          &fi.sequence,
          ts,
          cw,
          wr,
          bsize,
          tile_bo,
          best.skip,
        );
        let (has_coeff, _) = encode_block_post_cdef(
          fi,
          ts,
          cw,
          wr,
          best.pred_mode_luma,
          chroma_mode,
          angle_delta,
          best.ref_frames,
          best.mvs,
          bsize,
          tile_bo,
          best.skip,
          cfl,
          best.tx_size,
          best.tx_type,
          0,
          &[],
          rdo_type,
          true, // For CFL, luma should be always reconstructed.
          false,
        );
        let rate = wr.tell_frac() - tell;
        // For CFL, tx-domain distortion is not an option.
        let distortion =
          compute_distortion(fi, ts, bsize, is_chroma_block, tile_bo, false);
        let rd = compute_rd_cost(fi, rate, distortion);
        if rd < best.rd_cost {
          best.rd_cost = rd;
          best.pred_mode_chroma = chroma_mode;
          best.angle_delta = angle_delta;
          best.has_coeff = has_coeff;
          best.pred_cfl_params = cfl;
        }
        cw.rollback(&cw_checkpoint);
      }
    }
  }
  // Commit the winning decision to the block context for neighbors to use.
  cw.bc.blocks.set_mode(tile_bo, bsize, best.pred_mode_luma);
  cw.bc.blocks.set_ref_frames(tile_bo, bsize, best.ref_frames);
  cw.bc.blocks.set_motion_vectors(tile_bo, bsize, best.mvs);
  assert!(best.rd_cost >= 0_f64);
  PartitionParameters {
    bo: tile_bo,
    bsize,
    pred_mode_luma: best.pred_mode_luma,
    pred_mode_chroma: best.pred_mode_chroma,
    pred_cfl_params: best.pred_cfl_params,
    angle_delta: best.angle_delta,
    ref_frames: best.ref_frames,
    mvs: best.mvs,
    rd_cost: best.rd_cost,
    skip: best.skip,
    has_coeff: best.has_coeff,
    tx_size: best.tx_size,
    tx_type: best.tx_type,
    sidx: best.sidx,
  }
}
/// Search inter prediction modes for a block and return the best result.
///
/// Builds the set of usable reference frames, runs motion estimation per
/// reference, collects candidate (mode, reference) pairs (plus compound
/// candidates when allowed), optionally prunes them by SATD, and runs full
/// RD on the surviving candidates via `luma_chroma_mode_rdo`.
fn inter_frame_rdo_mode_decision<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, bsize: BlockSize, tile_bo: TileBlockOffset,
  pmv_idxs: (usize, usize), inter_cfg: &InterConfig,
  cw_checkpoint: &ContextWriterCheckpoint, rdo_type: RDOType,
) -> PartitionParameters {
  let mut best = PartitionParameters::default();
  // we can never have more than 7 reference frame sets
  let mut ref_frames_set = ArrayVec::<[_; 7]>::new();
  // again, max of 7 ref slots
  let mut ref_slot_set = ArrayVec::<[_; 7]>::new();
  // our implementation never returns more than 3 at the moment
  let mut mvs_from_me = ArrayVec::<[_; 3]>::new();
  let mut fwdref = None;
  let mut bwdref = None;
  for i in inter_cfg.allowed_ref_frames().iter().copied() {
    // Don't search LAST3 since it's used only for probs
    if i == LAST3_FRAME {
      continue;
    }
    // Deduplicate references that map to the same physical slot.
    if !ref_slot_set.contains(&fi.ref_frames[i.to_index()]) {
      // Remember the first forward and first backward reference so a
      // compound candidate can be formed later.
      if fwdref == None && i.is_fwd_ref() {
        fwdref = Some(ref_frames_set.len());
      }
      if bwdref == None && i.is_bwd_ref() {
        bwdref = Some(ref_frames_set.len());
      }
      ref_frames_set.push([i, NONE_FRAME]);
      let slot_idx = fi.ref_frames[i.to_index()];
      ref_slot_set.push(slot_idx);
    }
  }
  assert!(!ref_frames_set.is_empty());
  let mut inter_mode_set = ArrayVec::<[(PredictionMode, usize); 20]>::new();
  let mut mvs_set = ArrayVec::<[[MotionVector; 2]; 20]>::new();
  let mut satds = ArrayVec::<[u32; 20]>::new();
  let mut mv_stacks = ArrayVec::<[_; 20]>::new();
  let mut mode_contexts = ArrayVec::<[_; 7]>::new();
  let pmvs = ts.half_res_pmvs[pmv_idxs.0][pmv_idxs.1];
  let motion_estimation = if fi.config.speed_settings.diamond_me {
    crate::me::DiamondSearch::motion_estimation
  } else {
    crate::me::FullSearch::motion_estimation
  };
  for (i, &ref_frames) in ref_frames_set.iter().enumerate() {
    let mut mv_stack = ArrayVec::<[CandidateMV; 9]>::new();
    mode_contexts.push(cw.find_mvrefs(
      tile_bo,
      ref_frames,
      &mut mv_stack,
      bsize,
      fi,
      false,
    ));
    // Use up to the two strongest MV predictions to guide the search.
    let mut pmv = [MotionVector::default(); 2];
    if !mv_stack.is_empty() {
      pmv[0] = mv_stack[0].this_mv;
    }
    if mv_stack.len() > 1 {
      pmv[1] = mv_stack[1].this_mv;
    }
    let ref_slot = ref_slot_set[i] as usize;
    let cmv = pmvs[ref_slot].unwrap_or_else(Default::default);
    let b_me =
      motion_estimation(fi, ts, bsize, tile_bo, ref_frames[0], cmv, pmv);
    // Top-down encoding can reuse this result as a coarse predictor for
    // the sub-blocks of 32x32/64x64 partitions.
    if !fi.config.speed_settings.encode_bottomup
      && (bsize == BlockSize::BLOCK_32X32 || bsize == BlockSize::BLOCK_64X64)
    {
      ts.half_res_pmvs[pmv_idxs.0][pmv_idxs.1][ref_slot] = Some(b_me);
    };
    mvs_from_me.push([b_me, MotionVector::default()]);
    for &x in RAV1E_INTER_MODES_MINIMAL {
      inter_mode_set.push((x, i));
    }
    // NEAR/GLOBAL candidates require enough entries in the MV stack.
    if !mv_stack.is_empty() {
      inter_mode_set.push((PredictionMode::NEAR0MV, i));
    }
    if mv_stack.len() >= 2 {
      inter_mode_set.push((PredictionMode::GLOBALMV, i));
    }
    let include_near_mvs = fi.config.speed_settings.include_near_mvs;
    if include_near_mvs {
      if mv_stack.len() >= 3 {
        inter_mode_set.push((PredictionMode::NEAR1MV, i));
      }
      if mv_stack.len() >= 4 {
        inter_mode_set.push((PredictionMode::NEAR2MV, i));
      }
    }
    // Only add NEWMV when motion estimation found a vector that is both
    // nonzero and not already covered by the stacked candidates.
    let same_row_col = |x: &CandidateMV| {
      x.this_mv.row == mvs_from_me[i][0].row
        && x.this_mv.col == mvs_from_me[i][0].col
    };
    if !mv_stack
      .iter()
      .take(if include_near_mvs { 4 } else { 2 })
      .any(same_row_col)
      && (mvs_from_me[i][0].row != 0 || mvs_from_me[i][0].col != 0)
    {
      inter_mode_set.push((PredictionMode::NEWMV, i));
    }
    mv_stacks.push(mv_stack);
  }
  let sz = bsize.width_mi().min(bsize.height_mi());
  // To use non single reference modes, block width and height must be greater than 4.
  if fi.reference_mode != ReferenceMode::SINGLE && sz >= 2 {
    // Adding compound candidate
    if let Some(r0) = fwdref {
      if let Some(r1) = bwdref {
        let ref_frames = [ref_frames_set[r0][0], ref_frames_set[r1][0]];
        ref_frames_set.push(ref_frames);
        let mv0 = mvs_from_me[r0][0];
        let mv1 = mvs_from_me[r1][0];
        mvs_from_me.push([mv0, mv1]);
        let mut mv_stack = ArrayVec::<[CandidateMV; 9]>::new();
        mode_contexts.push(cw.find_mvrefs(
          tile_bo,
          ref_frames,
          &mut mv_stack,
          bsize,
          fi,
          true,
        ));
        for &x in RAV1E_INTER_COMPOUND_MODES {
          inter_mode_set.push((x, ref_frames_set.len() - 1));
        }
        mv_stacks.push(mv_stack);
      }
    }
  }
  let num_modes_rdo = if fi.config.speed_settings.prediction_modes
    >= PredictionModesSetting::ComplexAll
  {
    inter_mode_set.len()
  } else {
    9 // This number is determined by AWCY test
  };
  // Resolve the actual motion vectors for each candidate and, when we will
  // prune, compute a SATD score to rank candidates by.
  inter_mode_set.iter().for_each(|&(luma_mode, i)| {
    let mvs = match luma_mode {
      PredictionMode::NEWMV | PredictionMode::NEW_NEWMV => mvs_from_me[i],
      PredictionMode::NEARESTMV | PredictionMode::NEAREST_NEARESTMV => {
        if !mv_stacks[i].is_empty() {
          [mv_stacks[i][0].this_mv, mv_stacks[i][0].comp_mv]
        } else {
          [MotionVector::default(); 2]
        }
      }
      PredictionMode::NEAR0MV | PredictionMode::NEAR_NEARMV => {
        if mv_stacks[i].len() > 1 {
          [mv_stacks[i][1].this_mv, mv_stacks[i][1].comp_mv]
        } else {
          [MotionVector::default(); 2]
        }
      }
      PredictionMode::NEAR1MV | PredictionMode::NEAR2MV => [
        mv_stacks[i]
          [luma_mode as usize - PredictionMode::NEAR0MV as usize + 1]
          .this_mv,
        mv_stacks[i]
          [luma_mode as usize - PredictionMode::NEAR0MV as usize + 1]
          .comp_mv,
      ],
      PredictionMode::NEAREST_NEWMV => {
        [mv_stacks[i][0].this_mv, mvs_from_me[i][1]]
      }
      PredictionMode::NEW_NEARESTMV => {
        [mvs_from_me[i][0], mv_stacks[i][0].comp_mv]
      }
      PredictionMode::GLOBALMV | PredictionMode::GLOBAL_GLOBALMV => {
        [MotionVector::default(); 2]
      }
      _ => {
        unimplemented!();
      }
    };
    mvs_set.push(mvs);
    // Calculate SATD for each mode
    if num_modes_rdo != inter_mode_set.len() {
      let tile_rect = ts.tile_rect();
      let rec = &mut ts.rec.planes[0];
      let po = tile_bo.plane_offset(rec.plane_cfg);
      let mut rec_region =
        rec.subregion_mut(Area::BlockStartingAt { bo: tile_bo.0 });
      luma_mode.predict_inter(
        fi,
        tile_rect,
        0,
        po,
        &mut rec_region,
        bsize.width(),
        bsize.height(),
        ref_frames_set[i],
        mvs,
      );
      let plane_org = ts.input_tile.planes[0]
        .subregion(Area::BlockStartingAt { bo: tile_bo.0 });
      let plane_ref = rec_region.as_const();
      let satd = get_satd(
        &plane_org,
        &plane_ref,
        bsize,
        fi.sequence.bit_depth,
        fi.cpu_feature_level,
      );
      satds.push(satd);
    } else {
      satds.push(0);
    }
  });
  // Rank candidates by SATD when pruning, then full-RD the survivors.
  let mut sorted = izip!(inter_mode_set, mvs_set, satds).collect::<Vec<_>>();
  if num_modes_rdo != sorted.len() {
    sorted.sort_by_key(|((_mode, _i), _mvs, satd)| *satd);
  }
  sorted.iter().take(num_modes_rdo).for_each(
    |&((luma_mode, i), mvs, _satd)| {
      let mode_set_chroma = ArrayVec::from([luma_mode]);
      luma_chroma_mode_rdo(
        luma_mode,
        fi,
        bsize,
        tile_bo,
        ts,
        cw,
        rdo_type,
        cw_checkpoint,
        &mut best,
        mvs,
        ref_frames_set[i],
        &mode_set_chroma,
        false,
        mode_contexts[i],
        &mv_stacks[i],
        AngleDelta::default(),
      );
    },
  );
  best
}
/// Search intra prediction modes for a block, starting from `best` (which
/// may hold an inter result to beat) and returning the improved parameters.
///
/// Modes are first ordered by their coding probability, then the less
/// likely half is re-ranked by SATD of an actual prediction; the top
/// `num_modes_rdo` candidates receive a full RD evaluation. Finally, when
/// fine directional intra is enabled, the winner's angle deltas are refined.
fn intra_frame_rdo_mode_decision<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, bsize: BlockSize, tile_bo: TileBlockOffset,
  cw_checkpoint: &ContextWriterCheckpoint, rdo_type: RDOType,
  mut best: PartitionParameters, is_chroma_block: bool,
) -> PartitionParameters {
  let num_modes_rdo: usize;
  let mut modes = ArrayVec::<[_; INTRA_MODES]>::new();
  // Reduce number of prediction modes at higher speed levels
  num_modes_rdo = if (fi.frame_type == FrameType::KEY
    && fi.config.speed_settings.prediction_modes
      >= PredictionModesSetting::ComplexKeyframes)
    || (fi.frame_type.has_inter()
      && fi.config.speed_settings.prediction_modes
        >= PredictionModesSetting::ComplexAll)
  {
    7
  } else {
    3
  };
  let intra_mode_set = RAV1E_INTRA_MODES;
  // Find mode with lowest rate cost
  {
    // Turn the CDF into per-mode probabilities and sort so that modes that
    // are cheapest to signal come first.
    let probs_all = if fi.frame_type.has_inter() {
      cw.get_cdf_intra_mode(bsize)
    } else {
      cw.get_cdf_intra_mode_kf(tile_bo)
    }
    .iter()
    .take(INTRA_MODES)
    .scan(32768, |z, &a| {
      let d = *z - a;
      *z = a;
      Some(!d)
    })
    .collect::<ArrayVec<[_; INTRA_MODES]>>();
    modes.try_extend_from_slice(intra_mode_set).unwrap();
    modes.sort_by_key(|&a| probs_all[a as usize]);
  }
  // If tx partition (i.e. fi.tx_mode_select) is enabled, the below intra prediction screening
  // may be improved by emulating prediction for each tx block.
  {
    let satds = {
      // FIXME: If tx partition is used, this whole sads block should be fixed
      let tx_size = bsize.tx_size();
      let edge_buf = {
        let rec = &ts.rec.planes[0].as_const();
        let po = tile_bo.plane_offset(rec.plane_cfg);
        // FIXME: If tx partition is used, get_intra_edges() should be called for each tx block
        get_intra_edges(
          rec,
          tile_bo,
          0,
          0,
          bsize,
          po,
          tx_size,
          fi.sequence.bit_depth,
          None,
          fi.sequence.enable_intra_edge_filter,
          IntraParam::None,
        )
      };
      let ief_params = if fi.sequence.enable_intra_edge_filter {
        let above_block_info = ts.above_block_info(tile_bo, 0);
        let left_block_info = ts.left_block_info(tile_bo, 0);
        Some(IntraEdgeFilterParameters::new(
          0,
          above_block_info,
          left_block_info,
        ))
      } else {
        None
      };
      // Score only the less-probable half of the list by SATD; the most
      // probable modes keep their probability-based order.
      let mut satds_all = [0; INTRA_MODES];
      for &luma_mode in modes.iter().skip(num_modes_rdo / 2) {
        let tile_rect = ts.tile_rect();
        let rec = &mut ts.rec.planes[0];
        let mut rec_region =
          rec.subregion_mut(Area::BlockStartingAt { bo: tile_bo.0 });
        // FIXME: If tx partition is used, luma_mode.predict_intra() should be called for each tx block
        luma_mode.predict_intra(
          tile_rect,
          &mut rec_region,
          tx_size,
          fi.sequence.bit_depth,
          &[0i16; 2],
          IntraParam::None,
          if luma_mode.is_directional() { ief_params } else { None },
          &edge_buf,
          fi.cpu_feature_level,
        );
        let plane_org = ts.input_tile.planes[0]
          .subregion(Area::BlockStartingAt { bo: tile_bo.0 });
        let plane_ref = rec_region.as_const();
        satds_all[luma_mode as usize] = get_satd(
          &plane_org,
          &plane_ref,
          tx_size.block_size(),
          fi.sequence.bit_depth,
          fi.cpu_feature_level,
        );
      }
      satds_all
    };
    modes[num_modes_rdo / 2..].sort_by_key(|&a| satds[a as usize]);
  }
  debug_assert!(num_modes_rdo >= 1);
  // Full RD on the highest-ranked candidates; DC is also tried for chroma
  // whenever the luma mode is not already DC.
  modes.iter().take(num_modes_rdo).for_each(|&luma_mode| {
    let mvs = [MotionVector::default(); 2];
    let ref_frames = [INTRA_FRAME, NONE_FRAME];
    let mut mode_set_chroma = ArrayVec::<[_; 2]>::new();
    mode_set_chroma.push(luma_mode);
    if is_chroma_block && luma_mode != PredictionMode::DC_PRED {
      mode_set_chroma.push(PredictionMode::DC_PRED);
    }
    luma_chroma_mode_rdo(
      luma_mode,
      fi,
      bsize,
      tile_bo,
      ts,
      cw,
      rdo_type,
      cw_checkpoint,
      &mut best,
      mvs,
      ref_frames,
      &mode_set_chroma,
      true,
      0,
      &ArrayVec::<[CandidateMV; 9]>::new(),
      AngleDelta::default(),
    );
  });
  if fi.config.speed_settings.fine_directional_intra
    && bsize >= BlockSize::BLOCK_8X8
  {
    // Find the best angle delta for the current best prediction mode
    let luma_deltas = best.pred_mode_luma.angle_delta_count();
    let chroma_deltas = best.pred_mode_chroma.angle_delta_count();
    let mvs = [MotionVector::default(); 2];
    let ref_frames = [INTRA_FRAME, NONE_FRAME];
    let mode_set_chroma = [best.pred_mode_chroma];
    let mv_stack = ArrayVec::<[_; 9]>::new();
    let mut best_angle_delta_y = best.angle_delta.y;
    // Re-run RD only for delta pairs that differ from the current best;
    // returns the (possibly updated) best luma delta.
    let mut angle_delta_rdo = |y, uv| -> i8 {
      if best.angle_delta.y != y || best.angle_delta.uv != uv {
        luma_chroma_mode_rdo(
          best.pred_mode_luma,
          fi,
          bsize,
          tile_bo,
          ts,
          cw,
          rdo_type,
          cw_checkpoint,
          &mut best,
          mvs,
          ref_frames,
          &mode_set_chroma,
          true,
          0,
          &mv_stack,
          AngleDelta { y, uv },
        );
      }
      best.angle_delta.y
    };
    // First search the luma delta (chroma follows when it has deltas too),
    // then refine the chroma delta with the luma winner held fixed.
    for i in 0..luma_deltas {
      let angle_delta_y =
        if luma_deltas == 1 { 0 } else { i - MAX_ANGLE_DELTA as i8 };
      let angle_delta_uv = if chroma_deltas == 1 { 0 } else { angle_delta_y };
      best_angle_delta_y = angle_delta_rdo(angle_delta_y, angle_delta_uv);
    }
    for j in 0..chroma_deltas {
      let angle_delta_uv =
        if chroma_deltas == 1 { 0 } else { j - MAX_ANGLE_DELTA as i8 };
      angle_delta_rdo(best_angle_delta_y, angle_delta_uv);
    }
  }
  best
}
/// Search the best CFL alpha per chroma plane by minimizing prediction SSE.
///
/// Returns `None` when both planes prefer alpha 0 (i.e. plain DC), since
/// CFL then offers no benefit; otherwise returns the packed `CFLParams`.
pub fn rdo_cfl_alpha<T: Pixel>(
  ts: &mut TileStateMut<'_, T>, tile_bo: TileBlockOffset, bsize: BlockSize,
  fi: &FrameInvariants<T>,
) -> Option<CFLParams> {
  let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
  let uv_tx_size = bsize.largest_chroma_tx_size(xdec, ydec);
  debug_assert!(bsize.subsampled_size(xdec, ydec) == uv_tx_size.block_size());
  // Luma AC coefficients drive the CFL prediction for both chroma planes.
  let mut ac: Aligned<[i16; 32 * 32]> = Aligned::uninitialized();
  luma_ac(&mut ac.data, ts, tile_bo, bsize);
  let best_alpha: ArrayVec<[i16; 2]> = (1..3)
    .map(|p| {
      let &PlaneConfig { xdec, ydec, .. } = ts.rec.planes[p].plane_cfg;
      let tile_rect = ts.tile_rect().decimated(xdec, ydec);
      let rec = &mut ts.rec.planes[p];
      let input = &ts.input_tile.planes[p];
      let po = tile_bo.plane_offset(rec.plane_cfg);
      let edge_buf = get_intra_edges(
        &rec.as_const(),
        tile_bo,
        0,
        0,
        bsize,
        po,
        uv_tx_size,
        fi.sequence.bit_depth,
        Some(PredictionMode::UV_CFL_PRED),
        fi.sequence.enable_intra_edge_filter,
        IntraParam::None,
      );
      // SSE of the CFL prediction against the source for a given alpha.
      let mut alpha_cost = |alpha: i16| -> u64 {
        let mut rec_region =
          rec.subregion_mut(Area::BlockStartingAt { bo: tile_bo.0 });
        PredictionMode::UV_CFL_PRED.predict_intra(
          tile_rect,
          &mut rec_region,
          uv_tx_size,
          fi.sequence.bit_depth,
          &ac.data,
          IntraParam::Alpha(alpha),
          None,
          &edge_buf,
          fi.cpu_feature_level,
        );
        sse_wxh(
          &input.subregion(Area::BlockStartingAt { bo: tile_bo.0 }),
          &rec_region.as_const(),
          uv_tx_size.width(),
          uv_tx_size.height(),
          |_, _| DistortionScale::default(), // We're not doing RDO here.
        )
        .0
      };
      // Expand the |alpha| search outward from 0, trying both signs, and
      // stop early once improvements stall (`count` tracks progress).
      let mut best = (alpha_cost(0), 0);
      let mut count = 2;
      for alpha in 1i16..=16i16 {
        let cost = (alpha_cost(alpha), alpha_cost(-alpha));
        if cost.0 < best.0 {
          best = (cost.0, alpha);
          count += 2;
        }
        if cost.1 < best.0 {
          best = (cost.1, -alpha);
          count += 2;
        }
        if count < alpha {
          break;
        }
      }
      best.1
    })
    .collect();
  if best_alpha[0] == 0 && best_alpha[1] == 0 {
    None
  } else {
    Some(CFLParams::from_alpha(best_alpha[0], best_alpha[1]))
  }
}
/// RDO-based transform type decision.
///
/// Trial-encodes the block with each supported transform type from
/// `tx_types` and returns the type with the lowest RD cost together with
/// that cost. The context writer is rolled back after every trial.
pub fn rdo_tx_type_decision<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, mode: PredictionMode, ref_frames: [RefType; 2],
  mvs: [MotionVector; 2], bsize: BlockSize, tile_bo: TileBlockOffset,
  tx_size: TxSize, tx_set: TxSet, tx_types: &[TxType],
) -> (TxType, f64) {
  let mut best_type = TxType::DCT_DCT;
  let mut best_rd = std::f64::MAX;
  let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
  let is_chroma_block =
    has_chroma(tile_bo, bsize, xdec, ydec, fi.sequence.chroma_sampling);
  let is_inter = !mode.is_intra();
  let cw_checkpoint = cw.checkpoint();
  let rdo_type = if fi.use_tx_domain_distortion {
    RDOType::TxDistRealRate
  } else {
    RDOType::PixelDistRealRate
  };
  // Intra blocks split into multiple tx units need reconstructed pixels so
  // later tx units can predict from earlier ones.
  let need_recon_pixel = tx_size.block_size() != bsize && !is_inter;
  for &tx_type in tx_types {
    // Skip unsupported transform types
    if av1_tx_used[tx_set as usize][tx_type as usize] == 0 {
      continue;
    }
    // Regenerate the inter prediction before each trial encode.
    if is_inter {
      motion_compensate(
        fi, ts, cw, mode, ref_frames, mvs, bsize, tile_bo, true,
      );
    }
    // Count bits with a no-output writer to measure the rate.
    let wr: &mut dyn Writer = &mut WriterCounter::new();
    let tell = wr.tell_frac();
    let (_, tx_dist) = if is_inter {
      write_tx_tree(
        fi,
        ts,
        cw,
        wr,
        mode,
        0,
        tile_bo,
        bsize,
        tx_size,
        tx_type,
        false,
        true,
        rdo_type,
        need_recon_pixel,
      )
    } else {
      write_tx_blocks(
        fi,
        ts,
        cw,
        wr,
        mode,
        mode,
        AngleDelta::default(),
        tile_bo,
        bsize,
        tx_size,
        tx_type,
        false,
        CFLParams::default(), // Unused.
        true,
        rdo_type,
        need_recon_pixel,
      )
    };
    let rate = wr.tell_frac() - tell;
    let distortion = if fi.use_tx_domain_distortion {
      compute_tx_distortion(
        fi,
        ts,
        bsize,
        is_chroma_block,
        tile_bo,
        tx_dist,
        false,
        true,
      )
    } else {
      compute_distortion(fi, ts, bsize, is_chroma_block, tile_bo, true)
    };
    let rd = compute_rd_cost(fi, rate, distortion);
    if rd < best_rd {
      best_rd = rd;
      best_type = tx_type;
    }
    // Undo the trial encode before the next candidate.
    cw.rollback(&cw_checkpoint);
  }
  assert!(best_rd >= 0_f64);
  (best_type, best_rd)
}
/// Collect the sub-block offsets produced by splitting with `partition`.
///
/// The top-left offset (`four_partitions[0]`) is always included; VERT adds
/// the right half, HORZ adds the bottom half, and SPLIT adds all three
/// remaining quadrants.
pub fn get_sub_partitions(
  four_partitions: &[TileBlockOffset; 4], partition: PartitionType,
) -> ArrayVec<[TileBlockOffset; 4]> {
  let mut offsets = ArrayVec::<[TileBlockOffset; 4]>::new();
  offsets.push(four_partitions[0]);
  let split = partition == PARTITION_SPLIT;
  if partition == PARTITION_VERT || split {
    offsets.push(four_partitions[1]);
  }
  if partition == PARTITION_HORZ || split {
    offsets.push(four_partitions[2]);
  }
  if split {
    offsets.push(four_partitions[3]);
  }
  offsets
}
/// Like `get_sub_partitions`, but drops any sub-block that would extend
/// past the `mi_width` x `mi_height` boundary.
///
/// The top-left offset is always kept; the right/bottom/diagonal offsets
/// are kept only when the whole `subsize` sub-block fits inside the frame.
pub fn get_sub_partitions_with_border_check(
  four_partitions: &[TileBlockOffset; 4], partition: PartitionType,
  mi_width: usize, mi_height: usize, subsize: BlockSize,
) -> ArrayVec<[TileBlockOffset; 4]> {
  let mut offsets = ArrayVec::<[TileBlockOffset; 4]>::new();
  offsets.push(four_partitions[0]);
  if partition == PARTITION_NONE {
    return offsets;
  }
  let hbsw = subsize.width_mi(); // Half the block size width in blocks
  let hbsh = subsize.height_mi(); // Half the block size height in blocks
  // A candidate survives only when it lies entirely inside the boundary.
  let fits = |bo: TileBlockOffset| {
    bo.0.x + hbsw <= mi_width && bo.0.y + hbsh <= mi_height
  };
  let split = partition == PARTITION_SPLIT;
  if (partition == PARTITION_VERT || split) && fits(four_partitions[1]) {
    offsets.push(four_partitions[1]);
  }
  if (partition == PARTITION_HORZ || split) && fits(four_partitions[2]) {
    offsets.push(four_partitions[2]);
  }
  if split && fits(four_partitions[3]) {
    offsets.push(four_partitions[3]);
  }
  offsets
}
#[inline(always)]
/// Run mode decision for an unsplit (PARTITION_NONE) block and record the
/// result in `child_modes`, returning its RD cost.
fn rdo_partition_none<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, bsize: BlockSize, tile_bo: TileBlockOffset,
  pmv_idx: usize, inter_cfg: &InterConfig,
  child_modes: &mut ArrayVec<[PartitionParameters; 4]>,
) -> Option<f64> {
  // Blocks above 32x32 share the tile-wide half-res MV set (index 0);
  // smaller blocks select one of four quadrant sets from bit 5 of x/y.
  let pmv_inner_idx = if bsize > BlockSize::BLOCK_32X32 {
    0
  } else {
    ((tile_bo.0.x & 32) >> 5) + ((tile_bo.0.y & 32) >> 4) + 1
  };
  let decision = rdo_mode_decision(
    fi,
    ts,
    cw,
    bsize,
    tile_bo,
    (pmv_idx, pmv_inner_idx),
    inter_cfg,
  );
  let rd_cost = decision.rd_cost;
  child_modes.push(decision);
  Some(rd_cost)
}
// VERTICAL, HORIZONTAL or simple SPLIT
/// Evaluates splitting `bsize` at `tile_bo` into the sub-blocks implied by
/// `partition`, encoding each sub-block as it is decided so that entropy
/// coder state stays consistent for the next sub-block.
///
/// Returns `Some(total cost)` (partition signaling plus all sub-block
/// costs), or `None` if early exit is enabled and the running cost already
/// exceeds `best_rd`. On success the sub-block modes are appended to
/// `child_modes`. The caller is expected to roll back the writers.
#[inline(always)]
fn rdo_partition_simple<T: Pixel, W: Writer>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w_pre_cdef: &mut W, w_post_cdef: &mut W,
  bsize: BlockSize, tile_bo: TileBlockOffset, pmv_idx: usize,
  inter_cfg: &InterConfig, partition: PartitionType, rdo_type: RDOType,
  best_rd: f64, child_modes: &mut ArrayVec<[PartitionParameters; 4]>,
) -> Option<f64> {
  let subsize = bsize.subsize(partition);
  debug_assert!(subsize != BlockSize::BLOCK_INVALID);
  // Cost of signaling the partition itself (only coded at >= 8x8).
  let cost = if bsize >= BlockSize::BLOCK_8X8 {
    let w: &mut W = if cw.bc.cdef_coded { w_post_cdef } else { w_pre_cdef };
    let tell = w.tell_frac();
    cw.write_partition(w, tile_bo, partition, bsize);
    compute_rd_cost(fi, w.tell_frac() - tell, ScaledDistortion::zero())
  } else {
    0.0
  };
  //pmv = best_pred_modes[0].mvs[0];
  // assert!(best_pred_modes.len() <= 4);
  let hbsw = subsize.width_mi(); // Half the block size width in blocks
  let hbsh = subsize.height_mi(); // Half the block size height in blocks
  // Candidate sub-block origins: top-left, top-right, bottom-left,
  // bottom-right. Which of these are used depends on `partition`.
  let four_partitions = [
    tile_bo,
    TileBlockOffset(BlockOffset {
      x: tile_bo.0.x + hbsw as usize,
      y: tile_bo.0.y,
    }),
    TileBlockOffset(BlockOffset {
      x: tile_bo.0.x,
      y: tile_bo.0.y + hbsh as usize,
    }),
    TileBlockOffset(BlockOffset {
      x: tile_bo.0.x + hbsw as usize,
      y: tile_bo.0.y + hbsh as usize,
    }),
  ];
  let partitions = get_sub_partitions_with_border_check(
    &four_partitions,
    partition,
    ts.mi_width,
    ts.mi_height,
    subsize,
  );
  // Per-sub-block pmv slot (same quadrant scheme as rdo_partition_none).
  let pmv_idxs = partitions
    .iter()
    .map(|&offset| {
      if subsize > BlockSize::BLOCK_32X32 {
        0
      } else {
        ((offset.0.x & 32) >> 5) + ((offset.0.y & 32) >> 4) + 1
      }
    })
    .collect::<ArrayVec<[_; 4]>>();
  let mut rd_cost_sum = 0.0;
  for (&offset, pmv_inner_idx) in partitions.iter().zip(pmv_idxs) {
    let mode_decision = rdo_mode_decision(
      fi,
      ts,
      cw,
      subsize,
      offset,
      (pmv_idx, pmv_inner_idx),
      inter_cfg,
    );
    rd_cost_sum += mode_decision.rd_cost;
    // Prune: no point finishing if we already exceed the best known cost.
    if fi.enable_early_exit && rd_cost_sum > best_rd {
      return None;
    }
    // Each square sub-block >= 8x8 codes its own PARTITION_NONE symbol.
    if subsize >= BlockSize::BLOCK_8X8 && subsize.is_sqr() {
      let w: &mut W = if cw.bc.cdef_coded { w_post_cdef } else { w_pre_cdef };
      cw.write_partition(w, offset, PartitionType::PARTITION_NONE, subsize);
    }
    // Encode now so context state is correct for the next sub-block.
    encode_block_with_modes(
      fi,
      ts,
      cw,
      w_pre_cdef,
      w_post_cdef,
      subsize,
      offset,
      &mode_decision,
      rdo_type,
      false,
    );
    child_modes.push(mode_decision);
  }
  Some(cost + rd_cost_sum)
}
// RDO-based single level partitioning decision
/// Tries each candidate in `partition_types` for the block at `tile_bo` and
/// returns the lowest-cost choice.
///
/// `cached_block` supplies an already-evaluated partitioning (and its cost)
/// that serves as the initial best; that partition type is not re-encoded.
/// The entropy coder and both writers are checkpointed before each trial and
/// rolled back after, so this function leaves coder state unchanged.
pub fn rdo_partition_decision<T: Pixel, W: Writer>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w_pre_cdef: &mut W, w_post_cdef: &mut W,
  bsize: BlockSize, tile_bo: TileBlockOffset,
  cached_block: &PartitionGroupParameters, pmv_idx: usize,
  partition_types: &[PartitionType], rdo_type: RDOType,
  inter_cfg: &InterConfig,
) -> PartitionGroupParameters {
  let mut best_partition = cached_block.part_type;
  let mut best_rd = cached_block.rd_cost;
  let mut best_pred_modes = cached_block.part_modes.clone();
  let cw_checkpoint = cw.checkpoint();
  let w_pre_checkpoint = w_pre_cdef.checkpoint();
  let w_post_checkpoint = w_post_cdef.checkpoint();
  for &partition in partition_types {
    // Do not re-encode results we already have
    if partition == cached_block.part_type {
      continue;
    }
    let mut child_modes = ArrayVec::<[_; 4]>::new();
    // `None` means the trial was pruned by early exit, not that it is free.
    let cost = match partition {
      PARTITION_NONE if bsize <= BlockSize::BLOCK_64X64 => rdo_partition_none(
        fi,
        ts,
        cw,
        bsize,
        tile_bo,
        pmv_idx,
        inter_cfg,
        &mut child_modes,
      ),
      PARTITION_SPLIT | PARTITION_HORZ | PARTITION_VERT => {
        rdo_partition_simple(
          fi,
          ts,
          cw,
          w_pre_cdef,
          w_post_cdef,
          bsize,
          tile_bo,
          pmv_idx,
          inter_cfg,
          partition,
          rdo_type,
          best_rd,
          &mut child_modes,
        )
      }
      _ => {
        unreachable!();
      }
    };
    if let Some(rd) = cost {
      if rd < best_rd {
        best_rd = rd;
        best_partition = partition;
        best_pred_modes = child_modes.clone();
      }
    }
    // Undo all encoder state changes made during this trial.
    cw.rollback(&cw_checkpoint);
    w_pre_cdef.rollback(&w_pre_checkpoint);
    w_post_cdef.rollback(&w_post_checkpoint);
  }
  assert!(best_rd >= 0_f64);
  PartitionGroupParameters {
    rd_cost: best_rd,
    part_type: best_partition,
    part_modes: best_pred_modes,
  }
}
/// Accumulates the biased distortion between `test` (filtered) and `src`
/// (original) for one plane over an `sb_w` x `sb_h` superblock area.
///
/// `base_sbo` is the analysis area's origin within the tile and
/// `offset_sbo` the offset of the measured region within that area; their
/// sum addresses frame-absolute data (needed for the distortion biases).
/// Luma uses cdef_dist, chroma uses plain SSE; the result is scaled by the
/// plane's distortion scale.
fn rdo_loop_plane_error<T: Pixel>(
  base_sbo: TileSuperBlockOffset, offset_sbo: TileSuperBlockOffset,
  sb_w: usize, sb_h: usize, fi: &FrameInvariants<T>, ts: &TileStateMut<'_, T>,
  blocks: &TileBlocks<'_>, test: &Frame<u16>, src: &Frame<u16>, pli: usize,
) -> ScaledDistortion {
  let sb_w_blocks =
    if fi.sequence.use_128x128_superblock { 16 } else { 8 } * sb_w;
  let sb_h_blocks =
    if fi.sequence.use_128x128_superblock { 16 } else { 8 } * sb_h;
  // Each direction block is 8x8 in y, potentially smaller if subsampled in chroma
  // accumulating in-frame and unpadded
  let mut err = Distortion::zero();
  for by in 0..sb_h_blocks {
    for bx in 0..sb_w_blocks {
      // 8x8 steps == 2 blocks of 4x4, hence the << 1.
      let loop_bo = offset_sbo.block_offset(bx << 1, by << 1);
      if loop_bo.0.x < blocks.cols() && loop_bo.0.y < blocks.rows() {
        let src_plane = &src.planes[pli];
        let test_plane = &test.planes[pli];
        let PlaneConfig { xdec, ydec, .. } = src_plane.cfg;
        debug_assert_eq!(xdec, test_plane.cfg.xdec);
        debug_assert_eq!(ydec, test_plane.cfg.ydec);
        // Unfortunately, our distortion biases are only available via
        // Frame-absolute addressing, so we need a block offset
        // relative to the full frame origin (not the tile or analysis
        // area)
        let frame_bo = (base_sbo + offset_sbo).block_offset(bx << 1, by << 1);
        let bias = distortion_scale(
          fi,
          ts.to_frame_block_offset(frame_bo),
          BlockSize::BLOCK_8X8,
        );
        let src_region =
          src_plane.region(Area::BlockStartingAt { bo: loop_bo.0 });
        let test_region =
          test_plane.region(Area::BlockStartingAt { bo: loop_bo.0 });
        err += if pli == 0 {
          // For loop filters, We intentionally use cdef_dist even with
          // `--tune Psnr`. Using SSE instead gives no PSNR gain but has a
          // significant negative impact on other metrics and visual quality.
          cdef_dist_wxh_8x8(&src_region, &test_region, fi.sequence.bit_depth)
            * bias
        } else {
          sse_wxh(&src_region, &test_region, 8 >> xdec, 8 >> ydec, |_, _| bias)
        };
      }
    }
  }
  err * fi.dist_scale[pli]
}
// Passed in a superblock offset representing the upper left corner of
// the LRU area we're optimizing. This area covers the largest LRU in
// any of the present planes, but may consist of a number of
// superblocks and full, smaller LRUs in the other planes
/// Jointly optimizes CDEF indices and loop-restoration filters for the
/// analysis area whose upper-left superblock is `base_sbo`.
///
/// Works on temporary u16 copies of the reconstruction (optionally
/// deblocked first when `deblock_p` is set), iterating CDEF and LRF
/// decisions against each other until neither changes. Winning CDEF
/// indices are written into `cw.bc.blocks` and winning restoration filters
/// into `ts.restoration`.
pub fn rdo_loop_decision<T: Pixel>(
  base_sbo: TileSuperBlockOffset, fi: &FrameInvariants<T>,
  ts: &mut TileStateMut<'_, T>, cw: &mut ContextWriter, w: &mut dyn Writer,
  deblock_p: bool,
) {
  // Monochrome content has only the luma plane to filter.
  let planes = if fi.sequence.chroma_sampling == ChromaSampling::Cs400 {
    1
  } else {
    MAX_PLANES
  };
  assert!(fi.sequence.enable_cdef || fi.sequence.enable_restoration);
  // Determine area of optimization: Which plane has the largest LRUs?
  // How many LRUs for each?
  let mut sb_w = 1; // how many superblocks wide the largest LRU
                    // is/how many SBs we're processing (same thing)
  let mut sb_h = 1; // how many superblocks tall the largest LRU
                    // is/how many SBs we're processing (same thing)
  let mut lru_w = [0; MAX_PLANES]; // how many LRUs we're processing
  let mut lru_h = [0; MAX_PLANES]; // how many LRUs we're processing
  for pli in 0..planes {
    let sb_h_shift = ts.restoration.planes[pli].rp_cfg.sb_h_shift;
    let sb_v_shift = ts.restoration.planes[pli].rp_cfg.sb_v_shift;
    if sb_w < (1 << sb_h_shift) {
      sb_w = 1 << sb_h_shift;
    }
    if sb_h < (1 << sb_v_shift) {
      sb_h = 1 << sb_v_shift;
    }
  }
  for pli in 0..planes {
    let sb_h_shift = ts.restoration.planes[pli].rp_cfg.sb_h_shift;
    let sb_v_shift = ts.restoration.planes[pli].rp_cfg.sb_v_shift;
    lru_w[pli] = sb_w / (1 << sb_h_shift);
    lru_h[pli] = sb_h / (1 << sb_v_shift);
  }
  // The superblock width/height determinations may be calling for us
  // to compute over superblocks that do not actually exist in the
  // frame (off the right or lower edge). Trim sb width/height down
  // to actual superblocks. Note that these last superblocks on the
  // right/bottom may themselves still span the edge of the frame, but
  // they do hold at least some visible pixels.
  sb_w = sb_w.min(ts.sb_width - base_sbo.0.x);
  sb_h = sb_h.min(ts.sb_height - base_sbo.0.y);
  // We have need to know the Y visible pixel limits as well (the
  // sb_w/sb_h figures above can be used to determine how many
  // allocated pixels, possibly beyond the visible frame, exist).
  let crop_w =
    fi.width - ((ts.sbo.0.x + base_sbo.0.x) << SUPERBLOCK_TO_PLANE_SHIFT);
  let crop_h =
    fi.height - ((ts.sbo.0.y + base_sbo.0.y) << SUPERBLOCK_TO_PLANE_SHIFT);
  let pixel_w = crop_w.min(sb_w << SUPERBLOCK_TO_PLANE_SHIFT);
  let pixel_h = crop_h.min(sb_h << SUPERBLOCK_TO_PLANE_SHIFT);
  // Based on `RestorationState::new`
  const MAX_SB_SHIFT: usize = 4;
  const MAX_SB_SIZE: usize = 1 << MAX_SB_SHIFT;
  const MAX_LRU_SIZE: usize = MAX_SB_SIZE;
  // Static allocation relies on the "minimal LRU area for all N planes" invariant.
  let mut best_index = [-1; MAX_SB_SIZE * MAX_SB_SIZE];
  let mut best_lrf =
    [[RestorationFilter::None; MAX_PLANES]; MAX_LRU_SIZE * MAX_LRU_SIZE];
  // due to imprecision in the reconstruction parameter solver, we
  // need to make sure we don't fall into a limit cycle. Track our
  // best cost at LRF so that we can break if we get a solution that doesn't
  // improve at the reconstruction stage.
  let mut best_lrf_cost = [[-1.0; MAX_PLANES]; MAX_LRU_SIZE * MAX_LRU_SIZE];
  // Loop filter RDO is an iterative process and we need temporary
  // scratch data to hold the results of deblocking, cdef, and the
  // loop reconstruction filter so that each can be partially updated
  // without recomputing the entire stack. Construct
  // largest-LRU-sized frames for each, accounting for padding
  // required by deblocking, cdef and [optionally] LR.
  // NOTE: the CDEF code requires padding to simplify addressing.
  // Right now, the padded area does borrow neighboring pixels for the
  // border so long as they're within the tile [as opposed to simply
  // flagging the border pixels as inactive]. LR code currently does
  // not need and will not use padding area. It always edge-extends
  // the passed in rectangle.
  let mut rec_subset = {
    let const_rec = ts.rec.as_const();
    // a padding of 8 gets us a full block of border. CDEF
    // only needs 2 pixels, but deblocking is happier with full
    // blocks.
    cdef_padded_tile_copy(
      &const_rec,
      base_sbo,
      (pixel_w + 7) >> 3,
      (pixel_h + 7) >> 3,
      8,
      planes,
    )
  };
  // sub-setted region of the TileBlocks for our working frame area
  let mut tileblocks_subset = cw.bc.blocks.subregion(
    base_sbo.block_offset(0, 0).0.x,
    base_sbo.block_offset(0, 0).0.y,
    sb_w << SUPERBLOCK_TO_BLOCK_SHIFT,
    sb_h << SUPERBLOCK_TO_BLOCK_SHIFT,
  );
  // why copy and not just a view? Because CDEF optimization requires
  // u16 working space. This avoids adding another generic buffer
  // typing parameter and expanding code to handle all the possible
  // input/output combinations. In the future we may decide to prefer
  // that over the additional temp buffer (after doing the work needed
  // to allow CDEF opt to work on 8 bit).
  let src_subset = {
    cdef_padded_tile_copy(
      &ts.input_tile,
      base_sbo,
      (pixel_w + 7) >> 3,
      (pixel_h + 7) >> 3,
      0,
      planes,
    )
  };
  if deblock_p {
    // Find a good deblocking filter solution for the passed in area.
    // This is not RDO of deblocking itself, merely a solution to get
    // better results from CDEF/LRF RDO.
    let deblock_levels = deblock_filter_optimize(
      fi,
      &rec_subset.as_tile(),
      &src_subset.as_tile(),
      &tileblocks_subset.as_const(),
      crop_w,
      crop_h,
    );
    // Deblock the contents of our reconstruction copy.
    if deblock_levels[0] != 0 || deblock_levels[1] != 0 {
      // copy ts.deblock because we need to set some of our own values here
      let mut deblock_copy = *ts.deblock;
      deblock_copy.levels = deblock_levels;
      // finally, deblock the temp frame
      deblock_filter_frame(
        &deblock_copy,
        &mut rec_subset.as_tile_mut(),
        &tileblocks_subset.as_const(),
        crop_w,
        crop_h,
        fi.sequence.bit_depth,
        planes,
      );
    }
  }
  // Scratch frame holding the current CDEF output (when CDEF is enabled).
  let mut cdef_work = if fi.sequence.enable_cdef {
    Some(cdef_padded_tile_copy(
      &rec_subset.as_tile(),
      TileSuperBlockOffset(SuperBlockOffset { x: 0, y: 0 }),
      (pixel_w + 7) >> 3,
      (pixel_h + 7) >> 3,
      0,
      planes,
    ))
  } else {
    None
  };
  // Scratch frame holding the current LRF output (when LR is enabled).
  let mut lrf_work = if fi.sequence.enable_restoration {
    Some(cdef_block8_frame(
      (pixel_w + 7) >> 3,
      (pixel_h + 7) >> 3,
      &ts.rec.as_const(),
    ))
  } else {
    None
  };
  // Precompute directional analysis for CDEF
  let cdef_data = {
    if cdef_work.is_some() {
      Some((
        &rec_subset,
        cdef_analyze_superblock_range(
          fi,
          &rec_subset,
          &tileblocks_subset.as_const(),
          sb_w,
          sb_h,
        ),
      ))
    } else {
      None
    }
  };
  // CDEF/LRF decision iteration
  // Start with a default of CDEF 0 and RestorationFilter::None
  // Try all CDEF options for each sb with current LRF; if new CDEF+LRF choice is better, select it.
  // Then try all LRF options with current CDEFs; if new CDEFs+LRF choice is better, select it.
  // If LRF choice changed for any plane, repeat until no changes
  // Limit iterations and where we break based on speed setting (in the TODO list ;-)
  let mut cdef_change = true;
  let mut lrf_change = true;
  while cdef_change || lrf_change {
    // search for improved cdef indices, superblock by superblock, if cdef is enabled.
    if let (Some((rec_copy, cdef_dirs)), Some(cdef_ref)) =
      (&cdef_data, &mut cdef_work.as_mut())
    {
      for sby in 0..sb_h {
        for sbx in 0..sb_w {
          let prev_best_index = best_index[sby * sb_w + sbx];
          let mut best_cost = -1.;
          let mut best_new_index = -1i8;
          /* offset of the superblock we're currently testing within the larger analysis area */
          let loop_sbo =
            TileSuperBlockOffset(SuperBlockOffset { x: sbx, y: sby });
          /* cdef index testing loop */
          for cdef_index in 0..(1 << fi.cdef_bits) {
            let mut err = ScaledDistortion::zero();
            let mut rate = 0;
            let mut cdef_ref_tm = TileMut::new(
              cdef_ref,
              TileRect {
                x: 0,
                y: 0,
                width: cdef_ref.planes[0].cfg.width,
                height: cdef_ref.planes[0].cfg.height,
              },
            );
            // Test-filter this superblock with the candidate index.
            cdef_filter_superblock(
              fi,
              &rec_subset,
              &mut cdef_ref_tm,
              &tileblocks_subset.as_const(),
              loop_sbo,
              cdef_index,
              &cdef_dirs[sby * sb_w + sbx],
            );
            // apply LRF if any
            for pli in 0..planes {
              // We need the cropped-to-visible-frame area of this SB
              let wh =
                if fi.sequence.use_128x128_superblock { 128 } else { 64 };
              let PlaneConfig { xdec, ydec, .. } = cdef_ref.planes[pli].cfg;
              let vis_width = (wh >> xdec).min(
                (crop_w >> xdec)
                  - loop_sbo.plane_offset(&cdef_ref.planes[pli].cfg).x
                    as usize,
              );
              let vis_height = (wh >> ydec).min(
                (crop_h >> ydec)
                  - loop_sbo.plane_offset(&cdef_ref.planes[pli].cfg).y
                    as usize,
              );
              // which LRU are we currently testing against?
              if let (Some((lru_x, lru_y)), Some(lrf_ref)) = {
                let rp = &ts.restoration.planes[pli];
                (
                  rp.restoration_unit_offset(base_sbo, loop_sbo, false),
                  &mut lrf_work,
                )
              } {
                // We have a valid LRU, apply LRF, compute error
                match best_lrf[lru_y * lru_w[pli] + lru_x][pli] {
                  RestorationFilter::None {} => {
                    err += rdo_loop_plane_error(
                      base_sbo,
                      loop_sbo,
                      1,
                      1,
                      fi,
                      ts,
                      &tileblocks_subset.as_const(),
                      cdef_ref,
                      &src_subset,
                      pli,
                    );
                    rate += if fi.sequence.enable_restoration {
                      cw.count_lrf_switchable(
                        w,
                        &ts.restoration.as_const(),
                        best_lrf[lru_y * lru_w[pli] + lru_x][pli],
                        pli,
                      )
                    } else {
                      0 // no relative cost differences to different
                        // CDEF params. If cdef is on, it's a wash.
                    };
                  }
                  RestorationFilter::Sgrproj { set, xqd } => {
                    // only run on this single superblock
                    let loop_po =
                      loop_sbo.plane_offset(&cdef_ref.planes[pli].cfg);
                    // todo: experiment with borrowing border pixels
                    // rather than edge-extending. Right now this is
                    // hard-clipping to the superblock boundary.
                    setup_integral_image(
                      &mut ts.integral_buffer,
                      SOLVE_IMAGE_STRIDE,
                      vis_width,
                      vis_height,
                      vis_width,
                      vis_height,
                      &cdef_ref.planes[pli].slice(loop_po),
                      &cdef_ref.planes[pli].slice(loop_po),
                    );
                    sgrproj_stripe_filter(
                      set,
                      xqd,
                      fi,
                      &ts.integral_buffer,
                      SOLVE_IMAGE_STRIDE,
                      &cdef_ref.planes[pli].slice(loop_po),
                      &mut lrf_ref.planes[pli].region_mut(Area::Rect {
                        x: loop_po.x,
                        y: loop_po.y,
                        width: vis_width,
                        height: vis_height,
                      }),
                    );
                    err += rdo_loop_plane_error(
                      base_sbo,
                      loop_sbo,
                      1,
                      1,
                      fi,
                      ts,
                      &tileblocks_subset.as_const(),
                      lrf_ref,
                      &src_subset,
                      pli,
                    );
                    rate += cw.count_lrf_switchable(
                      w,
                      &ts.restoration.as_const(),
                      best_lrf[lru_y * lru_w[pli] + lru_x][pli],
                      pli,
                    );
                  }
                  RestorationFilter::Wiener { .. } => unreachable!(), // coming soon
                }
              } else {
                // No actual LRU here, compute error directly from CDEF output.
                err += rdo_loop_plane_error(
                  base_sbo,
                  loop_sbo,
                  1,
                  1,
                  fi,
                  ts,
                  &tileblocks_subset.as_const(),
                  cdef_ref,
                  &src_subset,
                  pli,
                );
                // no relative cost differences to different
                // CDEF params. If cdef is on, it's a wash.
                // rate += 0;
              }
            }
            let cost = compute_rd_cost(fi, rate, err);
            if best_cost < 0. || cost < best_cost {
              best_cost = cost;
              best_new_index = cdef_index as i8;
            }
          }
          // Did we change any preexisting choices?
          if best_new_index != prev_best_index {
            cdef_change = true;
            best_index[sby * sb_w + sbx] = best_new_index;
            tileblocks_subset.set_cdef(loop_sbo, best_new_index as u8);
          }
          let mut cdef_ref_tm = TileMut::new(
            cdef_ref,
            TileRect {
              x: 0,
              y: 0,
              width: cdef_ref.planes[0].cfg.width,
              height: cdef_ref.planes[0].cfg.height,
            },
          );
          // Keep cdef output up to date; we need it for restoration
          // both below and above (padding)
          cdef_filter_superblock(
            fi,
            rec_copy,
            &mut cdef_ref_tm,
            &tileblocks_subset.as_const(),
            loop_sbo,
            best_index[sby * sb_w + sbx] as u8,
            &cdef_dirs[sby * sb_w + sbx],
          );
        }
      }
    }
    if !cdef_change {
      break;
    }
    cdef_change = false;
    lrf_change = false;
    // search for improved restoration filter parameters if restoration is enabled
    if let Some(lrf_ref) = &mut lrf_work.as_mut() {
      let lrf_input = if cdef_work.is_some() {
        // When CDEF is enabled, we pull from the CDEF output
        &cdef_work.as_ref().unwrap()
      } else {
        // When CDEF is disabled, we pull from the [optionally
        // deblocked] reconstruction
        &rec_subset
      };
      for pli in 0..planes {
        // Nominal size of LRU in pixels before clipping to visible frame
        let unit_size = ts.restoration.planes[pli].rp_cfg.unit_size;
        // width, in sb, of an LRU in this plane
        let lru_sb_w = 1 << ts.restoration.planes[pli].rp_cfg.sb_h_shift;
        // height, in sb, of an LRU in this plane
        let lru_sb_h = 1 << ts.restoration.planes[pli].rp_cfg.sb_v_shift;
        let PlaneConfig { xdec, ydec, .. } = lrf_ref.planes[pli].cfg;
        for lru_y in 0..lru_h[pli] {
          // number of LRUs vertically
          for lru_x in 0..lru_w[pli] {
            // number of LRUs horizontally
            let loop_sbo = TileSuperBlockOffset(SuperBlockOffset {
              x: lru_x * lru_sb_w,
              y: lru_y * lru_sb_h,
            });
            if ts.restoration.has_restoration_unit(
              base_sbo + loop_sbo,
              pli,
              false,
            ) {
              let src_plane = &src_subset.planes[pli]; // uncompressed input for reference
              let lrf_in_plane = &lrf_input.planes[pli];
              let lrf_po = loop_sbo.plane_offset(&src_plane.cfg);
              let mut best_new_lrf = best_lrf[lru_y * lru_w[pli] + lru_x][pli];
              let mut best_cost =
                best_lrf_cost[lru_y * lru_w[pli] + lru_x][pli];
              // Check the no filter option
              {
                let err = rdo_loop_plane_error(
                  base_sbo,
                  loop_sbo,
                  lru_sb_w,
                  lru_sb_h,
                  fi,
                  ts,
                  &tileblocks_subset.as_const(),
                  lrf_input,
                  &src_subset,
                  pli,
                );
                let rate = cw.count_lrf_switchable(
                  w,
                  &ts.restoration.as_const(),
                  best_new_lrf,
                  pli,
                );
                let cost = compute_rd_cost(fi, rate, err);
                // Was this choice actually an improvement?
                if best_cost < 0. || cost < best_cost {
                  best_cost = cost;
                  best_lrf_cost[lru_y * lru_w[pli] + lru_x][pli] = cost;
                  best_new_lrf = RestorationFilter::None;
                }
              }
              // Look for a self guided filter
              // We need the cropped-to-visible-frame computation area of this LRU
              let vis_width = unit_size.min(
                (crop_w >> xdec)
                  - loop_sbo.plane_offset(&lrf_ref.planes[pli].cfg).x as usize,
              );
              let vis_height = unit_size.min(
                (crop_h >> ydec)
                  - loop_sbo.plane_offset(&lrf_ref.planes[pli].cfg).y as usize,
              );
              // todo: experiment with borrowing border pixels
              // rather than edge-extending. Right now this is
              // hard-clipping to the superblock boundary.
              setup_integral_image(
                &mut ts.integral_buffer,
                SOLVE_IMAGE_STRIDE,
                vis_width,
                vis_height,
                vis_width,
                vis_height,
                &lrf_in_plane.slice(lrf_po),
                &lrf_in_plane.slice(lrf_po),
              );
              for &set in get_sgr_sets(fi.config.speed_settings.sgr_complexity)
              {
                let (xqd0, xqd1) = sgrproj_solve(
                  set,
                  fi,
                  &ts.integral_buffer,
                  &src_plane.slice(lrf_po),
                  &lrf_in_plane.slice(lrf_po),
                  vis_width,
                  vis_height,
                );
                let current_lrf =
                  RestorationFilter::Sgrproj { set, xqd: [xqd0, xqd1] };
                if let RestorationFilter::Sgrproj { set, xqd } = current_lrf {
                  sgrproj_stripe_filter(
                    set,
                    xqd,
                    fi,
                    &ts.integral_buffer,
                    SOLVE_IMAGE_STRIDE,
                    &lrf_in_plane.slice(lrf_po),
                    &mut lrf_ref.planes[pli].region_mut(Area::Rect {
                      x: lrf_po.x,
                      y: lrf_po.y,
                      width: vis_width,
                      height: vis_height,
                    }),
                  );
                }
                let err = rdo_loop_plane_error(
                  base_sbo,
                  loop_sbo,
                  lru_sb_w,
                  lru_sb_h,
                  fi,
                  ts,
                  &tileblocks_subset.as_const(),
                  lrf_ref,
                  &src_subset,
                  pli,
                );
                let rate = cw.count_lrf_switchable(
                  w,
                  &ts.restoration.as_const(),
                  current_lrf,
                  pli,
                );
                let cost = compute_rd_cost(fi, rate, err);
                if cost < best_cost {
                  best_cost = cost;
                  best_lrf_cost[lru_y * lru_w[pli] + lru_x][pli] = cost;
                  best_new_lrf = current_lrf;
                }
              }
              // Commit any changed choice and flag another iteration.
              if best_lrf[lru_y * lru_w[pli] + lru_x][pli]
                .notequal(best_new_lrf)
              {
                best_lrf[lru_y * lru_w[pli] + lru_x][pli] = best_new_lrf;
                lrf_change = true;
                if let Some(ru) = ts.restoration.planes[pli]
                  .restoration_unit_mut(base_sbo + loop_sbo)
                {
                  ru.filter = best_new_lrf;
                }
              }
            }
          }
        }
      }
    }
  }
}
#[test]
fn estimate_rate_test() {
  // With fast_distortion == 0 the interpolation sits exactly on bin 0, so
  // the estimate must equal the table's first entry for TX_4X4 at qindex 0.
  assert_eq!(estimate_rate(0, TxSize::TX_4X4, 0), RDO_RATE_TABLE[0][0][0]);
}
Avoid many temporary allocations for inter pruning
This collect::<Vec<_>>() yields nearly 90,000 allocations for
5 frames of 1080p at speed 3.
Replacing Vec with ArrayVec eliminates these allocations.
// Copyright (c) 2001-2016, Alliance for Open Media. All rights reserved
// Copyright (c) 2017-2019, The rav1e contributors. All rights reserved
//
// This source code is subject to the terms of the BSD 2 Clause License and
// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
// was not distributed with this source code in the LICENSE file, you can
// obtain it at www.aomedia.org/license/software. If the Alliance for Open
// Media Patent License 1.0 was not distributed with this source code in the
// PATENTS file, you can obtain it at www.aomedia.org/license/patent.
#![allow(non_camel_case_types)]
use crate::api::*;
use crate::cdef::*;
use crate::context::*;
use crate::deblock::*;
use crate::dist::*;
use crate::ec::{Writer, WriterCounter, OD_BITRES};
use crate::encode_block_with_modes;
use crate::encoder::{FrameInvariants, IMPORTANCE_BLOCK_SIZE};
use crate::frame::Frame;
use crate::frame::*;
use crate::header::ReferenceMode;
use crate::lrf::*;
use crate::luma_ac;
use crate::mc::MotionVector;
use crate::me::*;
use crate::motion_compensate;
use crate::partition::RefType::*;
use crate::partition::*;
use crate::predict::{
AngleDelta, IntraEdgeFilterParameters, IntraParam, PredictionMode,
RAV1E_INTER_COMPOUND_MODES, RAV1E_INTER_MODES_MINIMAL, RAV1E_INTRA_MODES,
};
use crate::rdo_tables::*;
use crate::tiling::*;
use crate::transform::{TxSet, TxSize, TxType, RAV1E_TX_TYPES};
use crate::util::{Aligned, CastFromPrimitive, Pixel};
use crate::write_tx_blocks;
use crate::write_tx_tree;
use crate::Tune;
use crate::{encode_block_post_cdef, encode_block_pre_cdef};
use crate::partition::PartitionType::*;
use arrayvec::*;
use itertools::izip;
use std::fmt;
/// Which combination of distortion measurement and rate accounting drives
/// rate-distortion decisions.
#[derive(Copy, Clone, PartialEq)]
pub enum RDOType {
  PixelDistRealRate,
  TxDistRealRate,
  TxDistEstRate,
}

impl RDOType {
  /// Returns true when distortion is measured in the transform domain
  /// rather than the pixel domain.
  pub fn needs_tx_dist(self) -> bool {
    match self {
      // Pixel-domain distortion needs no tx-domain measurement.
      RDOType::PixelDistRealRate => false,
      // Both tx-domain variants do.
      RDOType::TxDistRealRate | RDOType::TxDistEstRate => true,
    }
  }
  /// Returns true when the exact entropy-coded coefficient rate is needed
  /// (as opposed to an estimate derived from tx-domain distortion).
  pub fn needs_coeff_rate(self) -> bool {
    match self {
      RDOType::PixelDistRealRate | RDOType::TxDistRealRate => true,
      RDOType::TxDistEstRate => false,
    }
  }
}
/// The best partitioning found for a block, together with the chosen modes
/// of its (up to four) sub-blocks.
#[derive(Clone)]
pub struct PartitionGroupParameters {
  /// Total rate-distortion cost of this partitioning.
  pub rd_cost: f64,
  /// The winning partition type.
  pub part_type: PartitionType,
  /// Mode decisions for each resulting sub-block (one entry for
  /// PARTITION_NONE, up to four for PARTITION_SPLIT).
  pub part_modes: ArrayVec<[PartitionParameters; 4]>,
}
/// The mode decision for a single coded block.
#[derive(Clone, Debug)]
pub struct PartitionParameters {
  /// Rate-distortion cost of coding this block with these modes.
  pub rd_cost: f64,
  /// Block position within the tile, in 4x4 block units.
  pub bo: TileBlockOffset,
  pub bsize: BlockSize,
  pub pred_mode_luma: PredictionMode,
  pub pred_mode_chroma: PredictionMode,
  /// Chroma-from-luma parameters used when the chroma mode is CFL.
  pub pred_cfl_params: CFLParams,
  pub angle_delta: AngleDelta,
  /// Reference frames for inter prediction; `INTRA_FRAME` for intra blocks.
  pub ref_frames: [RefType; 2],
  pub mvs: [MotionVector; 2],
  /// True when the block is coded with no residual.
  pub skip: bool,
  pub has_coeff: bool,
  pub tx_size: TxSize,
  pub tx_type: TxType,
  /// Segment index.
  pub sidx: u8,
}
impl Default for PartitionParameters {
  /// A sentinel "no decision yet" value: `rd_cost` of `f64::MAX` guarantees
  /// any real mode decision compares as better.
  fn default() -> Self {
    PartitionParameters {
      rd_cost: std::f64::MAX,
      bo: TileBlockOffset::default(),
      bsize: BlockSize::BLOCK_INVALID,
      pred_mode_luma: PredictionMode::default(),
      pred_mode_chroma: PredictionMode::default(),
      pred_cfl_params: CFLParams::default(),
      angle_delta: AngleDelta::default(),
      ref_frames: [RefType::INTRA_FRAME, RefType::NONE_FRAME],
      mvs: [MotionVector::default(); 2],
      skip: false,
      has_coeff: true,
      tx_size: TxSize::TX_4X4,
      tx_type: TxType::DCT_DCT,
      sidx: 0,
    }
  }
}
/// Estimates the coding rate of a transform block from a precomputed table,
/// using piecewise-linear interpolation over `fast_distortion`.
///
/// `RDO_RATE_TABLE` is indexed by quantizer bin, transform size, and
/// distortion bin; the estimate interpolates between the two distortion
/// bins surrounding `fast_distortion` with 8 bits of slope precision.
/// The result is clamped to be non-negative.
pub fn estimate_rate(qindex: u8, ts: TxSize, fast_distortion: u64) -> u64 {
  let bs_index = ts as usize;
  let q_bin_idx = (qindex as usize) / RDO_QUANT_DIV;
  // Clamp so bin_idx_up (below) always stays in range and differs from
  // bin_idx_down, keeping the slope denominator non-zero.
  let bin_idx_down =
    ((fast_distortion) / RATE_EST_BIN_SIZE).min((RDO_NUM_BINS - 2) as u64);
  let bin_idx_up = (bin_idx_down + 1).min((RDO_NUM_BINS - 1) as u64);
  let x0 = (bin_idx_down * RATE_EST_BIN_SIZE) as i64;
  let x1 = (bin_idx_up * RATE_EST_BIN_SIZE) as i64;
  let y0 = RDO_RATE_TABLE[q_bin_idx][bs_index][bin_idx_down as usize] as i64;
  let y1 = RDO_RATE_TABLE[q_bin_idx][bs_index][bin_idx_up as usize] as i64;
  // Fixed-point slope (<< 8) for the linear interpolation.
  let slope = ((y1 - y0) << 8) / (x1 - x0);
  (y0 + (((fast_distortion as i64 - x0) * slope) >> 8)).max(0) as u64
}
// The microbenchmarks perform better with inlining turned off
/// Computes the CDEF distortion (SSE weighted by an SSIM-like boost factor
/// derived from the variances of both 8x8 regions) between two 8x8
/// non-subsampled plane regions.
#[inline(never)]
fn cdef_dist_wxh_8x8<T: Pixel>(
  src1: &PlaneRegion<'_, T>, src2: &PlaneRegion<'_, T>, bit_depth: usize,
) -> RawDistortion {
  debug_assert!(src1.plane_cfg.xdec == 0);
  debug_assert!(src1.plane_cfg.ydec == 0);
  debug_assert!(src2.plane_cfg.xdec == 0);
  debug_assert!(src2.plane_cfg.ydec == 0);
  let coeff_shift = bit_depth - 8;
  // Sum into columns to improve auto-vectorization
  let mut sum_s_cols: [u16; 8] = [0; 8];
  let mut sum_d_cols: [u16; 8] = [0; 8];
  let mut sum_s2_cols: [u32; 8] = [0; 8];
  let mut sum_d2_cols: [u32; 8] = [0; 8];
  let mut sum_sd_cols: [u32; 8] = [0; 8];
  for j in 0..8 {
    let row1 = &src1[j][0..8];
    let row2 = &src2[j][0..8];
    for (sum_s, sum_d, sum_s2, sum_d2, sum_sd, s, d) in izip!(
      &mut sum_s_cols,
      &mut sum_d_cols,
      &mut sum_s2_cols,
      &mut sum_d2_cols,
      &mut sum_sd_cols,
      row1,
      row2
    ) {
      // Don't convert directly to u32 to allow better vectorization
      let s: u16 = u16::cast_from(*s);
      let d: u16 = u16::cast_from(*d);
      *sum_s += s;
      *sum_d += d;
      // Convert to u32 to avoid overflows when multiplying
      let s: u32 = s as u32;
      let d: u32 = d as u32;
      *sum_s2 += s * s;
      *sum_d2 += d * d;
      *sum_sd += s * d;
    }
  }
  // Sum together the sum of columns
  let sum_s: i64 =
    sum_s_cols.iter().map(|&a| u32::cast_from(a)).sum::<u32>() as i64;
  let sum_d: i64 =
    sum_d_cols.iter().map(|&a| u32::cast_from(a)).sum::<u32>() as i64;
  let sum_s2: i64 = sum_s2_cols.iter().sum::<u32>() as i64;
  let sum_d2: i64 = sum_d2_cols.iter().sum::<u32>() as i64;
  let sum_sd: i64 = sum_sd_cols.iter().sum::<u32>() as i64;
  // Use sums to calculate distortion
  // Variance * 64 of each region: sum(x^2) - mean(x)^2, with rounding.
  let svar = sum_s2 - ((sum_s * sum_s + 32) >> 6);
  let dvar = sum_d2 - ((sum_d * sum_d + 32) >> 6);
  let sse = (sum_d2 + sum_s2 - 2 * sum_sd) as f64;
  //The two constants were tuned for CDEF, but can probably be better tuned for use in general RDO
  let ssim_boost = (4033_f64 / 16_384_f64)
    * (svar + dvar + (16_384 << (2 * coeff_shift))) as f64
    / f64::sqrt(((16_265_089i64 << (4 * coeff_shift)) + svar * dvar) as f64);
  RawDistortion::new((sse * ssim_boost + 0.5_f64) as u64)
}
/// Accumulates the CDEF distortion over a `w` x `h` area in 8x8 tiles.
///
/// Both dimensions must be multiples of 8 and both regions unsubsampled.
/// `compute_bias` supplies a per-tile distortion weight given the tile's
/// area and block size.
#[allow(unused)]
pub fn cdef_dist_wxh<T: Pixel, F: Fn(Area, BlockSize) -> DistortionScale>(
  src1: &PlaneRegion<'_, T>, src2: &PlaneRegion<'_, T>, w: usize, h: usize,
  bit_depth: usize, compute_bias: F,
) -> Distortion {
  assert!(w & 0x7 == 0);
  assert!(h & 0x7 == 0);
  debug_assert!(src1.plane_cfg.xdec == 0);
  debug_assert!(src1.plane_cfg.ydec == 0);
  debug_assert!(src2.plane_cfg.xdec == 0);
  debug_assert!(src2.plane_cfg.ydec == 0);
  let mut total = Distortion::zero();
  for row in 0..(h / 8) {
    for col in 0..(w / 8) {
      let area =
        Area::StartingAt { x: (col * 8) as isize, y: (row * 8) as isize };
      let tile_dist = cdef_dist_wxh_8x8(
        &src1.subregion(area),
        &src2.subregion(area),
        bit_depth,
      );
      // cdef is always called on non-subsampled planes, so BLOCK_8X8 is
      // correct here.
      total += tile_dist * compute_bias(area, BlockSize::BLOCK_8X8);
    }
  }
  total
}
// Sum of Squared Error for a wxh block
/// Computes biased SSE between two `w` x `h` regions.
///
/// Distortion biases are defined on importance-block-sized areas of a
/// non-subsampled plane, so the SSE is accumulated in blocks of up to that
/// size (scaled down by the plane's subsampling) and each block's value is
/// weighted by `compute_bias`. `w` and `h` must be multiples of `MI_SIZE`.
pub fn sse_wxh<T: Pixel, F: Fn(Area, BlockSize) -> DistortionScale>(
  src1: &PlaneRegion<'_, T>, src2: &PlaneRegion<'_, T>, w: usize, h: usize,
  compute_bias: F,
) -> Distortion {
  assert!(w & (MI_SIZE - 1) == 0);
  assert!(h & (MI_SIZE - 1) == 0);
  // To bias the distortion correctly, compute it in blocks up to the size
  // importance block size in a non-subsampled plane.
  let imp_block_w = IMPORTANCE_BLOCK_SIZE.min(w);
  let imp_block_h = IMPORTANCE_BLOCK_SIZE.min(h);
  let imp_bsize = BlockSize::from_width_and_height(imp_block_w, imp_block_h);
  // Block dimensions in this (possibly subsampled) plane's pixels.
  let block_w = imp_block_w >> src1.plane_cfg.xdec;
  let block_h = imp_block_h >> src1.plane_cfg.ydec;
  let mut sse = Distortion::zero();
  for block_y in 0..h / block_h {
    for block_x in 0..w / block_w {
      let mut value = 0;
      for j in 0..block_h {
        let s1 = &src1[block_y * block_h + j]
          [block_x * block_w..(block_x + 1) * block_w];
        let s2 = &src2[block_y * block_h + j]
          [block_x * block_w..(block_x + 1) * block_w];
        let row_sse = s1
          .iter()
          .zip(s2)
          .map(|(&a, &b)| {
            let c = (i16::cast_from(a) - i16::cast_from(b)) as i32;
            (c * c) as u32
          })
          .sum::<u32>();
        value += row_sse as u64;
      }
      let bias = compute_bias(
        // StartingAt gives the correct block offset.
        Area::StartingAt {
          x: (block_x * block_w) as isize,
          y: (block_y * block_h) as isize,
        },
        imp_bsize,
      );
      sse += RawDistortion::new(value) * bias;
    }
  }
  sse
}
// Compute the pixel-domain distortion for an encode
/// Measures the pixel-domain distortion of the reconstruction against the
/// input for the block at `tile_bo`.
///
/// Luma uses cdef_dist when tuning for psychovisual quality (and the block
/// is at least 8x8), plain SSE otherwise. Chroma SSE is added unless
/// `luma_only` is set or chroma is unavailable. Each plane's contribution
/// is weighted by the per-plane distortion scale.
fn compute_distortion<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &TileStateMut<'_, T>, bsize: BlockSize,
  is_chroma_block: bool, tile_bo: TileBlockOffset, luma_only: bool,
) -> ScaledDistortion {
  let area = Area::BlockStartingAt { bo: tile_bo.0 };
  let input_region = ts.input_tile.planes[0].subregion(area);
  let rec_region = ts.rec.planes[0].subregion(area);
  let mut distortion = match fi.config.tune {
    Tune::Psychovisual if bsize.width() >= 8 && bsize.height() >= 8 => {
      cdef_dist_wxh(
        &input_region,
        &rec_region,
        bsize.width(),
        bsize.height(),
        fi.sequence.bit_depth,
        |bias_area, bsize| {
          distortion_scale(
            fi,
            input_region.subregion(bias_area).frame_block_offset(),
            bsize,
          )
        },
      )
    }
    Tune::Psnr | Tune::Psychovisual => sse_wxh(
      &input_region,
      &rec_region,
      bsize.width(),
      bsize.height(),
      |bias_area, bsize| {
        distortion_scale(
          fi,
          input_region.subregion(bias_area).frame_block_offset(),
          bsize,
        )
      },
    ),
  } * fi.dist_scale[0];
  if !luma_only {
    let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
    // Chroma dimensions rounded down to a whole number of 4x4 blocks.
    let mask = !(MI_SIZE - 1);
    let mut w_uv = (bsize.width() >> xdec) & mask;
    let mut h_uv = (bsize.height() >> ydec) & mask;
    // Sub-4x4 chroma still covers one 4x4 chroma block when this luma
    // block carries the chroma information.
    if (w_uv == 0 || h_uv == 0) && is_chroma_block {
      w_uv = MI_SIZE;
      h_uv = MI_SIZE;
    }
    // Add chroma distortion only when it is available
    if fi.config.chroma_sampling != ChromaSampling::Cs400
      && w_uv > 0
      && h_uv > 0
    {
      for p in 1..3 {
        let input_region = ts.input_tile.planes[p].subregion(area);
        let rec_region = ts.rec.planes[p].subregion(area);
        distortion += sse_wxh(
          &input_region,
          &rec_region,
          w_uv,
          h_uv,
          |bias_area, bsize| {
            distortion_scale(
              fi,
              input_region.subregion(bias_area).frame_block_offset(),
              bsize,
            )
          },
        ) * fi.dist_scale[p];
      }
    };
  }
  distortion
}
// Compute the transform-domain distortion for an encode
/// Returns the distortion for a block when RDO runs in the transform
/// domain.
///
/// For coded blocks the pre-computed `tx_dist` is used directly. For
/// skipped blocks there are no transform coefficients, so pixel-domain SSE
/// is computed instead (including chroma unless `luma_only`). Only valid
/// with `--tune Psnr`, as asserted.
fn compute_tx_distortion<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &TileStateMut<'_, T>, bsize: BlockSize,
  is_chroma_block: bool, tile_bo: TileBlockOffset, tx_dist: ScaledDistortion,
  skip: bool, luma_only: bool,
) -> ScaledDistortion {
  assert!(fi.config.tune == Tune::Psnr);
  let area = Area::BlockStartingAt { bo: tile_bo.0 };
  let input_region = ts.input_tile.planes[0].subregion(area);
  let rec_region = ts.rec.planes[0].subregion(area);
  let mut distortion = if skip {
    sse_wxh(
      &input_region,
      &rec_region,
      bsize.width(),
      bsize.height(),
      |bias_area, bsize| {
        distortion_scale(
          fi,
          input_region.subregion(bias_area).frame_block_offset(),
          bsize,
        )
      },
    ) * fi.dist_scale[0]
  } else {
    tx_dist
  };
  // Chroma is only measured here for skipped blocks; for coded blocks the
  // tx-domain distortion already accounts for all coded planes.
  if !luma_only && skip {
    let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
    // Chroma dimensions rounded down to a whole number of 4x4 blocks.
    let mask = !(MI_SIZE - 1);
    let mut w_uv = (bsize.width() >> xdec) & mask;
    let mut h_uv = (bsize.height() >> ydec) & mask;
    if (w_uv == 0 || h_uv == 0) && is_chroma_block {
      w_uv = MI_SIZE;
      h_uv = MI_SIZE;
    }
    // Add chroma distortion only when it is available
    if fi.config.chroma_sampling != ChromaSampling::Cs400
      && w_uv > 0
      && h_uv > 0
    {
      for p in 1..3 {
        let input_region = ts.input_tile.planes[p].subregion(area);
        let rec_region = ts.rec.planes[p].subregion(area);
        distortion += sse_wxh(
          &input_region,
          &rec_region,
          w_uv,
          h_uv,
          |bias_area, bsize| {
            distortion_scale(
              fi,
              input_region.subregion(bias_area).frame_block_offset(),
              bsize,
            )
          },
        ) * fi.dist_scale[p];
      }
    }
  }
  distortion
}
/// Compute a scaling factor to multiply the distortion of a block by,
/// this factor is determined using temporal RDO.
pub fn distortion_scale<T: Pixel>(
  fi: &FrameInvariants<T>, frame_bo: PlaneBlockOffset, bsize: BlockSize,
) -> DistortionScale {
  if fi.config.temporal_rdo() {
    // EncoderConfig::temporal_rdo() should always return false in situations
    // where distortion is computed on > 8x8 blocks, so we should never hit
    // this assert.
    assert!(bsize <= BlockSize::BLOCK_8X8);
    // Map the block position to importance-block coordinates and look up
    // the precomputed per-importance-block scale.
    let ib_x = frame_bo.0.x >> IMPORTANCE_BLOCK_TO_BLOCK_SHIFT;
    let ib_y = frame_bo.0.y >> IMPORTANCE_BLOCK_TO_BLOCK_SHIFT;
    fi.distortion_scales[ib_y * fi.w_in_imp_b + ib_x]
  } else {
    // Without temporal RDO every block is weighted equally.
    DistortionScale::default()
  }
}
/// Compute the distortion scale corresponding to the given temporal-RDO
/// propagation and intra costs for a block.
pub fn distortion_scale_for(
  propagate_cost: f64, intra_cost: f64,
) -> DistortionScale {
  // The mbtree paper \cite{mbtree} uses the following formula:
  //
  //     QP_delta = -strength * log2(1 + (propagate_cost / intra_cost))
  //
  // Since this is H.264, this corresponds to the following quantizer:
  //
  //     Q' = Q * 2^(QP_delta/6)
  //
  // Since lambda is proportial to Q^2, this means we want to minimize:
  //
  //     D + lambda' * R
  //       = D + 2^(QP_delta / 3) * lambda * R
  //
  // If we want to keep lambda fixed, we can instead scale distortion and
  // minimize:
  //
  //     D * scale + lambda * R
  //
  // where:
  //
  //     scale = 2^(QP_delta / -3)
  //           = (1 + (propagate_cost / intra_cost))^(strength / 3)
  //
  // The original paper empirically chooses strength = 2.0, but strength = 1.0
  // seems to work best in rav1e currently, this may have something to do with
  // the fact that they use 16x16 blocks whereas our "importance blocks" are
  // 8x8, but everything should be scale invariant here so that's weird.
  //
  // @article{mbtree,
  //   title={A novel macroblock-tree algorithm for high-performance
  //    optimization of dependent video coding in H.264/AVC},
  //   author={Garrett-Glaser, Jason},
  //   journal={Tech. Rep.},
  //   year={2009},
  //   url={https://pdfs.semanticscholar.org/032f/1ab7d9db385780a02eb2d579af8303b266d2.pdf}
  // }
  if intra_cost == 0. {
    // Nothing is known about this block; leave its distortion unscaled.
    return DistortionScale::default();
  }
  // Empirically chosen; see the derivation comment above.
  const STRENGTH: f64 = 1.0;
  let frac = (intra_cost + propagate_cost) / intra_cost;
  DistortionScale::new(frac.powf(STRENGTH / 3.0))
}
/// Fixed point arithmetic version of distortion scale
#[repr(transparent)]
#[derive(Copy, Clone)]
pub struct DistortionScale(u32);

/// An unweighted distortion measurement, before any scaling is applied.
#[repr(transparent)]
pub struct RawDistortion(u64);

/// A distortion weighted per-block by [`DistortionScale`].
#[repr(transparent)]
pub struct Distortion(u64);

/// A distortion after the per-plane `dist_scale` factor has been applied.
#[repr(transparent)]
pub struct ScaledDistortion(u64);

impl DistortionScale {
  /// Bits past the radix point
  const SHIFT: u32 = 12;
  /// Number of bits used. Determines the max value.
  /// 24 bits is likely excessive.
  const BITS: u32 = 24;

  /// Convert a floating-point scale to fixed point, rounding to nearest and
  /// saturating at the largest representable value.
  pub fn new(scale: f64) -> Self {
    let limit = ((1u64 << Self::BITS) - 1) as f64;
    let fixed = scale * (1 << Self::SHIFT) as f64 + 0.5;
    Self(fixed.min(limit) as u32)
  }

  /// Multiply, round and shift
  /// Internal implementation, so don't use multiply trait.
  fn mul_u64(self, dist: u64) -> u64 {
    // Add half an ULP before shifting so the result rounds to nearest.
    let rounding = 1u64 << (Self::SHIFT - 1);
    (u64::from(self.0) * dist + rounding) >> Self::SHIFT
  }
}

// Default value for DistortionScale is a fixed point 1
impl Default for DistortionScale {
  fn default() -> Self {
    Self(1 << Self::SHIFT)
  }
}

impl fmt::Debug for DistortionScale {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    // Print as the floating-point value this fixed-point scale represents.
    write!(f, "{}", f64::from(*self))
  }
}

impl From<DistortionScale> for f64 {
  fn from(scale: DistortionScale) -> Self {
    f64::from(scale.0) / f64::from(1u32 << DistortionScale::SHIFT)
  }
}

impl RawDistortion {
  pub fn new(dist: u64) -> Self {
    Self(dist)
  }
}

impl std::ops::Mul<DistortionScale> for RawDistortion {
  type Output = Distortion;
  // Weighting a raw distortion by a scale yields a `Distortion`.
  fn mul(self, rhs: DistortionScale) -> Distortion {
    Distortion(rhs.mul_u64(self.0))
  }
}

impl Distortion {
  pub const fn zero() -> Self {
    Self(0)
  }
}

impl std::ops::Mul<f64> for Distortion {
  type Output = ScaledDistortion;
  // Applying the per-plane `dist_scale` factor yields a `ScaledDistortion`.
  fn mul(self, rhs: f64) -> ScaledDistortion {
    ScaledDistortion((self.0 as f64 * rhs) as u64)
  }
}

impl std::ops::AddAssign for Distortion {
  fn add_assign(&mut self, other: Self) {
    self.0 += other.0;
  }
}

impl ScaledDistortion {
  pub const fn zero() -> Self {
    Self(0)
  }
}

impl std::ops::AddAssign for ScaledDistortion {
  fn add_assign(&mut self, other: Self) {
    self.0 += other.0;
  }
}
/// Combine a rate (in fractional bits, `OD_BITRES` bits past the radix
/// point) and a scaled distortion into a single RD cost using `fi.lambda`.
pub fn compute_rd_cost<T: Pixel>(
  fi: &FrameInvariants<T>, rate: u32, distortion: ScaledDistortion,
) -> f64 {
  // Convert the fixed-point rate into whole bits.
  let bits = rate as f64 / (1 << OD_BITRES) as f64;
  distortion.0 as f64 + fi.lambda * bits
}
/// RDO-based selection of the transform size and type for a block.
///
/// Starts from the largest rectangular transform allowed for `bsize`
/// (optionally pre-split once for inter blocks) and, when transform-size RDO
/// is enabled, also evaluates up to two levels of split sizes, keeping the
/// (size, type) pair with the lowest RD cost.
pub fn rdo_tx_size_type<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, bsize: BlockSize, tile_bo: TileBlockOffset,
  luma_mode: PredictionMode, ref_frames: [RefType; 2], mvs: [MotionVector; 2],
  skip: bool,
) -> (TxSize, TxType) {
  let is_inter = !luma_mode.is_intra();
  let mut tx_size = max_txsize_rect_lookup[bsize as usize];
  if fi.enable_inter_txfm_split && is_inter && !skip {
    tx_size = sub_tx_size_map[tx_size as usize]; // Always choose one level split size
  }
  let mut best_tx_type = TxType::DCT_DCT;
  let mut best_tx_size = tx_size;
  let mut best_rd = std::f64::MAX;
  // Transform-size search only applies to intra blocks.
  let do_rdo_tx_size =
    fi.tx_mode_select && fi.config.speed_settings.rdo_tx_decision && !is_inter;
  // Search the initial size plus up to two split levels when enabled.
  let rdo_tx_depth = if do_rdo_tx_size { 2 } else { 0 };
  let mut cw_checkpoint = None;
  for _ in 0..=rdo_tx_depth {
    let tx_set = get_tx_set(tx_size, is_inter, fi.use_reduced_tx_set);
    // Transform-type search requires a set richer than DCT-only.
    let do_rdo_tx_type = tx_set > TxSet::TX_SET_DCTONLY
      && fi.config.speed_settings.rdo_tx_decision
      && !is_inter
      && !skip;
    if !do_rdo_tx_size && !do_rdo_tx_type {
      return (best_tx_size, best_tx_type);
    };
    if cw_checkpoint.is_none() {
      // Only runs on the first iteration of the loop.
      // Avoids creating the checkpoint if we early exit above.
      cw_checkpoint = Some(cw.checkpoint());
    }
    let tx_types =
      if do_rdo_tx_type { RAV1E_TX_TYPES } else { &[TxType::DCT_DCT] };
    // Luma plane transform type decision
    let (tx_type, rd_cost) = rdo_tx_type_decision(
      fi, ts, cw, luma_mode, ref_frames, mvs, bsize, tile_bo, tx_size, tx_set,
      tx_types,
    );
    if rd_cost < best_rd {
      best_tx_size = tx_size;
      best_tx_type = tx_type;
      best_rd = rd_cost;
    }
    debug_assert!(tx_size.width_log2() <= bsize.width_log2());
    debug_assert!(tx_size.height_log2() <= bsize.height_log2());
    debug_assert!(
      tx_size.sqr() <= TxSize::TX_32X32 || tx_type == TxType::DCT_DCT
    );
    let next_tx_size = sub_tx_size_map[tx_size as usize];
    // Undo the trial encode before evaluating the next size.
    cw.rollback(cw_checkpoint.as_ref().unwrap());
    if next_tx_size == tx_size {
      // No further split exists for this size.
      break;
    } else {
      tx_size = next_tx_size;
    };
  }
  (best_tx_size, best_tx_type)
}
#[inline]
/// Evaluate one luma mode against a set of chroma modes (and segment
/// indices), updating `best` in place whenever a trial encode produces a
/// lower RD cost.
///
/// For inter modes, a skip=true pass runs first; if it yields zero
/// distortion the skip=false pass is elided entirely.
fn luma_chroma_mode_rdo<T: Pixel>(
  luma_mode: PredictionMode, fi: &FrameInvariants<T>, bsize: BlockSize,
  tile_bo: TileBlockOffset, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, rdo_type: RDOType,
  cw_checkpoint: &ContextWriterCheckpoint, best: &mut PartitionParameters,
  mvs: [MotionVector; 2], ref_frames: [RefType; 2],
  mode_set_chroma: &[PredictionMode], luma_mode_is_intra: bool,
  mode_context: usize, mv_stack: &ArrayVec<[CandidateMV; 9]>,
  angle_delta: AngleDelta,
) {
  let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
  let is_chroma_block =
    has_chroma(tile_bo, bsize, xdec, ydec, fi.sequence.chroma_sampling);
  // Find the best chroma prediction mode for the current luma prediction mode
  let mut chroma_rdo = |skip: bool| -> bool {
    // Reports whether the winning candidate of this pass had zero distortion.
    let mut zero_distortion = false;
    // If skip is true or segmentation is turned off, sidx is not coded.
    let sidx_range = if skip || !fi.enable_segmentation {
      0..=0
    } else if fi.base_q_idx as i16
      + ts.segmentation.data[2][SegLvl::SEG_LVL_ALT_Q as usize]
      < 1
    {
      // Avoid a segment whose ALT_Q delta would drive the quantizer below 1.
      0..=1
    } else {
      0..=2
    };
    for sidx in sidx_range {
      cw.bc.blocks.set_segmentation_idx(tile_bo, bsize, sidx);
      let (tx_size, tx_type) = rdo_tx_size_type(
        fi, ts, cw, bsize, tile_bo, luma_mode, ref_frames, mvs, skip,
      );
      for &chroma_mode in mode_set_chroma.iter() {
        // Trial encode into a counting writer to measure the rate.
        let wr = &mut WriterCounter::new();
        let tell = wr.tell_frac();
        if bsize >= BlockSize::BLOCK_8X8 && bsize.is_sqr() {
          cw.write_partition(
            wr,
            tile_bo,
            PartitionType::PARTITION_NONE,
            bsize,
          );
        }
        // TODO(yushin): luma and chroma would have different decision based on chroma format
        let need_recon_pixel =
          luma_mode_is_intra && tx_size.block_size() != bsize;
        encode_block_pre_cdef(&fi.sequence, ts, cw, wr, bsize, tile_bo, skip);
        let (has_coeff, tx_dist) = encode_block_post_cdef(
          fi,
          ts,
          cw,
          wr,
          luma_mode,
          chroma_mode,
          angle_delta,
          ref_frames,
          mvs,
          bsize,
          tile_bo,
          skip,
          CFLParams::default(),
          tx_size,
          tx_type,
          mode_context,
          mv_stack,
          rdo_type,
          need_recon_pixel,
          false,
        );
        let rate = wr.tell_frac() - tell;
        // Prefer the cheaper tx-domain distortion when it is valid here.
        let distortion = if fi.use_tx_domain_distortion && !need_recon_pixel {
          compute_tx_distortion(
            fi,
            ts,
            bsize,
            is_chroma_block,
            tile_bo,
            tx_dist,
            skip,
            false,
          )
        } else {
          compute_distortion(fi, ts, bsize, is_chroma_block, tile_bo, false)
        };
        let is_zero_dist = distortion.0 == 0;
        let rd = compute_rd_cost(fi, rate, distortion);
        if rd < best.rd_cost {
          //if rd < best.rd_cost || luma_mode == PredictionMode::NEW_NEWMV {
          best.rd_cost = rd;
          best.pred_mode_luma = luma_mode;
          best.pred_mode_chroma = chroma_mode;
          best.angle_delta = angle_delta;
          best.ref_frames = ref_frames;
          best.mvs = mvs;
          best.skip = skip;
          best.has_coeff = has_coeff;
          best.tx_size = tx_size;
          best.tx_type = tx_type;
          best.sidx = sidx;
          zero_distortion = is_zero_dist;
        }
        // Roll back the trial encode so the next candidate starts clean.
        cw.rollback(cw_checkpoint);
      }
    }
    zero_distortion
  };
  // Don't skip when using intra modes
  let zero_distortion =
    if !luma_mode_is_intra { chroma_rdo(true) } else { false };
  // early skip
  if !zero_distortion {
    chroma_rdo(false);
  }
}
// RDO-based mode decision
/// Choose the best prediction mode (inter and/or intra, plus chroma mode,
/// CFL parameters, angle deltas, transform size/type and segment index) for
/// the block at `tile_bo`, returning the winning `PartitionParameters`.
pub fn rdo_mode_decision<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, bsize: BlockSize, tile_bo: TileBlockOffset,
  pmv_idxs: (usize, usize), inter_cfg: &InterConfig,
) -> PartitionParameters {
  let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
  let cw_checkpoint = cw.checkpoint();
  // Pick how rate and distortion are estimated for the trial encodes.
  let rdo_type = if fi.use_tx_domain_rate {
    RDOType::TxDistEstRate
  } else if fi.use_tx_domain_distortion {
    RDOType::TxDistRealRate
  } else {
    RDOType::PixelDistRealRate
  };
  // Inter search first (when the frame type allows inter prediction).
  let mut best = if fi.frame_type.has_inter() {
    inter_frame_rdo_mode_decision(
      fi,
      ts,
      cw,
      bsize,
      tile_bo,
      pmv_idxs,
      inter_cfg,
      &cw_checkpoint,
      rdo_type,
    )
  } else {
    PartitionParameters::default()
  };
  let is_chroma_block =
    has_chroma(tile_bo, bsize, xdec, ydec, fi.sequence.chroma_sampling);
  // Only try intra modes if the inter winner was not a zero-coeff skip.
  if !best.skip {
    best = intra_frame_rdo_mode_decision(
      fi,
      ts,
      cw,
      bsize,
      tile_bo,
      &cw_checkpoint,
      rdo_type,
      best,
      is_chroma_block,
    );
  }
  // Additionally try chroma-from-luma for an intra winner where allowed.
  if best.pred_mode_luma.is_intra() && is_chroma_block && bsize.cfl_allowed() {
    cw.bc.blocks.set_segmentation_idx(tile_bo, bsize, best.sidx);
    let chroma_mode = PredictionMode::UV_CFL_PRED;
    let cw_checkpoint = cw.checkpoint();
    let wr: &mut dyn Writer = &mut WriterCounter::new();
    let angle_delta = AngleDelta { y: best.angle_delta.y, uv: 0 };
    // Reconstruct luma so rdo_cfl_alpha below can derive the AC contribution.
    write_tx_blocks(
      fi,
      ts,
      cw,
      wr,
      best.pred_mode_luma,
      best.pred_mode_luma,
      angle_delta,
      tile_bo,
      bsize,
      best.tx_size,
      best.tx_type,
      false,
      CFLParams::default(),
      true,
      rdo_type,
      true,
    );
    cw.rollback(&cw_checkpoint);
    if fi.sequence.chroma_sampling != ChromaSampling::Cs400 {
      if let Some(cfl) = rdo_cfl_alpha(ts, tile_bo, bsize, fi) {
        let wr: &mut dyn Writer = &mut WriterCounter::new();
        let tell = wr.tell_frac();
        encode_block_pre_cdef(
          &fi.sequence,
          ts,
          cw,
          wr,
          bsize,
          tile_bo,
          best.skip,
        );
        let (has_coeff, _) = encode_block_post_cdef(
          fi,
          ts,
          cw,
          wr,
          best.pred_mode_luma,
          chroma_mode,
          angle_delta,
          best.ref_frames,
          best.mvs,
          bsize,
          tile_bo,
          best.skip,
          cfl,
          best.tx_size,
          best.tx_type,
          0,
          &[],
          rdo_type,
          true, // For CFL, luma should be always reconstructed.
          false,
        );
        let rate = wr.tell_frac() - tell;
        // For CFL, tx-domain distortion is not an option.
        let distortion =
          compute_distortion(fi, ts, bsize, is_chroma_block, tile_bo, false);
        let rd = compute_rd_cost(fi, rate, distortion);
        if rd < best.rd_cost {
          best.rd_cost = rd;
          best.pred_mode_chroma = chroma_mode;
          best.angle_delta = angle_delta;
          best.has_coeff = has_coeff;
          best.pred_cfl_params = cfl;
        }
        cw.rollback(&cw_checkpoint);
      }
    }
  }
  // Commit the winning mode to the block context before returning.
  cw.bc.blocks.set_mode(tile_bo, bsize, best.pred_mode_luma);
  cw.bc.blocks.set_ref_frames(tile_bo, bsize, best.ref_frames);
  cw.bc.blocks.set_motion_vectors(tile_bo, bsize, best.mvs);
  assert!(best.rd_cost >= 0_f64);
  PartitionParameters {
    bo: tile_bo,
    bsize,
    pred_mode_luma: best.pred_mode_luma,
    pred_mode_chroma: best.pred_mode_chroma,
    pred_cfl_params: best.pred_cfl_params,
    angle_delta: best.angle_delta,
    ref_frames: best.ref_frames,
    mvs: best.mvs,
    rd_cost: best.rd_cost,
    skip: best.skip,
    has_coeff: best.has_coeff,
    tx_size: best.tx_size,
    tx_type: best.tx_type,
    sidx: best.sidx,
  }
}
/// Inter-mode RDO search: build the candidate (mode, reference, MV) sets,
/// optionally pre-rank them by SATD, then run full RDO on the top
/// `num_modes_rdo` candidates and return the best parameters found.
fn inter_frame_rdo_mode_decision<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, bsize: BlockSize, tile_bo: TileBlockOffset,
  pmv_idxs: (usize, usize), inter_cfg: &InterConfig,
  cw_checkpoint: &ContextWriterCheckpoint, rdo_type: RDOType,
) -> PartitionParameters {
  let mut best = PartitionParameters::default();
  // we can never have more than 7 reference frame sets
  let mut ref_frames_set = ArrayVec::<[_; 7]>::new();
  // again, max of 7 ref slots
  let mut ref_slot_set = ArrayVec::<[_; 7]>::new();
  // our implementation never returns more than 3 at the moment
  let mut mvs_from_me = ArrayVec::<[_; 3]>::new();
  // Indices (into ref_frames_set) of the first forward / backward reference,
  // used later to form a compound candidate.
  let mut fwdref = None;
  let mut bwdref = None;
  for i in inter_cfg.allowed_ref_frames().iter().copied() {
    // Don't search LAST3 since it's used only for probs
    if i == LAST3_FRAME {
      continue;
    }
    // Deduplicate references that map to the same physical slot.
    if !ref_slot_set.contains(&fi.ref_frames[i.to_index()]) {
      if fwdref.is_none() && i.is_fwd_ref() {
        fwdref = Some(ref_frames_set.len());
      }
      if bwdref.is_none() && i.is_bwd_ref() {
        bwdref = Some(ref_frames_set.len());
      }
      ref_frames_set.push([i, NONE_FRAME]);
      let slot_idx = fi.ref_frames[i.to_index()];
      ref_slot_set.push(slot_idx);
    }
  }
  assert!(!ref_frames_set.is_empty());
  let mut inter_mode_set = ArrayVec::<[(PredictionMode, usize); 20]>::new();
  let mut mvs_set = ArrayVec::<[[MotionVector; 2]; 20]>::new();
  let mut satds = ArrayVec::<[u32; 20]>::new();
  let mut mv_stacks = ArrayVec::<[_; 20]>::new();
  let mut mode_contexts = ArrayVec::<[_; 7]>::new();
  let pmvs = ts.half_res_pmvs[pmv_idxs.0][pmv_idxs.1];
  let motion_estimation = if fi.config.speed_settings.diamond_me {
    crate::me::DiamondSearch::motion_estimation
  } else {
    crate::me::FullSearch::motion_estimation
  };
  // Run motion estimation for each single-reference set and collect the
  // applicable modes for it.
  for (i, &ref_frames) in ref_frames_set.iter().enumerate() {
    let mut mv_stack = ArrayVec::<[CandidateMV; 9]>::new();
    mode_contexts.push(cw.find_mvrefs(
      tile_bo,
      ref_frames,
      &mut mv_stack,
      bsize,
      fi,
      false,
    ));
    // Seed the search with up to two predicted MVs from the candidate stack.
    let mut pmv = [MotionVector::default(); 2];
    if !mv_stack.is_empty() {
      pmv[0] = mv_stack[0].this_mv;
    }
    if mv_stack.len() > 1 {
      pmv[1] = mv_stack[1].this_mv;
    }
    let ref_slot = ref_slot_set[i] as usize;
    let cmv = pmvs[ref_slot].unwrap_or_default();
    let b_me =
      motion_estimation(fi, ts, bsize, tile_bo, ref_frames[0], cmv, pmv);
    // Cache the result for reuse by smaller blocks in top-down encoding.
    if !fi.config.speed_settings.encode_bottomup
      && (bsize == BlockSize::BLOCK_32X32 || bsize == BlockSize::BLOCK_64X64)
    {
      ts.half_res_pmvs[pmv_idxs.0][pmv_idxs.1][ref_slot] = Some(b_me);
    };
    mvs_from_me.push([b_me, MotionVector::default()]);
    for &x in RAV1E_INTER_MODES_MINIMAL {
      inter_mode_set.push((x, i));
    }
    // NEAR/GLOBAL candidates require enough entries in the MV stack.
    if !mv_stack.is_empty() {
      inter_mode_set.push((PredictionMode::NEAR0MV, i));
    }
    if mv_stack.len() >= 2 {
      inter_mode_set.push((PredictionMode::GLOBALMV, i));
    }
    let include_near_mvs = fi.config.speed_settings.include_near_mvs;
    if include_near_mvs {
      if mv_stack.len() >= 3 {
        inter_mode_set.push((PredictionMode::NEAR1MV, i));
      }
      if mv_stack.len() >= 4 {
        inter_mode_set.push((PredictionMode::NEAR2MV, i));
      }
    }
    // Only add NEWMV when the estimated MV is nonzero and not already
    // covered by one of the stack-based candidates above.
    let same_row_col = |x: &CandidateMV| {
      x.this_mv.row == mvs_from_me[i][0].row
        && x.this_mv.col == mvs_from_me[i][0].col
    };
    if !mv_stack
      .iter()
      .take(if include_near_mvs { 4 } else { 2 })
      .any(same_row_col)
      && (mvs_from_me[i][0].row != 0 || mvs_from_me[i][0].col != 0)
    {
      inter_mode_set.push((PredictionMode::NEWMV, i));
    }
    mv_stacks.push(mv_stack);
  }
  let sz = bsize.width_mi().min(bsize.height_mi());
  // To use non single reference modes, block width and height must be greater than 4.
  if fi.reference_mode != ReferenceMode::SINGLE && sz >= 2 {
    // Adding compound candidate
    if let Some(r0) = fwdref {
      if let Some(r1) = bwdref {
        let ref_frames = [ref_frames_set[r0][0], ref_frames_set[r1][0]];
        ref_frames_set.push(ref_frames);
        let mv0 = mvs_from_me[r0][0];
        let mv1 = mvs_from_me[r1][0];
        mvs_from_me.push([mv0, mv1]);
        let mut mv_stack = ArrayVec::<[CandidateMV; 9]>::new();
        mode_contexts.push(cw.find_mvrefs(
          tile_bo,
          ref_frames,
          &mut mv_stack,
          bsize,
          fi,
          true,
        ));
        for &x in RAV1E_INTER_COMPOUND_MODES {
          inter_mode_set.push((x, ref_frames_set.len() - 1));
        }
        mv_stacks.push(mv_stack);
      }
    }
  }
  let num_modes_rdo = if fi.config.speed_settings.prediction_modes
    >= PredictionModesSetting::ComplexAll
  {
    inter_mode_set.len()
  } else {
    9 // This number is determined by AWCY test
  };
  inter_mode_set.iter().for_each(|&(luma_mode, i)| {
    // Resolve the concrete MV pair implied by this prediction mode.
    let mvs = match luma_mode {
      PredictionMode::NEWMV | PredictionMode::NEW_NEWMV => mvs_from_me[i],
      PredictionMode::NEARESTMV | PredictionMode::NEAREST_NEARESTMV => {
        if !mv_stacks[i].is_empty() {
          [mv_stacks[i][0].this_mv, mv_stacks[i][0].comp_mv]
        } else {
          [MotionVector::default(); 2]
        }
      }
      PredictionMode::NEAR0MV | PredictionMode::NEAR_NEARMV => {
        if mv_stacks[i].len() > 1 {
          [mv_stacks[i][1].this_mv, mv_stacks[i][1].comp_mv]
        } else {
          [MotionVector::default(); 2]
        }
      }
      PredictionMode::NEAR1MV | PredictionMode::NEAR2MV => [
        mv_stacks[i]
          [luma_mode as usize - PredictionMode::NEAR0MV as usize + 1]
          .this_mv,
        mv_stacks[i]
          [luma_mode as usize - PredictionMode::NEAR0MV as usize + 1]
          .comp_mv,
      ],
      PredictionMode::NEAREST_NEWMV => {
        [mv_stacks[i][0].this_mv, mvs_from_me[i][1]]
      }
      PredictionMode::NEW_NEARESTMV => {
        [mvs_from_me[i][0], mv_stacks[i][0].comp_mv]
      }
      PredictionMode::GLOBALMV | PredictionMode::GLOBAL_GLOBALMV => {
        [MotionVector::default(); 2]
      }
      _ => {
        unimplemented!();
      }
    };
    mvs_set.push(mvs);
    // Calculate SATD for each mode
    // (only needed when the candidate list will be pruned below).
    if num_modes_rdo != inter_mode_set.len() {
      let tile_rect = ts.tile_rect();
      let rec = &mut ts.rec.planes[0];
      let po = tile_bo.plane_offset(rec.plane_cfg);
      let mut rec_region =
        rec.subregion_mut(Area::BlockStartingAt { bo: tile_bo.0 });
      luma_mode.predict_inter(
        fi,
        tile_rect,
        0,
        po,
        &mut rec_region,
        bsize.width(),
        bsize.height(),
        ref_frames_set[i],
        mvs,
      );
      let plane_org = ts.input_tile.planes[0]
        .subregion(Area::BlockStartingAt { bo: tile_bo.0 });
      let plane_ref = rec_region.as_const();
      let satd = get_satd(
        &plane_org,
        &plane_ref,
        bsize,
        fi.sequence.bit_depth,
        fi.cpu_feature_level,
      );
      satds.push(satd);
    } else {
      satds.push(0);
    }
  });
  // Rank candidates by SATD when we will only RDO a subset of them.
  let mut sorted =
    izip!(inter_mode_set, mvs_set, satds).collect::<ArrayVec<[_; 20]>>();
  if num_modes_rdo != sorted.len() {
    sorted.sort_by_key(|((_mode, _i), _mvs, satd)| *satd);
  }
  sorted.iter().take(num_modes_rdo).for_each(
    |&((luma_mode, i), mvs, _satd)| {
      let mode_set_chroma = ArrayVec::from([luma_mode]);
      luma_chroma_mode_rdo(
        luma_mode,
        fi,
        bsize,
        tile_bo,
        ts,
        cw,
        rdo_type,
        cw_checkpoint,
        &mut best,
        mvs,
        ref_frames_set[i],
        &mode_set_chroma,
        false,
        mode_contexts[i],
        &mv_stacks[i],
        AngleDelta::default(),
      );
    },
  );
  best
}
/// Intra-mode RDO search: rank intra modes first by CDF probability and then
/// (for the lower half of the list) by prediction SATD, run full RDO on the
/// top `num_modes_rdo` modes, and finally refine angle deltas for the best
/// directional mode. Updates and returns `best`.
fn intra_frame_rdo_mode_decision<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, bsize: BlockSize, tile_bo: TileBlockOffset,
  cw_checkpoint: &ContextWriterCheckpoint, rdo_type: RDOType,
  mut best: PartitionParameters, is_chroma_block: bool,
) -> PartitionParameters {
  let mut modes = ArrayVec::<[_; INTRA_MODES]>::new();
  // Reduce number of prediction modes at higher speed levels
  let num_modes_rdo = if (fi.frame_type == FrameType::KEY
    && fi.config.speed_settings.prediction_modes
      >= PredictionModesSetting::ComplexKeyframes)
    || (fi.frame_type.has_inter()
      && fi.config.speed_settings.prediction_modes
        >= PredictionModesSetting::ComplexAll)
  {
    7
  } else {
    3
  };
  let intra_mode_set = RAV1E_INTRA_MODES;
  // Find mode with lowest rate cost
  {
    // Derive a per-mode probability from the entropy coder's CDF; sorting by
    // it puts the cheapest-to-signal modes first.
    let probs_all = if fi.frame_type.has_inter() {
      cw.get_cdf_intra_mode(bsize)
    } else {
      cw.get_cdf_intra_mode_kf(tile_bo)
    }
    .iter()
    .take(INTRA_MODES)
    .scan(32768, |z, &a| {
      let d = *z - a;
      *z = a;
      Some(!d)
    })
    .collect::<ArrayVec<[_; INTRA_MODES]>>();
    modes.try_extend_from_slice(intra_mode_set).unwrap();
    modes.sort_by_key(|&a| probs_all[a as usize]);
  }
  // If tx partition (i.e. fi.tx_mode_select) is enabled, the below intra prediction screening
  // may be improved by emulating prediction for each tx block.
  {
    let satds = {
      // FIXME: If tx partition is used, this whole sads block should be fixed
      let tx_size = bsize.tx_size();
      let edge_buf = {
        let rec = &ts.rec.planes[0].as_const();
        let po = tile_bo.plane_offset(rec.plane_cfg);
        // FIXME: If tx partition is used, get_intra_edges() should be called for each tx block
        get_intra_edges(
          rec,
          tile_bo,
          0,
          0,
          bsize,
          po,
          tx_size,
          fi.sequence.bit_depth,
          None,
          fi.sequence.enable_intra_edge_filter,
          IntraParam::None,
        )
      };
      let ief_params = if fi.sequence.enable_intra_edge_filter {
        let above_block_info = ts.above_block_info(tile_bo, 0);
        let left_block_info = ts.left_block_info(tile_bo, 0);
        Some(IntraEdgeFilterParameters::new(
          0,
          above_block_info,
          left_block_info,
        ))
      } else {
        None
      };
      // Measure prediction SATD only for the lower-ranked half of the list;
      // the top half keeps its probability-based order.
      let mut satds_all = [0; INTRA_MODES];
      for &luma_mode in modes.iter().skip(num_modes_rdo / 2) {
        let tile_rect = ts.tile_rect();
        let rec = &mut ts.rec.planes[0];
        let mut rec_region =
          rec.subregion_mut(Area::BlockStartingAt { bo: tile_bo.0 });
        // FIXME: If tx partition is used, luma_mode.predict_intra() should be called for each tx block
        luma_mode.predict_intra(
          tile_rect,
          &mut rec_region,
          tx_size,
          fi.sequence.bit_depth,
          &[0i16; 2],
          IntraParam::None,
          if luma_mode.is_directional() { ief_params } else { None },
          &edge_buf,
          fi.cpu_feature_level,
        );
        let plane_org = ts.input_tile.planes[0]
          .subregion(Area::BlockStartingAt { bo: tile_bo.0 });
        let plane_ref = rec_region.as_const();
        satds_all[luma_mode as usize] = get_satd(
          &plane_org,
          &plane_ref,
          tx_size.block_size(),
          fi.sequence.bit_depth,
          fi.cpu_feature_level,
        );
      }
      satds_all
    };
    modes[num_modes_rdo / 2..].sort_by_key(|&a| satds[a as usize]);
  }
  debug_assert!(num_modes_rdo >= 1);
  // Full RDO on the top-ranked modes; non-DC modes also try DC for chroma.
  modes.iter().take(num_modes_rdo).for_each(|&luma_mode| {
    let mvs = [MotionVector::default(); 2];
    let ref_frames = [INTRA_FRAME, NONE_FRAME];
    let mut mode_set_chroma = ArrayVec::<[_; 2]>::new();
    mode_set_chroma.push(luma_mode);
    if is_chroma_block && luma_mode != PredictionMode::DC_PRED {
      mode_set_chroma.push(PredictionMode::DC_PRED);
    }
    luma_chroma_mode_rdo(
      luma_mode,
      fi,
      bsize,
      tile_bo,
      ts,
      cw,
      rdo_type,
      cw_checkpoint,
      &mut best,
      mvs,
      ref_frames,
      &mode_set_chroma,
      true,
      0,
      &ArrayVec::<[CandidateMV; 9]>::new(),
      AngleDelta::default(),
    );
  });
  if fi.config.speed_settings.fine_directional_intra
    && bsize >= BlockSize::BLOCK_8X8
  {
    // Find the best angle delta for the current best prediction mode
    let luma_deltas = best.pred_mode_luma.angle_delta_count();
    let chroma_deltas = best.pred_mode_chroma.angle_delta_count();
    let mvs = [MotionVector::default(); 2];
    let ref_frames = [INTRA_FRAME, NONE_FRAME];
    let mode_set_chroma = [best.pred_mode_chroma];
    let mv_stack = ArrayVec::<[_; 9]>::new();
    let mut best_angle_delta_y = best.angle_delta.y;
    // Re-run RDO for an (y, uv) delta pair unless it is already the best;
    // returns the luma delta of the winner so far.
    let mut angle_delta_rdo = |y, uv| -> i8 {
      if best.angle_delta.y != y || best.angle_delta.uv != uv {
        luma_chroma_mode_rdo(
          best.pred_mode_luma,
          fi,
          bsize,
          tile_bo,
          ts,
          cw,
          rdo_type,
          cw_checkpoint,
          &mut best,
          mvs,
          ref_frames,
          &mode_set_chroma,
          true,
          0,
          &mv_stack,
          AngleDelta { y, uv },
        );
      }
      best.angle_delta.y
    };
    // First sweep luma deltas (chroma tracking when it has deltas), then
    // sweep chroma deltas with the winning luma delta fixed.
    for i in 0..luma_deltas {
      let angle_delta_y =
        if luma_deltas == 1 { 0 } else { i - MAX_ANGLE_DELTA as i8 };
      let angle_delta_uv = if chroma_deltas == 1 { 0 } else { angle_delta_y };
      best_angle_delta_y = angle_delta_rdo(angle_delta_y, angle_delta_uv);
    }
    for j in 0..chroma_deltas {
      let angle_delta_uv =
        if chroma_deltas == 1 { 0 } else { j - MAX_ANGLE_DELTA as i8 };
      angle_delta_rdo(best_angle_delta_y, angle_delta_uv);
    }
  }
  best
}
/// Search the best CFL (chroma-from-luma) alpha for each chroma plane by
/// minimizing the prediction SSE, returning `None` when both planes prefer
/// alpha == 0 (i.e. CFL brings no benefit).
pub fn rdo_cfl_alpha<T: Pixel>(
  ts: &mut TileStateMut<'_, T>, tile_bo: TileBlockOffset, bsize: BlockSize,
  fi: &FrameInvariants<T>,
) -> Option<CFLParams> {
  let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
  let uv_tx_size = bsize.largest_chroma_tx_size(xdec, ydec);
  debug_assert!(bsize.subsampled_size(xdec, ydec) == uv_tx_size.block_size());
  // Buffer for the luma AC contribution; filled entirely by luma_ac() below,
  // so the uninitialized contents are never read.
  let mut ac: Aligned<[i16; 32 * 32]> = Aligned::uninitialized();
  luma_ac(&mut ac.data, ts, tile_bo, bsize);
  // One best alpha per chroma plane (p = 1, 2).
  let best_alpha: ArrayVec<[i16; 2]> = (1..3)
    .map(|p| {
      let &PlaneConfig { xdec, ydec, .. } = ts.rec.planes[p].plane_cfg;
      let tile_rect = ts.tile_rect().decimated(xdec, ydec);
      let rec = &mut ts.rec.planes[p];
      let input = &ts.input_tile.planes[p];
      let po = tile_bo.plane_offset(rec.plane_cfg);
      let edge_buf = get_intra_edges(
        &rec.as_const(),
        tile_bo,
        0,
        0,
        bsize,
        po,
        uv_tx_size,
        fi.sequence.bit_depth,
        Some(PredictionMode::UV_CFL_PRED),
        fi.sequence.enable_intra_edge_filter,
        IntraParam::None,
      );
      // Predict with the given alpha and return the unweighted SSE.
      let mut alpha_cost = |alpha: i16| -> u64 {
        let mut rec_region =
          rec.subregion_mut(Area::BlockStartingAt { bo: tile_bo.0 });
        PredictionMode::UV_CFL_PRED.predict_intra(
          tile_rect,
          &mut rec_region,
          uv_tx_size,
          fi.sequence.bit_depth,
          &ac.data,
          IntraParam::Alpha(alpha),
          None,
          &edge_buf,
          fi.cpu_feature_level,
        );
        sse_wxh(
          &input.subregion(Area::BlockStartingAt { bo: tile_bo.0 }),
          &rec_region.as_const(),
          uv_tx_size.width(),
          uv_tx_size.height(),
          |_, _| DistortionScale::default(), // We're not doing RDO here.
        )
        .0
      };
      // Search alpha = 0, then +/-1, +/-2, ... — `count` tracks how recently
      // the best improved so the scan can stop early once it stalls.
      let mut best = (alpha_cost(0), 0);
      let mut count = 2;
      for alpha in 1i16..=16i16 {
        let cost = (alpha_cost(alpha), alpha_cost(-alpha));
        if cost.0 < best.0 {
          best = (cost.0, alpha);
          count += 2;
        }
        if cost.1 < best.0 {
          best = (cost.1, -alpha);
          count += 2;
        }
        if count < alpha {
          break;
        }
      }
      best.1
    })
    .collect();
  if best_alpha[0] == 0 && best_alpha[1] == 0 {
    None
  } else {
    Some(CFLParams::from_alpha(best_alpha[0], best_alpha[1]))
  }
}
// RDO-based transform type decision
/// Try every transform type in `tx_types` supported by `tx_set` and return
/// the one with the lowest RD cost, along with that cost. Each trial encode
/// is rolled back before the next.
pub fn rdo_tx_type_decision<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, mode: PredictionMode, ref_frames: [RefType; 2],
  mvs: [MotionVector; 2], bsize: BlockSize, tile_bo: TileBlockOffset,
  tx_size: TxSize, tx_set: TxSet, tx_types: &[TxType],
) -> (TxType, f64) {
  let mut best_type = TxType::DCT_DCT;
  let mut best_rd = std::f64::MAX;
  let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
  let is_chroma_block =
    has_chroma(tile_bo, bsize, xdec, ydec, fi.sequence.chroma_sampling);
  let is_inter = !mode.is_intra();
  let cw_checkpoint = cw.checkpoint();
  let rdo_type = if fi.use_tx_domain_distortion {
    RDOType::TxDistRealRate
  } else {
    RDOType::PixelDistRealRate
  };
  // Split intra blocks need reconstructed pixels for later predictions.
  let need_recon_pixel = tx_size.block_size() != bsize && !is_inter;
  for &tx_type in tx_types {
    // Skip unsupported transform types
    if av1_tx_used[tx_set as usize][tx_type as usize] == 0 {
      continue;
    }
    if is_inter {
      motion_compensate(
        fi, ts, cw, mode, ref_frames, mvs, bsize, tile_bo, true,
      );
    }
    // Trial encode into a counting writer to measure the rate.
    let wr: &mut dyn Writer = &mut WriterCounter::new();
    let tell = wr.tell_frac();
    let (_, tx_dist) = if is_inter {
      write_tx_tree(
        fi,
        ts,
        cw,
        wr,
        mode,
        0,
        tile_bo,
        bsize,
        tx_size,
        tx_type,
        false,
        true,
        rdo_type,
        need_recon_pixel,
      )
    } else {
      write_tx_blocks(
        fi,
        ts,
        cw,
        wr,
        mode,
        mode,
        AngleDelta::default(),
        tile_bo,
        bsize,
        tx_size,
        tx_type,
        false,
        CFLParams::default(), // Unused.
        true,
        rdo_type,
        need_recon_pixel,
      )
    };
    let rate = wr.tell_frac() - tell;
    let distortion = if fi.use_tx_domain_distortion {
      compute_tx_distortion(
        fi,
        ts,
        bsize,
        is_chroma_block,
        tile_bo,
        tx_dist,
        false,
        true,
      )
    } else {
      compute_distortion(fi, ts, bsize, is_chroma_block, tile_bo, true)
    };
    let rd = compute_rd_cost(fi, rate, distortion);
    if rd < best_rd {
      best_rd = rd;
      best_type = tx_type;
    }
    // Undo the trial encode before evaluating the next type.
    cw.rollback(&cw_checkpoint);
  }
  assert!(best_rd >= 0_f64);
  (best_type, best_rd)
}
/// Return the sub-block offsets implied by `partition`, drawn from the four
/// quadrant offsets in `four_partitions` (no bounds checking).
pub fn get_sub_partitions(
  four_partitions: &[TileBlockOffset; 4], partition: PartitionType,
) -> ArrayVec<[TileBlockOffset; 4]> {
  let mut offsets = ArrayVec::<[TileBlockOffset; 4]>::new();
  // The top-left offset belongs to every partition shape.
  offsets.push(four_partitions[0]);
  if partition != PARTITION_NONE {
    let split = partition == PARTITION_SPLIT;
    if split || partition == PARTITION_VERT {
      offsets.push(four_partitions[1]); // top-right
    }
    if split || partition == PARTITION_HORZ {
      offsets.push(four_partitions[2]); // bottom-left
    }
    if split {
      offsets.push(four_partitions[3]); // bottom-right
    }
  }
  offsets
}
/// Like [`get_sub_partitions`], but only keeps sub-blocks that fit entirely
/// within the `mi_width` x `mi_height` area (in MI units).
pub fn get_sub_partitions_with_border_check(
  four_partitions: &[TileBlockOffset; 4], partition: PartitionType,
  mi_width: usize, mi_height: usize, subsize: BlockSize,
) -> ArrayVec<[TileBlockOffset; 4]> {
  let mut offsets = ArrayVec::<[TileBlockOffset; 4]>::new();
  offsets.push(four_partitions[0]);
  if partition == PARTITION_NONE {
    return offsets;
  }
  let hbsw = subsize.width_mi(); // Half the block size width in blocks
  let hbsh = subsize.height_mi(); // Half the block size height in blocks
  // A sub-block is kept only when it lies fully inside the area.
  let in_bounds = |bo: &TileBlockOffset| {
    bo.0.x + hbsw <= mi_width && bo.0.y + hbsh <= mi_height
  };
  if (partition == PARTITION_VERT || partition == PARTITION_SPLIT)
    && in_bounds(&four_partitions[1])
  {
    offsets.push(four_partitions[1]);
  }
  if (partition == PARTITION_HORZ || partition == PARTITION_SPLIT)
    && in_bounds(&four_partitions[2])
  {
    offsets.push(four_partitions[2]);
  }
  if partition == PARTITION_SPLIT && in_bounds(&four_partitions[3]) {
    offsets.push(four_partitions[3]);
  }
  offsets
}
#[inline(always)]
/// Evaluate PARTITION_NONE: run mode decision on the whole block, append the
/// result to `child_modes` and return its RD cost.
fn rdo_partition_none<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, bsize: BlockSize, tile_bo: TileBlockOffset,
  pmv_idx: usize, inter_cfg: &InterConfig,
  child_modes: &mut ArrayVec<[PartitionParameters; 4]>,
) -> Option<f64> {
  // Blocks larger than 32x32 use PMV slot 0; smaller blocks select one of
  // four inner slots (1..=4) from their position inside the 64x64 area.
  let pmv_inner_idx = if bsize > BlockSize::BLOCK_32X32 {
    0
  } else {
    ((tile_bo.0.x & 32) >> 5) + ((tile_bo.0.y & 32) >> 4) + 1
  };
  let decision = rdo_mode_decision(
    fi,
    ts,
    cw,
    bsize,
    tile_bo,
    (pmv_idx, pmv_inner_idx),
    inter_cfg,
  );
  let rd_cost = decision.rd_cost;
  child_modes.push(decision);
  Some(rd_cost)
}
// VERTICAL, HORIZONTAL or simple SPLIT
#[inline(always)]
/// Evaluate a VERT/HORZ/SPLIT partition: signal the partition, run mode
/// decision and a trial encode for each in-bounds sub-block, and return the
/// summed RD cost. Returns `None` when early exit triggers because the
/// running cost already exceeds `best_rd`.
fn rdo_partition_simple<T: Pixel, W: Writer>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w_pre_cdef: &mut W, w_post_cdef: &mut W,
  bsize: BlockSize, tile_bo: TileBlockOffset, pmv_idx: usize,
  inter_cfg: &InterConfig, partition: PartitionType, rdo_type: RDOType,
  best_rd: f64, child_modes: &mut ArrayVec<[PartitionParameters; 4]>,
) -> Option<f64> {
  let subsize = bsize.subsize(partition);
  debug_assert!(subsize != BlockSize::BLOCK_INVALID);
  // Cost of signalling the partition symbol itself (>= 8x8 blocks only).
  let cost = if bsize >= BlockSize::BLOCK_8X8 {
    let w: &mut W = if cw.bc.cdef_coded { w_post_cdef } else { w_pre_cdef };
    let tell = w.tell_frac();
    cw.write_partition(w, tile_bo, partition, bsize);
    compute_rd_cost(fi, w.tell_frac() - tell, ScaledDistortion::zero())
  } else {
    0.0
  };
  //pmv = best_pred_modes[0].mvs[0];
  // assert!(best_pred_modes.len() <= 4);
  let hbsw = subsize.width_mi(); // Half the block size width in blocks
  let hbsh = subsize.height_mi(); // Half the block size height in blocks
  // Offsets of the four quadrants; the partition type selects which apply.
  let four_partitions = [
    tile_bo,
    TileBlockOffset(BlockOffset {
      x: tile_bo.0.x + hbsw as usize,
      y: tile_bo.0.y,
    }),
    TileBlockOffset(BlockOffset {
      x: tile_bo.0.x,
      y: tile_bo.0.y + hbsh as usize,
    }),
    TileBlockOffset(BlockOffset {
      x: tile_bo.0.x + hbsw as usize,
      y: tile_bo.0.y + hbsh as usize,
    }),
  ];
  let partitions = get_sub_partitions_with_border_check(
    &four_partitions,
    partition,
    ts.mi_width,
    ts.mi_height,
    subsize,
  );
  // Inner PMV slot per sub-block (same mapping as rdo_partition_none).
  let pmv_idxs = partitions
    .iter()
    .map(|&offset| {
      if subsize > BlockSize::BLOCK_32X32 {
        0
      } else {
        ((offset.0.x & 32) >> 5) + ((offset.0.y & 32) >> 4) + 1
      }
    })
    .collect::<ArrayVec<[_; 4]>>();
  let mut rd_cost_sum = 0.0;
  for (&offset, pmv_inner_idx) in partitions.iter().zip(pmv_idxs) {
    let mode_decision = rdo_mode_decision(
      fi,
      ts,
      cw,
      subsize,
      offset,
      (pmv_idx, pmv_inner_idx),
      inter_cfg,
    );
    rd_cost_sum += mode_decision.rd_cost;
    // Abandon this partition as soon as it cannot beat the current best.
    if fi.enable_early_exit && rd_cost_sum > best_rd {
      return None;
    }
    if subsize >= BlockSize::BLOCK_8X8 && subsize.is_sqr() {
      let w: &mut W = if cw.bc.cdef_coded { w_post_cdef } else { w_pre_cdef };
      cw.write_partition(w, offset, PartitionType::PARTITION_NONE, subsize);
    }
    // Encode the sub-block so later siblings see the correct context.
    encode_block_with_modes(
      fi,
      ts,
      cw,
      w_pre_cdef,
      w_post_cdef,
      subsize,
      offset,
      &mode_decision,
      rdo_type,
      false,
    );
    child_modes.push(mode_decision);
  }
  Some(cost + rd_cost_sum)
}
/// RDO-based single level partitioning decision.
///
/// Tries each candidate in `partition_types` (skipping the one already
/// evaluated in `cached_block`), rolling the context writer and both
/// bitstream writers back after every trial, and returns the parameters of
/// the cheapest partitioning found.
pub fn rdo_partition_decision<T: Pixel, W: Writer>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w_pre_cdef: &mut W, w_post_cdef: &mut W,
  bsize: BlockSize, tile_bo: TileBlockOffset,
  cached_block: &PartitionGroupParameters, pmv_idx: usize,
  partition_types: &[PartitionType], rdo_type: RDOType,
  inter_cfg: &InterConfig,
) -> PartitionGroupParameters {
  let mut best_partition = cached_block.part_type;
  let mut best_rd = cached_block.rd_cost;
  let mut best_pred_modes = cached_block.part_modes.clone();
  // Checkpoints so each trial encode can be undone before the next one.
  let cw_checkpoint = cw.checkpoint();
  let w_pre_checkpoint = w_pre_cdef.checkpoint();
  let w_post_checkpoint = w_post_cdef.checkpoint();
  for &partition in partition_types {
    // Do not re-encode results we already have
    if partition == cached_block.part_type {
      continue;
    }
    let mut child_modes = ArrayVec::<[_; 4]>::new();
    // `None` means the trial early-exited because it could not beat
    // `best_rd`.
    let cost = match partition {
      PARTITION_NONE if bsize <= BlockSize::BLOCK_64X64 => rdo_partition_none(
        fi,
        ts,
        cw,
        bsize,
        tile_bo,
        pmv_idx,
        inter_cfg,
        &mut child_modes,
      ),
      PARTITION_SPLIT | PARTITION_HORZ | PARTITION_VERT => {
        rdo_partition_simple(
          fi,
          ts,
          cw,
          w_pre_cdef,
          w_post_cdef,
          bsize,
          tile_bo,
          pmv_idx,
          inter_cfg,
          partition,
          rdo_type,
          best_rd,
          &mut child_modes,
        )
      }
      _ => {
        unreachable!();
      }
    };
    if let Some(rd) = cost {
      if rd < best_rd {
        best_rd = rd;
        best_partition = partition;
        best_pred_modes = child_modes.clone();
      }
    }
    // Undo the trial encode regardless of whether it won.
    cw.rollback(&cw_checkpoint);
    w_pre_cdef.rollback(&w_pre_checkpoint);
    w_post_cdef.rollback(&w_post_checkpoint);
  }
  assert!(best_rd >= 0_f64);
  PartitionGroupParameters {
    rd_cost: best_rd,
    part_type: best_partition,
    part_modes: best_pred_modes,
  }
}
/// Accumulates the distortion between `test` and `src` for one plane over an
/// `sb_w` x `sb_h` superblock area, walking it in 8x8 (luma) direction
/// blocks and applying the per-block activity bias.
///
/// `base_sbo` is the analysis area's origin in the tile; `offset_sbo` is the
/// position being measured relative to that origin. The result is scaled by
/// the plane's `dist_scale`.
fn rdo_loop_plane_error<T: Pixel>(
  base_sbo: TileSuperBlockOffset, offset_sbo: TileSuperBlockOffset,
  sb_w: usize, sb_h: usize, fi: &FrameInvariants<T>, ts: &TileStateMut<'_, T>,
  blocks: &TileBlocks<'_>, test: &Frame<u16>, src: &Frame<u16>, pli: usize,
) -> ScaledDistortion {
  let sb_w_blocks =
    if fi.sequence.use_128x128_superblock { 16 } else { 8 } * sb_w;
  let sb_h_blocks =
    if fi.sequence.use_128x128_superblock { 16 } else { 8 } * sb_h;
  // Each direction block is 8x8 in y, potentially smaller if subsampled in chroma
  // accumulating in-frame and unpadded
  let mut err = Distortion::zero();
  for by in 0..sb_h_blocks {
    for bx in 0..sb_w_blocks {
      let loop_bo = offset_sbo.block_offset(bx << 1, by << 1);
      // Skip blocks that fall outside the visible frame.
      if loop_bo.0.x < blocks.cols() && loop_bo.0.y < blocks.rows() {
        let src_plane = &src.planes[pli];
        let test_plane = &test.planes[pli];
        let PlaneConfig { xdec, ydec, .. } = src_plane.cfg;
        debug_assert_eq!(xdec, test_plane.cfg.xdec);
        debug_assert_eq!(ydec, test_plane.cfg.ydec);
        // Unfortunately, our distortion biases are only available via
        // Frame-absolute addressing, so we need a block offset
        // relative to the full frame origin (not the tile or analysis
        // area)
        let frame_bo = (base_sbo + offset_sbo).block_offset(bx << 1, by << 1);
        let bias = distortion_scale(
          fi,
          ts.to_frame_block_offset(frame_bo),
          BlockSize::BLOCK_8X8,
        );
        let src_region =
          src_plane.region(Area::BlockStartingAt { bo: loop_bo.0 });
        let test_region =
          test_plane.region(Area::BlockStartingAt { bo: loop_bo.0 });
        err += if pli == 0 {
          // For loop filters, We intentionally use cdef_dist even with
          // `--tune Psnr`. Using SSE instead gives no PSNR gain but has a
          // significant negative impact on other metrics and visual quality.
          cdef_dist_wxh_8x8(&src_region, &test_region, fi.sequence.bit_depth)
            * bias
        } else {
          sse_wxh(&src_region, &test_region, 8 >> xdec, 8 >> ydec, |_, _| bias)
        };
      }
    }
  }
  err * fi.dist_scale[pli]
}
/// Jointly optimizes the CDEF index per superblock and the loop-restoration
/// filter per LRU over one analysis area.
///
/// Passed in a superblock offset representing the upper left corner of
/// the LRU area we're optimizing. This area covers the largest LRU in
/// any of the present planes, but may consist of a number of
/// superblocks and full, smaller LRUs in the other planes.
///
/// The search alternates between the two filter stages: CDEF indices are
/// refined against the current LRF choice, then LRF parameters against the
/// current CDEF output, repeating until neither changes. Winning choices are
/// written into `cw.bc.blocks` (CDEF) and `ts.restoration` (LRF).
pub fn rdo_loop_decision<T: Pixel>(
  base_sbo: TileSuperBlockOffset, fi: &FrameInvariants<T>,
  ts: &mut TileStateMut<'_, T>, cw: &mut ContextWriter, w: &mut dyn Writer,
  deblock_p: bool,
) {
  let planes = if fi.sequence.chroma_sampling == ChromaSampling::Cs400 {
    1
  } else {
    MAX_PLANES
  };
  assert!(fi.sequence.enable_cdef || fi.sequence.enable_restoration);
  // Determine area of optimization: Which plane has the largest LRUs?
  // How many LRUs for each?
  let mut sb_w = 1; // how many superblocks wide the largest LRU
                    // is/how many SBs we're processing (same thing)
  let mut sb_h = 1; // how many superblocks wide the largest LRU
                    // is/how many SBs we're processing (same thing)
  let mut lru_w = [0; MAX_PLANES]; // how many LRUs we're processing
  let mut lru_h = [0; MAX_PLANES]; // how many LRUs we're processing
  for pli in 0..planes {
    let sb_h_shift = ts.restoration.planes[pli].rp_cfg.sb_h_shift;
    let sb_v_shift = ts.restoration.planes[pli].rp_cfg.sb_v_shift;
    if sb_w < (1 << sb_h_shift) {
      sb_w = 1 << sb_h_shift;
    }
    if sb_h < (1 << sb_v_shift) {
      sb_h = 1 << sb_v_shift;
    }
  }
  for pli in 0..planes {
    let sb_h_shift = ts.restoration.planes[pli].rp_cfg.sb_h_shift;
    let sb_v_shift = ts.restoration.planes[pli].rp_cfg.sb_v_shift;
    lru_w[pli] = sb_w / (1 << sb_h_shift);
    lru_h[pli] = sb_h / (1 << sb_v_shift);
  }
  // The superblock width/height determinations may be calling for us
  // to compute over superblocks that do not actually exist in the
  // frame (off the right or lower edge). Trim sb width/height down
  // to actual superblocks. Note that these last superblocks on the
  // right/bottom may themselves still span the edge of the frame, but
  // they do hold at least some visible pixels.
  sb_w = sb_w.min(ts.sb_width - base_sbo.0.x);
  sb_h = sb_h.min(ts.sb_height - base_sbo.0.y);
  // We have need to know the Y visible pixel limits as well (the
  // sb_w/sb_h figures above can be used to determine how many
  // allocated pixels, possibly beyond the visible frame, exist).
  let crop_w =
    fi.width - ((ts.sbo.0.x + base_sbo.0.x) << SUPERBLOCK_TO_PLANE_SHIFT);
  let crop_h =
    fi.height - ((ts.sbo.0.y + base_sbo.0.y) << SUPERBLOCK_TO_PLANE_SHIFT);
  let pixel_w = crop_w.min(sb_w << SUPERBLOCK_TO_PLANE_SHIFT);
  let pixel_h = crop_h.min(sb_h << SUPERBLOCK_TO_PLANE_SHIFT);
  // Based on `RestorationState::new`
  const MAX_SB_SHIFT: usize = 4;
  const MAX_SB_SIZE: usize = 1 << MAX_SB_SHIFT;
  const MAX_LRU_SIZE: usize = MAX_SB_SIZE;
  // Static allocation relies on the "minimal LRU area for all N planes" invariant.
  // -1 means "no CDEF index chosen yet" for that superblock.
  let mut best_index = [-1; MAX_SB_SIZE * MAX_SB_SIZE];
  let mut best_lrf =
    [[RestorationFilter::None; MAX_PLANES]; MAX_LRU_SIZE * MAX_LRU_SIZE];
  // due to imprecision in the reconstruction parameter solver, we
  // need to make sure we don't fall into a limit cycle. Track our
  // best cost at LRF so that we can break if we get a solution that doesn't
  // improve at the reconstruction stage.
  let mut best_lrf_cost = [[-1.0; MAX_PLANES]; MAX_LRU_SIZE * MAX_LRU_SIZE];
  // Loop filter RDO is an iterative process and we need temporary
  // scratch data to hold the results of deblocking, cdef, and the
  // loop reconstruction filter so that each can be partially updated
  // without recomputing the entire stack. Construct
  // largest-LRU-sized frames for each, accounting for padding
  // required by deblocking, cdef and [optionally] LR.
  // NOTE: the CDEF code requires padding to simplify addressing.
  // Right now, the padded area does borrow neighboring pixels for the
  // border so long as they're within the tile [as opposed to simply
  // flagging the border pixels as inactive]. LR code currently does
  // not need and will not use padding area. It always edge-extends
  // the passed in rectangle.
  let mut rec_subset = {
    let const_rec = ts.rec.as_const();
    // a padding of 8 gets us a full block of border. CDEF
    // only needs 2 pixels, but deblocking is happier with full
    // blocks.
    cdef_padded_tile_copy(
      &const_rec,
      base_sbo,
      (pixel_w + 7) >> 3,
      (pixel_h + 7) >> 3,
      8,
      planes,
    )
  };
  // sub-setted region of the TileBlocks for our working frame area
  let mut tileblocks_subset = cw.bc.blocks.subregion(
    base_sbo.block_offset(0, 0).0.x,
    base_sbo.block_offset(0, 0).0.y,
    sb_w << SUPERBLOCK_TO_BLOCK_SHIFT,
    sb_h << SUPERBLOCK_TO_BLOCK_SHIFT,
  );
  // why copy and not just a view? Because CDEF optimization requires
  // u16 working space. This avoids adding another generic buffer
  // typing parameter and expanding code to handle all the possible
  // input/output combinations. In the future we may decide to prefer
  // that over the additional temp buffer (after doing the work needed
  // to allow CDEF opt to work on 8 bit).
  let src_subset = {
    cdef_padded_tile_copy(
      &ts.input_tile,
      base_sbo,
      (pixel_w + 7) >> 3,
      (pixel_h + 7) >> 3,
      0,
      planes,
    )
  };
  if deblock_p {
    // Find a good deblocking filter solution for the passed in area.
    // This is not RDO of deblocking itself, merely a solution to get
    // better results from CDEF/LRF RDO.
    let deblock_levels = deblock_filter_optimize(
      fi,
      &rec_subset.as_tile(),
      &src_subset.as_tile(),
      &tileblocks_subset.as_const(),
      crop_w,
      crop_h,
    );
    // Deblock the contents of our reconstruction copy.
    if deblock_levels[0] != 0 || deblock_levels[1] != 0 {
      // copy ts.deblock because we need to set some of our own values here
      let mut deblock_copy = *ts.deblock;
      deblock_copy.levels = deblock_levels;
      // finally, deblock the temp frame
      deblock_filter_frame(
        &deblock_copy,
        &mut rec_subset.as_tile_mut(),
        &tileblocks_subset.as_const(),
        crop_w,
        crop_h,
        fi.sequence.bit_depth,
        planes,
      );
    }
  }
  // Scratch frame the trial CDEF output is written into (if CDEF is on).
  let mut cdef_work = if fi.sequence.enable_cdef {
    Some(cdef_padded_tile_copy(
      &rec_subset.as_tile(),
      TileSuperBlockOffset(SuperBlockOffset { x: 0, y: 0 }),
      (pixel_w + 7) >> 3,
      (pixel_h + 7) >> 3,
      0,
      planes,
    ))
  } else {
    None
  };
  // Scratch frame the trial LRF output is written into (if LR is on).
  let mut lrf_work = if fi.sequence.enable_restoration {
    Some(cdef_block8_frame(
      (pixel_w + 7) >> 3,
      (pixel_h + 7) >> 3,
      &ts.rec.as_const(),
    ))
  } else {
    None
  };
  // Precompute directional analysis for CDEF
  let cdef_data = {
    if cdef_work.is_some() {
      Some((
        &rec_subset,
        cdef_analyze_superblock_range(
          fi,
          &rec_subset,
          &tileblocks_subset.as_const(),
          sb_w,
          sb_h,
        ),
      ))
    } else {
      None
    }
  };
  // CDEF/LRF decision iteration
  // Start with a default of CDEF 0 and RestorationFilter::None
  // Try all CDEF options for each sb with current LRF; if new CDEF+LRF choice is better, select it.
  // Then try all LRF options with current CDEFs; if new CDEFs+LRF choice is better, select it.
  // If LRF choice changed for any plane, repeat until no changes
  // Limit iterations and where we break based on speed setting (in the TODO list ;-)
  let mut cdef_change = true;
  let mut lrf_change = true;
  while cdef_change || lrf_change {
    // search for improved cdef indices, superblock by superblock, if cdef is enabled.
    if let (Some((rec_copy, cdef_dirs)), Some(cdef_ref)) =
      (&cdef_data, &mut cdef_work.as_mut())
    {
      for sby in 0..sb_h {
        for sbx in 0..sb_w {
          let prev_best_index = best_index[sby * sb_w + sbx];
          let mut best_cost = -1.;
          let mut best_new_index = -1i8;
          /* offset of the superblock we're currently testing within the larger analysis area */
          let loop_sbo =
            TileSuperBlockOffset(SuperBlockOffset { x: sbx, y: sby });
          /* cdef index testing loop */
          for cdef_index in 0..(1 << fi.cdef_bits) {
            let mut err = ScaledDistortion::zero();
            let mut rate = 0;
            let mut cdef_ref_tm = TileMut::new(
              cdef_ref,
              TileRect {
                x: 0,
                y: 0,
                width: cdef_ref.planes[0].cfg.width,
                height: cdef_ref.planes[0].cfg.height,
              },
            );
            cdef_filter_superblock(
              fi,
              &rec_subset,
              &mut cdef_ref_tm,
              &tileblocks_subset.as_const(),
              loop_sbo,
              cdef_index,
              &cdef_dirs[sby * sb_w + sbx],
            );
            // apply LRF if any
            for pli in 0..planes {
              // We need the cropped-to-visible-frame area of this SB
              let wh =
                if fi.sequence.use_128x128_superblock { 128 } else { 64 };
              let PlaneConfig { xdec, ydec, .. } = cdef_ref.planes[pli].cfg;
              let vis_width = (wh >> xdec).min(
                (crop_w >> xdec)
                  - loop_sbo.plane_offset(&cdef_ref.planes[pli].cfg).x
                    as usize,
              );
              let vis_height = (wh >> ydec).min(
                (crop_h >> ydec)
                  - loop_sbo.plane_offset(&cdef_ref.planes[pli].cfg).y
                    as usize,
              );
              // which LRU are we currently testing against?
              if let (Some((lru_x, lru_y)), Some(lrf_ref)) = {
                let rp = &ts.restoration.planes[pli];
                (
                  rp.restoration_unit_offset(base_sbo, loop_sbo, false),
                  &mut lrf_work,
                )
              } {
                // We have a valid LRU, apply LRF, compute error
                match best_lrf[lru_y * lru_w[pli] + lru_x][pli] {
                  RestorationFilter::None {} => {
                    err += rdo_loop_plane_error(
                      base_sbo,
                      loop_sbo,
                      1,
                      1,
                      fi,
                      ts,
                      &tileblocks_subset.as_const(),
                      cdef_ref,
                      &src_subset,
                      pli,
                    );
                    rate += if fi.sequence.enable_restoration {
                      cw.count_lrf_switchable(
                        w,
                        &ts.restoration.as_const(),
                        best_lrf[lru_y * lru_w[pli] + lru_x][pli],
                        pli,
                      )
                    } else {
                      0 // no relative cost differences to different
                        // CDEF params. If cdef is on, it's a wash.
                    };
                  }
                  RestorationFilter::Sgrproj { set, xqd } => {
                    // only run on this single superblock
                    let loop_po =
                      loop_sbo.plane_offset(&cdef_ref.planes[pli].cfg);
                    // todo: experiment with borrowing border pixels
                    // rather than edge-extending. Right now this is
                    // hard-clipping to the superblock boundary.
                    setup_integral_image(
                      &mut ts.integral_buffer,
                      SOLVE_IMAGE_STRIDE,
                      vis_width,
                      vis_height,
                      vis_width,
                      vis_height,
                      &cdef_ref.planes[pli].slice(loop_po),
                      &cdef_ref.planes[pli].slice(loop_po),
                    );
                    sgrproj_stripe_filter(
                      set,
                      xqd,
                      fi,
                      &ts.integral_buffer,
                      SOLVE_IMAGE_STRIDE,
                      &cdef_ref.planes[pli].slice(loop_po),
                      &mut lrf_ref.planes[pli].region_mut(Area::Rect {
                        x: loop_po.x,
                        y: loop_po.y,
                        width: vis_width,
                        height: vis_height,
                      }),
                    );
                    err += rdo_loop_plane_error(
                      base_sbo,
                      loop_sbo,
                      1,
                      1,
                      fi,
                      ts,
                      &tileblocks_subset.as_const(),
                      lrf_ref,
                      &src_subset,
                      pli,
                    );
                    rate += cw.count_lrf_switchable(
                      w,
                      &ts.restoration.as_const(),
                      best_lrf[lru_y * lru_w[pli] + lru_x][pli],
                      pli,
                    );
                  }
                  RestorationFilter::Wiener { .. } => unreachable!(), // coming soon
                }
              } else {
                // No actual LRU here, compute error directly from CDEF output.
                err += rdo_loop_plane_error(
                  base_sbo,
                  loop_sbo,
                  1,
                  1,
                  fi,
                  ts,
                  &tileblocks_subset.as_const(),
                  cdef_ref,
                  &src_subset,
                  pli,
                );
                // no relative cost differences to different
                // CDEF params. If cdef is on, it's a wash.
                // rate += 0;
              }
            }
            let cost = compute_rd_cost(fi, rate, err);
            if best_cost < 0. || cost < best_cost {
              best_cost = cost;
              best_new_index = cdef_index as i8;
            }
          }
          // Did we change any preexisting choices?
          if best_new_index != prev_best_index {
            cdef_change = true;
            best_index[sby * sb_w + sbx] = best_new_index;
            tileblocks_subset.set_cdef(loop_sbo, best_new_index as u8);
          }
          let mut cdef_ref_tm = TileMut::new(
            cdef_ref,
            TileRect {
              x: 0,
              y: 0,
              width: cdef_ref.planes[0].cfg.width,
              height: cdef_ref.planes[0].cfg.height,
            },
          );
          // Keep cdef output up to date; we need it for restoration
          // both below and above (padding)
          cdef_filter_superblock(
            fi,
            rec_copy,
            &mut cdef_ref_tm,
            &tileblocks_subset.as_const(),
            loop_sbo,
            best_index[sby * sb_w + sbx] as u8,
            &cdef_dirs[sby * sb_w + sbx],
          );
        }
      }
    }
    if !cdef_change {
      break;
    }
    cdef_change = false;
    lrf_change = false;
    // search for improved restoration filter parameters if restoration is enabled
    if let Some(lrf_ref) = &mut lrf_work.as_mut() {
      let lrf_input = if cdef_work.is_some() {
        // When CDEF is enabled, we pull from the CDEF output
        &cdef_work.as_ref().unwrap()
      } else {
        // When CDEF is disabled, we pull from the [optionally
        // deblocked] reconstruction
        &rec_subset
      };
      for pli in 0..planes {
        // Nominal size of LRU in pixels before clipping to visible frame
        let unit_size = ts.restoration.planes[pli].rp_cfg.unit_size;
        // width, in sb, of an LRU in this plane
        let lru_sb_w = 1 << ts.restoration.planes[pli].rp_cfg.sb_h_shift;
        // height, in sb, of an LRU in this plane
        let lru_sb_h = 1 << ts.restoration.planes[pli].rp_cfg.sb_v_shift;
        let PlaneConfig { xdec, ydec, .. } = lrf_ref.planes[pli].cfg;
        for lru_y in 0..lru_h[pli] {
          // number of LRUs vertically
          for lru_x in 0..lru_w[pli] {
            // number of LRUs horizontally
            let loop_sbo = TileSuperBlockOffset(SuperBlockOffset {
              x: lru_x * lru_sb_w,
              y: lru_y * lru_sb_h,
            });
            if ts.restoration.has_restoration_unit(
              base_sbo + loop_sbo,
              pli,
              false,
            ) {
              let src_plane = &src_subset.planes[pli]; // uncompressed input for reference
              let lrf_in_plane = &lrf_input.planes[pli];
              let lrf_po = loop_sbo.plane_offset(&src_plane.cfg);
              let mut best_new_lrf = best_lrf[lru_y * lru_w[pli] + lru_x][pli];
              let mut best_cost =
                best_lrf_cost[lru_y * lru_w[pli] + lru_x][pli];
              // Check the no filter option
              {
                let err = rdo_loop_plane_error(
                  base_sbo,
                  loop_sbo,
                  lru_sb_w,
                  lru_sb_h,
                  fi,
                  ts,
                  &tileblocks_subset.as_const(),
                  lrf_input,
                  &src_subset,
                  pli,
                );
                let rate = cw.count_lrf_switchable(
                  w,
                  &ts.restoration.as_const(),
                  best_new_lrf,
                  pli,
                );
                let cost = compute_rd_cost(fi, rate, err);
                // Was this choice actually an improvement?
                if best_cost < 0. || cost < best_cost {
                  best_cost = cost;
                  best_lrf_cost[lru_y * lru_w[pli] + lru_x][pli] = cost;
                  best_new_lrf = RestorationFilter::None;
                }
              }
              // Look for a self guided filter
              // We need the cropped-to-visible-frame computation area of this LRU
              let vis_width = unit_size.min(
                (crop_w >> xdec)
                  - loop_sbo.plane_offset(&lrf_ref.planes[pli].cfg).x as usize,
              );
              let vis_height = unit_size.min(
                (crop_h >> ydec)
                  - loop_sbo.plane_offset(&lrf_ref.planes[pli].cfg).y as usize,
              );
              // todo: experiment with borrowing border pixels
              // rather than edge-extending. Right now this is
              // hard-clipping to the superblock boundary.
              setup_integral_image(
                &mut ts.integral_buffer,
                SOLVE_IMAGE_STRIDE,
                vis_width,
                vis_height,
                vis_width,
                vis_height,
                &lrf_in_plane.slice(lrf_po),
                &lrf_in_plane.slice(lrf_po),
              );
              for &set in get_sgr_sets(fi.config.speed_settings.sgr_complexity)
              {
                let (xqd0, xqd1) = sgrproj_solve(
                  set,
                  fi,
                  &ts.integral_buffer,
                  &src_plane.slice(lrf_po),
                  &lrf_in_plane.slice(lrf_po),
                  vis_width,
                  vis_height,
                );
                let current_lrf =
                  RestorationFilter::Sgrproj { set, xqd: [xqd0, xqd1] };
                if let RestorationFilter::Sgrproj { set, xqd } = current_lrf {
                  sgrproj_stripe_filter(
                    set,
                    xqd,
                    fi,
                    &ts.integral_buffer,
                    SOLVE_IMAGE_STRIDE,
                    &lrf_in_plane.slice(lrf_po),
                    &mut lrf_ref.planes[pli].region_mut(Area::Rect {
                      x: lrf_po.x,
                      y: lrf_po.y,
                      width: vis_width,
                      height: vis_height,
                    }),
                  );
                }
                let err = rdo_loop_plane_error(
                  base_sbo,
                  loop_sbo,
                  lru_sb_w,
                  lru_sb_h,
                  fi,
                  ts,
                  &tileblocks_subset.as_const(),
                  lrf_ref,
                  &src_subset,
                  pli,
                );
                let rate = cw.count_lrf_switchable(
                  w,
                  &ts.restoration.as_const(),
                  current_lrf,
                  pli,
                );
                let cost = compute_rd_cost(fi, rate, err);
                if cost < best_cost {
                  best_cost = cost;
                  best_lrf_cost[lru_y * lru_w[pli] + lru_x][pli] = cost;
                  best_new_lrf = current_lrf;
                }
              }
              if best_lrf[lru_y * lru_w[pli] + lru_x][pli]
                .notequal(best_new_lrf)
              {
                best_lrf[lru_y * lru_w[pli] + lru_x][pli] = best_new_lrf;
                lrf_change = true;
                if let Some(ru) = ts.restoration.planes[pli]
                  .restoration_unit_mut(base_sbo + loop_sbo)
                {
                  ru.filter = best_new_lrf;
                }
              }
            }
          }
        }
      }
    }
  }
}
// Sanity check: the rate estimator must agree with the corresponding entry
// of the precomputed rate table for the smallest inputs.
#[test]
fn estimate_rate_test() {
  assert_eq!(estimate_rate(0, TxSize::TX_4X4, 0), RDO_RATE_TABLE[0][0][0]);
}
|
//! Serializing Rust structures into TOML.
//!
//! This module contains all the Serde support for serializing Rust structures
//! into TOML documents (as strings). Note that some top-level functions here
//! are also provided at the top of the crate.
//!
//! Note that the TOML format has a restriction that if a table itself contains
//! tables, all keys with non-table values must be emitted first. This is
//! typically easy to ensure happens when you're defining a `struct` as you can
//! reorder the fields manually, but when working with maps (such as `BTreeMap`
//! or `HashMap`) this can lead to serialization errors. In those situations you
//! may use the `tables_last` function in this module like so:
//!
//! ```rust
//! # use serde_derive::Serialize;
//! # use std::collections::HashMap;
//! #[derive(Serialize)]
//! struct Manifest {
//! package: Package,
//! #[serde(serialize_with = "toml::ser::tables_last")]
//! dependencies: HashMap<String, Dependency>,
//! }
//! # type Package = String;
//! # type Dependency = String;
//! # fn main() {}
//! ```
use std::cell::Cell;
use std::error;
use std::fmt::{self, Write};
use std::marker;
use std::rc::Rc;
use crate::datetime;
use serde::ser;
/// Serialize the given data structure as a TOML byte vector.
///
/// Serialization can fail if `T`'s implementation of `Serialize` decides to
/// fail, if `T` contains a map with non-string keys, or if `T` attempts to
/// serialize an unsupported datatype such as an enum, tuple, or tuple struct.
pub fn to_vec<T: ?Sized>(value: &T) -> Result<Vec<u8>, Error>
where
    T: ser::Serialize,
{
    let rendered = to_string(value)?;
    Ok(rendered.into_bytes())
}
/// Serialize the given data structure as a String of TOML.
///
/// Serialization can fail if `T`'s implementation of `Serialize` decides to
/// fail, if `T` contains a map with non-string keys, or if `T` attempts to
/// serialize an unsupported datatype such as an enum, tuple, or tuple struct.
///
/// # Examples
///
/// ```
/// use serde_derive::Serialize;
///
/// #[derive(Serialize)]
/// struct Config {
///     database: Database,
/// }
///
/// #[derive(Serialize)]
/// struct Database {
///     ip: String,
///     port: Vec<u16>,
///     connection_max: u32,
///     enabled: bool,
/// }
///
/// fn main() {
///     let config = Config {
///         database: Database {
///             ip: "192.168.1.1".to_string(),
///             port: vec![8001, 8002, 8003],
///             connection_max: 5000,
///             enabled: false,
///         },
///     };
///
///     let toml = toml::to_string(&config).unwrap();
///     println!("{}", toml)
/// }
/// ```
pub fn to_string<T: ?Sized>(value: &T) -> Result<String, Error>
where
    T: ser::Serialize,
{
    let mut out = String::with_capacity(128);
    {
        let mut serializer = Serializer::new(&mut out);
        value.serialize(&mut serializer)?;
    }
    Ok(out)
}
/// Serialize the given data structure as a "pretty" String of TOML.
///
/// This is identical to `to_string` except the output string has a more
/// "pretty" output. See `Serializer::pretty` for more details.
pub fn to_string_pretty<T: ?Sized>(value: &T) -> Result<String, Error>
where
    T: ser::Serialize,
{
    let mut out = String::with_capacity(128);
    {
        let mut serializer = Serializer::pretty(&mut out);
        value.serialize(&mut serializer)?;
    }
    Ok(out)
}
/// Errors that can occur when serializing a type.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Error {
    /// Indicates that a Rust type was requested to be serialized but it was not
    /// supported.
    ///
    /// Currently the TOML format does not support serializing types such as
    /// enums, tuples and tuple structs.
    UnsupportedType,
    /// The key of all TOML maps must be strings, but serialization was
    /// attempted where the key of a map was not a string.
    KeyNotString,
    /// An error that we never emit but keep for backwards compatibility.
    #[doc(hidden)]
    KeyNewline,
    /// An array had to be homogeneous, but now it is allowed to be
    /// heterogeneous. Kept for backwards compatibility.
    #[doc(hidden)]
    ArrayMixedType,
    /// All values in a TOML table must be emitted before further tables are
    /// emitted. If a value is emitted *after* a table then this error is
    /// generated.
    ValueAfterTable,
    /// A serialized date was invalid.
    DateInvalid,
    /// A serialized number was invalid.
    NumberInvalid,
    /// None was attempted to be serialized, but it's not supported.
    UnsupportedNone,
    /// A custom error which could be generated when serializing a particular
    /// type.
    Custom(String),
    #[doc(hidden)]
    __Nonexhaustive,
}
#[derive(Debug, Default, Clone)]
/// Internal place for holding array settings
struct ArraySettings {
    /// Number of spaces each array element is indented by.
    indent: usize,
    /// Whether the final element is followed by a comma.
    trailing_comma: bool,
}

impl ArraySettings {
    /// The configuration used for "pretty" arrays: four-space indent and a
    /// trailing comma after the last element.
    fn pretty() -> ArraySettings {
        ArraySettings {
            trailing_comma: true,
            indent: 4,
        }
    }
}
#[derive(Debug, Default, Clone)]
/// String settings
struct StringSettings {
    /// Whether to use literal strings when possible
    literal: bool,
}

impl StringSettings {
    /// The configuration used for "pretty" strings: prefer literal (`'...'`)
    /// strings when the contents allow it.
    fn pretty() -> StringSettings {
        StringSettings { literal: true }
    }
}
#[derive(Debug, Default, Clone)]
/// Internal struct for holding serialization settings
struct Settings {
    /// `Some` enables "pretty" (multi-line, indented) array output.
    array: Option<ArraySettings>,
    /// `Some` enables "pretty" (literal / triple-quoted) string output.
    string: Option<StringSettings>,
}
/// Serialization implementation for TOML.
///
/// This structure implements serialization support for TOML to serialize an
/// arbitrary type to TOML. Note that the TOML format does not support all
/// datatypes in Rust, such as enums, tuples, and tuple structs. These types
/// will generate an error when serialized.
///
/// Currently a serializer always writes its output to an in-memory `String`,
/// which is passed in when creating the serializer itself.
pub struct Serializer<'a> {
    /// Buffer all TOML output is appended to.
    dst: &'a mut String,
    /// Where we currently are in the table/array nesting.
    state: State<'a>,
    /// Formatting options; `Rc` so nested serializers can share them.
    settings: Rc<Settings>,
}
/// How an array being serialized was started.
#[derive(Debug, Copy, Clone)]
enum ArrayState {
    /// The array was started as an inline value (`key = [...]`).
    Started,
    /// The array was started as an array of tables (`[[key]]` headers).
    StartedAsATable,
}
/// Linked list (via `parent` references) tracking where in the document the
/// serializer currently is. `Cell`s allow the shared, immutable state chain
/// to be updated as values are emitted.
#[derive(Debug, Clone)]
enum State<'a> {
    Table {
        /// Key of the table (or key/value pair) being serialized.
        key: &'a str,
        /// Enclosing state; `End` at the document root.
        parent: &'a State<'a>,
        /// True until the first value of this table has been emitted.
        first: &'a Cell<bool>,
        /// Set once a sub-table has been emitted (plain values are then
        /// rejected with `ValueAfterTable`).
        table_emitted: &'a Cell<bool>,
    },
    Array {
        /// Enclosing state.
        parent: &'a State<'a>,
        /// True until the first element has been emitted.
        first: &'a Cell<bool>,
        /// Filled in once we know how the array is being represented.
        type_: &'a Cell<Option<ArrayState>>,
        /// Length hint from serde, if known.
        len: Option<usize>,
    },
    /// Document root / no enclosing container.
    End,
}
/// Serde sequence serializer; owns the `Cell`s that the borrowed
/// `State::Array` entries point at while elements are emitted.
#[doc(hidden)]
pub struct SerializeSeq<'a, 'b> {
    ser: &'b mut Serializer<'a>,
    first: Cell<bool>,
    type_: Cell<Option<ArrayState>>,
    len: Option<usize>,
}
/// Serde map/struct serializer. The `Datetime` variant handles the special
/// internal datetime wrapper type; `Table` handles ordinary TOML tables.
#[doc(hidden)]
pub enum SerializeTable<'a, 'b> {
    Datetime(&'b mut Serializer<'a>),
    Table {
        ser: &'b mut Serializer<'a>,
        key: String,
        first: Cell<bool>,
        table_emitted: Cell<bool>,
    },
}
impl<'a> Serializer<'a> {
/// Creates a new serializer which will emit TOML into the buffer provided.
///
/// The serializer can then be used to serialize a type after which the data
/// will be present in `dst`.
pub fn new(dst: &'a mut String) -> Serializer<'a> {
    Serializer {
        settings: Rc::new(Settings::default()),
        state: State::End,
        dst,
    }
}
/// Instantiate a "pretty" formatter
///
/// By default this will use:
///
/// - pretty strings: strings with newlines will use the `'''` syntax. See
///   `Serializer::pretty_string`
/// - pretty arrays: each item in arrays will be on a newline, have an indentation of 4 and
///   have a trailing comma. See `Serializer::pretty_array`
pub fn pretty(dst: &'a mut String) -> Serializer<'a> {
    let settings = Settings {
        array: Some(ArraySettings::pretty()),
        string: Some(StringSettings::pretty()),
    };
    Serializer {
        settings: Rc::new(settings),
        state: State::End,
        dst,
    }
}
/// Enable or Disable pretty strings
///
/// If enabled, literal strings will be used when possible and strings with
/// one or more newlines will use triple quotes (i.e.: `'''` or `"""`)
///
/// # Examples
///
/// Instead of:
///
/// ```toml,ignore
/// single = "no newlines"
/// text = "\nfoo\nbar\n"
/// ```
///
/// You will have:
///
/// ```toml,ignore
/// single = 'no newlines'
/// text = '''
/// foo
/// bar
/// '''
/// ```
pub fn pretty_string(&mut self, value: bool) -> &mut Self {
    let settings = Rc::get_mut(&mut self.settings).unwrap();
    settings.string = match value {
        true => Some(StringSettings::pretty()),
        false => None,
    };
    self
}
/// Enable or Disable Literal strings for pretty strings
///
/// If enabled, literal strings will be used when possible and strings with
/// one or more newlines will use triple quotes (i.e.: `'''` or `"""`)
///
/// If disabled, literal strings will NEVER be used and strings with one or
/// more newlines will use `"""`
///
/// # Examples
///
/// Instead of:
///
/// ```toml,ignore
/// single = "no newlines"
/// text = "\nfoo\nbar\n"
/// ```
///
/// You will have:
///
/// ```toml,ignore
/// single = "no newlines"
/// text = """
/// foo
/// bar
/// """
/// ```
pub fn pretty_string_literal(&mut self, value: bool) -> &mut Self {
    // Reuse the existing string settings if present, otherwise start from
    // the "pretty" defaults; in either case, override the `literal` flag.
    Rc::get_mut(&mut self.settings)
        .unwrap()
        .string
        .get_or_insert_with(StringSettings::pretty)
        .literal = value;
    self
}
/// Enable or Disable pretty arrays
///
/// If enabled, arrays will always have each item on their own line.
///
/// Some specific features can be controlled via other builder methods:
///
/// - `Serializer::pretty_array_indent`: set the indent to a value other
///   than 4.
/// - `Serializer::pretty_array_trailing_comma`: enable/disable the trailing
///   comma on the last item.
///
/// # Examples
///
/// Instead of:
///
/// ```toml,ignore
/// array = ["foo", "bar"]
/// ```
///
/// You will have:
///
/// ```toml,ignore
/// array = [
///     "foo",
///     "bar",
/// ]
/// ```
pub fn pretty_array(&mut self, value: bool) -> &mut Self {
    let settings = Rc::get_mut(&mut self.settings).unwrap();
    settings.array = match value {
        true => Some(ArraySettings::pretty()),
        false => None,
    };
    self
}
/// Set the indent for pretty arrays
///
/// See `Serializer::pretty_array` for more details.
pub fn pretty_array_indent(&mut self, value: usize) -> &mut Self {
    // Reuse the existing array settings if present, otherwise start from
    // the "pretty" defaults; in either case, override the indent.
    Rc::get_mut(&mut self.settings)
        .unwrap()
        .array
        .get_or_insert_with(ArraySettings::pretty)
        .indent = value;
    self
}
/// Specify whether to use a trailing comma when serializing pretty arrays
///
/// See `Serializer::pretty_array` for more details.
pub fn pretty_array_trailing_comma(&mut self, value: bool) -> &mut Self {
    // Reuse the existing array settings if present, otherwise start from
    // the "pretty" defaults; in either case, override the trailing comma.
    Rc::get_mut(&mut self.settings)
        .unwrap()
        .array
        .get_or_insert_with(ArraySettings::pretty)
        .trailing_comma = value;
    self
}
/// Emits the pending key (if any) followed by `t` formatted via `Display`,
/// terminating the line with `\n` when inside a table.
fn display<T: fmt::Display>(&mut self, t: T, type_: ArrayState) -> Result<(), Error> {
    self.emit_key(type_)?;
    write!(self.dst, "{}", t).map_err(ser::Error::custom)?;
    if matches!(self.state, State::Table { .. }) {
        self.dst.push('\n');
    }
    Ok(())
}
/// Records the array representation (if we're inside an array) and emits
/// any key/header text required before the next value.
fn emit_key(&mut self, type_: ArrayState) -> Result<(), Error> {
    self.array_type(type_)?;
    // Clone the state so `_emit_key` can mutate `self.dst` while walking
    // the state chain (the chain itself borrows `self`).
    let state = self.state.clone();
    self._emit_key(&state)
}
// recursive implementation of `emit_key` above: walks up the state chain so
// that parent headers/openers are emitted before the current key.
fn _emit_key(&mut self, state: &State<'_>) -> Result<(), Error> {
    match *state {
        // Document root: nothing to emit.
        State::End => Ok(()),
        State::Array {
            parent,
            first,
            type_,
            len,
        } => {
            assert!(type_.get().is_some());
            // On the first element, the enclosing key (e.g. `key = `) has
            // not been written yet — emit it before opening the array.
            if first.get() {
                self._emit_key(parent)?;
            }
            self.emit_array(first, len)
        }
        State::Table {
            parent,
            first,
            table_emitted,
            key,
        } => {
            // TOML forbids plain values after a sub-table within the same
            // table.
            if table_emitted.get() {
                return Err(Error::ValueAfterTable);
            }
            // First value in this table: emit the `[header]` line first.
            if first.get() {
                self.emit_table_header(parent)?;
                first.set(false);
            }
            self.escape_key(key)?;
            self.dst.push_str(" = ");
            Ok(())
        }
    }
}
/// Writes the opening bracket or element separator for an array. Arrays of
/// zero or one element, and arrays in non-pretty mode, stay on one line;
/// otherwise each element goes on its own indented line.
fn emit_array(&mut self, first: &Cell<bool>, len: Option<usize>) -> Result<(), Error> {
    match (len, &self.settings.array) {
        (Some(0..=1), _) | (_, &None) => {
            let lead = if first.get() { "[" } else { ", " };
            self.dst.push_str(lead);
        }
        (_, &Some(ref a)) => {
            let lead = if first.get() { "[\n" } else { ",\n" };
            self.dst.push_str(lead);
            for _ in 0..a.indent {
                self.dst.push(' ');
            }
        }
    }
    Ok(())
}
/// If we're currently inside an array whose representation is not yet
/// decided, records `type_` as its representation. The first element wins;
/// later calls are no-ops.
fn array_type(&mut self, type_: ArrayState) -> Result<(), Error> {
    if let State::Array { type_: slot, .. } = self.state {
        if slot.get().is_none() {
            slot.set(Some(type_));
        }
    }
    Ok(())
}
fn escape_key(&mut self, key: &str) -> Result<(), Error> {
let ok = key.chars().all(|c| match c {
'a'..='z' | 'A'..='Z' | '0'..='9' | '-' | '_' => true,
_ => false,
});
if ok {
write!(self.dst, "{}", key).map_err(ser::Error::custom)?;
} else {
self.emit_str(key, true)?;
}
Ok(())
}
/// Writes `value` as a TOML string, choosing between basic (`"`),
/// multi-line basic (`"""`), literal (`'`) and multi-line literal
/// (`'''`) representations.
///
/// `is_key` is true when the string is being emitted as a quoted key,
/// in which case the literal ("pretty") forms are never used.
fn emit_str(&mut self, value: &str, is_key: bool) -> Result<(), Error> {
    // Which delimiter family the string needs.
    #[derive(PartialEq)]
    enum Type {
        NewlineTripple,
        OnelineTripple,
        OnelineSingle,
    }
    enum Repr {
        /// represent as a literal string (using '')
        Literal(String, Type),
        /// represent the std way (using "")
        Std(Type),
    }
    // Single pass over the string that simultaneously builds the literal
    // representation and decides whether it is usable.
    fn do_pretty(value: &str) -> Repr {
        // For doing pretty prints we store in a new String
        // because there are too many cases where pretty cannot
        // work. We need to determine:
        // - if we are a "multi-line" pretty (if there are \n)
        // - if ['''] appears if multi or ['] if single
        // - if there are any invalid control characters
        //
        // Doing it any other way would require multiple passes
        // to determine if a pretty string works or not.
        let mut out = String::with_capacity(value.len() * 2);
        let mut ty = Type::OnelineSingle;
        // found consecutive single quotes
        let mut max_found_singles = 0;
        let mut found_singles = 0;
        let mut can_be_pretty = true;
        for ch in value.chars() {
            if can_be_pretty {
                if ch == '\'' {
                    found_singles += 1;
                    // Three quotes in a row would terminate a '''
                    // literal, so the literal form is impossible.
                    if found_singles >= 3 {
                        can_be_pretty = false;
                    }
                } else {
                    if found_singles > max_found_singles {
                        max_found_singles = found_singles;
                    }
                    found_singles = 0
                }
                match ch {
                    '\t' => {}
                    '\n' => ty = Type::NewlineTripple,
                    // Escape codes are needed if any ascii control
                    // characters are present, including \b \f \r.
                    c if c <= '\u{1f}' || c == '\u{7f}' => can_be_pretty = false,
                    _ => {}
                }
                out.push(ch);
            } else {
                // the string cannot be represented as pretty,
                // still check if it should be multiline
                if ch == '\n' {
                    ty = Type::NewlineTripple;
                }
            }
        }
        if can_be_pretty && found_singles > 0 && value.ends_with('\'') {
            // We cannot escape the ending quote so we must use """
            can_be_pretty = false;
        }
        if !can_be_pretty {
            debug_assert!(ty != Type::OnelineTripple);
            return Repr::Std(ty);
        }
        // Account for a trailing run of quotes that was never closed by
        // a non-quote character.
        if found_singles > max_found_singles {
            max_found_singles = found_singles;
        }
        debug_assert!(max_found_singles < 3);
        if ty == Type::OnelineSingle && max_found_singles >= 1 {
            // no newlines, but must use ''' because it has ' in it
            ty = Type::OnelineTripple;
        }
        Repr::Literal(out, ty)
    }
    // Literal strings only apply to values when string settings are
    // present; keys always use the std representation.
    let repr = if !is_key && self.settings.string.is_some() {
        match (&self.settings.string, do_pretty(value)) {
            // Pretty analysis chose a literal but literals are disabled:
            // keep the delimiter family, fall back to std escaping.
            (&Some(StringSettings { literal: false, .. }), Repr::Literal(_, ty)) => {
                Repr::Std(ty)
            }
            (_, r) => r,
        }
    } else {
        Repr::Std(Type::OnelineSingle)
    };
    match repr {
        Repr::Literal(literal, ty) => {
            // A pretty string
            match ty {
                Type::NewlineTripple => self.dst.push_str("'''\n"),
                Type::OnelineTripple => self.dst.push_str("'''"),
                Type::OnelineSingle => self.dst.push('\''),
            }
            self.dst.push_str(&literal);
            match ty {
                Type::OnelineSingle => self.dst.push('\''),
                _ => self.dst.push_str("'''"),
            }
        }
        Repr::Std(ty) => {
            match ty {
                Type::NewlineTripple => self.dst.push_str("\"\"\"\n"),
                // note: OnelineTripple can happen if do_pretty wants to do
                // '''it's one line'''
                // but settings.string.literal == false
                Type::OnelineSingle | Type::OnelineTripple => self.dst.push('"'),
            }
            // Escape the string the standard way.
            for ch in value.chars() {
                match ch {
                    '\u{8}' => self.dst.push_str("\\b"),
                    '\u{9}' => self.dst.push_str("\\t"),
                    '\u{a}' => match ty {
                        Type::NewlineTripple => self.dst.push('\n'),
                        Type::OnelineSingle => self.dst.push_str("\\n"),
                        _ => unreachable!(),
                    },
                    '\u{c}' => self.dst.push_str("\\f"),
                    '\u{d}' => self.dst.push_str("\\r"),
                    '\u{22}' => self.dst.push_str("\\\""),
                    '\u{5c}' => self.dst.push_str("\\\\"),
                    // Remaining control characters use \uXXXX escapes.
                    c if c <= '\u{1f}' || c == '\u{7f}' => {
                        write!(self.dst, "\\u{:04X}", ch as u32).map_err(ser::Error::custom)?;
                    }
                    ch => self.dst.push(ch),
                }
            }
            match ty {
                Type::NewlineTripple => self.dst.push_str("\"\"\""),
                Type::OnelineSingle | Type::OnelineTripple => self.dst.push('"'),
            }
        }
    }
    Ok(())
}
/// Emits a `[table]` or `[[array-of-tables]]` header for `state`,
/// including headers for any not-yet-emitted ancestors that require one.
fn emit_table_header(&mut self, state: &State<'_>) -> Result<(), Error> {
    let array_of_tables = match *state {
        State::End => return Ok(()),
        State::Array { .. } => true,
        _ => false,
    };
    // Unlike [..]s, we can't omit [[..]] ancestors, so be sure to emit table
    // headers for them.
    let mut p = state;
    if let State::Array { first, parent, .. } = *state {
        if first.get() {
            p = parent;
        }
    }
    // Walk up through ancestor tables that have not emitted anything yet;
    // if one of them is itself an element of an array-of-tables, its
    // header must be written first.
    while let State::Table { first, parent, .. } = *p {
        p = parent;
        if !first.get() {
            break;
        }
        if let State::Array {
            parent: &State::Table { .. },
            ..
        } = *parent
        {
            self.emit_table_header(parent)?;
            break;
        }
    }
    // Blank-line separation between consecutive headers/items.
    match *state {
        State::Table { first, .. } => {
            if !first.get() {
                // Newline if we are a table that is not the first
                // table in the document.
                self.dst.push('\n');
            }
        }
        State::Array { parent, first, .. } => {
            if !first.get() {
                // Always newline if we are not the first item in the
                // table-array
                self.dst.push('\n');
            } else if let State::Table { first, .. } = *parent {
                if !first.get() {
                    // Newline if we are not the first item in the document
                    self.dst.push('\n');
                }
            }
        }
        _ => {}
    }
    // `[` for tables, `[[` for array-of-tables elements.
    self.dst.push_str("[");
    if array_of_tables {
        self.dst.push_str("[");
    }
    self.emit_key_part(state)?;
    if array_of_tables {
        self.dst.push_str("]");
    }
    self.dst.push_str("]\n");
    Ok(())
}
/// Recursively writes the dotted key path of `key` (used inside table
/// headers). Returns `true` when nothing has been written yet, so the
/// caller knows whether a `.` separator is needed.
fn emit_key_part(&mut self, key: &State<'_>) -> Result<bool, Error> {
    match *key {
        // Root reached: nothing written at this level.
        State::End => Ok(true),
        // Arrays contribute no key component of their own.
        State::Array { parent, .. } => self.emit_key_part(parent),
        State::Table {
            key,
            parent,
            table_emitted,
            ..
        } => {
            // Mark this table so later plain values are rejected.
            table_emitted.set(true);
            let nothing_written = self.emit_key_part(parent)?;
            if !nothing_written {
                self.dst.push('.');
            }
            self.escape_key(key)?;
            Ok(false)
        }
    }
}
}
/// Shared body of `serialize_f32`/`serialize_f64`.
macro_rules! serialize_float {
    ($this:expr, $v:expr) => {{
        $this.emit_key(ArrayState::Started)?;
        // `{}` formatting drops the sign of negative zero (and NaN never
        // carries one), so the `-` is written manually for -0.0 and -nan.
        if ($v.is_nan() || $v == 0.0) && $v.is_sign_negative() {
            write!($this.dst, "-").map_err(ser::Error::custom)?;
        }
        if $v.is_nan() {
            write!($this.dst, "nan").map_err(ser::Error::custom)?;
        } else {
            write!($this.dst, "{}", $v).map_err(ser::Error::custom)?;
        }
        // TOML floats must contain a decimal point; `{}` omits it for
        // integral values.
        if $v % 1.0 == 0.0 {
            write!($this.dst, ".0").map_err(ser::Error::custom)?;
        }
        // Table entries are one per line.
        if let State::Table { .. } = $this.state {
            $this.dst.push_str("\n");
        }
        return Ok(());
    }};
}
// Main serde entry point: dispatches each Rust datatype to the TOML
// emitting machinery above.
impl<'a, 'b> ser::Serializer for &'b mut Serializer<'a> {
    type Ok = ();
    type Error = Error;
    type SerializeSeq = SerializeSeq<'a, 'b>;
    type SerializeTuple = SerializeSeq<'a, 'b>;
    type SerializeTupleStruct = SerializeSeq<'a, 'b>;
    type SerializeTupleVariant = SerializeSeq<'a, 'b>;
    type SerializeMap = SerializeTable<'a, 'b>;
    type SerializeStruct = SerializeTable<'a, 'b>;
    type SerializeStructVariant = ser::Impossible<(), Error>;
    fn serialize_bool(self, v: bool) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_i8(self, v: i8) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_i16(self, v: i16) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_i32(self, v: i32) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_i64(self, v: i64) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_u8(self, v: u8) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_u16(self, v: u16) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_u32(self, v: u32) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_u64(self, v: u64) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_f32(self, v: f32) -> Result<(), Self::Error> {
        serialize_float!(self, v)
    }
    fn serialize_f64(self, v: f64) -> Result<(), Self::Error> {
        serialize_float!(self, v)
    }
    // Chars become one-character strings.
    fn serialize_char(self, v: char) -> Result<(), Self::Error> {
        let mut buf = [0; 4];
        self.serialize_str(v.encode_utf8(&mut buf))
    }
    fn serialize_str(self, value: &str) -> Result<(), Self::Error> {
        self.emit_key(ArrayState::Started)?;
        self.emit_str(value, false)?;
        if let State::Table { .. } = self.state {
            self.dst.push_str("\n");
        }
        Ok(())
    }
    // Byte slices go through their own Serialize impl (i.e. as a
    // sequence of integers).
    fn serialize_bytes(self, value: &[u8]) -> Result<(), Self::Error> {
        use serde::ser::Serialize;
        value.serialize(self)
    }
    // TOML has no null; callers catch `UnsupportedNone` to skip fields.
    fn serialize_none(self) -> Result<(), Self::Error> {
        Err(Error::UnsupportedNone)
    }
    fn serialize_some<T: ?Sized>(self, value: &T) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        value.serialize(self)
    }
    fn serialize_unit(self) -> Result<(), Self::Error> {
        Err(Error::UnsupportedType)
    }
    fn serialize_unit_struct(self, _name: &'static str) -> Result<(), Self::Error> {
        Err(Error::UnsupportedType)
    }
    // Unit enum variants serialize as their name.
    fn serialize_unit_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        variant: &'static str,
    ) -> Result<(), Self::Error> {
        self.serialize_str(variant)
    }
    fn serialize_newtype_struct<T: ?Sized>(
        self,
        _name: &'static str,
        value: &T,
    ) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        value.serialize(self)
    }
    fn serialize_newtype_variant<T: ?Sized>(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _value: &T,
    ) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::UnsupportedType)
    }
    fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
        self.array_type(ArrayState::Started)?;
        Ok(SerializeSeq {
            ser: self,
            first: Cell::new(true),
            type_: Cell::new(None),
            len,
        })
    }
    fn serialize_tuple(self, len: usize) -> Result<Self::SerializeTuple, Self::Error> {
        self.serialize_seq(Some(len))
    }
    fn serialize_tuple_struct(
        self,
        _name: &'static str,
        len: usize,
    ) -> Result<Self::SerializeTupleStruct, Self::Error> {
        self.serialize_seq(Some(len))
    }
    fn serialize_tuple_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        len: usize,
    ) -> Result<Self::SerializeTupleVariant, Self::Error> {
        self.serialize_seq(Some(len))
    }
    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
        self.array_type(ArrayState::StartedAsATable)?;
        Ok(SerializeTable::Table {
            ser: self,
            key: String::new(),
            first: Cell::new(true),
            table_emitted: Cell::new(false),
        })
    }
    fn serialize_struct(
        self,
        name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStruct, Self::Error> {
        // The crate's private datetime marker struct serializes as a
        // bare datetime value rather than as a table.
        if name == datetime::NAME {
            self.array_type(ArrayState::Started)?;
            Ok(SerializeTable::Datetime(self))
        } else {
            self.array_type(ArrayState::StartedAsATable)?;
            Ok(SerializeTable::Table {
                ser: self,
                key: String::new(),
                first: Cell::new(true),
                table_emitted: Cell::new(false),
            })
        }
    }
    fn serialize_struct_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStructVariant, Self::Error> {
        Err(Error::UnsupportedType)
    }
}
impl<'a, 'b> ser::SerializeSeq for SerializeSeq<'a, 'b> {
    type Ok = ();
    type Error = Error;
    fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        // Each element is serialized through a child serializer whose
        // state links back to this array's bookkeeping cells.
        value.serialize(&mut Serializer {
            dst: &mut *self.ser.dst,
            state: State::Array {
                parent: &self.ser.state,
                first: &self.first,
                type_: &self.type_,
                len: self.len,
            },
            settings: self.ser.settings.clone(),
        })?;
        self.first.set(false);
        Ok(())
    }
    fn end(self) -> Result<(), Error> {
        match self.type_.get() {
            // Array of tables: the [[..]] headers already closed it.
            Some(ArrayState::StartedAsATable) => return Ok(()),
            Some(ArrayState::Started) => match (self.len, &self.ser.settings.array) {
                (Some(0..=1), _) | (_, &None) => {
                    self.ser.dst.push_str("]");
                }
                (_, &Some(ref a)) => {
                    if a.trailing_comma {
                        self.ser.dst.push_str(",");
                    }
                    self.ser.dst.push_str("\n]");
                }
            },
            // No element was ever serialized: emit the key and `[]`.
            None => {
                assert!(self.first.get());
                self.ser.emit_key(ArrayState::Started)?;
                self.ser.dst.push_str("[]")
            }
        }
        if let State::Table { .. } = self.ser.state {
            self.ser.dst.push_str("\n");
        }
        Ok(())
    }
}
// Tuples serialize exactly like sequences.
impl<'a, 'b> ser::SerializeTuple for SerializeSeq<'a, 'b> {
    type Ok = ();
    type Error = Error;
    fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        ser::SerializeSeq::serialize_element(self, value)
    }
    fn end(self) -> Result<(), Error> {
        ser::SerializeSeq::end(self)
    }
}
// Tuple variants serialize exactly like sequences.
impl<'a, 'b> ser::SerializeTupleVariant for SerializeSeq<'a, 'b> {
    type Ok = ();
    type Error = Error;
    fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        ser::SerializeSeq::serialize_element(self, value)
    }
    fn end(self) -> Result<(), Error> {
        ser::SerializeSeq::end(self)
    }
}
// Tuple structs serialize exactly like sequences.
impl<'a, 'b> ser::SerializeTupleStruct for SerializeSeq<'a, 'b> {
    type Ok = ();
    type Error = Error;
    fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        ser::SerializeSeq::serialize_element(self, value)
    }
    fn end(self) -> Result<(), Error> {
        ser::SerializeSeq::end(self)
    }
}
impl<'a, 'b> ser::SerializeMap for SerializeTable<'a, 'b> {
    type Ok = ();
    type Error = Error;
    // Captures the key as a `String`; non-string keys are rejected by
    // `StringExtractor` with `Error::KeyNotString`.
    fn serialize_key<T: ?Sized>(&mut self, input: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        match *self {
            SerializeTable::Datetime(_) => panic!(), // shouldn't be possible
            SerializeTable::Table { ref mut key, .. } => {
                // The assignment replaces the previous key wholesale, so
                // clearing it first (the old `key.truncate(0)`) was dead
                // code and has been removed.
                *key = input.serialize(StringExtractor)?;
            }
        }
        Ok(())
    }
    fn serialize_value<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        match *self {
            SerializeTable::Datetime(_) => panic!(), // shouldn't be possible
            SerializeTable::Table {
                ref mut ser,
                ref key,
                ref first,
                ref table_emitted,
                ..
            } => {
                // Serialize through a child serializer whose state carries
                // this key and the table's bookkeeping cells.
                let res = value.serialize(&mut Serializer {
                    dst: &mut *ser.dst,
                    state: State::Table {
                        key,
                        parent: &ser.state,
                        first,
                        table_emitted,
                    },
                    settings: ser.settings.clone(),
                });
                match res {
                    Ok(()) => first.set(false),
                    // `None` values are silently skipped instead of
                    // failing the whole map.
                    Err(Error::UnsupportedNone) => {}
                    Err(e) => return Err(e),
                }
            }
        }
        Ok(())
    }
    fn end(self) -> Result<(), Error> {
        match self {
            SerializeTable::Datetime(_) => panic!(), // shouldn't be possible
            SerializeTable::Table { ser, first, .. } => {
                // An empty table still needs its `[header]` emitted.
                if first.get() {
                    let state = ser.state.clone();
                    ser.emit_table_header(&state)?;
                }
            }
        }
        Ok(())
    }
}
impl<'a, 'b> ser::SerializeStruct for SerializeTable<'a, 'b> {
    type Ok = ();
    type Error = Error;
    fn serialize_field<T: ?Sized>(&mut self, key: &'static str, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        match *self {
            // The datetime marker struct has exactly one well-known
            // field; anything else is malformed.
            SerializeTable::Datetime(ref mut ser) => {
                if key == datetime::FIELD {
                    value.serialize(DateStrEmitter(&mut *ser))?;
                } else {
                    return Err(Error::DateInvalid);
                }
            }
            SerializeTable::Table {
                ref mut ser,
                ref first,
                ref table_emitted,
                ..
            } => {
                // Same scheme as SerializeMap::serialize_value, with the
                // static field name as the key.
                let res = value.serialize(&mut Serializer {
                    dst: &mut *ser.dst,
                    state: State::Table {
                        key,
                        parent: &ser.state,
                        first,
                        table_emitted,
                    },
                    settings: ser.settings.clone(),
                });
                match res {
                    Ok(()) => first.set(false),
                    // Skip `None` fields rather than erroring out.
                    Err(Error::UnsupportedNone) => {}
                    Err(e) => return Err(e),
                }
            }
        }
        Ok(())
    }
    fn end(self) -> Result<(), Error> {
        match self {
            SerializeTable::Datetime(_) => {}
            SerializeTable::Table { ser, first, .. } => {
                // An empty struct still needs its `[header]` emitted.
                if first.get() {
                    let state = ser.state.clone();
                    ser.emit_table_header(&state)?;
                }
            }
        }
        Ok(())
    }
}
/// Serializer that accepts only a string (the pre-rendered datetime) and
/// writes it unquoted; used for the crate's private datetime struct.
struct DateStrEmitter<'a, 'b>(&'b mut Serializer<'a>);
// Every datatype except `str` is rejected with `DateInvalid`: the
// datetime field must arrive as its string rendering.
impl<'a, 'b> ser::Serializer for DateStrEmitter<'a, 'b> {
    type Ok = ();
    type Error = Error;
    type SerializeSeq = ser::Impossible<(), Error>;
    type SerializeTuple = ser::Impossible<(), Error>;
    type SerializeTupleStruct = ser::Impossible<(), Error>;
    type SerializeTupleVariant = ser::Impossible<(), Error>;
    type SerializeMap = ser::Impossible<(), Error>;
    type SerializeStruct = ser::Impossible<(), Error>;
    type SerializeStructVariant = ser::Impossible<(), Error>;
    fn serialize_bool(self, _v: bool) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_i8(self, _v: i8) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_i16(self, _v: i16) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_i32(self, _v: i32) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_i64(self, _v: i64) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_u8(self, _v: u8) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_u16(self, _v: u16) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_u32(self, _v: u32) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_u64(self, _v: u64) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_f32(self, _v: f32) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_f64(self, _v: f64) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_char(self, _v: char) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    // The single accepted input: write the datetime text verbatim
    // (unquoted) through `display`.
    fn serialize_str(self, value: &str) -> Result<(), Self::Error> {
        self.0.display(value, ArrayState::Started)?;
        Ok(())
    }
    fn serialize_bytes(self, _value: &[u8]) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_none(self) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_some<T: ?Sized>(self, _value: &T) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::DateInvalid)
    }
    fn serialize_unit(self) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_unit_struct(self, _name: &'static str) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_unit_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
    ) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_newtype_struct<T: ?Sized>(
        self,
        _name: &'static str,
        _value: &T,
    ) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::DateInvalid)
    }
    fn serialize_newtype_variant<T: ?Sized>(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _value: &T,
    ) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::DateInvalid)
    }
    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_tuple_struct(
        self,
        _name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTupleStruct, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_tuple_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTupleVariant, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_struct(
        self,
        _name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStruct, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_struct_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStructVariant, Self::Error> {
        Err(Error::DateInvalid)
    }
}
/// Serializer that extracts a map key as an owned `String`, rejecting
/// every non-string type with `Error::KeyNotString`.
struct StringExtractor;
// Only `str` (and newtype wrappers around it, which forward) succeed;
// everything else is `KeyNotString`.
impl ser::Serializer for StringExtractor {
    type Ok = String;
    type Error = Error;
    type SerializeSeq = ser::Impossible<String, Error>;
    type SerializeTuple = ser::Impossible<String, Error>;
    type SerializeTupleStruct = ser::Impossible<String, Error>;
    type SerializeTupleVariant = ser::Impossible<String, Error>;
    type SerializeMap = ser::Impossible<String, Error>;
    type SerializeStruct = ser::Impossible<String, Error>;
    type SerializeStructVariant = ser::Impossible<String, Error>;
    fn serialize_bool(self, _v: bool) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_i8(self, _v: i8) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_i16(self, _v: i16) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_i32(self, _v: i32) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_i64(self, _v: i64) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_u8(self, _v: u8) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_u16(self, _v: u16) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_u32(self, _v: u32) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_u64(self, _v: u64) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_f32(self, _v: f32) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_f64(self, _v: f64) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_char(self, _v: char) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    // The successful case: clone the key text.
    fn serialize_str(self, value: &str) -> Result<String, Self::Error> {
        Ok(value.to_string())
    }
    fn serialize_bytes(self, _value: &[u8]) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_none(self) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_some<T: ?Sized>(self, _value: &T) -> Result<String, Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::KeyNotString)
    }
    fn serialize_unit(self) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_unit_struct(self, _name: &'static str) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_unit_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
    ) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    // Newtype structs are transparent, so a newtype over a string is a
    // valid key.
    fn serialize_newtype_struct<T: ?Sized>(
        self,
        _name: &'static str,
        value: &T,
    ) -> Result<String, Self::Error>
    where
        T: ser::Serialize,
    {
        value.serialize(self)
    }
    fn serialize_newtype_variant<T: ?Sized>(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _value: &T,
    ) -> Result<String, Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::KeyNotString)
    }
    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_tuple_struct(
        self,
        _name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTupleStruct, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_tuple_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTupleVariant, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_struct(
        self,
        _name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStruct, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_struct_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStructVariant, Self::Error> {
        Err(Error::KeyNotString)
    }
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::UnsupportedType => "unsupported Rust type".fmt(f),
Error::KeyNotString => "map key was not a string".fmt(f),
Error::ValueAfterTable => "values must be emitted before tables".fmt(f),
Error::DateInvalid => "a serialized date was invalid".fmt(f),
Error::NumberInvalid => "a serialized number was invalid".fmt(f),
Error::UnsupportedNone => "unsupported None value".fmt(f),
Error::Custom(ref s) => s.fmt(f),
Error::KeyNewline => unreachable!(),
Error::ArrayMixedType => unreachable!(),
Error::__Nonexhaustive => panic!(),
}
}
}
// `Display` + `Debug` give `std::error::Error` for free.
impl error::Error for Error {}
impl ser::Error for Error {
    /// Wraps an arbitrary serde error message in `Error::Custom`.
    fn custom<T: fmt::Display>(msg: T) -> Error {
        Error::Custom(msg.to_string())
    }
}
/// Coarse classification of a value's TOML representation, used by
/// `tables_last` to order map entries.
enum Category {
    Primitive,
    Array,
    Table,
}
/// Convenience function to serialize items in a map in an order valid with
/// TOML.
///
/// TOML carries the restriction that keys in a table must be serialized last if
/// their value is a table itself. This isn't always easy to guarantee, so this
/// helper can be used like so:
///
/// ```rust
/// # use serde_derive::Serialize;
/// # use std::collections::HashMap;
/// #[derive(Serialize)]
/// struct Manifest {
/// package: Package,
/// #[serde(serialize_with = "toml::ser::tables_last")]
/// dependencies: HashMap<String, Dependency>,
/// }
/// # type Package = String;
/// # type Dependency = String;
/// # fn main() {}
/// ```
pub fn tables_last<'a, I, K, V, S>(data: &'a I, serializer: S) -> Result<S::Ok, S::Error>
where
    &'a I: IntoIterator<Item = (K, V)>,
    K: ser::Serialize,
    V: ser::Serialize,
    S: ser::Serializer,
{
    use serde::ser::SerializeMap;
    let mut map = serializer.serialize_map(None)?;
    // Emit in three passes — primitives, then arrays, then tables — so
    // that every non-table value precedes any nested table, as TOML
    // requires. Each pass re-categorizes the values via `Categorize`.
    for pass in 0..3 {
        for (k, v) in data {
            let emit_now = match (pass, v.serialize(Categorize::new())?) {
                (0, Category::Primitive)
                | (1, Category::Array)
                | (2, Category::Table) => true,
                _ => false,
            };
            if emit_now {
                map.serialize_entry(&k, &v)?;
            }
        }
    }
    map.end()
}
/// A serializer that produces no output and only reports which
/// `Category` a value would serialize into. `E` is the caller's error
/// type (carried via `PhantomData` so `?` converts cleanly).
struct Categorize<E>(marker::PhantomData<E>);
impl<E> Categorize<E> {
    fn new() -> Self {
        Categorize(marker::PhantomData)
    }
}
// Scalars -> Primitive; bytes/seqs/tuples -> Array; maps/structs ->
// Table; everything TOML can't represent errors out.
impl<E: ser::Error> ser::Serializer for Categorize<E> {
    type Ok = Category;
    type Error = E;
    type SerializeSeq = Self;
    type SerializeTuple = Self;
    type SerializeTupleStruct = Self;
    type SerializeTupleVariant = Self;
    type SerializeMap = Self;
    type SerializeStruct = Self;
    type SerializeStructVariant = ser::Impossible<Category, E>;
    fn serialize_bool(self, _: bool) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_i8(self, _: i8) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_i16(self, _: i16) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_i32(self, _: i32) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_i64(self, _: i64) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_u8(self, _: u8) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_u16(self, _: u16) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_u32(self, _: u32) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_u64(self, _: u64) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_f32(self, _: f32) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_f64(self, _: f64) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_char(self, _: char) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_str(self, _: &str) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    // Byte slices render as arrays of integers.
    fn serialize_bytes(self, _: &[u8]) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Array)
    }
    fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
    // `Some`/newtype wrappers are transparent: categorize the inner
    // value.
    fn serialize_some<T: ?Sized + ser::Serialize>(self, v: &T) -> Result<Self::Ok, Self::Error> {
        v.serialize(self)
    }
    fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
    fn serialize_unit_struct(self, _: &'static str) -> Result<Self::Ok, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
    fn serialize_unit_variant(
        self,
        _: &'static str,
        _: u32,
        _: &'static str,
    ) -> Result<Self::Ok, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
    fn serialize_newtype_struct<T: ?Sized + ser::Serialize>(
        self,
        _: &'static str,
        v: &T,
    ) -> Result<Self::Ok, Self::Error> {
        v.serialize(self)
    }
    fn serialize_newtype_variant<T: ?Sized + ser::Serialize>(
        self,
        _: &'static str,
        _: u32,
        _: &'static str,
        _: &T,
    ) -> Result<Self::Ok, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
    fn serialize_seq(self, _: Option<usize>) -> Result<Self, Self::Error> {
        Ok(self)
    }
    fn serialize_tuple(self, _: usize) -> Result<Self::SerializeTuple, Self::Error> {
        Ok(self)
    }
    fn serialize_tuple_struct(
        self,
        _: &'static str,
        _: usize,
    ) -> Result<Self::SerializeTupleStruct, Self::Error> {
        Ok(self)
    }
    fn serialize_tuple_variant(
        self,
        _: &'static str,
        _: u32,
        _: &'static str,
        _: usize,
    ) -> Result<Self::SerializeTupleVariant, Self::Error> {
        Ok(self)
    }
    fn serialize_map(self, _: Option<usize>) -> Result<Self, Self::Error> {
        Ok(self)
    }
    fn serialize_struct(self, _: &'static str, _: usize) -> Result<Self, Self::Error> {
        Ok(self)
    }
    fn serialize_struct_variant(
        self,
        _: &'static str,
        _: u32,
        _: &'static str,
        _: usize,
    ) -> Result<Self::SerializeStructVariant, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
}
// Elements are ignored; a sequence is always an Array.
impl<E: ser::Error> ser::SerializeSeq for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_element<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Array)
    }
}
// Tuples categorize as arrays.
impl<E: ser::Error> ser::SerializeTuple for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_element<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Array)
    }
}
// Tuple variants categorize as arrays.
impl<E: ser::Error> ser::SerializeTupleVariant for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_field<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Array)
    }
}
// Tuple structs categorize as arrays.
impl<E: ser::Error> ser::SerializeTupleStruct for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_field<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Array)
    }
}
// Entries are ignored; a map is always a Table.
impl<E: ser::Error> ser::SerializeMap for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_key<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn serialize_value<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Table)
    }
}
// Fields are ignored; a struct is always a Table.
impl<E: ser::Error> ser::SerializeStruct for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_field<T: ?Sized>(&mut self, _: &'static str, _: &T) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Table)
    }
}
Fix serialization of -0.0. (#426)
//! Serializing Rust structures into TOML.
//!
//! This module contains all the Serde support for serializing Rust structures
//! into TOML documents (as strings). Note that some top-level functions here
//! are also provided at the top of the crate.
//!
//! Note that the TOML format has a restriction that if a table itself contains
//! tables, all keys with non-table values must be emitted first. This is
//! typically easy to ensure happens when you're defining a `struct` as you can
//! reorder the fields manually, but when working with maps (such as `BTreeMap`
//! or `HashMap`) this can lead to serialization errors. In those situations you
//! may use the `tables_last` function in this module like so:
//!
//! ```rust
//! # use serde_derive::Serialize;
//! # use std::collections::HashMap;
//! #[derive(Serialize)]
//! struct Manifest {
//! package: Package,
//! #[serde(serialize_with = "toml::ser::tables_last")]
//! dependencies: HashMap<String, Dependency>,
//! }
//! # type Package = String;
//! # type Dependency = String;
//! # fn main() {}
//! ```
use std::cell::Cell;
use std::error;
use std::fmt::{self, Write};
use std::marker;
use std::rc::Rc;
use crate::datetime;
use serde::ser;
/// Serialize the given data structure as a TOML byte vector.
///
/// Serialization can fail if `T`'s implementation of `Serialize` decides to
/// fail, if `T` contains a map with non-string keys, or if `T` attempts to
/// serialize an unsupported datatype such as an enum, tuple, or tuple struct.
pub fn to_vec<T: ?Sized>(value: &T) -> Result<Vec<u8>, Error>
where
    T: ser::Serialize,
{
    // Render to a String first, then hand back its UTF-8 bytes.
    let rendered = to_string(value)?;
    Ok(rendered.into_bytes())
}
/// Serialize the given data structure as a String of TOML.
///
/// Serialization can fail if `T`'s implementation of `Serialize` decides to
/// fail, if `T` contains a map with non-string keys, or if `T` attempts to
/// serialize an unsupported datatype such as an enum, tuple, or tuple struct.
///
/// # Examples
///
/// ```
/// use serde_derive::Serialize;
///
/// #[derive(Serialize)]
/// struct Config {
/// database: Database,
/// }
///
/// #[derive(Serialize)]
/// struct Database {
/// ip: String,
/// port: Vec<u16>,
/// connection_max: u32,
/// enabled: bool,
/// }
///
/// fn main() {
/// let config = Config {
/// database: Database {
/// ip: "192.168.1.1".to_string(),
/// port: vec![8001, 8002, 8003],
/// connection_max: 5000,
/// enabled: false,
/// },
/// };
///
/// let toml = toml::to_string(&config).unwrap();
/// println!("{}", toml)
/// }
/// ```
pub fn to_string<T: ?Sized>(value: &T) -> Result<String, Error>
where
    T: ser::Serialize,
{
    // Pre-size the buffer a little to avoid early reallocations.
    let mut out = String::with_capacity(128);
    let mut ser = Serializer::new(&mut out);
    value.serialize(&mut ser)?;
    Ok(out)
}
/// Serialize the given data structure as a "pretty" String of TOML.
///
/// This is identical to `to_string` except the output string has a more
/// "pretty" output. See `Serializer::pretty` for more details.
pub fn to_string_pretty<T: ?Sized>(value: &T) -> Result<String, Error>
where
    T: ser::Serialize,
{
    // Same as `to_string`, but with the pretty-printing serializer.
    let mut out = String::with_capacity(128);
    let mut ser = Serializer::pretty(&mut out);
    value.serialize(&mut ser)?;
    Ok(out)
}
/// Errors that can occur when serializing a type.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Error {
    /// Indicates that a Rust type was requested to be serialized but it was not
    /// supported.
    ///
    /// Currently the TOML format does not support serializing types such as
    /// enums, tuples and tuple structs.
    UnsupportedType,
    /// The key of all TOML maps must be strings, but serialization was
    /// attempted where the key of a map was not a string.
    KeyNotString,
    /// An error that we never emit but keep for backwards compatibility
    #[doc(hidden)]
    KeyNewline,
    /// An array had to be homogeneous, but now it is allowed to be
    /// heterogeneous; kept for backwards compatibility and never emitted.
    #[doc(hidden)]
    ArrayMixedType,
    /// All values in a TOML table must be emitted before further tables are
    /// emitted. If a value is emitted *after* a table then this error is
    /// generated.
    ValueAfterTable,
    /// A serialized date was invalid.
    DateInvalid,
    /// A serialized number was invalid.
    NumberInvalid,
    /// None was attempted to be serialized, but it's not supported.
    UnsupportedNone,
    /// A custom error which could be generated when serializing a particular
    /// type.
    Custom(String),
    // Reserved so new variants can be added without a breaking change.
    #[doc(hidden)]
    __Nonexhaustive,
}
#[derive(Debug, Default, Clone)]
/// Internal place for holding array settings
struct ArraySettings {
    // Number of spaces each array element is indented by in pretty mode.
    indent: usize,
    // Whether the last element of a multi-line array gets a trailing comma.
    trailing_comma: bool,
}
impl ArraySettings {
fn pretty() -> ArraySettings {
ArraySettings {
indent: 4,
trailing_comma: true,
}
}
}
#[derive(Debug, Default, Clone)]
/// String settings
struct StringSettings {
    /// Whether to use literal strings when possible
    literal: bool,
}
impl StringSettings {
fn pretty() -> StringSettings {
StringSettings { literal: true }
}
}
#[derive(Debug, Default, Clone)]
/// Internal struct for holding serialization settings
struct Settings {
    // `None` means "not pretty": compact single-line arrays.
    array: Option<ArraySettings>,
    // `None` means "not pretty": always use basic ("...") strings.
    string: Option<StringSettings>,
}
/// Serialization implementation for TOML.
///
/// This structure implements serialization support for TOML to serialize an
/// arbitrary type to TOML. Note that the TOML format does not support all
/// datatypes in Rust, such as enums, tuples, and tuple structs. These types
/// will generate an error when serialized.
///
/// Currently a serializer always writes its output to an in-memory `String`,
/// which is passed in when creating the serializer itself.
pub struct Serializer<'a> {
    // Output buffer that TOML text is appended to.
    dst: &'a mut String,
    // Current position in the document's table/array nesting.
    state: State<'a>,
    // Formatting options; shared with nested sub-serializers via `Rc`.
    settings: Rc<Settings>,
}
// Records what kind of values an in-progress array holds, so the serializer
// knows whether to close it with `]` or treat it as an array of tables.
#[derive(Debug, Copy, Clone)]
enum ArrayState {
    // Array of plain values, emitted inline as `[...]`.
    Started,
    // Array of tables, emitted as `[[...]]` headers instead.
    StartedAsATable,
}
// Linked-list of the serializer's position in the document; each nested
// table/array borrows its parent's state, ending at `End` (document root).
#[derive(Debug, Clone)]
enum State<'a> {
    Table {
        // Key of the value currently being serialized in this table.
        key: &'a str,
        parent: &'a State<'a>,
        // True until the first value of this table has been emitted.
        first: &'a Cell<bool>,
        // Set once a sub-table header has been emitted; emitting a plain
        // value afterwards is `Error::ValueAfterTable`.
        table_emitted: &'a Cell<bool>,
    },
    Array {
        parent: &'a State<'a>,
        // True until the first element has been emitted.
        first: &'a Cell<bool>,
        // Lazily-determined kind of this array (values vs. tables).
        type_: &'a Cell<Option<ArrayState>>,
        // Length hint from serde, if known.
        len: Option<usize>,
    },
    End,
}
#[doc(hidden)]
// In-progress sequence serialization; backs serde's Seq/Tuple/TupleStruct/
// TupleVariant impls below.
pub struct SerializeSeq<'a, 'b> {
    ser: &'b mut Serializer<'a>,
    // True until the first element has been serialized.
    first: Cell<bool>,
    // Kind of the array, decided when the first element is seen.
    type_: Cell<Option<ArrayState>>,
    // serde's length hint, if any.
    len: Option<usize>,
}
#[doc(hidden)]
// In-progress map/struct serialization. The `Datetime` variant handles the
// special `toml::value::Datetime` wrapper struct, which serializes to a bare
// (unquoted) datetime value rather than a table.
pub enum SerializeTable<'a, 'b> {
    Datetime(&'b mut Serializer<'a>),
    Table {
        ser: &'b mut Serializer<'a>,
        // Most recently serialized key, awaiting its value.
        key: String,
        first: Cell<bool>,
        table_emitted: Cell<bool>,
    },
}
impl<'a> Serializer<'a> {
    /// Creates a new serializer which will emit TOML into the buffer provided.
    ///
    /// The serializer can then be used to serialize a type after which the data
    /// will be present in `dst`.
    pub fn new(dst: &'a mut String) -> Serializer<'a> {
        Serializer {
            dst,
            state: State::End,
            settings: Rc::new(Settings::default()),
        }
    }
    /// Instantiate a "pretty" formatter
    ///
    /// By default this will use:
    ///
    /// - pretty strings: strings with newlines will use the `'''` syntax. See
    ///   `Serializer::pretty_string`
    /// - pretty arrays: each item in arrays will be on a newline, have an indentation of 4 and
    ///   have a trailing comma. See `Serializer::pretty_array`
    pub fn pretty(dst: &'a mut String) -> Serializer<'a> {
        Serializer {
            dst,
            state: State::End,
            settings: Rc::new(Settings {
                array: Some(ArraySettings::pretty()),
                string: Some(StringSettings::pretty()),
            }),
        }
    }
    /// Enable or Disable pretty strings
    ///
    /// If enabled, literal strings will be used when possible and strings with
    /// one or more newlines will use triple quotes (i.e.: `'''` or `"""`)
    ///
    /// # Examples
    ///
    /// Instead of:
    ///
    /// ```toml,ignore
    /// single = "no newlines"
    /// text = "\nfoo\nbar\n"
    /// ```
    ///
    /// You will have:
    ///
    /// ```toml,ignore
    /// single = 'no newlines'
    /// text = '''
    /// foo
    /// bar
    /// '''
    /// ```
    pub fn pretty_string(&mut self, value: bool) -> &mut Self {
        // NOTE: `Rc::get_mut(..).unwrap()` is safe here only while no
        // sub-serializer holds a clone of `settings` — i.e. before
        // serialization begins.
        Rc::get_mut(&mut self.settings).unwrap().string = if value {
            Some(StringSettings::pretty())
        } else {
            None
        };
        self
    }
    /// Enable or Disable Literal strings for pretty strings
    ///
    /// If enabled, literal strings will be used when possible and strings with
    /// one or more newlines will use triple quotes (i.e.: `'''` or `"""`)
    ///
    /// If disabled, literal strings will NEVER be used and strings with one or
    /// more newlines will use `"""`
    ///
    /// # Examples
    ///
    /// Instead of:
    ///
    /// ```toml,ignore
    /// single = "no newlines"
    /// text = "\nfoo\nbar\n"
    /// ```
    ///
    /// You will have:
    ///
    /// ```toml,ignore
    /// single = "no newlines"
    /// text = """
    /// foo
    /// bar
    /// """
    /// ```
    pub fn pretty_string_literal(&mut self, value: bool) -> &mut Self {
        // Update the existing string settings in place, or install pretty
        // defaults (with the requested `literal` flag) if none were set.
        let use_default = if let Some(ref mut s) = Rc::get_mut(&mut self.settings).unwrap().string {
            s.literal = value;
            false
        } else {
            true
        };
        if use_default {
            let mut string = StringSettings::pretty();
            string.literal = value;
            Rc::get_mut(&mut self.settings).unwrap().string = Some(string);
        }
        self
    }
    /// Enable or Disable pretty arrays
    ///
    /// If enabled, arrays will always have each item on their own line.
    ///
    /// Some specific features can be controlled via other builder methods:
    ///
    /// - `Serializer::pretty_array_indent`: set the indent to a value other
    ///   than 4.
    /// - `Serializer::pretty_array_trailing_comma`: enable/disable the trailing
    ///   comma on the last item.
    ///
    /// # Examples
    ///
    /// Instead of:
    ///
    /// ```toml,ignore
    /// array = ["foo", "bar"]
    /// ```
    ///
    /// You will have:
    ///
    /// ```toml,ignore
    /// array = [
    ///     "foo",
    ///     "bar",
    /// ]
    /// ```
    pub fn pretty_array(&mut self, value: bool) -> &mut Self {
        Rc::get_mut(&mut self.settings).unwrap().array = if value {
            Some(ArraySettings::pretty())
        } else {
            None
        };
        self
    }
    /// Set the indent for pretty arrays
    ///
    /// See `Serializer::pretty_array` for more details.
    pub fn pretty_array_indent(&mut self, value: usize) -> &mut Self {
        // Same update-or-install pattern as `pretty_string_literal`.
        let use_default = if let Some(ref mut a) = Rc::get_mut(&mut self.settings).unwrap().array {
            a.indent = value;
            false
        } else {
            true
        };
        if use_default {
            let mut array = ArraySettings::pretty();
            array.indent = value;
            Rc::get_mut(&mut self.settings).unwrap().array = Some(array);
        }
        self
    }
    /// Specify whether to use a trailing comma when serializing pretty arrays
    ///
    /// See `Serializer::pretty_array` for more details.
    pub fn pretty_array_trailing_comma(&mut self, value: bool) -> &mut Self {
        let use_default = if let Some(ref mut a) = Rc::get_mut(&mut self.settings).unwrap().array {
            a.trailing_comma = value;
            false
        } else {
            true
        };
        if use_default {
            let mut array = ArraySettings::pretty();
            array.trailing_comma = value;
            Rc::get_mut(&mut self.settings).unwrap().array = Some(array);
        }
        self
    }
    // Emit a displayable scalar: write `key = ` (or array separators) first,
    // then the value, then a newline when we are directly inside a table.
    fn display<T: fmt::Display>(&mut self, t: T, type_: ArrayState) -> Result<(), Error> {
        self.emit_key(type_)?;
        write!(self.dst, "{}", t).map_err(ser::Error::custom)?;
        if let State::Table { .. } = self.state {
            self.dst.push_str("\n");
        }
        Ok(())
    }
    // Emit whatever prefix the current state requires before a value:
    // table headers, `key = `, or array separators/indentation.
    fn emit_key(&mut self, type_: ArrayState) -> Result<(), Error> {
        self.array_type(type_)?;
        // Clone the state so we can recurse while mutating `self.dst`.
        let state = self.state.clone();
        self._emit_key(&state)
    }
    // recursive implementation of `emit_key` above
    fn _emit_key(&mut self, state: &State<'_>) -> Result<(), Error> {
        match *state {
            State::End => Ok(()),
            State::Array {
                parent,
                first,
                type_,
                len,
            } => {
                // `array_type` must have recorded the element kind already.
                assert!(type_.get().is_some());
                if first.get() {
                    // First element: the enclosing `key = ` has not been
                    // written yet, so emit the parent's prefix first.
                    self._emit_key(parent)?;
                }
                self.emit_array(first, len)
            }
            State::Table {
                parent,
                first,
                table_emitted,
                key,
            } => {
                if table_emitted.get() {
                    // A sub-table header was already written; plain values
                    // may no longer appear in this table.
                    return Err(Error::ValueAfterTable);
                }
                if first.get() {
                    // First value of this table: write the `[header]` line.
                    self.emit_table_header(parent)?;
                    first.set(false);
                }
                self.escape_key(key)?;
                self.dst.push_str(" = ");
                Ok(())
            }
        }
    }
    // Write the opening bracket or element separator for an array, honoring
    // the pretty settings (one element per line, indented).
    fn emit_array(&mut self, first: &Cell<bool>, len: Option<usize>) -> Result<(), Error> {
        match (len, &self.settings.array) {
            // Arrays of 0 or 1 elements, and non-pretty mode, stay inline.
            (Some(0..=1), _) | (_, &None) => {
                if first.get() {
                    self.dst.push_str("[")
                } else {
                    self.dst.push_str(", ")
                }
            }
            (_, &Some(ref a)) => {
                if first.get() {
                    self.dst.push_str("[\n")
                } else {
                    self.dst.push_str(",\n")
                }
                for _ in 0..a.indent {
                    self.dst.push_str(" ");
                }
            }
        }
        Ok(())
    }
    // Record the element kind of the enclosing array (value vs. table),
    // first writer wins; no-op when we are not inside an array.
    fn array_type(&mut self, type_: ArrayState) -> Result<(), Error> {
        let prev = match self.state {
            State::Array { type_, .. } => type_,
            _ => return Ok(()),
        };
        if prev.get().is_none() {
            prev.set(Some(type_));
        }
        Ok(())
    }
    // Write a table/value key, bare if it only contains characters TOML
    // allows unquoted, otherwise as a quoted string.
    fn escape_key(&mut self, key: &str) -> Result<(), Error> {
        let ok = key.chars().all(|c| match c {
            'a'..='z' | 'A'..='Z' | '0'..='9' | '-' | '_' => true,
            _ => false,
        });
        if ok {
            write!(self.dst, "{}", key).map_err(ser::Error::custom)?;
        } else {
            self.emit_str(key, true)?;
        }
        Ok(())
    }
    // Write a string value (or quoted key when `is_key`), choosing between
    // literal ('...'/''') and basic ("..."/""") representations based on the
    // string's contents and the pretty-string settings.
    fn emit_str(&mut self, value: &str, is_key: bool) -> Result<(), Error> {
        #[derive(PartialEq)]
        enum Type {
            NewlineTripple,
            OnelineTripple,
            OnelineSingle,
        }
        enum Repr {
            /// represent as a literal string (using '')
            Literal(String, Type),
            /// represent the std way (using "")
            Std(Type),
        }
        fn do_pretty(value: &str) -> Repr {
            // For doing pretty prints we store in a new String
            // because there are too many cases where pretty cannot
            // work. We need to determine:
            // - if we are a "multi-line" pretty (if there are \n)
            // - if ['''] appears if multi or ['] if single
            // - if there are any invalid control characters
            //
            // Doing it any other way would require multiple passes
            // to determine if a pretty string works or not.
            let mut out = String::with_capacity(value.len() * 2);
            let mut ty = Type::OnelineSingle;
            // found consecutive single quotes
            let mut max_found_singles = 0;
            let mut found_singles = 0;
            let mut can_be_pretty = true;
            for ch in value.chars() {
                if can_be_pretty {
                    if ch == '\'' {
                        found_singles += 1;
                        if found_singles >= 3 {
                            // ''' inside the string cannot be escaped in a
                            // literal string, so fall back to basic strings.
                            can_be_pretty = false;
                        }
                    } else {
                        if found_singles > max_found_singles {
                            max_found_singles = found_singles;
                        }
                        found_singles = 0
                    }
                    match ch {
                        '\t' => {}
                        '\n' => ty = Type::NewlineTripple,
                        // Escape codes are needed if any ascii control
                        // characters are present, including \b \f \r.
                        c if c <= '\u{1f}' || c == '\u{7f}' => can_be_pretty = false,
                        _ => {}
                    }
                    out.push(ch);
                } else {
                    // the string cannot be represented as pretty,
                    // still check if it should be multiline
                    if ch == '\n' {
                        ty = Type::NewlineTripple;
                    }
                }
            }
            if can_be_pretty && found_singles > 0 && value.ends_with('\'') {
                // We cannot escape the ending quote so we must use """
                can_be_pretty = false;
            }
            if !can_be_pretty {
                debug_assert!(ty != Type::OnelineTripple);
                return Repr::Std(ty);
            }
            if found_singles > max_found_singles {
                max_found_singles = found_singles;
            }
            debug_assert!(max_found_singles < 3);
            if ty == Type::OnelineSingle && max_found_singles >= 1 {
                // no newlines, but must use ''' because it has ' in it
                ty = Type::OnelineTripple;
            }
            Repr::Literal(out, ty)
        }
        // Keys are always emitted as basic strings; values consult the
        // pretty-string settings.
        let repr = if !is_key && self.settings.string.is_some() {
            match (&self.settings.string, do_pretty(value)) {
                (&Some(StringSettings { literal: false, .. }), Repr::Literal(_, ty)) => {
                    Repr::Std(ty)
                }
                (_, r) => r,
            }
        } else {
            Repr::Std(Type::OnelineSingle)
        };
        match repr {
            Repr::Literal(literal, ty) => {
                // A pretty string
                match ty {
                    Type::NewlineTripple => self.dst.push_str("'''\n"),
                    Type::OnelineTripple => self.dst.push_str("'''"),
                    Type::OnelineSingle => self.dst.push('\''),
                }
                self.dst.push_str(&literal);
                match ty {
                    Type::OnelineSingle => self.dst.push('\''),
                    _ => self.dst.push_str("'''"),
                }
            }
            Repr::Std(ty) => {
                match ty {
                    Type::NewlineTripple => self.dst.push_str("\"\"\"\n"),
                    // note: OnelineTripple can happen if do_pretty wants to do
                    // '''it's one line'''
                    // but settings.string.literal == false
                    Type::OnelineSingle | Type::OnelineTripple => self.dst.push('"'),
                }
                // Escape control characters and quotes per the TOML basic
                // string rules.
                for ch in value.chars() {
                    match ch {
                        '\u{8}' => self.dst.push_str("\\b"),
                        '\u{9}' => self.dst.push_str("\\t"),
                        '\u{a}' => match ty {
                            Type::NewlineTripple => self.dst.push('\n'),
                            Type::OnelineSingle => self.dst.push_str("\\n"),
                            _ => unreachable!(),
                        },
                        '\u{c}' => self.dst.push_str("\\f"),
                        '\u{d}' => self.dst.push_str("\\r"),
                        '\u{22}' => self.dst.push_str("\\\""),
                        '\u{5c}' => self.dst.push_str("\\\\"),
                        c if c <= '\u{1f}' || c == '\u{7f}' => {
                            write!(self.dst, "\\u{:04X}", ch as u32).map_err(ser::Error::custom)?;
                        }
                        ch => self.dst.push(ch),
                    }
                }
                match ty {
                    Type::NewlineTripple => self.dst.push_str("\"\"\""),
                    Type::OnelineSingle | Type::OnelineTripple => self.dst.push('"'),
                }
            }
        }
        Ok(())
    }
    // Write a `[header]` (or `[[header]]` for an array of tables) line for
    // the table described by `state`, including any not-yet-emitted ancestor
    // headers and blank-line separation between tables.
    fn emit_table_header(&mut self, state: &State<'_>) -> Result<(), Error> {
        let array_of_tables = match *state {
            State::End => return Ok(()),
            State::Array { .. } => true,
            _ => false,
        };
        // Unlike [..]s, we can't omit [[..]] ancestors, so be sure to emit table
        // headers for them.
        let mut p = state;
        if let State::Array { first, parent, .. } = *state {
            if first.get() {
                p = parent;
            }
        }
        while let State::Table { first, parent, .. } = *p {
            p = parent;
            if !first.get() {
                break;
            }
            if let State::Array {
                parent: &State::Table { .. },
                ..
            } = *parent
            {
                self.emit_table_header(parent)?;
                break;
            }
        }
        match *state {
            State::Table { first, .. } => {
                if !first.get() {
                    // Newline if we are a table that is not the first
                    // table in the document.
                    self.dst.push('\n');
                }
            }
            State::Array { parent, first, .. } => {
                if !first.get() {
                    // Always newline if we are not the first item in the
                    // table-array
                    self.dst.push('\n');
                } else if let State::Table { first, .. } = *parent {
                    if !first.get() {
                        // Newline if we are not the first item in the document
                        self.dst.push('\n');
                    }
                }
            }
            _ => {}
        }
        self.dst.push_str("[");
        if array_of_tables {
            self.dst.push_str("[");
        }
        self.emit_key_part(state)?;
        if array_of_tables {
            self.dst.push_str("]");
        }
        self.dst.push_str("]\n");
        Ok(())
    }
    // Write the dotted key path for a table header by walking up the state
    // chain; returns `true` when nothing was written (i.e. at the root),
    // which tells the caller to skip the leading `.` separator.
    fn emit_key_part(&mut self, key: &State<'_>) -> Result<bool, Error> {
        match *key {
            State::Array { parent, .. } => self.emit_key_part(parent),
            State::End => Ok(true),
            State::Table {
                key,
                parent,
                table_emitted,
                ..
            } => {
                table_emitted.set(true);
                let first = self.emit_key_part(parent)?;
                if !first {
                    self.dst.push_str(".");
                }
                self.escape_key(key)?;
                Ok(false)
            }
        }
    }
}
// Shared body for `serialize_f32`/`serialize_f64`: TOML floats must always
// contain a decimal point or be spelled `nan`, so integral values get a
// trailing `.0` and NaN/negative-zero are written out explicitly.
macro_rules! serialize_float {
    ($this:expr, $v:expr) => {{
        $this.emit_key(ArrayState::Started)?;
        match ($v.is_sign_negative(), $v.is_nan(), $v == 0.0) {
            (true, true, _) => write!($this.dst, "-nan"),
            (false, true, _) => write!($this.dst, "nan"),
            // `{}` formatting would print both zeroes as "0", losing the
            // sign and the decimal point, so handle them explicitly.
            (true, false, true) => write!($this.dst, "-0.0"),
            (false, false, true) => write!($this.dst, "0.0"),
            (_, false, false) => write!($this.dst, "{}", $v).and_then(|_| {
                if $v % 1.0 == 0.0 {
                    // Integral value: append ".0" so it parses as a float.
                    write!($this.dst, ".0")
                } else {
                    Ok(())
                }
            }),
        }
        .map_err(ser::Error::custom)?;
        if let State::Table { .. } = $this.state {
            $this.dst.push_str("\n");
        }
        return Ok(());
    }};
}
// Main serde entry point: scalars are written via `display`, sequences and
// maps hand off to the `SerializeSeq`/`SerializeTable` helpers above.
impl<'a, 'b> ser::Serializer for &'b mut Serializer<'a> {
    type Ok = ();
    type Error = Error;
    type SerializeSeq = SerializeSeq<'a, 'b>;
    type SerializeTuple = SerializeSeq<'a, 'b>;
    type SerializeTupleStruct = SerializeSeq<'a, 'b>;
    type SerializeTupleVariant = SerializeSeq<'a, 'b>;
    type SerializeMap = SerializeTable<'a, 'b>;
    type SerializeStruct = SerializeTable<'a, 'b>;
    // Struct variants have no TOML representation.
    type SerializeStructVariant = ser::Impossible<(), Error>;
    fn serialize_bool(self, v: bool) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_i8(self, v: i8) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_i16(self, v: i16) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_i32(self, v: i32) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_i64(self, v: i64) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_u8(self, v: u8) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_u16(self, v: u16) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_u32(self, v: u32) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_u64(self, v: u64) -> Result<(), Self::Error> {
        self.display(v, ArrayState::Started)
    }
    fn serialize_f32(self, v: f32) -> Result<(), Self::Error> {
        serialize_float!(self, v)
    }
    fn serialize_f64(self, v: f64) -> Result<(), Self::Error> {
        serialize_float!(self, v)
    }
    fn serialize_char(self, v: char) -> Result<(), Self::Error> {
        // A char becomes a one-character TOML string.
        let mut buf = [0; 4];
        self.serialize_str(v.encode_utf8(&mut buf))
    }
    fn serialize_str(self, value: &str) -> Result<(), Self::Error> {
        self.emit_key(ArrayState::Started)?;
        self.emit_str(value, false)?;
        if let State::Table { .. } = self.state {
            self.dst.push_str("\n");
        }
        Ok(())
    }
    fn serialize_bytes(self, value: &[u8]) -> Result<(), Self::Error> {
        // Delegate to the slice's own `Serialize` impl (a sequence).
        use serde::ser::Serialize;
        value.serialize(self)
    }
    fn serialize_none(self) -> Result<(), Self::Error> {
        // The struct/map impls below catch this error and skip the field.
        Err(Error::UnsupportedNone)
    }
    fn serialize_some<T: ?Sized>(self, value: &T) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        value.serialize(self)
    }
    fn serialize_unit(self) -> Result<(), Self::Error> {
        Err(Error::UnsupportedType)
    }
    fn serialize_unit_struct(self, _name: &'static str) -> Result<(), Self::Error> {
        Err(Error::UnsupportedType)
    }
    fn serialize_unit_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        variant: &'static str,
    ) -> Result<(), Self::Error> {
        // Unit enum variants serialize as their name.
        self.serialize_str(variant)
    }
    fn serialize_newtype_struct<T: ?Sized>(
        self,
        _name: &'static str,
        value: &T,
    ) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        // Newtype structs are transparent.
        value.serialize(self)
    }
    fn serialize_newtype_variant<T: ?Sized>(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _value: &T,
    ) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::UnsupportedType)
    }
    fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
        self.array_type(ArrayState::Started)?;
        Ok(SerializeSeq {
            ser: self,
            first: Cell::new(true),
            type_: Cell::new(None),
            len,
        })
    }
    fn serialize_tuple(self, len: usize) -> Result<Self::SerializeTuple, Self::Error> {
        self.serialize_seq(Some(len))
    }
    fn serialize_tuple_struct(
        self,
        _name: &'static str,
        len: usize,
    ) -> Result<Self::SerializeTupleStruct, Self::Error> {
        self.serialize_seq(Some(len))
    }
    fn serialize_tuple_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        len: usize,
    ) -> Result<Self::SerializeTupleVariant, Self::Error> {
        self.serialize_seq(Some(len))
    }
    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
        self.array_type(ArrayState::StartedAsATable)?;
        Ok(SerializeTable::Table {
            ser: self,
            key: String::new(),
            first: Cell::new(true),
            table_emitted: Cell::new(false),
        })
    }
    fn serialize_struct(
        self,
        name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStruct, Self::Error> {
        // The `toml::value::Datetime` wrapper struct is special-cased: it is
        // written as a bare datetime value, not a table.
        if name == datetime::NAME {
            self.array_type(ArrayState::Started)?;
            Ok(SerializeTable::Datetime(self))
        } else {
            self.array_type(ArrayState::StartedAsATable)?;
            Ok(SerializeTable::Table {
                ser: self,
                key: String::new(),
                first: Cell::new(true),
                table_emitted: Cell::new(false),
            })
        }
    }
    fn serialize_struct_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStructVariant, Self::Error> {
        Err(Error::UnsupportedType)
    }
}
impl<'a, 'b> ser::SerializeSeq for SerializeSeq<'a, 'b> {
    type Ok = ();
    type Error = Error;
    fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        // Serialize the element through a sub-serializer whose state links
        // back to this sequence, so separators/headers come out right.
        value.serialize(&mut Serializer {
            dst: &mut *self.ser.dst,
            state: State::Array {
                parent: &self.ser.state,
                first: &self.first,
                type_: &self.type_,
                len: self.len,
            },
            settings: self.ser.settings.clone(),
        })?;
        self.first.set(false);
        Ok(())
    }
    fn end(self) -> Result<(), Error> {
        match self.type_.get() {
            // Arrays of tables are closed by their own `[[..]]` headers.
            Some(ArrayState::StartedAsATable) => return Ok(()),
            Some(ArrayState::Started) => match (self.len, &self.ser.settings.array) {
                // Inline arrays (0/1 elements, or non-pretty mode).
                (Some(0..=1), _) | (_, &None) => {
                    self.ser.dst.push_str("]");
                }
                (_, &Some(ref a)) => {
                    if a.trailing_comma {
                        self.ser.dst.push_str(",");
                    }
                    self.ser.dst.push_str("\n]");
                }
            },
            None => {
                // No element was ever serialized, so no `[` was written:
                // emit the key and an empty array now.
                assert!(self.first.get());
                self.ser.emit_key(ArrayState::Started)?;
                self.ser.dst.push_str("[]")
            }
        }
        if let State::Table { .. } = self.ser.state {
            self.ser.dst.push_str("\n");
        }
        Ok(())
    }
}
// Tuples serialize exactly like sequences.
impl<'a, 'b> ser::SerializeTuple for SerializeSeq<'a, 'b> {
    type Ok = ();
    type Error = Error;
    fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        ser::SerializeSeq::serialize_element(self, value)
    }
    fn end(self) -> Result<(), Error> {
        ser::SerializeSeq::end(self)
    }
}
// Tuple variants serialize exactly like sequences.
impl<'a, 'b> ser::SerializeTupleVariant for SerializeSeq<'a, 'b> {
    type Ok = ();
    type Error = Error;
    fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        ser::SerializeSeq::serialize_element(self, value)
    }
    fn end(self) -> Result<(), Error> {
        ser::SerializeSeq::end(self)
    }
}
// Tuple structs serialize exactly like sequences.
impl<'a, 'b> ser::SerializeTupleStruct for SerializeSeq<'a, 'b> {
    type Ok = ();
    type Error = Error;
    fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        ser::SerializeSeq::serialize_element(self, value)
    }
    fn end(self) -> Result<(), Error> {
        ser::SerializeSeq::end(self)
    }
}
impl<'a, 'b> ser::SerializeMap for SerializeTable<'a, 'b> {
    type Ok = ();
    type Error = Error;
    /// Captures the entry's key as a `String`; a non-string key is rejected
    /// with `Error::KeyNotString` by `StringExtractor`.
    fn serialize_key<T: ?Sized>(&mut self, input: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        match *self {
            SerializeTable::Datetime(_) => panic!(), // shouldn't be possible
            SerializeTable::Table { ref mut key, .. } => {
                // `StringExtractor` yields a freshly-allocated `String`, so
                // the previous buffer is simply replaced; clearing it first
                // (as the old `key.truncate(0)` did) was dead code.
                *key = input.serialize(StringExtractor)?;
            }
        }
        Ok(())
    }
    /// Serializes the entry's value under the key captured by
    /// `serialize_key`. A `None` value (`Error::UnsupportedNone`) is
    /// silently skipped rather than treated as an error.
    fn serialize_value<T: ?Sized>(&mut self, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        match *self {
            SerializeTable::Datetime(_) => panic!(), // shouldn't be possible
            SerializeTable::Table {
                ref mut ser,
                ref key,
                ref first,
                ref table_emitted,
                ..
            } => {
                // Serialize through a sub-serializer whose state links back
                // to this table so key/header emission works recursively.
                let res = value.serialize(&mut Serializer {
                    dst: &mut *ser.dst,
                    state: State::Table {
                        key,
                        parent: &ser.state,
                        first,
                        table_emitted,
                    },
                    settings: ser.settings.clone(),
                });
                match res {
                    Ok(()) => first.set(false),
                    // Skip `None` values entirely.
                    Err(Error::UnsupportedNone) => {}
                    Err(e) => return Err(e),
                }
            }
        }
        Ok(())
    }
    fn end(self) -> Result<(), Error> {
        match self {
            SerializeTable::Datetime(_) => panic!(), // shouldn't be possible
            SerializeTable::Table { ser, first, .. } => {
                if first.get() {
                    // Empty table: no value triggered header emission, so
                    // emit the bare `[header]` now.
                    let state = ser.state.clone();
                    ser.emit_table_header(&state)?;
                }
            }
        }
        Ok(())
    }
}
impl<'a, 'b> ser::SerializeStruct for SerializeTable<'a, 'b> {
    type Ok = ();
    type Error = Error;
    fn serialize_field<T: ?Sized>(&mut self, key: &'static str, value: &T) -> Result<(), Error>
    where
        T: ser::Serialize,
    {
        match *self {
            // Datetime wrapper struct: its single known field holds the
            // datetime's string form, emitted without quotes.
            SerializeTable::Datetime(ref mut ser) => {
                if key == datetime::FIELD {
                    value.serialize(DateStrEmitter(&mut *ser))?;
                } else {
                    return Err(Error::DateInvalid);
                }
            }
            SerializeTable::Table {
                ref mut ser,
                ref first,
                ref table_emitted,
                ..
            } => {
                // Same recursive sub-serializer scheme as `serialize_value`
                // in the `SerializeMap` impl; struct field names are the keys.
                let res = value.serialize(&mut Serializer {
                    dst: &mut *ser.dst,
                    state: State::Table {
                        key,
                        parent: &ser.state,
                        first,
                        table_emitted,
                    },
                    settings: ser.settings.clone(),
                });
                match res {
                    Ok(()) => first.set(false),
                    // `None` fields are skipped, not errors.
                    Err(Error::UnsupportedNone) => {}
                    Err(e) => return Err(e),
                }
            }
        }
        Ok(())
    }
    fn end(self) -> Result<(), Error> {
        match self {
            SerializeTable::Datetime(_) => {}
            SerializeTable::Table { ser, first, .. } => {
                if first.get() {
                    // Empty struct: emit its `[header]` so it still appears.
                    let state = ser.state.clone();
                    ser.emit_table_header(&state)?;
                }
            }
        }
        Ok(())
    }
}
// Serializer that accepts only the string field of the datetime wrapper
// struct, writing it to the output unquoted; everything else is DateInvalid.
struct DateStrEmitter<'a, 'b>(&'b mut Serializer<'a>);
// Every method except `serialize_str` rejects its input with
// `Error::DateInvalid`: a datetime's inner field must be a string.
impl<'a, 'b> ser::Serializer for DateStrEmitter<'a, 'b> {
    type Ok = ();
    type Error = Error;
    type SerializeSeq = ser::Impossible<(), Error>;
    type SerializeTuple = ser::Impossible<(), Error>;
    type SerializeTupleStruct = ser::Impossible<(), Error>;
    type SerializeTupleVariant = ser::Impossible<(), Error>;
    type SerializeMap = ser::Impossible<(), Error>;
    type SerializeStruct = ser::Impossible<(), Error>;
    type SerializeStructVariant = ser::Impossible<(), Error>;
    fn serialize_bool(self, _v: bool) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_i8(self, _v: i8) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_i16(self, _v: i16) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_i32(self, _v: i32) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_i64(self, _v: i64) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_u8(self, _v: u8) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_u16(self, _v: u16) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_u32(self, _v: u32) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_u64(self, _v: u64) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_f32(self, _v: f32) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_f64(self, _v: f64) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_char(self, _v: char) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_str(self, value: &str) -> Result<(), Self::Error> {
        // The only accepted input: write the datetime text verbatim
        // (unquoted) via the underlying serializer.
        self.0.display(value, ArrayState::Started)?;
        Ok(())
    }
    fn serialize_bytes(self, _value: &[u8]) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_none(self) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_some<T: ?Sized>(self, _value: &T) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::DateInvalid)
    }
    fn serialize_unit(self) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_unit_struct(self, _name: &'static str) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_unit_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
    ) -> Result<(), Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_newtype_struct<T: ?Sized>(
        self,
        _name: &'static str,
        _value: &T,
    ) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::DateInvalid)
    }
    fn serialize_newtype_variant<T: ?Sized>(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _value: &T,
    ) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::DateInvalid)
    }
    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_tuple_struct(
        self,
        _name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTupleStruct, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_tuple_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTupleVariant, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_struct(
        self,
        _name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStruct, Self::Error> {
        Err(Error::DateInvalid)
    }
    fn serialize_struct_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStructVariant, Self::Error> {
        Err(Error::DateInvalid)
    }
}
// Serializer that extracts a map key as an owned `String`, rejecting any
// non-string key with `Error::KeyNotString`.
struct StringExtractor;
// Only `serialize_str` (and transparent newtype structs wrapping a string)
// succeed; every other input is `Error::KeyNotString`.
impl ser::Serializer for StringExtractor {
    type Ok = String;
    type Error = Error;
    type SerializeSeq = ser::Impossible<String, Error>;
    type SerializeTuple = ser::Impossible<String, Error>;
    type SerializeTupleStruct = ser::Impossible<String, Error>;
    type SerializeTupleVariant = ser::Impossible<String, Error>;
    type SerializeMap = ser::Impossible<String, Error>;
    type SerializeStruct = ser::Impossible<String, Error>;
    type SerializeStructVariant = ser::Impossible<String, Error>;
    fn serialize_bool(self, _v: bool) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_i8(self, _v: i8) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_i16(self, _v: i16) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_i32(self, _v: i32) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_i64(self, _v: i64) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_u8(self, _v: u8) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_u16(self, _v: u16) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_u32(self, _v: u32) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_u64(self, _v: u64) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_f32(self, _v: f32) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_f64(self, _v: f64) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_char(self, _v: char) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_str(self, value: &str) -> Result<String, Self::Error> {
        // The success path: hand back an owned copy of the key.
        Ok(value.to_string())
    }
    fn serialize_bytes(self, _value: &[u8]) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_none(self) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_some<T: ?Sized>(self, _value: &T) -> Result<String, Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::KeyNotString)
    }
    fn serialize_unit(self) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_unit_struct(self, _name: &'static str) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_unit_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
    ) -> Result<String, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_newtype_struct<T: ?Sized>(
        self,
        _name: &'static str,
        value: &T,
    ) -> Result<String, Self::Error>
    where
        T: ser::Serialize,
    {
        // Newtype structs are transparent, so a string wrapped in one is
        // still a valid key.
        value.serialize(self)
    }
    fn serialize_newtype_variant<T: ?Sized>(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _value: &T,
    ) -> Result<String, Self::Error>
    where
        T: ser::Serialize,
    {
        Err(Error::KeyNotString)
    }
    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_tuple_struct(
        self,
        _name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTupleStruct, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_tuple_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeTupleVariant, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_struct(
        self,
        _name: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStruct, Self::Error> {
        Err(Error::KeyNotString)
    }
    fn serialize_struct_variant(
        self,
        _name: &'static str,
        _variant_index: u32,
        _variant: &'static str,
        _len: usize,
    ) -> Result<Self::SerializeStructVariant, Self::Error> {
        Err(Error::KeyNotString)
    }
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::UnsupportedType => "unsupported Rust type".fmt(f),
Error::KeyNotString => "map key was not a string".fmt(f),
Error::ValueAfterTable => "values must be emitted before tables".fmt(f),
Error::DateInvalid => "a serialized date was invalid".fmt(f),
Error::NumberInvalid => "a serialized number was invalid".fmt(f),
Error::UnsupportedNone => "unsupported None value".fmt(f),
Error::Custom(ref s) => s.fmt(f),
Error::KeyNewline => unreachable!(),
Error::ArrayMixedType => unreachable!(),
Error::__Nonexhaustive => panic!(),
}
}
}
// `Error` has no underlying source; the trait's default methods suffice.
impl error::Error for Error {}
impl ser::Error for Error {
    // Lets serde report arbitrary serialization failures through `Custom`.
    fn custom<T: fmt::Display>(msg: T) -> Error {
        Error::Custom(msg.to_string())
    }
}
// Coarse classification of a value's TOML representation, used by
// `tables_last` to order map entries (values before arrays before tables).
enum Category {
    Primitive,
    Array,
    Table,
}
/// Convenience function to serialize items in a map in an order valid with
/// TOML.
///
/// TOML carries the restriction that keys in a table must be serialized last if
/// their value is a table itself. This isn't always easy to guarantee, so this
/// helper can be used like so:
///
/// ```rust
/// # use serde_derive::Serialize;
/// # use std::collections::HashMap;
/// #[derive(Serialize)]
/// struct Manifest {
///     package: Package,
///     #[serde(serialize_with = "toml::ser::tables_last")]
///     dependencies: HashMap<String, Dependency>,
/// }
/// # type Package = String;
/// # type Dependency = String;
/// # fn main() {}
/// ```
pub fn tables_last<'a, I, K, V, S>(data: &'a I, serializer: S) -> Result<S::Ok, S::Error>
where
    &'a I: IntoIterator<Item = (K, V)>,
    K: ser::Serialize,
    V: ser::Serialize,
    S: ser::Serializer,
{
    use serde::ser::SerializeMap;
    // Three passes over the map, emitting entries grouped by category so
    // that plain values come first and tables last. Each pass re-serializes
    // the value against `Categorize` to decide which group it belongs to.
    let mut map = serializer.serialize_map(None)?;
    for (k, v) in data {
        if let Category::Primitive = v.serialize(Categorize::new())? {
            map.serialize_entry(&k, &v)?;
        }
    }
    for (k, v) in data {
        if let Category::Array = v.serialize(Categorize::new())? {
            map.serialize_entry(&k, &v)?;
        }
    }
    for (k, v) in data {
        if let Category::Table = v.serialize(Categorize::new())? {
            map.serialize_entry(&k, &v)?;
        }
    }
    map.end()
}
/// A serializer that never produces output: it only reports which
/// `Category` a value would serialize into. The `PhantomData` pins down
/// the error type expected by the caller.
struct Categorize<E>(marker::PhantomData<E>);
impl<E> Categorize<E> {
    fn new() -> Self {
        Categorize(marker::PhantomData)
    }
}
// Classifies values without writing anything: every scalar reports
// `Primitive`, sequence-like values report `Array`, map-like values report
// `Table`, and anything TOML cannot represent errors out.
impl<E: ser::Error> ser::Serializer for Categorize<E> {
    type Ok = Category;
    type Error = E;
    type SerializeSeq = Self;
    type SerializeTuple = Self;
    type SerializeTupleStruct = Self;
    type SerializeTupleVariant = Self;
    type SerializeMap = Self;
    type SerializeStruct = Self;
    type SerializeStructVariant = ser::Impossible<Category, E>;
    fn serialize_bool(self, _: bool) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_i8(self, _: i8) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_i16(self, _: i16) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_i32(self, _: i32) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_i64(self, _: i64) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_u8(self, _: u8) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_u16(self, _: u16) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_u32(self, _: u32) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_u64(self, _: u64) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_f32(self, _: f32) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_f64(self, _: f64) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_char(self, _: char) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    fn serialize_str(self, _: &str) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Primitive)
    }
    // Byte strings serialize as TOML arrays of integers.
    fn serialize_bytes(self, _: &[u8]) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Array)
    }
    fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
    // `Some(v)` and newtype wrappers are transparent: categorize the inner value.
    fn serialize_some<T: ?Sized + ser::Serialize>(self, v: &T) -> Result<Self::Ok, Self::Error> {
        v.serialize(self)
    }
    fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
    fn serialize_unit_struct(self, _: &'static str) -> Result<Self::Ok, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
    fn serialize_unit_variant(
        self,
        _: &'static str,
        _: u32,
        _: &'static str,
    ) -> Result<Self::Ok, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
    fn serialize_newtype_struct<T: ?Sized + ser::Serialize>(
        self,
        _: &'static str,
        v: &T,
    ) -> Result<Self::Ok, Self::Error> {
        v.serialize(self)
    }
    fn serialize_newtype_variant<T: ?Sized + ser::Serialize>(
        self,
        _: &'static str,
        _: u32,
        _: &'static str,
        _: &T,
    ) -> Result<Self::Ok, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
    // Sequence-like containers: the `Serialize*` impls below report `Array`.
    fn serialize_seq(self, _: Option<usize>) -> Result<Self, Self::Error> {
        Ok(self)
    }
    fn serialize_tuple(self, _: usize) -> Result<Self::SerializeTuple, Self::Error> {
        Ok(self)
    }
    fn serialize_tuple_struct(
        self,
        _: &'static str,
        _: usize,
    ) -> Result<Self::SerializeTupleStruct, Self::Error> {
        Ok(self)
    }
    fn serialize_tuple_variant(
        self,
        _: &'static str,
        _: u32,
        _: &'static str,
        _: usize,
    ) -> Result<Self::SerializeTupleVariant, Self::Error> {
        Ok(self)
    }
    // Map-like containers: the `Serialize*` impls below report `Table`.
    fn serialize_map(self, _: Option<usize>) -> Result<Self, Self::Error> {
        Ok(self)
    }
    fn serialize_struct(self, _: &'static str, _: usize) -> Result<Self, Self::Error> {
        Ok(self)
    }
    fn serialize_struct_variant(
        self,
        _: &'static str,
        _: u32,
        _: &'static str,
        _: usize,
    ) -> Result<Self::SerializeStructVariant, Self::Error> {
        Err(ser::Error::custom("unsupported"))
    }
}
// Sequence-like values (seq, tuple, tuple struct, tuple variant) all
// categorize as `Array`; the elements themselves are ignored.
impl<E: ser::Error> ser::SerializeSeq for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_element<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Array)
    }
}
impl<E: ser::Error> ser::SerializeTuple for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_element<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Array)
    }
}
impl<E: ser::Error> ser::SerializeTupleVariant for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_field<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Array)
    }
}
impl<E: ser::Error> ser::SerializeTupleStruct for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_field<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Array)
    }
}
// Map-like values (maps and structs) categorize as `Table`; keys and
// values are ignored.
impl<E: ser::Error> ser::SerializeMap for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_key<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn serialize_value<T: ?Sized + ser::Serialize>(&mut self, _: &T) -> Result<(), Self::Error> {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Table)
    }
}
impl<E: ser::Error> ser::SerializeStruct for Categorize<E> {
    type Ok = Category;
    type Error = E;
    fn serialize_field<T: ?Sized>(&mut self, _: &'static str, _: &T) -> Result<(), Self::Error>
    where
        T: ser::Serialize,
    {
        Ok(())
    }
    fn end(self) -> Result<Self::Ok, Self::Error> {
        Ok(Category::Table)
    }
}
|
use std::iter;
use byteorder::{BigEndian, ByteOrder};
pub struct SHA512 {}
impl SHA512 {
    /// Appends SHA-512 padding to `bytes` in place: a 0x80 marker byte,
    /// zero fill up to 112 (mod 128), then a 128-bit big-endian field
    /// holding the original message length in bits.
    ///
    /// Only supports messages with at most 2^64 - 1 bits for now
    pub fn pad(bytes: &mut Vec<u8>) {
        // Capture the length before any padding bytes are appended.
        let len = len(bytes);
        bytes.push(0x80);
        // Fill so the total is congruent to 112 (mod 128), leaving exactly
        // 16 bytes for the length field below.
        let padding = (128 + 112 - bytes.len() % 128) % 128;
        bytes.extend(iter::repeat(0).take(padding));
        // High 8 bytes of the 128-bit length stay zero (< 2^64 bits).
        bytes.extend_from_slice(&[0; 8]);
        bytes.extend_from_slice(&len);
    }
}
/// Returns the big-endian 8-byte encoding of the message length in *bits*,
/// as appended by SHA-512 padding.
fn len(bytes: &[u8]) -> [u8; 8] {
    // `u64::to_be_bytes` replaces the byteorder crate call: same bytes,
    // no third-party dependency needed for this conversion.
    (8 * bytes.len() as u64).to_be_bytes()
}
#[cfg(test)]
mod tests {
    use sha::*;
    use test_helpers::*;
    // Pads "abcde" and checks the FIPS 180-4 layout: message bytes, a 0x80
    // marker, zero fill, and a length field ending in 0x28 (= 40 bits).
    #[test]
    fn test_pad() {
        let mut message = vec![0b01100001, 0b01100010, 0b01100011, 0b01100100, 0b01100101];
        let expected = h2b(
            "6162636465800000000000000000000000000000000000000000000000000000\
             0000000000000000000000000000000000000000000000000000000000000000\
             0000000000000000000000000000000000000000000000000000000000000000\
             0000000000000000000000000000000000000000000000000000000000000028",
        );
        SHA512::pad(&mut message);
        assert_eq!(expected, message);
    }
}
sha: add sha-512
use std::iter;
use byteorder::{BigEndian, ByteOrder};
/// SHA-512 round constants (FIPS 180-4, Section 4.2.3): the first 64 bits
/// of the fractional parts of the cube roots of the first 80 primes.
pub const K: [u64; 80] = [
    0x428a2f98d728ae22,
    0x7137449123ef65cd,
    0xb5c0fbcfec4d3b2f,
    0xe9b5dba58189dbbc,
    0x3956c25bf348b538,
    0x59f111f1b605d019,
    0x923f82a4af194f9b,
    0xab1c5ed5da6d8118,
    0xd807aa98a3030242,
    0x12835b0145706fbe,
    0x243185be4ee4b28c,
    0x550c7dc3d5ffb4e2,
    0x72be5d74f27b896f,
    0x80deb1fe3b1696b1,
    0x9bdc06a725c71235,
    0xc19bf174cf692694,
    0xe49b69c19ef14ad2,
    0xefbe4786384f25e3,
    0x0fc19dc68b8cd5b5,
    0x240ca1cc77ac9c65,
    0x2de92c6f592b0275,
    0x4a7484aa6ea6e483,
    0x5cb0a9dcbd41fbd4,
    0x76f988da831153b5,
    0x983e5152ee66dfab,
    0xa831c66d2db43210,
    0xb00327c898fb213f,
    0xbf597fc7beef0ee4,
    0xc6e00bf33da88fc2,
    0xd5a79147930aa725,
    0x06ca6351e003826f,
    0x142929670a0e6e70,
    0x27b70a8546d22ffc,
    0x2e1b21385c26c926,
    0x4d2c6dfc5ac42aed,
    0x53380d139d95b3df,
    0x650a73548baf63de,
    0x766a0abb3c77b2a8,
    0x81c2c92e47edaee6,
    0x92722c851482353b,
    0xa2bfe8a14cf10364,
    0xa81a664bbc423001,
    0xc24b8b70d0f89791,
    0xc76c51a30654be30,
    0xd192e819d6ef5218,
    0xd69906245565a910,
    0xf40e35855771202a,
    0x106aa07032bbd1b8,
    0x19a4c116b8d2d0c8,
    0x1e376c085141ab53,
    0x2748774cdf8eeb99,
    0x34b0bcb5e19b48a8,
    0x391c0cb3c5c95a63,
    0x4ed8aa4ae3418acb,
    0x5b9cca4f7763e373,
    0x682e6ff3d6b2b8a3,
    0x748f82ee5defb2fc,
    0x78a5636f43172f60,
    0x84c87814a1f0ab72,
    0x8cc702081a6439ec,
    0x90befffa23631e28,
    0xa4506cebde82bde9,
    0xbef9a3f7b2c67915,
    0xc67178f2e372532b,
    0xca273eceea26619c,
    0xd186b8c721c0c207,
    0xeada7dd6cde0eb1e,
    0xf57d4f7fee6ed178,
    0x06f067aa72176fba,
    0x0a637dc5a2c898a6,
    0x113f9804bef90dae,
    0x1b710b35131c471b,
    0x28db77f523047d84,
    0x32caab7b40c72493,
    0x3c9ebe0a15c9bebc,
    0x431d67c49c100d4c,
    0x4cc5d4becb3e42b6,
    0x597f299cfc657e2a,
    0x5fcb6fab3ad6faec,
    0x6c44198c4a475817,
];
pub struct SHA512 {}
impl SHA512 {
    /// Computes the SHA-512 digest of `message` (FIPS 180-4): pads the
    /// message, then compresses each 1024-bit chunk into the running
    /// 8-word hash state.
    pub fn digest(message: &[u8]) -> [u8; 64] {
        let mut message = message.to_vec();
        Self::pad(&mut message);
        // Initial hash value H(0) (FIPS 180-4, Section 5.3.5).
        let mut hash: [u64; 8] = [
            0x6a09e667f3bcc908,
            0xbb67ae8584caa73b,
            0x3c6ef372fe94f82b,
            0xa54ff53a5f1d36f1,
            0x510e527fade682d1,
            0x9b05688c2b3e6c1f,
            0x1f83d9abfb41bd6b,
            0x5be0cd19137e2179,
        ];
        let mut w = [0u64; 80];
        for chunk in message.chunks(128) {
            // The first 16 schedule words come straight from the chunk
            // (big-endian); `zip` stops after the 16 available 8-byte groups.
            // `from_be_bytes` replaces the byteorder crate dependency here.
            for (wt, long) in w.iter_mut().zip(chunk.chunks(8)) {
                let mut buf = [0u8; 8];
                buf.copy_from_slice(long);
                *wt = u64::from_be_bytes(buf);
            }
            // The remaining 64 words are derived from earlier ones.
            for t in 16..80 {
                w[t] = Self::ssig1(w[t - 2])
                    .wrapping_add(w[t - 7])
                    .wrapping_add(Self::ssig0(w[t - 15]))
                    .wrapping_add(w[t - 16]);
            }
            let mut a = hash[0];
            let mut b = hash[1];
            let mut c = hash[2];
            let mut d = hash[3];
            let mut e = hash[4];
            let mut f = hash[5];
            let mut g = hash[6];
            let mut h = hash[7];
            // 80 rounds of the compression function.
            for (&kt, &wt) in K.iter().zip(w.iter()) {
                let t1 = h.wrapping_add(Self::bsig1(e))
                    .wrapping_add(Self::ch(e, f, g))
                    .wrapping_add(kt)
                    .wrapping_add(wt);
                let t2 = Self::bsig0(a).wrapping_add(Self::maj(a, b, c));
                h = g;
                g = f;
                f = e;
                e = d.wrapping_add(t1);
                d = c;
                c = b;
                b = a;
                a = t1.wrapping_add(t2);
            }
            hash[0] = hash[0].wrapping_add(a);
            hash[1] = hash[1].wrapping_add(b);
            hash[2] = hash[2].wrapping_add(c);
            hash[3] = hash[3].wrapping_add(d);
            hash[4] = hash[4].wrapping_add(e);
            hash[5] = hash[5].wrapping_add(f);
            hash[6] = hash[6].wrapping_add(g);
            hash[7] = hash[7].wrapping_add(h);
        }
        // Serialize the state big-endian into the 64-byte digest.
        let mut digest = [0; 64];
        for (chunk, &long) in digest.chunks_mut(8).zip(&hash) {
            chunk.copy_from_slice(&long.to_be_bytes());
        }
        digest
    }
    /// Choice function: selects bits from `y` where `x` is 1, else from `z`.
    pub fn ch(x: u64, y: u64, z: u64) -> u64 {
        (x & y) ^ (!x & z)
    }
    /// Majority function: each output bit is the majority of the input bits.
    pub fn maj(x: u64, y: u64, z: u64) -> u64 {
        (x & y) ^ (x & z) ^ (y & z)
    }
    pub fn bsig0(x: u64) -> u64 {
        x.rotate_right(28) ^ x.rotate_right(34) ^ x.rotate_right(39)
    }
    pub fn bsig1(x: u64) -> u64 {
        x.rotate_right(14) ^ x.rotate_right(18) ^ x.rotate_right(41)
    }
    pub fn ssig0(x: u64) -> u64 {
        x.rotate_right(1) ^ x.rotate_right(8) ^ (x >> 7)
    }
    pub fn ssig1(x: u64) -> u64 {
        x.rotate_right(19) ^ x.rotate_right(61) ^ (x >> 6)
    }
    /// Appends SHA-512 padding in place: a 0x80 marker, zero fill to
    /// 112 (mod 128), and a 128-bit big-endian bit-length field.
    ///
    /// Only supports messages with at most 2^64 - 1 bits for now
    pub fn pad(bytes: &mut Vec<u8>) {
        // Capture the length *before* mutating: the length field encodes
        // the original message size in bits.
        let bit_len = 8 * bytes.len() as u64;
        bytes.push(0x80);
        // Fill so the total is congruent to 112 (mod 128), leaving exactly
        // 16 bytes for the length field below.
        let padding = (128 + 112 - bytes.len() % 128) % 128;
        bytes.extend(iter::repeat(0).take(padding));
        // High 8 bytes of the 128-bit length stay zero (< 2^64 bits).
        bytes.extend_from_slice(&[0; 8]);
        bytes.extend_from_slice(&bit_len.to_be_bytes());
    }
}
/// Returns the big-endian 8-byte encoding of the message length in *bits*,
/// as appended by SHA-512 padding.
fn len(bytes: &[u8]) -> [u8; 8] {
    // `u64::to_be_bytes` replaces the byteorder crate call: same bytes,
    // no third-party dependency needed for this conversion.
    (8 * bytes.len() as u64).to_be_bytes()
}
#[cfg(test)]
mod tests {
    use sha::*;
    use test_helpers::*;
    // Pads "abcde" and checks the FIPS 180-4 layout: message bytes, a 0x80
    // marker, zero fill, and a length field ending in 0x28 (= 40 bits).
    #[test]
    fn test_pad() {
        let mut message = vec![0b01100001, 0b01100010, 0b01100011, 0b01100100, 0b01100101];
        let expected = h2b(
            "6162636465800000000000000000000000000000000000000000000000000000\
             0000000000000000000000000000000000000000000000000000000000000000\
             0000000000000000000000000000000000000000000000000000000000000000\
             0000000000000000000000000000000000000000000000000000000000000028",
        );
        SHA512::pad(&mut message);
        assert_eq!(expected, message);
    }
    // Digest of the empty message against the well-known SHA-512 test vector.
    #[test]
    fn test_digest() {
        let expected = h2b(
            "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce\
             47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e",
        );
        let actual = SHA512::digest(&[]);
        assert_eq!(64, actual.len());
        // Element-wise comparison because fixed-size arrays of this era
        // lacked direct equality with Vec.
        for (lhs, rhs) in expected.iter().zip(actual.iter()) {
            assert_eq!(lhs, rhs);
        }
    }
}
|
//! The AWS SQS API.
#![cfg_attr(feature = "nightly-testing", allow(while_let_loop))]
include!(concat!(env!("OUT_DIR"), "/sqs.rs"));
#[cfg(test)]
mod test {
    use std::collections::HashMap;
    use sqs::{SqsClient, SendMessageRequest, ReceiveMessageRequest, MessageAttributeValue};
    use super::super::{Region, SignedRequest};
    use super::super::mock::*;
    extern crate env_logger;
    // Verifies that map-typed request members are flattened into numbered
    // query-string parameters (MessageAttribute.N.Name / .Value.*).
    #[test]
    fn should_serialize_map_parameters_in_query_string() {
        let mock = MockRequestDispatcher::with_status(200)
            .with_body(r#"<?xml version="1.0" encoding="UTF-8"?>
<SendMessageResponse>
<SendMessageResult>
<MD5OfMessageBody>
fafb00f5732ab283681e124bf8747ed1
</MD5OfMessageBody>
<MD5OfMessageAttributes>
3ae8f24a165a8cedc005670c81a27295
</MD5OfMessageAttributes>
<MessageId>
5fea7756-0ea4-451a-a703-a558b933e274
</MessageId>
</SendMessageResult>
<ResponseMetadata>
<RequestId>
27daac76-34dd-47df-bd01-1f6e873584a0
</RequestId>
</ResponseMetadata>
</SendMessageResponse>"#)
            .with_request_checker(|request: &SignedRequest| {
                assert_eq!("POST", request.method);
                assert_eq!("/", request.path);
                assert_eq!(None, request.payload);
                assert_eq!(Some(&Some("test_attribute_name".to_owned())),
                           request.params.get("MessageAttribute.1.Name"));
                assert_eq!(Some(&Some("test_attribute_value".to_owned())),
                           request.params.get("MessageAttribute.1.Value.StringValue"));
                assert_eq!(Some(&Some("String".to_owned())),
                           request.params.get("MessageAttribute.1.Value.DataType"));
            });
        let mut message_attributes = HashMap::new();
        message_attributes.insert("test_attribute_name".to_owned(),
                                  MessageAttributeValue {
                                      string_value: Some("test_attribute_value".to_owned()),
                                      data_type: "String".to_owned(),
                                      ..Default::default()
                                  });
        let request = SendMessageRequest {
            message_body: "foo".to_owned(),
            queue_url: "bar".to_owned(),
            message_attributes: Some(message_attributes),
            ..Default::default()
        };
        let client =
            SqsClient::with_request_dispatcher(mock, MockCredentialsProvider, Region::UsEast1);
        let _result = client.send_message(&request).unwrap();
    }
    // Regression test: integer request members must be serialized as
    // query parameters, and only the ones actually set.
    #[test]
    fn should_fix_issue_323() {
        let mock = MockRequestDispatcher::with_status(200)
            .with_body(r#"<?xml version="1.0" encoding="UTF-8"?>
<ReceiveMessageResponse>
<ReceiveMessageResult>
<Message>
<MessageId>
5fea7756-0ea4-451a-a703-a558b933e274
</MessageId>
<ReceiptHandle>
MbZj6wDWli+JvwwJaBV+3dcjk2YW2vA3+STFFljTM8tJJg6HRG6PYSasuWXPJB+Cw
Lj1FjgXUv1uSj1gUPAWV66FU/WeR4mq2OKpEGYWbnLmpRCJVAyeMjeU5ZBdtcQ+QE
auMZc8ZRv37sIW2iJKq3M9MFx1YvV11A2x/KSbkJ0=
</ReceiptHandle>
<MD5OfBody>
fafb00f5732ab283681e124bf8747ed1
</MD5OfBody>
<Body>This is a test message</Body>
<Attribute>
<Name>SenderId</Name>
<Value>195004372649</Value>
</Attribute>
</Message>
</ReceiveMessageResult>
<ResponseMetadata>
<RequestId>
b6633655-283d-45b4-aee4-4e84e0ae6afa
</RequestId>
</ResponseMetadata>
</ReceiveMessageResponse>"#)
            .with_request_checker(|request: &SignedRequest| {
                assert_eq!("POST", request.method);
                assert_eq!("/", request.path);
                assert_eq!(request.params.get("Action"), Some(&Some("ReceiveMessage".to_owned())));
                assert_eq!(request.params.get("MaxNumberOfMessages"), Some(&Some("1".to_owned())));
                assert_eq!(request.params.get("VisibilityTimeout"), Some(&Some("2".to_owned())));
                assert_eq!(request.params.get("WaitTimeSeconds"), Some(&Some("3".to_owned())));
                assert_eq!(request.params.get("Integer"), None);
            });
        let request = ReceiveMessageRequest {
            max_number_of_messages: Some(1),
            queue_url: "foo".to_owned(),
            visibility_timeout: Some(2),
            wait_time_seconds: Some(3),
            ..Default::default()
        };
        let client = SqsClient::with_request_dispatcher(mock, MockCredentialsProvider, Region::UsEast1);
        let _result = client.receive_message(&request).unwrap();
    }
}
remove commented-out line
//! The AWS SQS API.
#![cfg_attr(feature = "nightly-testing", allow(while_let_loop))]
include!(concat!(env!("OUT_DIR"), "/sqs.rs"));
#[cfg(test)]
mod test {
    use std::collections::HashMap;
    use sqs::{SqsClient, SendMessageRequest, ReceiveMessageRequest, MessageAttributeValue};
    use super::super::{Region, SignedRequest};
    use super::super::mock::*;
    extern crate env_logger;
    // Verifies that map-typed request members are flattened into numbered
    // query-string parameters (MessageAttribute.N.Name / .Value.*).
    #[test]
    fn should_serialize_map_parameters_in_query_string() {
        let mock = MockRequestDispatcher::with_status(200)
            .with_body(r#"<?xml version="1.0" encoding="UTF-8"?>
<SendMessageResponse>
<SendMessageResult>
<MD5OfMessageBody>
fafb00f5732ab283681e124bf8747ed1
</MD5OfMessageBody>
<MD5OfMessageAttributes>
3ae8f24a165a8cedc005670c81a27295
</MD5OfMessageAttributes>
<MessageId>
5fea7756-0ea4-451a-a703-a558b933e274
</MessageId>
</SendMessageResult>
<ResponseMetadata>
<RequestId>
27daac76-34dd-47df-bd01-1f6e873584a0
</RequestId>
</ResponseMetadata>
</SendMessageResponse>"#)
            .with_request_checker(|request: &SignedRequest| {
                assert_eq!("POST", request.method);
                assert_eq!("/", request.path);
                assert_eq!(None, request.payload);
                assert_eq!(Some(&Some("test_attribute_name".to_owned())),
                           request.params.get("MessageAttribute.1.Name"));
                assert_eq!(Some(&Some("test_attribute_value".to_owned())),
                           request.params.get("MessageAttribute.1.Value.StringValue"));
                assert_eq!(Some(&Some("String".to_owned())),
                           request.params.get("MessageAttribute.1.Value.DataType"));
            });
        let mut message_attributes = HashMap::new();
        message_attributes.insert("test_attribute_name".to_owned(),
                                  MessageAttributeValue {
                                      string_value: Some("test_attribute_value".to_owned()),
                                      data_type: "String".to_owned(),
                                      ..Default::default()
                                  });
        let request = SendMessageRequest {
            message_body: "foo".to_owned(),
            queue_url: "bar".to_owned(),
            message_attributes: Some(message_attributes),
            ..Default::default()
        };
        let client =
            SqsClient::with_request_dispatcher(mock, MockCredentialsProvider, Region::UsEast1);
        let _result = client.send_message(&request).unwrap();
    }
    // Regression test: integer request members must be serialized as
    // query parameters, and only the ones actually set.
    #[test]
    fn should_fix_issue_323() {
        let mock = MockRequestDispatcher::with_status(200)
            .with_body(r#"<?xml version="1.0" encoding="UTF-8"?>
<ReceiveMessageResponse>
<ReceiveMessageResult>
<Message>
<MessageId>
5fea7756-0ea4-451a-a703-a558b933e274
</MessageId>
<ReceiptHandle>
MbZj6wDWli+JvwwJaBV+3dcjk2YW2vA3+STFFljTM8tJJg6HRG6PYSasuWXPJB+Cw
Lj1FjgXUv1uSj1gUPAWV66FU/WeR4mq2OKpEGYWbnLmpRCJVAyeMjeU5ZBdtcQ+QE
auMZc8ZRv37sIW2iJKq3M9MFx1YvV11A2x/KSbkJ0=
</ReceiptHandle>
<MD5OfBody>
fafb00f5732ab283681e124bf8747ed1
</MD5OfBody>
<Body>This is a test message</Body>
<Attribute>
<Name>SenderId</Name>
<Value>195004372649</Value>
</Attribute>
</Message>
</ReceiveMessageResult>
<ResponseMetadata>
<RequestId>
b6633655-283d-45b4-aee4-4e84e0ae6afa
</RequestId>
</ResponseMetadata>
</ReceiveMessageResponse>"#)
            .with_request_checker(|request: &SignedRequest| {
                assert_eq!("POST", request.method);
                assert_eq!("/", request.path);
                assert_eq!(request.params.get("Action"), Some(&Some("ReceiveMessage".to_owned())));
                assert_eq!(request.params.get("MaxNumberOfMessages"), Some(&Some("1".to_owned())));
                assert_eq!(request.params.get("VisibilityTimeout"), Some(&Some("2".to_owned())));
                assert_eq!(request.params.get("WaitTimeSeconds"), Some(&Some("3".to_owned())));
                assert_eq!(request.params.get("Integer"), None);
            });
        let request = ReceiveMessageRequest {
            max_number_of_messages: Some(1),
            queue_url: "foo".to_owned(),
            visibility_timeout: Some(2),
            wait_time_seconds: Some(3),
            ..Default::default()
        };
        let client = SqsClient::with_request_dispatcher(mock, MockCredentialsProvider, Region::UsEast1);
        let _result = client.receive_message(&request).unwrap();
    }
}
|
extern crate getopts;
use std::os;
use std::io;
use std::collections::HashMap;
use getopts::getopts;
use getopts::optflag;
use getopts::usage;
// Reads stdin and prints each distinct line prefixed by its occurrence
// count, most frequent first. Written against a pre-1.0 Rust toolchain
// (`int`, `fail!`, `std::os`), so the APIs below no longer exist.
fn main() {
    let args: Vec<String> = os::args()
        .iter()
        .map(|x| x.to_string())
        .collect();
    let opts = [
        optflag("h", "help", "Show help.")
    ];
    // args.tail() skips the program name before option parsing.
    let matches = match getopts(args.tail(), opts) {
        Ok(m) => { m }
        Err(f) => { fail!(f.to_str()) }
    };
    if matches.opt_present("h") {
        println!("{}", usage("Produce a sorted-by-frequency list of lines from input.", opts));
        return;
    }
    // Count occurrences of each input line.
    let mut lines: HashMap<String,int> = HashMap::new();
    for line in io::stdin().lines() {
        lines.insert_or_update_with(line.unwrap(), 1, |_k, v| *v = *v + 1);
    }
    // Flip to (count, line) pairs so tuple ordering sorts by frequency first.
    let mut sorted_lines: Vec<(int, String)> = Vec::new();
    for (line, count) in lines.iter() {
        sorted_lines.push((*count, line.clone()));
    }
    // b.cmp(a) yields descending order (most frequent first).
    sorted_lines.sort_by(|a, b| b.cmp(a));
    // NOTE(review): print! without a newline suggests the old lines()
    // iterator kept trailing newlines — confirm against the era's io API.
    for &(ref count, ref line) in sorted_lines.iter() {
        print!("{}: {}", count, line);
    }
}
Remove colon in suc output.
extern crate getopts;
use std::os;
use std::io;
use std::collections::HashMap;
use getopts::getopts;
use getopts::optflag;
use getopts::usage;
// Reads stdin and prints each distinct line prefixed by its occurrence
// count, most frequent first. Written against a pre-1.0 Rust toolchain
// (`int`, `fail!`, `std::os`), so the APIs below no longer exist.
fn main() {
    let args: Vec<String> = os::args()
        .iter()
        .map(|x| x.to_string())
        .collect();
    let opts = [
        optflag("h", "help", "Show help.")
    ];
    // args.tail() skips the program name before option parsing.
    let matches = match getopts(args.tail(), opts) {
        Ok(m) => { m }
        Err(f) => { fail!(f.to_str()) }
    };
    if matches.opt_present("h") {
        println!("{}", usage("Produce a sorted-by-frequency list of lines from input.", opts));
        return;
    }
    // Count occurrences of each input line.
    let mut lines: HashMap<String,int> = HashMap::new();
    for line in io::stdin().lines() {
        lines.insert_or_update_with(line.unwrap(), 1, |_k, v| *v = *v + 1);
    }
    // Flip to (count, line) pairs so tuple ordering sorts by frequency first.
    let mut sorted_lines: Vec<(int, String)> = Vec::new();
    for (line, count) in lines.iter() {
        sorted_lines.push((*count, line.clone()));
    }
    // b.cmp(a) yields descending order (most frequent first).
    sorted_lines.sort_by(|a, b| b.cmp(a));
    // NOTE(review): print! without a newline suggests the old lines()
    // iterator kept trailing newlines — confirm against the era's io API.
    for &(ref count, ref line) in sorted_lines.iter() {
        print!("{} {}", count, line);
    }
}
|
use std::io::prelude::*;
use rand::OsRng;
use num::traits::FromPrimitive;
use tls_result::{TlsResult, TlsError, TlsErrorKind};
use tls_result::TlsErrorKind::{UnexpectedMessage, RecordOverflow, BadRecordMac, AlertReceived};
use alert::{self, Alert};
use handshake::{Handshake, HandshakeBuffer};
use util::u64_be_array;
use util::{ReadExt, WriteExt};
use cipher::{Encryptor, Decryptor};
use tls_item::TlsItem;
use self::ContentType::{ChangeCipherSpecTy, AlertTy, HandshakeTy, ApplicationDataTy};
use self::Message::{HandshakeMessage, ChangeCipherSpecMessage, AlertMessage,
ApplicationDataMessage};
/// The protocol version this code speaks: (3, 3) is TLS 1.2 on the wire.
pub static TLS_VERSION: (u8, u8) = (3, 3);
enum_from_primitive! {
    // Record-layer content types; the numeric values are the wire encoding.
    #[repr(u8)]
    #[derive(Copy, Clone, PartialEq, Debug)]
    pub enum ContentType {
        ChangeCipherSpecTy = 20,
        AlertTy = 21,
        HandshakeTy = 22,
        ApplicationDataTy = 23,
        // HeartBeat = 24, RFC 6520 extension :-)
    }
}
/// maximum length of Record (excluding content_type, version, length fields)
pub const RECORD_MAX_LEN: usize = 1 << 14;
/// maximum length of EncryptedRecord (excluding content_type, version, length fields)
pub const ENC_RECORD_MAX_LEN: usize = (1 << 14) + 2048;
/// corresponds to `TLSPlaintext` in Section 6.2.1.
#[derive(Debug)]
pub struct Record {
    pub content_type: ContentType,
    // Protocol version bytes exactly as they appear on the wire.
    pub ver_major: u8,
    pub ver_minor: u8,
    // fragment length < 2^14
    pub fragment: Vec<u8>,
}
impl Record {
    /// Builds a plaintext record.
    ///
    /// # Panics
    ///
    /// Panics if `fragment` exceeds `RECORD_MAX_LEN` (2^14 bytes).
    pub fn new(content_type: ContentType,
               ver_major: u8,
               ver_minor: u8,
               fragment: Vec<u8>) -> Record {
        let len = fragment.len();
        if len > RECORD_MAX_LEN {
            panic!("record too long: {} > 2^14", len);
        }
        Record {
            content_type: content_type,
            ver_major: ver_major,
            ver_minor: ver_minor,
            fragment: fragment,
        }
    }
}
/// Writes records to the underlying writer, encrypting them once a cipher
/// has been negotiated.
pub struct RecordWriter<W: Write> {
    writer: W,
    // if encryptor is None, handshake is not done yet.
    encryptor: Option<Box<Encryptor + Send + 'static>>,
    // Records written under the current keys; fed into the AEAD as the
    // per-record sequence number.
    write_count: u64,
}
impl<W: Write> RecordWriter<W> {
    /// Creates a `RecordWriter` that initially writes plaintext records.
    /// Call `set_encryptor` once the handshake has negotiated keys.
    pub fn new(writer: W) -> RecordWriter<W> {
        RecordWriter {
            writer: writer,
            encryptor: None,
            write_count: 0,
        }
    }
    #[inline]
    pub fn get_mut(&mut self) -> &mut W {
        &mut self.writer
    }
    /// Installs the record encryptor and resets the sequence counter.
    ///
    /// # Panics
    ///
    /// Panics if an encryptor is already installed: silently replacing it
    /// while resetting `write_count` would reuse sequence numbers (and
    /// therefore nonces) under ambiguous keys.
    pub fn set_encryptor(&mut self, encryptor: Box<Encryptor + Send + 'static>) {
        assert!(self.encryptor.is_none(), "set_encryptor must be called only once");
        self.encryptor = Some(encryptor);
        self.write_count = 0;
    }
    /// Encrypts (when keys are set) and writes a single record.
    pub fn write_record(&mut self, record: Record) -> TlsResult<()> {
        let encrypted_fragment = match self.encryptor {
            None => record.fragment,
            Some(ref mut encryptor) => {
                // Additional authenticated data:
                // seq_num || type || version || plaintext length.
                let seq_num = u64_be_array(self.write_count);
                let mut ad = Vec::new();
                ad.extend(&seq_num);
                ad.push(record.content_type as u8);
                ad.push(record.ver_major);
                ad.push(record.ver_minor);
                let frag_len = record.fragment.len() as u16;
                ad.push((frag_len >> 8) as u8);
                ad.push(frag_len as u8);
                encryptor.encrypt(&seq_num, &record.fragment, &ad)
            }
        };
        let fragment_len = encrypted_fragment.len();
        if fragment_len > ENC_RECORD_MAX_LEN {
            panic!("record too long: {} > 2^14 + 2048", fragment_len);
        }
        try!(self.writer.write_u8(record.content_type as u8));
        try!(self.writer.write_u8(record.ver_major));
        try!(self.writer.write_u8(record.ver_minor));
        try!(self.writer.write_be_u16(fragment_len as u16));
        try!(self.writer.write_all(&encrypted_fragment));
        self.write_count += 1;
        Ok(())
    }
    /// Splits `data` into maximum-size fragments and writes each one as a
    /// record of type `ty`.
    pub fn write_data(&mut self, ty: ContentType, data: &[u8]) -> TlsResult<()> {
        let (major, minor) = TLS_VERSION;
        // TODO: configurable maxlen
        for fragment in data.chunks(RECORD_MAX_LEN) {
            let fragment = fragment.to_vec();
            let record = Record::new(ty, major, minor, fragment);
            try!(self.write_record(record));
        }
        Ok(())
    }
    /// Serializes and writes a handshake message.
    pub fn write_handshake(&mut self, handshake: &Handshake) -> TlsResult<()> {
        let mut data = Vec::new();
        try!(handshake.tls_write(&mut data));
        self.write_data(HandshakeTy, &data)
    }
    /// Serializes and writes an alert message.
    pub fn write_alert(&mut self, alert: &Alert) -> TlsResult<()> {
        let mut data = Vec::new();
        try!(alert.tls_write(&mut data));
        self.write_data(AlertTy, &data)
    }
    /// Writes the single-byte ChangeCipherSpec message.
    pub fn write_change_cipher_spec(&mut self) -> TlsResult<()> {
        self.write_data(ChangeCipherSpecTy, &[1u8])
    }
    /// Writes application data.
    ///
    /// # Panics
    ///
    /// Panics if called before the handshake installed an encryptor.
    pub fn write_application_data(&mut self, data: &[u8]) -> TlsResult<()> {
        if self.encryptor.is_none() {
            panic!("attempted to write ApplicationData before handshake");
        }
        self.write_data(ApplicationDataTy, data)
    }
}
/// A complete message assembled from one or more records, one variant per
/// record-layer content type.
pub enum Message {
    HandshakeMessage(Handshake),
    ChangeCipherSpecMessage,
    AlertMessage(Alert),
    ApplicationDataMessage(Vec<u8>),
}
/// Reads records from the underlying reader, decrypting them once a
/// cipher has been negotiated, and reassembles fragmented handshakes.
pub struct RecordReader<R: ReadExt> {
    reader: R,
    // if decryptor is none, handshake is not done yet.
    decryptor: Option<Box<Decryptor + Send + 'static>>,
    // Records read under the current keys; fed into the AEAD as the
    // per-record sequence number.
    read_count: u64,
    handshake_buffer: HandshakeBuffer,
}
impl<R: ReadExt> RecordReader<R> {
    /// Creates a `RecordReader` that initially reads plaintext records.
    /// Call `set_decryptor` once the handshake has negotiated keys.
    pub fn new(reader: R) -> RecordReader<R> {
        RecordReader {
            reader: reader,
            decryptor: None,
            read_count: 0,
            handshake_buffer: HandshakeBuffer::new(),
        }
    }
    #[inline]
    pub fn get_mut(&mut self) -> &mut R {
        &mut self.reader
    }
    /// Installs the record decryptor and resets the sequence counter.
    ///
    /// # Panics
    ///
    /// Panics if a decryptor is already installed: silently replacing it
    /// while resetting `read_count` would reuse sequence numbers under
    /// ambiguous keys.
    pub fn set_decryptor(&mut self, decryptor: Box<Decryptor + Send + 'static>) {
        assert!(self.decryptor.is_none(), "set_decryptor must be called only once");
        self.decryptor = Some(decryptor);
        self.read_count = 0;
    }
    /// Reads a single record, decrypting and authenticating it when keys
    /// are installed.
    fn read_record(&mut self) -> TlsResult<Record> {
        let content_type = {
            let ty = try!(self.reader.read_u8());
            let ct: Option<ContentType> = FromPrimitive::from_u8(ty);
            match ct {
                Some(ty) => ty,
                None => return tls_err!(UnexpectedMessage, "unexpected ContentType: {}", ty),
            }
        };
        let major = try!(self.reader.read_u8());
        let minor = try!(self.reader.read_u8());
        let len = {
            let len = try!(self.reader.read_be_u16()) as usize;
            if len > ENC_RECORD_MAX_LEN {
                return tls_err!(RecordOverflow, "TLSEncryptedText too long: {}", len);
            }
            len
        };
        let fragment = try!(self.reader.read_exact(len as usize));
        let record = match self.decryptor {
            None => {
                if fragment.len() > RECORD_MAX_LEN {
                    return tls_err!(RecordOverflow,
                                    "decrypted record too long: {}",
                                    fragment.len());
                }
                Record::new(content_type, major, minor, fragment)
            }
            Some(ref mut decryptor) => {
                // Additional authenticated data:
                // seq_num || type || version || plaintext length.
                let seq_num = u64_be_array(self.read_count);
                let mut ad = Vec::new();
                ad.extend(&seq_num);
                ad.push(content_type as u8); // TLSCompressed.type
                ad.push(major);
                ad.push(minor);
                let mac_len = decryptor.mac_len();
                let total_len = fragment.len();
                if total_len < mac_len {
                    return tls_err!(BadRecordMac, "encrypted message too short: {}", total_len);
                }
                let frag_len = (total_len - mac_len) as u16;
                ad.push((frag_len >> 8) as u8);
                ad.push(frag_len as u8);
                // TODO: "seq_num as nonce" is chacha20poly1305-specific
                let data = try!(decryptor.decrypt(&seq_num, &fragment, &ad));
                if data.len() > RECORD_MAX_LEN {
                    // The ciphertext length was already bounded above, so an
                    // oversized plaintext means the decryption routine itself
                    // is broken — an internal bug, not peer input.
                    panic!("decrypted record too long: {}", data.len());
                }
                Record::new(content_type, major, minor, data)
            }
        };
        self.read_count += 1;
        Ok(record)
    }
    /// read records until a "complete" message is found, and return the message.
    /// if invalid ChangeCipherSpec/Alert/Handshake message is found, return Err.
    /// (application record is always considered "complete" and "valid"
    /// since it is opaque to TLS layer.)
    pub fn read_message(&mut self) -> TlsResult<Message> {
        // A full handshake message may already be buffered from earlier records.
        if let Some(handshake_msg) = try!(self.handshake_buffer.get_message()) {
            return Ok(HandshakeMessage(handshake_msg));
        }
        // ok, no message found. read it from network!
        loop {
            // TODO: what if handshake record is present in buffer then
            // other record comes? is it legal?
            let record = try!(self.read_record());
            match record.content_type {
                ChangeCipherSpecTy => {
                    if record.fragment.len() != 1 || record.fragment[0] != 1 {
                        return tls_err!(UnexpectedMessage, "invalid ChangeCipherSpec arrived");
                    }
                    return Ok(ChangeCipherSpecMessage);
                }
                AlertTy => {
                    let len = record.fragment.len();
                    if len == 0 {
                        return tls_err!(UnexpectedMessage, "zero-length Alert record arrived");
                    } else if len < 2 {
                        // alert packet can be broken into several records,
                        // but it is rarely used and may cause alert attack
                        // if carelessly implemented:
                        // http://www.mitls.org/wsgi/alert-attack
                        // we just don't accept such record for simplicity.
                        // If alert messages are long, use the first two bytes.
                        return tls_err!(UnexpectedMessage, "awkward Alert record arrived");
                    }
                    let level = FromPrimitive::from_u8(record.fragment[0]);
                    let desc = FromPrimitive::from_u8(record.fragment[1]);
                    match (level, desc) {
                        (Some(level), Some(desc)) => {
                            return Ok(AlertMessage(try!(Alert::new(level, desc))));
                        }
                        _ => return tls_err!(UnexpectedMessage,
                                             "unknown alert: {:?}",
                                             record.fragment),
                    }
                }
                HandshakeTy => {
                    if record.fragment.is_empty() {
                        return tls_err!(UnexpectedMessage, "zero-length Handshake arrived");
                    }
                    self.handshake_buffer.add_record(&record.fragment);
                    if let Some(handshake_msg) = try!(self.handshake_buffer.get_message()) {
                        return Ok(HandshakeMessage(handshake_msg));
                    }
                }
                ApplicationDataTy => {
                    return Ok(ApplicationDataMessage(record.fragment));
                }
            }
        }
    }
    /// Reads messages until application data arrives.
    ///
    /// # Panics
    ///
    /// Panics if called before the handshake installed a decryptor, and
    /// currently on any non-application message (unimplemented paths).
    pub fn read_application_data(&mut self) -> TlsResult<Vec<u8>> {
        if self.decryptor.is_none() {
            panic!("ApplicationData called before handshake");
        }
        loop {
            let msg = try!(self.read_message());
            match msg {
                ApplicationDataMessage(msg) => return Ok(msg),
                // TODO: handle other cases
                AlertMessage(..) => unimplemented!(),
                ChangeCipherSpecMessage(..) => unimplemented!(), // this should not come here
                HandshakeMessage(..) => unimplemented!(), // TODO: re-handshake
            }
        }
    }
    /// Reads the next message, expecting a handshake message.
    pub fn read_handshake(&mut self) -> TlsResult<Handshake> {
        match try!(self.read_message()) {
            HandshakeMessage(handshake) => Ok(handshake),
            AlertMessage(alert) => tls_err!(AlertReceived, "alert: {:?}", alert.description),
            _ => tls_err!(UnexpectedMessage, "expected Handshake"),
        }
    }
    /// Reads the next message, expecting ChangeCipherSpec.
    pub fn read_change_cipher_spec(&mut self) -> TlsResult<()> {
        match try!(self.read_message()) {
            ChangeCipherSpecMessage => Ok(()),
            _ => tls_err!(UnexpectedMessage, "expected ChangeCipherSpec"),
        }
    }
}
/// A TLS connection: a record writer/reader pair over the transport plus
/// the RNG used during the handshake.
pub struct Tls<R: Read, W: Write> {
    pub writer: RecordWriter<W>,
    pub reader: RecordReader<R>,
    pub rng: OsRng,
}
impl<R: Read, W: Write> Tls<R, W> {
    /// Wraps the transport halves in record writer/reader pairs.
    pub fn new(reader: R, writer: W, rng: OsRng) -> Tls<R, W> {
        Tls {
            writer: RecordWriter::new(writer),
            reader: RecordReader::new(reader),
            rng: rng,
        }
    }
    /// Sends a close_notify alert to tell the peer we are done writing.
    pub fn close(&mut self) -> TlsResult<()> {
        let alert_data = alert::Alert {
            level: alert::AlertLevel::fatal,
            description: alert::AlertDescription::close_notify,
        };
        try!(self.writer.write_alert(&alert_data));
        Ok(())
    }
    // send fatal alert and return error
    // (it may be different to `err`, because writing alert can fail)
    pub fn send_tls_alert(&mut self, err: TlsError) -> TlsError {
        // An I/O failure means the transport is gone; don't write more.
        if let TlsErrorKind::IoFailure = err.kind {
            return err;
        }
        let alert = alert::Alert::from_tls_err(&err);
        match self.writer.write_alert(&alert) {
            Ok(()) => err,
            Err(write_err) => write_err,
        }
    }
}
tls: Add doccomments and assertions
`RecordWriter.set_encryptor` and `RecordReader.set_decryptor` will panic
if they are called twice.
use std::io::prelude::*;
use rand::OsRng;
use num::traits::FromPrimitive;
use tls_result::{TlsResult, TlsError, TlsErrorKind};
use tls_result::TlsErrorKind::{UnexpectedMessage, RecordOverflow, BadRecordMac, AlertReceived};
use alert::{self, Alert};
use handshake::{Handshake, HandshakeBuffer};
use util::u64_be_array;
use util::{ReadExt, WriteExt};
use cipher::{Encryptor, Decryptor};
use tls_item::TlsItem;
use self::ContentType::{ChangeCipherSpecTy, AlertTy, HandshakeTy, ApplicationDataTy};
use self::Message::{HandshakeMessage, ChangeCipherSpecMessage, AlertMessage,
ApplicationDataMessage};
pub static TLS_VERSION: (u8, u8) = (3, 3);
// TLS record-layer content types; the numeric values are the on-wire codes.
// `enum_from_primitive!` also generates the `FromPrimitive` conversion used
// when parsing incoming record headers.
enum_from_primitive! {
#[repr(u8)]
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ContentType {
    ChangeCipherSpecTy = 20,
    AlertTy = 21,
    HandshakeTy = 22,
    ApplicationDataTy = 23,
    // HeartBeat = 24, RFC 6520 extension :-)
}
}
/// maximum length of Record (excluding content_type, version, length fields)
/// (the 2^14-byte TLSPlaintext limit)
pub const RECORD_MAX_LEN: usize = 1 << 14;
/// maximum length of EncryptedRecord (excluding content_type, version, length fields)
/// (plaintext limit plus 2048 bytes of allowed ciphertext expansion)
pub const ENC_RECORD_MAX_LEN: usize = (1 << 14) + 2048;
/// corresponds to `TLSPlaintext` in Section 6.2.1.
#[derive(Debug)]
pub struct Record {
    /// Record-layer content type of `fragment`.
    pub content_type: ContentType,
    /// Protocol version, major byte.
    pub ver_major: u8,
    /// Protocol version, minor byte.
    pub ver_minor: u8,
    // fragment length < 2^14
    pub fragment: Vec<u8>,
}
impl Record {
    /// Construct a plaintext record.
    ///
    /// # Panics
    /// Panics if `fragment` exceeds `RECORD_MAX_LEN` (2^14 bytes).
    pub fn new(content_type: ContentType,
               ver_major: u8,
               ver_minor: u8,
               fragment: Vec<u8>) -> Record {
        assert!(fragment.len() <= RECORD_MAX_LEN,
                "record too long: {} > 2^14", fragment.len());
        Record {
            content_type: content_type,
            ver_major: ver_major,
            ver_minor: ver_minor,
            fragment: fragment,
        }
    }
}
/// Writes `Record` or higher-layer message to a writable object.
/// Record is internally encrypted before written.
pub struct RecordWriter<W: Write> {
    writer: W,
    // if encryptor is None, handshake is not done yet.
    encryptor: Option<Box<Encryptor + Send + 'static>>,
    // records written since the encryptor was set; used as the per-record
    // sequence number (and nonce input) in `write_record`.
    write_count: u64,
}
impl<W: Write> RecordWriter<W> {
    /// Create new `RecordWriter` with null encryption.
    /// Invoke `set_encryptor` to set encryptor.
    pub fn new(writer: W) -> RecordWriter<W> {
        RecordWriter {
            writer: writer,
            encryptor: None,
            write_count: 0,
        }
    }
    /// Borrow the underlying writer.
    #[inline]
    pub fn get_mut(&mut self) -> &mut W {
        &mut self.writer
    }
    /// Set encryptor and reset count.
    /// This must be called only once.
    ///
    /// # Panics
    /// Panics if an encryptor has already been set.
    pub fn set_encryptor(&mut self, encryptor: Box<Encryptor + Send + 'static>) {
        assert!(self.encryptor.is_none());
        self.encryptor = Some(encryptor);
        self.write_count = 0;
    }
    /// Encrypt (when an encryptor is set) and write one record.
    ///
    /// # Panics
    /// Panics if the (encrypted) fragment exceeds `ENC_RECORD_MAX_LEN`.
    pub fn write_record(&mut self, record: Record) -> TlsResult<()> {
        let encrypted_fragment = match self.encryptor {
            // Null encryption before the handshake completes.
            None => record.fragment,
            Some(ref mut encryptor) => {
                // The record sequence number also serves as the nonce input.
                let seq_num = u64_be_array(self.write_count);
                // Additional authenticated data:
                // seq_num || content_type || version || plaintext length.
                let mut ad = Vec::new();
                ad.extend(&seq_num);
                ad.push(record.content_type as u8);
                ad.push(record.ver_major);
                ad.push(record.ver_minor);
                let frag_len = record.fragment.len() as u16;
                ad.push((frag_len >> 8) as u8);
                ad.push(frag_len as u8);
                let encrypted_fragment = encryptor.encrypt(&seq_num,
                                                           &record.fragment,
                                                           &ad);
                encrypted_fragment
            }
        };
        let fragment_len = encrypted_fragment.len();
        if fragment_len > ENC_RECORD_MAX_LEN {
            panic!("record too long: {} > 2^14 + 2048", fragment_len);
        }
        // Record header: type (1) || version (2) || length (2), then fragment.
        try!(self.writer.write_u8(record.content_type as u8));
        try!(self.writer.write_u8(record.ver_major));
        try!(self.writer.write_u8(record.ver_minor));
        try!(self.writer.write_be_u16(fragment_len as u16));
        try!(self.writer.write_all(&encrypted_fragment));
        self.write_count += 1;
        Ok(())
    }
    /// Split `data` into fragments of at most `RECORD_MAX_LEN` and write
    /// one record per fragment.
    pub fn write_data(&mut self, ty: ContentType, data: &[u8]) -> TlsResult<()> {
        let (major, minor) = TLS_VERSION;
        // TODO: configurable maxlen
        for fragment in data.chunks(RECORD_MAX_LEN) {
            let fragment = fragment.to_vec();
            let record = Record::new(ty, major, minor, fragment);
            try!(self.write_record(record));
        }
        Ok(())
    }
    /// Serialize and send a handshake message.
    pub fn write_handshake(&mut self, handshake: &Handshake) -> TlsResult<()> {
        let mut data = Vec::new();
        try!(handshake.tls_write(&mut data));
        self.write_data(HandshakeTy, &data)
    }
    /// Serialize and send an alert.
    pub fn write_alert(&mut self, alert: &Alert) -> TlsResult<()> {
        let mut data = Vec::new();
        try!(alert.tls_write(&mut data));
        self.write_data(AlertTy, &data)
    }
    /// Send the single-byte (0x01) ChangeCipherSpec message.
    pub fn write_change_cipher_spec(&mut self) -> TlsResult<()> {
        self.write_data(ChangeCipherSpecTy, &[1u8])
    }
    /// Send application data.
    ///
    /// # Panics
    /// Panics if called before an encryptor is set (i.e. before handshake).
    pub fn write_application_data(&mut self, data: &[u8]) -> TlsResult<()> {
        if self.encryptor.is_none() {
            panic!("attempted to write ApplicationData before handshake");
        }
        self.write_data(ApplicationDataTy, data)
    }
}
/// Return type of `RecordReader.read_message()`.
/// (Note: `read_record()` returns a raw `Record`; this is the parsed form.)
pub enum Message {
    /// A complete handshake message (possibly reassembled from several records).
    HandshakeMessage(Handshake),
    /// A valid single-byte ChangeCipherSpec record.
    ChangeCipherSpecMessage,
    /// A parsed alert.
    AlertMessage(Alert),
    /// Opaque application data.
    ApplicationDataMessage(Vec<u8>),
}
/// Reads records from `R`, buffering handshake fragments that span records.
pub struct RecordReader<R: ReadExt> {
    reader: R,
    // if decryptor is none, handshake is not done yet.
    decryptor: Option<Box<Decryptor + Send + 'static>>,
    // records read since the decryptor was set; used as the per-record
    // sequence number (and nonce input) in `read_record`.
    read_count: u64,
    // accumulates handshake fragments until a complete message is available.
    handshake_buffer: HandshakeBuffer,
}
/// Reads `Record` or `Message` from a readable object.
/// Record is internally decrypted after read.
impl<R: ReadExt> RecordReader<R> {
    /// Create a new `RecordReader` with null decryption.
    /// Invoke `set_decryptor` to set decryptor.
    pub fn new(reader: R) -> RecordReader<R> {
        RecordReader {
            reader: reader,
            decryptor: None,
            read_count: 0,
            handshake_buffer: HandshakeBuffer::new(),
        }
    }
    /// Borrow the underlying reader.
    #[inline]
    pub fn get_mut(&mut self) -> &mut R {
        &mut self.reader
    }
    /// Set decryptor and reset count.
    /// This must be called only once.
    ///
    /// # Panics
    /// Panics if a decryptor has already been set.
    pub fn set_decryptor(&mut self, decryptor: Box<Decryptor + Send + 'static>) {
        assert!(self.decryptor.is_none());
        self.decryptor = Some(decryptor);
        self.read_count = 0;
    }
    /// Read a record from readable stream.
    ///
    /// Any record with unknown content type is treated as an error.
    fn read_record(&mut self) -> TlsResult<Record> {
        // Header: content type (1 byte) ...
        let content_type = {
            let ty = try!(self.reader.read_u8());
            let ct: Option<ContentType> = FromPrimitive::from_u8(ty);
            match ct {
                Some(ty) => ty,
                None => return tls_err!(UnexpectedMessage, "unexpected ContentType: {}", ty),
            }
        };
        // ... version (2 bytes) ...
        let major = try!(self.reader.read_u8());
        let minor = try!(self.reader.read_u8());
        // ... and length (2 bytes), bounded by the encrypted-record limit.
        let len = {
            let len = try!(self.reader.read_be_u16()) as usize;
            if len > ENC_RECORD_MAX_LEN {
                return tls_err!(RecordOverflow, "TLSEncryptedText too long: {}", len);
            }
            len
        };
        let fragment = try!(self.reader.read_exact(len as usize));
        let record = match self.decryptor {
            None => {
                // Null decryption: the fragment is already plaintext, so it
                // must satisfy the tighter plaintext limit.
                if fragment.len() > RECORD_MAX_LEN {
                    return tls_err!(RecordOverflow,
                                    "decrypted record too long: {}",
                                    fragment.len());
                }
                Record::new(content_type, major, minor, fragment)
            }
            Some(ref mut decryptor) => {
                let seq_num = u64_be_array(self.read_count);
                // Additional authenticated data:
                // seq_num || type || version || plaintext length (total - MAC).
                let mut ad = Vec::new();
                ad.extend(&seq_num);
                ad.push(content_type as u8); // TLSCompressed.type
                ad.push(major);
                ad.push(minor);
                let mac_len = decryptor.mac_len();
                let total_len = fragment.len();
                if total_len < mac_len {
                    return tls_err!(BadRecordMac, "encrypted message too short: {}", total_len);
                }
                let frag_len = (total_len - mac_len) as u16;
                ad.push((frag_len >> 8) as u8);
                ad.push(frag_len as u8);
                // TODO: "seq_num as nonce" is chacha20poly1305-specific
                let data = try!(decryptor.decrypt(&seq_num, &fragment, &ad));
                if data.len() > RECORD_MAX_LEN {
                    // decryption routine went wrong.
                    // NOTE(review): the `return` before `panic!` is redundant,
                    // since `panic!` never returns.
                    return panic!("decrypted record too long: {}", data.len());
                }
                Record::new(content_type, major, minor, data)
            }
        };
        self.read_count += 1;
        Ok(record)
    }
    /// Read records until a "complete" message is found, then return the message.
    ///
    /// if invalid ChangeCipherSpec/Alert/Handshake message is found, return Err.
    /// (application record is always considered "complete" and "valid"
    /// since it is opaque to TLS layer.)
    ///
    /// Note: In theory, `Alert` message can be broken into several records.
    /// It is not useful in practice and requires more complex routines.
    /// (Incorrect handling leads to [Alert attack](http://www.mitls.org/wsgi/alert-attack).)
    ///
    /// We treat partial alert message as an error and returns `UnexpectedMessage`.
    pub fn read_message(&mut self) -> TlsResult<Message> {
        // A complete handshake message may already be buffered.
        match try!(self.handshake_buffer.get_message()) {
            Some(handshake_msg) => return Ok(HandshakeMessage(handshake_msg)),
            None => {}
        }
        // ok, no message found. read it from network!
        loop {
            // TODO: what if handshake record is present in buffer then
            // other record comes? is it legal?
            let record = try!(self.read_record());
            match record.content_type {
                ChangeCipherSpecTy => {
                    // Must be exactly the single byte 0x01.
                    if record.fragment.len() != 1 || record.fragment[0] != 1 {
                        return tls_err!(UnexpectedMessage, "invalid ChangeCipherSpec arrived");
                    }
                    return Ok(ChangeCipherSpecMessage);
                }
                AlertTy => {
                    let len = record.fragment.len();
                    if len == 0 {
                        return tls_err!(UnexpectedMessage, "zero-length Alert record arrived");
                    } else if len < 2 {
                        // alert attack
                        return tls_err!(UnexpectedMessage, "awkward Alert record arrived");
                    }
                    // Alert body: level (1 byte) || description (1 byte).
                    let level = FromPrimitive::from_u8(record.fragment[0]);
                    let desc = FromPrimitive::from_u8(record.fragment[1]);
                    match (level, desc) {
                        (Some(level), Some(desc)) => {
                            return Ok(AlertMessage(try!(Alert::new(level, desc))));
                        }
                        _ => return tls_err!(UnexpectedMessage,
                                             "unknown alert: {:?}",
                                             record.fragment),
                    }
                }
                HandshakeTy => {
                    if record.fragment.len() == 0 {
                        return tls_err!(UnexpectedMessage, "zero-length Handshake arrived");
                    }
                    // Buffer the fragment; a handshake message may span records.
                    self.handshake_buffer.add_record(&record.fragment);
                    match try!(self.handshake_buffer.get_message()) {
                        Some(handshake_msg) => return Ok(HandshakeMessage(handshake_msg)),
                        _ => {}
                    }
                }
                ApplicationDataTy => {
                    return Ok(ApplicationDataMessage(record.fragment));
                }
            }
        }
    }
    /// Read messages until application data arrives and return it.
    ///
    /// # Panics
    /// Panics if called before a decryptor is set (i.e. before handshake),
    /// or (currently) if any non-application message arrives.
    pub fn read_application_data(&mut self) -> TlsResult<Vec<u8>> {
        if self.decryptor.is_none() {
            panic!("ApplicationData called before handshake");
        }
        loop {
            let msg = try!(self.read_message());
            match msg {
                ApplicationDataMessage(msg) => return Ok(msg),
                // TODO: handle other cases
                AlertMessage(..) => unimplemented!(),
                ChangeCipherSpecMessage(..) => unimplemented!(), // this should not come here
                HandshakeMessage(..) => unimplemented!(), // TODO: re-handshake
            }
        }
    }
    /// Read messages until a handshake message arrives and return it.
    pub fn read_handshake(&mut self) -> TlsResult<Handshake> {
        match try!(self.read_message()) {
            HandshakeMessage(handshake) => Ok(handshake),
            AlertMessage(alert) => tls_err!(AlertReceived, "alert: {:?}", alert.description),
            _ => tls_err!(UnexpectedMessage, "expected Handshake"),
        }
    }
    /// Read messages until ChangeCipherSpec arrives.
    pub fn read_change_cipher_spec(&mut self) -> TlsResult<()> {
        match try!(self.read_message()) {
            ChangeCipherSpecMessage => Ok(()),
            _ => tls_err!(UnexpectedMessage, "expected ChangeCipherSpec"),
        }
    }
}
/// A TLS connection: an outgoing record writer over `W`, an incoming
/// record reader over `R`, and an OS random-number generator.
pub struct Tls<R: Read, W: Write> {
    // Outgoing record layer.
    pub writer: RecordWriter<W>,
    // Incoming record layer.
    pub reader: RecordReader<R>,
    // OS RNG; presumably used for handshake randomness elsewhere -- not read here.
    pub rng: OsRng,
}
impl<R: Read, W: Write> Tls<R, W> {
    /// Wrap a reader/writer pair into a TLS connection with null
    /// encryption (handshake not yet done).
    pub fn new(reader: R, writer: W, rng: OsRng) -> Tls<R, W> {
        Tls {
            writer: RecordWriter::new(writer),
            reader: RecordReader::new(reader),
            rng: rng,
        }
    }

    /// Send a `close_notify` alert to the peer.
    ///
    /// NOTE(review): the alert is sent at `fatal` level; RFC 5246 describes
    /// close_notify as a warning-level alert -- confirm this is intentional.
    pub fn close(&mut self) -> TlsResult<()> {
        let close_notify = alert::Alert {
            level: alert::AlertLevel::fatal,
            description: alert::AlertDescription::close_notify,
        };
        try!(self.writer.write_alert(&close_notify));
        Ok(())
    }

    // send fatal alert and return error
    // (it may be different to `err`, because writing alert can fail)
    pub fn send_tls_alert(&mut self, err: TlsError) -> TlsError {
        // I/O failures are returned as-is: the connection is already unusable.
        if let TlsErrorKind::IoFailure = err.kind {
            return err;
        }
        let alert = alert::Alert::from_tls_err(&err);
        match self.writer.write_alert(&alert) {
            Ok(()) => err,
            Err(write_err) => write_err,
        }
    }
}
|
//
// tui.rs
// Copyright (C) 2017 Szymon Urbaś <szymon.urbas@aol.com>
// Distributed under terms of the BSD (2-clause) license.
//
// Created on: 16 Mar 2017 22:14:17 +0100 (CET)
//
use cursive::Cursive;
use cursive::view::*;
use cursive::views::*;
use process::*;
/// Terminal UI wrapper around a cursive root.
pub struct Tui {
    // Cursive event loop / screen handle; driven one step at a time by `update()`.
    renderer: Cursive,
}
/// Build the TUI: a process list on the left, a header/results pane on the
/// right, plus a debug pane when the `debug-info` feature is enabled.
/// `q` quits the UI.
pub fn new() -> Tui {
    let mut root = Cursive::new();

    let processes = ListView::new().with_id("process_list").min_width(32);
    let header_view = TextView::new("").with_id("header");
    let results_view = TextView::new("").scroll_strategy(ScrollStrategy::StickToBottom)
        .with_id("results").full_height().min_width(48);
    let debug_view = TextView::new("").scroll_strategy(ScrollStrategy::StickToBottom).with_id("debug").full_height();

    let mut right_pane = LinearLayout::vertical()
        .child(Dialog::around(header_view).title("Header"))
        .child(Dialog::around(results_view).title("Results"));
    // The debug pane only exists for debug builds of the UI.
    if cfg!(feature = "debug-info") {
        right_pane.add_child(Dialog::around(debug_view).title("Debug info"));
    }

    let mut columns = LinearLayout::horizontal();
    columns.add_child(Dialog::around(processes).title("Process list"));
    columns.add_child(right_pane);

    root.set_fps(60);
    root.add_layer(columns);
    root.add_global_callback('q', |tui| tui.quit());

    Tui { renderer: root }
}
impl Tui {
    /// Run one iteration of the cursive event loop.
    pub fn update(&mut self) {
        self.renderer.step();
    }

    /// Rebuild the process list: one row per process with a progress bar
    /// showing execution_time / burst_time as a percentage.
    pub fn draw_process_list(&mut self, process_list: &[Process]) {
        let mut process_list_view = self.renderer.find_id::<ListView>("process_list").unwrap();
        process_list_view.clear();
        for p in process_list.iter() {
            // Percentage of the burst already executed (the redundant extra
            // `as f64` cast and the `name` clone were removed).
            let progress_bar_value = ((p.execution_time as f64 / p.burst_time as f64) * 100f64) as usize;
            process_list_view.add_child(p.name.as_str(),
                ProgressBar::new().with_value(Counter::new(progress_bar_value)));
        }
    }

    /// Replace the header text.
    pub fn set_header(&mut self, text: String) {
        let mut header = self.renderer.find_id::<TextView>("header").unwrap();
        // `text` is owned; passing it directly avoids a redundant clone.
        header.set_content(text);
    }

    /// Append a line to the debug pane (no-op unless the `debug-info`
    /// feature is enabled).
    pub fn debug(&mut self, text: String) {
        if cfg!(feature = "debug-info") {
            let mut debug = self.renderer.find_id::<TextView>("debug").unwrap();
            debug.append_content(&text);
            debug.append_content("\n");
        }
    }

    /// Append a line to the results pane.
    pub fn add_result(&mut self, text: String) {
        // Renamed the local from `debug` -- it targets the results view.
        let mut results = self.renderer.find_id::<TextView>("results").unwrap();
        results.append_content(&text);
        results.append_content("\n");
    }
}
/*
* vi: ts=2 sw=2 expandtab
*/
Slight rework of setting up the design
//
// tui.rs
// Copyright (C) 2017 Szymon Urbaś <szymon.urbas@aol.com>
// Distributed under terms of the BSD (2-clause) license.
//
// Created on: 16 Mar 2017 22:14:17 +0100 (CET)
//
use cursive::Cursive;
use cursive::view::*;
use cursive::views::*;
use process::*;
/// Terminal UI wrapper around a cursive root.
pub struct Tui {
    // Cursive event loop / screen handle; driven one step at a time by `update()`.
    renderer: Cursive,
}
/// Build the TUI: process list beside a header/results column, with a
/// scenarios pane below; `q` quits the UI.
pub fn new() -> Tui {
    let mut renderer = Cursive::new();
    let process_list = ListView::new().with_id("process_list").min_width(32);
    let results = TextView::new("").scroll_strategy(ScrollStrategy::StickToBottom)
        .with_id("results").full_height().min_width(48);
    let header = TextView::new("").with_id("header");
    let debug = TextView::new("").scroll_strategy(ScrollStrategy::StickToBottom).with_id("debug").full_height();
    let scenarios = ListView::new().with_id("scenarios").max_height(4);
    renderer.set_fps(60);
    // NOTE(review): the debug view is added as a layer *before* the main
    // layout layer; the later layer is stacked on top of it -- confirm the
    // debug layer is still visible as intended.
    if cfg!(feature = "debug-info") {
        renderer.add_layer(debug);
    }
    renderer.add_layer(
        LinearLayout::vertical().child(
            LinearLayout::horizontal()
                .child(Dialog::around(process_list).title("Process list"))
                .child(
                    LinearLayout::vertical()
                        .child(Dialog::around(header).title("Header"))
                        .child(Dialog::around(results).title("Results"))
                )
        ).child(Dialog::around(scenarios).title("Scenarios")));
    renderer.add_global_callback('q', |tui| tui.quit());
    Tui {
        renderer: renderer,
    }
}
impl Tui {
    /// Run one iteration of the cursive event loop.
    pub fn update(&mut self) {
        self.renderer.step();
    }

    /// Rebuild the process list: one row per process with a progress bar
    /// showing execution_time / burst_time as a percentage.
    pub fn draw_process_list(&mut self, process_list: &[Process]) {
        let mut process_list_view = self.renderer.find_id::<ListView>("process_list").unwrap();
        process_list_view.clear();
        for p in process_list.iter() {
            // Percentage of the burst already executed (the redundant extra
            // `as f64` cast and the `name` clone were removed).
            let progress_bar_value = ((p.execution_time as f64 / p.burst_time as f64) * 100f64) as usize;
            process_list_view.add_child(p.name.as_str(),
                ProgressBar::new().with_value(Counter::new(progress_bar_value)));
        }
    }

    /// Replace the header text.
    pub fn set_header(&mut self, text: String) {
        let mut header = self.renderer.find_id::<TextView>("header").unwrap();
        // `text` is owned; passing it directly avoids a redundant clone.
        header.set_content(text);
    }

    /// Append a line to the debug pane (no-op unless the `debug-info`
    /// feature is enabled).
    pub fn debug(&mut self, text: String) {
        if cfg!(feature = "debug-info") {
            let mut debug = self.renderer.find_id::<TextView>("debug").unwrap();
            debug.append_content(&text);
            debug.append_content("\n");
        }
    }

    /// Append a line to the results pane.
    pub fn add_result(&mut self, text: String) {
        // Renamed the local from `debug` -- it targets the results view.
        let mut results = self.renderer.find_id::<TextView>("results").unwrap();
        results.append_content(&text);
        results.append_content("\n");
    }
}
/*
* vi: ts=2 sw=2 expandtab
*/
|
use iron::{Iron, IronResult, Listening, status};
use iron::error::HttpResult;
use iron::response::Response;
use iron::request::Request;
use iron::middleware::Handler;
use iron::mime::Mime;
use sysinfo::{System, SystemExt};
use std::sync::{Arc, Mutex};
use SysinfoExt;
use serde_json;
// Front-end page embedded into the binary at compile time.
const INDEX_HTML: &'static str = include_str!("index.html");
/// Iron handler sharing a single `System` across requests behind a mutex.
struct SysinfoIronHandler(Arc<Mutex<System>>);
impl Handler for SysinfoIronHandler {
    /// Serve the embedded index page for the root path and a JSON dump of
    /// freshly-refreshed system information for any other path.
    fn handle(&self, req: &mut Request) -> IronResult<Response> {
        match req.url.path().last() {
            Some(path) => {
                // An empty last segment means the root URL ("/").
                if path.is_empty() {
                    Ok(Response::with((status::Ok,
                                       "text/html".parse::<Mime>().unwrap(),
                                       INDEX_HTML)))
                } else {
                    let mut system = self.0.lock().unwrap();
                    // Refresh so every request reports current data.
                    system.refresh_all();
                    let sysinfo = SysinfoExt::new(&system);
                    Ok(Response::with((status::Ok,
                                       "application/json".parse::<Mime>().unwrap(),
                                       // Empty body if serialization fails;
                                       // `unwrap_or_default` avoids the eager
                                       // `String::new()` allocation.
                                       serde_json::to_string(&sysinfo).unwrap_or_default())))
                }
            },
            None => Ok(Response::with((status::NotFound, "Not found")))
        }
    }
}
/// Start the HTTP server on `sock_addr`, defaulting to `localhost:3000`.
pub fn start_web_server(sock_addr: Option<String>) -> HttpResult<Listening> {
    let system = Arc::new(Mutex::new(System::new()));
    let mut iron = Iron::new(SysinfoIronHandler(system));
    iron.threads = 4;
    // `unwrap_or_else` avoids allocating the default when an address is given.
    iron.http(sock_addr.unwrap_or_else(|| "localhost:3000".to_owned()))
}
// NOTE(review): `sysinfo_json_str` is not defined anywhere in this file;
// this test appears obsolete and will not compile -- confirm and remove.
#[test]
fn test_sysinfo_json_str() {
    assert!(!sysinfo_json_str().is_empty());
}
Remove obsolete test
use iron::{Iron, IronResult, Listening, status};
use iron::error::HttpResult;
use iron::response::Response;
use iron::request::Request;
use iron::middleware::Handler;
use iron::mime::Mime;
use sysinfo::{System, SystemExt};
use std::sync::{Arc, Mutex};
use SysinfoExt;
use serde_json;
// Front-end page embedded into the binary at compile time.
const INDEX_HTML: &'static str = include_str!("index.html");
/// Iron handler sharing a single `System` across requests behind a mutex.
struct SysinfoIronHandler(Arc<Mutex<System>>);
impl Handler for SysinfoIronHandler {
    /// Serve the embedded index page for the root path and a JSON dump of
    /// freshly-refreshed system information for any other path.
    fn handle(&self, req: &mut Request) -> IronResult<Response> {
        match req.url.path().last() {
            Some(path) => {
                // An empty last segment means the root URL ("/").
                if path.is_empty() {
                    Ok(Response::with((status::Ok,
                                       "text/html".parse::<Mime>().unwrap(),
                                       INDEX_HTML)))
                } else {
                    let mut system = self.0.lock().unwrap();
                    // Refresh so every request reports current data.
                    system.refresh_all();
                    let sysinfo = SysinfoExt::new(&system);
                    Ok(Response::with((status::Ok,
                                       "application/json".parse::<Mime>().unwrap(),
                                       // Empty body if serialization fails;
                                       // `unwrap_or_default` avoids the eager
                                       // `String::new()` allocation.
                                       serde_json::to_string(&sysinfo).unwrap_or_default())))
                }
            },
            None => Ok(Response::with((status::NotFound, "Not found")))
        }
    }
}
pub fn start_web_server(sock_addr: Option<String>) -> HttpResult<Listening> {
let system = Arc::new(Mutex::new(System::new()));
let mut iron = Iron::new(SysinfoIronHandler(system));
iron.threads = 4;
iron.http(sock_addr.unwrap_or("localhost:3000".to_owned()))
}
|
// This file is part of the uutils coreutils package.
//
// (c) Derek Chiang <derekchiang93@gmail.com>
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
use clap::{crate_version, Arg, Command};
use std::path::Path;
use uucore::display::print_verbatim;
use uucore::error::{UResult, UUsageError};
use uucore::{format_usage, InvalidEncodingHandling};
// One-line description shown by --help.
static ABOUT: &str = "strip last component from file name";
// Usage template; `{}` is replaced with the utility name by `format_usage`.
const USAGE: &str = "{} [OPTION] NAME...";
/// clap argument identifiers.
mod options {
    pub const ZERO: &str = "zero";
    pub const DIR: &str = "dir";
}
/// Extra help text appended after the option list.
fn get_long_usage() -> String {
    String::from(
        // `\n\` breaks the line without embedding the source indentation of
        // the continuation line into the help output (the plain multi-line
        // literal leaked leading whitespace into the text).
        "Output each NAME with its last non-slash component and trailing slashes \n\
         removed; if NAME contains no /'s, output '.' (meaning the current directory).",
    )
}
#[uucore::main]
/// dirname entry point: print the parent directory of each NAME operand,
/// separated by newline (or NUL with `-z`).
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
    let args = args
        .collect_str(InvalidEncodingHandling::ConvertLossy)
        .accept_any();
    let after_help = get_long_usage();
    let matches = uu_app().after_help(&after_help[..]).get_matches_from(args);
    // -z terminates each result with NUL instead of newline.
    let separator = if matches.is_present(options::ZERO) {
        "\0"
    } else {
        "\n"
    };
    let dirnames: Vec<String> = matches
        .values_of(options::DIR)
        .unwrap_or_default()
        .map(str::to_owned)
        .collect();
    if !dirnames.is_empty() {
        for path in &dirnames {
            let p = Path::new(path);
            match p.parent() {
                Some(d) => {
                    // An empty parent means a bare file name: dirname is ".".
                    // (`.is_none()` replaces the non-idiomatic `== None`.)
                    if d.components().next().is_none() {
                        print!(".");
                    } else {
                        // Print without lossy conversion of non-UTF-8 names.
                        print_verbatim(d).unwrap();
                    }
                }
                None => {
                    // No parent at all: the root itself, or a relative name.
                    if p.is_absolute() || path == "/" {
                        print!("/");
                    } else {
                        print!(".");
                    }
                }
            }
            print!("{}", separator);
        }
    } else {
        return Err(UUsageError::new(1, "missing operand"));
    }
    Ok(())
}
/// Define the clap command: the `-z` flag plus hidden positional NAMEs.
pub fn uu_app<'a>() -> Command<'a> {
    let zero = Arg::new(options::ZERO)
        .long(options::ZERO)
        .short('z')
        .help("separate output with NUL rather than newline");
    let dirs = Arg::new(options::DIR)
        .hide(true)
        .multiple_occurrences(true)
        .value_hint(clap::ValueHint::AnyPath);
    Command::new(uucore::util_name())
        .version(crate_version!())
        .about(ABOUT)
        .override_usage(format_usage(USAGE))
        .infer_long_args(true)
        .arg(zero)
        .arg(dirs)
}
dirname: add missing "\n\" to usage message
// This file is part of the uutils coreutils package.
//
// (c) Derek Chiang <derekchiang93@gmail.com>
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
use clap::{crate_version, Arg, Command};
use std::path::Path;
use uucore::display::print_verbatim;
use uucore::error::{UResult, UUsageError};
use uucore::{format_usage, InvalidEncodingHandling};
// One-line description shown by --help.
static ABOUT: &str = "strip last component from file name";
// Usage template; `{}` is replaced with the utility name by `format_usage`.
const USAGE: &str = "{} [OPTION] NAME...";
/// clap argument identifiers.
mod options {
    pub const ZERO: &str = "zero";
    pub const DIR: &str = "dir";
}
/// Extra help text appended after the option list.
fn get_long_usage() -> String {
    // `\n\` keeps the continuation line's source indentation out of the output.
    "Output each NAME with its last non-slash component and trailing slashes \n\
     removed; if NAME contains no /'s, output '.' (meaning the current directory)."
        .to_string()
}
#[uucore::main]
/// dirname entry point: print the parent directory of each NAME operand,
/// separated by newline (or NUL with `-z`).
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
    let args = args
        .collect_str(InvalidEncodingHandling::ConvertLossy)
        .accept_any();
    let after_help = get_long_usage();
    let matches = uu_app().after_help(&after_help[..]).get_matches_from(args);
    // -z terminates each result with NUL instead of newline.
    let separator = if matches.is_present(options::ZERO) {
        "\0"
    } else {
        "\n"
    };
    let dirnames: Vec<String> = matches
        .values_of(options::DIR)
        .unwrap_or_default()
        .map(str::to_owned)
        .collect();
    if !dirnames.is_empty() {
        for path in &dirnames {
            let p = Path::new(path);
            match p.parent() {
                Some(d) => {
                    // An empty parent means a bare file name: dirname is ".".
                    // (`.is_none()` replaces the non-idiomatic `== None`.)
                    if d.components().next().is_none() {
                        print!(".");
                    } else {
                        // Print without lossy conversion of non-UTF-8 names.
                        print_verbatim(d).unwrap();
                    }
                }
                None => {
                    // No parent at all: the root itself, or a relative name.
                    if p.is_absolute() || path == "/" {
                        print!("/");
                    } else {
                        print!(".");
                    }
                }
            }
            print!("{}", separator);
        }
    } else {
        return Err(UUsageError::new(1, "missing operand"));
    }
    Ok(())
}
/// Define the clap command: the `-z` flag plus hidden positional NAMEs.
pub fn uu_app<'a>() -> Command<'a> {
    let zero = Arg::new(options::ZERO)
        .long(options::ZERO)
        .short('z')
        .help("separate output with NUL rather than newline");
    let dirs = Arg::new(options::DIR)
        .hide(true)
        .multiple_occurrences(true)
        .value_hint(clap::ValueHint::AnyPath);
    Command::new(uucore::util_name())
        .version(crate_version!())
        .about(ABOUT)
        .override_usage(format_usage(USAGE))
        .infer_long_args(true)
        .arg(zero)
        .arg(dirs)
}
|
#![feature(rustc_private)]
#![cfg_attr(feature = "deny-warnings", deny(warnings))]
// warn on lints, that are included in `rust-lang/rust`s bootstrap
#![warn(rust_2018_idioms, unused_lifetimes)]
// warn on rustc internal lints
#![deny(rustc::internal)]
// FIXME: switch to something more ergonomic here, once available.
// (Currently there is no way to opt into sysroot crates without `extern crate`.)
extern crate rustc_data_structures;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_interface;
extern crate rustc_middle;
use rustc_interface::interface;
use rustc_middle::ty::TyCtxt;
use rustc_tools_util::VersionInfo;
use lazy_static::lazy_static;
use std::borrow::Cow;
use std::env;
use std::ops::Deref;
use std::panic;
use std::path::{Path, PathBuf};
use std::process::{exit, Command};
mod lintlist;
/// If a command-line option matches `find_arg`, then apply the predicate `pred` on its value. If
/// true, then return it. The parameter is assumed to be either `--arg=value` or `--arg value`.
fn arg_value<'a, T: Deref<Target = str>>(
    args: &'a [T],
    find_arg: &str,
    pred: impl Fn(&str) -> bool,
) -> Option<&'a str> {
    let mut iter = args.iter().map(Deref::deref);
    while let Some(current) = iter.next() {
        // Split "--arg=value" into at most two pieces.
        let mut split = current.splitn(2, '=');
        if split.next() != Some(find_arg) {
            continue;
        }
        // Value is either after '=' or the following argument.
        let value = split.next().or_else(|| iter.next());
        if let Some(v) = value {
            if pred(v) {
                return Some(v);
            }
        }
    }
    None
}
// Unit test for `arg_value`: covers `--arg=value`, `--arg value`,
// predicate rejection, and a flag with no value at all.
#[test]
fn test_arg_value() {
    let args = &["--bar=bar", "--foobar", "123", "--foo"];

    assert_eq!(arg_value(&[] as &[&str], "--foobar", |_| true), None);
    assert_eq!(arg_value(args, "--bar", |_| false), None);
    assert_eq!(arg_value(args, "--bar", |_| true), Some("bar"));
    assert_eq!(arg_value(args, "--bar", |p| p == "bar"), Some("bar"));
    assert_eq!(arg_value(args, "--bar", |p| p == "foo"), None);
    assert_eq!(arg_value(args, "--foobar", |p| p == "foo"), None);
    assert_eq!(arg_value(args, "--foobar", |p| p == "123"), Some("123"));
    assert_eq!(arg_value(args, "--foo", |_| true), None);
}
/// Callbacks that leave rustc's behavior unchanged (used for the `--rustc`
/// passthrough mode in `main`).
struct DefaultCallbacks;
impl rustc_driver::Callbacks for DefaultCallbacks {}
/// Callbacks that register Clippy's lint passes with the compiler.
struct ClippyCallbacks;
impl rustc_driver::Callbacks for ClippyCallbacks {
    fn config(&mut self, config: &mut interface::Config) {
        // Chain onto any previously-registered lint-registration callback.
        let previous = config.register_lints.take();
        config.register_lints = Some(Box::new(move |sess, mut lint_store| {
            // technically we're ~guaranteed that this is none but might as well call anything that
            // is there already. Certainly it can't hurt.
            if let Some(previous) = &previous {
                (previous)(sess, lint_store);
            }
            let conf = clippy_lints::read_conf(&[], &sess);
            clippy_lints::register_plugins(&mut lint_store, &sess, &conf);
            clippy_lints::register_pre_expansion_lints(&mut lint_store);
            clippy_lints::register_renamed(&mut lint_store);
        }));
        // FIXME: #4825; This is required, because Clippy lints that are based on MIR have to be
        // run on the unoptimized MIR. On the other hand this results in some false negatives. If
        // MIR passes can be enabled / disabled separately, we should figure out, what passes to
        // use for Clippy.
        config.opts.debugging_opts.mir_opt_level = 0;
    }
}
/// Print every known Clippy lint and lint group with its default level,
/// mimicking rustc's own `-W help` output.
#[allow(clippy::find_map, clippy::filter_map)]
fn describe_lints() {
    use lintlist::{Level, Lint, ALL_LINTS, LINT_LEVELS};
    use rustc_data_structures::fx::FxHashSet;
    println!(
        "
Available lint options:
-W <foo> Warn about <foo>
-A <foo> Allow <foo>
-D <foo> Deny <foo>
-F <foo> Forbid <foo> (deny <foo> and all attempts to override)
"
    );
    // Map a lint to the default level of the group it belongs to.
    let lint_level = |lint: &Lint| {
        LINT_LEVELS
            .iter()
            .find(|level_mapping| level_mapping.0 == lint.group)
            .map(|(_, level)| match level {
                Level::Allow => "allow",
                Level::Warn => "warn",
                Level::Deny => "deny",
            })
            .unwrap()
    };
    let mut lints: Vec<_> = ALL_LINTS.iter().collect();
    // The sort doesn't case-fold but it's doubtful we care.
    lints.sort_by_cached_key(|x: &&Lint| (lint_level(x), x.name));
    // Width of the widest "clippy::<name>" for column alignment.
    let max_lint_name_len = lints
        .iter()
        .map(|lint| lint.name.len())
        .map(|len| len + "clippy::".len())
        .max()
        .unwrap_or(0);
    // Right-align `x` within the name column.
    let padded = |x: &str| {
        let mut s = " ".repeat(max_lint_name_len - x.chars().count());
        s.push_str(x);
        s
    };
    let scoped = |x: &str| format!("clippy::{}", x);
    let lint_groups: FxHashSet<_> = lints.iter().map(|lint| lint.group).collect();
    println!("Lint checks provided by clippy:\n");
    println!(" {} {:7.7} meaning", padded("name"), "default");
    println!(" {} {:7.7} -------", padded("----"), "-------");
    let print_lints = |lints: &[&Lint]| {
        for lint in lints {
            // Lint names use underscores internally but dashes on the CLI.
            let name = lint.name.replace("_", "-");
            println!(
                " {} {:7.7} {}",
                padded(&scoped(&name)),
                lint_level(lint),
                lint.desc
            );
        }
        println!("\n");
    };
    print_lints(&lints);
    // Same alignment computation for the group table.
    let max_group_name_len = std::cmp::max(
        "clippy::all".len(),
        lint_groups
            .iter()
            .map(|group| group.len())
            .map(|len| len + "clippy::".len())
            .max()
            .unwrap_or(0),
    );
    let padded_group = |x: &str| {
        let mut s = " ".repeat(max_group_name_len - x.chars().count());
        s.push_str(x);
        s
    };
    println!("Lint groups provided by clippy:\n");
    println!(" {} sub-lints", padded_group("name"));
    println!(" {} ---------", padded_group("----"));
    println!(" {} the set of all clippy lints", padded_group("clippy::all"));
    let print_lint_groups = || {
        for group in lint_groups {
            let name = group.to_lowercase().replace("_", "-");
            // Comma-separated list of the group's member lints.
            let desc = lints
                .iter()
                .filter(|&lint| lint.group == group)
                .map(|lint| lint.name)
                .map(|name| name.replace("_", "-"))
                .collect::<Vec<String>>()
                .join(", ");
            println!(" {} {}", padded_group(&scoped(&name)), desc);
        }
        println!("\n");
    };
    print_lint_groups();
}
/// Print the `cargo clippy` command-line help text.
fn display_help() {
    println!(
        "\
Checks a package to catch common mistakes and improve your Rust code.
Usage:
cargo clippy [options] [--] [<opts>...]
Common options:
-h, --help Print this message
--rustc Pass all args to rustc
-V, --version Print version info and exit
Other options are the same as `cargo check`.
To allow or deny a lint from the command line you can use `cargo clippy --`
with:
-W --warn OPT Set lint warnings
-A --allow OPT Set lint allowed
-D --deny OPT Set lint denied
-F --forbid OPT Set lint forbidden
You can use tool lints to allow or deny lints from your code, eg.:
#[allow(clippy::needless_lifetimes)]
"
    );
}
const BUG_REPORT_URL: &str = "https://github.com/rust-lang/rust-clippy/issues/new";
lazy_static! {
    /// The previous panic hook, saved while installing `report_clippy_ice`
    /// as the process-wide panic hook. Forced early from `main` via
    /// `lazy_static::initialize` so the hook swap happens exactly once.
    static ref ICE_HOOK: Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static> = {
        let hook = panic::take_hook();
        panic::set_hook(Box::new(|info| report_clippy_ice(info, BUG_REPORT_URL)));
        hook
    };
}
/// Panic hook for internal compiler errors: delegate to the saved default
/// hook for the panic message, then emit a "bug" diagnostic with the report
/// URL and Clippy version, and optionally the query stack.
fn report_clippy_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str) {
    // Invoke our ICE handler, which prints the actual panic message and optionally a backtrace
    (*ICE_HOOK)(info);
    // Separate the output with an empty line
    eprintln!();
    let emitter = Box::new(rustc_errors::emitter::EmitterWriter::stderr(
        rustc_errors::ColorConfig::Auto,
        None,
        false,
        false,
        None,
        false,
    ));
    let handler = rustc_errors::Handler::with_emitter(true, None, emitter);
    // a .span_bug or .bug call has already printed what
    // it wants to print.
    if !info.payload().is::<rustc_errors::ExplicitBug>() {
        let d = rustc_errors::Diagnostic::new(rustc_errors::Level::Bug, "unexpected panic");
        handler.emit_diagnostic(&d);
    }
    let version_info = rustc_tools_util::get_version_info!();
    let xs: Vec<Cow<'static, str>> = vec![
        "the compiler unexpectedly panicked. this is a bug.".into(),
        format!("we would appreciate a bug report: {}", bug_report_url).into(),
        format!("Clippy version: {}", version_info).into(),
    ];
    for note in &xs {
        // Fixed mojibake: the source contained `¬e` (a mis-decoded `&note`),
        // which does not compile.
        handler.note_without_error(&note);
    }
    // If backtraces are enabled, also print the query stack
    let backtrace = env::var_os("RUST_BACKTRACE").map_or(false, |x| &x != "0");
    if backtrace {
        TyCtxt::try_print_query_stack(&handler);
    }
}
/// Build `<home>/toolchains/<toolchain>`; returns `None` unless both
/// components are present.
fn toolchain_path(home: Option<String>, toolchain: Option<String>) -> Option<PathBuf> {
    match (home, toolchain) {
        (Some(home), Some(toolchain)) => {
            let mut path = PathBuf::from(home);
            path.push("toolchains");
            path.push(toolchain);
            Some(path)
        }
        _ => None,
    }
}
/// Entry point for `clippy-driver`: runs rustc with Clippy's lint callbacks
/// installed, unless linting is disabled for this invocation.
pub fn main() {
    rustc_driver::init_rustc_env_logger();
    lazy_static::initialize(&ICE_HOOK);
    exit(rustc_driver::catch_with_exit_code(move || {
        let mut orig_args: Vec<String> = env::args().collect();

        // Get the sysroot, looking from most specific to this invocation to the least:
        // - command line
        // - runtime environment
        //    - SYSROOT
        //    - RUSTUP_HOME, MULTIRUST_HOME, RUSTUP_TOOLCHAIN, MULTIRUST_TOOLCHAIN
        // - sysroot from rustc in the path
        // - compile-time environment
        //    - SYSROOT
        //    - RUSTUP_HOME, MULTIRUST_HOME, RUSTUP_TOOLCHAIN, MULTIRUST_TOOLCHAIN
        let sys_root_arg = arg_value(&orig_args, "--sysroot", |_| true);
        let have_sys_root_arg = sys_root_arg.is_some();
        let sys_root = sys_root_arg
            .map(PathBuf::from)
            .or_else(|| std::env::var("SYSROOT").ok().map(PathBuf::from))
            .or_else(|| {
                let home = std::env::var("RUSTUP_HOME")
                    .or_else(|_| std::env::var("MULTIRUST_HOME"))
                    .ok();
                let toolchain = std::env::var("RUSTUP_TOOLCHAIN")
                    .or_else(|_| std::env::var("MULTIRUST_TOOLCHAIN"))
                    .ok();
                toolchain_path(home, toolchain)
            })
            .or_else(|| {
                // Ask the `rustc` on PATH for its sysroot.
                Command::new("rustc")
                    .arg("--print")
                    .arg("sysroot")
                    .output()
                    .ok()
                    .and_then(|out| String::from_utf8(out.stdout).ok())
                    .map(|s| PathBuf::from(s.trim()))
            })
            .or_else(|| option_env!("SYSROOT").map(PathBuf::from))
            .or_else(|| {
                let home = option_env!("RUSTUP_HOME")
                    .or(option_env!("MULTIRUST_HOME"))
                    .map(ToString::to_string);
                let toolchain = option_env!("RUSTUP_TOOLCHAIN")
                    .or(option_env!("MULTIRUST_TOOLCHAIN"))
                    .map(ToString::to_string);
                toolchain_path(home, toolchain)
            })
            .map(|pb| pb.to_string_lossy().to_string())
            .expect("need to specify SYSROOT env var during clippy compilation, or use rustup or multirust");

        // make "clippy-driver --rustc" work like a subcommand that passes further args to "rustc"
        // for example `clippy-driver --rustc --version` will print the rustc version that clippy-driver
        // uses
        if let Some(pos) = orig_args.iter().position(|arg| arg == "--rustc") {
            orig_args.remove(pos);
            orig_args[0] = "rustc".to_string();

            // if we call "rustc", we need to pass --sysroot here as well
            let mut args: Vec<String> = orig_args.clone();
            if !have_sys_root_arg {
                args.extend(vec!["--sysroot".into(), sys_root]);
            };

            return rustc_driver::run_compiler(&args, &mut DefaultCallbacks, None, None);
        }

        if orig_args.iter().any(|a| a == "--version" || a == "-V") {
            let version_info = rustc_tools_util::get_version_info!();
            println!("{}", version_info);
            exit(0);
        }

        // Setting RUSTC_WRAPPER causes Cargo to pass 'rustc' as the first argument.
        // We're invoking the compiler programmatically, so we ignore this.
        let wrapper_mode = orig_args.get(1).map(Path::new).and_then(Path::file_stem) == Some("rustc".as_ref());

        if wrapper_mode {
            // we still want to be able to invoke it normally though
            orig_args.remove(1);
        }

        if !wrapper_mode && (orig_args.iter().any(|a| a == "--help" || a == "-h") || orig_args.len() == 1) {
            display_help();
            exit(0);
        }

        // `-W help` (and -A/-D/-F) should list Clippy's lints, not rustc's.
        let should_describe_lints = || {
            let args: Vec<_> = env::args().collect();
            args.windows(2)
                .any(|args| args[1] == "help" && matches!(args[0].as_str(), "-W" | "-A" | "-D" | "-F"))
        };

        if !wrapper_mode && should_describe_lints() {
            describe_lints();
            exit(0);
        }

        // this conditional check for the --sysroot flag is there so users can call
        // `clippy_driver` directly
        // without having to pass --sysroot or anything
        let mut args: Vec<String> = orig_args.clone();
        if !have_sys_root_arg {
            args.extend(vec!["--sysroot".into(), sys_root]);
        };

        // this check ensures that dependencies are built but not linted and the final
        // crate is linted but not built
        let clippy_enabled = env::var("CLIPPY_TESTS").map_or(false, |val| val == "true")
            || arg_value(&orig_args, "--cap-lints", |val| val == "allow").is_none();

        if clippy_enabled {
            args.extend(vec!["--cfg".into(), r#"feature="cargo-clippy""#.into()]);
            if let Ok(extra_args) = env::var("CLIPPY_ARGS") {
                args.extend(extra_args.split("__CLIPPY_HACKERY__").filter_map(|s| {
                    if s.is_empty() {
                        None
                    } else {
                        Some(s.to_string())
                    }
                }));
            }
        }

        let mut clippy = ClippyCallbacks;
        let mut default = DefaultCallbacks;
        let callbacks: &mut (dyn rustc_driver::Callbacks + Send) =
            if clippy_enabled { &mut clippy } else { &mut default };
        rustc_driver::run_compiler(&args, callbacks, None, None)
    }))
}
Auto merge of #6094 - ebroto:rustup, r=ebroto
Rustup
changelog: none
r? `@ghost`
#![feature(rustc_private)]
#![cfg_attr(feature = "deny-warnings", deny(warnings))]
// warn on lints, that are included in `rust-lang/rust`s bootstrap
#![warn(rust_2018_idioms, unused_lifetimes)]
// warn on rustc internal lints
#![deny(rustc::internal)]
// FIXME: switch to something more ergonomic here, once available.
// (Currently there is no way to opt into sysroot crates without `extern crate`.)
extern crate rustc_data_structures;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_interface;
extern crate rustc_middle;
use rustc_interface::interface;
use rustc_middle::ty::TyCtxt;
use rustc_tools_util::VersionInfo;
use lazy_static::lazy_static;
use std::borrow::Cow;
use std::env;
use std::ops::Deref;
use std::panic;
use std::path::{Path, PathBuf};
use std::process::{exit, Command};
mod lintlist;
/// If a command-line option matches `find_arg`, then apply the predicate `pred` on its value. If
/// true, then return it. The parameter is assumed to be either `--arg=value` or `--arg value`.
fn arg_value<'a, T: Deref<Target = str>>(
    args: &'a [T],
    find_arg: &str,
    pred: impl Fn(&str) -> bool,
) -> Option<&'a str> {
    let mut iter = args.iter().map(Deref::deref);
    while let Some(current) = iter.next() {
        // Split off an inline `=value`, if any.
        let mut parts = current.splitn(2, '=');
        if parts.next() == Some(find_arg) {
            // The value is either after the `=` or the next argument entirely.
            if let Some(value) = parts.next().or_else(|| iter.next()) {
                if pred(value) {
                    return Some(value);
                }
            }
        }
    }
    None
}
#[test]
fn test_arg_value() {
    let args = &["--bar=bar", "--foobar", "123", "--foo"];

    // Empty argument list -> never a match.
    assert_eq!(arg_value(&[] as &[&str], "--foobar", |_| true), None);
    // `--arg=value` form; the predicate decides whether the value counts.
    assert_eq!(arg_value(args, "--bar", |_| false), None);
    assert_eq!(arg_value(args, "--bar", |_| true), Some("bar"));
    assert_eq!(arg_value(args, "--bar", |p| p == "bar"), Some("bar"));
    assert_eq!(arg_value(args, "--bar", |p| p == "foo"), None);
    // `--arg value` form: the value is the following argument.
    assert_eq!(arg_value(args, "--foobar", |p| p == "foo"), None);
    assert_eq!(arg_value(args, "--foobar", |p| p == "123"), Some("123"));
    // Flag present but no value follows it.
    assert_eq!(arg_value(args, "--foo", |_| true), None);
}
/// No-op compiler callbacks: behave exactly like plain rustc.
struct DefaultCallbacks;
impl rustc_driver::Callbacks for DefaultCallbacks {}

/// Callbacks that register Clippy's lint passes with the compiler session.
struct ClippyCallbacks;
impl rustc_driver::Callbacks for ClippyCallbacks {
    fn config(&mut self, config: &mut interface::Config) {
        // Chain onto any lint-registration hook that was already set.
        let previous = config.register_lints.take();
        config.register_lints = Some(Box::new(move |sess, mut lint_store| {
            // technically we're ~guaranteed that this is none but might as well call anything that
            // is there already. Certainly it can't hurt.
            if let Some(previous) = &previous {
                (previous)(sess, lint_store);
            }

            let conf = clippy_lints::read_conf(&[], &sess);
            clippy_lints::register_plugins(&mut lint_store, &sess, &conf);
            clippy_lints::register_pre_expansion_lints(&mut lint_store);
            clippy_lints::register_renamed(&mut lint_store);
        }));

        // FIXME: #4825; This is required, because Clippy lints that are based on MIR have to be
        // run on the unoptimized MIR. On the other hand this results in some false negatives. If
        // MIR passes can be enabled / disabled separately, we should figure out, what passes to
        // use for Clippy.
        config.opts.debugging_opts.mir_opt_level = 0;
    }
}
/// Prints every Clippy lint and lint group, in the style of rustc's `-W help`.
#[allow(clippy::find_map, clippy::filter_map)]
fn describe_lints() {
    use lintlist::{Level, Lint, ALL_LINTS, LINT_LEVELS};
    use rustc_data_structures::fx::FxHashSet;

    println!(
        "
Available lint options:
-W <foo> Warn about <foo>
-A <foo> Allow <foo>
-D <foo> Deny <foo>
-F <foo> Forbid <foo> (deny <foo> and all attempts to override)
"
    );

    // Default level of the group a lint belongs to; every lint is assumed to
    // have a matching entry in LINT_LEVELS (hence the unwrap).
    let lint_level = |lint: &Lint| {
        LINT_LEVELS
            .iter()
            .find(|level_mapping| level_mapping.0 == lint.group)
            .map(|(_, level)| match level {
                Level::Allow => "allow",
                Level::Warn => "warn",
                Level::Deny => "deny",
            })
            .unwrap()
    };

    let mut lints: Vec<_> = ALL_LINTS.iter().collect();
    // The sort doesn't case-fold but it's doubtful we care.
    lints.sort_by_cached_key(|x: &&Lint| (lint_level(x), x.name));

    // Width of the widest printed lint name (including the `clippy::` prefix).
    let max_lint_name_len = lints
        .iter()
        .map(|lint| lint.name.len())
        .map(|len| len + "clippy::".len())
        .max()
        .unwrap_or(0);

    // Right-align `x` inside the name column.
    let padded = |x: &str| {
        let mut s = " ".repeat(max_lint_name_len - x.chars().count());
        s.push_str(x);
        s
    };

    let scoped = |x: &str| format!("clippy::{}", x);

    let lint_groups: FxHashSet<_> = lints.iter().map(|lint| lint.group).collect();

    println!("Lint checks provided by clippy:\n");
    println!(" {} {:7.7} meaning", padded("name"), "default");
    println!(" {} {:7.7} -------", padded("----"), "-------");

    let print_lints = |lints: &[&Lint]| {
        for lint in lints {
            // Lint names use `_` internally but are shown with `-`.
            let name = lint.name.replace("_", "-");
            println!(
                " {} {:7.7} {}",
                padded(&scoped(&name)),
                lint_level(lint),
                lint.desc
            );
        }
        println!("\n");
    };

    print_lints(&lints);

    // Group column must at least fit the synthetic `clippy::all` group.
    let max_group_name_len = std::cmp::max(
        "clippy::all".len(),
        lint_groups
            .iter()
            .map(|group| group.len())
            .map(|len| len + "clippy::".len())
            .max()
            .unwrap_or(0),
    );

    let padded_group = |x: &str| {
        let mut s = " ".repeat(max_group_name_len - x.chars().count());
        s.push_str(x);
        s
    };

    println!("Lint groups provided by clippy:\n");
    println!(" {} sub-lints", padded_group("name"));
    println!(" {} ---------", padded_group("----"));
    println!(" {} the set of all clippy lints", padded_group("clippy::all"));

    let print_lint_groups = || {
        for group in lint_groups {
            let name = group.to_lowercase().replace("_", "-");
            let desc = lints
                .iter()
                .filter(|&lint| lint.group == group)
                .map(|lint| lint.name)
                .map(|name| name.replace("_", "-"))
                .collect::<Vec<String>>()
                .join(", ");
            println!(" {} {}", padded_group(&scoped(&name)), desc);
        }
        println!("\n");
    };

    print_lint_groups();
}
/// Prints usage information for the `cargo clippy` subcommand.
fn display_help() {
    println!(
        "\
Checks a package to catch common mistakes and improve your Rust code.
Usage:
cargo clippy [options] [--] [<opts>...]
Common options:
-h, --help Print this message
--rustc Pass all args to rustc
-V, --version Print version info and exit
Other options are the same as `cargo check`.
To allow or deny a lint from the command line you can use `cargo clippy --`
with:
-W --warn OPT Set lint warnings
-A --allow OPT Set lint allowed
-D --deny OPT Set lint denied
-F --forbid OPT Set lint forbidden
You can use tool lints to allow or deny lints from your code, eg.:
#[allow(clippy::needless_lifetimes)]
"
    );
}
/// Issue URL included in ICE (internal compiler error) reports.
const BUG_REPORT_URL: &str = "https://github.com/rust-lang/rust-clippy/issues/new";

lazy_static! {
    /// The panic hook that was installed before ours. Initializing this
    /// static also installs `report_clippy_ice` as the process-wide hook.
    static ref ICE_HOOK: Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static> = {
        let hook = panic::take_hook();
        panic::set_hook(Box::new(|info| report_clippy_ice(info, BUG_REPORT_URL)));
        hook
    };
}
/// Panic hook: prints the original panic plus a Clippy-specific ICE report
/// pointing users at `bug_report_url`.
fn report_clippy_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str) {
    // Invoke our ICE handler, which prints the actual panic message and optionally a backtrace
    (*ICE_HOOK)(info);

    // Separate the output with an empty line
    eprintln!();

    let emitter = Box::new(rustc_errors::emitter::EmitterWriter::stderr(
        rustc_errors::ColorConfig::Auto,
        None,
        false,
        false,
        None,
        false,
    ));
    let handler = rustc_errors::Handler::with_emitter(true, None, emitter);

    // a .span_bug or .bug call has already printed what
    // it wants to print.
    if !info.payload().is::<rustc_errors::ExplicitBug>() {
        let d = rustc_errors::Diagnostic::new(rustc_errors::Level::Bug, "unexpected panic");
        handler.emit_diagnostic(&d);
    }

    let version_info = rustc_tools_util::get_version_info!();

    let xs: Vec<Cow<'static, str>> = vec![
        "the compiler unexpectedly panicked. this is a bug.".into(),
        format!("we would appreciate a bug report: {}", bug_report_url).into(),
        format!("Clippy version: {}", version_info).into(),
    ];

    for note in &xs {
        handler.note_without_error(&note);
    }

    // If backtraces are enabled, also print the query stack
    let backtrace = env::var_os("RUST_BACKTRACE").map_or(false, |x| &x != "0");
    if backtrace {
        TyCtxt::try_print_query_stack(&handler);
    }
}
/// Joins `<home>/toolchains/<toolchain>` into a path.
///
/// Returns `None` when either component is unknown.
fn toolchain_path(home: Option<String>, toolchain: Option<String>) -> Option<PathBuf> {
    let mut path = PathBuf::from(home?);
    path.push("toolchains");
    path.push(toolchain?);
    Some(path)
}
/// Entry point for `clippy-driver`: runs rustc with Clippy's lint callbacks
/// installed, unless linting is disabled for this invocation.
pub fn main() {
    rustc_driver::init_rustc_env_logger();
    lazy_static::initialize(&ICE_HOOK);
    exit(rustc_driver::catch_with_exit_code(move || {
        let mut orig_args: Vec<String> = env::args().collect();

        // Get the sysroot, looking from most specific to this invocation to the least:
        // - command line
        // - runtime environment
        //    - SYSROOT
        //    - RUSTUP_HOME, MULTIRUST_HOME, RUSTUP_TOOLCHAIN, MULTIRUST_TOOLCHAIN
        // - sysroot from rustc in the path
        // - compile-time environment
        //    - SYSROOT
        //    - RUSTUP_HOME, MULTIRUST_HOME, RUSTUP_TOOLCHAIN, MULTIRUST_TOOLCHAIN
        let sys_root_arg = arg_value(&orig_args, "--sysroot", |_| true);
        let have_sys_root_arg = sys_root_arg.is_some();
        let sys_root = sys_root_arg
            .map(PathBuf::from)
            .or_else(|| std::env::var("SYSROOT").ok().map(PathBuf::from))
            .or_else(|| {
                let home = std::env::var("RUSTUP_HOME")
                    .or_else(|_| std::env::var("MULTIRUST_HOME"))
                    .ok();
                let toolchain = std::env::var("RUSTUP_TOOLCHAIN")
                    .or_else(|_| std::env::var("MULTIRUST_TOOLCHAIN"))
                    .ok();
                toolchain_path(home, toolchain)
            })
            .or_else(|| {
                // Ask the `rustc` on PATH for its sysroot.
                Command::new("rustc")
                    .arg("--print")
                    .arg("sysroot")
                    .output()
                    .ok()
                    .and_then(|out| String::from_utf8(out.stdout).ok())
                    .map(|s| PathBuf::from(s.trim()))
            })
            .or_else(|| option_env!("SYSROOT").map(PathBuf::from))
            .or_else(|| {
                let home = option_env!("RUSTUP_HOME")
                    .or(option_env!("MULTIRUST_HOME"))
                    .map(ToString::to_string);
                let toolchain = option_env!("RUSTUP_TOOLCHAIN")
                    .or(option_env!("MULTIRUST_TOOLCHAIN"))
                    .map(ToString::to_string);
                toolchain_path(home, toolchain)
            })
            .map(|pb| pb.to_string_lossy().to_string())
            .expect("need to specify SYSROOT env var during clippy compilation, or use rustup or multirust");

        // make "clippy-driver --rustc" work like a subcommand that passes further args to "rustc"
        // for example `clippy-driver --rustc --version` will print the rustc version that clippy-driver
        // uses
        if let Some(pos) = orig_args.iter().position(|arg| arg == "--rustc") {
            orig_args.remove(pos);
            orig_args[0] = "rustc".to_string();

            // if we call "rustc", we need to pass --sysroot here as well
            let mut args: Vec<String> = orig_args.clone();
            if !have_sys_root_arg {
                args.extend(vec!["--sysroot".into(), sys_root]);
            };

            return rustc_driver::run_compiler(&args, &mut DefaultCallbacks, None, None, None);
        }

        if orig_args.iter().any(|a| a == "--version" || a == "-V") {
            let version_info = rustc_tools_util::get_version_info!();
            println!("{}", version_info);
            exit(0);
        }

        // Setting RUSTC_WRAPPER causes Cargo to pass 'rustc' as the first argument.
        // We're invoking the compiler programmatically, so we ignore this.
        let wrapper_mode = orig_args.get(1).map(Path::new).and_then(Path::file_stem) == Some("rustc".as_ref());

        if wrapper_mode {
            // we still want to be able to invoke it normally though
            orig_args.remove(1);
        }

        if !wrapper_mode && (orig_args.iter().any(|a| a == "--help" || a == "-h") || orig_args.len() == 1) {
            display_help();
            exit(0);
        }

        // `-W help` (and -A/-D/-F) should list Clippy's lints, not rustc's.
        let should_describe_lints = || {
            let args: Vec<_> = env::args().collect();
            args.windows(2)
                .any(|args| args[1] == "help" && matches!(args[0].as_str(), "-W" | "-A" | "-D" | "-F"))
        };

        if !wrapper_mode && should_describe_lints() {
            describe_lints();
            exit(0);
        }

        // this conditional check for the --sysroot flag is there so users can call
        // `clippy_driver` directly
        // without having to pass --sysroot or anything
        let mut args: Vec<String> = orig_args.clone();
        if !have_sys_root_arg {
            args.extend(vec!["--sysroot".into(), sys_root]);
        };

        // this check ensures that dependencies are built but not linted and the final
        // crate is linted but not built
        let clippy_enabled = env::var("CLIPPY_TESTS").map_or(false, |val| val == "true")
            || arg_value(&orig_args, "--cap-lints", |val| val == "allow").is_none();

        if clippy_enabled {
            args.extend(vec!["--cfg".into(), r#"feature="cargo-clippy""#.into()]);
            if let Ok(extra_args) = env::var("CLIPPY_ARGS") {
                args.extend(extra_args.split("__CLIPPY_HACKERY__").filter_map(|s| {
                    if s.is_empty() {
                        None
                    } else {
                        Some(s.to_string())
                    }
                }));
            }
        }

        let mut clippy = ClippyCallbacks;
        let mut default = DefaultCallbacks;
        let callbacks: &mut (dyn rustc_driver::Callbacks + Send) =
            if clippy_enabled { &mut clippy } else { &mut default };
        rustc_driver::run_compiler(&args, callbacks, None, None, None)
    }))
}
|
/*!
This module defines the various error types that can be produced by a failed conversion.
*/
use std::any::Any;
use std::error::Error;
use std::fmt::{self, Debug, Display};
use misc::{Saturated, InvalidSentinel, SignedInfinity};
/// A general error enumeration that subsumes all other conversion errors.
///
/// It acts as a "catch-all" into which every more specific error type
/// in this module can be converted.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub enum GeneralError {
    /// Input underflowed the target type.
    Underflow,
    /// Input overflowed the target type.
    Overflow,
    /// Input was not representable in the target type.
    Unrepresentable,
}

impl Display for GeneralError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Render the same text that `Error::description` reports.
        fmt.write_str(self.description())
    }
}

impl Error for GeneralError {
    fn description(&self) -> &str {
        match *self {
            GeneralError::Underflow => "conversion resulted in underflow",
            GeneralError::Overflow => "conversion resulted in overflow",
            GeneralError::Unrepresentable => "could not convert unrepresentable value",
        }
    }
}
// Promotions into `GeneralError` from each of the narrower error types,
// so `?`/`From` can unify any conversion error into the catch-all enum.

impl From<NoError> for GeneralError {
    /// Unreachable in practice: `NoError` is uninhabited, so no value can
    /// ever be passed here.
    fn from(_: NoError) -> GeneralError {
        panic!("cannot convert NoError into GeneralError");
    }
}

impl<T> From<Unrepresentable<T>> for GeneralError {
    // The offending value itself is dropped; only the error kind survives.
    fn from(_: Unrepresentable<T>) -> GeneralError {
        GeneralError::Unrepresentable
    }
}

impl From<Underflow> for GeneralError {
    fn from(_: Underflow) -> GeneralError {
        GeneralError::Underflow
    }
}

impl From<Overflow> for GeneralError {
    fn from(_: Overflow) -> GeneralError {
        GeneralError::Overflow
    }
}

impl From<RangeError> for GeneralError {
    fn from(e: RangeError) -> GeneralError {
        use self::RangeError as R;
        use self::GeneralError as G;
        match e {
            R::Underflow => G::Underflow,
            R::Overflow => G::Overflow,
        }
    }
}

impl From<FloatError> for GeneralError {
    fn from(e: FloatError) -> GeneralError {
        use self::FloatError as F;
        use self::GeneralError as G;
        match e {
            F::Underflow => G::Underflow,
            F::Overflow => G::Overflow,
            // NaN carries no range direction, so it maps to `Unrepresentable`.
            F::NotANumber => G::Unrepresentable,
        }
    }
}
/// Indicates that it is not possible for the conversion to fail.
///
/// This is an uninhabited type: no value of it can ever be constructed,
/// so any code path holding one is statically unreachable.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub enum NoError {}

impl Display for NoError {
    fn fmt(&self, _: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Empty match on an uninhabited type: provably never executed.
        match *self {}
    }
}

impl Error for NoError {
    fn description(&self) -> &str {
        match *self {}
    }
}
/// Indicates that the conversion failed because the value was not representable.
///
/// Carries the offending input value so callers can report or recover it.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub struct Unrepresentable<T>(pub T);

impl<T: Display> Display for Unrepresentable<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "could not convert unrepresentable value: {}", self.0)
    }
}

impl<T: Debug + Display + Any> Error for Unrepresentable<T> {
    fn description(&self) -> &str {
        "could not convert unrepresentable value"
    }
}
/// Indicates that the conversion failed due to an underflow.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub struct Underflow;

impl Display for Underflow {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Render the same text that `Error::description` reports.
        fmt.write_str(self.description())
    }
}

impl Error for Underflow {
    fn description(&self) -> &str {
        "conversion resulted in underflow"
    }
}
impl From<NoError> for Underflow {
    /// Unreachable in practice: `NoError` is uninhabited, so this exists
    /// only to satisfy trait bounds.
    fn from(_: NoError) -> Underflow {
        panic!("cannot convert NoError into Underflow");
    }
}
/// Indicates that the conversion failed due to an overflow.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub struct Overflow;

impl Display for Overflow {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Render the same text that `Error::description` reports.
        fmt.write_str(self.description())
    }
}

impl Error for Overflow {
    fn description(&self) -> &str {
        "conversion resulted in overflow"
    }
}
impl From<NoError> for Overflow {
    /// Unreachable in practice: `NoError` is uninhabited, so this exists
    /// only to satisfy trait bounds.
    fn from(_: NoError) -> Overflow {
        panic!("cannot convert NoError into Overflow");
    }
}
/// Indicates that a conversion from a floating point type failed.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub enum FloatError {
    /// Input underflowed the target type.
    Underflow,
    /// Input overflowed the target type.
    Overflow,
    /// Input was not-a-number, which the target type could not represent.
    NotANumber,
}

impl Display for FloatError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Render the same text that `Error::description` reports.
        fmt.write_str(self.description())
    }
}

impl Error for FloatError {
    fn description(&self) -> &str {
        match *self {
            FloatError::Underflow => "conversion resulted in underflow",
            FloatError::Overflow => "conversion resulted in overflow",
            FloatError::NotANumber => "conversion target does not support not-a-number",
        }
    }
}
// Promotions into `FloatError` from the narrower error types.

impl From<NoError> for FloatError {
    /// Unreachable in practice: `NoError` is uninhabited.
    fn from(_: NoError) -> FloatError {
        panic!("cannot convert NoError into FloatError");
    }
}

impl From<Underflow> for FloatError {
    fn from(_: Underflow) -> FloatError {
        FloatError::Underflow
    }
}

impl From<Overflow> for FloatError {
    fn from(_: Overflow) -> FloatError {
        FloatError::Overflow
    }
}

impl From<RangeError> for FloatError {
    fn from(e: RangeError) -> FloatError {
        use self::RangeError as R;
        use self::FloatError as F;
        match e {
            R::Underflow => F::Underflow,
            R::Overflow => F::Overflow,
        }
    }
}
/// Indicates that a conversion failed due to a range error.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub enum RangeError {
    /// Input underflowed the target type.
    Underflow,
    /// Input overflowed the target type.
    Overflow,
}

impl Display for RangeError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Render the same text that `Error::description` reports.
        fmt.write_str(self.description())
    }
}

impl Error for RangeError {
    fn description(&self) -> &str {
        match *self {
            RangeError::Underflow => "conversion resulted in underflow",
            RangeError::Overflow => "conversion resulted in overflow",
        }
    }
}
// Promotions into `RangeError` from the unit error types.

impl From<NoError> for RangeError {
    /// Unreachable in practice: `NoError` is uninhabited.
    fn from(_: NoError) -> RangeError {
        panic!("cannot convert NoError into RangeError");
    }
}

impl From<Underflow> for RangeError {
    fn from(_: Underflow) -> RangeError {
        RangeError::Underflow
    }
}

impl From<Overflow> for RangeError {
    fn from(_: Overflow) -> RangeError {
        RangeError::Overflow
    }
}
/**
Safely unwrap a `Result` that cannot contain an error.
*/
pub trait UnwrapOk<T> {
    /**
    Unwraps a `Result` without possibility of failing.

    Technically, this is not necessary; it's provided simply to make user code a little clearer.
    */
    fn unwrap_ok(self) -> T;
}
impl<T> UnwrapOk<T> for Result<T, NoError> {
    /// Unwraps a `Result` whose error type is uninhabited.
    ///
    /// Because `NoError` has no values, the `Err` arm can never be taken.
    /// Matching on the uninhabited error lets the compiler prove that,
    /// instead of the previous `loop {}` marker which would silently hang
    /// if the impossible ever happened.
    fn unwrap_ok(self) -> T {
        match self {
            Ok(v) => v,
            // Statically unreachable: the empty match on `NoError` diverges.
            Err(no_error) => match no_error {},
        }
    }
}
/**
Unwrap a conversion by saturating to infinity.
*/
pub trait UnwrapOrInf {
    /// The result of unwrapping.
    type Output;

    /**
    Either unwraps the successfully converted value, or saturates to infinity in the "direction" of overflow/underflow.
    */
    fn unwrap_or_inf(self) -> Self::Output;
}

/**
Unwrap a conversion by replacing a failure with an invalid sentinel value.
*/
pub trait UnwrapOrInvalid {
    /// The result of unwrapping.
    type Output;

    /**
    Either unwraps the successfully converted value, or returns the output type's invalid sentinel value.
    */
    fn unwrap_or_invalid(self) -> Self::Output;
}

/**
Unwrap a conversion by saturating.
*/
pub trait UnwrapOrSaturate {
    /// The result of unwrapping.
    type Output;

    /**
    Either unwraps the successfully converted value, or saturates in the "direction" of overflow/underflow.
    */
    fn unwrap_or_saturate(self) -> Self::Output;
}
// Unwrapping helpers for conversion results. Each impl keys off a marker
// trait from `misc` that supplies the substitute value.

impl<T, E> UnwrapOrInf for Result<T, E>
where T: SignedInfinity, E: Into<RangeError> {
    type Output = T;
    fn unwrap_or_inf(self) -> T {
        use self::RangeError::*;
        // Normalize the error to a RangeError, then pick the infinity
        // matching the direction of the failure.
        match self.map_err(Into::into) {
            Ok(v) => v,
            Err(Underflow) => T::neg_infinity(),
            Err(Overflow) => T::pos_infinity(),
        }
    }
}

impl<T, E> UnwrapOrInvalid for Result<T, E>
where T: InvalidSentinel {
    type Output = T;
    fn unwrap_or_invalid(self) -> T {
        // Any failure collapses to the type's designated sentinel value.
        match self {
            Ok(v) => v,
            Err(..) => T::invalid_sentinel(),
        }
    }
}

impl<T, E> UnwrapOrSaturate for Result<T, E>
where T: Saturated, E: Into<RangeError> {
    type Output = T;
    fn unwrap_or_saturate(self) -> T {
        use self::RangeError::*;
        // Saturate toward the bound that was exceeded.
        match self.map_err(Into::into) {
            Ok(v) => v,
            Err(Underflow) => T::saturated_min(),
            Err(Overflow) => T::saturated_max(),
        }
    }
}
Fixed slightly dodgy GeneralError description.
/*!
This module defines the various error types that can be produced by a failed conversion.
*/
use std::any::Any;
use std::error::Error;
use std::fmt::{self, Debug, Display};
use misc::{Saturated, InvalidSentinel, SignedInfinity};
/// A general error enumeration that subsumes all other conversion errors.
///
/// This exists primarily as a "catch-all" for reliably unifying various
/// different kinds of conversion errors.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub enum GeneralError {
    /// Input underflowed the target type.
    Underflow,
    /// Input overflowed the target type.
    Overflow,
    /// Input was not representable in the target type.
    Unrepresentable,
}

impl Display for GeneralError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Render the same text that `Error::description` reports.
        fmt.write_str(self.description())
    }
}

impl Error for GeneralError {
    fn description(&self) -> &str {
        match *self {
            GeneralError::Underflow => "conversion resulted in underflow",
            GeneralError::Overflow => "conversion resulted in overflow",
            GeneralError::Unrepresentable => "could not convert unrepresentable value",
        }
    }
}
// Promotions into `GeneralError` from each of the narrower error types,
// so `?`/`From` can unify any conversion error into the catch-all enum.

impl From<NoError> for GeneralError {
    /// Unreachable in practice: `NoError` is uninhabited, so no value can
    /// ever be passed here.
    fn from(_: NoError) -> GeneralError {
        panic!("cannot convert NoError into GeneralError");
    }
}

impl<T> From<Unrepresentable<T>> for GeneralError {
    // The offending value itself is dropped; only the error kind survives.
    fn from(_: Unrepresentable<T>) -> GeneralError {
        GeneralError::Unrepresentable
    }
}

impl From<Underflow> for GeneralError {
    fn from(_: Underflow) -> GeneralError {
        GeneralError::Underflow
    }
}

impl From<Overflow> for GeneralError {
    fn from(_: Overflow) -> GeneralError {
        GeneralError::Overflow
    }
}

impl From<RangeError> for GeneralError {
    fn from(e: RangeError) -> GeneralError {
        use self::RangeError as R;
        use self::GeneralError as G;
        match e {
            R::Underflow => G::Underflow,
            R::Overflow => G::Overflow,
        }
    }
}

impl From<FloatError> for GeneralError {
    fn from(e: FloatError) -> GeneralError {
        use self::FloatError as F;
        use self::GeneralError as G;
        match e {
            F::Underflow => G::Underflow,
            F::Overflow => G::Overflow,
            // NaN carries no range direction, so it maps to `Unrepresentable`.
            F::NotANumber => G::Unrepresentable,
        }
    }
}
/// Indicates that it is not possible for the conversion to fail.
///
/// This is an uninhabited type: no value of it can ever be constructed,
/// so any code path holding one is statically unreachable.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub enum NoError {}

impl Display for NoError {
    fn fmt(&self, _: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Empty match on an uninhabited type: provably never executed.
        match *self {}
    }
}

impl Error for NoError {
    fn description(&self) -> &str {
        match *self {}
    }
}
/// Indicates that the conversion failed because the value was not representable.
///
/// Carries the offending input value so callers can report or recover it.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub struct Unrepresentable<T>(pub T);

impl<T: Display> Display for Unrepresentable<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "could not convert unrepresentable value: {}", self.0)
    }
}

impl<T: Debug + Display + Any> Error for Unrepresentable<T> {
    fn description(&self) -> &str {
        "could not convert unrepresentable value"
    }
}
/// Indicates that the conversion failed due to an underflow.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub struct Underflow;

impl Display for Underflow {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Render the same text that `Error::description` reports.
        fmt.write_str(self.description())
    }
}

impl Error for Underflow {
    fn description(&self) -> &str {
        "conversion resulted in underflow"
    }
}
impl From<NoError> for Underflow {
    /// Unreachable in practice: `NoError` is uninhabited, so this exists
    /// only to satisfy trait bounds.
    fn from(_: NoError) -> Underflow {
        panic!("cannot convert NoError into Underflow");
    }
}
/// Indicates that the conversion failed due to an overflow.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub struct Overflow;

impl Display for Overflow {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Render the same text that `Error::description` reports.
        fmt.write_str(self.description())
    }
}

impl Error for Overflow {
    fn description(&self) -> &str {
        "conversion resulted in overflow"
    }
}
impl From<NoError> for Overflow {
    /// Unreachable in practice: `NoError` is uninhabited, so this exists
    /// only to satisfy trait bounds.
    fn from(_: NoError) -> Overflow {
        panic!("cannot convert NoError into Overflow");
    }
}
/// Indicates that a conversion from a floating point type failed.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub enum FloatError {
    /// Input underflowed the target type.
    Underflow,
    /// Input overflowed the target type.
    Overflow,
    /// Input was not-a-number, which the target type could not represent.
    NotANumber,
}

impl Display for FloatError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Render the same text that `Error::description` reports.
        fmt.write_str(self.description())
    }
}

impl Error for FloatError {
    fn description(&self) -> &str {
        match *self {
            FloatError::Underflow => "conversion resulted in underflow",
            FloatError::Overflow => "conversion resulted in overflow",
            FloatError::NotANumber => "conversion target does not support not-a-number",
        }
    }
}
// Promotions into `FloatError` from the narrower error types.

impl From<NoError> for FloatError {
    /// Unreachable in practice: `NoError` is uninhabited.
    fn from(_: NoError) -> FloatError {
        panic!("cannot convert NoError into FloatError");
    }
}

impl From<Underflow> for FloatError {
    fn from(_: Underflow) -> FloatError {
        FloatError::Underflow
    }
}

impl From<Overflow> for FloatError {
    fn from(_: Overflow) -> FloatError {
        FloatError::Overflow
    }
}

impl From<RangeError> for FloatError {
    fn from(e: RangeError) -> FloatError {
        use self::RangeError as R;
        use self::FloatError as F;
        match e {
            R::Underflow => F::Underflow,
            R::Overflow => F::Overflow,
        }
    }
}
/// Indicates that a conversion failed due to a range error.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub enum RangeError {
    /// Input underflowed the target type.
    Underflow,
    /// Input overflowed the target type.
    Overflow,
}

impl Display for RangeError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Render the same text that `Error::description` reports.
        fmt.write_str(self.description())
    }
}

impl Error for RangeError {
    fn description(&self) -> &str {
        match *self {
            RangeError::Underflow => "conversion resulted in underflow",
            RangeError::Overflow => "conversion resulted in overflow",
        }
    }
}
// Promotions into `RangeError` from the unit error types.

impl From<NoError> for RangeError {
    /// Unreachable in practice: `NoError` is uninhabited.
    fn from(_: NoError) -> RangeError {
        panic!("cannot convert NoError into RangeError");
    }
}

impl From<Underflow> for RangeError {
    fn from(_: Underflow) -> RangeError {
        RangeError::Underflow
    }
}

impl From<Overflow> for RangeError {
    fn from(_: Overflow) -> RangeError {
        RangeError::Overflow
    }
}
/**
Safely unwrap a `Result` that cannot contain an error.
*/
pub trait UnwrapOk<T> {
    /**
    Unwraps a `Result` without possibility of failing.

    Technically, this is not necessary; it's provided simply to make user code a little clearer.
    */
    fn unwrap_ok(self) -> T;
}
impl<T> UnwrapOk<T> for Result<T, NoError> {
    /// Unwraps a `Result` whose error type is uninhabited.
    ///
    /// Because `NoError` has no values, the `Err` arm can never be taken.
    /// Matching on the uninhabited error lets the compiler prove that,
    /// instead of the previous `loop {}` marker which would silently hang
    /// if the impossible ever happened.
    fn unwrap_ok(self) -> T {
        match self {
            Ok(v) => v,
            // Statically unreachable: the empty match on `NoError` diverges.
            Err(no_error) => match no_error {},
        }
    }
}
/**
Unwrap a conversion by saturating to infinity.
*/
pub trait UnwrapOrInf {
    /// The result of unwrapping.
    type Output;

    /**
    Either unwraps the successfully converted value, or saturates to infinity in the "direction" of overflow/underflow.
    */
    fn unwrap_or_inf(self) -> Self::Output;
}

/**
Unwrap a conversion by replacing a failure with an invalid sentinel value.
*/
pub trait UnwrapOrInvalid {
    /// The result of unwrapping.
    type Output;

    /**
    Either unwraps the successfully converted value, or returns the output type's invalid sentinel value.
    */
    fn unwrap_or_invalid(self) -> Self::Output;
}

/**
Unwrap a conversion by saturating.
*/
pub trait UnwrapOrSaturate {
    /// The result of unwrapping.
    type Output;

    /**
    Either unwraps the successfully converted value, or saturates in the "direction" of overflow/underflow.
    */
    fn unwrap_or_saturate(self) -> Self::Output;
}
// Unwrapping helpers for conversion results. Each impl keys off a marker
// trait from `misc` that supplies the substitute value.

impl<T, E> UnwrapOrInf for Result<T, E>
where T: SignedInfinity, E: Into<RangeError> {
    type Output = T;
    fn unwrap_or_inf(self) -> T {
        use self::RangeError::*;
        // Normalize the error to a RangeError, then pick the infinity
        // matching the direction of the failure.
        match self.map_err(Into::into) {
            Ok(v) => v,
            Err(Underflow) => T::neg_infinity(),
            Err(Overflow) => T::pos_infinity(),
        }
    }
}

impl<T, E> UnwrapOrInvalid for Result<T, E>
where T: InvalidSentinel {
    type Output = T;
    fn unwrap_or_invalid(self) -> T {
        // Any failure collapses to the type's designated sentinel value.
        match self {
            Ok(v) => v,
            Err(..) => T::invalid_sentinel(),
        }
    }
}

impl<T, E> UnwrapOrSaturate for Result<T, E>
where T: Saturated, E: Into<RangeError> {
    type Output = T;
    fn unwrap_or_saturate(self) -> T {
        use self::RangeError::*;
        // Saturate toward the bound that was exceeded.
        match self.map_err(Into::into) {
            Ok(v) => v,
            Err(Underflow) => T::saturated_min(),
            Err(Overflow) => T::saturated_max(),
        }
    }
}
|
/// Exports a given list of functions to an Erlang module.
///
/// This should be called exactly once in every NIF library. It will wrap and export the given rust
/// functions into the Erlang module.
///
/// The first argument is a string specifying what Erlang/Elixir module you want the function
/// exported into. In Erlang this will simply be the atom you named your module. In Elixir, all
/// modules are prefixed with `Elixir.<module path>`
///
/// The second argument is a list of 3-tuples. Each tuple contains information on a single exported
/// NIF function. The first tuple item is the name you want to export the function into, the second
/// is the arity (number of arguments) of the exported function. The third argument is an
/// identifier of a rust function. This is where your actual NIF will be implemented.
///
/// The third argument is an `Option<fn(env: &NifEnv, load_info: NifTerm) -> bool>`. If this is
/// `Some`, the function will execute when the NIF is first loaded by the BEAM.
#[macro_export]
macro_rules! rustler_export_nifs {
    // 3-tuple form: default every NIF to a normal (non-dirty) scheduler job.
    ($name:expr, [$( ($nif_name:expr, $nif_arity:expr, $nif_fun:path) ),*], $on_load:expr) => (
        rustler_export_nifs!($name, [$( ($nif_name, $nif_arity, $nif_fun, rustler::wrapper::nif_interface::ErlNifTaskFlags::ERL_NIF_NORMAL_JOB) ),*], $on_load);
    );
    // 4-tuple form: the caller supplies the scheduling flags explicitly.
    ($name:expr, [$( ($nif_name:expr, $nif_arity:expr, $nif_fun:path, $nif_flag:expr) ),*], $on_load:expr) => (
        static mut NIF_ENTRY: Option<rustler::wrapper::nif_interface::DEF_NIF_ENTRY> = None;

        // Entry point looked up by the BEAM when it loads the shared library.
        #[no_mangle]
        pub extern "C" fn nif_init() -> *const rustler::wrapper::nif_interface::DEF_NIF_ENTRY {
            // TODO: If a NIF ever gets unloaded, there will be a memory leak! Fix this!
            // TODO: If an unwrap ever happens, we will unwind right into C! Fix this!

            extern "C" fn nif_load(
                env: rustler::wrapper::nif_interface::NIF_ENV,
                _priv_data: *mut *mut rustler::codegen_runtime::c_void,
                load_info: rustler::wrapper::nif_interface::NIF_TERM)
                -> rustler::codegen_runtime::c_int {
                rustler::codegen_runtime::handle_nif_init_call($on_load, env, load_info)
            }

            let fun_entries = [
                $(
                    rustler::wrapper::nif_interface::DEF_NIF_FUNC {
                        name: ::std::ffi::CString::new($nif_name).unwrap().into_raw() as *const u8,
                        arity: $nif_arity,
                        function: {
                            // One C-ABI trampoline per exported NIF; it forwards
                            // into the user's Rust function via `handle_nif_call`.
                            extern "C" fn nif_func(
                                env: rustler::wrapper::nif_interface::NIF_ENV,
                                argc: rustler::codegen_runtime::c_int,
                                argv: *const rustler::wrapper::nif_interface::NIF_TERM)
                                -> rustler::wrapper::nif_interface::NIF_TERM {
                                rustler::codegen_runtime::handle_nif_call($nif_fun, $nif_arity, env, argc, argv)
                            }
                            nif_func
                        },
                        flags: $nif_flag as u32,
                    }
                ),*
            ];
            let fun_entries_len = fun_entries.len();
            // Leaked deliberately: the entry table must outlive the library.
            let fun_entries_ptr = Box::into_raw(Box::new(fun_entries));

            let entry = rustler::wrapper::nif_interface::DEF_NIF_ENTRY {
                major: rustler::wrapper::nif_interface::NIF_MAJOR_VERSION,
                minor: rustler::wrapper::nif_interface::NIF_MINOR_VERSION,
                name: ::std::ffi::CString::new($name).unwrap().into_raw() as *const u8,
                num_of_funcs: fun_entries_len as rustler::codegen_runtime::c_int,
                funcs: fun_entries_ptr as *const rustler::wrapper::nif_interface::DEF_NIF_FUNC,
                load: Some(nif_load),
                reload: None,
                upgrade: None,
                unload: None,
                vm_variant: b"beam.vanilla\x00" as *const u8,
                options: 0,
            };

            unsafe { NIF_ENTRY = Some(entry) };
            unsafe { NIF_ENTRY.as_ref().unwrap() }
        }
    );
}
fix scheduling flag in rustler_export_nifs! macro
/// Exports a given list of functions to an Erlang module.
///
/// This should be called exactly once in every NIF library. It will wrap and export the given rust
/// functions into the Erlang module.
///
/// The first argument is a string specifying what Erlang/Elixir module you want the function
/// exported into. In Erlang this will simply be the atom you named your module. In Elixir, all
/// modules are prefixed with `Elixir.<module path>`
///
/// The second argument is a list of tuples. Each tuple contains information on a single exported
/// NIF function. The first tuple item is the name you want to export the function into, the second
/// is the arity (number of arguments) of the exported function. The third item is an
/// identifier of a rust function. This is where your actual NIF will be implemented. An optional
/// fourth item is an `ErlNifTaskFlags` scheduling flag; when omitted it defaults to
/// `ERL_NIF_NORMAL_JOB`.
///
/// The third argument is an `Option<fn(env: &NifEnv, load_info: NifTerm) -> bool>`. If this is
/// `Some`, the function will execute when the NIF is first loaded by the BEAM.
#[macro_export]
macro_rules! rustler_export_nifs {
    // Each `$exported_nif` is an opaque (…) token tree; the `internal` rules
    // below expand it, so 3- and 4-tuples may be mixed in one invocation.
    ($name:expr, [$( $exported_nif:tt ),*], $on_load:expr) => {
        static mut NIF_ENTRY: Option<rustler::wrapper::nif_interface::DEF_NIF_ENTRY> = None;

        // Entry point the BEAM looks up when it loads the shared library.
        #[no_mangle]
        pub extern "C" fn nif_init() -> *const rustler::wrapper::nif_interface::DEF_NIF_ENTRY {
            // TODO: If a NIF ever gets unloaded, there will be a memory leak! Fix this!
            // TODO: If an unwrap ever happens, we will unwind right into C! Fix this!

            // Trampoline from the C `load` callback into the user-supplied `$on_load`.
            extern "C" fn nif_load(
                env: rustler::wrapper::nif_interface::NIF_ENV,
                _priv_data: *mut *mut rustler::codegen_runtime::c_void,
                load_info: rustler::wrapper::nif_interface::NIF_TERM)
                -> rustler::codegen_runtime::c_int {
                rustler::codegen_runtime::handle_nif_init_call($on_load, env, load_info)
            }

            let fun_entries = [
                $(rustler_export_nifs!(internal, $exported_nif)),*
            ];
            let fun_entries_len = fun_entries.len();
            // Leaked on purpose; the entry table must outlive nif_init.
            let fun_entries_ptr = Box::into_raw(Box::new(fun_entries));

            let entry = rustler::wrapper::nif_interface::DEF_NIF_ENTRY {
                major: rustler::wrapper::nif_interface::NIF_MAJOR_VERSION,
                minor: rustler::wrapper::nif_interface::NIF_MINOR_VERSION,
                name: ::std::ffi::CString::new($name).unwrap().into_raw() as *const u8,
                num_of_funcs: fun_entries_len as rustler::codegen_runtime::c_int,
                funcs: fun_entries_ptr as *const rustler::wrapper::nif_interface::DEF_NIF_FUNC,
                load: Some(nif_load),
                reload: None,
                upgrade: None,
                unload: None,
                vm_variant: b"beam.vanilla\x00" as *const u8,
                options: 0,
            };

            unsafe { NIF_ENTRY = Some(entry) };
            unsafe { NIF_ENTRY.as_ref().unwrap() }
        }
    };
    // Internal rule: 3-tuple form defaults the scheduling flag to a normal job.
    (internal, ($nif_name:expr, $nif_arity:expr, $nif_fun:path)) => {
        rustler_export_nifs!(internal, ($nif_name, $nif_arity, $nif_fun, rustler::wrapper::nif_interface::ErlNifTaskFlags::ERL_NIF_NORMAL_JOB))
    };
    // Internal rule: expands one (name, arity, fn, flag) tuple into a DEF_NIF_FUNC.
    (internal, ($nif_name:expr, $nif_arity:expr, $nif_fun:path, $nif_flag:expr)) => {
        rustler::wrapper::nif_interface::DEF_NIF_FUNC {
            // Leaked on purpose: the BEAM keeps this pointer for the lifetime
            // of the library.
            name: ::std::ffi::CString::new($nif_name).unwrap().into_raw() as *const u8,
            arity: $nif_arity,
            function: {
                // One C-ABI shim is generated per exported NIF.
                extern "C" fn nif_func(
                    env: rustler::wrapper::nif_interface::NIF_ENV,
                    argc: rustler::codegen_runtime::c_int,
                    argv: *const rustler::wrapper::nif_interface::NIF_TERM)
                    -> rustler::wrapper::nif_interface::NIF_TERM {
                    rustler::codegen_runtime::handle_nif_call($nif_fun, $nif_arity, env, argc, argv)
                }
                nif_func
            },
            flags: $nif_flag as u32,
        }
    };
}
|
use std;
use concurrent_help::to_concurrent_on_section;
use num_cpus;
// macro_rules! if_any_divides {
// ( $( $div:tt ),* | $n:ident $if_b:block ) => { if $(($n % $div == 0))|* $if_b};
// }
// macro_rules! if_sm_prime_divides{
// ( $n:ident $if_b:block ) => {if_any_divides!(
// 2, 3, 5, 7, 11, 13,
// 17, 19, 23, 29, 31,
// 37, 41, 43, 47, 53,
// 59, 61, 67, 71, 73,
// 79, 83, 89, 97, 101,
// 103, 107, 109, 113, 127,
// 131, 137, 139, 149, 151,
// 157, 163, 167, 173, 179,
// 181, 191, 193, 197, 199 | $n $if_b)};
// }
// Sets `$filter[$i]` (optionally shifted down by `$offset`) to true for each
// listed literal index `$i` that falls in the half-open range [$min, $max).
macro_rules! set_true_if_in_range{
    ( $( $i:tt ),* => $filter:ident + $offset:expr, $min:expr, $max:expr) => {
        $(if ($min <= $i) & ($max > $i) {$filter[$i-$offset] = true;})*
    };
    ( $( $i:tt ),* => $filter:ident, $min:expr, $max:expr) => {
        $(if ($min <= $i) & ($max > $i) {$filter[$i] = true;})*
    };
}
// macro_rules! set_sm_primes_true {
// ($filter:expr, $min:expr, $max:expr) => {
// set_true_if_in_range!(
// 2, 3, 5, 7, 11, 13,
// 17, 19, 23, 29, 31,
// 37, 41, 43, 47, 53,
// 59, 61, 67, 71, 73,
// 79, 83, 89, 97, 101,
// 103, 107, 109, 113, 127,
// 131, 137, 139, 149, 151,
// 157, 163, 167, 173, 179,
// 181, 191, 193, 197, 199 => $filter + $min, $min, $max)
// };
// }
/// Primality filter for `[0, max_num)` computed on `threads` worker threads.
pub fn prime_filter_concurrently(max_num: usize, threads: usize) -> Vec<bool>{
    let section_start = 0;
    prime_filter_section_concurrently(section_start, max_num, threads)
}
/// Primality filter for the section `[min_num, max_num)`, delegating the
/// fan-out across `threads` threads to `to_concurrent_on_section`.
pub fn prime_filter_section_concurrently(min_num:usize, max_num: usize, threads: usize) -> Vec<bool>{
    let sieve_one_section = prime_filter_section_sequentially;
    to_concurrent_on_section(sieve_one_section, min_num, max_num, threads)
}
/// Integer square root: returns `floor(sqrt(n))`.
///
/// Inputs below 2^53 go through the FPU: an `f64` mantissa is 53 bits wide,
/// so the conversion round-trips exactly there. Larger inputs use Newton's
/// method on integers.
fn int_sqrt(n:usize) -> usize{
    if n < (1 << 53) {
        return (n as f64).sqrt() as usize;
    }
    // Newton iteration x <- (x + n/x) / 2. Seeded above the root, the
    // sequence decreases monotonically until it hits floor(sqrt(n)), so we
    // stop as soon as it fails to shrink. Changes from the previous revision:
    // * the `0..=15` match arms were unreachable (those n take the float path
    //   above) and have been removed;
    // * the `new_x * new_x == n + 1` oscillation probe could overflow `usize`
    //   while the iterate was still far above the root; the monotone
    //   termination needs no squaring at all.
    let mut x = n;
    let mut next = (x + 1) >> 1;
    while next < x {
        x = next;
        next = (x + n / x) >> 1;
    }
    x
}
/// Smallest integer whose square is at least `n`, i.e. `ceil(sqrt(n))`.
fn ceil_sqrt(n:usize) -> usize{
    let root = int_sqrt(n);
    // Round up unless n is a perfect square.
    if root * root == n { root } else { root + 1 }
}
/// Primality filter for `[0, max_num)` using every available logical CPU.
pub fn prime_filter(max_num: usize) -> Vec<bool>{
    let cpu_count = num_cpus::get();
    prime_filter_concurrently(max_num, cpu_count)
}
/// Primality filter for the section `[min_num, max_num)` using all logical CPUs.
pub fn prime_filter_section(min_num:usize, max_num: usize) -> Vec<bool>{
    let cpu_count = num_cpus::get();
    prime_filter_section_concurrently(min_num, max_num, cpu_count)
}
/// Primality filter for `[0, max_num)`.
///
/// Tiny ranges use the naive sieve; the Atkin-based section sieve only pays
/// off with some headroom.
pub fn prime_filter_sequentially(max_num: usize) -> Vec<bool>{
    if max_num >= 100 {
        prime_filter_section(0, max_num)
    } else {
        slow_prime_filter(max_num)
    }
}
/// Sieve of Atkin over the half-open range `[min_num, max_num)`.
///
/// Slot `i` of the returned vector corresponds to the number `min_num + i`
/// and ends up true iff that number is prime. Each candidate is toggled once
/// per representation by one of three quadratic forms (an odd count marks a
/// prime candidate); afterwards every multiple of an odd square is cleared.
///
/// # Panics
/// Asserts that `min_num < max_num`.
pub fn prime_filter_section_sequentially(min_num:usize, max_num: usize) -> Vec<bool>{
    //Sieve of Atkin
    assert!(min_num<max_num);
    let mut prime_filter = vec![false; max_num-min_num];
    // 2, 3 and 5 are never produced by the quadratic forms; seed them directly.
    set_true_if_in_range!(2, 3, 5 => prime_filter + min_num, min_num, max_num);
    //Macro equivalent:
    // if (min_num <= 2) & (max_num > 2) {prime_filter[2-min_num] = true;}
    // if (min_num <= 3) & (max_num > 3) {prime_filter[3-min_num] = true;}
    // if (min_num <= 5) & (max_num > 5) {prime_filter[5-min_num] = true;}
    // y_sq walks the squares 1, 4, 9, ...; to_next_y_sq is the gap to the
    // next square (2y + 1). Squares divisible by 6 are skipped by the
    // do-while at the bottom of the loop.
    let (mut y_sq, mut to_next_y_sq) = (1, 3);
    while y_sq<max_num {
        if y_sq%2 == 1 {
            //CASE 1 (odd y only)
            //n_1 = 4x^2 + y^2 === 1 (mod 4)
            // When the section starts above y_sq, jump x to roughly the first
            // value where the form reaches min_num (inverting n = 4x^2 + y^2);
            // the step 8x + 4 is the gap between consecutive form values.
            let (mut n_1, mut to_next_n_1) = match y_sq < min_num {
                false => (y_sq+4, 12),
                _ => {
                    let min_num_x = (ceil_sqrt(min_num - y_sq) +1)/2;
                    (4*min_num_x*min_num_x + y_sq, 8*min_num_x + 4)
                },
            };
            loop{
                match n_1{
                    n if n >= max_num => break,
                    // Only residues coprime to 60 can be prime; toggle those.
                    n => {match n%60{
                        1 | 13 | 17 | 29 | 37 | 41 | 49 | 53 => prime_filter[n-min_num] ^= true,
                        _ => (),
                    };},
                };
                n_1 += to_next_n_1;
                to_next_n_1 += 8;
            };
        };
        if y_sq%3 == 1 {
            //CASE 2
            //n_2 = 3x^2 + y^2 === 1 (mod 6)
            // Same jump-ahead idea, inverting n = 3x^2 + y^2; step is 6x + 3.
            let (mut n_2, mut to_next_n_2) = match y_sq < min_num {
                false => (y_sq+3, 9),
                _ => {
                    let min_num_x = (ceil_sqrt((min_num - y_sq)*3)+2)/3;
                    (3*min_num_x*min_num_x + y_sq, 6*min_num_x + 3)
                },
            };
            loop {
                match n_2{
                    n if n >= max_num => break,
                    n => {match n%60{
                        7 | 19 | 31 | 43 => prime_filter[n-min_num] ^= true,
                        _ => (),
                    };}
                };
                n_2 += to_next_n_2;
                to_next_n_2 += 6;
            };
            //CASE 3 (x > y, opposite parity)
            //n_3 = 3x^2 - y^2 === 11 (mod 12)
            //Initially, we set x = y+1 -> n_3 = 3(y+1)^2 - y^2 = 2*y^2 + 6*y + 3
            //And then hop x by 2 each iteration.
            let (mut n_3, mut to_next_n_3) = match (y_sq << 1) < min_num {
                false => (2*y_sq+3*to_next_y_sq, 6*to_next_y_sq+18),
                _ => {
                    // Jump to the first x of the correct parity for which
                    // 3x^2 - y_sq reaches min_num. x must have opposite
                    // parity to y (y_sq shares y's parity), hence the
                    // (mx+y_sq)%2 adjustment.
                    let min_num_x = match (ceil_sqrt((min_num + y_sq)*3) +2)/3{
                        mx if (mx+y_sq)%2 == 0 => mx + 1,
                        mx => mx,
                    };
                    (3*min_num_x*min_num_x - y_sq, 12*min_num_x + 12)
                },
            };
            loop {
                match n_3{
                    n if n >= max_num => break,
                    n => {match n%60{
                        11 | 23 | 47 | 59 => prime_filter[n-min_num] ^= true,
                        _ => (),
                    };},
                };
                n_3 += to_next_n_3;
                to_next_n_3 += 24;
            };
        };
        while{ //Do-while: advance to the next square, skipping squares % 6 == 0.
            y_sq += to_next_y_sq;
            to_next_y_sq += 2;
            y_sq%6 == 0
        } {};
    };
    //Eliminate non-squarefree numbers: anything divisible by an odd square
    //(7^2 and up; smaller factors are already excluded by the mod-60
    //residues) is composite and must be cleared even if toggled an odd
    //number of times above.
    let mut n_sq = 49; // 7^2
    let mut next_n_sq = 32; //9^2 - 7^2, skip even numbers.
    while n_sq < max_num {
        let mut non_sq_free = n_sq;
        while non_sq_free < max_num {
            if non_sq_free >= min_num {
                prime_filter[non_sq_free - min_num] = false;
            }
            while{ //Do-while: hop 2*n_sq, skipping multiples of 3 and 5
                   //(those slots can never be set anyway).
                non_sq_free += n_sq + n_sq;
                (non_sq_free%3==0) | (non_sq_free%5==0)
            } {};
        };
        while{ //Do-while: next odd square not divisible by 3 or 5.
            n_sq += next_n_sq;
            next_n_sq += 8;
            (n_sq%3==0) | (n_sq%5 == 0)
        } {};
    }
    prime_filter
}
#[cfg(test)]
/// Test-only shim letting older tests compare against the naive sieve.
pub fn old_prime_filter(max_num: usize) -> std::vec::Vec<bool>{
    slow_prime_filter(max_num)
}
#[test]
fn private_filter_test(){
    // ceil_sqrt rounds up for non-squares...
    assert_eq!(5, ceil_sqrt(24));
    // ...while int_sqrt truncates; exercise squares and both neighbours.
    for &(expected, input) in [(2, 4), (4, 24), (10, 101), (1, 1), (10, 100), (3, 13)].iter() {
        assert_eq!(expected, int_sqrt(input));
    }
}
/// Naive reference sieve: `flags[i]` is true iff `i` is prime, for `i < max_num`.
fn slow_prime_filter(max_num: usize) -> std::vec::Vec<bool>{
    let mut flags = vec![true; max_num];
    for slot in flags.iter_mut().take(2) {
        *slot = false; // 0 and 1 are not prime
    }
    let mut divisor = 2;
    // Divisors above sqrt(max_num) cannot be the smallest factor of anything
    // in range, so stop there.
    while divisor * divisor <= max_num {
        if flags.get(divisor) == Some(&true) {
            // Cross off every proper multiple of this prime.
            let mut multiple = divisor * 2;
            while multiple < max_num {
                flags[multiple] = false;
                multiple += divisor;
            }
        }
        divisor += 1;
    }
    flags
}
Code speedup...
Skip low numbers for squareful check
use std;
use concurrent_help::to_concurrent_on_section;
use num_cpus;
// macro_rules! if_any_divides {
// ( $( $div:tt ),* | $n:ident $if_b:block ) => { if $(($n % $div == 0))|* $if_b};
// }
// macro_rules! if_sm_prime_divides{
// ( $n:ident $if_b:block ) => {if_any_divides!(
// 2, 3, 5, 7, 11, 13,
// 17, 19, 23, 29, 31,
// 37, 41, 43, 47, 53,
// 59, 61, 67, 71, 73,
// 79, 83, 89, 97, 101,
// 103, 107, 109, 113, 127,
// 131, 137, 139, 149, 151,
// 157, 163, 167, 173, 179,
// 181, 191, 193, 197, 199 | $n $if_b)};
// }
// Sets `$filter[$i]` (optionally shifted down by `$offset`) to true for each
// listed literal index `$i` that falls in the half-open range [$min, $max).
macro_rules! set_true_if_in_range{
    ( $( $i:tt ),* => $filter:ident + $offset:expr, $min:expr, $max:expr) => {
        $(if ($min <= $i) & ($max > $i) {$filter[$i-$offset] = true;})*
    };
    ( $( $i:tt ),* => $filter:ident, $min:expr, $max:expr) => {
        $(if ($min <= $i) & ($max > $i) {$filter[$i] = true;})*
    };
}
// macro_rules! set_sm_primes_true {
// ($filter:expr, $min:expr, $max:expr) => {
// set_true_if_in_range!(
// 2, 3, 5, 7, 11, 13,
// 17, 19, 23, 29, 31,
// 37, 41, 43, 47, 53,
// 59, 61, 67, 71, 73,
// 79, 83, 89, 97, 101,
// 103, 107, 109, 113, 127,
// 131, 137, 139, 149, 151,
// 157, 163, 167, 173, 179,
// 181, 191, 193, 197, 199 => $filter + $min, $min, $max)
// };
// }
/// Primality filter for `[0, max_num)` computed on `threads` worker threads.
pub fn prime_filter_concurrently(max_num: usize, threads: usize) -> Vec<bool>{
    let section_start = 0;
    prime_filter_section_concurrently(section_start, max_num, threads)
}
/// Primality filter for the section `[min_num, max_num)`, delegating the
/// fan-out across `threads` threads to `to_concurrent_on_section`.
pub fn prime_filter_section_concurrently(min_num:usize, max_num: usize, threads: usize) -> Vec<bool>{
    let sieve_one_section = prime_filter_section_sequentially;
    to_concurrent_on_section(sieve_one_section, min_num, max_num, threads)
}
/// Integer square root: returns `floor(sqrt(n))`.
///
/// Inputs below 2^53 go through the FPU: an `f64` mantissa is 53 bits wide,
/// so the conversion round-trips exactly there. Larger inputs use Newton's
/// method on integers.
fn int_sqrt(n:usize) -> usize{
    if n < (1 << 53) {
        return (n as f64).sqrt() as usize;
    }
    // Newton iteration x <- (x + n/x) / 2. Seeded above the root, the
    // sequence decreases monotonically until it hits floor(sqrt(n)), so we
    // stop as soon as it fails to shrink. Changes from the previous revision:
    // * the `0..=15` match arms were unreachable (those n take the float path
    //   above) and have been removed;
    // * the `new_x * new_x == n + 1` oscillation probe could overflow `usize`
    //   while the iterate was still far above the root; the monotone
    //   termination needs no squaring at all.
    let mut x = n;
    let mut next = (x + 1) >> 1;
    while next < x {
        x = next;
        next = (x + n / x) >> 1;
    }
    x
}
/// Smallest integer whose square is at least `n`, i.e. `ceil(sqrt(n))`.
fn ceil_sqrt(n:usize) -> usize{
    if n == 0{
        return 0;
    }
    let root = int_sqrt(n);
    // Round up unless n is a perfect square.
    if root * root == n { root } else { root + 1 }
}
/// Primality filter for `[0, max_num)` using every available logical CPU.
pub fn prime_filter(max_num: usize) -> Vec<bool>{
    let cpu_count = num_cpus::get();
    prime_filter_concurrently(max_num, cpu_count)
}
/// Primality filter for the section `[min_num, max_num)` using all logical CPUs.
pub fn prime_filter_section(min_num:usize, max_num: usize) -> Vec<bool>{
    let cpu_count = num_cpus::get();
    prime_filter_section_concurrently(min_num, max_num, cpu_count)
}
/// Primality filter for `[0, max_num)`.
///
/// Tiny ranges use the naive sieve; the Atkin-based section sieve only pays
/// off with some headroom.
pub fn prime_filter_sequentially(max_num: usize) -> Vec<bool>{
    if max_num >= 100 {
        prime_filter_section(0, max_num)
    } else {
        slow_prime_filter(max_num)
    }
}
/// Sieve of Atkin over the half-open range `[min_num, max_num)`.
///
/// Slot `i` of the returned vector corresponds to the number `min_num + i`
/// and ends up true iff that number is prime. Each candidate is toggled once
/// per representation by one of three quadratic forms (an odd count marks a
/// prime candidate); afterwards every multiple of an odd square is cleared.
///
/// # Panics
/// Asserts that `min_num < max_num`.
pub fn prime_filter_section_sequentially(min_num:usize, max_num: usize) -> Vec<bool>{
    //Sieve of Atkin
    assert!(min_num<max_num);
    let mut prime_filter = vec![false; max_num-min_num];
    // 2, 3 and 5 are never produced by the quadratic forms; seed them directly.
    set_true_if_in_range!(2, 3, 5 => prime_filter + min_num, min_num, max_num);
    //Macro equivalent:
    // if (min_num <= 2) & (max_num > 2) {prime_filter[2-min_num] = true;}
    // if (min_num <= 3) & (max_num > 3) {prime_filter[3-min_num] = true;}
    // if (min_num <= 5) & (max_num > 5) {prime_filter[5-min_num] = true;}
    // y_sq walks the squares 1, 4, 9, ...; to_next_y_sq is the gap to the
    // next square (2y + 1). Squares divisible by 6 are skipped by the
    // do-while at the bottom of the loop.
    let (mut y_sq, mut to_next_y_sq) = (1, 3);
    while y_sq<max_num {
        if y_sq%2 == 1 {
            //CASE 1 (odd y only)
            //n_1 = 4x^2 + y^2 === 1 (mod 4)
            // When the section starts above y_sq, jump x to roughly the first
            // value where the form reaches min_num (inverting n = 4x^2 + y^2);
            // the step 8x + 4 is the gap between consecutive form values.
            let (mut n_1, mut to_next_n_1) = match y_sq < min_num {
                false => (y_sq+4, 12),
                _ => {
                    let min_num_x = (ceil_sqrt(min_num - y_sq) +1)/2;
                    (4*min_num_x*min_num_x + y_sq, 8*min_num_x + 4)
                },
            };
            loop{
                match n_1{
                    n if n >= max_num => break,
                    // Only residues coprime to 60 can be prime; toggle those.
                    n => {match n%60{
                        1 | 13 | 17 | 29 | 37 | 41 | 49 | 53 => prime_filter[n-min_num] ^= true,
                        _ => (),
                    };},
                };
                n_1 += to_next_n_1;
                to_next_n_1 += 8;
            };
        };
        if y_sq%3 == 1 {
            //CASE 2
            //n_2 = 3x^2 + y^2 === 1 (mod 6)
            // Same jump-ahead idea, inverting n = 3x^2 + y^2; step is 6x + 3.
            let (mut n_2, mut to_next_n_2) = match y_sq < min_num {
                false => (y_sq+3, 9),
                _ => {
                    let min_num_x = (ceil_sqrt((min_num - y_sq)*3)+2)/3;
                    (3*min_num_x*min_num_x + y_sq, 6*min_num_x + 3)
                },
            };
            loop {
                match n_2{
                    n if n >= max_num => break,
                    n => {match n%60{
                        7 | 19 | 31 | 43 => prime_filter[n-min_num] ^= true,
                        _ => (),
                    };}
                };
                n_2 += to_next_n_2;
                to_next_n_2 += 6;
            };
            //CASE 3 (x > y, opposite parity)
            //n_3 = 3x^2 - y^2 === 11 (mod 12)
            //Initially, we set x = y+1 -> n_3 = 3(y+1)^2 - y^2 = 2*y^2 + 6*y + 3
            //And then hop x by 2 each iteration.
            let (mut n_3, mut to_next_n_3) = match (y_sq << 1) < min_num {
                false => (2*y_sq+3*to_next_y_sq, 6*to_next_y_sq+18),
                _ => {
                    // Jump to the first x of the correct parity for which
                    // 3x^2 - y_sq reaches min_num. x must have opposite
                    // parity to y (y_sq shares y's parity), hence the
                    // (mx+y_sq)%2 adjustment.
                    let min_num_x = match (ceil_sqrt((min_num + y_sq)*3) +2)/3{
                        mx if (mx+y_sq)%2 == 0 => mx + 1,
                        mx => mx,
                    };
                    (3*min_num_x*min_num_x - y_sq, 12*min_num_x + 12)
                },
            };
            loop {
                match n_3{
                    n if n >= max_num => break,
                    n => {match n%60{
                        11 | 23 | 47 | 59 => prime_filter[n-min_num] ^= true,
                        _ => (),
                    };},
                };
                n_3 += to_next_n_3;
                to_next_n_3 += 24;
            };
        };
        while{ //Do-while: advance to the next square, skipping squares % 6 == 0.
            y_sq += to_next_y_sq;
            to_next_y_sq += 2;
            y_sq%6 == 0
        } {};
    };
    //Eliminate non-squarefree numbers: anything divisible by an odd square
    //(7^2 and up) is composite and must be cleared even if toggled an odd
    //number of times above.
    let mut n_sq = 49; // 7^2
    let mut next_n_sq = 32; //9^2 - 7^2, skip even numbers.
    while n_sq < max_num {
        // Start at the first odd multiple of n_sq near min_num (k odd:
        // even multiples are even numbers, whose slots are never set). This
        // may land just below min_num when the floor quotient is odd; the
        // range check inside the loop guards that.
        let mut non_sq_free = n_sq * match min_num/n_sq{
            k if k%2 == 1 => k,
            k => k + 1,
        };
        while non_sq_free < max_num {
            if non_sq_free >= min_num {
                prime_filter[non_sq_free - min_num] = false;
            }
            while{ //Do-while: hop 2*n_sq, skipping multiples of 3 and 5
                   //(those slots can never be set anyway).
                non_sq_free += n_sq + n_sq;
                (non_sq_free%3==0) | (non_sq_free%5==0)
            } {};
        };
        while{ //Do-while: next odd square not divisible by 3 or 5.
            n_sq += next_n_sq;
            next_n_sq += 8;
            (n_sq%3==0) | (n_sq%5 == 0)
        } {};
    }
    prime_filter
}
#[cfg(test)]
/// Test-only shim letting older tests compare against the naive sieve.
pub fn old_prime_filter(max_num: usize) -> std::vec::Vec<bool>{
    slow_prime_filter(max_num)
}
#[test]
fn private_filter_test(){
    // ceil_sqrt rounds up for non-squares...
    assert_eq!(5, ceil_sqrt(24));
    // ...while int_sqrt truncates; exercise squares and both neighbours.
    for &(expected, input) in [(2, 4), (4, 24), (10, 101), (1, 1), (10, 100), (3, 13)].iter() {
        assert_eq!(expected, int_sqrt(input));
    }
}
/// Naive reference sieve: `flags[i]` is true iff `i` is prime, for `i < max_num`.
fn slow_prime_filter(max_num: usize) -> std::vec::Vec<bool>{
    let mut flags = vec![true; max_num];
    for slot in flags.iter_mut().take(2) {
        *slot = false; // 0 and 1 are not prime
    }
    let mut divisor = 2;
    // Divisors above sqrt(max_num) cannot be the smallest factor of anything
    // in range, so stop there.
    while divisor * divisor <= max_num {
        if flags.get(divisor) == Some(&true) {
            // Cross off every proper multiple of this prime.
            let mut multiple = divisor * 2;
            while multiple < max_num {
                flags[multiple] = false;
                multiple += divisor;
            }
        }
        divisor += 1;
    }
    flags
}
|
use std;
use std::thread;
use std::sync::mpsc;
/// Primality flags for `[0, iter_size)`; tiny ranges use the naive sieve.
pub fn prime_filter(iter_size: usize) -> Vec<bool>{
    if iter_size >= 100 {
        prime_filter_section(0, iter_size)
    } else {
        slow_prime_filter(iter_size)
    }
}
/// Atkin case 1: toggles n = 4x² + y² (x = 1, 2, ...) below `iter_size` for
/// residues n ≡ 1, 13, 17, 29, 37, 41, 49, 53 (mod 60).
///
/// Returns `(offset, flips)` where `flips[i]` refers to the number
/// `offset + i`; `offset` is `y_sq`, the base value of the form.
fn case_1(y_sq: usize, iter_size: usize)
    -> (usize, Vec<bool>) {
    let offset = y_sq;
    let mut flips = vec![false; iter_size - offset];
    // 4x² grows by 8x + 4 between consecutive x; track that delta.
    let mut candidate = y_sq;
    let mut step = 4;
    loop {
        candidate += step;
        step += 8;
        if candidate >= iter_size {
            break;
        }
        match candidate % 60 {
            1 | 13 | 17 | 29 | 37 | 41 | 49 | 53 => flips[candidate - offset] ^= true,
            _ => {}
        }
    }
    (offset, flips)
}
/// Atkin case 2: toggles n = 3x² + y² (x = 1, 2, ...) below `iter_size` for
/// residues n ≡ 7, 19, 31, 43 (mod 60).
///
/// Returns `(offset, flips)` where `flips[i]` refers to `offset + i`;
/// `offset` is `y_sq`.
fn case_2(y_sq: usize, iter_size: usize)
    -> (usize, Vec<bool>) {
    // `usize` is Copy; the previous `n_2.clone()` was a needless clone.
    let offset = y_sq;
    let mut flips = vec![false; iter_size - offset];
    // 3x² grows by 6x + 3 between consecutive x; track that delta.
    let (mut n_2, mut to_next_n_2) = (y_sq, 3);
    loop {
        n_2 += to_next_n_2;
        to_next_n_2 += 6;
        if n_2 >= iter_size {
            break;
        }
        match n_2 % 60 {
            7 | 19 | 31 | 43 => flips[n_2 - offset] ^= true,
            _ => {}
        }
    }
    (offset, flips)
}
/// Atkin case 3: toggles n = 3x² - y² for x = y+1, y+2, ... below `iter_size`
/// for residues n ≡ 11, 23, 47, 59 (mod 60). (The mod-60 test discards the
/// x parities that can only yield even values.)
///
/// `to_next_y_sq` must be the gap to the next square (2y + 1); it lets the
/// first form value 2y² + 6y + 3 be computed without knowing y itself.
/// Returns `(offset, flips)` where `flips[i]` refers to `offset + i`;
/// `offset` is `2 * y_sq`.
fn case_3(y_sq: usize, to_next_y_sq: usize, iter_size: usize)
    -> (usize, Vec<bool>) {
    let offset = 2 * y_sq;
    let mut flips = vec![false; iter_size - offset];
    let mut candidate = offset;
    let mut step = 3 * to_next_y_sq;
    loop {
        candidate += step;
        step += 6;
        if candidate >= iter_size {
            break;
        }
        match candidate % 60 {
            11 | 23 | 47 | 59 => flips[candidate - offset] ^= true,
            _ => {}
        }
    }
    (offset, flips)
}
/// Sieve of Atkin over `[min, max)`, with the three quadratic-form passes
/// fanned out to worker threads reporting over a channel. Slot `i` of the
/// result corresponds to `min + i`.
///
/// Fixes in this revision:
/// * two references to an undefined `iter_size` (the parameter is `max`);
///   the previous code did not compile;
/// * the square-free elimination never advanced `n_sq` inside its
///   `while n_sq < max` loop (infinite loop) and carried a duplicated
///   stepping block;
/// * the final wait on workers busy-spun on `try_iter`; it now blocks on
///   `recv`.
fn prime_filter_section(min:usize, max: usize) -> Vec<bool>{
    //Sieve of Atkin
    assert!(min<max);
    let mut prime_filter = vec![false; max-min];
    // 2, 3 and 5 are never produced by the quadratic forms; seed them.
    if (min <= 2) & (max > 2) {prime_filter[2-min] = true;}
    if (min <= 3) & (max > 3) {prime_filter[3-min] = true;}
    if (min <= 5) & (max > 5) {prime_filter[5-min] = true;}

    // XOR one worker's flip marks into the section filter.
    fn apply_flips(filter: &mut [bool], min: usize, offset: usize, flips: Vec<bool>) {
        for (i, flipped) in flips.into_iter().enumerate() {
            if flipped {
                filter[offset + i - min] ^= true;
            }
        }
    }

    // Walk y^2 upward from the first square relevant to the section;
    // to_next_y_sq is the gap to the next square (2y + 1).
    let (mut y_sq, mut to_next_y_sq) = match min {
        0|1 => (1, 3),
        _ => (min*min, 2*min + 1),
    };
    let mut spawned_threads = 0;
    let (tx, rx) = mpsc::channel();
    while y_sq<max {
        if y_sq%2 == 1 {
            // Case 1 (4x^2 + y^2) applies for odd y only.
            spawned_threads += 1;
            let (y_sq, tx) = (y_sq, tx.clone());
            thread::spawn(move || {
                tx.send(case_1(y_sq, max)).unwrap();
            });
        };
        if y_sq%3 == 1 {
            // Case 2 (3x^2 + y^2).
            spawned_threads += 1;
            let (y_sq, to_next_y_sq, tx1) = (y_sq, to_next_y_sq, tx.clone());
            thread::spawn(move || {
                tx1.send(case_2(y_sq, max)).unwrap();
            });
            // Case 3 (3x^2 - y^2) starts at 2*y^2; skip it when that is
            // already out of range. (This compared against `iter_size`,
            // which does not exist here.)
            if y_sq*2 < max {
                spawned_threads += 1;
                let tx2 = tx.clone();
                thread::spawn(move || {
                    tx2.send(case_3(y_sq, to_next_y_sq, max)).unwrap();
                });
            }
        };
        // Opportunistically drain any finished workers while still spawning.
        for (offset, temp_filter) in rx.try_iter() {
            spawned_threads -= 1;
            apply_flips(&mut prime_filter, min, offset, temp_filter);
        }
        while{ //Do-while: next square, skipping squares divisible by 6.
            y_sq += to_next_y_sq;
            to_next_y_sq += 2;
            y_sq%6 == 0
        } {};
    };
    // Block until every remaining worker has reported.
    while spawned_threads>0{
        let (offset, temp_filter) = rx.recv().expect("sieve worker thread panicked");
        spawned_threads -= 1;
        apply_flips(&mut prime_filter, min, offset, temp_filter);
    }
    //Eliminate non-squarefree numbers: anything divisible by an odd square
    //(7^2 and up) is composite even if it was toggled an odd number of times.
    let mut n_sq = 49; // 7^2
    let mut next_n_sq = 32; //9^2 - 7^2, skip even numbers.
    while n_sq < max {
        let mut non_sq_free = n_sq;
        while non_sq_free < max {
            if non_sq_free >= min {
                prime_filter[non_sq_free - min] = false;
            }
            while{ //Do-while: hop 2*n_sq, skipping multiples of 3 and 5.
                non_sq_free += n_sq + n_sq;
                (non_sq_free%3==0) | (non_sq_free%5==0)
            } {};
        };
        while{ //Do-while: next odd square not divisible by 3 or 5.
            n_sq += next_n_sq;
            next_n_sq += 8;
            (n_sq%3==0) | (n_sq%5 == 0)
        } {};
    };
    prime_filter
}
#[cfg(test)]
/// Test-only shim letting older tests compare against the naive sieve.
pub fn old_prime_filter(iter_size: usize) -> std::vec::Vec<bool>{
    slow_prime_filter(iter_size)
}
/// Naive reference sieve: `flags[i]` is true iff `i` is prime, for `i < iter_size`.
fn slow_prime_filter(iter_size: usize) -> std::vec::Vec<bool>{
    let mut flags = vec![true; iter_size];
    for slot in flags.iter_mut().take(2) {
        *slot = false; // 0 and 1 are not prime
    }
    let mut divisor = 2;
    // Divisors above sqrt(iter_size) cannot be the smallest factor of
    // anything in range, so stop there.
    while divisor * divisor <= iter_size {
        if flags.get(divisor) == Some(&true) {
            // Cross off every proper multiple of this prime.
            let mut multiple = divisor * 2;
            while multiple < iter_size {
                flags[multiple] = false;
                multiple += divisor;
            }
        }
        divisor += 1;
    }
    flags
}
name resolution fix
iter_size -> max
use std;
use std::thread;
use std::sync::mpsc;
/// Primality flags for `[0, iter_size)`; tiny ranges use the naive sieve.
pub fn prime_filter(iter_size: usize) -> Vec<bool>{
    if iter_size >= 100 {
        prime_filter_section(0, iter_size)
    } else {
        slow_prime_filter(iter_size)
    }
}
/// Atkin case 1: toggles n = 4x² + y² (x = 1, 2, ...) below `iter_size` for
/// residues n ≡ 1, 13, 17, 29, 37, 41, 49, 53 (mod 60).
///
/// Returns `(offset, flips)` where `flips[i]` refers to the number
/// `offset + i`; `offset` is `y_sq`, the base value of the form.
fn case_1(y_sq: usize, iter_size: usize)
    -> (usize, Vec<bool>) {
    let offset = y_sq;
    let mut flips = vec![false; iter_size - offset];
    // 4x² grows by 8x + 4 between consecutive x; track that delta.
    let mut candidate = y_sq;
    let mut step = 4;
    loop {
        candidate += step;
        step += 8;
        if candidate >= iter_size {
            break;
        }
        match candidate % 60 {
            1 | 13 | 17 | 29 | 37 | 41 | 49 | 53 => flips[candidate - offset] ^= true,
            _ => {}
        }
    }
    (offset, flips)
}
/// Atkin case 2: toggles n = 3x² + y² (x = 1, 2, ...) below `iter_size` for
/// residues n ≡ 7, 19, 31, 43 (mod 60).
///
/// Returns `(offset, flips)` where `flips[i]` refers to `offset + i`;
/// `offset` is `y_sq`.
fn case_2(y_sq: usize, iter_size: usize)
    -> (usize, Vec<bool>) {
    // `usize` is Copy; the previous `n_2.clone()` was a needless clone.
    let offset = y_sq;
    let mut flips = vec![false; iter_size - offset];
    // 3x² grows by 6x + 3 between consecutive x; track that delta.
    let (mut n_2, mut to_next_n_2) = (y_sq, 3);
    loop {
        n_2 += to_next_n_2;
        to_next_n_2 += 6;
        if n_2 >= iter_size {
            break;
        }
        match n_2 % 60 {
            7 | 19 | 31 | 43 => flips[n_2 - offset] ^= true,
            _ => {}
        }
    }
    (offset, flips)
}
/// Atkin case 3: toggles n = 3x² - y² for x = y+1, y+2, ... below `iter_size`
/// for residues n ≡ 11, 23, 47, 59 (mod 60). (The mod-60 test discards the
/// x parities that can only yield even values.)
///
/// `to_next_y_sq` must be the gap to the next square (2y + 1); it lets the
/// first form value 2y² + 6y + 3 be computed without knowing y itself.
/// Returns `(offset, flips)` where `flips[i]` refers to `offset + i`;
/// `offset` is `2 * y_sq`.
fn case_3(y_sq: usize, to_next_y_sq: usize, iter_size: usize)
    -> (usize, Vec<bool>) {
    let offset = 2 * y_sq;
    let mut flips = vec![false; iter_size - offset];
    let mut candidate = offset;
    let mut step = 3 * to_next_y_sq;
    loop {
        candidate += step;
        step += 6;
        if candidate >= iter_size {
            break;
        }
        match candidate % 60 {
            11 | 23 | 47 | 59 => flips[candidate - offset] ^= true,
            _ => {}
        }
    }
    (offset, flips)
}
/// Sieve of Atkin over `[min, max)`, with the three quadratic-form passes
/// fanned out to worker threads reporting over a channel. Slot `i` of the
/// result corresponds to `min + i`.
///
/// Fixes in this revision:
/// * the square-free elimination never advanced `n_sq` inside its
///   `while n_sq < max` loop — the stepping do-while sat after the loop, so
///   the loop spun forever — and it carried a duplicated stepping block;
/// * the final wait on workers busy-spun on `try_iter`; it now blocks on
///   `recv`.
fn prime_filter_section(min:usize, max: usize) -> Vec<bool>{
    //Sieve of Atkin
    assert!(min<max);
    let mut prime_filter = vec![false; max-min];
    // 2, 3 and 5 are never produced by the quadratic forms; seed them.
    if (min <= 2) & (max > 2) {prime_filter[2-min] = true;}
    if (min <= 3) & (max > 3) {prime_filter[3-min] = true;}
    if (min <= 5) & (max > 5) {prime_filter[5-min] = true;}

    // XOR one worker's flip marks into the section filter.
    fn apply_flips(filter: &mut [bool], min: usize, offset: usize, flips: Vec<bool>) {
        for (i, flipped) in flips.into_iter().enumerate() {
            if flipped {
                filter[offset + i - min] ^= true;
            }
        }
    }

    // Walk y^2 upward from the first square relevant to the section;
    // to_next_y_sq is the gap to the next square (2y + 1).
    let (mut y_sq, mut to_next_y_sq) = match min {
        0|1 => (1, 3),
        _ => (min*min, 2*min + 1),
    };
    let mut spawned_threads = 0;
    let (tx, rx) = mpsc::channel();
    while y_sq<max {
        if y_sq%2 == 1 {
            // Case 1 (4x^2 + y^2) applies for odd y only.
            spawned_threads += 1;
            let (y_sq, tx) = (y_sq, tx.clone());
            thread::spawn(move || {
                tx.send(case_1(y_sq, max)).unwrap();
            });
        };
        if y_sq%3 == 1 {
            // Case 2 (3x^2 + y^2).
            spawned_threads += 1;
            let (y_sq, to_next_y_sq, tx1) = (y_sq, to_next_y_sq, tx.clone());
            thread::spawn(move || {
                tx1.send(case_2(y_sq, max)).unwrap();
            });
            // Case 3 (3x^2 - y^2) starts at 2*y^2; skip it when that is
            // already out of range.
            if y_sq*2 < max {
                spawned_threads += 1;
                let tx2 = tx.clone();
                thread::spawn(move || {
                    tx2.send(case_3(y_sq, to_next_y_sq, max)).unwrap();
                });
            }
        };
        // Opportunistically drain any finished workers while still spawning.
        for (offset, temp_filter) in rx.try_iter() {
            spawned_threads -= 1;
            apply_flips(&mut prime_filter, min, offset, temp_filter);
        }
        while{ //Do-while: next square, skipping squares divisible by 6.
            y_sq += to_next_y_sq;
            to_next_y_sq += 2;
            y_sq%6 == 0
        } {};
    };
    // Block until every remaining worker has reported.
    while spawned_threads>0{
        let (offset, temp_filter) = rx.recv().expect("sieve worker thread panicked");
        spawned_threads -= 1;
        apply_flips(&mut prime_filter, min, offset, temp_filter);
    }
    //Eliminate non-squarefree numbers: anything divisible by an odd square
    //(7^2 and up) is composite even if it was toggled an odd number of times.
    let mut n_sq = 49; // 7^2
    let mut next_n_sq = 32; //9^2 - 7^2, skip even numbers.
    while n_sq < max {
        let mut non_sq_free = n_sq;
        while non_sq_free < max {
            if non_sq_free >= min {
                prime_filter[non_sq_free - min] = false;
            }
            while{ //Do-while: hop 2*n_sq, skipping multiples of 3 and 5.
                non_sq_free += n_sq + n_sq;
                (non_sq_free%3==0) | (non_sq_free%5==0)
            } {};
        };
        while{ //Do-while: next odd square not divisible by 3 or 5.
            n_sq += next_n_sq;
            next_n_sq += 8;
            (n_sq%3==0) | (n_sq%5 == 0)
        } {};
    };
    prime_filter
}
#[cfg(test)]
/// Test-only shim letting older tests compare against the naive sieve.
pub fn old_prime_filter(iter_size: usize) -> std::vec::Vec<bool>{
    slow_prime_filter(iter_size)
}
/// Naive reference sieve: `flags[i]` is true iff `i` is prime, for `i < iter_size`.
fn slow_prime_filter(iter_size: usize) -> std::vec::Vec<bool>{
    let mut flags = vec![true; iter_size];
    for slot in flags.iter_mut().take(2) {
        *slot = false; // 0 and 1 are not prime
    }
    let mut divisor = 2;
    // Divisors above sqrt(iter_size) cannot be the smallest factor of
    // anything in range, so stop there.
    while divisor * divisor <= iter_size {
        if flags.get(divisor) == Some(&true) {
            // Cross off every proper multiple of this prime.
            let mut multiple = divisor * 2;
            while multiple < iter_size {
                flags[multiple] = false;
                multiple += divisor;
            }
        }
        divisor += 1;
    }
    flags
}
|
use backend::UsesAnsiSavepointSyntax;
use connection::Connection;
use result::QueryResult;
/// Manages the internal transaction state for a connection. You should not
/// interface with this trait unless you are implementing a new connection
/// adapter. You should use [`Connection::transaction`][transaction],
/// [`Connection::test_transaction`][test_transaction], or
/// [`Connection::begin_test_transaction`][begin_test_transaction] instead.
///
/// [transaction]: trait.Connection.html#method.transaction
/// [test_transaction]: trait.Connection.html#method.test_transaction
/// [begin_test_transaction]: trait.Connection.html#method.begin_test_transaction
pub trait TransactionManager<Conn: Connection> {
    /// Begin a new transaction. If the transaction depth is greater than 0,
    /// this should create a savepoint instead. This function is expected to
    /// increment the transaction depth by 1.
    fn begin_transaction(&self, conn: &Conn) -> QueryResult<()>;

    /// Rollback the inner-most transaction. If the transaction depth is greater
    /// than 1, this should rollback to the most recent savepoint. This function
    /// is expected to decrement the transaction depth by 1.
    fn rollback_transaction(&self, conn: &Conn) -> QueryResult<()>;

    /// Commit the inner-most transaction. If the transaction depth is greater
    /// than 1, this should release the most recent savepoint. This function is
    /// expected to decrement the transaction depth by 1.
    fn commit_transaction(&self, conn: &Conn) -> QueryResult<()>;

    /// Fetch the current transaction depth. Used to ensure that
    /// `begin_test_transaction` is not called when already inside of a
    /// transaction.
    fn get_transaction_depth(&self) -> u32;
}
use std::cell::Cell;
/// An implementation of `TransactionManager` which can be used for backends
/// which use ANSI standard syntax for savepoints such as SQLite and PostgreSQL.
#[allow(missing_debug_implementations)]
#[derive(Default)]
pub struct AnsiTransactionManager {
    // Current nesting depth: 0 = no open transaction, 1 = a real transaction,
    // >1 = savepoints. `Cell` gives interior mutability behind `&self`.
    transaction_depth: Cell<i32>,
}
impl AnsiTransactionManager {
pub fn new() -> Self {
AnsiTransactionManager::default()
}
fn change_transaction_depth(&self, by: i32, query: QueryResult<()>) -> QueryResult<()> {
if query.is_ok() {
self.transaction_depth.set(self.transaction_depth.get() + by)
}
query
}
}
impl<Conn> TransactionManager<Conn> for AnsiTransactionManager where
    Conn: Connection,
    Conn::Backend: UsesAnsiSavepointSyntax,
{
    fn begin_transaction(&self, conn: &Conn) -> QueryResult<()> {
        let transaction_depth = self.transaction_depth.get();
        // Depth 0 opens a real transaction; any nested "transaction" becomes
        // an ANSI savepoint named after the depth at which it was created.
        self.change_transaction_depth(1, if transaction_depth == 0 {
            conn.batch_execute("BEGIN")
        } else {
            conn.batch_execute(&format!("SAVEPOINT diesel_savepoint_{}", transaction_depth))
        })
    }
    fn rollback_transaction(&self, conn: &Conn) -> QueryResult<()> {
        let transaction_depth = self.transaction_depth.get();
        // NOTE(review): this guards with `== 1` while `commit_transaction`
        // uses `<= 1`; at depth 0 the else branch would emit
        // "ROLLBACK TO SAVEPOINT diesel_savepoint_-1". Presumably callers
        // never roll back without an open transaction — worth confirming.
        self.change_transaction_depth(-1, if transaction_depth == 1 {
            conn.batch_execute("ROLLBACK")
        } else {
            conn.batch_execute(&format!("ROLLBACK TO SAVEPOINT diesel_savepoint_{}",
                                        transaction_depth - 1))
        })
    }
    fn commit_transaction(&self, conn: &Conn) -> QueryResult<()> {
        let transaction_depth = self.transaction_depth.get();
        // Depth 1 commits the real transaction; deeper levels just release
        // the savepoint created by the matching begin.
        self.change_transaction_depth(-1, if transaction_depth <= 1 {
            conn.batch_execute("COMMIT")
        } else {
            conn.batch_execute(&format!("RELEASE SAVEPOINT diesel_savepoint_{}",
                                        transaction_depth - 1))
        })
    }
    fn get_transaction_depth(&self) -> u32 {
        // NOTE(review): a negative depth (see rollback above) would wrap in
        // this `as u32` cast.
        self.transaction_depth.get() as u32
    }
}
Fix some links in the transaction_manager documentation
Thanks @mcarton for the report
use backend::UsesAnsiSavepointSyntax;
use connection::Connection;
use result::QueryResult;
/// Manages the internal transaction state for a connection. You should not
/// interface with this trait unless you are implementing a new connection
/// adapter. You should use [`Connection::transaction`][transaction],
/// [`Connection::test_transaction`][test_transaction], or
/// [`Connection::begin_test_transaction`][begin_test_transaction] instead.
///
/// [transaction]: trait.Connection.html#method.transaction
/// [test_transaction]: trait.Connection.html#method.test_transaction
/// [begin_test_transaction]: trait.Connection.html#method.begin_test_transaction
pub trait TransactionManager<Conn: Connection> {
/// Begin a new transaction. If the transaction depth is greater than 0,
/// this should create a savepoint instead. This function is expected to
/// increment the transaction depth by 1.
fn begin_transaction(&self, conn: &Conn) -> QueryResult<()>;
/// Rollback the inner-most transcation. If the transaction depth is greater
/// than 1, this should rollback to the most recent savepoint. This function
/// is expected to decrement the transaction depth by 1.
fn rollback_transaction(&self, conn: &Conn) -> QueryResult<()>;
/// Commit the inner-most transcation. If the transaction depth is greater
/// than 1, this should release the most recent savepoint. This function is
/// expected to decrement the transaction depth by 1.
fn commit_transaction(&self, conn: &Conn) -> QueryResult<()>;
/// Fetch the current transaction depth. Used to ensure that
/// `begin_test_transaction` is not called when already inside of a
/// transaction.
fn get_transaction_depth(&self) -> u32;
}
use std::cell::Cell;
/// An implementation of `TransactionManager` which can be used for backends
/// which use ANSI standard syntax for savepoints such as SQLite and PostgreSQL.
#[allow(missing_debug_implementations)]
#[derive(Default)]
pub struct AnsiTransactionManager {
    // Current nesting depth: 0 = no open transaction, 1 = a real transaction,
    // >1 = savepoints. `Cell` gives interior mutability behind `&self`.
    transaction_depth: Cell<i32>,
}
impl AnsiTransactionManager {
pub fn new() -> Self {
AnsiTransactionManager::default()
}
fn change_transaction_depth(&self, by: i32, query: QueryResult<()>) -> QueryResult<()> {
if query.is_ok() {
self.transaction_depth.set(self.transaction_depth.get() + by)
}
query
}
}
impl<Conn> TransactionManager<Conn> for AnsiTransactionManager where
    Conn: Connection,
    Conn::Backend: UsesAnsiSavepointSyntax,
{
    fn begin_transaction(&self, conn: &Conn) -> QueryResult<()> {
        let transaction_depth = self.transaction_depth.get();
        // Depth 0 opens a real transaction; any nested "transaction" becomes
        // an ANSI savepoint named after the depth at which it was created.
        self.change_transaction_depth(1, if transaction_depth == 0 {
            conn.batch_execute("BEGIN")
        } else {
            conn.batch_execute(&format!("SAVEPOINT diesel_savepoint_{}", transaction_depth))
        })
    }
    fn rollback_transaction(&self, conn: &Conn) -> QueryResult<()> {
        let transaction_depth = self.transaction_depth.get();
        // NOTE(review): this guards with `== 1` while `commit_transaction`
        // uses `<= 1`; at depth 0 the else branch would emit
        // "ROLLBACK TO SAVEPOINT diesel_savepoint_-1". Presumably callers
        // never roll back without an open transaction — worth confirming.
        self.change_transaction_depth(-1, if transaction_depth == 1 {
            conn.batch_execute("ROLLBACK")
        } else {
            conn.batch_execute(&format!("ROLLBACK TO SAVEPOINT diesel_savepoint_{}",
                                        transaction_depth - 1))
        })
    }
    fn commit_transaction(&self, conn: &Conn) -> QueryResult<()> {
        let transaction_depth = self.transaction_depth.get();
        // Depth 1 commits the real transaction; deeper levels just release
        // the savepoint created by the matching begin.
        self.change_transaction_depth(-1, if transaction_depth <= 1 {
            conn.batch_execute("COMMIT")
        } else {
            conn.batch_execute(&format!("RELEASE SAVEPOINT diesel_savepoint_{}",
                                        transaction_depth - 1))
        })
    }
    fn get_transaction_depth(&self) -> u32 {
        // NOTE(review): a negative depth (see rollback above) would wrap in
        // this `as u32` cast.
        self.transaction_depth.get() as u32
    }
}
|
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod common;
use anyhow::Result;
use common::{
create_partition_entry, create_primary_header, create_secondary_header,
};
use gpt_disk_io::{BlockIo, Disk, DiskError, MutSliceBlockIo};
use gpt_disk_types::{BlockSize, GptPartitionEntryArray};
use std::io::{Cursor, Read};
#[cfg(feature = "std")]
use {
gpt_disk_io::StdBlockIo,
std::fs::{self, File, OpenOptions},
std::path::Path,
tempfile::TempDir,
};
/// Decompress the bundled `disk.bin.bz2` fixture into an in-memory image.
fn load_test_disk() -> Vec<u8> {
    // Test data generated as follows:
    //
    //     truncate --size 4MiB disk.bin
    //     sgdisk disk.bin \
    //         --disk-guid=57a7feb6-8cd5-4922-b7bd-c78b0914e870 \
    //         --new=1:2048:4096 \
    //         --change-name='1:hello world!' \
    //         --partition-guid=1:37c75ffd-8932-467a-9c56-8cf1f0456b12 \
    //         --typecode=1:ccf0994f-f7e0-4e26-a011-843e38aa2eac
    //     bzip2 disk.bin
    //     mv disk.bin.bz2 gpt_disk_io/tests/
    let mut decoder =
        bzip2_rs::DecoderReader::new(Cursor::new(include_bytes!("disk.bin.bz2")));
    let mut image = Vec::new();
    decoder.read_to_end(&mut image).unwrap();
    image
}
/// Read both GPT headers and both partition entry arrays from `block_io`
/// and compare them against the expected fixture values.
fn test_disk_read<Io>(block_io: Io) -> Result<(), DiskError<Io::Error>>
where
    Io: BlockIo,
{
    let mut block_buf = vec![0u8; 512];
    let mut disk = Disk::new(block_io)?;
    let primary_header = disk.read_primary_gpt_header(&mut block_buf)?;
    assert_eq!(primary_header, create_primary_header());
    let secondary_header = disk.read_secondary_gpt_header(&mut block_buf)?;
    assert_eq!(secondary_header, create_secondary_header());
    let expected_partition_entry = create_partition_entry();
    // Check the primary partition entry array.
    let primary_partition_entry = disk
        .gpt_partition_entry_array_iter(
            primary_header.get_partition_entry_array_layout().unwrap(),
            &mut block_buf,
        )?
        .next()
        .unwrap()?;
    assert_eq!(primary_partition_entry, expected_partition_entry);
    // Check the secondary partition entry array.
    //
    // Fix: the original reused `primary_header`'s layout here, which simply
    // re-read the primary array; use the secondary header's layout so the
    // backup array location is actually exercised.
    let secondary_partition_entry = disk
        .gpt_partition_entry_array_iter(
            secondary_header.get_partition_entry_array_layout().unwrap(),
            &mut block_buf,
        )?
        .next()
        .unwrap()?;
    assert_eq!(secondary_partition_entry, expected_partition_entry);
    Ok(())
}
/// Write a complete GPT (protective MBR, both headers, both partition entry
/// arrays) to `block_io` and flush it.
fn test_disk_write<Io>(block_io: Io) -> Result<(), DiskError<Io::Error>>
where
    Io: BlockIo,
{
    let block_size = BlockSize::B512;
    let mut scratch = vec![0u8; block_size.to_usize().unwrap()];
    let mut disk = Disk::new(block_io)?;
    let primary_header = create_primary_header();
    let secondary_header = create_secondary_header();
    let partition_entry = create_partition_entry();
    // Protective MBR first, then the two headers.
    disk.write_protective_mbr(&mut scratch)?;
    disk.write_primary_gpt_header(&primary_header, &mut scratch)?;
    disk.write_secondary_gpt_header(&secondary_header, &mut scratch)?;
    // Build the entry array once, write it at the primary location, then
    // retarget its start LBA at the backup location and write it again.
    let layout = primary_header.get_partition_entry_array_layout().unwrap();
    let mut array_buf =
        vec![0; layout.num_bytes_rounded_to_block_as_usize(block_size).unwrap()];
    let mut entry_array =
        GptPartitionEntryArray::new(layout, block_size, &mut array_buf).unwrap();
    *entry_array.get_partition_entry_mut(0).unwrap() = partition_entry;
    disk.write_gpt_partition_entry_array(&entry_array)?;
    entry_array.set_start_lba(secondary_header.partition_entry_lba.into());
    disk.write_gpt_partition_entry_array(&entry_array)?;
    disk.flush()?;
    Ok(())
}
/// Exercise the read and write paths against an in-memory byte buffer.
fn test_with_mut_slice(test_disk: &[u8]) -> Result<()> {
    // Read path: a mutable copy of the fixture image.
    let mut original = test_disk.to_vec();
    test_disk_read(MutSliceBlockIo::new(&mut original, BlockSize::B512))
        .unwrap();
    // Write path: a zeroed buffer of the same size.
    let mut written = vec![0; original.len()];
    test_disk_write(MutSliceBlockIo::new(&mut written, BlockSize::B512))
        .unwrap();
    // Writing from scratch must reproduce the fixture image exactly.
    assert_eq!(original, written);
    Ok(())
}
/// Exercise the read and write paths against real files under `tmp_path`.
#[cfg(feature = "std")]
fn test_with_file(tmp_path: &Path, test_disk: &[u8]) -> Result<()> {
    // Read path: dump the fixture to disk and read it back block-wise.
    let fixture_path = tmp_path.join("disk.bin");
    fs::write(&fixture_path, test_disk)?;
    let mut fixture_file = File::open(&fixture_path)?;
    test_disk_read(StdBlockIo::new(&mut fixture_file, BlockSize::B512))?;
    // Write path: start from a 4 MiB zeroed file and write a fresh GPT.
    let output_path = tmp_path.join("new_disk.bin");
    fs::write(&output_path, vec![0; 4 * 1024 * 1024])?;
    let mut output_file = OpenOptions::new()
        .read(true)
        .write(true)
        .truncate(false)
        .open(&output_path)?;
    test_disk_write(StdBlockIo::new(&mut output_file, BlockSize::B512)).unwrap();
    // Writing from scratch must reproduce the sgdisk-generated image exactly.
    let expected_bytes = fs::read(&fixture_path)?;
    let actual_bytes = fs::read(&output_path)?;
    assert_eq!(expected_bytes, actual_bytes);
    Ok(())
}
/// End-to-end test over every available `BlockIo` backend.
#[test]
#[cfg_attr(miri, ignore)]
fn test_disk() -> Result<()> {
    let test_disk = load_test_disk();
    test_with_mut_slice(&test_disk)?;
    // The file-backed variant needs std for filesystem access.
    #[cfg(feature = "std")]
    {
        let tmp_dir = TempDir::new()?;
        let tmp_path = tmp_dir.path();
        // `tmp_path` is already a `&Path`; the original's `&tmp_path` was a
        // needless double reference (clippy: needless_borrow).
        test_with_file(tmp_path, &test_disk)?;
    }
    Ok(())
}
Drop use of tempfile from test_disk
Use Cursor instead of a File, since we just need something that
implements Read+Write+Seek.
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod common;
use anyhow::Result;
use common::{
create_partition_entry, create_primary_header, create_secondary_header,
};
#[cfg(feature = "std")]
use gpt_disk_io::StdBlockIo;
use gpt_disk_io::{BlockIo, Disk, DiskError, MutSliceBlockIo};
use gpt_disk_types::{BlockSize, GptPartitionEntryArray};
use std::io::{Cursor, Read};
/// Decompress the bundled `disk.bin.bz2` fixture into an in-memory image.
fn load_test_disk() -> Vec<u8> {
    // Test data generated as follows:
    //
    // truncate --size 4MiB disk.bin
    // sgdisk disk.bin \
    //     --disk-guid=57a7feb6-8cd5-4922-b7bd-c78b0914e870 \
    //     --new=1:2048:4096 \
    //     --change-name='1:hello world!' \
    //     --partition-guid=1:37c75ffd-8932-467a-9c56-8cf1f0456b12 \
    //     --typecode=1:ccf0994f-f7e0-4e26-a011-843e38aa2eac
    // bzip2 disk.bin
    // mv disk.bin.bz2 gpt_disk_io/tests/
    let compressed_data = Cursor::new(include_bytes!("disk.bin.bz2"));
    let mut reader = bzip2_rs::DecoderReader::new(compressed_data);
    let mut disk = Vec::new();
    reader.read_to_end(&mut disk).unwrap();
    disk
}
/// Read both GPT headers and both partition entry arrays from `block_io`
/// and compare them against the expected fixture values.
fn test_disk_read<Io>(block_io: Io) -> Result<(), DiskError<Io::Error>>
where
    Io: BlockIo,
{
    let mut block_buf = vec![0u8; 512];
    let mut disk = Disk::new(block_io)?;
    let primary_header = disk.read_primary_gpt_header(&mut block_buf)?;
    assert_eq!(primary_header, create_primary_header());
    let secondary_header = disk.read_secondary_gpt_header(&mut block_buf)?;
    assert_eq!(secondary_header, create_secondary_header());
    let expected_partition_entry = create_partition_entry();
    // Check the primary partition entry array.
    let primary_partition_entry = disk
        .gpt_partition_entry_array_iter(
            primary_header.get_partition_entry_array_layout().unwrap(),
            &mut block_buf,
        )?
        .next()
        .unwrap()?;
    assert_eq!(primary_partition_entry, expected_partition_entry);
    // Check the secondary partition entry array.
    //
    // Fix: the original reused `primary_header`'s layout here, which simply
    // re-read the primary array; use the secondary header's layout so the
    // backup array location is actually exercised.
    let secondary_partition_entry = disk
        .gpt_partition_entry_array_iter(
            secondary_header.get_partition_entry_array_layout().unwrap(),
            &mut block_buf,
        )?
        .next()
        .unwrap()?;
    assert_eq!(secondary_partition_entry, expected_partition_entry);
    Ok(())
}
/// Write a complete GPT (protective MBR, both headers, both partition entry
/// arrays) to `block_io` and flush it.
fn test_disk_write<Io>(block_io: Io) -> Result<(), DiskError<Io::Error>>
where
    Io: BlockIo,
{
    let bs = BlockSize::B512;
    let mut block_buf = vec![0u8; bs.to_usize().unwrap()];
    let mut disk = Disk::new(block_io)?;
    let primary_header = create_primary_header();
    let secondary_header = create_secondary_header();
    let partition_entry = create_partition_entry();
    disk.write_protective_mbr(&mut block_buf)?;
    disk.write_primary_gpt_header(&primary_header, &mut block_buf)?;
    disk.write_secondary_gpt_header(&secondary_header, &mut block_buf)?;
    let layout = primary_header.get_partition_entry_array_layout().unwrap();
    let mut bytes =
        vec![0; layout.num_bytes_rounded_to_block_as_usize(bs).unwrap()];
    let mut entry_array =
        GptPartitionEntryArray::new(layout, bs, &mut bytes).unwrap();
    *entry_array.get_partition_entry_mut(0).unwrap() = partition_entry;
    // Write the array at the primary location, then retarget its start LBA
    // at the backup location and write it again.
    disk.write_gpt_partition_entry_array(&entry_array)?;
    entry_array.set_start_lba(secondary_header.partition_entry_lba.into());
    disk.write_gpt_partition_entry_array(&entry_array)?;
    disk.flush()?;
    Ok(())
}
/// Exercise the read and write paths against an in-memory byte buffer.
fn test_with_mut_slice(test_disk: &[u8]) -> Result<()> {
    let mut contents = test_disk.to_vec();
    // Test read.
    test_disk_read(MutSliceBlockIo::new(&mut contents, BlockSize::B512))
        .unwrap();
    // Test write.
    let mut new_contents = vec![0; contents.len()];
    test_disk_write(MutSliceBlockIo::new(&mut new_contents, BlockSize::B512))
        .unwrap();
    // Writing from scratch must reproduce the fixture image exactly.
    assert_eq!(contents, new_contents);
    Ok(())
}
/// Exercise the read and write paths against `Cursor`-backed in-memory
/// "files" (anything implementing `Read + Write + Seek`).
#[cfg(feature = "std")]
fn test_with_filelike(test_disk: &[u8]) -> Result<()> {
    // Read path: a cursor over a copy of the fixture image.
    let mut fixture_cursor = Cursor::new(test_disk.to_vec());
    test_disk_read(StdBlockIo::new(&mut fixture_cursor, BlockSize::B512))?;
    // Write path: a cursor over a zeroed 4 MiB buffer.
    let mut output = vec![0; 4 * 1024 * 1024];
    let mut output_cursor = Cursor::new(&mut output);
    test_disk_write(StdBlockIo::new(&mut output_cursor, BlockSize::B512))?;
    // Writing from scratch must reproduce the fixture image exactly.
    assert_eq!(output, test_disk);
    Ok(())
}
/// End-to-end test over every available `BlockIo` backend.
#[test]
#[cfg_attr(miri, ignore)]
fn test_disk() -> Result<()> {
    let disk_image = load_test_disk();
    test_with_mut_slice(&disk_image)?;
    #[cfg(feature = "std")]
    test_with_filelike(&disk_image)?;
    Ok(())
}
|
use std::fmt;
use std::io::Read; // Trait providing read_to_string()
use std::env;
use url;
use hyper;
use serde;
use serde_json;
// use Groups;
use ::errors::*;
/// GitLab REST API version targeted by this client.
pub const API_VERSION: u16 = 3;
/// Pagination settings, appended to built URLs as the `page` and
/// `per_page` query parameters.
#[derive(Default, Clone, Copy, Debug)]
pub struct Pagination {
    pub page: u16,
    pub per_page: u16,
}
/// Client for a GitLab instance: base API URL, credentials, optional
/// pagination state, and the underlying hyper HTTP client.
pub struct GitLab {
    // Base URL, e.g. `https://gitlab.example.com/api/v3/`.
    url: url::Url,
    // 20-character private token, appended to every request's query string.
    private_token: String,
    // When set, `page`/`per_page` are appended to every built URL.
    pagination: Option<Pagination>,
    client: hyper::Client,
}
// Explicitly implement Debug trait for GitLab so we can hide the token.
impl fmt::Debug for GitLab {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f,
               "GitLab {{ scheme: {}, domain: {}, port: {}, private_token: XXXXXXXXXXXXXXXXXXXX, \
                pagination: {:?} }}",
               self.url.scheme(),
               self.url.domain().unwrap_or("bad hostname provided"),
               self.url
                   .port()
                   .map(|port_u16| port_u16.to_string())
                   // `unwrap_or_else` so the fallback String is only
                   // allocated when the port is absent (clippy: or_fun_call).
                   .unwrap_or_else(|| "no port provided".to_string()),
               self.pagination)
    }
}
/// Build and sanity-check the API base URL `scheme://domain:port/api/vN/`.
///
/// Errors when the domain starts or ends with a dot, cannot be parsed as a
/// URL, or parses to a different hostname than requested (which catches
/// inputs containing slashes or embedded schemes).
fn validate_url(scheme: &str, domain: &str, port: u16) -> Result<url::Url> {
    // Idiom: the original `match domain.find('.')` only ever checked for a
    // leading dot; `starts_with` says so directly.
    if domain.starts_with('.') {
        bail!(format!("invalid domain: '{}' cannot start with a dot", domain));
    }
    if domain.ends_with('.') {
        bail!(format!("invalid domain: '{}' cannot end with a dot", domain));
    }
    let url_string = format!("{}://{}/api/v{}/", scheme, domain, API_VERSION);
    let mut url = url::Url::parse(&url_string)
        .chain_err(|| format!("failure to parse URL '{}'", url_string))?;
    url.set_port(Some(port)).expect("bad port provided");
    // If the parsed host no longer matches `domain` verbatim, the caller
    // passed something that is not a plain hostname.
    match url.host_str() {
        None => bail!("failure to get URL's hostname"),
        Some(host) if host != domain => {
            bail!(format!("invalid hostname '{}'", domain))
        }
        Some(_) => {}
    }
    Ok(url)
}
impl GitLab {
pub fn _new(scheme: &str, domain: &str, port: u16, private_token: &str) -> Result<GitLab> {
if private_token.len() != 20 {
bail!(format!("private token should be a 20 characters string (not {})",
private_token.len()));
}
let url: url::Url = validate_url(scheme, domain, port).chain_err(|| "invalid URL")?;
Ok(GitLab {
url: url,
private_token: private_token.to_string(),
pagination: None,
client: match env::var("HTTP_PROXY") {
Ok(proxy) => {
let proxy: Vec<&str> = proxy.trim_left_matches("http://").split(':').collect();
let hostname = proxy[0].to_string();
let port = proxy[1].parse()
.chain_err(|| format!("failure to set port to {}", proxy[1]))?;
hyper::Client::with_http_proxy(hostname, port)
}
Err(_) => hyper::Client::new(),
},
})
}
pub fn new_insecure(domain: &str, private_token: &str) -> Result<GitLab> {
warn!("Using insecure http:// protocol: Token will be sent in clear!");
GitLab::_new("http", domain, 80, private_token)
}
pub fn new(domain: &str, private_token: &str) -> Result<GitLab> {
GitLab::_new("https", domain, 443, private_token)
}
pub fn port(mut self, port: u16) -> Self {
self.url.set_port(Some(port)).unwrap();
self
}
pub fn scheme(mut self, scheme: &str) -> Self {
self.url.set_scheme(scheme).unwrap();
self
}
/// Build a URL used to access GitLab instance, including some parameters.
///
/// # Examples
///
/// Example from GitLab: https://docs.gitlab.com/ce/api/#basic-usage
///
/// ```
/// use gitlab_api::GitLab;
///
/// let expected_url = "https://gitlab.example.com\
/// /api/v3/groups?order_by=path&private_token=XXXXXXXXXXXXXXXXXXXX";
///
/// let gl = GitLab::new("gitlab.example.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
///
/// assert_eq!(gl.build_url("groups?order_by=path").unwrap(), expected_url);
/// ```
pub fn build_url(&self, query: &str) -> Result<String> {
let mut new_url = self.url
.clone()
.join(query)
.chain_err(|| {
format!("Failure to join query '{}' to url {}",
query,
self.url.as_str())
})?;
new_url.query_pairs_mut().append_pair("private_token", &self.private_token);
self.pagination.as_ref().map(|pagination| {
new_url.query_pairs_mut().append_pair("page", &pagination.page.to_string());
new_url.query_pairs_mut().append_pair("per_page", &pagination.per_page.to_string());
});
Ok(new_url.into_string())
}
// pub fn attempt_connection(&self) -> Result<hyper::client::Response, hyper::Error> {
// let url = self.build_url("version");
// // Close connections after each GET.
// self.client.get(&url).header(hyper::header::Connection::close()).send()
// }
/// Set pagination information
///
/// # Examples
///
/// ```
/// use gitlab_api::{GitLab, Pagination};
///
/// let expected_url = "https://gitlab.example.com\
/// /api/v3/groups?order_by=path&\
/// private_token=XXXXXXXXXXXXXXXXXXXX&page=2&per_page=5";
///
/// let mut gl = GitLab::new("gitlab.example.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
/// gl.set_pagination(Pagination {page: 2, per_page: 5});
/// assert_eq!(gl.build_url("groups?order_by=path").unwrap(), expected_url);
/// ```
pub fn set_pagination(&mut self, pagination: Pagination) {
self.pagination = Some(pagination);
}
pub fn get<T>(&self, query: &str) -> Result<T>
where T: serde::Deserialize
{
let url = self.build_url(query)
.chain_err(|| format!("failure to build url for query '{}'", query))?;
info!("url: {:?}", url);
// Close connections after each GET.
let mut res: hyper::client::Response = self.client
.get(&url)
.header(hyper::header::Connection::close())
.send()
.chain_err(|| format!("cannot send request '{}' to {:?}", query, self))?;
info!("res.status: {:?}", res.status);
debug!("res.headers: {:?}", res.headers);
debug!("res.url: {:?}", res.url);
let mut body = String::new();
res.read_to_string(&mut body).chain_err(|| "cannot read response body")?;
debug!("body:\n{:?}", body);
if res.status != hyper::status::StatusCode::Ok {
bail!(format!("status code '{}', not '200 OK'", res.status));
}
serde_json::from_str(body.as_str())
.chain_err(|| format!("cannot build Rust struct from JSON data: {}", body))
}
pub fn version(&self) -> Result<::Version> {
self.get("version").chain_err(|| "cannot query 'version'")
}
pub fn groups(&self) -> ::groups::GroupsLister {
::groups::GroupsLister::new(self)
}
pub fn projects(&self) -> ::projects::ProjectsLister {
::projects::ProjectsLister::new(self)
}
pub fn issues(&self) -> ::issues::IssuesLister {
::issues::IssuesLister::new(self)
}
pub fn merge_requests(&self, project_id: i64) -> ::merge_requests::MergeRequestsLister {
::merge_requests::MergeRequestsLister::new(self, project_id)
}
// pub fn groups(&mut self, listing: ::groups::Listing) -> Result<Groups, serde_json::Error> {
// let query = listing.build_query();
// // self.get(&query)
// unimplemented!();
// }
// pub fn owned_groups(&mut self) -> Result<Groups, serde_json::Error> {
// let query = ::groups::owned_groups::Listing::new().build_query();
// info!("query: {:?}", query);
// self.get(&query)
// }
// Higher level methods
pub fn get_project(&mut self, namespace: &str, name: &str) -> Result<::Project> {
// We can't search for "namespace/name", so we search for "name", and loop on the result
// until we find the proper "namespace/name".
// NOTE: Since our search match could contain many results and they will be paginated,
// we need two loops: one on content of a page, one for the pages.
// Store the initial pagination so we can restore it later
let initial_pagination = self.pagination.clone();
// Set a default value for the pagination if it's None
self.pagination = self.pagination.or(Some(Pagination {page: 1, per_page: 20}));
let mut found_project: Option<::Project> = None;
// Query GitLab inside the page loop
loop {
let projects = self.projects().search(name.to_string()).list().chain_err(|| "cannot get projects")?;
let nb_projects_found = projects.len();
// Find the right project in the vector
found_project = projects.into_iter().find(|ref project| project.namespace.name == namespace && project.name == name);
if found_project.is_some() || nb_projects_found < self.pagination.unwrap().per_page as usize {
break;
}
// Bump to the next page
self.pagination.as_mut().map(|pagination| pagination.page += 1);
}
// Restore the initial pagination
self.pagination = initial_pagination;
match found_project {
None => bail!(format!("Project '{}/{}' not found!", namespace, name)),
Some(project) => Ok(project)
}
}
}
#[cfg(test)]
mod tests {
    use std::fmt;
    use gitlab::*;
    use errors::*;
    /// Assert `result` is `Ok`, printing the full error chain on failure.
    fn verify_ok<T>(result: &Result<T>) {
        if let &Err(ref e) = result {
            println!("error: {}", e);
            for e in e.iter().skip(1) {
                println!("caused by: {}", e);
            }
            // The backtrace is not always generated. Try to run this example
            // with `RUST_BACKTRACE=1`.
            if let Some(backtrace) = e.backtrace() {
                println!("backtrace: {:?}", backtrace);
            }
        }
        assert!(result.is_ok());
    }
    /// Assert `result` is `Err`, panicking with the unexpected value.
    fn verify_err<T>(result: &Result<T>)
        where T: fmt::Debug
    {
        match result {
            &Err(_) => {
                // pass
            }
            &Ok(ref t) => {
                // Pass the format arguments to `panic!` directly instead of
                // wrapping them in `format!` (clippy: panic_params).
                panic!("Expected an Err(), got an Ok(t), with t: {:?}", t);
            }
        }
    }
    #[test]
    fn impl_debug_for_gitlab() {
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        let debug = format!("{:?}", gl);
        assert_eq!("GitLab { scheme: https, domain: gitlab.com, port: no port provided, \
                    private_token: XXXXXXXXXXXXXXXXXXXX, pagination: None }",
                   debug);
        // Port 80 is the default for http, so `Url::port()` still reports
        // "no port provided" here.
        let gl = gl.scheme("http").port(80);
        let debug = format!("{:?}", gl);
        assert_eq!("GitLab { scheme: http, domain: gitlab.com, port: no port provided, \
                    private_token: XXXXXXXXXXXXXXXXXXXX, pagination: None }",
                   debug);
        let mut gl = gl.port(81);
        let debug = format!("{:?}", gl);
        assert_eq!("GitLab { scheme: http, domain: gitlab.com, port: 81, private_token: \
                    XXXXXXXXXXXXXXXXXXXX, pagination: None }",
                   debug);
        gl.set_pagination(Pagination {
            page: 2,
            per_page: 5,
        });
        let debug = format!("{:?}", gl);
        assert_eq!("GitLab { scheme: http, domain: gitlab.com, port: 81, private_token: \
                    XXXXXXXXXXXXXXXXXXXX, pagination: Some(Pagination { page: 2, per_page: 5 }) }",
                   debug);
    }
    #[test]
    fn gitlab_listers_groups() {
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        let groups_lister = gl.groups();
        let debug = format!("{:?}", groups_lister);
        assert_eq!("GroupsLister { gl: GitLab { scheme: https, domain: gitlab.com, port: no \
                    port provided, private_token: XXXXXXXXXXXXXXXXXXXX, pagination: None }, \
                    internal: GroupsListerInternal { skip_groups: None, all_available: None, \
                    search: None, order_by: None, sort: None } }",
                   debug);
    }
    #[test]
    fn gitlab_listers_projects() {
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        let projects_lister = gl.projects();
        let debug = format!("{:?}", projects_lister);
        assert_eq!("ProjectsLister { gl: GitLab { scheme: https, domain: gitlab.com, port: no \
                    port provided, private_token: XXXXXXXXXXXXXXXXXXXX, pagination: None }, \
                    internal: ProjectListerInternal { archived: None, visibility: None, \
                    order_by: None, sort: None, search: None, simple: None } }",
                   debug);
    }
    #[test]
    fn gitlab_listers_issues() {
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        let issues_lister = gl.issues();
        let debug = format!("{:?}", issues_lister);
        assert_eq!("IssuesLister { gl: GitLab { scheme: https, domain: gitlab.com, port: no port \
                    provided, private_token: XXXXXXXXXXXXXXXXXXXX, pagination: None }, internal: \
                    IssuesListerInternal { state: None, labels: None, order_by: None, sort: None \
                    } }",
                   debug);
    }
    #[test]
    fn new_valid() {
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_ok(&gl);
        let gl = GitLab::new_insecure("gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_ok(&gl);
        let gl = GitLab::new("localhost", "XXXXXXXXXXXXXXXXXXXX");
        verify_ok(&gl);
        let gl = GitLab::new_insecure("localhost", "XXXXXXXXXXXXXXXXXXXX");
        verify_ok(&gl);
    }
    #[test]
    fn new_invalid_url_1() {
        let gl = GitLab::new("", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }
    #[test]
    fn new_invalid_url_2() {
        let gl = GitLab::new("gitla/b.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("gitla/b.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }
    #[test]
    fn new_invalid_url_3() {
        let gl = GitLab::new("/gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("/gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }
    #[test]
    fn new_invalid_url_4() {
        let gl = GitLab::new("http:/gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("http:/gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }
    #[test]
    fn new_invalid_url_5() {
        let gl = GitLab::new("http:///gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("http:///gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }
    #[test]
    fn new_invalid_url_6() {
        let gl = GitLab::new(".gitlab", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure(".gitlab", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }
    #[test]
    fn new_invalid_url_7() {
        let gl = GitLab::new(".gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure(".gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }
    #[test]
    fn new_invalid_url_8() {
        let gl = GitLab::new("gitlab.", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("gitlab.", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }
    #[test]
    fn new_invalid_url_10() {
        let gl = GitLab::new("gitlab.com.", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("gitlab.com.", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }
    #[test]
    fn new_invalid_token() {
        // Only exactly-20-character tokens are accepted.
        let gl = GitLab::new("gitlab.com", "");
        verify_err(&gl);
        let gl = GitLab::new("gitlab.com", "X");
        verify_err(&gl);
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }
    #[test]
    fn build_url_doc() {
        let expected_url = "https://gitlab.example.com\
                            /api/v3/groups?order_by=path&private_token=XXXXXXXXXXXXXXXXXXXX";
        let gl = GitLab::new("gitlab.example.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        let url = gl.build_url("groups?order_by=path").unwrap();
        assert_eq!(url, expected_url);
    }
    #[test]
    fn build_url_pagination() {
        let expected_url = "https://gitlab.example.com\
                            /api/v3/groups?order_by=path&\
                            private_token=XXXXXXXXXXXXXXXXXXXX&page=2&per_page=5";
        let mut gl = GitLab::new("gitlab.example.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        gl.set_pagination(Pagination {page: 2, per_page: 5});
        let url = gl.build_url("groups?order_by=path").unwrap();
        assert_eq!(url, expected_url);
    }
}
GitLab::get_project(): No need to set a default value to `found_project`
use std::fmt;
use std::io::Read; // Trait providing read_to_string()
use std::env;
use url;
use hyper;
use serde;
use serde_json;
// use Groups;
use ::errors::*;
/// GitLab REST API version targeted by this client.
pub const API_VERSION: u16 = 3;
/// Pagination settings, appended to built URLs as the `page` and
/// `per_page` query parameters.
#[derive(Default, Clone, Copy, Debug)]
pub struct Pagination {
    pub page: u16,
    pub per_page: u16,
}
/// Client for a GitLab instance: base API URL, credentials, optional
/// pagination state, and the underlying hyper HTTP client.
pub struct GitLab {
    // Base URL, e.g. `https://gitlab.example.com/api/v3/`.
    url: url::Url,
    // 20-character private token, appended to every request's query string.
    private_token: String,
    // When set, `page`/`per_page` are appended to every built URL.
    pagination: Option<Pagination>,
    client: hyper::Client,
}
// Explicitly implement Debug trait for GitLab so we can hide the token.
impl fmt::Debug for GitLab {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f,
               "GitLab {{ scheme: {}, domain: {}, port: {}, private_token: XXXXXXXXXXXXXXXXXXXX, \
                pagination: {:?} }}",
               self.url.scheme(),
               self.url.domain().unwrap_or("bad hostname provided"),
               self.url
                   .port()
                   .map(|port_u16| port_u16.to_string())
                   // `unwrap_or_else` so the fallback String is only
                   // allocated when the port is absent (clippy: or_fun_call).
                   .unwrap_or_else(|| "no port provided".to_string()),
               self.pagination)
    }
}
/// Build and sanity-check the API base URL `scheme://domain:port/api/vN/`.
///
/// Errors when the domain starts or ends with a dot, cannot be parsed as a
/// URL, or parses to a different hostname than requested (which catches
/// inputs containing slashes or embedded schemes).
fn validate_url(scheme: &str, domain: &str, port: u16) -> Result<url::Url> {
    // A dot at index 0 means the domain starts with a dot; any other
    // position is fine.
    match domain.find('.') {
        None => {
            // pass
        }
        Some(index) => {
            if index == 0 {
                bail!(format!("invalid domain: '{}' cannot start with a dot", domain));
            }
        }
    };
    if domain.ends_with('.') {
        bail!(format!("invalid domain: '{}' cannot end with a dot", domain));
    }
    let url_string = format!("{}://{}/api/v{}/", scheme, domain, API_VERSION);
    let mut url = url::Url::parse(&url_string)
        .chain_err(|| format!("failure to parse URL '{}'", url_string))?;
    url.set_port(Some(port)).expect("bad port provided");
    {
        // If the parsed host no longer matches `domain` verbatim, the
        // caller passed something that is not a plain hostname.
        let url_host = url.host_str();
        if url_host.is_none() {
            bail!("failure to get URL's hostname");
        }
        if url_host.unwrap() != domain {
            bail!(format!("invalid hostname '{}'", domain));
        }
    }
    Ok(url)
}
impl GitLab {
    /// Low-level constructor; prefer `new` or `new_insecure`.
    ///
    /// Validates the token length and the URL, and configures an HTTP proxy
    /// from the `HTTP_PROXY` environment variable when it is set.
    pub fn _new(scheme: &str, domain: &str, port: u16, private_token: &str) -> Result<GitLab> {
        if private_token.len() != 20 {
            bail!(format!("private token should be a 20 characters string (not {})",
                          private_token.len()));
        }
        let url: url::Url = validate_url(scheme, domain, port).chain_err(|| "invalid URL")?;
        Ok(GitLab {
            url: url,
            private_token: private_token.to_string(),
            pagination: None,
            client: match env::var("HTTP_PROXY") {
                Ok(proxy) => {
                    // NOTE(review): assumes the proxy value is "host:port";
                    // a value without a colon panics on the index below —
                    // confirm.
                    let proxy: Vec<&str> = proxy.trim_left_matches("http://").split(':').collect();
                    let hostname = proxy[0].to_string();
                    let port = proxy[1].parse()
                        .chain_err(|| format!("failure to set port to {}", proxy[1]))?;
                    hyper::Client::with_http_proxy(hostname, port)
                }
                Err(_) => hyper::Client::new(),
            },
        })
    }
    /// Create an `http://` client on port 80. The token travels in clear text.
    pub fn new_insecure(domain: &str, private_token: &str) -> Result<GitLab> {
        warn!("Using insecure http:// protocol: Token will be sent in clear!");
        GitLab::_new("http", domain, 80, private_token)
    }
    /// Create an `https://` client on port 443.
    pub fn new(domain: &str, private_token: &str) -> Result<GitLab> {
        GitLab::_new("https", domain, 443, private_token)
    }
    /// Builder-style setter for the port.
    pub fn port(mut self, port: u16) -> Self {
        self.url.set_port(Some(port)).unwrap();
        self
    }
    /// Builder-style setter for the scheme.
    pub fn scheme(mut self, scheme: &str) -> Self {
        self.url.set_scheme(scheme).unwrap();
        self
    }
    /// Build a URL used to access GitLab instance, including some parameters.
    ///
    /// # Examples
    ///
    /// Example from GitLab: https://docs.gitlab.com/ce/api/#basic-usage
    ///
    /// ```
    /// use gitlab_api::GitLab;
    ///
    /// let expected_url = "https://gitlab.example.com\
    ///                     /api/v3/groups?order_by=path&private_token=XXXXXXXXXXXXXXXXXXXX";
    ///
    /// let gl = GitLab::new("gitlab.example.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
    ///
    /// assert_eq!(gl.build_url("groups?order_by=path").unwrap(), expected_url);
    /// ```
    pub fn build_url(&self, query: &str) -> Result<String> {
        let mut new_url = self.url
            .clone()
            .join(query)
            .chain_err(|| {
                format!("Failure to join query '{}' to url {}",
                        query,
                        self.url.as_str())
            })?;
        new_url.query_pairs_mut().append_pair("private_token", &self.private_token);
        // Append pagination parameters only when pagination is configured.
        self.pagination.as_ref().map(|pagination| {
            new_url.query_pairs_mut().append_pair("page", &pagination.page.to_string());
            new_url.query_pairs_mut().append_pair("per_page", &pagination.per_page.to_string());
        });
        Ok(new_url.into_string())
    }
    // pub fn attempt_connection(&self) -> Result<hyper::client::Response, hyper::Error> {
    //     let url = self.build_url("version");
    //     // Close connections after each GET.
    //     self.client.get(&url).header(hyper::header::Connection::close()).send()
    // }
    /// Set pagination information
    ///
    /// # Examples
    ///
    /// ```
    /// use gitlab_api::{GitLab, Pagination};
    ///
    /// let expected_url = "https://gitlab.example.com\
    ///                     /api/v3/groups?order_by=path&\
    ///                     private_token=XXXXXXXXXXXXXXXXXXXX&page=2&per_page=5";
    ///
    /// let mut gl = GitLab::new("gitlab.example.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
    /// gl.set_pagination(Pagination {page: 2, per_page: 5});
    /// assert_eq!(gl.build_url("groups?order_by=path").unwrap(), expected_url);
    /// ```
    pub fn set_pagination(&mut self, pagination: Pagination) {
        self.pagination = Some(pagination);
    }
    /// Perform a GET on `query` and deserialize the JSON response into `T`.
    /// Fails on any non-200 status code.
    pub fn get<T>(&self, query: &str) -> Result<T>
        where T: serde::Deserialize
    {
        let url = self.build_url(query)
            .chain_err(|| format!("failure to build url for query '{}'", query))?;
        info!("url: {:?}", url);
        // Close connections after each GET.
        let mut res: hyper::client::Response = self.client
            .get(&url)
            .header(hyper::header::Connection::close())
            .send()
            .chain_err(|| format!("cannot send request '{}' to {:?}", query, self))?;
        info!("res.status: {:?}", res.status);
        debug!("res.headers: {:?}", res.headers);
        debug!("res.url: {:?}", res.url);
        let mut body = String::new();
        res.read_to_string(&mut body).chain_err(|| "cannot read response body")?;
        debug!("body:\n{:?}", body);
        if res.status != hyper::status::StatusCode::Ok {
            bail!(format!("status code '{}', not '200 OK'", res.status));
        }
        serde_json::from_str(body.as_str())
            .chain_err(|| format!("cannot build Rust struct from JSON data: {}", body))
    }
    /// Query the GitLab server's version.
    pub fn version(&self) -> Result<::Version> {
        self.get("version").chain_err(|| "cannot query 'version'")
    }
    /// Lister for the groups endpoint.
    pub fn groups(&self) -> ::groups::GroupsLister {
        ::groups::GroupsLister::new(self)
    }
    /// Lister for the projects endpoint.
    pub fn projects(&self) -> ::projects::ProjectsLister {
        ::projects::ProjectsLister::new(self)
    }
    /// Lister for the issues endpoint.
    pub fn issues(&self) -> ::issues::IssuesLister {
        ::issues::IssuesLister::new(self)
    }
    /// Lister for a project's merge requests.
    pub fn merge_requests(&self, project_id: i64) -> ::merge_requests::MergeRequestsLister {
        ::merge_requests::MergeRequestsLister::new(self, project_id)
    }
    // pub fn groups(&mut self, listing: ::groups::Listing) -> Result<Groups, serde_json::Error> {
    //     let query = listing.build_query();
    //     // self.get(&query)
    //     unimplemented!();
    // }
    // pub fn owned_groups(&mut self) -> Result<Groups, serde_json::Error> {
    //     let query = ::groups::owned_groups::Listing::new().build_query();
    //     info!("query: {:?}", query);
    //     self.get(&query)
    // }
    // Higher level methods
    /// Find a project by `namespace/name`.
    pub fn get_project(&mut self, namespace: &str, name: &str) -> Result<::Project> {
        // We can't search for "namespace/name", so we search for "name", and loop on the result
        // until we find the proper "namespace/name".
        // NOTE: Since our search match could contain many results and they will be paginated,
        // we need two loops: one on content of a page, one for the pages.
        // Store the initial pagination so we can restore it later
        // (`Pagination` derives `Copy`, so this clone is a plain copy).
        let initial_pagination = self.pagination.clone();
        // Set a default value for the pagination if it's None
        self.pagination = self.pagination.or(Some(Pagination {page: 1, per_page: 20}));
        // Assigned inside the loop before every use.
        let mut found_project: Option<::Project>;
        // Query GitLab inside the page loop
        loop {
            let projects = self.projects().search(name.to_string()).list().chain_err(|| "cannot get projects")?;
            let nb_projects_found = projects.len();
            // Find the right project in the vector
            found_project = projects.into_iter().find(|ref project| project.namespace.name == namespace && project.name == name);
            // Stop on a match, or when a short page means there are no more
            // results to fetch.
            if found_project.is_some() || nb_projects_found < self.pagination.unwrap().per_page as usize {
                break;
            }
            // Bump to the next page
            self.pagination.as_mut().map(|pagination| pagination.page += 1);
        }
        // Restore the initial pagination
        self.pagination = initial_pagination;
        match found_project {
            None => bail!(format!("Project '{}/{}' not found!", namespace, name)),
            Some(project) => Ok(project)
        }
    }
}
// Unit tests: constructor validation (domain / token shape), `Debug`
// formatting, lister construction, and URL building with and without
// pagination.
#[cfg(test)]
mod tests {
    use std::fmt;
    use gitlab::*;
    use errors::*;

    // Asserts `result` is Ok; on Err, prints the whole error chain (and the
    // backtrace when available) first so the failure cause is visible.
    fn verify_ok<T>(result: &Result<T>) {
        if let &Err(ref e) = result {
            println!("error: {}", e);
            for e in e.iter().skip(1) {
                println!("caused by: {}", e);
            }
            // The backtrace is not always generated. Try to run this example
            // with `RUST_BACKTRACE=1`.
            if let Some(backtrace) = e.backtrace() {
                println!("backtrace: {:?}", backtrace);
            }
        }
        assert!(result.is_ok());
    }

    // Asserts `result` is an Err; panics showing the unexpected Ok value.
    fn verify_err<T>(result: &Result<T>)
        where T: fmt::Debug
    {
        match result {
            &Err(_) => {
                // pass
            }
            &Ok(ref t) => {
                panic!(format!("Expected an Err(), got an Ok(t), with t: {:?}", t));
            }
        }
    }

    #[test]
    fn impl_debug_for_gitlab() {
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        let debug = format!("{:?}", gl);
        assert_eq!("GitLab { scheme: https, domain: gitlab.com, port: no port provided, \
                    private_token: XXXXXXXXXXXXXXXXXXXX, pagination: None }",
                   debug);
        let gl = gl.scheme("http").port(80);
        let debug = format!("{:?}", gl);
        assert_eq!("GitLab { scheme: http, domain: gitlab.com, port: no port provided, \
                    private_token: XXXXXXXXXXXXXXXXXXXX, pagination: None }",
                   debug);
        let mut gl = gl.port(81);
        let debug = format!("{:?}", gl);
        assert_eq!("GitLab { scheme: http, domain: gitlab.com, port: 81, private_token: \
                    XXXXXXXXXXXXXXXXXXXX, pagination: None }",
                   debug);
        gl.set_pagination(Pagination {
            page: 2,
            per_page: 5,
        });
        let debug = format!("{:?}", gl);
        assert_eq!("GitLab { scheme: http, domain: gitlab.com, port: 81, private_token: \
                    XXXXXXXXXXXXXXXXXXXX, pagination: Some(Pagination { page: 2, per_page: 5 }) }",
                   debug);
    }

    #[test]
    fn gitlab_listers_groups() {
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        let groups_lister = gl.groups();
        let debug = format!("{:?}", groups_lister);
        assert_eq!("GroupsLister { gl: GitLab { scheme: https, domain: gitlab.com, port: no \
                    port provided, private_token: XXXXXXXXXXXXXXXXXXXX, pagination: None }, \
                    internal: GroupsListerInternal { skip_groups: None, all_available: None, \
                    search: None, order_by: None, sort: None } }",
                   debug);
    }

    #[test]
    fn gitlab_listers_projects() {
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        let projects_lister = gl.projects();
        let debug = format!("{:?}", projects_lister);
        assert_eq!("ProjectsLister { gl: GitLab { scheme: https, domain: gitlab.com, port: no \
                    port provided, private_token: XXXXXXXXXXXXXXXXXXXX, pagination: None }, \
                    internal: ProjectListerInternal { archived: None, visibility: None, \
                    order_by: None, sort: None, search: None, simple: None } }",
                   debug);
    }

    #[test]
    fn gitlab_listers_issues() {
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        let issues_lister = gl.issues();
        let debug = format!("{:?}", issues_lister);
        assert_eq!("IssuesLister { gl: GitLab { scheme: https, domain: gitlab.com, port: no port \
                    provided, private_token: XXXXXXXXXXXXXXXXXXXX, pagination: None }, internal: \
                    IssuesListerInternal { state: None, labels: None, order_by: None, sort: None \
                    } }",
                   debug);
    }

    // Well-formed domains with a 20-character token must be accepted,
    // for both the secure and insecure constructors.
    #[test]
    fn new_valid() {
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_ok(&gl);
        let gl = GitLab::new_insecure("gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_ok(&gl);
        let gl = GitLab::new("localhost", "XXXXXXXXXXXXXXXXXXXX");
        verify_ok(&gl);
        let gl = GitLab::new_insecure("localhost", "XXXXXXXXXXXXXXXXXXXX");
        verify_ok(&gl);
    }

    // Empty domain is rejected.
    #[test]
    fn new_invalid_url_1() {
        let gl = GitLab::new("", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }

    // Slash inside the domain is rejected.
    #[test]
    fn new_invalid_url_2() {
        let gl = GitLab::new("gitla/b.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("gitla/b.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }

    // Leading slash is rejected.
    #[test]
    fn new_invalid_url_3() {
        let gl = GitLab::new("/gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("/gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }

    // A scheme embedded in the domain is rejected.
    #[test]
    fn new_invalid_url_4() {
        let gl = GitLab::new("http:/gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("http:/gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }

    #[test]
    fn new_invalid_url_5() {
        let gl = GitLab::new("http:///gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("http:///gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }

    // Leading dot is rejected.
    #[test]
    fn new_invalid_url_6() {
        let gl = GitLab::new(".gitlab", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure(".gitlab", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }

    #[test]
    fn new_invalid_url_7() {
        let gl = GitLab::new(".gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure(".gitlab.com", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }

    // Trailing dot is rejected.
    #[test]
    fn new_invalid_url_8() {
        let gl = GitLab::new("gitlab.", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("gitlab.", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }

    #[test]
    fn new_invalid_url_10() {
        let gl = GitLab::new("gitlab.com.", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new_insecure("gitlab.com.", "XXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }

    // Tokens of length 0, 1, 19 and 21 are all rejected (only the
    // 20-character token used in `new_valid` is accepted).
    #[test]
    fn new_invalid_token() {
        let gl = GitLab::new("gitlab.com", "");
        verify_err(&gl);
        let gl = GitLab::new("gitlab.com", "X");
        verify_err(&gl);
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
        let gl = GitLab::new("gitlab.com", "XXXXXXXXXXXXXXXXXXXXX");
        verify_err(&gl);
    }

    #[test]
    fn build_url_doc() {
        let expected_url = "https://gitlab.example.com\
                            /api/v3/groups?order_by=path&private_token=XXXXXXXXXXXXXXXXXXXX";
        let gl = GitLab::new("gitlab.example.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        let url = gl.build_url("groups?order_by=path").unwrap();
        assert_eq!(url, expected_url);
    }

    // Pagination parameters are appended to the generated query string.
    #[test]
    fn build_url_pagination() {
        let expected_url = "https://gitlab.example.com\
                            /api/v3/groups?order_by=path&\
                            private_token=XXXXXXXXXXXXXXXXXXXX&page=2&per_page=5";
        let mut gl = GitLab::new("gitlab.example.com", "XXXXXXXXXXXXXXXXXXXX").unwrap();
        gl.set_pagination(Pagination {page: 2, per_page: 5});
        let url = gl.build_url("groups?order_by=path").unwrap();
        assert_eq!(url, expected_url);
    }
}
|
use std::fmt;
use std::collections::HashMap;
use std::collections::hashmap::{Occupied, Vacant};
use std::slice::Items;
use super::utils::MultiIter;
/// Trait for converting from RFC822 Header values into
/// Rust types.
// Implemented by any type that can be decoded from a raw header value.
pub trait FromHeader {
    /// Parse the `value` of the header.
    ///
    /// Returns None if the value failed to be parsed
    fn from_header(value: String) -> Option<Self>;
}
impl FromHeader for String {
    fn from_header(value: String) -> Option<String> {
        // A String header value is already in its final form; always succeeds.
        Some(value)
    }
}
/// Represents an RFC 822 Header
#[deriving(PartialEq, Eq, Clone, Hash)]
pub struct Header {
    /// The name of this header
    pub name: String,
    // Raw value; only exposed through `get_value`, which parses on demand.
    value: String,
}
impl Header {
    /// Creates a new Header for the given `name` and `value`
    pub fn new(name: String, value: String) -> Header {
        Header {
            name: name,
            value: value,
        }
    }

    /// Get the value represented by this header, as parsed
    /// into whichever type `T`
    pub fn get_value<T: FromHeader>(&self) -> Option<T> {
        // Parsing consumes a String, so hand the implementation a clone of
        // the stored value; `self` stays untouched.
        FromHeader::from_header(self.value.clone())
    }
}
impl fmt::Show for Header {
    // Renders the header in wire format: "Name: value".
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{}: {}", self.name, self.value)
    }
}
/// A collection of Headers
#[deriving(Eq,PartialEq)]
pub struct HeaderMap {
    // Headers grouped by name. Each name maps to its values in insertion
    // order, but the names themselves are unordered (HashMap).
    headers: HashMap<String, Vec<Header>>,
}
impl HeaderMap {
    /// Creates an empty collection.
    pub fn new() -> HeaderMap {
        HeaderMap {
            headers: HashMap::new()
        }
    }

    /// Adds a header to the collection
    pub fn insert(&mut self, header: Header) {
        // If the header hashmap already has this header, use that list.
        // Otherwise, make a new one.
        let header_list = match self.headers.entry(header.name.clone()) {
            Vacant(entry) => entry.set(Vec::new()),
            Occupied(entry) => entry.into_mut(),
        };
        // ... and add the new header to it
        header_list.push(header);
    }

    /// Get an Iterator over the collection of headers.
    // Chains the per-name list iterators together; the order across
    // different names is unspecified (it follows HashMap iteration).
    pub fn iter(&self) -> MultiIter<&Header, Items<Header>> {
        let mut iters = Vec::new();
        for header_list in self.headers.values() {
            iters.push(header_list.iter());
        }
        MultiIter::new(iters)
    }

    /// Get the last value of the header
    pub fn get(&self, name: String) -> Option<&Header> {
        match self.headers.find(&name) {
            Some(values) => values.last(),
            None => None,
        }
    }

    /// Get the last value of the header, as a decoded type.
    pub fn get_value<T: FromHeader>(&self, name: String) -> Option<T> {
        match self.get(name) {
            Some(ref header) => header.get_value(),
            None => None,
        }
    }

    // Total number of stored headers, counting repeated names once per value.
    // Note: walks every value list (O(n)) rather than keeping a counter.
    pub fn len(&self) -> uint {
        self.iter().count()
    }

    // All headers stored under `key`, or None if the name is unknown.
    pub fn find(&self, key: &String) -> Option<&Vec<Header>> {
        self.headers.find(key)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashSet;

    // (name, value) fixtures: a repeated name and a multi-line value.
    static SAMPLE_HEADERS: [(&'static str, &'static str), ..4] = [
        ("Test", "Value"),
        ("Test", "Value 2"),
        ("Test-2", "Value 3"),
        ("Test-Multiline", "Foo\nBar"),
    ];

    fn make_sample_headers() -> Vec<Header> {
        SAMPLE_HEADERS.iter().map(|&(name, value)| {
            Header::new(name.to_string(), value.to_string())
        }).collect()
    }

    #[test]
    fn test_header_to_string() {
        let header = Header::new("Test".to_string(), "Value".to_string());
        assert_eq!(header.to_string(), "Test: Value".to_string());
    }

    #[test]
    fn test_string_get_value() {
        let header = Header::new("Test".to_string(), "Value".to_string());
        let string_value: String = header.get_value().unwrap();
        assert_eq!(string_value, "Value".to_string());
    }

    // `len()` must grow by one for every inserted header, duplicates included.
    #[test]
    fn test_header_map_len() {
        let mut headers = HeaderMap::new();
        for (i, header) in make_sample_headers().into_iter().enumerate() {
            headers.insert(header);
            assert_eq!(headers.len(), i + 1);
        }
    }

    #[test]
    fn test_header_map_iter() {
        let mut headers = HeaderMap::new();
        let mut expected_headers = HashSet::new();
        for header in make_sample_headers().into_iter() {
            headers.insert(header.clone());
            expected_headers.insert(header);
        }
        let mut count = 0u;
        // Ensure all the headers returned are expected
        for header in headers.iter() {
            assert!(expected_headers.contains(header));
            count += 1;
        }
        // And that there is the right number of them
        assert_eq!(count, expected_headers.len());
    }
}
Change HeaderMap to maintain ordering of inserted items.
use std::fmt;
use std::slice::Items;
/// Trait for converting from RFC822 Header values into
/// Rust types.
// Implemented by any type that can be decoded from a raw header value.
pub trait FromHeader {
    /// Parse the `value` of the header.
    ///
    /// Returns None if the value failed to be parsed
    fn from_header(value: String) -> Option<Self>;
}
impl FromHeader for String {
    fn from_header(value: String) -> Option<String> {
        // A String header value is already in its final form; always succeeds.
        Some(value)
    }
}
/// Represents an RFC 822 Header
#[deriving(PartialEq, Eq, Clone, Hash)]
pub struct Header {
    /// The name of this header
    pub name: String,
    // Raw value; only exposed through `get_value`, which parses on demand.
    value: String,
}
impl Header {
    /// Creates a new Header for the given `name` and `value`
    pub fn new(name: String, value: String) -> Header {
        Header {
            name: name,
            value: value,
        }
    }

    /// Get the value represented by this header, as parsed
    /// into whichever type `T`
    pub fn get_value<T: FromHeader>(&self) -> Option<T> {
        // Parsing consumes a String, so hand the implementation a clone of
        // the stored value; `self` stays untouched.
        FromHeader::from_header(self.value.clone())
    }
}
impl fmt::Show for Header {
    // Renders the header in wire format: "Name: value".
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{}: {}", self.name, self.value)
    }
}
/// A collection of Headers
#[deriving(Eq,PartialEq)]
pub struct HeaderMap {
    // Flat list kept in insertion order; name-based lookups scan this list.
    headers: Vec<Header>,
}
impl HeaderMap {
    /// Creates an empty collection.
    pub fn new() -> HeaderMap {
        HeaderMap {
            headers: Vec::new(),
        }
    }

    /// Adds a header to the collection
    pub fn insert(&mut self, header: Header) {
        // Plain append: overall insertion order is preserved.
        self.headers.push(header);
    }

    /// Get an Iterator over the collection of headers.
    pub fn iter(&self) -> Items<Header> {
        self.headers.iter()
    }

    /// Get the last value of the header
    // Linear scan; `last()` yields the most recently inserted match.
    pub fn get(&self, name: String) -> Option<&Header> {
        self.iter().filter(|h| { h.name == name }).last()
    }

    /// Get the last value of the header, as a decoded type.
    pub fn get_value<T: FromHeader>(&self, name: String) -> Option<T> {
        match self.get(name) {
            Some(ref header) => header.get_value(),
            None => None,
        }
    }

    // Total number of headers held, duplicates included.
    pub fn len(&self) -> uint {
        self.headers.len()
    }

    // All headers named `key` (in insertion order), or None if absent.
    pub fn find(&self, key: &String) -> Option<Vec<&Header>> {
        let headers: Vec<&Header> = self.iter().filter(|h| { &h.name == key }).collect();
        if headers.len() > 0u {
            Some(headers)
        } else {
            None
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashSet;

    // (name, value) fixtures: a repeated name and a multi-line value.
    static SAMPLE_HEADERS: [(&'static str, &'static str), ..4] = [
        ("Test", "Value"),
        ("Test", "Value 2"),
        ("Test-2", "Value 3"),
        ("Test-Multiline", "Foo\nBar"),
    ];

    fn make_sample_headers() -> Vec<Header> {
        SAMPLE_HEADERS.iter().map(|&(name, value)| {
            Header::new(name.to_string(), value.to_string())
        }).collect()
    }

    #[test]
    fn test_header_to_string() {
        let header = Header::new("Test".to_string(), "Value".to_string());
        assert_eq!(header.to_string(), "Test: Value".to_string());
    }

    #[test]
    fn test_string_get_value() {
        let header = Header::new("Test".to_string(), "Value".to_string());
        let string_value: String = header.get_value().unwrap();
        assert_eq!(string_value, "Value".to_string());
    }

    // `len()` must grow by one for every inserted header, duplicates included.
    #[test]
    fn test_header_map_len() {
        let mut headers = HeaderMap::new();
        for (i, header) in make_sample_headers().into_iter().enumerate() {
            headers.insert(header);
            assert_eq!(headers.len(), i + 1);
        }
    }

    #[test]
    fn test_header_map_iter() {
        let mut headers = HeaderMap::new();
        let mut expected_headers = HashSet::new();
        for header in make_sample_headers().into_iter() {
            headers.insert(header.clone());
            expected_headers.insert(header);
        }
        let mut count = 0u;
        // Ensure all the headers returned are expected
        for header in headers.iter() {
            assert!(expected_headers.contains(header));
            count += 1;
        }
        // And that there is the right number of them
        assert_eq!(count, expected_headers.len());
    }
}
|
use std::fmt;
use std::slice::Items;
/// Trait for converting from RFC822 Header values into
/// Rust types.
// Implemented by any type that can be decoded from a raw header value.
pub trait FromHeader {
    /// Parse the `value` of the header.
    ///
    /// Returns None if the value failed to be parsed
    fn from_header(value: String) -> Option<Self>;
}
/// Trait for converting from a Rust type into a Header value.
// Counterpart of `FromHeader`; consumes the value being converted.
pub trait ToHeader {
    /// Turn the `value` into a String suitable for being used in
    /// a message header.
    ///
    /// Returns None if the value cannot be stringified.
    fn to_header(value: Self) -> Option<String>;
}
/// Trait for converting from a Rust type into a Header value
/// that handles its own folding.
///
/// Be mindful that this trait does not mean that the value will
/// not be folded later, rather that the type returns a value that
/// should not be folded, given that the header value starts so far
/// in to a line.
pub trait ToFoldedHeader {
    /// `start_pos` is the column at which the value will start on the line.
    fn to_folded_header(start_pos: uint, value: Self) -> Option<String>;
}
// Blanket impl: every `ToHeader` type gets folding support for free.
impl<T: ToHeader> ToFoldedHeader for T {
    fn to_folded_header(_: uint, value: T) -> Option<String> {
        // We ignore the start_position because the thing will fold anyway.
        ToHeader::to_header(value)
    }
}
impl FromHeader for String {
    fn from_header(value: String) -> Option<String> {
        // A String header value is already in its final form; always succeeds.
        Some(value)
    }
}
impl ToHeader for String {
    fn to_header(value: String) -> Option<String> {
        // Already a String; no conversion can fail.
        Some(value)
    }
}
impl<'a> ToHeader for &'a str {
    fn to_header(value: &'a str) -> Option<String> {
        // Borrowed strings are copied into an owned String; cannot fail.
        Some(value.to_string())
    }
}
/// Represents an RFC 822 Header
#[deriving(PartialEq, Eq, Clone, Hash)]
pub struct Header {
    /// The name of this header
    pub name: String,
    // Raw value; only exposed through `get_value`, which parses on demand.
    value: String,
}
impl Header {
    /// Creates a new Header for the given `name` and `value`
    pub fn new(name: String, value: String) -> Header {
        Header {
            name: name,
            value: value,
        }
    }

    /// Creates a new Header for the given `name` and `value`,
    /// as converted through the `ToHeader` or `ToFoldedHeader` trait.
    ///
    /// Returns None if the value failed to be converted.
    pub fn new_with_value<T: ToFoldedHeader>(name: String, value: T) -> Option<Header> {
        // "+ 2" accounts for the ": " separator that follows the name, so
        // folding starts at the correct column.
        let header_len = name.len() + 2;
        ToFoldedHeader::to_folded_header(header_len, value).map(|val| { Header::new(name.clone(), val) })
    }

    /// Get the value represented by this header, as parsed
    /// into whichever type `T`
    pub fn get_value<T: FromHeader>(&self) -> Option<T> {
        // Parsing consumes a String, so pass a clone; `self` stays untouched.
        FromHeader::from_header(self.value.clone())
    }
}
impl fmt::Show for Header {
    // Renders the header in wire format: "Name: value".
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{}: {}", self.name, self.value)
    }
}
/// A collection of Headers
#[deriving(Eq,PartialEq)]
pub struct HeaderMap {
    // Flat list kept in insertion order; name-based lookups scan this list.
    headers: Vec<Header>,
}
impl HeaderMap {
    /// Creates an empty collection.
    pub fn new() -> HeaderMap {
        HeaderMap {
            headers: Vec::new(),
        }
    }

    /// Adds a header to the collection
    pub fn insert(&mut self, header: Header) {
        // Plain append: overall insertion order is preserved.
        self.headers.push(header);
    }

    /// Get an Iterator over the collection of headers.
    pub fn iter(&self) -> Items<Header> {
        self.headers.iter()
    }

    /// Get the last value of the header
    // Linear scan; `last()` yields the most recently inserted match.
    pub fn get(&self, name: String) -> Option<&Header> {
        self.iter().filter(|h| { h.name == name }).last()
    }

    /// Get the last value of the header, as a decoded type.
    pub fn get_value<T: FromHeader>(&self, name: String) -> Option<T> {
        match self.get(name) {
            Some(ref header) => header.get_value(),
            None => None,
        }
    }

    // Total number of headers held, duplicates included.
    pub fn len(&self) -> uint {
        self.headers.len()
    }

    // All headers named `key` (in insertion order), or None if absent.
    pub fn find(&self, key: &String) -> Option<Vec<&Header>> {
        let headers: Vec<&Header> = self.iter().filter(|h| { &h.name == key }).collect();
        if headers.len() > 0u {
            Some(headers)
        } else {
            None
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashSet;

    // (name, value) fixtures: a repeated name and a multi-line value.
    static SAMPLE_HEADERS: [(&'static str, &'static str), ..4] = [
        ("Test", "Value"),
        ("Test", "Value 2"),
        ("Test-2", "Value 3"),
        ("Test-Multiline", "Foo\nBar"),
    ];

    fn make_sample_headers() -> Vec<Header> {
        SAMPLE_HEADERS.iter().map(|&(name, value)| {
            Header::new(name.to_string(), value.to_string())
        }).collect()
    }

    #[test]
    fn test_header_to_string() {
        let header = Header::new("Test".to_string(), "Value".to_string());
        assert_eq!(header.to_string(), "Test: Value".to_string());
    }

    #[test]
    fn test_string_get_value() {
        let header = Header::new("Test".to_string(), "Value".to_string());
        let string_value: String = header.get_value().unwrap();
        assert_eq!(string_value, "Value".to_string());
    }

    // Round-trip through `ToHeader for String`.
    #[test]
    fn test_to_header_string() {
        let header = Header::new_with_value("Test".to_string(), "Value".to_string()).unwrap();
        let header_value = header.get_value::<String>().unwrap();
        assert_eq!(header_value, "Value".to_string());
    }

    // Round-trip through `ToHeader for &str`.
    #[test]
    fn test_to_header_str() {
        let header = Header::new_with_value("Test".to_string(), "Value").unwrap();
        let header_value = header.get_value::<String>().unwrap();
        assert_eq!(header_value, "Value".to_string());
    }

    // `len()` must grow by one for every inserted header, duplicates included.
    #[test]
    fn test_header_map_len() {
        let mut headers = HeaderMap::new();
        for (i, header) in make_sample_headers().into_iter().enumerate() {
            headers.insert(header);
            assert_eq!(headers.len(), i + 1);
        }
    }

    #[test]
    fn test_header_map_iter() {
        let mut headers = HeaderMap::new();
        let mut expected_headers = HashSet::new();
        for header in make_sample_headers().into_iter() {
            headers.insert(header.clone());
            expected_headers.insert(header);
        }
        let mut count = 0u;
        // Ensure all the headers returned are expected
        for header in headers.iter() {
            assert!(expected_headers.contains(header));
            count += 1;
        }
        // And that there is the right number of them
        assert_eq!(count, expected_headers.len());
    }
}
Support decoding of RFC2047 parts within headers.
use std::fmt;
use std::slice::Items;
use super::rfc2047::decode_rfc2047;
/// Trait for converting from RFC822 Header values into
/// Rust types.
// Implemented by any type that can be decoded from a raw header value.
pub trait FromHeader {
    /// Parse the `value` of the header.
    ///
    /// Returns None if the value failed to be parsed
    fn from_header(value: String) -> Option<Self>;
}
/// Trait for converting from a Rust type into a Header value.
// Counterpart of `FromHeader`; consumes the value being converted.
pub trait ToHeader {
    /// Turn the `value` into a String suitable for being used in
    /// a message header.
    ///
    /// Returns None if the value cannot be stringified.
    fn to_header(value: Self) -> Option<String>;
}
/// Trait for converting from a Rust type into a Header value
/// that handles its own folding.
///
/// Be mindful that this trait does not mean that the value will
/// not be folded later, rather that the type returns a value that
/// should not be folded, given that the header value starts so far
/// in to a line.
pub trait ToFoldedHeader {
    /// `start_pos` is the column at which the value will start on the line.
    fn to_folded_header(start_pos: uint, value: Self) -> Option<String>;
}
// Blanket impl: every `ToHeader` type gets folding support for free.
impl<T: ToHeader> ToFoldedHeader for T {
    fn to_folded_header(_: uint, value: T) -> Option<String> {
        // We ignore the start_position because the thing will fold anyway.
        ToHeader::to_header(value)
    }
}
impl FromHeader for String {
    // Decodes any RFC 2047 encoded-words ("=?charset?enc?text?=") embedded in
    // the raw value. Plain text around them is copied through unchanged, and
    // sequences that fail to decode are kept verbatim (see the final test
    // case in `test_string_get_value`).
    fn from_header(value: String) -> Option<String> {
        // Scanner states. Each variant carries the byte offset of the first
        // character that has not yet been pushed onto `decoded`.
        #[deriving(Show)]
        enum ParseState {
            // Copying plain text.
            Normal(uint),
            // Just saw '='; possible start of an encoded word.
            SeenEquals(uint),
            // Inside "=?..."; second field counts the '?' seen so far
            // (a complete encoded word contains exactly 4 before the final '=').
            SeenQuestion(uint, uint),
        }
        let mut state = ParseState::Normal(0u);
        let mut decoded = String::new();
        let mut pos = 0u;
        let value_slice = value.as_slice();
        // Walk char-by-char by byte offset (handles multi-byte UTF-8).
        while pos < value.len() {
            let ch_range = value_slice.char_range_at(pos);
            let c = ch_range.ch;
            state = match (state, c) {
                (ParseState::SeenQuestion(start_pos, 4), '=') => {
                    // Go to decode if we've seen enough ?
                    let part_decoded = decode_rfc2047(value_slice.slice(start_pos, ch_range.next));
                    let to_push = match part_decoded {
                        Some(ref s) => s.as_slice(),
                        // Decoding failed, push the undecoded string in.
                        None => value_slice.slice(start_pos, pos),
                    };
                    decoded.push_str(to_push);
                    // Revert us to normal state, but starting at the next character.
                    ParseState::Normal(ch_range.next)
                },
                (ParseState::SeenQuestion(start_pos, count), '?') => {
                    ParseState::SeenQuestion(start_pos, count + 1)
                },
                (ParseState::SeenQuestion(start_pos, count), _) => {
                    if count > 4 {
                        // This isn't a RFC2047 sequence, so go back to a normal.
                        ParseState::Normal(start_pos)
                    } else {
                        // NOTE(review): at count == 4 a non-'='/'?' character keeps
                        // this state, so the word is only closed by a later "?=";
                        // confirm this matches the intended RFC 2047 tokenization.
                        state
                    }
                }
                (ParseState::SeenEquals(start_pos), '?') => {
                    ParseState::SeenQuestion(start_pos, 1)
                },
                (ParseState::SeenEquals(start_pos), _) => {
                    // This isn't a RFC2047 sequence, so go back to a normal.
                    ParseState::Normal(start_pos)
                }
                (ParseState::Normal(start_pos), '=') => {
                    if start_pos != pos {
                        // Push all up to the =, if there is stuff to push.
                        decoded.push_str(value_slice.slice(start_pos, pos));
                    }
                    ParseState::SeenEquals(pos)
                },
                (ParseState::Normal(_), _) => state,
            };
            pos = ch_range.next;
        }
        // Don't forget to push on whatever we have left
        let last_start = match state {
            ParseState::Normal(start_pos) => start_pos,
            ParseState::SeenEquals(start_pos) => start_pos,
            ParseState::SeenQuestion(start_pos, _) => start_pos,
        };
        decoded.push_str(value_slice.slice_from(last_start));
        Some(decoded)
    }
}
impl ToHeader for String {
    fn to_header(value: String) -> Option<String> {
        // Already a String; no conversion can fail.
        Some(value)
    }
}
impl<'a> ToHeader for &'a str {
    fn to_header(value: &'a str) -> Option<String> {
        // Borrowed strings are copied into an owned String; cannot fail.
        Some(value.to_string())
    }
}
/// Represents an RFC 822 Header
#[deriving(PartialEq, Eq, Clone, Hash)]
pub struct Header {
    /// The name of this header
    pub name: String,
    // Raw value; only exposed through `get_value`, which parses on demand.
    value: String,
}
impl Header {
    /// Creates a new Header for the given `name` and `value`
    pub fn new(name: String, value: String) -> Header {
        Header {
            name: name,
            value: value,
        }
    }

    /// Creates a new Header for the given `name` and `value`,
    /// as converted through the `ToHeader` or `ToFoldedHeader` trait.
    ///
    /// Returns None if the value failed to be converted.
    pub fn new_with_value<T: ToFoldedHeader>(name: String, value: T) -> Option<Header> {
        // "+ 2" accounts for the ": " separator that follows the name, so
        // folding starts at the correct column.
        let header_len = name.len() + 2;
        ToFoldedHeader::to_folded_header(header_len, value).map(|val| { Header::new(name.clone(), val) })
    }

    /// Get the value represented by this header, as parsed
    /// into whichever type `T`
    pub fn get_value<T: FromHeader>(&self) -> Option<T> {
        // Parsing consumes a String, so pass a clone; `self` stays untouched.
        FromHeader::from_header(self.value.clone())
    }
}
impl fmt::Show for Header {
    // Renders the header in wire format: "Name: value".
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{}: {}", self.name, self.value)
    }
}
/// A collection of Headers
#[deriving(Eq,PartialEq)]
pub struct HeaderMap {
    // Flat list kept in insertion order; name-based lookups scan this list.
    headers: Vec<Header>,
}
impl HeaderMap {
    /// Creates an empty collection.
    pub fn new() -> HeaderMap {
        HeaderMap {
            headers: Vec::new(),
        }
    }

    /// Adds a header to the collection
    pub fn insert(&mut self, header: Header) {
        // Plain append: overall insertion order is preserved.
        self.headers.push(header);
    }

    /// Get an Iterator over the collection of headers.
    pub fn iter(&self) -> Items<Header> {
        self.headers.iter()
    }

    /// Get the last value of the header
    // Linear scan; `last()` yields the most recently inserted match.
    pub fn get(&self, name: String) -> Option<&Header> {
        self.iter().filter(|h| { h.name == name }).last()
    }

    /// Get the last value of the header, as a decoded type.
    pub fn get_value<T: FromHeader>(&self, name: String) -> Option<T> {
        match self.get(name) {
            Some(ref header) => header.get_value(),
            None => None,
        }
    }

    // Total number of headers held, duplicates included.
    pub fn len(&self) -> uint {
        self.headers.len()
    }

    // All headers named `key` (in insertion order), or None if absent.
    pub fn find(&self, key: &String) -> Option<Vec<&Header>> {
        let headers: Vec<&Header> = self.iter().filter(|h| { &h.name == key }).collect();
        if headers.len() > 0u {
            Some(headers)
        } else {
            None
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashSet;

    // (name, value) fixtures: a repeated name and a multi-line value.
    static SAMPLE_HEADERS: [(&'static str, &'static str), ..4] = [
        ("Test", "Value"),
        ("Test", "Value 2"),
        ("Test-2", "Value 3"),
        ("Test-Multiline", "Foo\nBar"),
    ];

    fn make_sample_headers() -> Vec<Header> {
        SAMPLE_HEADERS.iter().map(|&(name, value)| {
            Header::new(name.to_string(), value.to_string())
        }).collect()
    }

    #[test]
    fn test_header_to_string() {
        let header = Header::new("Test".to_string(), "Value".to_string());
        assert_eq!(header.to_string(), "Test: Value".to_string());
    }

    // Table-driven check of RFC 2047 decoding through `get_value::<String>()`:
    // plain text, single and multiple encoded words, mixed content, and a
    // malformed word that must be passed through unchanged.
    #[test]
    fn test_string_get_value() {
        struct HeaderTest<'s> {
            input: &'s str,
            result: Option<&'s str>,
        }

        let tests = vec![
            HeaderTest {
                input: "Value",
                result: Some("Value"),
            },
            HeaderTest {
                input: "=?ISO-8859-1?Q?Test=20text?=",
                result: Some("Test text"),
            },
            HeaderTest {
                input: "=?ISO-8859-1?Q?Multiple?= =?utf-8?b?ZW5jb2Rpbmdz?=",
                result: Some("Multiple encodings"),
            },
            HeaderTest {
                input: "Some things with =?utf-8?b?ZW5jb2Rpbmdz?=, other things without.",
                result: Some("Some things with encodings, other things without."),
            },
            HeaderTest {
                input: "Encoding =?utf-8?q?fail",
                result: Some("Encoding =?utf-8?q?fail"),
            },
        ];

        for test in tests.into_iter() {
            let header = Header::new("Test".to_string(), test.input.to_string());
            let string_value = header.get_value::<String>();
            assert_eq!(string_value, test.result.map(|s| { s.to_string() }));
        }
    }

    // Round-trip through `ToHeader for String`.
    #[test]
    fn test_to_header_string() {
        let header = Header::new_with_value("Test".to_string(), "Value".to_string()).unwrap();
        let header_value = header.get_value::<String>().unwrap();
        assert_eq!(header_value, "Value".to_string());
    }

    // Round-trip through `ToHeader for &str`.
    #[test]
    fn test_to_header_str() {
        let header = Header::new_with_value("Test".to_string(), "Value").unwrap();
        let header_value = header.get_value::<String>().unwrap();
        assert_eq!(header_value, "Value".to_string());
    }

    // `len()` must grow by one for every inserted header, duplicates included.
    #[test]
    fn test_header_map_len() {
        let mut headers = HeaderMap::new();
        for (i, header) in make_sample_headers().into_iter().enumerate() {
            headers.insert(header);
            assert_eq!(headers.len(), i + 1);
        }
    }

    #[test]
    fn test_header_map_iter() {
        let mut headers = HeaderMap::new();
        let mut expected_headers = HashSet::new();
        for header in make_sample_headers().into_iter() {
            headers.insert(header.clone());
            expected_headers.insert(header);
        }
        let mut count = 0u;
        // Ensure all the headers returned are expected
        for header in headers.iter() {
            assert!(expected_headers.contains(header));
            count += 1;
        }
        // And that there is the right number of them
        assert_eq!(count, expected_headers.len());
    }
}
|
use std::marker::PhantomData;
use std::error;
use std::fmt;
use std::mem;
use std::ptr;
use std::sync::Arc;
use buffer::Buffer;
use buffer::BufferSlice;
use format::StrongStorage;
use Error;
use OomError;
use VulkanObject;
use VulkanPointers;
use check_errors;
use vk;
/// Represents a way for the GPU to interpret buffer data.
///
/// Note that a buffer view is only required for some operations. For example using a buffer as a
/// uniform buffer doesn't require creating a `BufferView`.
pub struct BufferView<F, B> where B: Buffer {
    // Raw Vulkan handle; destroyed in the `Drop` implementation.
    view: vk::BufferView,
    // Keeps the viewed buffer alive for as long as the view exists.
    buffer: Arc<B>,
    // Ties the view to its format `F` at the type level without storing it.
    marker: PhantomData<F>,
}
impl<F, B> BufferView<F, B> where B: Buffer {
    /// Builds a new buffer view.
    ///
    /// The format of the view will be automatically determined by the `T` parameter.
    ///
    /// The buffer must have been created with either the `uniform_texel_buffer` or
    /// the `storage_texel_buffer` usage or an error will occur.
    pub fn new<'a, S>(buffer: S, format: F)
                      -> Result<Arc<BufferView<F, B>>, BufferViewCreationError>
        where S: Into<BufferSlice<'a, [F::Pixel], B>>, B: 'static, F: StrongStorage + 'static
    {
        let buffer = buffer.into();
        let device = buffer.resource.inner_buffer().device();
        let format = format.format();

        // Reject buffers created without a texel-buffer usage flag.
        if !buffer.buffer().inner_buffer().usage_uniform_texel_buffer() &&
           !buffer.buffer().inner_buffer().usage_storage_texel_buffer()
        {
            return Err(BufferViewCreationError::WrongBufferUsage);
        }

        // TODO: check that format is supported? or check only when the view is used?

        // View covers exactly the slice that was passed in.
        let infos = vk::BufferViewCreateInfo {
            sType: vk::STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
            pNext: ptr::null(),
            flags: 0,   // reserved,
            buffer: buffer.resource.inner_buffer().internal_object(),
            format: format as u32,
            offset: buffer.offset as u64,
            range: buffer.size as u64,
        };

        let view = unsafe {
            let vk = device.pointers();
            // `output` is written by vkCreateBufferView before being read;
            // `check_errors` propagates any failure before we use it.
            let mut output = mem::uninitialized();
            try!(check_errors(vk.CreateBufferView(device.internal_object(), &infos,
                                                  ptr::null(), &mut output)));
            output
        };

        Ok(Arc::new(BufferView {
            view: view,
            buffer: buffer.resource.clone(),
            marker: PhantomData,
        }))
    }
}
unsafe impl<F, B> VulkanObject for BufferView<F, B> where B: Buffer {
    type Object = vk::BufferView;

    /// Returns the raw Vulkan buffer-view handle.
    #[inline]
    fn internal_object(&self) -> vk::BufferView {
        self.view
    }
}
impl<F, B> Drop for BufferView<F, B> where B: Buffer {
    #[inline]
    fn drop(&mut self) {
        // The `Arc<B>` field guarantees the buffer (and thus its device) is
        // still alive here, so the destroy call targets a valid device.
        unsafe {
            let vk = self.buffer.inner_buffer().device().pointers();
            vk.DestroyBufferView(self.buffer.inner_buffer().device().internal_object(), self.view,
                                 ptr::null());
        }
    }
}
/// Error that can happen when creating a buffer view.
#[derive(Debug, Copy, Clone)]
pub enum BufferViewCreationError {
    /// Out of memory.
    OomError(OomError),

    /// The buffer was not created with one of the `storage_texel_buffer` or
    /// `uniform_texel_buffer` usages.
    WrongBufferUsage,
}
impl error::Error for BufferViewCreationError {
    // Short, static description; also used by the `Display` implementation.
    #[inline]
    fn description(&self) -> &str {
        match *self {
            BufferViewCreationError::OomError(_) => "out of memory when creating buffer view",
            BufferViewCreationError::WrongBufferUsage => "the buffer is missing correct usage \
                                                          flags",
        }
    }

    // The underlying cause, when this error wraps another one.
    #[inline]
    fn cause(&self) -> Option<&error::Error> {
        match *self {
            BufferViewCreationError::OomError(ref err) => Some(err),
            _ => None,
        }
    }
}
impl fmt::Display for BufferViewCreationError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{}", error::Error::description(self))
}
}
impl From<OomError> for BufferViewCreationError {
    /// Wraps an out-of-memory error into the view-creation error type.
    #[inline]
    fn from(err: OomError) -> BufferViewCreationError {
        BufferViewCreationError::OomError(err)
    }
}
impl From<Error> for BufferViewCreationError {
#[inline]
fn from(err: Error) -> BufferViewCreationError {
OomError::from(err).into()
}
}
#[cfg(test)]
mod tests {
    use buffer::Buffer;
    use buffer::BufferView;
    use buffer::sys::Usage;
    use buffer::view::BufferViewCreationError;
    use buffer::immutable::ImmutableBuffer;
    use format;

    // Creating a view over a buffer built with the `uniform_texel_buffer`
    // usage must succeed.
    #[test]
    fn create_uniform() {
        let (device, queue) = gfx_dev_and_queue!();

        let usage = Usage {
            uniform_texel_buffer: true,
            .. Usage::none()
        };

        let buffer = ImmutableBuffer::<[i8]>::array(&device, 128, &usage,
                                                    Some(queue.family())).unwrap();
        let _ = BufferView::new(&buffer, format::R8Sscaled).unwrap();
    }

    // Same as above for the `storage_texel_buffer` usage.
    #[test]
    fn create_storage() {
        let (device, queue) = gfx_dev_and_queue!();

        let usage = Usage {
            storage_texel_buffer: true,
            .. Usage::none()
        };

        let buffer = ImmutableBuffer::<[i8]>::array(&device, 128, &usage,
                                                    Some(queue.family())).unwrap();
        let _ = BufferView::new(&buffer, format::R8Sscaled).unwrap();
    }

    /*#[test]
    fn wrong_usage() {
        let (device, queue) = gfx_dev_and_queue!();

        let buffer = Buffer::<[i8], _>::array(&device, 128, &Usage::none(), DeviceLocal,
                                              &queue).unwrap();

        match BufferView::new(&buffer) {
            Err(BufferViewCreationError::WrongBufferUsage) => (),
            _ => panic!()
        }
    }*/
}
Check the format of buffer views
use std::marker::PhantomData;
use std::error;
use std::fmt;
use std::mem;
use std::ptr;
use std::sync::Arc;
use buffer::Buffer;
use buffer::BufferSlice;
use format::StrongStorage;
use Error;
use OomError;
use VulkanObject;
use VulkanPointers;
use check_errors;
use vk;
/// Represents a way for the GPU to interpret buffer data.
///
/// Note that a buffer view is only required for some operations. For example using a buffer as a
/// uniform buffer doesn't require creating a `BufferView`.
pub struct BufferView<F, B> where B: Buffer {
    // Raw Vulkan handle of the view.
    view: vk::BufferView,
    // Keeps the viewed buffer alive for as long as the view exists.
    buffer: Arc<B>,
    // The format type parameter only exists at compile time.
    marker: PhantomData<F>,
    // True when the chosen format reports
    // `FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT` for buffers.
    atomic_accesses: bool,
}
impl<F, B> BufferView<F, B> where B: Buffer {
    /// Builds a new buffer view.
    ///
    /// The format of the view will be automatically determined by the `T` parameter.
    ///
    /// The buffer must have been created with either the `uniform_texel_buffer` or
    /// the `storage_texel_buffer` usage or an error will occur.
    pub fn new<'a, S>(buffer: S, format: F)
                      -> Result<Arc<BufferView<F, B>>, BufferViewCreationError>
        where S: Into<BufferSlice<'a, [F::Pixel], B>>, B: 'static, F: StrongStorage + 'static
    {
        let buffer = buffer.into();
        // NOTE(review): `buffer.resource` and `buffer.buffer()` are both used
        // below — confirm they always refer to the same underlying buffer.
        let device = buffer.resource.inner_buffer().device();
        let format = format.format();

        // The buffer must carry at least one of the two texel-buffer usages.
        if !buffer.buffer().inner_buffer().usage_uniform_texel_buffer() &&
           !buffer.buffer().inner_buffer().usage_storage_texel_buffer()
        {
            return Err(BufferViewCreationError::WrongBufferUsage);
        }

        // Ask the physical device which texel-buffer features it supports for
        // this format.
        let format_props = unsafe {
            let vk_i = device.instance().pointers();
            let mut output = mem::uninitialized();
            vk_i.GetPhysicalDeviceFormatProperties(device.physical_device().internal_object(),
                                                   format as u32, &mut output);
            output.bufferFeatures
        };

        // The format must support every usage the buffer was created with.
        if buffer.buffer().inner_buffer().usage_uniform_texel_buffer() {
            if (format_props & vk::FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) == 0 {
                return Err(BufferViewCreationError::UnsupportedFormat);
            }
        }
        if buffer.buffer().inner_buffer().usage_storage_texel_buffer() {
            if (format_props & vk::FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) == 0 {
                return Err(BufferViewCreationError::UnsupportedFormat);
            }
        }

        // The view covers exactly the slice's offset/size within the buffer.
        let infos = vk::BufferViewCreateInfo {
            sType: vk::STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
            pNext: ptr::null(),
            flags: 0,   // reserved,
            buffer: buffer.resource.inner_buffer().internal_object(),
            format: format as u32,
            offset: buffer.offset as u64,
            range: buffer.size as u64,
        };

        let view = unsafe {
            let vk = device.pointers();
            let mut output = mem::uninitialized();
            try!(check_errors(vk.CreateBufferView(device.internal_object(), &infos,
                                                  ptr::null(), &mut output)));
            output
        };

        Ok(Arc::new(BufferView {
            view: view,
            buffer: buffer.resource.clone(),
            marker: PhantomData,
            atomic_accesses: (format_props &
                              vk::FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT) != 0,
        }))
    }
}
unsafe impl<F, B> VulkanObject for BufferView<F, B> where B: Buffer {
    type Object = vk::BufferView;

    /// Returns the raw Vulkan handle of the buffer view.
    #[inline]
    fn internal_object(&self) -> vk::BufferView {
        self.view
    }
}
impl<F, B> Drop for BufferView<F, B> where B: Buffer {
    #[inline]
    fn drop(&mut self) {
        // The `Arc<B>` in `self.buffer` keeps the buffer — and therefore its
        // device — alive, so destroying the view here is valid.
        unsafe {
            let vk = self.buffer.inner_buffer().device().pointers();
            vk.DestroyBufferView(self.buffer.inner_buffer().device().internal_object(), self.view,
                                 ptr::null());
        }
    }
}
/// Error that can happen when creating a buffer view.
#[derive(Debug, Copy, Clone)]
pub enum BufferViewCreationError {
    /// Out of memory.
    OomError(OomError),

    /// The buffer was not created with one of the `storage_texel_buffer` or
    /// `uniform_texel_buffer` usages.
    WrongBufferUsage,

    /// The requested format is not supported for this usage.
    UnsupportedFormat,
}
impl error::Error for BufferViewCreationError {
    /// Short, human-readable description of the error variant.
    #[inline]
    fn description(&self) -> &str {
        match *self {
            BufferViewCreationError::OomError(_) => "out of memory when creating buffer view",
            BufferViewCreationError::WrongBufferUsage => "the buffer is missing correct usage \
                                                          flags",
            BufferViewCreationError::UnsupportedFormat => "the requested format is not supported \
                                                           for this usage",
        }
    }

    /// Returns the wrapped `OomError` as the underlying cause, if any.
    #[inline]
    fn cause(&self) -> Option<&error::Error> {
        match *self {
            BufferViewCreationError::OomError(ref err) => Some(err),
            _ => None,
        }
    }
}
impl fmt::Display for BufferViewCreationError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{}", error::Error::description(self))
}
}
impl From<OomError> for BufferViewCreationError {
#[inline]
fn from(err: OomError) -> BufferViewCreationError {
BufferViewCreationError::OomError(err)
}
}
impl From<Error> for BufferViewCreationError {
#[inline]
fn from(err: Error) -> BufferViewCreationError {
OomError::from(err).into()
}
}
#[cfg(test)]
mod tests {
    use buffer::BufferView;
    use buffer::sys::Usage;
    use buffer::view::BufferViewCreationError;
    use buffer::immutable::ImmutableBuffer;
    use format;

    /// A view over a buffer created with `uniform_texel_buffer` usage succeeds.
    #[test]
    fn create_uniform() {
        // `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
        let (device, queue) = gfx_dev_and_queue!();

        let usage = Usage {
            uniform_texel_buffer: true,
            .. Usage::none()
        };

        let buffer = ImmutableBuffer::<[[u8; 4]]>::array(&device, 128, &usage,
                                                         Some(queue.family())).unwrap();
        let _ = BufferView::new(&buffer, format::R8G8B8A8Unorm).unwrap();
    }

    /// A view over a buffer created with `storage_texel_buffer` usage succeeds.
    #[test]
    fn create_storage() {
        // `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
        let (device, queue) = gfx_dev_and_queue!();

        let usage = Usage {
            storage_texel_buffer: true,
            .. Usage::none()
        };

        let buffer = ImmutableBuffer::<[[u8; 4]]>::array(&device, 128, &usage,
                                                         Some(queue.family())).unwrap();
        let _ = BufferView::new(&buffer, format::R8G8B8A8Unorm).unwrap();
    }

    /// Buffers without either texel-buffer usage must be rejected.
    #[test]
    fn wrong_usage() {
        // `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
        let (device, queue) = gfx_dev_and_queue!();

        let buffer = ImmutableBuffer::<[[u8; 4]]>::array(&device, 128, &Usage::none(),
                                                         Some(queue.family())).unwrap();

        match BufferView::new(&buffer, format::R8G8B8A8Unorm) {
            Err(BufferViewCreationError::WrongBufferUsage) => (),
            _ => panic!()
        }
    }

    /// A format without the required buffer features must be rejected.
    #[test]
    fn unsupported_format() {
        let (device, queue) = gfx_dev_and_queue!();

        let usage = Usage {
            uniform_texel_buffer: true,
            storage_texel_buffer: true,
            .. Usage::none()
        };

        let buffer = ImmutableBuffer::<[[f64; 4]]>::array(&device, 128, &usage,
                                                          Some(queue.family())).unwrap();

        // TODO: what if R64G64B64A64Sfloat is supported?
        match BufferView::new(&buffer, format::R64G64B64A64Sfloat) {
            Err(BufferViewCreationError::UnsupportedFormat) => (),
            _ => panic!()
        }
    }
}
|
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_upper_case_globals)]
use base::{ObjCMethodCall, id, SEL, nil, NSInteger, NSUInteger};
use libc;
#[cfg(target_word_size = "32")]
pub type CGFloat = f32;
#[cfg(target_word_size = "64")]
pub type CGFloat = f64;
/// A Cartesian point, laid out to match AppKit's `NSPoint`.
#[repr(C)]
pub struct NSPoint {
    pub x: f64,
    pub y: f64,
}

impl NSPoint {
    /// Builds a point from its `x` and `y` coordinates.
    #[inline]
    pub fn new(x: f64, y: f64) -> NSPoint {
        NSPoint { x: x, y: y }
    }
}
/// A width/height pair, laid out to match AppKit's `NSSize`.
#[repr(C)]
pub struct NSSize {
    pub width: f64,
    pub height: f64,
}

impl NSSize {
    /// Builds a size from its width and height.
    #[inline]
    pub fn new(width: f64, height: f64) -> NSSize {
        NSSize { width: width, height: height }
    }
}
/// An origin/size rectangle, laid out to match AppKit's `NSRect`.
#[repr(C)]
pub struct NSRect {
    pub origin: NSPoint,
    pub size: NSSize,
}

impl NSRect {
    /// Builds a rectangle from an origin point and a size.
    #[inline]
    pub fn new(origin: NSPoint, size: NSSize) -> NSRect {
        NSRect { origin: origin, size: size }
    }
}
// Link against the AppKit framework; the extern block declares no symbols.
#[link(name = "AppKit", kind = "framework")]
extern {}

/// Returns the shared application object (`[NSApplication sharedApplication]`).
pub unsafe fn NSApp() -> id {
    "NSApplication".send("sharedApplication", ())
}
/// Values accepted by `setActivationPolicy:` (sent as a raw `NSInteger`).
#[repr(i64)]
pub enum NSApplicationActivationPolicy {
    NSApplicationActivationPolicyRegular = 0,
    NSApplicationActivationPolicyERROR = -1
}
/// Window style mask bits, combined into the `style` argument of
/// `initWithContentRect:styleMask:backing:defer:`.
#[repr(u64)]
pub enum NSWindowMask {
    NSBorderlessWindowMask      = 0,
    NSTitledWindowMask          = 1 << 0,
    NSClosableWindowMask        = 1 << 1,
    NSMiniaturizableWindowMask  = 1 << 2,
    NSResizableWindowMask       = 1 << 3,

    NSTexturedBackgroundWindowMask  = 1 << 8,

    NSUnifiedTitleAndToolbarWindowMask  = 1 << 12,

    NSFullScreenWindowMask      = 1 << 14
}
/// Backing store types; sent as a raw `NSUInteger` in the window initializer.
#[repr(u64)]
pub enum NSBackingStoreType {
    NSBackingStoreRetained      = 0,
    NSBackingStoreNonretained   = 1,
    NSBackingStoreBuffered      = 2
}
// Ordering modes for `orderWindow:relativeTo:`.
bitflags! {
    flags NSWindowOrderingMode: NSInteger {
        const NSWindowAbove = 1,
        const NSWindowBelow = -1,
        const NSWindowOut = 0,
    }
}
// Rect-alignment option bits for `backingAlignedRect:options:`.
bitflags! {
    flags NSAlignmentOptions: libc::c_ulonglong {
        const NSAlignMinXInward         = 1 << 0,
        const NSAlignMinYInward         = 1 << 1,
        const NSAlignMaxXInward         = 1 << 2,
        const NSAlignMaxYInward         = 1 << 3,
        const NSAlignWidthInward        = 1 << 4,
        const NSAlignHeightInward       = 1 << 5,
        const NSAlignMinXOutward        = 1 << 8,
        const NSAlignMinYOutward        = 1 << 9,
        const NSAlignMaxXOutward        = 1 << 10,
        const NSAlignMaxYOutward        = 1 << 11,
        const NSAlignWidthOutward       = 1 << 12,
        const NSAlignHeightOutward      = 1 << 13,
        const NSAlignMinXNearest        = 1 << 16,
        const NSAlignMinYNearest        = 1 << 17,
        const NSAlignMaxXNearest        = 1 << 18,
        const NSAlignMaxYNearest        = 1 << 19,
        const NSAlignWidthNearest       = 1 << 20,
        const NSAlignHeightNearest      = 1 << 21,
        const NSAlignRectFlipped        = 1 << 63,
        const NSAlignAllEdgesInward     = NSAlignMinXInward.bits
                                        | NSAlignMaxXInward.bits
                                        | NSAlignMinYInward.bits
                                        | NSAlignMaxYInward.bits,
        const NSAlignAllEdgesOutward    = NSAlignMinXOutward.bits
                                        | NSAlignMaxXOutward.bits
                                        | NSAlignMinYOutward.bits
                                        | NSAlignMaxYOutward.bits,
        const NSAlignAllEdgesNearest    = NSAlignMinXNearest.bits
                                        | NSAlignMaxXNearest.bits
                                        | NSAlignMinYNearest.bits
                                        | NSAlignMaxYNearest.bits,
    }
}
/// Message wrappers for `NSAutoreleasePool`.
pub trait NSAutoreleasePool {
    /// `[NSAutoreleasePool new]`.
    unsafe fn new(_: Self) -> id {
        "NSAutoreleasePool".send("new", ())
    }
    /// Sends `autorelease` to the receiver.
    unsafe fn autorelease(self) -> Self;
}

impl NSAutoreleasePool for id {
    unsafe fn autorelease(self) -> id {
        self.send("autorelease", ())
    }
}
/// Message wrappers for `NSProcessInfo`.
pub trait NSProcessInfo {
    /// `[NSProcessInfo processInfo]`.
    unsafe fn processInfo(_: Self) -> id {
        "NSProcessInfo".send("processInfo", ())
    }
    /// Sends `processName` to the receiver.
    unsafe fn processName(self) -> id;
}

impl NSProcessInfo for id {
    unsafe fn processName(self) -> id {
        self.send("processName", ())
    }
}
/// Message wrappers for `NSApplication`.
pub trait NSApplication {
    /// `[NSApplication sharedApplication]`.
    unsafe fn sharedApplication(_: Self) -> id {
        "NSApplication".send("sharedApplication", ())
    }
    unsafe fn setActivationPolicy_(self, policy: NSApplicationActivationPolicy) -> bool;
    unsafe fn setMainMenu_(self, menu: id);
    unsafe fn activateIgnoringOtherApps_(self, ignore: bool);
    unsafe fn run(self);
}

impl NSApplication for id {
    unsafe fn setActivationPolicy_(self, policy: NSApplicationActivationPolicy) -> bool {
        // The policy enum is sent as its raw NSInteger value.
        self.send_bool("setActivationPolicy:", policy as NSInteger)
    }
    unsafe fn setMainMenu_(self, menu: id) {
        self.send_void("setMainMenu:", menu)
    }
    unsafe fn activateIgnoringOtherApps_(self, ignore: bool) {
        self.send_void("activateIgnoringOtherApps:", ignore);
    }
    unsafe fn run(self) {
        self.send_void("run", ());
    }
}
/// Message wrappers for `NSMenu`.
pub trait NSMenu {
    /// `[NSMenu new]`.
    unsafe fn new(_: Self) -> id {
        "NSMenu".send("new", ())
    }
    unsafe fn addItem_(self, menu_item: id);
}

impl NSMenu for id {
    unsafe fn addItem_(self, menu_item: id) {
        self.send_void("addItem:", menu_item)
    }
}
/// Message wrappers for `NSMenuItem`.
pub trait NSMenuItem {
    /// `[NSMenuItem alloc]`.
    unsafe fn alloc(_: Self) -> id {
        "NSMenuItem".send("alloc", ())
    }
    /// `[NSMenuItem new]`.
    unsafe fn new(_: Self) -> id {
        "NSMenuItem".send("new", ())
    }
    unsafe fn initWithTitle_action_keyEquivalent_(self, title: id, action: SEL, key: id) -> id;
    unsafe fn setSubmenu_(self, submenu: id);
}

impl NSMenuItem for id {
    unsafe fn initWithTitle_action_keyEquivalent_(self, title: id, action: SEL, key: id) -> id {
        self.send("initWithTitle:action:keyEquivalent:", (title, action, key))
    }
    unsafe fn setSubmenu_(self, submenu: id) {
        self.send_void("setSubmenu:", submenu)
    }
}
/// Message wrappers for `NSWindow`; see the `impl` below for the selectors
/// actually sent.
pub trait NSWindow {
    /// `[NSWindow alloc]`.
    unsafe fn alloc(_: Self) -> id {
        "NSWindow".send("alloc", ())
    }

    unsafe fn initWithContentRect_styleMask_backing_defer_(self,
                                                           rect: NSRect,
                                                           style: NSUInteger,
                                                           backing: NSBackingStoreType,
                                                           defer: bool) -> id;
    unsafe fn makeKeyAndOrderFront_(self, sender: id);

    // Sizing Windows
    unsafe fn frame(self) -> NSRect;
    unsafe fn setFrameOrigin_(self, point: NSPoint);
    unsafe fn setFrameTopLeftPoint_(self, point: NSPoint);
    // skipped: constrainFrameRect_toScreen_
    unsafe fn cascadeTopLeftFromPoint_(self, topLeft: NSPoint) -> NSPoint;
    unsafe fn setFrame_displayViews_(self, windowFrame: NSRect, display: bool);
    unsafe fn aspectRatio(self) -> NSSize;
    unsafe fn setAspectRatio_(self, aspectRatio: NSSize);
    unsafe fn minSize(self) -> NSSize;
    unsafe fn setMinSize_(self, minSize: NSSize);
    unsafe fn maxSize(self) -> NSSize;
    unsafe fn setMaxSize_(self, maxSize: NSSize);
    unsafe fn performZoom_(self, sender: id);
    unsafe fn zoom_(self, sender: id);
    // skipped: resizeFlags
    unsafe fn showsResizeIndicator(self) -> bool;
    unsafe fn setShowsResizeIndicator_(self, showsResizeIndicator: bool);
    unsafe fn resizeIncrements(self) -> NSSize;
    unsafe fn setResizeIncrements_(self, resizeIncrements: NSSize);
    unsafe fn preservesContentDuringLiveResize(self) -> bool;
    unsafe fn setPreservesContentDuringLiveResize_(self, preservesContentDuringLiveResize: bool);
    unsafe fn inLiveResize(self) -> bool;

    // Managing Window Layers
    unsafe fn orderOut_(self, sender: id);
    unsafe fn orderBack_(self, sender: id);
    unsafe fn orderFront_(self, sender: id);
    unsafe fn orderFrontRegardless(self);
    unsafe fn orderFrontWindow_relativeTo_(self, orderingMode: NSWindowOrderingMode, otherWindowNumber: NSInteger);
    unsafe fn level(self) -> NSInteger;
    unsafe fn setLevel_(self, level: NSInteger);

    // Converting Coordinates
    unsafe fn backingScaleFactor(self) -> CGFloat;
    unsafe fn backingAlignedRect_options_(self, rect: NSRect, options: NSAlignmentOptions) -> NSRect;
    unsafe fn convertRectFromBacking_(self, rect: NSRect) -> NSRect;
    unsafe fn convertRectToBacking_(self, rect: NSRect) -> NSRect;
    unsafe fn convertRectToScreen_(self, rect: NSRect) -> NSRect;
    unsafe fn convertRectFromScreen_(self, rect: NSRect) -> NSRect;

    // Managing Titles
    unsafe fn title(self) -> id;
    unsafe fn setTitle_(self, title: id);
    unsafe fn setTitleWithRepresentedFilename_(self, filePath: id);
    unsafe fn representedFilename(self) -> id;
    unsafe fn setRepresentedFilename_(self, filePath: id);
    // skipped: representedURL
    // skipped: setRepresentedURL_

    // Moving Windows
    unsafe fn setMovableByWindowBackground_(self, movableByWindowBackground: bool);
    unsafe fn setMovable_(self, movable: bool);
    unsafe fn center(self);

    // Closing Windows
    unsafe fn performClose_(self, sender: id);
    unsafe fn close(self);
    unsafe fn setReleasedWhenClosed_(self, releasedWhenClosed: bool);
}
/// Message-send implementations of the `NSWindow` wrappers on raw `id`.
impl NSWindow for id {
    unsafe fn initWithContentRect_styleMask_backing_defer_(self,
                                                           rect: NSRect,
                                                           style: NSUInteger,
                                                           backing: NSBackingStoreType,
                                                           defer: bool) -> id {
        // `backing` is sent as its raw NSUInteger value.
        self.send("initWithContentRect:styleMask:backing:defer:",
                  (rect, style, backing as NSUInteger, defer))
    }

    unsafe fn makeKeyAndOrderFront_(self, sender: id) {
        self.send_void("makeKeyAndOrderFront:", sender)
    }

    // Sizing Windows

    unsafe fn frame(self) -> NSRect {
        self.send_rect("frame", ())
    }

    unsafe fn setFrameOrigin_(self, point: NSPoint) {
        self.send_void("setFrameOrigin:", point);
    }

    unsafe fn setFrameTopLeftPoint_(self, point: NSPoint) {
        self.send_void("setFrameTopLeftPoint:", point);
    }

    unsafe fn cascadeTopLeftFromPoint_(self, topLeft: NSPoint) -> NSPoint {
        self.send_point("cascadeTopLeftFromPoint:", topLeft)
    }

    unsafe fn setFrame_displayViews_(self, windowFrame: NSRect, display: bool) {
        // NOTE(review): AppKit's selector is `setFrame:display:`;
        // `setFrame:displayViews:` looks like a typo — confirm the intended
        // selector before relying on this method.
        self.send_void("setFrame:displayViews:", (windowFrame, display));
    }

    unsafe fn aspectRatio(self) -> NSSize {
        self.send_size("aspectRatio", ())
    }

    unsafe fn setAspectRatio_(self, aspectRatio: NSSize) {
        self.send_void("setAspectRatio:", aspectRatio);
    }

    unsafe fn minSize(self) -> NSSize {
        self.send_size("minSize", ())
    }

    unsafe fn setMinSize_(self, minSize: NSSize) {
        self.send_void("setMinSize:", minSize);
    }

    unsafe fn maxSize(self) -> NSSize {
        self.send_size("maxSize", ())
    }

    unsafe fn setMaxSize_(self, maxSize: NSSize) {
        self.send_void("setMaxSize:", maxSize);
    }

    unsafe fn performZoom_(self, sender: id) {
        self.send_void("performZoom:", sender);
    }

    unsafe fn zoom_(self, sender: id) {
        self.send_void("zoom:", sender);
    }

    unsafe fn showsResizeIndicator(self) -> bool {
        self.send_bool("showsResizeIndicator", ())
    }

    unsafe fn setShowsResizeIndicator_(self, showsResizeIndicator: bool) {
        self.send_void("setShowsResizeIndicator:", showsResizeIndicator)
    }

    unsafe fn resizeIncrements(self) -> NSSize {
        self.send_size("resizeIncrements", ())
    }

    unsafe fn setResizeIncrements_(self, resizeIncrements: NSSize) {
        self.send_void("setResizeIncrements:", resizeIncrements);
    }

    unsafe fn preservesContentDuringLiveResize(self) -> bool {
        self.send_bool("preservesContentDuringLiveResize", ())
    }

    unsafe fn setPreservesContentDuringLiveResize_(self, preservesContentDuringLiveResize: bool) {
        self.send_void("setPreservesContentDuringLiveResize:", preservesContentDuringLiveResize)
    }

    unsafe fn inLiveResize(self) -> bool {
        self.send_bool("inLiveResize", ())
    }

    // Managing Window Layers

    unsafe fn orderOut_(self, sender: id) {
        self.send_void("orderOut:", sender);
    }

    unsafe fn orderBack_(self, sender: id) {
        self.send_void("orderBack:", sender);
    }

    unsafe fn orderFront_(self, sender: id) {
        self.send_void("orderFront:", sender);
    }

    unsafe fn orderFrontRegardless(self) {
        self.send_void("orderFrontRegardless", ());
    }

    unsafe fn orderFrontWindow_relativeTo_(self, ordering_mode: NSWindowOrderingMode, other_window_number: NSInteger) {
        // NOTE(review): the selector sent is `orderWindow:relativeTo:`, but
        // the Rust method is named `orderFrontWindow_relativeTo_` — the name
        // and selector disagree; confirm which is intended.
        self.send_void("orderWindow:relativeTo:", (ordering_mode, other_window_number));
    }

    unsafe fn level(self) -> NSInteger {
        self.send_integer("level", ())
    }

    unsafe fn setLevel_(self, level: NSInteger) {
        self.send_void("setLevel:", level);
    }

    // Converting Coordinates

    unsafe fn backingScaleFactor(self) -> CGFloat {
        self.send_float("backingScaleFactor", ())
    }

    unsafe fn backingAlignedRect_options_(self, rect: NSRect, options: NSAlignmentOptions) -> NSRect {
        self.send_rect("backingAlignedRect:options:", (rect, options))
    }

    unsafe fn convertRectFromBacking_(self, rect: NSRect) -> NSRect {
        self.send_rect("convertRectFromBacking:", rect)
    }

    unsafe fn convertRectToBacking_(self, rect: NSRect) -> NSRect {
        self.send_rect("convertRectToBacking:", rect)
    }

    unsafe fn convertRectToScreen_(self, rect: NSRect) -> NSRect {
        self.send_rect("convertRectToScreen:", rect)
    }

    unsafe fn convertRectFromScreen_(self, rect: NSRect) -> NSRect {
        self.send_rect("convertRectFromScreen:", rect)
    }

    // Managing Titles

    unsafe fn title(self) -> id {
        self.send("title", ())
    }

    unsafe fn setTitle_(self, title: id) {
        self.send_void("setTitle:", title);
    }

    unsafe fn setTitleWithRepresentedFilename_(self, filePath: id) {
        self.send_void("setTitleWithRepresentedFilename:", filePath);
    }

    unsafe fn representedFilename(self) -> id {
        self.send("representedFilename", ())
    }

    unsafe fn setRepresentedFilename_(self, filePath: id) {
        self.send_void("setRepresentedFilename:", filePath);
    }

    // Moving Windows

    unsafe fn setMovableByWindowBackground_(self, movableByWindowBackground: bool) {
        self.send_void("setMovableByWindowBackground:", movableByWindowBackground);
    }

    unsafe fn setMovable_(self, movable: bool) {
        self.send_void("setMovable:", movable);
    }

    unsafe fn center(self) {
        self.send_void("center", ());
    }

    // Closing Windows

    unsafe fn performClose_(self, sender: id) {
        self.send_void("performClose:", sender);
    }

    unsafe fn close(self) {
        self.send_void("close", ());
    }

    unsafe fn setReleasedWhenClosed_(self, releasedWhenClosed: bool) {
        self.send_void("setReleasedWhenClosed:", releasedWhenClosed);
    }
}
/// Message wrappers for `NSString`.
pub trait NSString {
    /// `[NSString alloc]`.
    unsafe fn alloc(_: Self) -> id {
        "NSString".send("alloc", ())
    }
    /// `initWithUTF8String:` — `c_string` must point to NUL-terminated UTF-8.
    unsafe fn initWithUTF8String_(self, c_string: *const u8) -> id;
    unsafe fn stringByAppendingString_(self, other: id) -> id;
    /// Convenience initializer from a Rust `&str`.
    unsafe fn init_str(self, string: &str) -> Self;
}
impl NSString for id {
    /// `initWithUTF8String:` — the caller must supply a NUL-terminated
    /// UTF-8 buffer; the pointer is smuggled through the `id` argument slot.
    unsafe fn initWithUTF8String_(self, c_string: *const u8) -> id {
        self.send("initWithUTF8String:", c_string as id)
    }

    /// `stringByAppendingString:`.
    unsafe fn stringByAppendingString_(self, other: id) -> id {
        self.send("stringByAppendingString:", other)
    }

    /// Initializes the receiver from a Rust string slice.
    ///
    /// Rust `&str` is not NUL-terminated, so a terminated copy is built
    /// before the pointer is handed to `initWithUTF8String:`; passing
    /// `string.as_ptr()` directly would let Objective-C read past the end
    /// of the slice.
    unsafe fn init_str(self, string: &str) -> id {
        let terminated = format!("{}\0", string);
        self.initWithUTF8String_(terminated.as_ptr())
    }
}
Add window minimization methods
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_upper_case_globals)]
use base::{ObjCMethodCall, id, SEL, nil, NSInteger, NSUInteger};
use libc;
#[cfg(target_word_size = "32")]
pub type CGFloat = f32;
#[cfg(target_word_size = "64")]
pub type CGFloat = f64;
/// A Cartesian point, laid out to match AppKit's `NSPoint`.
#[repr(C)]
pub struct NSPoint {
    pub x: f64,
    pub y: f64,
}

impl NSPoint {
    /// Builds a point from its `x` and `y` coordinates.
    #[inline]
    pub fn new(x: f64, y: f64) -> NSPoint {
        NSPoint { x: x, y: y }
    }
}
/// A width/height pair, laid out to match AppKit's `NSSize`.
#[repr(C)]
pub struct NSSize {
    pub width: f64,
    pub height: f64,
}

impl NSSize {
    /// Builds a size from its width and height.
    #[inline]
    pub fn new(width: f64, height: f64) -> NSSize {
        NSSize { width: width, height: height }
    }
}
/// An origin/size rectangle, laid out to match AppKit's `NSRect`.
#[repr(C)]
pub struct NSRect {
    pub origin: NSPoint,
    pub size: NSSize,
}

impl NSRect {
    /// Builds a rectangle from an origin point and a size.
    #[inline]
    pub fn new(origin: NSPoint, size: NSSize) -> NSRect {
        NSRect { origin: origin, size: size }
    }
}
// Link against the AppKit framework; the extern block declares no symbols.
#[link(name = "AppKit", kind = "framework")]
extern {}

/// Returns the shared application object (`[NSApplication sharedApplication]`).
pub unsafe fn NSApp() -> id {
    "NSApplication".send("sharedApplication", ())
}
/// Values accepted by `setActivationPolicy:` (sent as a raw `NSInteger`).
#[repr(i64)]
pub enum NSApplicationActivationPolicy {
    NSApplicationActivationPolicyRegular = 0,
    NSApplicationActivationPolicyERROR = -1
}
/// Window style mask bits, combined into the `style` argument of
/// `initWithContentRect:styleMask:backing:defer:`.
#[repr(u64)]
pub enum NSWindowMask {
    NSBorderlessWindowMask      = 0,
    NSTitledWindowMask          = 1 << 0,
    NSClosableWindowMask        = 1 << 1,
    NSMiniaturizableWindowMask  = 1 << 2,
    NSResizableWindowMask       = 1 << 3,

    NSTexturedBackgroundWindowMask  = 1 << 8,

    NSUnifiedTitleAndToolbarWindowMask  = 1 << 12,

    NSFullScreenWindowMask      = 1 << 14
}
/// Backing store types; sent as a raw `NSUInteger` in the window initializer.
#[repr(u64)]
pub enum NSBackingStoreType {
    NSBackingStoreRetained      = 0,
    NSBackingStoreNonretained   = 1,
    NSBackingStoreBuffered      = 2
}
// Ordering modes for `orderWindow:relativeTo:`.
bitflags! {
    flags NSWindowOrderingMode: NSInteger {
        const NSWindowAbove = 1,
        const NSWindowBelow = -1,
        const NSWindowOut = 0,
    }
}
// Rect-alignment option bits for `backingAlignedRect:options:`.
bitflags! {
    flags NSAlignmentOptions: libc::c_ulonglong {
        const NSAlignMinXInward         = 1 << 0,
        const NSAlignMinYInward         = 1 << 1,
        const NSAlignMaxXInward         = 1 << 2,
        const NSAlignMaxYInward         = 1 << 3,
        const NSAlignWidthInward        = 1 << 4,
        const NSAlignHeightInward       = 1 << 5,
        const NSAlignMinXOutward        = 1 << 8,
        const NSAlignMinYOutward        = 1 << 9,
        const NSAlignMaxXOutward        = 1 << 10,
        const NSAlignMaxYOutward        = 1 << 11,
        const NSAlignWidthOutward       = 1 << 12,
        const NSAlignHeightOutward      = 1 << 13,
        const NSAlignMinXNearest        = 1 << 16,
        const NSAlignMinYNearest        = 1 << 17,
        const NSAlignMaxXNearest        = 1 << 18,
        const NSAlignMaxYNearest        = 1 << 19,
        const NSAlignWidthNearest       = 1 << 20,
        const NSAlignHeightNearest      = 1 << 21,
        const NSAlignRectFlipped        = 1 << 63,
        const NSAlignAllEdgesInward     = NSAlignMinXInward.bits
                                        | NSAlignMaxXInward.bits
                                        | NSAlignMinYInward.bits
                                        | NSAlignMaxYInward.bits,
        const NSAlignAllEdgesOutward    = NSAlignMinXOutward.bits
                                        | NSAlignMaxXOutward.bits
                                        | NSAlignMinYOutward.bits
                                        | NSAlignMaxYOutward.bits,
        const NSAlignAllEdgesNearest    = NSAlignMinXNearest.bits
                                        | NSAlignMaxXNearest.bits
                                        | NSAlignMinYNearest.bits
                                        | NSAlignMaxYNearest.bits,
    }
}
/// Message wrappers for `NSAutoreleasePool`.
pub trait NSAutoreleasePool {
    /// `[NSAutoreleasePool new]`.
    unsafe fn new(_: Self) -> id {
        "NSAutoreleasePool".send("new", ())
    }
    /// Sends `autorelease` to the receiver.
    unsafe fn autorelease(self) -> Self;
}

impl NSAutoreleasePool for id {
    unsafe fn autorelease(self) -> id {
        self.send("autorelease", ())
    }
}
/// Message wrappers for `NSProcessInfo`.
pub trait NSProcessInfo {
    /// `[NSProcessInfo processInfo]`.
    unsafe fn processInfo(_: Self) -> id {
        "NSProcessInfo".send("processInfo", ())
    }
    /// Sends `processName` to the receiver.
    unsafe fn processName(self) -> id;
}

impl NSProcessInfo for id {
    unsafe fn processName(self) -> id {
        self.send("processName", ())
    }
}
/// Message wrappers for `NSApplication`.
pub trait NSApplication {
    /// `[NSApplication sharedApplication]`.
    unsafe fn sharedApplication(_: Self) -> id {
        "NSApplication".send("sharedApplication", ())
    }
    unsafe fn setActivationPolicy_(self, policy: NSApplicationActivationPolicy) -> bool;
    unsafe fn setMainMenu_(self, menu: id);
    unsafe fn activateIgnoringOtherApps_(self, ignore: bool);
    unsafe fn run(self);
}

impl NSApplication for id {
    unsafe fn setActivationPolicy_(self, policy: NSApplicationActivationPolicy) -> bool {
        // The policy enum is sent as its raw NSInteger value.
        self.send_bool("setActivationPolicy:", policy as NSInteger)
    }
    unsafe fn setMainMenu_(self, menu: id) {
        self.send_void("setMainMenu:", menu)
    }
    unsafe fn activateIgnoringOtherApps_(self, ignore: bool) {
        self.send_void("activateIgnoringOtherApps:", ignore);
    }
    unsafe fn run(self) {
        self.send_void("run", ());
    }
}
/// Message wrappers for `NSMenu`.
pub trait NSMenu {
    /// `[NSMenu new]`.
    unsafe fn new(_: Self) -> id {
        "NSMenu".send("new", ())
    }
    unsafe fn addItem_(self, menu_item: id);
}

impl NSMenu for id {
    unsafe fn addItem_(self, menu_item: id) {
        self.send_void("addItem:", menu_item)
    }
}
/// Message wrappers for `NSMenuItem`.
pub trait NSMenuItem {
    /// `[NSMenuItem alloc]`.
    unsafe fn alloc(_: Self) -> id {
        "NSMenuItem".send("alloc", ())
    }
    /// `[NSMenuItem new]`.
    unsafe fn new(_: Self) -> id {
        "NSMenuItem".send("new", ())
    }
    unsafe fn initWithTitle_action_keyEquivalent_(self, title: id, action: SEL, key: id) -> id;
    unsafe fn setSubmenu_(self, submenu: id);
}

impl NSMenuItem for id {
    unsafe fn initWithTitle_action_keyEquivalent_(self, title: id, action: SEL, key: id) -> id {
        self.send("initWithTitle:action:keyEquivalent:", (title, action, key))
    }
    unsafe fn setSubmenu_(self, submenu: id) {
        self.send_void("setSubmenu:", submenu)
    }
}
/// Message wrappers for `NSWindow`; see the `impl` below for the selectors
/// actually sent.
pub trait NSWindow {
    /// `[NSWindow alloc]`.
    unsafe fn alloc(_: Self) -> id {
        "NSWindow".send("alloc", ())
    }

    unsafe fn initWithContentRect_styleMask_backing_defer_(self,
                                                           rect: NSRect,
                                                           style: NSUInteger,
                                                           backing: NSBackingStoreType,
                                                           defer: bool) -> id;
    unsafe fn makeKeyAndOrderFront_(self, sender: id);

    // Sizing Windows
    unsafe fn frame(self) -> NSRect;
    unsafe fn setFrameOrigin_(self, point: NSPoint);
    unsafe fn setFrameTopLeftPoint_(self, point: NSPoint);
    // skipped: constrainFrameRect_toScreen_
    unsafe fn cascadeTopLeftFromPoint_(self, topLeft: NSPoint) -> NSPoint;
    unsafe fn setFrame_displayViews_(self, windowFrame: NSRect, display: bool);
    unsafe fn aspectRatio(self) -> NSSize;
    unsafe fn setAspectRatio_(self, aspectRatio: NSSize);
    unsafe fn minSize(self) -> NSSize;
    unsafe fn setMinSize_(self, minSize: NSSize);
    unsafe fn maxSize(self) -> NSSize;
    unsafe fn setMaxSize_(self, maxSize: NSSize);
    unsafe fn performZoom_(self, sender: id);
    unsafe fn zoom_(self, sender: id);
    // skipped: resizeFlags
    unsafe fn showsResizeIndicator(self) -> bool;
    unsafe fn setShowsResizeIndicator_(self, showsResizeIndicator: bool);
    unsafe fn resizeIncrements(self) -> NSSize;
    unsafe fn setResizeIncrements_(self, resizeIncrements: NSSize);
    unsafe fn preservesContentDuringLiveResize(self) -> bool;
    unsafe fn setPreservesContentDuringLiveResize_(self, preservesContentDuringLiveResize: bool);
    unsafe fn inLiveResize(self) -> bool;

    // Managing Window Layers
    unsafe fn orderOut_(self, sender: id);
    unsafe fn orderBack_(self, sender: id);
    unsafe fn orderFront_(self, sender: id);
    unsafe fn orderFrontRegardless(self);
    unsafe fn orderFrontWindow_relativeTo_(self, orderingMode: NSWindowOrderingMode, otherWindowNumber: NSInteger);
    unsafe fn level(self) -> NSInteger;
    unsafe fn setLevel_(self, level: NSInteger);

    // Converting Coordinates
    unsafe fn backingScaleFactor(self) -> CGFloat;
    unsafe fn backingAlignedRect_options_(self, rect: NSRect, options: NSAlignmentOptions) -> NSRect;
    unsafe fn convertRectFromBacking_(self, rect: NSRect) -> NSRect;
    unsafe fn convertRectToBacking_(self, rect: NSRect) -> NSRect;
    unsafe fn convertRectToScreen_(self, rect: NSRect) -> NSRect;
    unsafe fn convertRectFromScreen_(self, rect: NSRect) -> NSRect;

    // Managing Titles
    unsafe fn title(self) -> id;
    unsafe fn setTitle_(self, title: id);
    unsafe fn setTitleWithRepresentedFilename_(self, filePath: id);
    unsafe fn representedFilename(self) -> id;
    unsafe fn setRepresentedFilename_(self, filePath: id);
    // skipped: representedURL
    // skipped: setRepresentedURL_

    // Moving Windows
    unsafe fn setMovableByWindowBackground_(self, movableByWindowBackground: bool);
    unsafe fn setMovable_(self, movable: bool);
    unsafe fn center(self);

    // Closing Windows
    unsafe fn performClose_(self, sender: id);
    unsafe fn close(self);
    unsafe fn setReleasedWhenClosed_(self, releasedWhenClosed: bool);

    // Minimizing Windows
    unsafe fn performMiniaturize_(self, sender: id);
    unsafe fn miniaturize_(self, sender: id);
    unsafe fn deminiaturize_(self, sender: id);
    // skipped: miniwindowImage
    // skipped: setMiniwindowImage
    unsafe fn miniwindowTitle(self) -> id;
    unsafe fn setMiniwindowTitle_(self, miniwindowTitle: id);
}
impl NSWindow for id {
unsafe fn initWithContentRect_styleMask_backing_defer_(self,
rect: NSRect,
style: NSUInteger,
backing: NSBackingStoreType,
defer: bool) -> id {
self.send("initWithContentRect:styleMask:backing:defer:",
(rect, style, backing as NSUInteger, defer))
}
unsafe fn makeKeyAndOrderFront_(self, sender: id) {
self.send_void("makeKeyAndOrderFront:", sender)
}
// Sizing Windows
unsafe fn frame(self) -> NSRect {
self.send_rect("frame", ())
}
unsafe fn setFrameOrigin_(self, point: NSPoint) {
self.send_void("setFrameOrigin:", point);
}
unsafe fn setFrameTopLeftPoint_(self, point: NSPoint) {
self.send_void("setFrameTopLeftPoint:", point);
}
unsafe fn cascadeTopLeftFromPoint_(self, topLeft: NSPoint) -> NSPoint {
self.send_point("cascadeTopLeftFromPoint:", topLeft)
}
unsafe fn setFrame_displayViews_(self, windowFrame: NSRect, display: bool) {
self.send_void("setFrame:displayViews:", (windowFrame, display));
}
unsafe fn aspectRatio(self) -> NSSize {
self.send_size("aspectRatio", ())
}
unsafe fn setAspectRatio_(self, aspectRatio: NSSize) {
self.send_void("setAspectRatio:", aspectRatio);
}
unsafe fn minSize(self) -> NSSize {
self.send_size("minSize", ())
}
unsafe fn setMinSize_(self, minSize: NSSize) {
self.send_void("setMinSize:", minSize);
}
unsafe fn maxSize(self) -> NSSize {
self.send_size("maxSize", ())
}
unsafe fn setMaxSize_(self, maxSize: NSSize) {
self.send_void("setMaxSize:", maxSize);
}
unsafe fn performZoom_(self, sender: id) {
self.send_void("performZoom:", sender);
}
unsafe fn zoom_(self, sender: id) {
self.send_void("zoom:", sender);
}
unsafe fn showsResizeIndicator(self) -> bool {
self.send_bool("showsResizeIndicator", ())
}
unsafe fn setShowsResizeIndicator_(self, showsResizeIndicator: bool) {
self.send_void("setShowsResizeIndicator:", showsResizeIndicator)
}
unsafe fn resizeIncrements(self) -> NSSize {
self.send_size("resizeIncrements", ())
}
unsafe fn setResizeIncrements_(self, resizeIncrements: NSSize) {
self.send_void("setResizeIncrements:", resizeIncrements);
}
unsafe fn preservesContentDuringLiveResize(self) -> bool {
self.send_bool("preservesContentDuringLiveResize", ())
}
unsafe fn setPreservesContentDuringLiveResize_(self, preservesContentDuringLiveResize: bool) {
self.send_void("setPreservesContentDuringLiveResize:", preservesContentDuringLiveResize)
}
unsafe fn inLiveResize(self) -> bool {
self.send_bool("inLiveResize", ())
}
// Managing Window Layers
unsafe fn orderOut_(self, sender: id) {
self.send_void("orderOut:", sender);
}
unsafe fn orderBack_(self, sender: id) {
self.send_void("orderBack:", sender);
}
unsafe fn orderFront_(self, sender: id) {
self.send_void("orderFront:", sender);
}
unsafe fn orderFrontRegardless(self) {
self.send_void("orderFrontRegardless", ());
}
unsafe fn orderFrontWindow_relativeTo_(self, ordering_mode: NSWindowOrderingMode, other_window_number: NSInteger) {
self.send_void("orderWindow:relativeTo:", (ordering_mode, other_window_number));
}
unsafe fn level(self) -> NSInteger {
self.send_integer("level", ())
}
unsafe fn setLevel_(self, level: NSInteger) {
self.send_void("setLevel:", level);
}
// Converting Coordinates
unsafe fn backingScaleFactor(self) -> CGFloat {
self.send_float("backingScaleFactor", ())
}
unsafe fn backingAlignedRect_options_(self, rect: NSRect, options: NSAlignmentOptions) -> NSRect {
self.send_rect("backingAlignedRect:options:", (rect, options))
}
unsafe fn convertRectFromBacking_(self, rect: NSRect) -> NSRect {
self.send_rect("convertRectFromBacking:", rect)
}
unsafe fn convertRectToBacking_(self, rect: NSRect) -> NSRect {
self.send_rect("convertRectToBacking:", rect)
}
unsafe fn convertRectToScreen_(self, rect: NSRect) -> NSRect {
self.send_rect("convertRectToScreen:", rect)
}
unsafe fn convertRectFromScreen_(self, rect: NSRect) -> NSRect {
self.send_rect("convertRectFromScreen:", rect)
}
// Managing Titles
/// `-title`: returns the window title as an `NSString` object.
unsafe fn title(self) -> id {
    self.send("title", ())
}
/// `-setTitle:`.
unsafe fn setTitle_(self, title: id) {
    self.send_void("setTitle:", title);
}
/// `-setTitleWithRepresentedFilename:`.
unsafe fn setTitleWithRepresentedFilename_(self, filePath: id) {
    self.send_void("setTitleWithRepresentedFilename:", filePath);
}
/// `-representedFilename`.
unsafe fn representedFilename(self) -> id {
    self.send("representedFilename", ())
}
/// `-setRepresentedFilename:`.
unsafe fn setRepresentedFilename_(self, filePath: id) {
    self.send_void("setRepresentedFilename:", filePath);
}
// Moving Windows
/// `-setMovableByWindowBackground:`.
unsafe fn setMovableByWindowBackground_(self, movableByWindowBackground: bool) {
    self.send_void("setMovableByWindowBackground:", movableByWindowBackground);
}
/// `-setMovable:`.
unsafe fn setMovable_(self, movable: bool) {
    self.send_void("setMovable:", movable);
}
/// `-center`: centers the window on screen.
unsafe fn center(self) {
    self.send_void("center", ());
}
// Closing Windows
/// `-performClose:`: simulates a click on the close button.
unsafe fn performClose_(self, sender: id) {
    self.send_void("performClose:", sender);
}
/// `-close`.
unsafe fn close(self) {
    self.send_void("close", ());
}
/// `-setReleasedWhenClosed:`.
unsafe fn setReleasedWhenClosed_(self, releasedWhenClosed: bool) {
    self.send_void("setReleasedWhenClosed:", releasedWhenClosed);
}
// Minimizing Windows
/// `-performMiniaturize:`: simulates a click on the minimize button.
unsafe fn performMiniaturize_(self, sender: id) {
    self.send_void("performMiniaturize:", sender);
}
/// `-miniaturize:`.
unsafe fn miniaturize_(self, sender: id) {
    self.send_void("miniaturize:", sender);
}
/// `-deminiaturize:`.
unsafe fn deminiaturize_(self, sender: id) {
    self.send_void("deminiaturize:", sender);
}
/// `-miniwindowTitle`: returns the title as an `NSString` object.
unsafe fn miniwindowTitle(self) -> id {
    self.send("miniwindowTitle", ())
}
/// `-setMiniwindowTitle:`.
unsafe fn setMiniwindowTitle_(self, miniwindowTitle: id) {
    self.send_void("setMiniwindowTitle:", miniwindowTitle);
}
}
/// Bindings for Objective-C `NSString`.
pub trait NSString {
    /// Sends `alloc` to the `NSString` class; pair the result with an
    /// `init…` call before use.
    unsafe fn alloc(_: Self) -> id {
        "NSString".send("alloc", ())
    }
    unsafe fn initWithUTF8String_(self, c_string: *const u8) -> id;
    unsafe fn stringByAppendingString_(self, other: id) -> id;
    unsafe fn init_str(self, string: &str) -> Self;
}
impl NSString for id {
    /// `-initWithUTF8String:` — `c_string` must point to a NUL-terminated
    /// UTF-8 byte sequence.
    unsafe fn initWithUTF8String_(self, c_string: *const u8) -> id {
        self.send("initWithUTF8String:", c_string as id)
    }
    /// `-stringByAppendingString:`.
    unsafe fn stringByAppendingString_(self, other: id) -> id {
        self.send("stringByAppendingString:", other)
    }
    // NOTE(review): `&str` is NOT NUL-terminated, yet `initWithUTF8String:`
    // expects a C string. Passing `string.as_ptr()` can read past the end of
    // the buffer unless every caller appends '\0' to `string` — confirm the
    // call sites, or route this through a CString copy.
    unsafe fn init_str(self, string: &str) -> id {
        self.initWithUTF8String_(string.as_ptr())
    }
}
|
use nom::character::complete::{multispace0, multispace1};
use std::fmt;
use std::str;
use column::Column;
use common::{
assignment_expr_list, field_list, statement_terminator, table_reference,
value_list, FieldValueExpression, Literal,
};
use keywords::escape_if_keyword;
use table::Table;
/// AST node for a SQL `INSERT` statement.
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct InsertStatement {
    /// Target table (the parser asserts no alias is present).
    pub table: Table,
    /// Optional explicit column list.
    pub fields: Option<Vec<Column>>,
    /// One inner `Vec` of literals per inserted row.
    pub data: Vec<Vec<Literal>>,
    /// True when the query used `INSERT IGNORE`.
    pub ignore: bool,
    /// Assignments from an `ON DUPLICATE KEY UPDATE` clause, if any.
    pub on_duplicate: Option<Vec<(Column, FieldValueExpression)>>,
}
impl fmt::Display for InsertStatement {
    /// Renders the statement back to SQL:
    /// `INSERT INTO tbl (c1, c2) VALUES (v1, v2), (v3, v4)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "INSERT INTO {}", escape_if_keyword(&self.table.name))?;
        // Column list is only printed when one was present in the query.
        if let Some(ref fields) = self.fields {
            write!(
                f,
                " ({})",
                fields
                    .iter()
                    .map(|ref col| col.name.to_owned())
                    .collect::<Vec<_>>()
                    .join(", ")
            )?;
        }
        // Each row is formatted as a parenthesised, comma-separated tuple.
        write!(
            f,
            " VALUES {}",
            self.data
                .iter()
                .map(|datas| format!(
                    "({})",
                    datas
                        .into_iter()
                        .map(|l| l.to_string())
                        .collect::<Vec<_>>()
                        .join(", ")
                ))
                .collect::<Vec<_>>()
                .join(", ")
        )
    }
}
// Parse rule for a SQL insert query.
// TODO(malte): support REPLACE, nested selection, DEFAULT VALUES
named!(pub insertion<&[u8], InsertStatement>,
    do_parse!(
        tag_no_case!("insert") >>
        // Optional "IGNORE" directly after INSERT.
        ignore: opt!(preceded!(multispace1, tag_no_case!("ignore"))) >>
        multispace1 >>
        tag_no_case!("into") >>
        multispace1 >>
        table: table_reference >>
        // multispace0 here allows "users(id, ...)" with no space before "(".
        multispace0 >>
        // Optional parenthesised column list; note the mandatory whitespace
        // after the closing ")".
        fields: opt!(do_parse!(
            tag!("(") >>
            multispace0 >>
            fields: field_list >>
            multispace0 >>
            tag!(")") >>
            multispace1 >>
            (fields)
        )
        ) >>
        tag_no_case!("values") >>
        multispace0 >>
        // One or more "(v1, v2, ...)" tuples, separated by optional commas.
        data: many1!(
            do_parse!(
                tag!("(") >>
                values: value_list >>
                tag!(")") >>
                opt!(
                    do_parse!(
                        multispace0 >>
                        tag!(",") >>
                        multispace0 >>
                        ()
                    )
                ) >>
                (values)
            )
        ) >>
        // Optional "ON DUPLICATE KEY UPDATE <assignments>" clause.
        upd_if_dup: opt!(do_parse!(
            multispace0 >>
            tag_no_case!("on duplicate key update") >>
            multispace1 >>
            assigns: assignment_expr_list >>
            (assigns)
        )) >>
        statement_terminator >>
        ({
            // "table AS alias" isn't legal in INSERT statements
            assert!(table.alias.is_none());
            InsertStatement {
                table: table,
                fields: fields,
                data: data,
                ignore: ignore.is_some(),
                on_duplicate: upd_if_dup,
            }
        })
    )
);
#[cfg(test)]
mod tests {
    //! Round-trip parse tests for the macro-based `insertion` parser.
    use super::*;
    use arithmetic::{ArithmeticBase, ArithmeticExpression, ArithmeticOperator};
    use column::Column;
    use table::Table;

    // No explicit column list.
    #[test]
    fn simple_insert() {
        let qstring = "INSERT INTO users VALUES (42, \"test\");";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: None,
                data: vec![vec![42.into(), "test".into()]],
                ..Default::default()
            }
        );
    }

    // Mixed literal kinds, including CURRENT_TIMESTAMP.
    #[test]
    fn complex_insert() {
        let qstring = "INSERT INTO users VALUES (42, 'test', \"test\", CURRENT_TIMESTAMP);";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: None,
                data: vec![vec![
                    42.into(),
                    "test".into(),
                    "test".into(),
                    Literal::CurrentTimestamp,
                ],],
                ..Default::default()
            }
        );
    }

    #[test]
    fn insert_with_field_names() {
        let qstring = "INSERT INTO users (id, name) VALUES (42, \"test\");";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: Some(vec![Column::from("id"), Column::from("name")]),
                data: vec![vec![42.into(), "test".into()]],
                ..Default::default()
            }
        );
    }

    // Issue #3
    // No whitespace between table name / VALUES and the opening "(".
    #[test]
    fn insert_without_spaces() {
        let qstring = "INSERT INTO users(id, name) VALUES(42, \"test\");";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: Some(vec![Column::from("id"), Column::from("name")]),
                data: vec![vec![42.into(), "test".into()]],
                ..Default::default()
            }
        );
    }

    // Several rows in one statement.
    #[test]
    fn multi_insert() {
        let qstring = "INSERT INTO users (id, name) VALUES (42, \"test\"),(21, \"test2\");";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: Some(vec![Column::from("id"), Column::from("name")]),
                data: vec![
                    vec![42.into(), "test".into()],
                    vec![21.into(), "test2".into()],
                ],
                ..Default::default()
            }
        );
    }

    // "?" placeholders for prepared statements.
    #[test]
    fn insert_with_parameters() {
        let qstring = "INSERT INTO users (id, name) VALUES (?, ?);";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: Some(vec![Column::from("id"), Column::from("name")]),
                data: vec![vec![Literal::Placeholder, Literal::Placeholder]],
                ..Default::default()
            }
        );
    }

    #[test]
    fn insert_with_on_dup_update() {
        let qstring = "INSERT INTO keystores (`key`, `value`) VALUES (?, ?) \
                       ON DUPLICATE KEY UPDATE `value` = `value` + 1";
        let res = insertion(qstring.as_bytes());
        let expected_ae = ArithmeticExpression {
            op: ArithmeticOperator::Add,
            left: ArithmeticBase::Column(Column::from("value")),
            right: ArithmeticBase::Scalar(1.into()),
            alias: None,
        };
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("keystores"),
                fields: Some(vec![Column::from("key"), Column::from("value")]),
                data: vec![vec![Literal::Placeholder, Literal::Placeholder]],
                on_duplicate: Some(vec![(
                    Column::from("value"),
                    FieldValueExpression::Arithmetic(expected_ae),
                ),]),
                ..Default::default()
            }
        );
    }

    // Whitespace after "(" in the value tuple.
    #[test]
    fn insert_with_leading_value_whitespace() {
        let qstring = "INSERT INTO users (id, name) VALUES ( 42, \"test\");";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: Some(vec![Column::from("id"), Column::from("name")]),
                data: vec![vec![42.into(), "test".into()]],
                ..Default::default()
            }
        );
    }
}
Update `insert.rs` to use nom v5 functions
- Since in nom v5 the macros were rewritten to use the streaming version of
the parsers, and we want to work with complete inputs, rewrote
`insert.rs` to use the complete nom functions
use nom::character::complete::{multispace0, multispace1};
use std::fmt;
use std::str;
use column::Column;
use common::{
assignment_expr_list, field_list, statement_terminator, table_reference,
value_list, FieldValueExpression, Literal,
};
use keywords::escape_if_keyword;
use table::Table;
use nom::IResult;
use nom::sequence::{tuple, preceded, delimited};
use nom::bytes::complete::{tag_no_case, tag};
use nom::combinator::opt;
use nom::multi::many1;
use common::Operator::In;
/// AST node for a SQL `INSERT` statement.
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct InsertStatement {
    /// Target table (the parser asserts no alias is present).
    pub table: Table,
    /// Optional explicit column list.
    pub fields: Option<Vec<Column>>,
    /// One inner `Vec` of literals per inserted row.
    pub data: Vec<Vec<Literal>>,
    /// True when the query used `INSERT IGNORE`.
    pub ignore: bool,
    /// Assignments from an `ON DUPLICATE KEY UPDATE` clause, if any.
    pub on_duplicate: Option<Vec<(Column, FieldValueExpression)>>,
}
impl fmt::Display for InsertStatement {
    /// Renders the statement back to SQL:
    /// `INSERT INTO tbl (c1, c2) VALUES (v1, v2), (v3, v4)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "INSERT INTO {}", escape_if_keyword(&self.table.name))?;
        // Only print a column list when one was present in the query.
        if let Some(ref fields) = self.fields {
            let names: Vec<String> = fields.iter().map(|col| col.name.to_owned()).collect();
            write!(f, " ({})", names.join(", "))?;
        }
        // Render every row as a parenthesised, comma-separated tuple.
        let rows: Vec<String> = self
            .data
            .iter()
            .map(|row| {
                let values: Vec<String> = row.iter().map(|lit| lit.to_string()).collect();
                format!("({})", values.join(", "))
            })
            .collect();
        write!(f, " VALUES {}", rows.join(", "))
    }
}
// Parses a parenthesised column list: "(col1, col2, ...)".
// NOTE: the trailing `multispace1` requires at least one whitespace character
// after the closing ")", mirroring the old macro-based parser.
fn fields(i: &[u8]) -> IResult<&[u8], Vec<Column>> {
    delimited(preceded(tag("("), multispace0),
              field_list,
              delimited(multispace0, tag(")"), multispace1))(i)
}
// Parses one "(v1, v2, ...)" value tuple, also consuming an optional trailing
// "," separator so that `many1(data)` handles multi-row inserts.
fn data(i: &[u8]) -> IResult<&[u8], Vec<Literal>> {
    delimited(tag("("),
              value_list,
              preceded(tag(")"),
                       opt(delimited(multispace0,
                                     tag(","),
                                     multispace0))))(i)
}
// Parses the optional "ON DUPLICATE KEY UPDATE <assignments>" clause.
fn on_duplicate(i: &[u8]) -> IResult<&[u8], Vec<(Column, FieldValueExpression)>> {
    preceded(multispace0,
             preceded(tag_no_case("on duplicate key update"),
                      preceded(multispace1, assignment_expr_list)))(i)
}
// Parse rule for a SQL insert query.
// TODO(malte): support REPLACE, nested selection, DEFAULT VALUES
pub fn insertion(i: &[u8]) -> IResult<&[u8], InsertStatement> {
let (remaining_input, (_, ignore_res, _, _, _, table, _, fields, _, _, data, on_duplicate,
statement_terminator)) =
tuple((tag_no_case("insert"), opt(preceded(multispace1,
tag_no_case("ignore"))),
multispace1, tag_no_case("into"), multispace1, table_reference, multispace1,
opt(fields), tag_no_case("values"), multispace1, many1(data),
opt(on_duplicate), statement_terminator))(i)?;
assert!(table.alias.is_none());
let ignore = ignore_res.is_some();
Ok((remaining_input, InsertStatement { table, fields, data, ignore, on_duplicate }))
}
#[cfg(test)]
mod tests {
    //! Round-trip parse tests for the function-combinator `insertion` parser.
    use super::*;
    use arithmetic::{ArithmeticBase, ArithmeticExpression, ArithmeticOperator};
    use column::Column;
    use table::Table;

    // No explicit column list.
    #[test]
    fn simple_insert() {
        let qstring = "INSERT INTO users VALUES (42, \"test\");";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: None,
                data: vec![vec![42.into(), "test".into()]],
                ..Default::default()
            }
        );
    }

    // Mixed literal kinds, including CURRENT_TIMESTAMP.
    #[test]
    fn complex_insert() {
        let qstring = "INSERT INTO users VALUES (42, 'test', \"test\", CURRENT_TIMESTAMP);";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: None,
                data: vec![vec![
                    42.into(),
                    "test".into(),
                    "test".into(),
                    Literal::CurrentTimestamp,
                ],],
                ..Default::default()
            }
        );
    }

    #[test]
    fn insert_with_field_names() {
        let qstring = "INSERT INTO users (id, name) VALUES (42, \"test\");";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: Some(vec![Column::from("id"), Column::from("name")]),
                data: vec![vec![42.into(), "test".into()]],
                ..Default::default()
            }
        );
    }

    // Issue #3
    // No whitespace between table name / VALUES and the opening "(".
    #[test]
    fn insert_without_spaces() {
        let qstring = "INSERT INTO users(id, name) VALUES(42, \"test\");";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: Some(vec![Column::from("id"), Column::from("name")]),
                data: vec![vec![42.into(), "test".into()]],
                ..Default::default()
            }
        );
    }

    // Several rows in one statement.
    #[test]
    fn multi_insert() {
        let qstring = "INSERT INTO users (id, name) VALUES (42, \"test\"),(21, \"test2\");";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: Some(vec![Column::from("id"), Column::from("name")]),
                data: vec![
                    vec![42.into(), "test".into()],
                    vec![21.into(), "test2".into()],
                ],
                ..Default::default()
            }
        );
    }

    // "?" placeholders for prepared statements.
    #[test]
    fn insert_with_parameters() {
        let qstring = "INSERT INTO users (id, name) VALUES (?, ?);";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: Some(vec![Column::from("id"), Column::from("name")]),
                data: vec![vec![Literal::Placeholder, Literal::Placeholder]],
                ..Default::default()
            }
        );
    }

    #[test]
    fn insert_with_on_dup_update() {
        let qstring = "INSERT INTO keystores (`key`, `value`) VALUES (?, ?) \
                       ON DUPLICATE KEY UPDATE `value` = `value` + 1";
        let res = insertion(qstring.as_bytes());
        let expected_ae = ArithmeticExpression {
            op: ArithmeticOperator::Add,
            left: ArithmeticBase::Column(Column::from("value")),
            right: ArithmeticBase::Scalar(1.into()),
            alias: None,
        };
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("keystores"),
                fields: Some(vec![Column::from("key"), Column::from("value")]),
                data: vec![vec![Literal::Placeholder, Literal::Placeholder]],
                on_duplicate: Some(vec![(
                    Column::from("value"),
                    FieldValueExpression::Arithmetic(expected_ae),
                ),]),
                ..Default::default()
            }
        );
    }

    // Whitespace after "(" in the value tuple.
    #[test]
    fn insert_with_leading_value_whitespace() {
        let qstring = "INSERT INTO users (id, name) VALUES ( 42, \"test\");";
        let res = insertion(qstring.as_bytes());
        assert_eq!(
            res.unwrap().1,
            InsertStatement {
                table: Table::from("users"),
                fields: Some(vec![Column::from("id"), Column::from("name")]),
                data: vec![vec![42.into(), "test".into()]],
                ..Default::default()
            }
        );
    }
}
|
//! Saving and loading data to and from disk.
//!
//! # HDF5
//!
//! The recommended way to save/load data in Numeric is using HDF5.
//!
//! **Note:** The HDF5 library will by default not be thread-safe (it depends on how you compiled
//! it), so do not call either of these functions concurrently.
//!
//! ## Saving to HDF5 file:
//!
//! ```no_run
//! use std::path::Path;
//! use numeric::Tensor;
//!
//! let path = Path::new("output.h5");
//! let t: Tensor<i32> = Tensor::range(100);
//! let ret = t.save_hdf5(&path);
//! ```
//! The data will be saved to the group `/data`.
//!
//! ## Loading from HDF5 file
//!
//! Now, we can load this file:
//!
//! ```no_run
//! use std::path::Path;
//! use numeric::Tensor;
//!
//! let path = Path::new("output.h5");
//! let t = match numeric::io::load_hdf5_as_f64(&path, "/data") {
//! Ok(v) => v,
//! Err(e) => panic!("Failed: {}", e),
//! };
//! ```
//!
//! Note that since we need to know the type of `t` at compile time, it doesn't matter that we
//! saved the file as `i32`, we have to specify how to load it. The way this is done is that it
//! will load the `i32` natively and then convert it to `f64`. If you do not want your data
//! converted, simply load it as the same type that you know is stored in the file.
extern crate std;
use libc::{c_char, c_void, c_ulonglong, c_int};
use std::path::Path;
use hdf5_sys as ffi;
use tensor::Tensor;
// Local aliases for HDF5's C scalar typedefs (matching H5public.h).
#[allow(non_camel_case_types)]
type hsize_t = c_ulonglong;
#[allow(non_camel_case_types)]
type hid_t = c_int;
// Installed via H5Eset_auto2 to silence HDF5's default error printing.
extern fn error_handler(_: hid_t, _: *const c_void) {
    // Suppress errors. We will rely on return statuses alone.
}
// Generates `Tensor<$t>::save_hdf5`, writing the tensor with HDF5 type
// `$h5type` to the fixed group "/data".
macro_rules! add_save {
    ($t:ty, $h5type:expr) => (
        impl Tensor<$t> {
            /// Saves tensor to an HDF5 file.
            ///
            /// **Warning**: This function is not thread-safe (unless you compiled HDF5 to be
            /// thread-safe). Do not call this function concurrently from multiple threads.
            pub fn save_hdf5(&self, path: &Path) -> std::io::Result<()> {
                let filename = match path.to_str() {
                    Some(v) => v,
                    None => {
                        let err = std::io::Error::new(std::io::ErrorKind::InvalidInput, format!("Path could not be converted to string: {:?}", path));
                        return Err(err);
                    },
                };
                // This could be made an option
                let group = "data";
                unsafe {
                    let filename_cstr = try!(::std::ffi::CString::new(filename));
                    let group_cstr = try!(::std::ffi::CString::new(group));
                    //ffi::H5Eset_auto2(0, error_handler, 0 as *const c_void);
                    // NOTE(review): the `file`, `space` and `dset` handles below are
                    // never checked for < 0, and on the write-failure path `dset`
                    // and `file` are leaked (no H5Dclose/H5Fclose before return).
                    let file = ffi::H5Fcreate(filename_cstr.as_ptr() as *const c_char,
                                              ffi::H5F_ACC_TRUNC, ffi::H5P_DEFAULT, ffi::H5P_DEFAULT);
                    // HDF5 expects the shape as hsize_t (u64) extents.
                    let mut shape: Vec<u64> = Vec::new();
                    for s in self.shape().iter() {
                        shape.push(*s as u64);
                    }
                    let space = ffi::H5Screate_simple(shape.len() as i32, shape.as_ptr(), std::ptr::null());
                    let dset = ffi::H5Dcreate2(file, group_cstr.as_ptr() as *const c_char,
                                               $h5type, space,
                                               ffi::H5P_DEFAULT,
                                               ffi::H5P_DEFAULT,
                                               ffi::H5P_DEFAULT);
                    let status = ffi::H5Dwrite(dset, $h5type, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                               self.as_ptr() as * const c_void);
                    if status < 0 {
                        let err = std::io::Error::new(std::io::ErrorKind::Other,
                                                      format!("Failed to write '{}': {:?}", group, path));
                        return Err(err);
                    }
                    ffi::H5Dclose(dset);
                    ffi::H5Fclose(file);
                }
                Ok(())
            }
        }
    )
}
// Instantiate save_hdf5 for every natively supported HDF5 scalar type.
add_save!(u8, ffi::H5T_NATIVE_UINT8);
add_save!(u16, ffi::H5T_NATIVE_UINT16);
add_save!(u32, ffi::H5T_NATIVE_UINT32);
add_save!(u64, ffi::H5T_NATIVE_UINT64);
add_save!(i8, ffi::H5T_NATIVE_INT8);
add_save!(i16, ffi::H5T_NATIVE_INT16);
add_save!(i32, ffi::H5T_NATIVE_INT32);
add_save!(i64, ffi::H5T_NATIVE_INT64);
add_save!(f32, ffi::H5T_NATIVE_FLOAT);
add_save!(f64, ffi::H5T_NATIVE_DOUBLE);
// Generates a free function `$name` that loads an HDF5 dataset in its stored
// native type and converts it element-wise to `Tensor<$t>`.
macro_rules! add_load {
    ($name:ident, $t:ty) => (
        /// Load HDF5 file and convert to specified type.
        pub fn $name(path: &Path, group: &str) -> std::io::Result<Tensor<$t>> {
            let filename = match path.to_str() {
                Some(v) => v,
                None => {
                    let err = std::io::Error::new(std::io::ErrorKind::InvalidInput, format!("Path could not be converted to string: {:?}", path));
                    return Err(err);
                },
            };
            unsafe {
                let filename_cstr = try!(::std::ffi::CString::new(filename));
                let group_cstr = try!(::std::ffi::CString::new(group));
                // Silence HDF5's own error printing; rely on return statuses.
                ffi::H5Eset_auto2(0, error_handler, 0 as *const c_void);
                let file = ffi::H5Fopen(filename_cstr.as_ptr() as *const c_char,
                                        ffi::H5F_ACC_RDONLY, ffi::H5P_DEFAULT);
                if file < 0 {
                    let err = std::io::Error::new(std::io::ErrorKind::NotFound, format!("File not found: {:?}", path));
                    return Err(err);
                }
                let dset = ffi::H5Dopen2(file, group_cstr.as_ptr() as *const c_char,
                                         ffi::H5P_DEFAULT);
                if dset < 0 {
                    // NOTE(review): `file` is leaked on this early return.
                    let err = std::io::Error::new(std::io::ErrorKind::NotFound, format!("Group '{}' not found: {}", group, filename));
                    return Err(err);
                }
                let datatype = ffi::H5Dget_type(dset);
                let space = ffi::H5Dget_space(dset);
                let ndims = ffi::H5Sget_simple_extent_ndims(space);
                let mut shape: Tensor<hsize_t> = Tensor::zeros(&[ndims as usize]);
                if ffi::H5Sget_simple_extent_dims(space, shape.as_mut_ptr(), 0 as *mut hsize_t) != ndims {
                    // NOTE(review): message typo — "tesor" should read "tensor";
                    // `dset`/`file` are also leaked on this path.
                    let err = std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Could not read shape of tesor: {}", filename));
                    return Err(err);
                }
                //let unsigned_shape: Vec<usize> = shape.iter().map(|x| x as usize).collect();
                let unsigned_tensor = shape.convert::<usize>();
                let unsigned_shape = &unsigned_tensor.data();
                // Dispatch on the dataset's stored type, read it natively, then
                // convert to the requested output type.
                let data: Tensor<$t> = {
                    if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT8) == 1 {
                        let mut native_data: Tensor<u8> = Tensor::empty(&unsigned_shape[..]);
                        // Finally load the actual data
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT8, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT8) == 1 {
                        let mut native_data: Tensor<i8> = Tensor::empty(&unsigned_shape[..]);
                        // Finally load the actual data
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_INT8, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT16) == 1 {
                        let mut native_data: Tensor<u16> = Tensor::empty(&unsigned_shape[..]);
                        // Finally load the actual data
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT16, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT16) == 1 {
                        let mut native_data: Tensor<i16> = Tensor::empty(&unsigned_shape[..]);
                        // Finally load the actual data
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_INT16, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT32) == 1 {
                        let mut native_data: Tensor<u32> = Tensor::empty(&unsigned_shape[..]);
                        // Finally load the actual data
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT32, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT32) == 1 {
                        let mut native_data: Tensor<i32> = Tensor::empty(&unsigned_shape[..]);
                        // Finally load the actual data
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_INT32, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT64) == 1 {
                        let mut native_data: Tensor<u64> = Tensor::empty(&unsigned_shape[..]);
                        // Finally load the actual data
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT64, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT64) == 1 {
                        let mut native_data: Tensor<i64> = Tensor::empty(&unsigned_shape[..]);
                        // Finally load the actual data
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_INT64, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_FLOAT) == 1 {
                        let mut native_data: Tensor<f32> = Tensor::empty(&unsigned_shape[..]);
                        // Finally load the actual data
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_FLOAT, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_DOUBLE) == 1 {
                        let mut native_data: Tensor<f64> = Tensor::empty(&unsigned_shape[..]);
                        // Finally load the actual data
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_DOUBLE, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else {
                        // NOTE(review): this message hard-codes "f64" even though
                        // the macro is instantiated for many target types;
                        // handles are also leaked on this path.
                        let err = std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Unable to convert '{}' to {}: {}", group, "f64", filename));
                        return Err(err);
                    }
                };
                ffi::H5Tclose(datatype);
                ffi::H5Dclose(dset);
                ffi::H5Fclose(file);
                Ok(data)
            }
        }
    )
}
// Instantiate a loader per target element type (conversion happens on load).
add_load!(load_hdf5_as_u8, u8);
add_load!(load_hdf5_as_u16, u16);
add_load!(load_hdf5_as_u32, u32);
add_load!(load_hdf5_as_u64, u64);
add_load!(load_hdf5_as_i8, i8);
add_load!(load_hdf5_as_i16, i16);
add_load!(load_hdf5_as_i32, i32);
add_load!(load_hdf5_as_i64, i64);
add_load!(load_hdf5_as_f32, f32);
add_load!(load_hdf5_as_f64, f64);
add_load!(load_hdf5_as_isize, isize);
add_load!(load_hdf5_as_usize, usize);
Removed custom hsize_t and hid_t in io
//! Saving and loading data to and from disk.
//!
//! # HDF5
//!
//! The recommended way to save/load data in Numeric is using HDF5.
//!
//! **Note:** The HDF5 library will by default not be thread-safe (it depends on how you compiled
//! it), so do not call either of these functions concurrently.
//!
//! ## Saving to HDF5 file:
//!
//! ```no_run
//! use std::path::Path;
//! use numeric::Tensor;
//!
//! let path = Path::new("output.h5");
//! let t: Tensor<i32> = Tensor::range(100);
//! let ret = t.save_hdf5(&path);
//! ```
//! The data will be saved to the group `/data`.
//!
//! ## Loading from HDF5 file
//!
//! Now, we can load this file:
//!
//! ```no_run
//! use std::path::Path;
//! use numeric::Tensor;
//!
//! let path = Path::new("output.h5");
//! let t = match numeric::io::load_hdf5_as_f64(&path, "/data") {
//! Ok(v) => v,
//! Err(e) => panic!("Failed: {}", e),
//! };
//! ```
//!
//! Note that since we need to know the type of `t` at compile time, it doesn't matter that we
//! saved the file as `i32`, we have to specify how to load it. The way this is done is that it
//! will load the `i32` natively and then convert it to `f64`. If you do not want your data
//! converted, simply load it as the same type that you know is stored in the file.
extern crate std;
use libc::{c_char, c_void};
use std::path::Path;
use hdf5_sys as ffi;
use tensor::Tensor;
// Installed via H5Eset_auto2 to silence HDF5's default error printing.
extern fn error_handler(_: ffi::hid_t, _: *const c_void) {
    // Suppress errors. We will rely on return statuses alone.
}
// Generates `Tensor<$t>::save_hdf5`, writing the tensor with HDF5 type
// `$h5type` to the fixed group "/data".
macro_rules! add_save {
    ($t:ty, $h5type:expr) => (
        impl Tensor<$t> {
            /// Saves tensor to an HDF5 file.
            ///
            /// **Warning**: This function is not thread-safe (unless you compiled HDF5 to be
            /// thread-safe). Do not call this function concurrently from multiple threads.
            pub fn save_hdf5(&self, path: &Path) -> std::io::Result<()> {
                let filename = match path.to_str() {
                    Some(v) => v,
                    None => {
                        let err = std::io::Error::new(std::io::ErrorKind::InvalidInput, format!("Path could not be converted to string: {:?}", path));
                        return Err(err);
                    },
                };
                // This could be made an option
                let group = "data";
                unsafe {
                    let filename_cstr = try!(::std::ffi::CString::new(filename));
                    let group_cstr = try!(::std::ffi::CString::new(group));
                    //ffi::H5Eset_auto2(0, error_handler, 0 as *const c_void);
                    let file = ffi::H5Fcreate(filename_cstr.as_ptr() as *const c_char,
                                              ffi::H5F_ACC_TRUNC, ffi::H5P_DEFAULT, ffi::H5P_DEFAULT);
                    // Fail early instead of passing an invalid handle to the calls below.
                    if file < 0 {
                        let err = std::io::Error::new(std::io::ErrorKind::Other,
                                                      format!("Could not create file: {:?}", path));
                        return Err(err);
                    }
                    // HDF5 expects the shape as hsize_t (u64) extents.
                    let mut shape: Vec<u64> = Vec::with_capacity(self.shape().len());
                    for s in self.shape().iter() {
                        shape.push(*s as u64);
                    }
                    let space = ffi::H5Screate_simple(shape.len() as i32, shape.as_ptr(), std::ptr::null());
                    let dset = ffi::H5Dcreate2(file, group_cstr.as_ptr() as *const c_char,
                                               $h5type, space,
                                               ffi::H5P_DEFAULT,
                                               ffi::H5P_DEFAULT,
                                               ffi::H5P_DEFAULT);
                    if dset < 0 {
                        // Don't leak the file handle on failure.
                        ffi::H5Fclose(file);
                        let err = std::io::Error::new(std::io::ErrorKind::Other,
                                                      format!("Failed to create group '{}': {:?}", group, path));
                        return Err(err);
                    }
                    let status = ffi::H5Dwrite(dset, $h5type, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                               self.as_ptr() as *const c_void);
                    if status < 0 {
                        // Close handles before bailing out (previously leaked here).
                        ffi::H5Dclose(dset);
                        ffi::H5Fclose(file);
                        let err = std::io::Error::new(std::io::ErrorKind::Other,
                                                      format!("Failed to write '{}': {:?}", group, path));
                        return Err(err);
                    }
                    ffi::H5Dclose(dset);
                    ffi::H5Fclose(file);
                }
                Ok(())
            }
        }
    )
}
// Instantiate save_hdf5 for every natively supported HDF5 scalar type.
add_save!(u8, ffi::H5T_NATIVE_UINT8);
add_save!(u16, ffi::H5T_NATIVE_UINT16);
add_save!(u32, ffi::H5T_NATIVE_UINT32);
add_save!(u64, ffi::H5T_NATIVE_UINT64);
add_save!(i8, ffi::H5T_NATIVE_INT8);
add_save!(i16, ffi::H5T_NATIVE_INT16);
add_save!(i32, ffi::H5T_NATIVE_INT32);
add_save!(i64, ffi::H5T_NATIVE_INT64);
add_save!(f32, ffi::H5T_NATIVE_FLOAT);
add_save!(f64, ffi::H5T_NATIVE_DOUBLE);
// Generates a free function `$name` that loads an HDF5 dataset in its stored
// native type and converts it element-wise to `Tensor<$t>`.
//
// Fixes vs. the previous revision: the shape-error message typo
// ("tesor" -> "tensor"), the type-mismatch message now reports the actual
// requested type via `stringify!($t)` instead of a hard-coded "f64", and
// open handles are closed on every early-return path instead of being leaked.
macro_rules! add_load {
    ($name:ident, $t:ty) => (
        /// Load HDF5 file and convert to specified type.
        pub fn $name(path: &Path, group: &str) -> std::io::Result<Tensor<$t>> {
            let filename = match path.to_str() {
                Some(v) => v,
                None => {
                    let err = std::io::Error::new(std::io::ErrorKind::InvalidInput, format!("Path could not be converted to string: {:?}", path));
                    return Err(err);
                },
            };
            unsafe {
                let filename_cstr = try!(::std::ffi::CString::new(filename));
                let group_cstr = try!(::std::ffi::CString::new(group));
                // Silence HDF5's own error printing; rely on return statuses.
                ffi::H5Eset_auto2(0, error_handler, 0 as *const c_void);
                let file = ffi::H5Fopen(filename_cstr.as_ptr() as *const c_char,
                                        ffi::H5F_ACC_RDONLY, ffi::H5P_DEFAULT);
                if file < 0 {
                    let err = std::io::Error::new(std::io::ErrorKind::NotFound, format!("File not found: {:?}", path));
                    return Err(err);
                }
                let dset = ffi::H5Dopen2(file, group_cstr.as_ptr() as *const c_char,
                                         ffi::H5P_DEFAULT);
                if dset < 0 {
                    // Don't leak the file handle on failure.
                    ffi::H5Fclose(file);
                    let err = std::io::Error::new(std::io::ErrorKind::NotFound, format!("Group '{}' not found: {}", group, filename));
                    return Err(err);
                }
                let datatype = ffi::H5Dget_type(dset);
                let space = ffi::H5Dget_space(dset);
                let ndims = ffi::H5Sget_simple_extent_ndims(space);
                let mut shape: Tensor<ffi::hsize_t> = Tensor::zeros(&[ndims as usize]);
                if ffi::H5Sget_simple_extent_dims(space, shape.as_mut_ptr(), 0 as *mut ffi::hsize_t) != ndims {
                    // Close handles before bailing out (previously leaked here).
                    ffi::H5Tclose(datatype);
                    ffi::H5Dclose(dset);
                    ffi::H5Fclose(file);
                    let err = std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Could not read shape of tensor: {}", filename));
                    return Err(err);
                }
                let unsigned_tensor = shape.convert::<usize>();
                let unsigned_shape = &unsigned_tensor.data();
                // Dispatch on the dataset's stored type, read it natively, then
                // convert to the requested output type.
                let data: Tensor<$t> = {
                    if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT8) == 1 {
                        let mut native_data: Tensor<u8> = Tensor::empty(&unsigned_shape[..]);
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT8, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT8) == 1 {
                        let mut native_data: Tensor<i8> = Tensor::empty(&unsigned_shape[..]);
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_INT8, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT16) == 1 {
                        let mut native_data: Tensor<u16> = Tensor::empty(&unsigned_shape[..]);
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT16, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT16) == 1 {
                        let mut native_data: Tensor<i16> = Tensor::empty(&unsigned_shape[..]);
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_INT16, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT32) == 1 {
                        let mut native_data: Tensor<u32> = Tensor::empty(&unsigned_shape[..]);
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT32, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT32) == 1 {
                        let mut native_data: Tensor<i32> = Tensor::empty(&unsigned_shape[..]);
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_INT32, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_UINT64) == 1 {
                        let mut native_data: Tensor<u64> = Tensor::empty(&unsigned_shape[..]);
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_UINT64, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_INT64) == 1 {
                        let mut native_data: Tensor<i64> = Tensor::empty(&unsigned_shape[..]);
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_INT64, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_FLOAT) == 1 {
                        let mut native_data: Tensor<f32> = Tensor::empty(&unsigned_shape[..]);
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_FLOAT, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else if ffi::H5Tequal(datatype, ffi::H5T_NATIVE_DOUBLE) == 1 {
                        let mut native_data: Tensor<f64> = Tensor::empty(&unsigned_shape[..]);
                        ffi::H5Dread(dset, ffi::H5T_NATIVE_DOUBLE, ffi::H5S_ALL, ffi::H5S_ALL, ffi::H5P_DEFAULT,
                                     native_data.as_mut_ptr() as *mut c_void);
                        native_data.convert::<$t>()
                    } else {
                        // Close handles before bailing out (previously leaked here).
                        ffi::H5Tclose(datatype);
                        ffi::H5Dclose(dset);
                        ffi::H5Fclose(file);
                        // Report the requested target type, not a hard-coded "f64".
                        let err = std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Unable to convert '{}' to {}: {}", group, stringify!($t), filename));
                        return Err(err);
                    }
                };
                ffi::H5Tclose(datatype);
                ffi::H5Dclose(dset);
                ffi::H5Fclose(file);
                Ok(data)
            }
        }
    )
}
// Instantiate a loader per target element type (conversion happens on load).
add_load!(load_hdf5_as_u8, u8);
add_load!(load_hdf5_as_u16, u16);
add_load!(load_hdf5_as_u32, u32);
add_load!(load_hdf5_as_u64, u64);
add_load!(load_hdf5_as_i8, i8);
add_load!(load_hdf5_as_i16, i16);
add_load!(load_hdf5_as_i32, i32);
add_load!(load_hdf5_as_i64, i64);
add_load!(load_hdf5_as_f32, f32);
add_load!(load_hdf5_as_f64, f64);
add_load!(load_hdf5_as_isize, isize);
add_load!(load_hdf5_as_usize, usize);
|
//! Dissection of Internet Protocol (IP) packets.
//!
//! This module will eventually contain dissectors for protocols in the IP suite,
//! e.g., `rshark::ip::icmp` and `rshark::ip::tcp`.
//! For now, it only handles IP headers.
//!
//! See [RFC 791](https://tools.ietf.org/html/rfc791).
use {
Endianness,
Error,
NamedValue,
Result,
Val,
raw,
unsigned,
};
/// Dissects an IPv4 packet, returning its header fields as named values.
///
/// Returns `Error::Underflow` if `data` is shorter than the 20 B minimum
/// IPv4 header (RFC 791). Everything past the fixed header is handed to a
/// per-protocol sub-dissector (currently only the raw dissector).
///
/// NOTE(review): the payload offset is always 20 — IP options (IHL > 5) are
/// not skipped before dissecting the payload. TODO: honour IHL.
pub fn dissect(data : &[u8]) -> Result {
    if data.len() < 20 {
        return Err(Error::Underflow { expected: 20, have: data.len(),
            message: "An IP packet must be at least 20 B".to_string() })
    }

    let mut values: Vec<NamedValue> = vec![];

    // IP version (should be "4")
    let version = data[0] >> 4;
    values.push(("Version".to_string(), Ok(Val::Unsigned(version as u64))));

    // Internet Header Length (IHL): number of 32b words in header
    let words = data[0] & 0x0f;
    values.push(("IHL".to_string(), Ok(Val::Unsigned(words as u64))));

    // Differentiated Services Code Point (DSCP): RFC 2474
    let dscp = data[1] >> 2;
    values.push(("DSCP".to_string(), Ok(Val::Unsigned(dscp as u64))));

    // Explicit Congestion Notification (ECN): RFC 3168
    let ecn = data[1] & 0x03;
    values.push(("ECN".to_string(), Ok(Val::Unsigned(ecn as u64))));

    // Total length (including header)
    let length = unsigned(&data[2..4], Endianness::BigEndian);
    values.push(("Length".to_string(), length.map(|v| Val::Unsigned(v))));

    // Identification (of datagram fragments): RFC 6864.
    // BUG FIX: Identification is the 16-bit field at octets 4-5 (RFC 791,
    // section 3.1); the previous code read the single byte at offset 8,
    // which is actually the TTL field.
    let identification = unsigned(&data[4..6], Endianness::BigEndian);
    values.push(("Identification".to_string(), identification.map(|v| Val::Unsigned(v))));

    // Protocol number (assigned by IANA)
    let protocol = data[9];
    values.push(("Protocol".to_string(), Ok(Val::Unsigned(protocol as u64))));

    // Header checksum (not verified here, just reported)
    values.push(("Checksum".to_string(), Ok(Val::Bytes(data[10..12].to_vec()))));

    // Source and destination addresses, rendered in dotted-decimal form.
    let source = &data[12..16];
    values.push(("Source".to_string(), Ok(Val::Address {
        bytes: source.to_vec(),
        encoded: source.iter().map(|b| b.to_string()).collect::<Vec<_>>().join("."),
    })));

    let dest = &data[16..20];
    values.push(("Destination".to_string(), Ok(Val::Address {
        bytes: dest.to_vec(),
        encoded: dest.iter().map(|b| b.to_string()).collect::<Vec<_>>().join("."),
    })));

    // Parse the remainder according to the specified protocol.
    let remainder = &data[20..];
    let dissect_pdu = match protocol {
        // TODO: UDP, TCP, etc.
        _ => raw,
    };
    values.push(("Protocol Data".to_string(), dissect_pdu(remainder)));

    Ok(Val::Object(values))
}
Use slightly-deprecated SliceConcatExt::connect().
SliceConcatExt::connect() might be deprecated, but SliceConcatExt::join()
doesn't exist yet on the stable release channel.
//! Dissection of Internet Protocol (IP) packets.
//!
//! This module will eventually contain dissectors for protocols in the IP suite,
//! e.g., `rshark::ip::icmp` and `rshark::ip::tcp`.
//! For now, it only handles IP headers.
//!
//! See [RFC 791](https://tools.ietf.org/html/rfc791).
use {
Endianness,
Error,
NamedValue,
Result,
Val,
raw,
unsigned,
};
/// Dissects an IPv4 packet, returning its header fields as named values.
///
/// Returns `Error::Underflow` if `data` is shorter than the 20 B minimum
/// IPv4 header (RFC 791). Everything past the fixed header is handed to a
/// per-protocol sub-dissector (currently only the raw dissector).
pub fn dissect(data : &[u8]) -> Result {
if data.len() < 20 {
return Err(Error::Underflow { expected: 20, have: data.len(),
message: "An IP packet must be at least 20 B".to_string() })
}
let mut values:Vec<NamedValue> = vec![];
// IP version (should be "4")
let version = data[0] >> 4;
values.push(("Version".to_string(), Ok(Val::Unsigned(version as u64))));
// Internet Header Length (IHL): number of 32b words in header
let words = data[0] & 0x0f;
values.push(("IHL".to_string(), Ok(Val::Unsigned(words as u64))));
// Differentiated Services Code Point (DSCP): RFC 2474
let dscp = data[1] >> 2;
values.push(("DSCP".to_string(), Ok(Val::Unsigned(dscp as u64))));
// Explicit Congestion Notification (ECN): RFC 3168
let ecn = data[1] & 0x03;
values.push(("ECN".to_string(), Ok(Val::Unsigned(ecn as u64))));
// Total length (including header)
let length = unsigned(&data[2..4], Endianness::BigEndian);
values.push(("Length".to_string(), length.map(|v| Val::Unsigned(v))));
// Identification (of datagraph fragments): RFC 6864
// NOTE(review): `data[8]` is the TTL octet in RFC 791's layout; the
// Identification field is the 16-bit quantity at octets 4-5. This looks
// like a bug — confirm before relying on this value.
values.push(("Identification".to_string(), Ok(Val::Unsigned(data[8] as u64))));
// Protocol number (assigned by IANA)
let protocol = data[9];
values.push(("Protocol".to_string(), Ok(Val::Unsigned(protocol as u64))));
// Header checksum (reported, not verified)
values.push(("Checksum".to_string(), Ok(Val::Bytes(data[10..12].to_vec()))));
// Source and destination addresses, rendered in dotted-decimal form.
// `connect` is the pre-`join` name on the stable channel of this era.
let source = &data[12..16];
values.push(("Source".to_string(), Ok(Val::Address {
bytes: source.to_vec(),
encoded: source.iter().map(|b| b.to_string()).collect::<Vec<_>>().connect("."),
})));
let dest = &data[16..20];
values.push(("Destination".to_string(), Ok(Val::Address {
bytes: dest.to_vec(),
encoded: dest.iter().map(|b| b.to_string()).collect::<Vec<_>>().connect("."),
})));
// Parse the remainder according to the specified protocol.
let remainder = &data[20..];
let dissect_pdu = match protocol {
// TODO: UDP, TCP, etc.
_ => raw,
};
values.push(("Protocol Data".to_string(), dissect_pdu(remainder)));
Ok(Val::Object(values))
}
|
//! `POST /_matrix/client/*/login`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3login
use ruma_api::ruma_api;
use ruma_identifiers::{DeviceId, ServerName, UserId};
use ruma_serde::{JsonObject, Outgoing};
use serde::{
de::{self, DeserializeOwned},
Deserialize, Deserializer, Serialize,
};
use serde_json::Value as JsonValue;
use crate::uiaa::{IncomingUserIdentifier, UserIdentifier};
// `ruma_api!` expands this declarative spec into the `Request`/`Response`
// types (and their macro-generated `Incoming*` counterparts) used below.
ruma_api! {
metadata: {
description: "Login to the homeserver.",
method: POST,
name: "login",
r0_path: "/_matrix/client/r0/login",
stable_path: "/_matrix/client/v3/login",
rate_limited: true,
// No access token required: this endpoint is how a token is obtained.
authentication: None,
added: 1.0,
}
request: {
/// The authentication mechanism.
#[serde(flatten)]
pub login_info: LoginInfo<'a>,
/// ID of the client device
#[serde(skip_serializing_if = "Option::is_none")]
pub device_id: Option<&'a DeviceId>,
/// A display name to assign to the newly-created device.
///
/// Ignored if `device_id` corresponds to a known device.
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_device_display_name: Option<&'a str>,
}
response: {
/// The fully-qualified Matrix ID that has been registered.
pub user_id: Box<UserId>,
/// An access token for the account.
pub access_token: String,
/// The hostname of the homeserver on which the account has been registered.
///
/// Deprecated: Clients should instead use the `user_id.server_name()`
/// method if they require it.
#[serde(skip_serializing_if = "Option::is_none")]
pub home_server: Option<Box<ServerName>>,
/// ID of the logged-in device.
///
/// Will be the same as the corresponding parameter in the request, if one was
/// specified.
pub device_id: Box<DeviceId>,
/// Client configuration provided by the server.
///
/// If present, clients SHOULD use the provided object to reconfigure themselves.
#[serde(skip_serializing_if = "Option::is_none")]
pub well_known: Option<DiscoveryInfo>,
}
error: crate::Error
}
// Convenience constructors for the macro-generated request/response types;
// optional fields start out unset.
impl<'a> Request<'a> {
/// Creates a new `Request` with the given login info.
pub fn new(login_info: LoginInfo<'a>) -> Self {
Self { login_info, device_id: None, initial_device_display_name: None }
}
}
impl Response {
/// Creates a new `Response` with the given user ID, access token and device ID.
pub fn new(user_id: Box<UserId>, access_token: String, device_id: Box<DeviceId>) -> Self {
Self { user_id, access_token, home_server: None, device_id, well_known: None }
}
}
/// The authentication mechanism.
///
/// To construct the custom `LoginInfo` variant you first have to construct
/// [`IncomingLoginInfo::new`] and then call [`IncomingLoginInfo::to_outgoing`] on it.
#[derive(Clone, Debug, Outgoing, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[incoming_derive(!Deserialize)]
// Serialized untagged: each variant carries its own `"type"` tag via
// `#[serde(tag = ...)]`; deserialization is done manually below, keyed on
// that `"type"` field.
#[serde(untagged)]
pub enum LoginInfo<'a> {
/// An identifier and password are supplied to authenticate.
Password(Password<'a>),
/// Token-based login.
Token(Token<'a>),
// Hidden escape hatch for login types unknown to this crate.
#[doc(hidden)]
_Custom(CustomLoginInfo<'a>),
}
impl IncomingLoginInfo {
/// Creates a new `IncomingLoginInfo` with the given `login_type` string, session and data.
///
/// Prefer to use the public variants of `IncomingLoginInfo` where possible; this
/// constructor is meant be used for unsupported authentication mechanisms only and
/// does not allow setting arbitrary data for supported ones.
///
/// # Errors
///
/// Returns an error if the `login_type` is known and serialization of `data` to the
/// corresponding `IncomingLoginInfo` variant fails.
pub fn new(login_type: &str, data: JsonObject) -> serde_json::Result<Self> {
Ok(match login_type {
// Known types deserialize `data` into the matching typed variant.
"m.login.password" => {
Self::Password(serde_json::from_value(JsonValue::Object(data))?)
}
"m.login.token" => Self::Token(serde_json::from_value(JsonValue::Object(data))?),
// Anything else is preserved verbatim as a custom login type.
_ => Self::_Custom(IncomingCustomLoginInfo {
login_type: login_type.into(),
extra: data,
}),
})
}
/// Convert `IncomingLoginInfo` to `LoginInfo`.
pub fn to_outgoing(&self) -> LoginInfo<'_> {
match self {
Self::Password(a) => LoginInfo::Password(a.to_outgoing()),
Self::Token(a) => LoginInfo::Token(a.to_outgoing()),
Self::_Custom(a) => LoginInfo::_Custom(CustomLoginInfo {
login_type: &a.login_type,
extra: &a.extra,
}),
}
}
}
// Manual `Deserialize`: dispatch on the `"type"` field, since the enum is
// `#[serde(untagged)]` on the outgoing side.
impl<'de> Deserialize<'de> for IncomingLoginInfo {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
// Small helper to convert serde_json errors into the deserializer's error type.
fn from_json_value<T: DeserializeOwned, E: de::Error>(val: JsonValue) -> Result<T, E> {
serde_json::from_value(val).map_err(E::custom)
}
// FIXME: Would be better to use serde_json::value::RawValue, but that would require
// implementing Deserialize manually for Request, bc. `#[serde(flatten)]` breaks things.
let json = JsonValue::deserialize(deserializer)?;
let login_type =
json["type"].as_str().ok_or_else(|| de::Error::missing_field("type"))?;
match login_type {
"m.login.password" => from_json_value(json).map(Self::Password),
"m.login.token" => from_json_value(json).map(Self::Token),
// Unknown types become `_Custom`, preserving all fields.
_ => from_json_value(json).map(Self::_Custom),
}
}
}
/// An identifier and password to supply as authentication.
#[derive(Clone, Debug, Outgoing, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
// Serializes with a fixed `"type": "m.login.password"` tag.
#[serde(tag = "type", rename = "m.login.password")]
pub struct Password<'a> {
/// Identification information for the user.
pub identifier: UserIdentifier<'a>,
/// The password.
pub password: &'a str,
}
impl<'a> Password<'a> {
/// Creates a new `Password` with the given identifier and password.
pub fn new(identifier: UserIdentifier<'a>, password: &'a str) -> Self {
Self { identifier, password }
}
}
impl IncomingPassword {
/// Convert `IncomingPassword` to `Password`.
fn to_outgoing(&self) -> Password<'_> {
Password { identifier: self.identifier.to_outgoing(), password: &self.password }
}
}
/// A token to supply as authentication.
#[derive(Clone, Debug, Outgoing, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
// Serializes with a fixed `"type": "m.login.token"` tag.
#[serde(tag = "type", rename = "m.login.token")]
pub struct Token<'a> {
/// The token.
pub token: &'a str,
}
impl<'a> Token<'a> {
/// Creates a new `Token` with the given token.
pub fn new(token: &'a str) -> Self {
Self { token }
}
}
impl IncomingToken {
/// Convert `IncomingToken` to `Token`.
fn to_outgoing(&self) -> Token<'_> {
Token { token: &self.token }
}
}
// Borrowed form of an unknown login type: the raw `"type"` string plus all
// remaining fields, flattened back into the JSON object on serialization.
#[doc(hidden)]
#[derive(Clone, Debug, Serialize)]
#[non_exhaustive]
pub struct CustomLoginInfo<'a> {
#[serde(rename = "type")]
login_type: &'a str,
#[serde(flatten)]
extra: &'a JsonObject,
}
// Owned counterpart used on the incoming (deserialized) side.
#[doc(hidden)]
#[derive(Clone, Debug, Deserialize)]
#[non_exhaustive]
pub struct IncomingCustomLoginInfo {
#[serde(rename = "type")]
login_type: String,
#[serde(flatten)]
extra: JsonObject,
}
// Manual `Outgoing` wiring, since the borrowed/owned pair is hand-written.
impl Outgoing for CustomLoginInfo<'_> {
type Incoming = IncomingCustomLoginInfo;
}
/// Client configuration provided by the server.
// Mirrors the `.well-known/matrix/client` discovery payload.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct DiscoveryInfo {
/// Information about the homeserver to connect to.
#[serde(rename = "m.homeserver")]
pub homeserver: HomeserverInfo,
/// Information about the identity server to connect to.
#[serde(rename = "m.identity_server")]
pub identity_server: Option<IdentityServerInfo>,
}
impl DiscoveryInfo {
/// Create a new `DiscoveryInfo` with the given homeserver.
pub fn new(homeserver: HomeserverInfo) -> Self {
Self { homeserver, identity_server: None }
}
}
/// Information about the homeserver to connect to.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct HomeserverInfo {
/// The base URL for the homeserver for client-server connections.
pub base_url: String,
}
impl HomeserverInfo {
/// Create a new `HomeserverInfo` with the given base url.
pub fn new(base_url: String) -> Self {
Self { base_url }
}
}
/// Information about the identity server to connect to.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct IdentityServerInfo {
/// The base URL for the identity server for client-server connections.
pub base_url: String,
}
impl IdentityServerInfo {
/// Create a new `IdentityServerInfo` with the given base url.
pub fn new(base_url: String) -> Self {
Self { base_url }
}
}
#[cfg(test)]
mod tests {
use matches::assert_matches;
use serde_json::{from_value as from_json_value, json};
use super::{IncomingLoginInfo, IncomingPassword, IncomingToken};
use crate::uiaa::IncomingUserIdentifier;
// Deserialization round-trips for the two supported login types.
#[test]
fn deserialize_login_type() {
assert_matches!(
from_json_value(json!({
"type": "m.login.password",
"identifier": {
"type": "m.id.user",
"user": "cheeky_monkey"
},
"password": "ilovebananas"
}))
.unwrap(),
IncomingLoginInfo::Password(IncomingPassword { identifier: IncomingUserIdentifier::UserIdOrLocalpart(user), password })
if user == "cheeky_monkey" && password == "ilovebananas"
);
assert_matches!(
from_json_value(json!({
"type": "m.login.token",
"token": "1234567890abcdef"
}))
.unwrap(),
IncomingLoginInfo::Token(IncomingToken { token })
if token == "1234567890abcdef"
);
}
// Checks that the flattened `login_info` serializes alongside the other
// request fields in the HTTP body.
#[test]
#[cfg(feature = "client")]
fn serialize_login_request_body() {
use ruma_api::{MatrixVersion, OutgoingRequest, SendAccessToken};
use ruma_common::thirdparty::Medium;
use serde_json::Value as JsonValue;
use super::{LoginInfo, Password, Request, Token};
use crate::uiaa::UserIdentifier;
let req: http::Request<Vec<u8>> = Request {
login_info: LoginInfo::Token(Token { token: "0xdeadbeef" }),
device_id: None,
initial_device_display_name: Some("test"),
}
.try_into_http_request(
"https://homeserver.tld",
SendAccessToken::None,
&[MatrixVersion::V1_1],
)
.unwrap();
let req_body_value: JsonValue = serde_json::from_slice(req.body()).unwrap();
assert_eq!(
req_body_value,
json!({
"type": "m.login.token",
"token": "0xdeadbeef",
"initial_device_display_name": "test",
})
);
let req: http::Request<Vec<u8>> = Request {
login_info: LoginInfo::Password(Password {
identifier: UserIdentifier::ThirdPartyId {
address: "hello@example.com",
medium: Medium::Email,
},
password: "deadbeef",
}),
device_id: None,
initial_device_display_name: Some("test"),
}
.try_into_http_request(
"https://homeserver.tld",
SendAccessToken::None,
&[MatrixVersion::V1_1],
)
.unwrap();
let req_body_value: JsonValue = serde_json::from_slice(req.body()).unwrap();
assert_eq!(
req_body_value,
json!({
"identifier": {
"type": "m.id.thirdparty",
"medium": "email",
"address": "hello@example.com"
},
"type": "m.login.password",
"password": "deadbeef",
"initial_device_display_name": "test",
})
);
}
}
}
client-api: Add LoginInfo::ApplicationService
According to MSC2778
//! `POST /_matrix/client/*/login`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3login
use ruma_api::ruma_api;
use ruma_identifiers::{DeviceId, ServerName, UserId};
use ruma_serde::{JsonObject, Outgoing};
use serde::{
de::{self, DeserializeOwned},
Deserialize, Deserializer, Serialize,
};
use serde_json::Value as JsonValue;
use crate::uiaa::{IncomingUserIdentifier, UserIdentifier};
// `ruma_api!` expands this declarative spec into the `Request`/`Response`
// types (and their macro-generated `Incoming*` counterparts) used below.
ruma_api! {
metadata: {
description: "Login to the homeserver.",
method: POST,
name: "login",
r0_path: "/_matrix/client/r0/login",
stable_path: "/_matrix/client/v3/login",
rate_limited: true,
// No access token required: this endpoint is how a token is obtained.
authentication: None,
added: 1.0,
}
request: {
/// The authentication mechanism.
#[serde(flatten)]
pub login_info: LoginInfo<'a>,
/// ID of the client device
#[serde(skip_serializing_if = "Option::is_none")]
pub device_id: Option<&'a DeviceId>,
/// A display name to assign to the newly-created device.
///
/// Ignored if `device_id` corresponds to a known device.
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_device_display_name: Option<&'a str>,
}
response: {
/// The fully-qualified Matrix ID that has been registered.
pub user_id: Box<UserId>,
/// An access token for the account.
pub access_token: String,
/// The hostname of the homeserver on which the account has been registered.
///
/// Deprecated: Clients should instead use the `user_id.server_name()`
/// method if they require it.
#[serde(skip_serializing_if = "Option::is_none")]
pub home_server: Option<Box<ServerName>>,
/// ID of the logged-in device.
///
/// Will be the same as the corresponding parameter in the request, if one was
/// specified.
pub device_id: Box<DeviceId>,
/// Client configuration provided by the server.
///
/// If present, clients SHOULD use the provided object to reconfigure themselves.
#[serde(skip_serializing_if = "Option::is_none")]
pub well_known: Option<DiscoveryInfo>,
}
error: crate::Error
}
// Convenience constructors for the macro-generated request/response types;
// optional fields start out unset.
impl<'a> Request<'a> {
/// Creates a new `Request` with the given login info.
pub fn new(login_info: LoginInfo<'a>) -> Self {
Self { login_info, device_id: None, initial_device_display_name: None }
}
}
impl Response {
/// Creates a new `Response` with the given user ID, access token and device ID.
pub fn new(user_id: Box<UserId>, access_token: String, device_id: Box<DeviceId>) -> Self {
Self { user_id, access_token, home_server: None, device_id, well_known: None }
}
}
/// The authentication mechanism.
///
/// To construct the custom `LoginInfo` variant you first have to construct
/// [`IncomingLoginInfo::new`] and then call [`IncomingLoginInfo::to_outgoing`] on it.
#[derive(Clone, Debug, Outgoing, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[incoming_derive(!Deserialize)]
// Serialized untagged: each variant carries its own `"type"` tag via
// `#[serde(tag = ...)]`; deserialization is done manually below, keyed on
// that `"type"` field.
#[serde(untagged)]
pub enum LoginInfo<'a> {
/// An identifier and password are supplied to authenticate.
Password(Password<'a>),
/// Token-based login.
Token(Token<'a>),
/// Application Service-specific login.
ApplicationService(ApplicationService<'a>),
// Hidden escape hatch for login types unknown to this crate.
#[doc(hidden)]
_Custom(CustomLoginInfo<'a>),
}
impl IncomingLoginInfo {
/// Creates a new `IncomingLoginInfo` with the given `login_type` string, session and data.
///
/// Prefer to use the public variants of `IncomingLoginInfo` where possible; this
/// constructor is meant be used for unsupported authentication mechanisms only and
/// does not allow setting arbitrary data for supported ones.
///
/// # Errors
///
/// Returns an error if the `login_type` is known and serialization of `data` to the
/// corresponding `IncomingLoginInfo` variant fails.
pub fn new(login_type: &str, data: JsonObject) -> serde_json::Result<Self> {
Ok(match login_type {
// Known types deserialize `data` into the matching typed variant.
"m.login.password" => {
Self::Password(serde_json::from_value(JsonValue::Object(data))?)
}
"m.login.token" => Self::Token(serde_json::from_value(JsonValue::Object(data))?),
"m.login.application_service" => {
Self::ApplicationService(serde_json::from_value(JsonValue::Object(data))?)
}
// Anything else is preserved verbatim as a custom login type.
_ => Self::_Custom(IncomingCustomLoginInfo {
login_type: login_type.into(),
extra: data,
}),
})
}
/// Convert `IncomingLoginInfo` to `LoginInfo`.
pub fn to_outgoing(&self) -> LoginInfo<'_> {
match self {
Self::Password(a) => LoginInfo::Password(a.to_outgoing()),
Self::Token(a) => LoginInfo::Token(a.to_outgoing()),
Self::ApplicationService(a) => LoginInfo::ApplicationService(a.to_outgoing()),
Self::_Custom(a) => LoginInfo::_Custom(CustomLoginInfo {
login_type: &a.login_type,
extra: &a.extra,
}),
}
}
}
// Manual `Deserialize`: dispatch on the `"type"` field, since the enum is
// `#[serde(untagged)]` on the outgoing side.
impl<'de> Deserialize<'de> for IncomingLoginInfo {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Small helper to convert serde_json errors into the deserializer's error type.
        fn from_json_value<T: DeserializeOwned, E: de::Error>(val: JsonValue) -> Result<T, E> {
            serde_json::from_value(val).map_err(E::custom)
        }

        // FIXME: Would be better to use serde_json::value::RawValue, but that would require
        // implementing Deserialize manually for Request, bc. `#[serde(flatten)]` breaks things.
        let json = JsonValue::deserialize(deserializer)?;
        let login_type =
            json["type"].as_str().ok_or_else(|| de::Error::missing_field("type"))?;
        match login_type {
            "m.login.password" => from_json_value(json).map(Self::Password),
            "m.login.token" => from_json_value(json).map(Self::Token),
            // BUG FIX: this arm was missing, so appservice logins fell through
            // to `_Custom` even though `IncomingLoginInfo::new` and
            // `to_outgoing` both support the dedicated variant (MSC2778).
            "m.login.application_service" => {
                from_json_value(json).map(Self::ApplicationService)
            }
            _ => from_json_value(json).map(Self::_Custom),
        }
    }
}
/// An identifier and password to supply as authentication.
#[derive(Clone, Debug, Outgoing, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
// Serializes with a fixed `"type": "m.login.password"` tag.
#[serde(tag = "type", rename = "m.login.password")]
pub struct Password<'a> {
/// Identification information for the user.
pub identifier: UserIdentifier<'a>,
/// The password.
pub password: &'a str,
}
impl<'a> Password<'a> {
/// Creates a new `Password` with the given identifier and password.
pub fn new(identifier: UserIdentifier<'a>, password: &'a str) -> Self {
Self { identifier, password }
}
}
impl IncomingPassword {
/// Convert `IncomingPassword` to `Password`.
fn to_outgoing(&self) -> Password<'_> {
Password { identifier: self.identifier.to_outgoing(), password: &self.password }
}
}
/// A token to supply as authentication.
#[derive(Clone, Debug, Outgoing, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
// Serializes with a fixed `"type": "m.login.token"` tag.
#[serde(tag = "type", rename = "m.login.token")]
pub struct Token<'a> {
/// The token.
pub token: &'a str,
}
impl<'a> Token<'a> {
/// Creates a new `Token` with the given token.
pub fn new(token: &'a str) -> Self {
Self { token }
}
}
impl IncomingToken {
/// Convert `IncomingToken` to `Token`.
fn to_outgoing(&self) -> Token<'_> {
Token { token: &self.token }
}
}
/// An identifier to supply for Application Service authentication.
// Appservice login per MSC2778: authentication happens via the appservice's
// `as_token`, so only the user identifier is carried in the body.
#[derive(Clone, Debug, Outgoing, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
// Serializes with a fixed `"type": "m.login.application_service"` tag.
#[serde(tag = "type", rename = "m.login.application_service")]
pub struct ApplicationService<'a> {
/// Identification information for the user.
pub identifier: UserIdentifier<'a>,
}
impl<'a> ApplicationService<'a> {
/// Creates a new `ApplicationService` with the given identifier.
pub fn new(identifier: UserIdentifier<'a>) -> Self {
Self { identifier }
}
}
impl IncomingApplicationService {
/// Convert `IncomingApplicationService` to `ApplicationService`.
fn to_outgoing(&self) -> ApplicationService<'_> {
ApplicationService { identifier: self.identifier.to_outgoing() }
}
}
// Borrowed form of an unknown login type: the raw `"type"` string plus all
// remaining fields, flattened back into the JSON object on serialization.
#[doc(hidden)]
#[derive(Clone, Debug, Serialize)]
#[non_exhaustive]
pub struct CustomLoginInfo<'a> {
#[serde(rename = "type")]
login_type: &'a str,
#[serde(flatten)]
extra: &'a JsonObject,
}
// Owned counterpart used on the incoming (deserialized) side.
#[doc(hidden)]
#[derive(Clone, Debug, Deserialize)]
#[non_exhaustive]
pub struct IncomingCustomLoginInfo {
#[serde(rename = "type")]
login_type: String,
#[serde(flatten)]
extra: JsonObject,
}
// Manual `Outgoing` wiring, since the borrowed/owned pair is hand-written.
impl Outgoing for CustomLoginInfo<'_> {
type Incoming = IncomingCustomLoginInfo;
}
/// Client configuration provided by the server.
// Mirrors the `.well-known/matrix/client` discovery payload.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct DiscoveryInfo {
/// Information about the homeserver to connect to.
#[serde(rename = "m.homeserver")]
pub homeserver: HomeserverInfo,
/// Information about the identity server to connect to.
#[serde(rename = "m.identity_server")]
pub identity_server: Option<IdentityServerInfo>,
}
impl DiscoveryInfo {
/// Create a new `DiscoveryInfo` with the given homeserver.
pub fn new(homeserver: HomeserverInfo) -> Self {
Self { homeserver, identity_server: None }
}
}
/// Information about the homeserver to connect to.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct HomeserverInfo {
/// The base URL for the homeserver for client-server connections.
pub base_url: String,
}
impl HomeserverInfo {
/// Create a new `HomeserverInfo` with the given base url.
pub fn new(base_url: String) -> Self {
Self { base_url }
}
}
/// Information about the identity server to connect to.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct IdentityServerInfo {
/// The base URL for the identity server for client-server connections.
pub base_url: String,
}
impl IdentityServerInfo {
/// Create a new `IdentityServerInfo` with the given base url.
pub fn new(base_url: String) -> Self {
Self { base_url }
}
}
#[cfg(test)]
mod tests {
use matches::assert_matches;
use serde_json::{from_value as from_json_value, json};
use super::{IncomingLoginInfo, IncomingPassword, IncomingToken};
use crate::uiaa::IncomingUserIdentifier;
// Deserialization round-trips for the password and token login types.
// NOTE(review): no case covers "m.login.application_service"; consider
// adding one to pin its deserialization behavior.
#[test]
fn deserialize_login_type() {
assert_matches!(
from_json_value(json!({
"type": "m.login.password",
"identifier": {
"type": "m.id.user",
"user": "cheeky_monkey"
},
"password": "ilovebananas"
}))
.unwrap(),
IncomingLoginInfo::Password(IncomingPassword { identifier: IncomingUserIdentifier::UserIdOrLocalpart(user), password })
if user == "cheeky_monkey" && password == "ilovebananas"
);
assert_matches!(
from_json_value(json!({
"type": "m.login.token",
"token": "1234567890abcdef"
}))
.unwrap(),
IncomingLoginInfo::Token(IncomingToken { token })
if token == "1234567890abcdef"
);
}
// Checks that the flattened `login_info` serializes alongside the other
// request fields in the HTTP body.
#[test]
#[cfg(feature = "client")]
fn serialize_login_request_body() {
use ruma_api::{MatrixVersion, OutgoingRequest, SendAccessToken};
use ruma_common::thirdparty::Medium;
use serde_json::Value as JsonValue;
use super::{LoginInfo, Password, Request, Token};
use crate::uiaa::UserIdentifier;
let req: http::Request<Vec<u8>> = Request {
login_info: LoginInfo::Token(Token { token: "0xdeadbeef" }),
device_id: None,
initial_device_display_name: Some("test"),
}
.try_into_http_request(
"https://homeserver.tld",
SendAccessToken::None,
&[MatrixVersion::V1_1],
)
.unwrap();
let req_body_value: JsonValue = serde_json::from_slice(req.body()).unwrap();
assert_eq!(
req_body_value,
json!({
"type": "m.login.token",
"token": "0xdeadbeef",
"initial_device_display_name": "test",
})
);
let req: http::Request<Vec<u8>> = Request {
login_info: LoginInfo::Password(Password {
identifier: UserIdentifier::ThirdPartyId {
address: "hello@example.com",
medium: Medium::Email,
},
password: "deadbeef",
}),
device_id: None,
initial_device_display_name: Some("test"),
}
.try_into_http_request(
"https://homeserver.tld",
SendAccessToken::None,
&[MatrixVersion::V1_1],
)
.unwrap();
let req_body_value: JsonValue = serde_json::from_slice(req.body()).unwrap();
assert_eq!(
req_body_value,
json!({
"identifier": {
"type": "m.id.thirdparty",
"medium": "email",
"address": "hello@example.com"
},
"type": "m.login.password",
"password": "deadbeef",
"initial_device_display_name": "test",
})
);
}
}
}
|
//! Implementation of event enum and event content enum macros.
use proc_macro2::{Span, TokenStream};
use quote::{format_ident, quote, ToTokens};
use syn::{Attribute, Data, DataEnum, DeriveInput, Ident, LitStr};
use crate::event_parse::{EventEnumDecl, EventEnumEntry, EventKind, EventKindVariation};
/// Returns `true` for message/state event kinds in any non-stripped
/// variation (full, sync, redacted, redacted-sync).
fn is_non_stripped_room_event(kind: EventKind, var: EventKindVariation) -> bool {
    let is_room_kind = matches!(kind, EventKind::Message | EventKind::State);
    let is_non_stripped_var = matches!(
        var,
        EventKindVariation::Full
            | EventKindVariation::Sync
            | EventKindVariation::Redacted
            | EventKindVariation::RedactedSync
    );
    is_room_kind && is_non_stripped_var
}
/// Returns `true` when the event kind/variation pair carries a
/// `prev_content` field: only full and sync state events do.
fn has_prev_content_field(kind: EventKind, var: EventKindVariation) -> bool {
    let is_state = matches!(kind, EventKind::State);
    let is_full_or_sync = matches!(var, EventKindVariation::Full | EventKindVariation::Sync);
    is_state && is_full_or_sync
}
// Predicate deciding whether a given (kind, variation) pair has a field.
type EventKindFn = fn(EventKind, EventKindVariation) -> bool;
/// This const is used to generate the accessor methods for the `Any*Event` enums.
///
/// DO NOT alter the field names unless the structs in `ruma_events::event_kinds` have changed.
const EVENT_FIELDS: &[(&str, EventKindFn)] = &[
("origin_server_ts", is_non_stripped_room_event),
// `room_id` only exists on non-sync (full/redacted) room-scoped events.
("room_id", |kind, var| {
matches!(kind, EventKind::Message | EventKind::State | EventKind::Ephemeral)
&& matches!(var, EventKindVariation::Full | EventKindVariation::Redacted)
}),
("event_id", is_non_stripped_room_event),
// `sender` is on all but the initial-state variation.
("sender", |kind, var| {
matches!(kind, EventKind::Message | EventKind::State | EventKind::ToDevice)
&& var != EventKindVariation::Initial
}),
// `state_key` is exclusive to state events.
("state_key", |kind, _| matches!(kind, EventKind::State)),
("unsigned", is_non_stripped_room_event),
];
/// Create a content enum from `EventEnumInput`.
// Entry point: emits the full `Any*Event` enum plus, depending on the event
// kind, its sync/stripped/initial/redacted companions and the conversions
// between them.
pub fn expand_event_enums(input: &EventEnumDecl) -> syn::Result<TokenStream> {
use EventKindVariation as V;
let ruma_events = crate::import_ruma_events();
let mut res = TokenStream::new();
let kind = input.kind;
let attrs = &input.attrs;
// Event-type strings and their corresponding enum variants, kept in
// parallel order for the zip-style expansion below.
let events: Vec<_> = input.events.iter().map(|entry| entry.ev_type.clone()).collect();
let variants: Vec<_> =
input.events.iter().map(EventEnumEntry::to_variant).collect::<syn::Result<_>>()?;
res.extend(expand_event_enum(kind, &events, attrs, &variants, V::Full, &ruma_events));
res.extend(expand_content_enum(kind, &events, attrs, &variants, &ruma_events));
// Room-scoped kinds additionally get a sync form and Full<->Sync conversions.
if matches!(kind, EventKind::Ephemeral | EventKind::Message | EventKind::State) {
res.extend(expand_event_enum(kind, &events, attrs, &variants, V::Sync, &ruma_events))
res.extend(expand_from_full_event(kind, V::Full, &variants));
res.extend(expand_into_full_event(kind, V::Sync, &variants, &ruma_events));
}
// State events also come in stripped and initial forms.
if matches!(kind, EventKind::State) {
res.extend(expand_event_enum(kind, &events, attrs, &variants, V::Stripped, &ruma_events));
res.extend(expand_event_enum(kind, &events, attrs, &variants, V::Initial, &ruma_events));
}
// Redactable kinds get redacted enums and their conversions.
if matches!(kind, EventKind::Message | EventKind::State) {
res.extend(expand_redacted_event_enum(kind, &events, attrs, &variants, &ruma_events));
res.extend(expand_from_full_event(kind, V::Redacted, &variants));
res.extend(expand_into_full_event(kind, V::RedactedSync, &variants, &ruma_events));
}
Ok(res)
}
// Generates one `Any*Event` enum for the given kind/variation, together with
// its manual `Deserialize` (dispatching on the `type` field), accessor
// methods, redaction support, and `From` impls for each variant.
fn expand_event_enum(
kind: EventKind,
events: &[LitStr],
attrs: &[Attribute],
variants: &[EventEnumVariant],
var: EventKindVariation,
ruma_events: &TokenStream,
) -> TokenStream {
let serde = quote! { #ruma_events::exports::serde };
let serde_json = quote! { #ruma_events::exports::serde_json };
let event_struct = kind.to_event_ident(var).unwrap();
let ident = kind.to_event_enum_ident(var).unwrap();
// Concrete event type path for each event-type string, in variant order.
let content: Vec<_> =
events.iter().map(|event| to_event_path(event, kind, var, ruma_events)).collect();
let variant_decls = variants.iter().map(|v| v.decl());
let self_variants = variants.iter().map(|v| v.ctor(quote! { Self }));
let (custom_variant, custom_deserialize) =
generate_custom_variant(&event_struct, var, ruma_events);
let event_enum = quote! {
#( #attrs )*
#[derive(Clone, Debug, #serde::Serialize)]
#[serde(untagged)]
#[allow(clippy::large_enum_variant)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub enum #ident {
#(
#[doc = #events]
#variant_decls(#content),
)*
#custom_variant
}
};
let variant_attrs = variants.iter().map(|v| {
let attrs = &v.attrs;
quote! { #(#attrs)* }
});
// Manual Deserialize: peek at the `type` field in the raw JSON, then
// deserialize the whole value into the matching variant's event type.
let event_deserialize_impl = quote! {
impl<'de> #serde::de::Deserialize<'de> for #ident {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: #serde::de::Deserializer<'de>,
{
use #serde::de::Error as _;
let json = Box::<#serde_json::value::RawValue>::deserialize(deserializer)?;
let #ruma_events::EventTypeDeHelper { ev_type, .. } =
#ruma_events::from_raw_json_value(&json)?;
match &*ev_type {
#(
#variant_attrs #events => {
let event = #serde_json::from_str::<#content>(json.get())
.map_err(D::Error::custom)?;
Ok(#self_variants(event))
},
)*
#custom_deserialize
}
}
}
};
let redacted_enum = expand_redacted_enum(kind, var, ruma_events);
let field_accessor_impl = accessor_methods(kind, var, variants, ruma_events);
let redact_impl = expand_redact(&ident, kind, var, variants, ruma_events);
let from_impl = expand_from_impl(ident, &content, variants);
quote! {
#event_enum
#field_accessor_impl
#redact_impl
#event_deserialize_impl
#redacted_enum
#from_impl
}
}
/// Generates a `From<EventStruct>` impl for each variant of the enum `ty`,
/// forwarding the variant's attributes (e.g. `cfg`) onto the impl.
fn expand_from_impl(
    ty: Ident,
    content: &[TokenStream],
    variants: &[EventEnumVariant],
) -> TokenStream {
    // `content` and `variants` are parallel: the n-th event struct path
    // belongs to the n-th enum variant.
    let from_impls = content.iter().zip(variants).map(|(content, variant)| {
        let ident = &variant.ident;
        let attrs = &variant.attrs;
        quote! {
            #[automatically_derived]
            #(#attrs)*
            impl ::std::convert::From<#content> for #ty {
                fn from(c: #content) -> Self {
                    Self::#ident(c)
                }
            }
        }
    });
    quote! { #( #from_impls )* }
}
/// Generates a `From` impl converting the full event enum for `var` into its
/// sync counterpart, delegating the per-event conversion to `From`.
fn expand_from_full_event(
    kind: EventKind,
    var: EventKindVariation,
    variants: &[EventEnumVariant],
) -> TokenStream {
    let ident = kind.to_event_enum_ident(var).unwrap();
    // Target enum: the sync variation corresponding to `var`.
    let sync = kind.to_event_enum_ident(var.to_sync().unwrap()).unwrap();
    let ident_variants = variants.iter().map(|v| v.match_arm(&ident));
    let self_variants = variants.iter().map(|v| v.ctor(quote! { Self }));
    quote! {
        #[automatically_derived]
        impl ::std::convert::From<#ident> for #sync {
            fn from(event: #ident) -> Self {
                match event {
                    #(
                        #ident_variants(event) => {
                            #self_variants(::std::convert::From::from(event))
                        },
                    )*
                    #ident::_Custom(event) => {
                        Self::_Custom(::std::convert::From::from(event))
                    },
                }
            }
        }
    }
}
/// Generates an inherent `into_full_event` method on the sync event enum for
/// `var` that re-attaches a `room_id`, producing the full event enum.
fn expand_into_full_event(
    kind: EventKind,
    var: EventKindVariation,
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> TokenStream {
    let ruma_identifiers = quote! { #ruma_events::exports::ruma_identifiers };
    let ident = kind.to_event_enum_ident(var).unwrap();
    // Target enum: the full variation corresponding to `var`.
    let full = kind.to_event_enum_ident(var.to_full().unwrap()).unwrap();
    let self_variants = variants.iter().map(|v| v.match_arm(quote! { Self }));
    let full_variants = variants.iter().map(|v| v.ctor(&full));
    quote! {
        #[automatically_derived]
        impl #ident {
            /// Convert this sync event into a full event (one with a `room_id` field).
            pub fn into_full_event(
                self,
                room_id: #ruma_identifiers::RoomId
            ) -> #full {
                match self {
                    #(
                        #self_variants(event) => {
                            #full_variants(event.into_full_event(room_id))
                        },
                    )*
                    Self::_Custom(event) => {
                        #full::_Custom(event.into_full_event(room_id))
                    },
                }
            }
        }
    }
}
/// Generates the redacted event enums (full and sync variations) together
/// with their `Deserialize` implementations.
///
/// No content enums are generated since no part of the API deals with a
/// redacted event's content. There are only five state variants that contain content.
fn expand_redacted_event_enum(
    kind: EventKind,
    events: &[LitStr],
    attrs: &[Attribute],
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> TokenStream {
    use EventKindVariation as V;
    // Emit the `Redacted` enum followed by the `RedactedSync` enum as two
    // consecutive item groups in one token stream.
    let mut res = expand_event_enum(kind, events, attrs, variants, V::Redacted, ruma_events);
    res.extend(expand_event_enum(kind, events, attrs, variants, V::RedactedSync, ruma_events));
    res
}
/// Create a content enum from `EventEnumInput`.
///
/// The generated `Any*EventContent` enum has one variant per known event
/// type plus a `_Custom` catch-all that only stores the event type string.
fn expand_content_enum(
    kind: EventKind,
    events: &[LitStr],
    attrs: &[Attribute],
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> TokenStream {
    // Re-exported crate paths so generated code does not depend on the
    // caller's own dependency set.
    let serde = quote! { #ruma_events::exports::serde };
    let serde_json = quote! { #ruma_events::exports::serde_json };
    let ident = kind.to_content_enum();
    // The event type strings double as doc comments and as match patterns.
    let event_type_str = events;
    // One concrete `*EventContent` type path per event type; parallel to
    // `variants`.
    let content: Vec<_> =
        events.iter().map(|ev| to_event_content_path(kind, ev, None, ruma_events)).collect();
    let variant_decls = variants.iter().map(|v| v.decl()).collect::<Vec<_>>();
    let content_enum = quote! {
        #( #attrs )*
        #[derive(Clone, Debug, #serde::Serialize)]
        #[serde(untagged)]
        #[allow(clippy::large_enum_variant)]
        #[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
        pub enum #ident {
            #(
                #[doc = #event_type_str]
                #variant_decls(#content),
            )*
            #[doc(hidden)]
            _Custom {
                #[serde(skip)]
                event_type: ::std::string::String,
            },
        }
    };
    let variant_attrs = variants.iter().map(|v| {
        let attrs = &v.attrs;
        quote! { #(#attrs)* }
    });
    let variant_arms = variants.iter().map(|v| v.match_arm(quote! { Self })).collect::<Vec<_>>();
    let variant_ctors = variants.iter().map(|v| v.ctor(quote! { Self }));
    // `EventContent` maps each variant back to its event type string and
    // dispatches deserialization on the event type, falling back to `_Custom`.
    let event_content_impl = quote! {
        #[automatically_derived]
        impl #ruma_events::EventContent for #ident {
            fn event_type(&self) -> &::std::primitive::str {
                match self {
                    #( #variant_arms(content) => content.event_type(), )*
                    Self::_Custom { event_type } => &event_type,
                }
            }
            fn from_parts(
                event_type: &::std::primitive::str,
                input: &#serde_json::value::RawValue,
            ) -> #serde_json::Result<Self> {
                match event_type {
                    #(
                        #variant_attrs #event_type_str => {
                            let content = #content::from_parts(event_type, input)?;
                            ::std::result::Result::Ok(#variant_ctors(content))
                        }
                    )*
                    ty => {
                        ::std::result::Result::Ok(Self::_Custom { event_type: ty.to_owned() })
                    }
                }
            }
        }
    };
    let marker_trait_impl = marker_trait(kind, ruma_events);
    let from_impl = expand_from_impl(ident, &content, variants);
    quote! {
        #content_enum
        #event_content_impl
        #marker_trait_impl
        #from_impl
    }
}
/// Generates a `Redact` impl for the event enum `ident`, mapping each
/// variant onto the corresponding variant of the redacted event enum.
///
/// Returns `None` when `var` has no redacted counterpart.
fn expand_redact(
    ident: &Ident,
    kind: EventKind,
    var: EventKindVariation,
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> Option<TokenStream> {
    let ruma_identifiers = quote! { #ruma_events::exports::ruma_identifiers };
    // The `?`s bail out for variations that cannot be redacted.
    let redacted_enum = kind.to_event_enum_ident(var.to_redacted()?)?;
    let self_variants = variants.iter().map(|v| v.match_arm(quote! { Self }));
    let redacted_variants = variants.iter().map(|v| v.ctor(&redacted_enum));
    Some(quote! {
        #[automatically_derived]
        impl #ruma_events::Redact for #ident {
            type Redacted = #redacted_enum;
            fn redact(
                self,
                redaction: #ruma_events::room::redaction::SyncRoomRedactionEvent,
                version: &#ruma_identifiers::RoomVersionId,
            ) -> #redacted_enum {
                match self {
                    #(
                        #self_variants(event) => #redacted_variants(
                            #ruma_events::Redact::redact(event, redaction, version),
                        ),
                    )*
                    Self::_Custom(event) => #redacted_enum::_Custom(
                        #ruma_events::Redact::redact(event, redaction, version),
                    )
                }
            }
        }
    })
}
/// Generates the `AnyPossiblyRedacted*` wrapper enum holding either a
/// regular or a redacted event, plus a `Deserialize` impl that selects the
/// variant based on the presence of `unsigned.redacted_because`.
///
/// Returns `None` for kinds that cannot be redacted or variations without a
/// redacted counterpart.
fn expand_redacted_enum(
    kind: EventKind,
    var: EventKindVariation,
    ruma_events: &TokenStream,
) -> Option<TokenStream> {
    let serde = quote! { #ruma_events::exports::serde };
    let serde_json = quote! { #ruma_events::exports::serde_json };
    if let EventKind::State | EventKind::Message = kind {
        let ident = format_ident!("AnyPossiblyRedacted{}", kind.to_event_ident(var)?);
        let regular_enum_ident = kind.to_event_enum_ident(var)?;
        let redacted_enum_ident = kind.to_event_enum_ident(var.to_redacted()?)?;
        Some(quote! {
            /// An enum that holds either regular un-redacted events or redacted events.
            #[derive(Clone, Debug, #serde::Serialize)]
            #[serde(untagged)]
            #[allow(clippy::exhaustive_enums)]
            pub enum #ident {
                /// An un-redacted event.
                Regular(#regular_enum_ident),
                /// A redacted event.
                Redacted(#redacted_enum_ident),
            }
            impl<'de> #serde::de::Deserialize<'de> for #ident {
                fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
                where
                    D: #serde::de::Deserializer<'de>,
                {
                    let json = Box::<#serde_json::value::RawValue>::deserialize(deserializer)?;
                    let #ruma_events::RedactionDeHelper { unsigned } =
                        #ruma_events::from_raw_json_value(&json)?;
                    Ok(match unsigned {
                        Some(unsigned) if unsigned.redacted_because.is_some() => {
                            Self::Redacted(#ruma_events::from_raw_json_value(&json)?)
                        }
                        _ => Self::Regular(#ruma_events::from_raw_json_value(&json)?),
                    })
                }
            }
        })
    } else {
        None
    }
}
/// Returns the `_Custom` variant declaration and the matching fallback
/// deserialization arm for an event enum.
///
/// Redacted variations wrap `RedactedCustomEventContent`; all others wrap
/// `CustomEventContent`.
fn generate_custom_variant(
    event_struct: &Ident,
    var: EventKindVariation,
    ruma_events: &TokenStream,
) -> (TokenStream, TokenStream) {
    use EventKindVariation as V;
    let serde_json = quote! { #ruma_events::exports::serde_json };
    if matches!(var, V::Redacted | V::RedactedSync) {
        (
            quote! {
                /// A redacted event not defined by the Matrix specification
                #[doc(hidden)]
                _Custom(
                    #ruma_events::#event_struct<#ruma_events::custom::RedactedCustomEventContent>,
                ),
            },
            quote! {
                event => {
                    let event = #serde_json::from_str::<#ruma_events::#event_struct<
                        #ruma_events::custom::RedactedCustomEventContent,
                    >>(json.get())
                    .map_err(D::Error::custom)?;
                    Ok(Self::_Custom(event))
                },
            },
        )
    } else {
        (
            quote! {
                /// An event not defined by the Matrix specification
                #[doc(hidden)]
                _Custom(#ruma_events::#event_struct<#ruma_events::custom::CustomEventContent>),
            },
            quote! {
                event => {
                    let event =
                        #serde_json::from_str::<
                            #ruma_events::#event_struct<#ruma_events::custom::CustomEventContent>
                        >(json.get())
                        .map_err(D::Error::custom)?;
                    Ok(Self::_Custom(event))
                },
            },
        )
    }
}
fn marker_trait(kind: EventKind, ruma_events: &TokenStream) -> TokenStream {
let marker_trait = match kind {
EventKind::State => quote! { StateEventContent },
EventKind::Message => quote! { MessageEventContent },
EventKind::Ephemeral => quote! { EphemeralRoomEventContent },
EventKind::GlobalAccountData => quote! { GlobalAccountDataEventContent },
EventKind::RoomAccountData => quote! { RoomAccountDataEventContent },
EventKind::ToDevice => quote! { ToDeviceEventContent },
_ => return TokenStream::new(),
};
let ident = kind.to_content_enum();
quote! {
#[automatically_derived]
impl #ruma_events::#marker_trait for #ident {}
}
}
/// Generates the inherent accessor methods (`event_type`, `content`,
/// `prev_content` and the common event fields) for the event enum of the
/// given kind/variation.
///
/// Always returns `Some`; the `Option` return type lets the result be
/// interpolated directly like the other optional helpers.
fn accessor_methods(
    kind: EventKind,
    var: EventKindVariation,
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> Option<TokenStream> {
    let ident = kind.to_event_enum_ident(var).unwrap();
    // One accessor per EVENT_FIELDS entry whose predicate matches this
    // kind/variation; non-matching entries yield `None` (interpolates to nothing).
    let methods = EVENT_FIELDS.iter().map(|(name, has_field)| {
        generate_accessor(name, kind, var, *has_field, variants, ruma_events)
    });
    let self_variants: Vec<_> = variants.iter().map(|v| v.match_arm(quote! { Self })).collect();
    let event_type = quote! {
        /// Returns the `type` of this event.
        pub fn event_type(&self) -> &::std::primitive::str {
            match self {
                #( #self_variants(event) =>
                    #ruma_events::EventContent::event_type(&event.content), )*
                Self::_Custom(event) =>
                    #ruma_events::EventContent::event_type(&event.content),
            }
        }
    };
    // Redacted variations get no content accessors: no content enum is
    // generated for redacted events.
    let content_accessors = (!var.is_redacted()).then(|| {
        let content_enum = kind.to_content_enum();
        let content_variants: Vec<_> = variants.iter().map(|v| v.ctor(&content_enum)).collect();
        let prev_content = has_prev_content_field(kind, var).then(|| {
            quote! {
                /// Returns the previous content for this event.
                pub fn prev_content(&self) -> Option<#content_enum> {
                    match self {
                        #(
                            #self_variants(event) => {
                                event.prev_content.as_ref().map(|c| #content_variants(c.clone()))
                            },
                        )*
                        Self::_Custom(event) => {
                            event.prev_content.as_ref().map(|c| #content_enum::_Custom {
                                event_type: #ruma_events::EventContent::event_type(c).to_owned(),
                            })
                        },
                    }
                }
            }
        });
        quote! {
            /// Returns the content for this event.
            pub fn content(&self) -> #content_enum {
                match self {
                    #( #self_variants(event) => #content_variants(event.content.clone()), )*
                    Self::_Custom(event) => #content_enum::_Custom {
                        event_type: #ruma_events::EventContent::event_type(&event.content)
                            .to_owned(),
                    },
                }
            }
            #prev_content
        }
    });
    Some(quote! {
        #[automatically_derived]
        impl #ident {
            #event_type
            #content_accessors
            #( #methods )*
        }
    })
}
/// Resolves the `ruma_events` path of the event struct for the event type
/// string `name`, for the given kind/variation.
fn to_event_path(
    name: &LitStr,
    kind: EventKind,
    var: EventKindVariation,
    ruma_events: &TokenStream,
) -> TokenStream {
    let span = name.span();
    let name = name.value();
    // There is no need to give a good compiler error as `to_camel_case` is called first.
    assert_eq!(&name[..2], "m.");
    // Module path: one segment per `.`-separated part after the `m.` prefix.
    let path: Vec<_> = name[2..].split('.').collect();
    // Type-name stem: camel-case the parts, splitting on both `.` and `_`.
    let event: String = name[2..]
        .split(&['.', '_'] as &[char])
        .map(|s| s.chars().next().unwrap().to_uppercase().to_string() + &s[1..])
        .collect();
    let path = path.iter().map(|s| Ident::new(s, span));
    let event_name = if kind == EventKind::ToDevice {
        // To-device events only exist in the Full variation and use their
        // own naming scheme.
        assert_eq!(var, EventKindVariation::Full);
        format_ident!("ToDevice{}Event", event)
    } else {
        format_ident!("{}{}Event", var, event)
    };
    quote! { #ruma_events::#( #path )::*::#event_name }
}
/// Resolves the `ruma_events` path of the `*EventContent` struct for the
/// event type string `name`, optionally inserting `prefix` before the
/// camel-cased event name.
fn to_event_content_path(
    kind: EventKind,
    name: &LitStr,
    prefix: Option<&str>,
    ruma_events: &TokenStream,
) -> TokenStream {
    let span = name.span();
    let name = name.value();
    // There is no need to give a good compiler error as `to_camel_case` is called first.
    assert_eq!(&name[..2], "m.");
    // Module path: one segment per `.`-separated part after the `m.` prefix.
    let path: Vec<_> = name[2..].split('.').collect();
    // Type-name stem: camel-case the parts, splitting on both `.` and `_`.
    let event: String = name[2..]
        .split(&['.', '_'] as &[char])
        .map(|s| s.chars().next().unwrap().to_uppercase().to_string() + &s[1..])
        .collect();
    let content_str = match kind {
        EventKind::ToDevice => {
            format_ident!("ToDevice{}{}EventContent", prefix.unwrap_or(""), event)
        }
        _ => format_ident!("{}{}EventContent", prefix.unwrap_or(""), event),
    };
    let path = path.iter().map(|s| Ident::new(s, span));
    quote! {
        #ruma_events::#( #path )::*::#content_str
    }
}
/// Splits the given `event_type` string on `.` and `_` removing the `m.room.` then
/// camel casing to give the `Event` struct name.
fn to_camel_case(name: &LitStr) -> syn::Result<Ident> {
let span = name.span();
let name = name.value();
if &name[..2] != "m." {
return Err(syn::Error::new(
span,
format!("well-known matrix events have to start with `m.` found `{}`", name),
));
}
let s: String = name[2..]
.split(&['.', '_'] as &[char])
.map(|s| s.chars().next().unwrap().to_uppercase().to_string() + &s[1..])
.collect();
Ok(Ident::new(&s, span))
}
/// Generates a by-reference accessor for the event field `name` on the
/// event enum, or `None` when the field does not exist for this
/// kind/variation (as decided by the `is_event_kind` predicate).
fn generate_accessor(
    name: &str,
    kind: EventKind,
    var: EventKindVariation,
    is_event_kind: EventKindFn,
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> Option<TokenStream> {
    is_event_kind(kind, var).then(|| {
        let docs = format!("Returns this event's {} field.", name);
        let ident = Ident::new(name, Span::call_site());
        let field_type = field_return_type(name, var, ruma_events);
        let variants = variants.iter().map(|v| v.match_arm(quote! { Self }));
        quote! {
            #[doc = #docs]
            pub fn #ident(&self) -> &#field_type {
                match self {
                    #( #variants(event) => &event.#ident, )*
                    Self::_Custom(event) => &event.#ident,
                }
            }
        }
    })
}
/// Returns the type of the event field `name` as it appears on the generated
/// event structs, for use in accessor method signatures.
///
/// Panics when `name` is not one of the fields listed in `EVENT_FIELDS`.
fn field_return_type(
    name: &str,
    var: EventKindVariation,
    ruma_events: &TokenStream,
) -> TokenStream {
    let ruma_common = quote! { #ruma_events::exports::ruma_common };
    let ruma_identifiers = quote! { #ruma_events::exports::ruma_identifiers };
    // `unsigned` is the only field whose type depends on the variation, so
    // handle it up front.
    if name == "unsigned" {
        return if var.is_redacted() {
            quote! { #ruma_events::RedactedUnsigned }
        } else {
            quote! { #ruma_events::Unsigned }
        };
    }
    match name {
        "origin_server_ts" => quote! { #ruma_common::MilliSecondsSinceUnixEpoch },
        "room_id" => quote! { #ruma_identifiers::RoomId },
        "event_id" => quote! { #ruma_identifiers::EventId },
        "sender" => quote! { #ruma_identifiers::UserId },
        "state_key" => quote! { ::std::primitive::str },
        _ => panic!("the `ruma_events_macros::event_enum::EVENT_FIELD` const was changed"),
    }
}
/// A variant of one of the generated event/content enums.
pub(crate) struct EventEnumVariant {
    /// Attributes (e.g. `cfg`) forwarded to every generated occurrence of
    /// this variant.
    pub attrs: Vec<Attribute>,
    /// The variant's identifier, derived from the event type string.
    pub ident: Ident,
}
impl EventEnumVariant {
    /// Renders this variant as tokens: optionally the variant's attributes,
    /// then an optional `prefix ::`, then the variant identifier.
    pub(crate) fn to_tokens<T>(&self, prefix: Option<T>, with_attrs: bool) -> TokenStream
    where
        T: ToTokens,
    {
        let mut tokens = TokenStream::new();
        if with_attrs {
            for attr in &self.attrs {
                attr.to_tokens(&mut tokens);
            }
        }
        if let Some(p) = prefix {
            tokens.extend(quote! { #p :: })
        }
        self.ident.to_tokens(&mut tokens);
        tokens
    }
    /// The variant as used in an enum declaration: attributes, no prefix.
    pub(crate) fn decl(&self) -> TokenStream {
        self.to_tokens::<TokenStream>(None, true)
    }
    /// The variant as used in a match arm: attributes and prefix.
    pub(crate) fn match_arm(&self, prefix: impl ToTokens) -> TokenStream {
        self.to_tokens(Some(prefix), true)
    }
    /// The variant as used as a constructor: prefix, no attributes.
    pub(crate) fn ctor(&self, prefix: impl ToTokens) -> TokenStream {
        self.to_tokens(Some(prefix), false)
    }
}
impl EventEnumEntry {
    /// Converts this macro-input entry into an enum variant, camel-casing
    /// its event type string to obtain the variant identifier.
    pub(crate) fn to_variant(&self) -> syn::Result<EventEnumVariant> {
        let attrs = self.attrs.clone();
        let ident = to_camel_case(&self.ev_type)?;
        Ok(EventEnumVariant { attrs, ident })
    }
}
/// Derive-macro helper: for each enum variant with exactly one unnamed
/// field, generates a `From<FieldType>` impl for the enum.
///
/// Panics (surfacing as a compile error) when the input is not an enum or a
/// variant does not have exactly one unnamed field.
pub(crate) fn expand_from_impls_derived(input: DeriveInput) -> TokenStream {
    let variants = match &input.data {
        Data::Enum(DataEnum { variants, .. }) => variants,
        _ => panic!("this derive macro only works with enums"),
    };
    let from_impls = variants.iter().map(|variant| match &variant.fields {
        syn::Fields::Unnamed(fields) if fields.unnamed.len() == 1 => {
            let inner_struct = &fields.unnamed.first().unwrap().ty;
            let var_ident = &variant.ident;
            let id = &input.ident;
            quote! {
                #[automatically_derived]
                impl ::std::convert::From<#inner_struct> for #id {
                    fn from(c: #inner_struct) -> Self {
                        Self::#var_ident(c)
                    }
                }
            }
        }
        _ => {
            panic!("this derive macro only works with enum variants with a single unnamed field")
        }
    });
    quote! {
        #( #from_impls )*
    }
}
// events-macros: Refactor expand_redacted_enum
//! Implementation of event enum and event content enum macros.
use proc_macro2::{Span, TokenStream};
use quote::{format_ident, quote, ToTokens};
use syn::{Attribute, Data, DataEnum, DeriveInput, Ident, LitStr};
use crate::event_parse::{EventEnumDecl, EventEnumEntry, EventKind, EventKindVariation};
fn is_non_stripped_room_event(kind: EventKind, var: EventKindVariation) -> bool {
matches!(kind, EventKind::Message | EventKind::State)
&& matches!(
var,
EventKindVariation::Full
| EventKindVariation::Sync
| EventKindVariation::Redacted
| EventKindVariation::RedactedSync
)
}
/// Whether the generated event struct for this kind/variation has a
/// `prev_content` field (only un-redacted full/sync state events do).
fn has_prev_content_field(kind: EventKind, var: EventKindVariation) -> bool {
    if !matches!(kind, EventKind::State) {
        return false;
    }
    matches!(var, EventKindVariation::Full | EventKindVariation::Sync)
}
// Predicate deciding whether a given event field exists for a
// kind/variation combination.
type EventKindFn = fn(EventKind, EventKindVariation) -> bool;
/// This const is used to generate the accessor methods for the `Any*Event` enums.
///
/// DO NOT alter the field names unless the structs in `ruma_events::event_kinds` have changed.
const EVENT_FIELDS: &[(&str, EventKindFn)] = &[
    ("origin_server_ts", is_non_stripped_room_event),
    // `room_id` is stripped from the sync variations, so only full and
    // redacted-full events of room-scoped kinds have it.
    ("room_id", |kind, var| {
        matches!(kind, EventKind::Message | EventKind::State | EventKind::Ephemeral)
            && matches!(var, EventKindVariation::Full | EventKindVariation::Redacted)
    }),
    ("event_id", is_non_stripped_room_event),
    ("sender", |kind, var| {
        matches!(kind, EventKind::Message | EventKind::State | EventKind::ToDevice)
            && var != EventKindVariation::Initial
    }),
    ("state_key", |kind, _| matches!(kind, EventKind::State)),
    ("unsigned", is_non_stripped_room_event),
];
/// Create the event enums and content enum from an `EventEnumDecl`,
/// emitting only the variations that apply to the declared kind.
pub fn expand_event_enums(input: &EventEnumDecl) -> syn::Result<TokenStream> {
    use EventKindVariation as V;
    let ruma_events = crate::import_ruma_events();
    let mut res = TokenStream::new();
    let kind = input.kind;
    let attrs = &input.attrs;
    let events: Vec<_> = input.events.iter().map(|entry| entry.ev_type.clone()).collect();
    let variants: Vec<_> =
        input.events.iter().map(EventEnumEntry::to_variant).collect::<syn::Result<_>>()?;
    // Every kind gets a full event enum and a content enum.
    res.extend(expand_event_enum(kind, &events, attrs, &variants, V::Full, &ruma_events));
    res.extend(expand_content_enum(kind, &events, attrs, &variants, &ruma_events));
    // Room-scoped kinds also get sync variations plus full<->sync conversions.
    if matches!(kind, EventKind::Ephemeral | EventKind::Message | EventKind::State) {
        res.extend(expand_event_enum(kind, &events, attrs, &variants, V::Sync, &ruma_events));
        res.extend(expand_from_full_event(kind, V::Full, &variants));
        res.extend(expand_into_full_event(kind, V::Sync, &variants, &ruma_events));
    }
    // Only state events have stripped and initial variations.
    if matches!(kind, EventKind::State) {
        res.extend(expand_event_enum(kind, &events, attrs, &variants, V::Stripped, &ruma_events));
        res.extend(expand_event_enum(kind, &events, attrs, &variants, V::Initial, &ruma_events));
    }
    // Redactable kinds get redacted enums, possibly-redacted wrappers and
    // the redacted full<->sync conversions.
    if matches!(kind, EventKind::Message | EventKind::State) {
        res.extend(expand_redacted_event_enum(kind, &events, attrs, &variants, &ruma_events));
        res.extend(expand_possibly_redacted_enum(kind, V::Full, &ruma_events));
        res.extend(expand_possibly_redacted_enum(kind, V::Sync, &ruma_events));
        res.extend(expand_from_full_event(kind, V::Redacted, &variants));
        res.extend(expand_into_full_event(kind, V::RedactedSync, &variants, &ruma_events));
    }
    Ok(res)
}
/// Generates one `Any*Event` enum for the given `kind`/`var` combination,
/// along with its `Deserialize` impl, accessor methods, `Redact` impl (when
/// applicable) and `From` conversions.
fn expand_event_enum(
    kind: EventKind,
    events: &[LitStr],
    attrs: &[Attribute],
    variants: &[EventEnumVariant],
    var: EventKindVariation,
    ruma_events: &TokenStream,
) -> TokenStream {
    // Re-exported crate paths so generated code does not depend on the
    // caller's own dependency set.
    let serde = quote! { #ruma_events::exports::serde };
    let serde_json = quote! { #ruma_events::exports::serde_json };
    let event_struct = kind.to_event_ident(var).unwrap();
    let ident = kind.to_event_enum_ident(var).unwrap();
    // One concrete event struct path per known event type string; parallel
    // to `variants`.
    let content: Vec<_> =
        events.iter().map(|event| to_event_path(event, kind, var, ruma_events)).collect();
    let variant_decls = variants.iter().map(|v| v.decl());
    let self_variants = variants.iter().map(|v| v.ctor(quote! { Self }));
    // `_Custom` fallback variant plus the matching deserialization arm for
    // event types not covered by the spec.
    let (custom_variant, custom_deserialize) =
        generate_custom_variant(&event_struct, var, ruma_events);
    let event_enum = quote! {
        #( #attrs )*
        #[derive(Clone, Debug, #serde::Serialize)]
        #[serde(untagged)]
        #[allow(clippy::large_enum_variant)]
        #[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
        pub enum #ident {
            #(
                #[doc = #events]
                #variant_decls(#content),
            )*
            #custom_variant
        }
    };
    let variant_attrs = variants.iter().map(|v| {
        let attrs = &v.attrs;
        quote! { #(#attrs)* }
    });
    // Deserialization peeks at the `type` field first, then deserializes the
    // raw JSON into the concrete event struct for that event type.
    let event_deserialize_impl = quote! {
        impl<'de> #serde::de::Deserialize<'de> for #ident {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where
                D: #serde::de::Deserializer<'de>,
            {
                use #serde::de::Error as _;
                let json = Box::<#serde_json::value::RawValue>::deserialize(deserializer)?;
                let #ruma_events::EventTypeDeHelper { ev_type, .. } =
                    #ruma_events::from_raw_json_value(&json)?;
                match &*ev_type {
                    #(
                        #variant_attrs #events => {
                            let event = #serde_json::from_str::<#content>(json.get())
                                .map_err(D::Error::custom)?;
                            Ok(#self_variants(event))
                        },
                    )*
                    #custom_deserialize
                }
            }
        }
    };
    // `redact_impl` is `None` for variations without a redacted counterpart;
    // `quote!` interpolates `None` as nothing.
    let field_accessor_impl = accessor_methods(kind, var, variants, ruma_events);
    let redact_impl = expand_redact(&ident, kind, var, variants, ruma_events);
    let from_impl = expand_from_impl(ident, &content, variants);
    quote! {
        #event_enum
        #field_accessor_impl
        #redact_impl
        #event_deserialize_impl
        #from_impl
    }
}
/// Generates a `From<EventStruct>` impl for each variant of the enum `ty`,
/// forwarding the variant's attributes (e.g. `cfg`) onto the impl.
fn expand_from_impl(
    ty: Ident,
    content: &[TokenStream],
    variants: &[EventEnumVariant],
) -> TokenStream {
    // `content` and `variants` are parallel: the n-th event struct path
    // belongs to the n-th enum variant.
    let from_impls = content.iter().zip(variants).map(|(content, variant)| {
        let ident = &variant.ident;
        let attrs = &variant.attrs;
        quote! {
            #[automatically_derived]
            #(#attrs)*
            impl ::std::convert::From<#content> for #ty {
                fn from(c: #content) -> Self {
                    Self::#ident(c)
                }
            }
        }
    });
    quote! { #( #from_impls )* }
}
/// Generates a `From` impl converting the full event enum for `var` into its
/// sync counterpart, delegating the per-event conversion to `From`.
fn expand_from_full_event(
    kind: EventKind,
    var: EventKindVariation,
    variants: &[EventEnumVariant],
) -> TokenStream {
    let ident = kind.to_event_enum_ident(var).unwrap();
    // Target enum: the sync variation corresponding to `var`.
    let sync = kind.to_event_enum_ident(var.to_sync().unwrap()).unwrap();
    let ident_variants = variants.iter().map(|v| v.match_arm(&ident));
    let self_variants = variants.iter().map(|v| v.ctor(quote! { Self }));
    quote! {
        #[automatically_derived]
        impl ::std::convert::From<#ident> for #sync {
            fn from(event: #ident) -> Self {
                match event {
                    #(
                        #ident_variants(event) => {
                            #self_variants(::std::convert::From::from(event))
                        },
                    )*
                    #ident::_Custom(event) => {
                        Self::_Custom(::std::convert::From::from(event))
                    },
                }
            }
        }
    }
}
/// Generates an inherent `into_full_event` method on the sync event enum for
/// `var` that re-attaches a `room_id`, producing the full event enum.
fn expand_into_full_event(
    kind: EventKind,
    var: EventKindVariation,
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> TokenStream {
    let ruma_identifiers = quote! { #ruma_events::exports::ruma_identifiers };
    let ident = kind.to_event_enum_ident(var).unwrap();
    // Target enum: the full variation corresponding to `var`.
    let full = kind.to_event_enum_ident(var.to_full().unwrap()).unwrap();
    let self_variants = variants.iter().map(|v| v.match_arm(quote! { Self }));
    let full_variants = variants.iter().map(|v| v.ctor(&full));
    quote! {
        #[automatically_derived]
        impl #ident {
            /// Convert this sync event into a full event (one with a `room_id` field).
            pub fn into_full_event(
                self,
                room_id: #ruma_identifiers::RoomId
            ) -> #full {
                match self {
                    #(
                        #self_variants(event) => {
                            #full_variants(event.into_full_event(room_id))
                        },
                    )*
                    Self::_Custom(event) => {
                        #full::_Custom(event.into_full_event(room_id))
                    },
                }
            }
        }
    }
}
/// Generates the redacted event enums (full and sync variations) together
/// with their `Deserialize` implementations.
///
/// No content enums are generated since no part of the API deals with a
/// redacted event's content. There are only five state variants that contain content.
fn expand_redacted_event_enum(
    kind: EventKind,
    events: &[LitStr],
    attrs: &[Attribute],
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> TokenStream {
    use EventKindVariation as V;
    // Emit the `Redacted` enum followed by the `RedactedSync` enum as two
    // consecutive item groups in one token stream.
    let mut res = expand_event_enum(kind, events, attrs, variants, V::Redacted, ruma_events);
    res.extend(expand_event_enum(kind, events, attrs, variants, V::RedactedSync, ruma_events));
    res
}
/// Create a content enum from `EventEnumInput`.
///
/// The generated `Any*EventContent` enum has one variant per known event
/// type plus a `_Custom` catch-all that only stores the event type string.
fn expand_content_enum(
    kind: EventKind,
    events: &[LitStr],
    attrs: &[Attribute],
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> TokenStream {
    // Re-exported crate paths so generated code does not depend on the
    // caller's own dependency set.
    let serde = quote! { #ruma_events::exports::serde };
    let serde_json = quote! { #ruma_events::exports::serde_json };
    let ident = kind.to_content_enum();
    // The event type strings double as doc comments and as match patterns.
    let event_type_str = events;
    // One concrete `*EventContent` type path per event type; parallel to
    // `variants`.
    let content: Vec<_> =
        events.iter().map(|ev| to_event_content_path(kind, ev, None, ruma_events)).collect();
    let variant_decls = variants.iter().map(|v| v.decl()).collect::<Vec<_>>();
    let content_enum = quote! {
        #( #attrs )*
        #[derive(Clone, Debug, #serde::Serialize)]
        #[serde(untagged)]
        #[allow(clippy::large_enum_variant)]
        #[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
        pub enum #ident {
            #(
                #[doc = #event_type_str]
                #variant_decls(#content),
            )*
            #[doc(hidden)]
            _Custom {
                #[serde(skip)]
                event_type: ::std::string::String,
            },
        }
    };
    let variant_attrs = variants.iter().map(|v| {
        let attrs = &v.attrs;
        quote! { #(#attrs)* }
    });
    let variant_arms = variants.iter().map(|v| v.match_arm(quote! { Self })).collect::<Vec<_>>();
    let variant_ctors = variants.iter().map(|v| v.ctor(quote! { Self }));
    // `EventContent` maps each variant back to its event type string and
    // dispatches deserialization on the event type, falling back to `_Custom`.
    let event_content_impl = quote! {
        #[automatically_derived]
        impl #ruma_events::EventContent for #ident {
            fn event_type(&self) -> &::std::primitive::str {
                match self {
                    #( #variant_arms(content) => content.event_type(), )*
                    Self::_Custom { event_type } => &event_type,
                }
            }
            fn from_parts(
                event_type: &::std::primitive::str,
                input: &#serde_json::value::RawValue,
            ) -> #serde_json::Result<Self> {
                match event_type {
                    #(
                        #variant_attrs #event_type_str => {
                            let content = #content::from_parts(event_type, input)?;
                            ::std::result::Result::Ok(#variant_ctors(content))
                        }
                    )*
                    ty => {
                        ::std::result::Result::Ok(Self::_Custom { event_type: ty.to_owned() })
                    }
                }
            }
        }
    };
    let marker_trait_impl = marker_trait(kind, ruma_events);
    let from_impl = expand_from_impl(ident, &content, variants);
    quote! {
        #content_enum
        #event_content_impl
        #marker_trait_impl
        #from_impl
    }
}
/// Generates a `Redact` impl for the event enum `ident`, mapping each
/// variant onto the corresponding variant of the redacted event enum.
///
/// Returns `None` when `var` has no redacted counterpart.
fn expand_redact(
    ident: &Ident,
    kind: EventKind,
    var: EventKindVariation,
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> Option<TokenStream> {
    let ruma_identifiers = quote! { #ruma_events::exports::ruma_identifiers };
    // The `?`s bail out for variations that cannot be redacted.
    let redacted_enum = kind.to_event_enum_ident(var.to_redacted()?)?;
    let self_variants = variants.iter().map(|v| v.match_arm(quote! { Self }));
    let redacted_variants = variants.iter().map(|v| v.ctor(&redacted_enum));
    Some(quote! {
        #[automatically_derived]
        impl #ruma_events::Redact for #ident {
            type Redacted = #redacted_enum;
            fn redact(
                self,
                redaction: #ruma_events::room::redaction::SyncRoomRedactionEvent,
                version: &#ruma_identifiers::RoomVersionId,
            ) -> #redacted_enum {
                match self {
                    #(
                        #self_variants(event) => #redacted_variants(
                            #ruma_events::Redact::redact(event, redaction, version),
                        ),
                    )*
                    Self::_Custom(event) => #redacted_enum::_Custom(
                        #ruma_events::Redact::redact(event, redaction, version),
                    )
                }
            }
        }
    })
}
/// Generates the `AnyPossiblyRedacted*` wrapper enum holding either a
/// regular or a redacted event, plus a `Deserialize` impl that selects the
/// variant based on the presence of `unsigned.redacted_because`.
///
/// Only called for kinds/variations with a redacted counterpart; the
/// `unwrap`s enforce that contract.
fn expand_possibly_redacted_enum(
    kind: EventKind,
    var: EventKindVariation,
    ruma_events: &TokenStream,
) -> TokenStream {
    let serde = quote! { #ruma_events::exports::serde };
    let serde_json = quote! { #ruma_events::exports::serde_json };
    let ident = format_ident!("AnyPossiblyRedacted{}", kind.to_event_ident(var).unwrap());
    let regular_enum_ident = kind.to_event_enum_ident(var).unwrap();
    let redacted_enum_ident = kind.to_event_enum_ident(var.to_redacted().unwrap()).unwrap();
    quote! {
        /// An enum that holds either regular un-redacted events or redacted events.
        #[derive(Clone, Debug, #serde::Serialize)]
        #[serde(untagged)]
        #[allow(clippy::exhaustive_enums)]
        pub enum #ident {
            /// An un-redacted event.
            Regular(#regular_enum_ident),
            /// A redacted event.
            Redacted(#redacted_enum_ident),
        }
        impl<'de> #serde::de::Deserialize<'de> for #ident {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where
                D: #serde::de::Deserializer<'de>,
            {
                let json = Box::<#serde_json::value::RawValue>::deserialize(deserializer)?;
                let #ruma_events::RedactionDeHelper { unsigned } =
                    #ruma_events::from_raw_json_value(&json)?;
                Ok(match unsigned {
                    Some(unsigned) if unsigned.redacted_because.is_some() => {
                        Self::Redacted(#ruma_events::from_raw_json_value(&json)?)
                    }
                    _ => Self::Regular(#ruma_events::from_raw_json_value(&json)?),
                })
            }
        }
    }
}
/// Returns the `_Custom` variant declaration and the matching fallback
/// deserialization arm for an event enum.
///
/// Redacted variations wrap `RedactedCustomEventContent`; all others wrap
/// `CustomEventContent`.
fn generate_custom_variant(
    event_struct: &Ident,
    var: EventKindVariation,
    ruma_events: &TokenStream,
) -> (TokenStream, TokenStream) {
    use EventKindVariation as V;
    let serde_json = quote! { #ruma_events::exports::serde_json };
    if matches!(var, V::Redacted | V::RedactedSync) {
        (
            quote! {
                /// A redacted event not defined by the Matrix specification
                #[doc(hidden)]
                _Custom(
                    #ruma_events::#event_struct<#ruma_events::custom::RedactedCustomEventContent>,
                ),
            },
            quote! {
                event => {
                    let event = #serde_json::from_str::<#ruma_events::#event_struct<
                        #ruma_events::custom::RedactedCustomEventContent,
                    >>(json.get())
                    .map_err(D::Error::custom)?;
                    Ok(Self::_Custom(event))
                },
            },
        )
    } else {
        (
            quote! {
                /// An event not defined by the Matrix specification
                #[doc(hidden)]
                _Custom(#ruma_events::#event_struct<#ruma_events::custom::CustomEventContent>),
            },
            quote! {
                event => {
                    let event =
                        #serde_json::from_str::<
                            #ruma_events::#event_struct<#ruma_events::custom::CustomEventContent>
                        >(json.get())
                        .map_err(D::Error::custom)?;
                    Ok(Self::_Custom(event))
                },
            },
        )
    }
}
fn marker_trait(kind: EventKind, ruma_events: &TokenStream) -> TokenStream {
let marker_trait = match kind {
EventKind::State => quote! { StateEventContent },
EventKind::Message => quote! { MessageEventContent },
EventKind::Ephemeral => quote! { EphemeralRoomEventContent },
EventKind::GlobalAccountData => quote! { GlobalAccountDataEventContent },
EventKind::RoomAccountData => quote! { RoomAccountDataEventContent },
EventKind::ToDevice => quote! { ToDeviceEventContent },
_ => return TokenStream::new(),
};
let ident = kind.to_content_enum();
quote! {
#[automatically_derived]
impl #ruma_events::#marker_trait for #ident {}
}
}
/// Generates the inherent accessor methods (`event_type`, `content`,
/// `prev_content` and the common event fields) for the event enum of the
/// given kind/variation.
fn accessor_methods(
    kind: EventKind,
    var: EventKindVariation,
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> TokenStream {
    let ident = kind.to_event_enum_ident(var).unwrap();
    // One accessor per EVENT_FIELDS entry whose predicate matches this
    // kind/variation; non-matching entries yield `None` (interpolates to nothing).
    let methods = EVENT_FIELDS.iter().map(|(name, has_field)| {
        generate_accessor(name, kind, var, *has_field, variants, ruma_events)
    });
    let self_variants: Vec<_> = variants.iter().map(|v| v.match_arm(quote! { Self })).collect();
    let event_type = quote! {
        /// Returns the `type` of this event.
        pub fn event_type(&self) -> &::std::primitive::str {
            match self {
                #( #self_variants(event) =>
                    #ruma_events::EventContent::event_type(&event.content), )*
                Self::_Custom(event) =>
                    #ruma_events::EventContent::event_type(&event.content),
            }
        }
    };
    // Redacted variations get no content accessors: no content enum is
    // generated for redacted events.
    let content_accessors = (!var.is_redacted()).then(|| {
        let content_enum = kind.to_content_enum();
        let content_variants: Vec<_> = variants.iter().map(|v| v.ctor(&content_enum)).collect();
        let prev_content = has_prev_content_field(kind, var).then(|| {
            quote! {
                /// Returns the previous content for this event.
                pub fn prev_content(&self) -> Option<#content_enum> {
                    match self {
                        #(
                            #self_variants(event) => {
                                event.prev_content.as_ref().map(|c| #content_variants(c.clone()))
                            },
                        )*
                        Self::_Custom(event) => {
                            event.prev_content.as_ref().map(|c| #content_enum::_Custom {
                                event_type: #ruma_events::EventContent::event_type(c).to_owned(),
                            })
                        },
                    }
                }
            }
        });
        quote! {
            /// Returns the content for this event.
            pub fn content(&self) -> #content_enum {
                match self {
                    #( #self_variants(event) => #content_variants(event.content.clone()), )*
                    Self::_Custom(event) => #content_enum::_Custom {
                        event_type: #ruma_events::EventContent::event_type(&event.content)
                            .to_owned(),
                    },
                }
            }
            #prev_content
        }
    });
    quote! {
        #[automatically_derived]
        impl #ident {
            #event_type
            #content_accessors
            #( #methods )*
        }
    }
}
/// Resolves the `ruma_events` path of the event struct for the event type
/// string `name`, for the given kind/variation.
fn to_event_path(
    name: &LitStr,
    kind: EventKind,
    var: EventKindVariation,
    ruma_events: &TokenStream,
) -> TokenStream {
    let span = name.span();
    let name = name.value();
    // There is no need to give a good compiler error as `to_camel_case` is called first.
    assert_eq!(&name[..2], "m.");
    // Module path: one segment per `.`-separated part after the `m.` prefix.
    let path: Vec<_> = name[2..].split('.').collect();
    // Type-name stem: camel-case the parts, splitting on both `.` and `_`.
    let event: String = name[2..]
        .split(&['.', '_'] as &[char])
        .map(|s| s.chars().next().unwrap().to_uppercase().to_string() + &s[1..])
        .collect();
    let path = path.iter().map(|s| Ident::new(s, span));
    let event_name = if kind == EventKind::ToDevice {
        // To-device events only exist in the Full variation and use their
        // own naming scheme.
        assert_eq!(var, EventKindVariation::Full);
        format_ident!("ToDevice{}Event", event)
    } else {
        format_ident!("{}{}Event", var, event)
    };
    quote! { #ruma_events::#( #path )::*::#event_name }
}
/// Builds the fully-qualified path to the event content struct for `name`,
/// e.g. `m.room.message` -> `#ruma_events::room::message::MessageEventContent`.
fn to_event_content_path(
    kind: EventKind,
    name: &LitStr,
    prefix: Option<&str>,
    ruma_events: &TokenStream,
) -> TokenStream {
    let span = name.span();
    let name = name.value();
    // There is no need to give a good compiler error as `to_camel_case` is called first.
    assert_eq!(&name[..2], "m.");
    let base = &name[2..];
    // CamelCase type stem, e.g. `room.message` -> `RoomMessage`.
    let event = base
        .split(&['.', '_'] as &[char])
        .map(|s| s.chars().next().unwrap().to_uppercase().to_string() + &s[1..])
        .collect::<String>();
    let pre = prefix.unwrap_or("");
    // To-device content types carry a fixed `ToDevice` prefix.
    let content_str = if let EventKind::ToDevice = kind {
        format_ident!("ToDevice{}{}EventContent", pre, event)
    } else {
        format_ident!("{}{}EventContent", pre, event)
    };
    // Module path segments, e.g. `room.message` -> `room::message`.
    let path = base.split('.').map(|seg| Ident::new(seg, span));
    quote! {
        #ruma_events::#( #path )::*::#content_str
    }
}
/// Splits the given `event_type` string on `.` and `_` removing the `m.room.` then
/// camel casing to give the `Event` struct name.
///
/// Returns a compile error pointing at `name`'s span when the event type does
/// not start with the well-known `m.` prefix.
fn to_camel_case(name: &LitStr) -> syn::Result<Ident> {
    let span = name.span();
    let name = name.value();
    // `starts_with` instead of `&name[..2]`: slicing panics on a name shorter
    // than two bytes, whereas such a name should produce the error below.
    if !name.starts_with("m.") {
        return Err(syn::Error::new(
            span,
            format!("well-known matrix events have to start with `m.` found `{}`", name),
        ));
    }
    // Uppercase the first char of every `.`/`_`-separated segment.
    let s: String = name[2..]
        .split(&['.', '_'] as &[char])
        .map(|s| s.chars().next().unwrap().to_uppercase().to_string() + &s[1..])
        .collect();
    Ok(Ident::new(&s, span))
}
/// Generates an accessor method called `name` for the event enum, delegating
/// to the corresponding field of each variant's inner event struct.
///
/// Returns `None` when `is_event_kind` reports that this kind/variation
/// combination does not carry the field.
fn generate_accessor(
    name: &str,
    kind: EventKind,
    var: EventKindVariation,
    is_event_kind: EventKindFn,
    variants: &[EventEnumVariant],
    ruma_events: &TokenStream,
) -> Option<TokenStream> {
    is_event_kind(kind, var).then(|| {
        let docs = format!("Returns this event's {} field.", name);
        let ident = Ident::new(name, Span::call_site());
        // Return type depends on the field and on whether the event is redacted.
        let field_type = field_return_type(name, var, ruma_events);
        let variants = variants.iter().map(|v| v.match_arm(quote! { Self }));
        quote! {
            #[doc = #docs]
            pub fn #ident(&self) -> &#field_type {
                match self {
                    #( #variants(event) => &event.#ident, )*
                    Self::_Custom(event) => &event.#ident,
                }
            }
        }
    })
}
/// Maps an event field name to the type its generated accessor returns.
///
/// Panics if `name` is not one of the known fields; that can only happen if
/// the `EVENT_FIELD` const and this match get out of sync.
fn field_return_type(
    name: &str,
    var: EventKindVariation,
    ruma_events: &TokenStream,
) -> TokenStream {
    let ruma_common = quote! { #ruma_events::exports::ruma_common };
    let ruma_identifiers = quote! { #ruma_events::exports::ruma_identifiers };
    match name {
        "origin_server_ts" => quote! { #ruma_common::MilliSecondsSinceUnixEpoch },
        "room_id" => quote! { #ruma_identifiers::RoomId },
        "event_id" => quote! { #ruma_identifiers::EventId },
        "sender" => quote! { #ruma_identifiers::UserId },
        "state_key" => quote! { ::std::primitive::str },
        "unsigned" => {
            // Redacted variations carry a reduced unsigned type.
            if var.is_redacted() {
                quote! { #ruma_events::RedactedUnsigned }
            } else {
                quote! { #ruma_events::Unsigned }
            }
        }
        _ => panic!("the `ruma_events_macros::event_enum::EVENT_FIELD` const was changed"),
    }
}
/// A single variant of a generated event enum.
pub(crate) struct EventEnumVariant {
    // Attributes to emit on the variant declaration and its match arms.
    pub attrs: Vec<Attribute>,
    // The CamelCase identifier derived from the event type string.
    pub ident: Ident,
}
impl EventEnumVariant {
    /// Renders the variant, optionally qualified with `prefix ::` and
    /// optionally preceded by its attributes.
    pub(crate) fn to_tokens<T>(&self, prefix: Option<T>, with_attrs: bool) -> TokenStream
    where
        T: ToTokens,
    {
        let mut tokens = TokenStream::new();
        if with_attrs {
            for attr in &self.attrs {
                attr.to_tokens(&mut tokens);
            }
        }
        if let Some(p) = prefix {
            tokens.extend(quote! { #p :: })
        }
        self.ident.to_tokens(&mut tokens);
        tokens
    }
    /// The variant as used in the enum declaration (attributes, no prefix).
    pub(crate) fn decl(&self) -> TokenStream {
        self.to_tokens::<TokenStream>(None, true)
    }
    /// The variant as used in a match arm (prefix and attributes).
    pub(crate) fn match_arm(&self, prefix: impl ToTokens) -> TokenStream {
        self.to_tokens(Some(prefix), true)
    }
    /// The variant as used in a constructor expression (prefix, no attributes).
    pub(crate) fn ctor(&self, prefix: impl ToTokens) -> TokenStream {
        self.to_tokens(Some(prefix), false)
    }
}
impl EventEnumEntry {
    /// Converts this entry into an enum variant, camel-casing its event type
    /// string into the variant identifier.
    pub(crate) fn to_variant(&self) -> syn::Result<EventEnumVariant> {
        Ok(EventEnumVariant {
            attrs: self.attrs.clone(),
            ident: to_camel_case(&self.ev_type)?,
        })
    }
}
/// Derive-macro implementation generating a `From<Inner>` impl for every
/// variant of the annotated enum; each variant must be a one-element tuple.
pub(crate) fn expand_from_impls_derived(input: DeriveInput) -> TokenStream {
    let variants = match &input.data {
        Data::Enum(DataEnum { variants, .. }) => variants,
        _ => panic!("this derive macro only works with enums"),
    };
    let from_impls = variants.iter().map(|variant| match &variant.fields {
        syn::Fields::Unnamed(fields) if fields.unnamed.len() == 1 => {
            // The single unnamed field's type is the `From` source type.
            let inner_struct = &fields.unnamed.first().unwrap().ty;
            let var_ident = &variant.ident;
            let id = &input.ident;
            quote! {
                #[automatically_derived]
                impl ::std::convert::From<#inner_struct> for #id {
                    fn from(c: #inner_struct) -> Self {
                        Self::#var_ident(c)
                    }
                }
            }
        }
        _ => {
            panic!("this derive macro only works with enum variants with a single unnamed field")
        }
    });
    quote! {
        #( #from_impls )*
    }
}
|
use std::rc::Rc;
use std::ops::Deref;
use std::path::{PathBuf, Path};
use std::collections::{HashSet, HashMap};
use cargo::core::{Workspace, Package};
use cargo::sources::PathSource;
use cargo::util::Config as CargoConfig;
use syntex_syntax::attr;
use syntex_syntax::visit::{self, Visitor, FnKind};
use syntex_syntax::codemap::{CodeMap, Span, FilePathMapping};
use syntex_syntax::ast::*;
use syntex_syntax::parse::{self, ParseSess};
use syntex_syntax::parse::token::*;
use syntex_syntax::tokenstream::TokenTree;
use syntex_syntax::errors::Handler;
use syntex_syntax::errors::emitter::ColorConfig;
use config::Config;
/// Represents the results of analysis of a single file. Does not store the file
/// in question as this is expected to be maintained by the user.
#[derive(Clone)]
pub struct LineAnalysis {
    /// This represents lines that should be ignored in coverage
    /// but may be identified as coverable in the DWARF tables
    pub ignore: HashSet<usize>,
    /// This represents lines that should be included in coverage
    /// But may be ignored.
    pub cover: HashSet<usize>,
}
/// When the LineAnalysis results are mapped to their files there needs to be
/// an easy way to get the information back. For the container used implement
/// this trait
pub trait SourceAnalysisQuery {
    /// Returns true when the (1-based) line `l` of `path` should be ignored.
    fn should_ignore(&self, path: &Path, l:&usize) -> bool;
}
impl SourceAnalysisQuery for HashMap<PathBuf, LineAnalysis> {
    /// Returns true when analysis results exist for `path` and mark the
    /// (1-based) line `l` as ignored; false for unknown files.
    fn should_ignore(&self, path: &Path, l: &usize) -> bool {
        // Single map lookup instead of the contains_key + get().unwrap()
        // double lookup of the original.
        self.get(path).map_or(false, |analysis| analysis.ignore.contains(l))
    }
}
impl LineAnalysis {
    /// Creates an empty analysis result.
    fn new() -> LineAnalysis {
        LineAnalysis {
            ignore: HashSet::new(),
            cover: HashSet::new()
        }
    }
    /// Returns true if the given (1-based) line is marked as ignored.
    pub fn should_ignore(&self, line: &usize) -> bool {
        self.ignore.contains(line)
    }
    /// Marks lines as ignored. An ignored line is also removed from the
    /// coverable set so `ignore` always wins over `cover`.
    fn add_to_ignore(&mut self, lines: &[usize]) {
        for l in lines {
            self.ignore.insert(*l);
            // `HashSet::remove` is a no-op when absent; the previous
            // `contains` check was a redundant second lookup.
            self.cover.remove(l);
        }
    }
    /// Marks lines as coverable, unless they were previously ignored.
    fn add_to_cover(&mut self, lines: &[usize]) {
        for l in lines {
            if !self.ignore.contains(l) {
                self.cover.insert(*l);
            }
        }
    }
}
/// AST visitor that accumulates, per file, lines to ignore and lines to
/// force as coverable.
struct CoverageVisitor<'a> {
    // (file, 1-based line) pairs to ignore in coverage results.
    lines: Vec<(PathBuf, usize)>,
    // (file, 1-based line) pairs that must be treated as coverable.
    coverable: Vec<(PathBuf, usize)>,
    // Files already analysed; their modules are not walked again.
    covered: &'a HashSet<PathBuf>,
    // Codemap used to resolve spans to files and line indexes.
    codemap: &'a CodeMap,
    // User configuration (e.g. ignore_tests).
    config: &'a Config,
}
/// Returns a list of files and line numbers to ignore (not indexes!)
pub fn get_line_analysis(project: &Workspace, config: &Config) -> HashMap<PathBuf, LineAnalysis> {
    let mut result: HashMap<PathBuf, LineAnalysis> = HashMap::new();
    // Members iterates over all non-virtual packages in the workspace;
    // analyse everything when no explicit package list was given.
    let selected = project
        .members()
        .filter(|p| config.packages.is_empty() || config.packages.contains(&p.name().to_string()));
    for pkg in selected {
        analyse_package(pkg, config, project.config(), &mut result);
    }
    result
}
/// Analyses every target of a single package, merging the per-file
/// ignore/cover line results into `result`.
fn analyse_package(pkg: &Package,
    config:&Config,
    cargo_conf: &CargoConfig,
    result: &mut HashMap<PathBuf, LineAnalysis>) {
    let mut src = PathSource::new(pkg.root(), pkg.package_id().source_id(), cargo_conf);
    if let Ok(package) = src.root_package() {
        let codemap = Rc::new(CodeMap::new(FilePathMapping::empty()));
        let handler = Handler::with_tty_emitter(ColorConfig::Auto, false, false, Some(codemap.clone()));
        let parse_session = ParseSess::with_span_handler(handler, codemap.clone());
        for target in package.targets() {
            let file = target.src_path();
            // Skip the package's tests/ directory entirely when --ignore-tests is set.
            if !(config.ignore_tests && file.starts_with(pkg.root().join("tests"))) {
                let mut parser = parse::new_parser_from_file(&parse_session, file);
                // Keep cfg'd-out modules in the parse so every line is visited.
                parser.cfg_mods = false;
                if let Ok(krate) = parser.parse_crate_mod() {
                    // Files already present in `result`; the visitor will not
                    // descend into their modules again.
                    let done_files: HashSet<PathBuf> = result.keys()
                        .map(|x| x.clone())
                        .collect::<HashSet<_>>();
                    let lines = {
                        let mut visitor = CoverageVisitor::from_session(&parse_session, &done_files, config);
                        visitor.visit_mod(&krate.module, krate.span, &krate.attrs, NodeId::new(0));
                        visitor
                    };
                    // Merge ignored lines into the per-file results.
                    for ignore in &lines.lines {
                        if result.contains_key(&ignore.0) {
                            let l = result.get_mut(&ignore.0).unwrap();
                            l.add_to_ignore(&[ignore.1]);
                        }
                        else {
                            let mut l = LineAnalysis::new();
                            l.add_to_ignore(&[ignore.1]);
                            result.insert(ignore.0.clone(), l);
                        }
                    }
                    // Merge forced-coverable lines into the per-file results.
                    for cover in &lines.coverable {
                        if result.contains_key(&cover.0) {
                            let l = result.get_mut(&cover.0).unwrap();
                            l.add_to_cover(&[cover.1]);
                        }
                        else {
                            let mut l = LineAnalysis::new();
                            l.add_to_cover(&[cover.1]);
                            result.insert(cover.0.clone(), l);
                        }
                    }
                }
            }
        }
    }
}
/// Records every (file, 1-based line) pair covered by `s` into `lines`.
fn add_lines(codemap: &CodeMap, lines: &mut Vec<(PathBuf, usize)>, s: Span) {
    if let Ok(ls) = codemap.span_to_lines(s) {
        // The filename is the same for every line of the span: resolve it
        // once instead of querying the codemap (and allocating) per line.
        let pb = PathBuf::from(codemap.span_to_filename(s) as String);
        for line in &ls.lines {
            // Line number is index+1
            lines.push((pb.clone(), line.line_index + 1));
        }
    }
}
impl<'a> CoverageVisitor<'a> {
    /// Construct a new ignored lines object for the given project
    fn from_session(session: &'a ParseSess,
                    covered: &'a HashSet<PathBuf>,
                    config: &'a Config) -> CoverageVisitor<'a> {
        CoverageVisitor {
            lines: vec![],
            coverable: vec![],
            covered: covered,
            codemap: session.codemap(),
            config: config,
        }
    }
    /// Returns the 0-based line indexes spanned by `span`, or an empty vec
    /// when the span cannot be resolved against the codemap.
    fn get_line_indexes(&mut self, span: Span) -> Vec<usize> {
        if let Ok(ts) = self.codemap.span_to_lines(span) {
            ts.lines.iter()
                .map(|x| x.line_index)
                .collect::<Vec<_>>()
        } else {
            Vec::new()
        }
    }
    /// Add lines to the line ignore list
    fn ignore_lines(&mut self, span: Span) {
        add_lines(self.codemap, &mut self.lines, span);
    }
    /// Add lines to the forced-coverable list.
    fn cover_lines(&mut self, span: Span) {
        add_lines(self.codemap, &mut self.coverable, span);
    }
    /// Looks for #[cfg(test)] attribute.
    fn contains_cfg_test(&mut self, attrs: &[Attribute]) -> bool {
        attrs.iter()
            .filter(|x| x.path == "cfg")
            .filter_map(|x| x.meta_item_list())
            .flat_map(|x| x)
            .any(|x| {
                if let Some(w) = x.word() {
                    w.name().as_str() == "test"
                } else {
                    false
                }
            })
    }
    /// This function finds ignorable lines within actual coverable code.
    /// As opposed to other functions which find isolated lines that aren't
    /// executed or lines filtered by the user. These lines are things like
    /// close braces that are within coverable code but not coverable.
    fn find_ignorable_lines(&mut self, span: Span) {
        if let Ok(l) = self.codemap.span_to_lines(span) {
            for line in &l.lines {
                let pb = PathBuf::from(self.codemap.span_to_filename(span) as String);
                if let Some(s) = l.file.get_line(line.line_index) {
                    // Is this one of those pointless {, } or }; or )?; only lines?
                    if !s.chars().any(|x| !"(){}[]?;\t ,".contains(x)) {
                        self.lines.push((pb, line.line_index + 1));
                    }
                }
            }
        }
    }
    /// Ignores the argument lines of a macro invocation that hold nothing
    /// coverable (e.g. the trailing lines of a multi-line string literal).
    fn ignore_mac_args(&mut self, mac: &Mac_, s:Span) {
        // Line indexes touched by tokens that represent executable code.
        let mut cover: HashSet<usize> = HashSet::new();
        for token in mac.stream().into_trees() {
            match token {
                TokenTree::Token(ref s, ref t) => {
                    match t {
                        // Literals and punctuation carry no executable code.
                        &Token::Literal(_,_) | &Token::Pound | &Token::Comma => {},
                        _ => {
                            for l in self.get_line_indexes(*s) {
                                cover.insert(l);
                            }
                        },
                    }
                },
                _ => {},
            }
        }
        let pb = PathBuf::from(self.codemap.span_to_filename(s) as String);
        if let Ok(ts) = self.codemap.span_to_lines(s) {
            // Skip the first line: the macro invocation itself stays coverable.
            for l in ts.lines.iter().skip(1) {
                let linestr = if let Some(linestr) = ts.file.get_line(l.line_index) {
                    linestr
                } else {
                    ""
                };
                // Ignore only when no coverable token touches the line and the
                // span covers the whole source line.
                if !cover.contains(&l.line_index) && (linestr.len() <= (l.end_col.0 - l.start_col.0)) {
                    self.lines.push((pb.clone(), l.line_index+1));
                }
            }
        }
    }
    /// Ignores where statements given the generics struct and the span this where
    /// is contained within. In every instance tested the first line of the containing
    /// span is coverable therefore shouldn't be added to ignore list.
    fn ignore_where_statements(&mut self, gen: &Generics, container: Span) {
        let pb = PathBuf::from(self.codemap.span_to_filename(gen.span) as String);
        // 0-based index of the first line of the containing item.
        let first_line = {
            let mut line = None;
            if let Ok(fl) = self.codemap.span_to_lines(container) {
                if let Some(s) = fl.lines.get(0) {
                    line = Some(s.line_index);
                }
            }
            line
        };
        if let Some(first_line) = first_line {
            for w in &gen.where_clause.predicates {
                let span = match w {
                    &WherePredicate::BoundPredicate(ref b) => b.span,
                    &WherePredicate::RegionPredicate(ref r) => r.span,
                    &WherePredicate::EqPredicate(ref e) => e.span,
                };
                for l in self.get_line_indexes(span) {
                    if l != first_line {
                        // +1 converts the 0-based index to a 1-based line number.
                        self.lines.push((pb.clone(), l+1));
                    }
                }
            }
        }
    }
}
impl<'v, 'a> Visitor<'v> for CoverageVisitor<'a> {
    /// Items: extern crate lines are ignored, test functions are ignored when
    /// --ignore-tests is set, and inline/impl methods are force-covered.
    fn visit_item(&mut self, i: &'v Item) {
        match i.node {
            ItemKind::ExternCrate(..) => self.ignore_lines(i.span),
            ItemKind::Fn(_, _, _, _, ref gen, ref block) => {
                if attr::contains_name(&i.attrs, "test") && self.config.ignore_tests {
                    self.ignore_lines(i.span);
                    self.ignore_lines(block.deref().span);
                } else if attr::contains_name(&i.attrs, "inline") {
                    // Inlined bodies may drop out of debug info; force coverable.
                    self.cover_lines(block.deref().span);
                }
                self.ignore_where_statements(gen, i.span);
            },
            ItemKind::Impl(_, _, _, _, _, _, ref items) => {
                for i in items {
                    match i.node {
                        ImplItemKind::Method(ref sig,_) => {
                            self.cover_lines(i.span);
                            self.ignore_where_statements(&sig.generics, i.span);
                        }
                        _ => {},
                    }
                }
            },
            _ => {},
        }
        visit::walk_item(self, i);
    }
    fn visit_mod(&mut self, m: &'v Mod, s: Span, _attrs: &[Attribute], _n: NodeId) {
        // If mod is cfg(test) and --ignore-tests ignore contents!
        if let Ok(fl) = self.codemap.span_to_lines(s) {
            if self.config.ignore_tests && self.contains_cfg_test(_attrs) {
                self.ignore_lines(s);
                if fl.lines.len() == 1 {
                    // Ignore the file
                    self.ignore_lines(m.inner);
                }
            }
            else {
                if fl.lines.len() == 1 {
                    // mod imports show up as coverable. Ignore
                    self.ignore_lines(s);
                }
                let mod_path = PathBuf::from(self.codemap.span_to_filename(m.inner));
                // Don't walk files that were already analysed.
                if !self.covered.contains(&mod_path) {
                    visit::walk_mod(self, m);
                }
            }
        }
    }
    /// Default trait methods have bodies and are therefore coverable.
    fn visit_trait_item(&mut self, ti: &TraitItem) {
        match ti.node {
            TraitItemKind::Method(_, Some(ref b)) => {
                self.cover_lines(b.span);
            },
            _ => {},
        }
        visit::walk_trait_item(self, ti);
    }
    /// Generic functions get monomorphised; force their span as coverable.
    fn visit_fn(&mut self, fk: FnKind, fd: &FnDecl, s: Span, _: NodeId) {
        match fk {
            FnKind::ItemFn(_, g, _,_,_,_,_) => {
                if !g.ty_params.is_empty() {
                    self.cover_lines(s);
                }
            },
            FnKind::Method(_, sig, _, _) => {
                if !sig.generics.ty_params.is_empty() {
                    self.cover_lines(s);
                }
            },
            _ => {},
        }
        visit::walk_fn(self, fk, fd, s);
    }
    fn visit_expr(&mut self, ex: &Expr) {
        if let Ok(s) = self.codemap.span_to_lines(ex.span) {
            // If expression is multiple lines we might have to remove some of
            // said lines.
            if s.lines.len() > 1 {
                // Lines holding non-literal call arguments stay coverable.
                let mut cover: HashSet<usize> = HashSet::new();
                match ex.node {
                    ExprKind::Call(_, ref args) => {
                        cover.insert(s.lines[0].line_index);
                        for a in args {
                            match a.node {
                                ExprKind::Lit(..) => {},
                                _ => {
                                    for l in self.get_line_indexes(a.span) {
                                        cover.insert(l);
                                    }
                                },
                            }
                        }
                    },
                    ExprKind::MethodCall(_, _, ref args) => {
                        let mut it = args.iter();
                        it.next(); // First is function call
                        for i in it {
                            match i.node {
                                ExprKind::Lit(..) => {},
                                _ => {
                                    for l in self.get_line_indexes(i.span) {
                                        cover.insert(l);
                                    }
                                },
                            }
                        }
                    },
                    ExprKind::Mac(ref mac) => {
                        self.ignore_mac_args(&mac.node, ex.span);
                    },
                    _ => {},
                }
                if !cover.is_empty() {
                    let pb = PathBuf::from(self.codemap.span_to_filename(ex.span) as String);
                    for l in &s.lines {
                        if !cover.contains(&l.line_index) {
                            self.lines.push((pb.clone(), l.line_index + 1));
                        }
                    }
                }
            }
        }
        visit::walk_expr(self, ex);
    }
    fn visit_mac(&mut self, mac: &Mac) {
        // Use this to ignore unreachable lines
        let mac_text = &format!("{}", mac.node.path)[..];
        // TODO unimplemented should have extra logic to exclude the
        // function from coverage
        match mac_text {
            "unimplemented" => self.ignore_lines(mac.span),
            "unreachable" => self.ignore_lines(mac.span),
            _ => self.ignore_mac_args(&mac.node, mac.span),
        }
        visit::walk_mac(self, mac);
    }
    /// Ignores attributes which may get identified as coverable lines.
    fn visit_attribute(&mut self, attr: &Attribute) {
        if attr.check_name("derive") {
            self.ignore_lines(attr.span);
        }
    }
    /// Struct fields are mistakenly identified as instructions and uncoverable.
    fn visit_struct_field(&mut self, s: &'v StructField) {
        self.ignore_lines(s.span);
        visit::walk_struct_field(self, s);
    }
    /// Blocks may contain punctuation-only lines (closing braces etc.).
    fn visit_block(&mut self, b: &'v Block) {
        self.find_ignorable_lines(b.span);
        visit::walk_block(self, b);
    }
    /// Macro statements get the same argument filtering as macro expressions.
    fn visit_stmt(&mut self, s: &Stmt) {
        match s.node {
            StmtKind::Mac(ref p) => {
                let ref mac = p.0.node;
                self.ignore_mac_args(mac, s.span);
            },
            _ => {}
        }
        visit::walk_stmt(self, s);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use syntex_syntax::parse::filemap_to_parser;
    use syntex_syntax::parse::parser::Parser;
    /// Bundles the parsing state needed to analyse an in-memory source string.
    struct TestContext {
        conf: Config,
        codemap: Rc<CodeMap>,
        parse_session: ParseSess
    }
    impl TestContext {
        /// Creates a parser over `src_string`, registered under `filename`.
        fn generate_parser(&self, filename: &str, src_string: &str) -> Parser {
            let filemap = self.codemap.new_filemap(filename.to_string(),
                                                   src_string.to_string());
            filemap_to_parser(&self.parse_session, filemap)
        }
    }
    impl Default for TestContext {
        fn default() -> TestContext {
            let codemap = Rc::new(CodeMap::new(FilePathMapping::empty()));
            let handler = Handler::with_tty_emitter(ColorConfig::Auto, false, false, Some(codemap.clone()));
            let parse_session = ParseSess::with_span_handler(handler, codemap.clone());
            TestContext {
                conf: Config::default(),
                codemap: codemap,
                parse_session: parse_session
            }
        }
    }
    /// Parses the crate held by `parser` and returns the 1-based line numbers
    /// the visitor marked as ignorable.
    fn parse_crate(ctx: &TestContext, parser: &mut Parser) -> Vec<usize> {
        let krate = parser.parse_crate_mod();
        assert!(krate.is_ok());
        let krate = krate.unwrap();
        let unused: HashSet<PathBuf> = HashSet::new();
        let mut visitor = CoverageVisitor::from_session(&ctx.parse_session, &unused, &ctx.conf);
        visitor.visit_mod(&krate.module, krate.span, &krate.attrs, NodeId::new(0));
        visitor.lines.iter().map(|x| x.1).collect::<Vec<_>>()
    }
    // Trailing lines of multi-line string literals in macro/function calls
    // should be ignored.
    #[test]
    fn filter_str_literals() {
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("literals.rs", "fn test() {\nwriteln!(#\"test\n\ttest\n\ttest\"#);\n}\n");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.len() > 1);
        assert!(lines.contains(&3));
        assert!(lines.contains(&4));
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("literals.rs", "fn test() {\nwrite(\"test\ntest\ntest\");\n}\nfn write(s:&str){}");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.len() > 1);
        assert!(lines.contains(&3));
        assert!(lines.contains(&4));
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("literals.rs", "\n\nfn test() {\nwriteln!(\n#\"test\"#\n);\n}\n");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.contains(&5));
    }
    // Derive attributes and struct field lines should all be ignored.
    #[test]
    fn filter_struct_members() {
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("struct_test.rs", "#[derive(Debug)]\npub struct Struct {\npub i: i32,\nj:String,\n}");
        let lines = parse_crate(&ctx, &mut parser);
        assert_eq!(lines.len(), 3);
        assert!(lines.contains(&1));
        assert!(lines.contains(&3));
        assert!(lines.contains(&4));
    }
    // Module bodies stay coverable; one-line mod declarations are ignored.
    #[test]
    fn filter_mods() {
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("test.rs", "mod foo {\nfn double(x:i32)->i32 {\n x*2\n}\n}");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(!lines.contains(&3));
        let mut parser = ctx.generate_parser("test.rs", "mod foo{}");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.contains(&1));
    }
    // unimplemented!/unreachable! are ignored; ordinary macros are not.
    #[test]
    fn filter_macros() {
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("test.rs", "\n\nfn unused() {\nunimplemented!();\n}");
        let lines = parse_crate(&ctx, &mut parser);
        // Braces should be ignored so number could be higher
        assert!(lines.len() >= 1);
        assert!(lines.contains(&4));
        let mut parser = ctx.generate_parser("test.rs", "fn unused() {\nunreachable!();\n}");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.len() >= 1);
        assert!(lines.contains(&2));
        let mut parser = ctx.generate_parser("test.rs", "fn unused() {\nprintln!(\"text\");\n}");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(!lines.contains(&2));
    }
    // Test modules and #[test] functions are only ignored when
    // conf.ignore_tests is set.
    #[test]
    fn filter_tests() {
        let ctx = TestContext::default();
        let src_string = "#[cfg(test)]\nmod tests {\n fn boo(){\nassert!(true);\n}\n}";
        let mut parser = ctx.generate_parser("test.rs", src_string);
        let lines = parse_crate(&ctx, &mut parser);
        assert!(!lines.contains(&4));
        let mut ctx = TestContext::default();
        ctx.conf.ignore_tests = true;
        let mut parser = ctx.generate_parser("test.rs", src_string);
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.contains(&4));
        let ctx = TestContext::default();
        let src_string = "#[test]\nfn mytest() { \n assert!(true);\n}";
        let mut parser = ctx.generate_parser("test.rs", src_string);
        let lines = parse_crate(&ctx, &mut parser);
        assert!(!lines.contains(&2));
        assert!(!lines.contains(&3));
        let mut ctx = TestContext::default();
        ctx.conf.ignore_tests = true;
        let mut parser = ctx.generate_parser("test.rs", src_string);
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.contains(&2));
        assert!(lines.contains(&3));
    }
}
Fixed issue with missed where clauses
Where-predicate span checking had flawed logic; it now ignores every line from one after the start of the function span to the end of the where span, ensuring the whole predicate is covered.
use std::rc::Rc;
use std::ops::Deref;
use std::path::{PathBuf, Path};
use std::collections::{HashSet, HashMap};
use cargo::core::{Workspace, Package};
use cargo::sources::PathSource;
use cargo::util::Config as CargoConfig;
use syntex_syntax::attr;
use syntex_syntax::visit::{self, Visitor, FnKind};
use syntex_syntax::codemap::{CodeMap, Span, FilePathMapping};
use syntex_syntax::ast::*;
use syntex_syntax::parse::{self, ParseSess};
use syntex_syntax::parse::token::*;
use syntex_syntax::tokenstream::TokenTree;
use syntex_syntax::errors::Handler;
use syntex_syntax::errors::emitter::ColorConfig;
use config::Config;
/// Represents the results of analysis of a single file. Does not store the file
/// in question as this is expected to be maintained by the user.
#[derive(Clone)]
pub struct LineAnalysis {
    /// This represents lines that should be ignored in coverage
    /// but may be identified as coverable in the DWARF tables
    pub ignore: HashSet<usize>,
    /// This represents lines that should be included in coverage
    /// But may be ignored.
    pub cover: HashSet<usize>,
}
/// When the LineAnalysis results are mapped to their files there needs to be
/// an easy way to get the information back. For the container used implement
/// this trait
pub trait SourceAnalysisQuery {
    /// Returns true when the (1-based) line `l` of `path` should be ignored.
    fn should_ignore(&self, path: &Path, l:&usize) -> bool;
}
impl SourceAnalysisQuery for HashMap<PathBuf, LineAnalysis> {
    /// Returns true when analysis results exist for `path` and mark the
    /// (1-based) line `l` as ignored; false for unknown files.
    fn should_ignore(&self, path: &Path, l: &usize) -> bool {
        // Single map lookup instead of the contains_key + get().unwrap()
        // double lookup of the original.
        self.get(path).map_or(false, |analysis| analysis.ignore.contains(l))
    }
}
impl LineAnalysis {
    /// Creates an empty analysis result.
    fn new() -> LineAnalysis {
        LineAnalysis {
            ignore: HashSet::new(),
            cover: HashSet::new()
        }
    }
    /// Returns true if the given (1-based) line is marked as ignored.
    pub fn should_ignore(&self, line: &usize) -> bool {
        self.ignore.contains(line)
    }
    /// Marks lines as ignored. An ignored line is also removed from the
    /// coverable set so `ignore` always wins over `cover`.
    fn add_to_ignore(&mut self, lines: &[usize]) {
        for l in lines {
            self.ignore.insert(*l);
            // `HashSet::remove` is a no-op when absent; the previous
            // `contains` check was a redundant second lookup.
            self.cover.remove(l);
        }
    }
    /// Marks lines as coverable, unless they were previously ignored.
    fn add_to_cover(&mut self, lines: &[usize]) {
        for l in lines {
            if !self.ignore.contains(l) {
                self.cover.insert(*l);
            }
        }
    }
}
/// AST visitor that accumulates, per file, lines to ignore and lines to
/// force as coverable.
struct CoverageVisitor<'a> {
    // (file, 1-based line) pairs to ignore in coverage results.
    lines: Vec<(PathBuf, usize)>,
    // (file, 1-based line) pairs that must be treated as coverable.
    coverable: Vec<(PathBuf, usize)>,
    // Files already analysed; their modules are not walked again.
    covered: &'a HashSet<PathBuf>,
    // Codemap used to resolve spans to files and line indexes.
    codemap: &'a CodeMap,
    // User configuration (e.g. ignore_tests).
    config: &'a Config,
}
/// Returns a list of files and line numbers to ignore (not indexes!)
pub fn get_line_analysis(project: &Workspace, config: &Config) -> HashMap<PathBuf, LineAnalysis> {
    let mut result: HashMap<PathBuf, LineAnalysis> = HashMap::new();
    // Members iterates over all non-virtual packages in the workspace;
    // analyse everything when no explicit package list was given.
    let selected = project
        .members()
        .filter(|p| config.packages.is_empty() || config.packages.contains(&p.name().to_string()));
    for pkg in selected {
        analyse_package(pkg, config, project.config(), &mut result);
    }
    result
}
/// Analyses every target of a single package, merging the per-file
/// ignore/cover line results into `result`.
fn analyse_package(pkg: &Package,
    config:&Config,
    cargo_conf: &CargoConfig,
    result: &mut HashMap<PathBuf, LineAnalysis>) {
    let mut src = PathSource::new(pkg.root(), pkg.package_id().source_id(), cargo_conf);
    if let Ok(package) = src.root_package() {
        let codemap = Rc::new(CodeMap::new(FilePathMapping::empty()));
        let handler = Handler::with_tty_emitter(ColorConfig::Auto, false, false, Some(codemap.clone()));
        let parse_session = ParseSess::with_span_handler(handler, codemap.clone());
        for target in package.targets() {
            let file = target.src_path();
            // Skip the package's tests/ directory entirely when --ignore-tests is set.
            if !(config.ignore_tests && file.starts_with(pkg.root().join("tests"))) {
                let mut parser = parse::new_parser_from_file(&parse_session, file);
                // Keep cfg'd-out modules in the parse so every line is visited.
                parser.cfg_mods = false;
                if let Ok(krate) = parser.parse_crate_mod() {
                    // Files already present in `result`; the visitor will not
                    // descend into their modules again.
                    let done_files: HashSet<PathBuf> = result.keys()
                        .map(|x| x.clone())
                        .collect::<HashSet<_>>();
                    let lines = {
                        let mut visitor = CoverageVisitor::from_session(&parse_session, &done_files, config);
                        visitor.visit_mod(&krate.module, krate.span, &krate.attrs, NodeId::new(0));
                        visitor
                    };
                    // Merge ignored lines into the per-file results.
                    for ignore in &lines.lines {
                        if result.contains_key(&ignore.0) {
                            let l = result.get_mut(&ignore.0).unwrap();
                            l.add_to_ignore(&[ignore.1]);
                        }
                        else {
                            let mut l = LineAnalysis::new();
                            l.add_to_ignore(&[ignore.1]);
                            result.insert(ignore.0.clone(), l);
                        }
                    }
                    // Merge forced-coverable lines into the per-file results.
                    for cover in &lines.coverable {
                        if result.contains_key(&cover.0) {
                            let l = result.get_mut(&cover.0).unwrap();
                            l.add_to_cover(&[cover.1]);
                        }
                        else {
                            let mut l = LineAnalysis::new();
                            l.add_to_cover(&[cover.1]);
                            result.insert(cover.0.clone(), l);
                        }
                    }
                }
            }
        }
    }
}
/// Records every (file, 1-based line) pair covered by `s` into `lines`.
fn add_lines(codemap: &CodeMap, lines: &mut Vec<(PathBuf, usize)>, s: Span) {
    if let Ok(ls) = codemap.span_to_lines(s) {
        // The filename is the same for every line of the span: resolve it
        // once instead of querying the codemap (and allocating) per line.
        let pb = PathBuf::from(codemap.span_to_filename(s) as String);
        for line in &ls.lines {
            // Line number is index+1
            lines.push((pb.clone(), line.line_index + 1));
        }
    }
}
impl<'a> CoverageVisitor<'a> {
    /// Construct a new ignored lines object for the given project
    fn from_session(session: &'a ParseSess,
                    covered: &'a HashSet<PathBuf>,
                    config: &'a Config) -> CoverageVisitor<'a> {
        CoverageVisitor {
            lines: vec![],
            coverable: vec![],
            covered: covered,
            codemap: session.codemap(),
            config: config,
        }
    }
    /// Returns the 0-based line indexes spanned by `span`, or an empty vec
    /// when the span cannot be resolved against the codemap.
    fn get_line_indexes(&mut self, span: Span) -> Vec<usize> {
        if let Ok(ts) = self.codemap.span_to_lines(span) {
            ts.lines.iter()
                .map(|x| x.line_index)
                .collect::<Vec<_>>()
        } else {
            Vec::new()
        }
    }
    /// Add lines to the line ignore list
    fn ignore_lines(&mut self, span: Span) {
        add_lines(self.codemap, &mut self.lines, span);
    }
    /// Add lines to the forced-coverable list.
    fn cover_lines(&mut self, span: Span) {
        add_lines(self.codemap, &mut self.coverable, span);
    }
    /// Looks for #[cfg(test)] attribute.
    fn contains_cfg_test(&mut self, attrs: &[Attribute]) -> bool {
        attrs.iter()
            .filter(|x| x.path == "cfg")
            .filter_map(|x| x.meta_item_list())
            .flat_map(|x| x)
            .any(|x| {
                if let Some(w) = x.word() {
                    w.name().as_str() == "test"
                } else {
                    false
                }
            })
    }
    /// This function finds ignorable lines within actual coverable code.
    /// As opposed to other functions which find isolated lines that aren't
    /// executed or lines filtered by the user. These lines are things like
    /// close braces that are within coverable code but not coverable.
    fn find_ignorable_lines(&mut self, span: Span) {
        if let Ok(l) = self.codemap.span_to_lines(span) {
            for line in &l.lines {
                let pb = PathBuf::from(self.codemap.span_to_filename(span) as String);
                if let Some(s) = l.file.get_line(line.line_index) {
                    // Is this one of those pointless {, } or }; or )?; only lines?
                    if !s.chars().any(|x| !"(){}[]?;\t ,".contains(x)) {
                        self.lines.push((pb, line.line_index + 1));
                    }
                }
            }
        }
    }
    /// Ignores the argument lines of a macro invocation that hold nothing
    /// coverable (e.g. the trailing lines of a multi-line string literal).
    fn ignore_mac_args(&mut self, mac: &Mac_, s:Span) {
        // Line indexes touched by tokens that represent executable code.
        let mut cover: HashSet<usize> = HashSet::new();
        for token in mac.stream().into_trees() {
            match token {
                TokenTree::Token(ref s, ref t) => {
                    match t {
                        // Literals and punctuation carry no executable code.
                        &Token::Literal(_,_) | &Token::Pound | &Token::Comma => {},
                        _ => {
                            for l in self.get_line_indexes(*s) {
                                cover.insert(l);
                            }
                        },
                    }
                },
                _ => {},
            }
        }
        let pb = PathBuf::from(self.codemap.span_to_filename(s) as String);
        if let Ok(ts) = self.codemap.span_to_lines(s) {
            // Skip the first line: the macro invocation itself stays coverable.
            for l in ts.lines.iter().skip(1) {
                let linestr = if let Some(linestr) = ts.file.get_line(l.line_index) {
                    linestr
                } else {
                    ""
                };
                // Ignore only when no coverable token touches the line and the
                // span covers the whole source line.
                if !cover.contains(&l.line_index) && (linestr.len() <= (l.end_col.0 - l.start_col.0)) {
                    self.lines.push((pb.clone(), l.line_index+1));
                }
            }
        }
    }
    /// Ignores where statements given the generics struct and the span this where
    /// is contained within. In every instance tested the first line of the containing
    /// span is coverable (as it is function definition) therefore shouldn't be
    /// added to ignore list.
    fn ignore_where_statements(&mut self, gen: &Generics, container: Span) {
        let pb = PathBuf::from(self.codemap.span_to_filename(gen.span) as String);
        // 0-based index of the first line of the containing item.
        let first_line = {
            let mut line = None;
            if let Ok(fl) = self.codemap.span_to_lines(container) {
                if let Some(s) = fl.lines.get(0) {
                    line = Some(s.line_index);
                }
            }
            line
        };
        if let Some(first_line) = first_line {
            for w in &gen.where_clause.predicates {
                let span = match w {
                    &WherePredicate::BoundPredicate(ref b) => b.span,
                    &WherePredicate::RegionPredicate(ref r) => r.span,
                    &WherePredicate::EqPredicate(ref e) => e.span,
                };
                // Ignore every line from just after the item's first line to
                // the end of this predicate, so multi-line predicates are
                // fully covered.
                let end = self.get_line_indexes(span.end_point());
                if let Some(&end) = end.last() {
                    for l in (first_line+1)..(end+1) {
                        // +1 converts the 0-based index to a 1-based line number.
                        self.lines.push((pb.clone(), l+1));
                    }
                }
            }
        }
    }
}
impl<'v, 'a> Visitor<'v> for CoverageVisitor<'a> {
fn visit_item(&mut self, i: &'v Item) {
match i.node {
ItemKind::ExternCrate(..) => self.ignore_lines(i.span),
ItemKind::Fn(_, _, _, _, ref gen, ref block) => {
if attr::contains_name(&i.attrs, "test") && self.config.ignore_tests {
self.ignore_lines(i.span);
self.ignore_lines(block.deref().span);
} else if attr::contains_name(&i.attrs, "inline") {
self.cover_lines(block.deref().span);
}
self.ignore_where_statements(gen, i.span);
},
ItemKind::Impl(_, _, _, _, _, _, ref items) => {
for i in items {
match i.node {
ImplItemKind::Method(ref sig,_) => {
self.cover_lines(i.span);
self.ignore_where_statements(&sig.generics, i.span);
}
_ => {},
}
}
},
_ => {},
}
visit::walk_item(self, i);
}
fn visit_mod(&mut self, m: &'v Mod, s: Span, _attrs: &[Attribute], _n: NodeId) {
// If mod is cfg(test) and --ignore-tests ignore contents!
if let Ok(fl) = self.codemap.span_to_lines(s) {
if self.config.ignore_tests && self.contains_cfg_test(_attrs) {
self.ignore_lines(s);
if fl.lines.len() == 1 {
// Ignore the file
self.ignore_lines(m.inner);
}
}
else {
if fl.lines.len() == 1 {
// mod imports show up as coverable. Ignore
self.ignore_lines(s);
}
let mod_path = PathBuf::from(self.codemap.span_to_filename(m.inner));
if !self.covered.contains(&mod_path) {
visit::walk_mod(self, m);
}
}
}
}
fn visit_trait_item(&mut self, ti: &TraitItem) {
match ti.node {
TraitItemKind::Method(_, Some(ref b)) => {
self.cover_lines(b.span);
},
_ => {},
}
visit::walk_trait_item(self, ti);
}
fn visit_fn(&mut self, fk: FnKind, fd: &FnDecl, s: Span, _: NodeId) {
match fk {
FnKind::ItemFn(_, g, _,_,_,_,_) => {
if !g.ty_params.is_empty() {
self.cover_lines(s);
}
},
FnKind::Method(_, sig, _, _) => {
if !sig.generics.ty_params.is_empty() {
self.cover_lines(s);
}
},
_ => {},
}
visit::walk_fn(self, fk, fd, s);
}
fn visit_expr(&mut self, ex: &Expr) {
if let Ok(s) = self.codemap.span_to_lines(ex.span) {
// If expression is multiple lines we might have to remove some of
// said lines.
if s.lines.len() > 1 {
let mut cover: HashSet<usize> = HashSet::new();
match ex.node {
ExprKind::Call(_, ref args) => {
cover.insert(s.lines[0].line_index);
for a in args {
match a.node {
ExprKind::Lit(..) => {},
_ => {
for l in self.get_line_indexes(a.span) {
cover.insert(l);
}
},
}
}
},
ExprKind::MethodCall(_, _, ref args) => {
let mut it = args.iter();
it.next(); // First is function call
for i in it {
match i.node {
ExprKind::Lit(..) => {},
_ => {
for l in self.get_line_indexes(i.span) {
cover.insert(l);
}
},
}
}
},
ExprKind::Mac(ref mac) => {
self.ignore_mac_args(&mac.node, ex.span);
},
_ => {},
}
if !cover.is_empty() {
let pb = PathBuf::from(self.codemap.span_to_filename(ex.span) as String);
for l in &s.lines {
if !cover.contains(&l.line_index) {
self.lines.push((pb.clone(), l.line_index + 1));
}
}
}
}
}
visit::walk_expr(self, ex);
}
fn visit_mac(&mut self, mac: &Mac) {
// Use this to ignore unreachable lines
let mac_text = &format!("{}", mac.node.path)[..];
// TODO unimplemented should have extra logic to exclude the
// function from coverage
match mac_text {
"unimplemented" => self.ignore_lines(mac.span),
"unreachable" => self.ignore_lines(mac.span),
_ => self.ignore_mac_args(&mac.node, mac.span),
}
visit::walk_mac(self, mac);
}
    /// Ignores attributes which may get identified as coverable lines.
    fn visit_attribute(&mut self, attr: &Attribute) {
        // `#[derive(..)]` code is compiler-generated; never coverable.
        if attr.check_name("derive") {
            self.ignore_lines(attr.span);
        }
    }
    /// Struct fields are mistakenly identified as instructions and uncoverable.
    fn visit_struct_field(&mut self, s: &'v StructField) {
        self.ignore_lines(s.span);
        visit::walk_struct_field(self, s);
    }
    /// Blocks may contain lines (e.g. lone braces — see the tests) that
    /// should not count towards coverage; scan for them before descending.
    fn visit_block(&mut self, b: &'v Block) {
        self.find_ignorable_lines(b.span);
        visit::walk_block(self, b);
    }
fn visit_stmt(&mut self, s: &Stmt) {
match s.node {
StmtKind::Mac(ref p) => {
let ref mac = p.0.node;
self.ignore_mac_args(mac, s.span);
},
_ => {}
}
visit::walk_stmt(self, s);
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use syntex_syntax::parse::filemap_to_parser;
    use syntex_syntax::parse::parser::Parser;
    /// Bundles the pieces needed to parse a source string in isolation.
    struct TestContext {
        conf: Config,
        codemap: Rc<CodeMap>,
        parse_session: ParseSess
    }
    impl TestContext {
        // Builds a parser over `src_string` as if it were the file `filename`.
        fn generate_parser(&self, filename: &str, src_string: &str) -> Parser {
            let filemap = self.codemap.new_filemap(filename.to_string(),
                                                   src_string.to_string());
            filemap_to_parser(&self.parse_session, filemap)
        }
    }
    impl Default for TestContext {
        fn default() -> TestContext {
            let codemap = Rc::new(CodeMap::new(FilePathMapping::empty()));
            let handler = Handler::with_tty_emitter(ColorConfig::Auto, false, false, Some(codemap.clone()));
            let parse_session = ParseSess::with_span_handler(handler, codemap.clone());
            TestContext {
                conf: Config::default(),
                codemap: codemap,
                parse_session: parse_session
            }
        }
    }
    /// Runs the coverage visitor over the parsed crate and returns the
    /// 1-based line numbers recorded in `visitor.lines`.
    fn parse_crate(ctx: &TestContext, parser: &mut Parser) -> Vec<usize> {
        let krate = parser.parse_crate_mod();
        assert!(krate.is_ok());
        let krate = krate.unwrap();
        let unused: HashSet<PathBuf> = HashSet::new();
        let mut visitor = CoverageVisitor::from_session(&ctx.parse_session, &unused, &ctx.conf);
        visitor.visit_mod(&krate.module, krate.span, &krate.attrs, NodeId::new(0));
        visitor.lines.iter().map(|x| x.1).collect::<Vec<_>>()
    }
    // Continuation lines of multi-line string literals must be ignored.
    #[test]
    fn filter_str_literals() {
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("literals.rs", "fn test() {\nwriteln!(#\"test\n\ttest\n\ttest\"#);\n}\n");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.len() > 1);
        assert!(lines.contains(&3));
        assert!(lines.contains(&4));
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("literals.rs", "fn test() {\nwrite(\"test\ntest\ntest\");\n}\nfn write(s:&str){}");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.len() > 1);
        assert!(lines.contains(&3));
        assert!(lines.contains(&4));
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("literals.rs", "\n\nfn test() {\nwriteln!(\n#\"test\"#\n);\n}\n");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.contains(&5));
    }
    // Field declarations and derive attributes are never coverable.
    #[test]
    fn filter_struct_members() {
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("struct_test.rs", "#[derive(Debug)]\npub struct Struct {\npub i: i32,\nj:String,\n}");
        let lines = parse_crate(&ctx, &mut parser);
        assert_eq!(lines.len(), 3);
        assert!(lines.contains(&1));
        assert!(lines.contains(&3));
        assert!(lines.contains(&4));
    }
    // Mod bodies stay covered; empty mod declarations are ignored.
    #[test]
    fn filter_mods() {
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("test.rs", "mod foo {\nfn double(x:i32)->i32 {\n x*2\n}\n}");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(!lines.contains(&3));
        let mut parser = ctx.generate_parser("test.rs", "mod foo{}");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.contains(&1));
    }
    // unimplemented!/unreachable! are ignored; ordinary macros are not.
    #[test]
    fn filter_macros() {
        let ctx = TestContext::default();
        let mut parser = ctx.generate_parser("test.rs", "\n\nfn unused() {\nunimplemented!();\n}");
        let lines = parse_crate(&ctx, &mut parser);
        // Braces should be ignored so number could be higher
        assert!(lines.len() >= 1);
        assert!(lines.contains(&4));
        let mut parser = ctx.generate_parser("test.rs", "fn unused() {\nunreachable!();\n}");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.len() >= 1);
        assert!(lines.contains(&2));
        let mut parser = ctx.generate_parser("test.rs", "fn unused() {\nprintln!(\"text\");\n}");
        let lines = parse_crate(&ctx, &mut parser);
        assert!(!lines.contains(&2));
    }
    // `ignore_tests` config flag controls whether #[cfg(test)]/#[test] code
    // is excluded from coverage.
    #[test]
    fn filter_tests() {
        let ctx = TestContext::default();
        let src_string = "#[cfg(test)]\nmod tests {\n fn boo(){\nassert!(true);\n}\n}";
        let mut parser = ctx.generate_parser("test.rs", src_string);
        let lines = parse_crate(&ctx, &mut parser);
        assert!(!lines.contains(&4));
        let mut ctx = TestContext::default();
        ctx.conf.ignore_tests = true;
        let mut parser = ctx.generate_parser("test.rs", src_string);
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.contains(&4));
        let ctx = TestContext::default();
        let src_string = "#[test]\nfn mytest() { \n assert!(true);\n}";
        let mut parser = ctx.generate_parser("test.rs", src_string);
        let lines = parse_crate(&ctx, &mut parser);
        assert!(!lines.contains(&2));
        assert!(!lines.contains(&3));
        let mut ctx = TestContext::default();
        ctx.conf.ignore_tests = true;
        let mut parser = ctx.generate_parser("test.rs", src_string);
        let lines = parse_crate(&ctx, &mut parser);
        assert!(lines.contains(&2));
        assert!(lines.contains(&3));
    }
}
|
use std::cell::RefCell;
use rustc::ty::{Ty, layout::Size};
use rustc::mir;
use super::{
MemoryAccess, RangeMap, EvalResult,
Pointer,
};
pub type Timestamp = u64;
/// Information about a potentially mutable borrow
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Mut {
    /// A unique, mutable reference
    Uniq(Timestamp),
    /// Any raw pointer, or a shared borrow with interior mutability
    Raw,
}
impl Mut {
    /// Whether this is the `Raw` kind of mutable borrow.
    #[inline(always)]
    fn is_raw(self) -> bool {
        // Exhaustive match so a future variant forces a decision here.
        match self {
            Mut::Raw => true,
            Mut::Uniq(_) => false,
        }
    }
}
/// Information about any kind of borrow
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Borrow {
    /// A mutable borrow, a raw pointer, or a shared borrow with interior mutability
    Mut(Mut),
    /// A shared borrow without interior mutability
    Frz(Timestamp)
}
impl Borrow {
    /// Whether this is a unique mutable borrow (`Mut(Uniq(_))`).
    #[inline(always)]
    fn is_uniq(self) -> bool {
        match self {
            Borrow::Mut(Mut::Uniq(_)) => true,
            Borrow::Mut(Mut::Raw) | Borrow::Frz(_) => false,
        }
    }
}
/// An item in the borrow stack
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum BorStackItem {
    /// Defines which references are permitted to mutate *if* the location is not frozen
    Mut(Mut),
    /// A barrier, tracking the function it belongs to by its index on the call stack
    #[allow(dead_code)] // for future use
    FnBarrier(usize)
}
impl Default for Borrow {
    /// Default to a raw mutable borrow.
    fn default() -> Self {
        Borrow::Mut(Mut::Raw)
    }
}
/// Extra global machine state
#[derive(Clone, Debug)]
pub struct State {
clock: Timestamp
}
impl State {
pub fn new() -> State {
State { clock: 0 }
}
}
/// Extra per-location state
///
/// `Default` is derived: an empty borrow stack and no freeze timestamp,
/// which is exactly what the previous hand-written impl produced
/// (`Vec::default()` is `Vec::new()`, `Option::default()` is `None`).
#[derive(Clone, Debug, Default)]
struct Stack {
    borrows: Vec<BorStackItem>, // used as a stack
    frozen_since: Option<Timestamp>,
}
/// Extra per-allocation state
#[derive(Clone, Debug, Default)]
pub struct Stacks {
    // One borrow stack per byte range. `RefCell` because even read
    // accesses mutate the stacks (see `memory_accessed`, which takes `&self`).
    stacks: RefCell<RangeMap<Stack>>,
}
/// Core operations
impl<'tcx> Stack {
    /// Check whether `bor` is currently active on this stack, i.e. whether
    /// an access through it would be allowed without changing anything.
    fn check(&self, bor: Borrow) -> bool {
        match bor {
            Borrow::Frz(acc_t) =>
                // Must be frozen at least as long as the `acc_t` says.
                self.frozen_since.map_or(false, |loc_t| loc_t <= acc_t),
            Borrow::Mut(acc_m) =>
                // Raw pointers are fine with frozen locations. This is important because &Cell is raw!
                if self.frozen_since.is_some() {
                    acc_m.is_raw()
                } else {
                    self.borrows.last().map_or(false, |&loc_itm| loc_itm == BorStackItem::Mut(acc_m))
                }
        }
    }
    /// Reactivate `bor` for this stack. If `force_mut` is set, we want to aggressively
    /// unfreeze this location (because we are about to push a `Uniq`).
    fn reactivate(&mut self, bor: Borrow, force_mut: bool) -> EvalResult<'tcx> {
        // Unless mutation is bound to happen, do NOT change anything if `bor` is already active.
        // In particular, if it is a `Mut(Raw)` and we are frozen, this should be a NOP.
        if !force_mut && self.check(bor) {
            return Ok(());
        }
        // Beyond this point a change is required, which only mutable borrows
        // may cause; shared borrows are an error either way.
        let acc_m = match bor {
            Borrow::Frz(_) =>
                if force_mut {
                    return err!(MachineError(format!("Using a shared borrow for mutation")))
                } else {
                    return err!(MachineError(format!("Location should be frozen but it is not")))
                }
            Borrow::Mut(acc_m) => acc_m,
        };
        // We definitely have to unfreeze this, even if we use the topmost item.
        self.frozen_since = None;
        // Pop until we see the one we are looking for.
        while let Some(&itm) = self.borrows.last() {
            match itm {
                BorStackItem::FnBarrier(_) => {
                    return err!(MachineError(format!("Trying to reactivate a borrow that lives behind a barrier")));
                }
                BorStackItem::Mut(loc_m) => {
                    if loc_m == acc_m { return Ok(()); }
                    trace!("reactivate: Popping {:?}", itm);
                    self.borrows.pop();
                }
            }
        }
        // Nothing to be found. Simulate a "virtual raw" element at the bottom of the stack.
        if acc_m.is_raw() {
            Ok(())
        } else {
            err!(MachineError(format!("Borrow-to-reactivate does not exist on the stack")))
        }
    }
    /// Record a fresh borrow of this location: freeze it (shared borrow)
    /// or push onto the borrow stack (mutable/raw borrow).
    fn initiate(&mut self, bor: Borrow) -> EvalResult<'tcx> {
        match bor {
            Borrow::Frz(t) => {
                trace!("initiate: Freezing");
                match self.frozen_since {
                    None => self.frozen_since = Some(t),
                    // Already frozen: the freeze time must only move forward.
                    Some(since) => assert!(since <= t),
                }
            }
            Borrow::Mut(m) => {
                trace!("initiate: Pushing {:?}", bor);
                match self.frozen_since {
                    None => self.borrows.push(BorStackItem::Mut(m)),
                    Some(_) =>
                        // FIXME: Do we want an exception for raw borrows?
                        return err!(MachineError(format!("Trying to mutate frozen location")))
                }
            }
        }
        Ok(())
    }
}
impl State {
    /// Advance the global borrow clock and return the new timestamp.
    /// The first value handed out is 1, so the initial 0 never collides
    /// with a real borrow time.
    fn increment_clock(&mut self) -> Timestamp {
        self.clock += 1;
        self.clock
    }
}
/// Higher-level operations
impl<'tcx> Stacks {
    /// Hook for every memory read/write: reactivate the accessing tag's
    /// borrow on each affected stack, forcing an unfreeze on writes.
    pub fn memory_accessed(
        &self,
        ptr: Pointer<Borrow>,
        size: Size,
        access: MemoryAccess,
    ) -> EvalResult<'tcx> {
        trace!("memory_accessed({:?}) with tag {:?}: {:?}, size {}", access, ptr.tag, ptr, size.bytes());
        let mut stacks = self.stacks.borrow_mut();
        for stack in stacks.iter_mut(ptr.offset, size) {
            // FIXME: Compare this with what the blog post says.
            stack.reactivate(ptr.tag, /*force_mut*/access == MemoryAccess::Write)?;
        }
        Ok(())
    }
    /// Hook for deallocation: treated as a forced mutation of every stack
    /// in the allocation.
    pub fn memory_deallocated(
        &mut self,
        ptr: Pointer<Borrow>,
    ) -> EvalResult<'tcx> {
        trace!("memory_deallocated with tag {:?}: {:?}", ptr.tag, ptr);
        let stacks = self.stacks.get_mut();
        for stack in stacks.iter_mut_all() {
            // This is like mutating.
            stack.reactivate(ptr.tag, /*force_mut*/true)?;
        }
        Ok(())
    }
    /// Install `new_bor` (derived from the reference tagged `ptr.tag`) on
    /// every stack covered by `ptr .. ptr+size`.
    fn reborrow(
        &self,
        ptr: Pointer<Borrow>,
        size: Size,
        new_bor: Borrow,
    ) -> EvalResult<'tcx> {
        let mut stacks = self.stacks.borrow_mut();
        for stack in stacks.iter_mut(ptr.offset, size) {
            if stack.check(new_bor) {
                // The new borrow is already active! This can happen when creating multiple
                // shared references from the same mutable reference. Do nothing.
            } else {
                // FIXME: The blog post says we should `reset` if this is a local.
                stack.reactivate(ptr.tag, /*force_mut*/new_bor.is_uniq())?;
                stack.initiate(new_bor)?;
            }
        }
        Ok(())
    }
}
/// Machine hooks
pub trait EvalContextExt<'tcx> {
    /// Compute (and install on the affected stacks) the tag for a new
    /// reference to `ptr` of the given pointee type and size.
    fn tag_reference(
        &mut self,
        ptr: Pointer<Borrow>,
        pointee_ty: Ty<'tcx>,
        size: Size,
        borrow_kind: Option<mir::BorrowKind>,
    ) -> EvalResult<'tcx, Borrow>;
    /// Compute the tag to use when dereferencing `ptr` at type `ptr_ty`.
    fn tag_dereference(
        &self,
        ptr: Pointer<Borrow>,
        ptr_ty: Ty<'tcx>,
    ) -> EvalResult<'tcx, Borrow>;
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> {
    fn tag_reference(
        &mut self,
        ptr: Pointer<Borrow>,
        pointee_ty: Ty<'tcx>,
        size: Size,
        borrow_kind: Option<mir::BorrowKind>,
    ) -> EvalResult<'tcx, Borrow> {
        let time = self.machine.stacked_borrows.increment_clock();
        // FIXME This does not do enough checking when only part of the data has
        // interior mutability.
        let new_bor = match borrow_kind {
            Some(mir::BorrowKind::Mut { .. }) => Borrow::Mut(Mut::Uniq(time)),
            Some(_) =>
                // Shared borrows only freeze if the pointee has no interior mutability.
                if self.type_is_freeze(pointee_ty) {
                    Borrow::Frz(time)
                } else {
                    Borrow::Mut(Mut::Raw)
                },
            None => Borrow::Mut(Mut::Raw),
        };
        trace!("tag_reference: Creating new reference ({:?}) for {:?} (pointee {}, size {}): {:?}",
            borrow_kind, ptr, pointee_ty, size.bytes(), new_bor);
        // Make sure this reference is not dangling or so
        self.memory.check_bounds(ptr, size, false)?;
        // Update the stacks. We cannot use `get_mut` because this might be immutable
        // memory.
        let alloc = self.memory.get(ptr.alloc_id).expect("We checked that the ptr is fine!");
        alloc.extra.reborrow(ptr, size, new_bor)?;
        Ok(new_bor)
    }
    fn tag_dereference(
        &self,
        ptr: Pointer<Borrow>,
        ptr_ty: Ty<'tcx>,
    ) -> EvalResult<'tcx, Borrow> {
        // If this is a raw ptr, forget about the tag.
        Ok(if ptr_ty.is_unsafe_ptr() {
            trace!("tag_dereference: Erasing tag for {:?} ({})", ptr, ptr_ty);
            Borrow::Mut(Mut::Raw)
        } else {
            // FIXME: Do we want to adjust the tag if it does not match the type?
            ptr.tag
        })
    }
}
expand comment about incomplete support for interior mutability
use std::cell::RefCell;
use rustc::ty::{Ty, layout::Size};
use rustc::mir;
use super::{
MemoryAccess, RangeMap, EvalResult,
Pointer,
};
pub type Timestamp = u64;
/// Information about a potentially mutable borrow
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Mut {
/// A unique, mutable reference
Uniq(Timestamp),
/// Any raw pointer, or a shared borrow with interior mutability
Raw,
}
impl Mut {
#[inline(always)]
fn is_raw(self) -> bool {
match self {
Mut::Raw => true,
_ => false,
}
}
}
/// Information about any kind of borrow
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum Borrow {
/// A mutable borrow, a raw pointer, or a shared borrow with interior mutability
Mut(Mut),
/// A shared borrow without interior mutability
Frz(Timestamp)
}
impl Borrow {
#[inline(always)]
fn is_uniq(self) -> bool {
match self {
Borrow::Mut(Mut::Uniq(_)) => true,
_ => false,
}
}
}
/// An item in the borrow stack
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub enum BorStackItem {
/// Defines which references are permitted to mutate *if* the location is not frozen
Mut(Mut),
/// A barrier, tracking the function it belongs to by its index on the call stack
#[allow(dead_code)] // for future use
FnBarrier(usize)
}
impl Default for Borrow {
fn default() -> Self {
Borrow::Mut(Mut::Raw)
}
}
/// Extra global machine state
#[derive(Clone, Debug)]
pub struct State {
clock: Timestamp
}
impl State {
pub fn new() -> State {
State { clock: 0 }
}
}
/// Extra per-location state
#[derive(Clone, Debug)]
struct Stack {
borrows: Vec<BorStackItem>, // used as a stack
frozen_since: Option<Timestamp>,
}
impl Default for Stack {
fn default() -> Self {
Stack {
borrows: Vec::new(),
frozen_since: None,
}
}
}
/// Extra per-allocation state
#[derive(Clone, Debug, Default)]
pub struct Stacks {
stacks: RefCell<RangeMap<Stack>>,
}
/// Core operations
impl<'tcx> Stack {
fn check(&self, bor: Borrow) -> bool {
match bor {
Borrow::Frz(acc_t) =>
// Must be frozen at least as long as the `acc_t` says.
self.frozen_since.map_or(false, |loc_t| loc_t <= acc_t),
Borrow::Mut(acc_m) =>
// Raw pointers are fine with frozen locations. This is important because &Cell is raw!
if self.frozen_since.is_some() {
acc_m.is_raw()
} else {
self.borrows.last().map_or(false, |&loc_itm| loc_itm == BorStackItem::Mut(acc_m))
}
}
}
/// Reactive `bor` for this stack. If `force_mut` is set, we want to aggressively
/// unfreeze this location (because we are about to push a `Uniq`).
fn reactivate(&mut self, bor: Borrow, force_mut: bool) -> EvalResult<'tcx> {
// Unless mutation is bound to happen, do NOT change anything if `bor` is already active.
// In particular, if it is a `Mut(Raw)` and we are frozen, this should be a NOP.
if !force_mut && self.check(bor) {
return Ok(());
}
let acc_m = match bor {
Borrow::Frz(_) =>
if force_mut {
return err!(MachineError(format!("Using a shared borrow for mutation")))
} else {
return err!(MachineError(format!("Location should be frozen but it is not")))
}
Borrow::Mut(acc_m) => acc_m,
};
// We definitely have to unfreeze this, even if we use the topmost item.
self.frozen_since = None;
// Pop until we see the one we are looking for.
while let Some(&itm) = self.borrows.last() {
match itm {
BorStackItem::FnBarrier(_) => {
return err!(MachineError(format!("Trying to reactivate a borrow that lives behind a barrier")));
}
BorStackItem::Mut(loc_m) => {
if loc_m == acc_m { return Ok(()); }
trace!("reactivate: Popping {:?}", itm);
self.borrows.pop();
}
}
}
// Nothing to be found. Simulate a "virtual raw" element at the bottom of the stack.
if acc_m.is_raw() {
Ok(())
} else {
err!(MachineError(format!("Borrow-to-reactivate does not exist on the stack")))
}
}
fn initiate(&mut self, bor: Borrow) -> EvalResult<'tcx> {
match bor {
Borrow::Frz(t) => {
trace!("initiate: Freezing");
match self.frozen_since {
None => self.frozen_since = Some(t),
Some(since) => assert!(since <= t),
}
}
Borrow::Mut(m) => {
trace!("initiate: Pushing {:?}", bor);
match self.frozen_since {
None => self.borrows.push(BorStackItem::Mut(m)),
Some(_) =>
// FIXME: Do we want an exception for raw borrows?
return err!(MachineError(format!("Trying to mutate frozen location")))
}
}
}
Ok(())
}
}
impl State {
fn increment_clock(&mut self) -> Timestamp {
self.clock += 1;
self.clock
}
}
/// Higher-level operations
impl<'tcx> Stacks {
pub fn memory_accessed(
&self,
ptr: Pointer<Borrow>,
size: Size,
access: MemoryAccess,
) -> EvalResult<'tcx> {
trace!("memory_accessed({:?}) with tag {:?}: {:?}, size {}", access, ptr.tag, ptr, size.bytes());
let mut stacks = self.stacks.borrow_mut();
for stack in stacks.iter_mut(ptr.offset, size) {
// FIXME: Compare this with what the blog post says.
stack.reactivate(ptr.tag, /*force_mut*/access == MemoryAccess::Write)?;
}
Ok(())
}
pub fn memory_deallocated(
&mut self,
ptr: Pointer<Borrow>,
) -> EvalResult<'tcx> {
trace!("memory_deallocated with tag {:?}: {:?}", ptr.tag, ptr);
let stacks = self.stacks.get_mut();
for stack in stacks.iter_mut_all() {
// This is like mutating.
stack.reactivate(ptr.tag, /*force_mut*/true)?;
}
Ok(())
}
fn reborrow(
&self,
ptr: Pointer<Borrow>,
size: Size,
new_bor: Borrow,
) -> EvalResult<'tcx> {
let mut stacks = self.stacks.borrow_mut();
for stack in stacks.iter_mut(ptr.offset, size) {
if stack.check(new_bor) {
// The new borrow is already active! This can happen when creating multiple
// shared references from the same mutable reference. Do nothing.
} else {
// FIXME: The blog post says we should `reset` if this is a local.
stack.reactivate(ptr.tag, /*force_mut*/new_bor.is_uniq())?;
stack.initiate(new_bor)?;
}
}
Ok(())
}
}
/// Machine hooks
pub trait EvalContextExt<'tcx> {
fn tag_reference(
&mut self,
ptr: Pointer<Borrow>,
pointee_ty: Ty<'tcx>,
size: Size,
borrow_kind: Option<mir::BorrowKind>,
) -> EvalResult<'tcx, Borrow>;
fn tag_dereference(
&self,
ptr: Pointer<Borrow>,
ptr_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Borrow>;
}
impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> {
    /// Compute and install the tag for a new reference to `ptr`.
    fn tag_reference(
        &mut self,
        ptr: Pointer<Borrow>,
        pointee_ty: Ty<'tcx>,
        size: Size,
        borrow_kind: Option<mir::BorrowKind>,
    ) -> EvalResult<'tcx, Borrow> {
        let time = self.machine.stacked_borrows.increment_clock();
        let new_bor = match borrow_kind {
            Some(mir::BorrowKind::Mut { .. }) => Borrow::Mut(Mut::Uniq(time)),
            Some(_) =>
                // FIXME This does not do enough checking when only part of the data has
                // interior mutability. When the type is `(i32, Cell<i32>)`, we want the
                // first field to be frozen but not the second.
                if self.type_is_freeze(pointee_ty) {
                    Borrow::Frz(time)
                } else {
                    Borrow::Mut(Mut::Raw)
                },
            None => Borrow::Mut(Mut::Raw),
        };
        trace!("tag_reference: Creating new reference ({:?}) for {:?} (pointee {}, size {}): {:?}",
            borrow_kind, ptr, pointee_ty, size.bytes(), new_bor);
        // Make sure this reference is not dangling or so
        self.memory.check_bounds(ptr, size, false)?;
        // Update the stacks. We cannot use `get_mut` because this might be immutable
        // memory.
        let alloc = self.memory.get(ptr.alloc_id).expect("We checked that the ptr is fine!");
        alloc.extra.reborrow(ptr, size, new_bor)?;
        Ok(new_bor)
    }
fn tag_dereference(
&self,
ptr: Pointer<Borrow>,
ptr_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Borrow> {
// If this is a raw ptr, forget about the tag.
Ok(if ptr_ty.is_unsafe_ptr() {
trace!("tag_dereference: Erasing tag for {:?} ({})", ptr, ptr_ty);
Borrow::Mut(Mut::Raw)
} else {
// FIXME: Do we want to adjust the tag if it does not match the type?
ptr.tag
})
}
}
|
/*
* Copyright (c) 2014-2015 Mathias Hällman
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
* A key can be of any of the following types:
* - Function key such as F1 or F24
* - Symbolic key such as enter or escape
* - Unicode codepoint such as a, ä, or あ
* The key also records key modifier information.
*/
#[derive(Clone, Copy, Eq, Hash, PartialEq)]
#[cfg_attr(test, derive(Debug))]
pub enum Key {
  /// Function key; `num` is the function-key number (e.g. 1 for F1).
  Fn{num: isize, mods: KeyMod},
  /// Symbolic key such as enter or escape.
  Sym{sym: KeySym, mods: KeyMod},
  /// A unicode codepoint such as 'a', 'ä', or 'あ'.
  Unicode{codepoint: char, mods: KeyMod},
}
#[derive(Clone, Copy, Eq, Hash, PartialEq)]
#[cfg_attr(test, derive(Debug))]
pub enum KeySym
{
  Unknown = -1,
  None = 0,
  /* Special names in C0 */
  Backspace,
  Tab,
  Enter,
  Escape,
  /* Special names in G0 */
  Space,
  Del,
  /* Special keys */
  Up,
  Down,
  Left,
  Right,
  Begin,
  Find,
  Insert,
  Delete,
  Select,
  Pageup,
  Pagedown,
  Home,
  End,
  /* Special keys from terminfo */
  Cancel,
  Clear,
  Close,
  Command,
  Copy,
  Exit,
  Help,
  Mark,
  Message,
  Move,
  Open,
  Options,
  Print,
  Redo,
  Reference,
  Refresh,
  Replace,
  Restart,
  Resume,
  Save,
  Suspend,
  Undo,
  /* Numeric keypad special keys */
  KP0,
  KP1,
  KP2,
  KP3,
  KP4,
  KP5,
  KP6,
  KP7,
  KP8,
  KP9,
  KPEnter,
  KPPlus,
  KPMinus,
  KPMult,
  KPDiv,
  KPComma,
  KPPeriod,
  KPEquals,
  /* et cetera ad nauseam */
  // NSyms comes last — presumably a count of real variants, not a key;
  // TODO confirm before appending variants after it.
  NSyms,
}
// Modifier-key state for a key event; flags combine bitwise.
// (Line comments are stripped by the lexer, so they are safe inside the macro.)
bitflags! {
  #[cfg_attr(test, derive(Debug))]
  flags KeyMod: u8 {
    const MOD_NONE = 0,
    const MOD_SHIFT = 1 << 0,
    const MOD_ALT = 1 << 1,
    const MOD_CTRL = 1 << 2,
  }
}
// Exercises the KeyMod API surface so that the methods generated by
// `bitflags!` do not trigger dead-code warnings.
#[allow(dead_code)] // as the name suggests, this function should not be called
fn totally_useless_function_just_to_suppress_warnings_about_dead_code() {
  let mut a = MOD_NONE;
  a.toggle(MOD_NONE); a.remove(MOD_NONE); a.intersects(MOD_NONE);
  a.is_all(); KeyMod::from_bits_truncate(0); KeyMod::from_bits(0);
  a.insert(MOD_NONE); a.contains(MOD_NONE);
}
Don't derive Debug on bitflags enum
/*
* Copyright (c) 2014-2015 Mathias Hällman
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
* A key can be of any of the following types:
* - Function key such as F1 or F24
* - Symbolic key such as enter or escape
* - Unicode codepoint such as a, ä, or あ
* The key also records key modifier information.
*/
#[derive(Clone, Copy, Eq, Hash, PartialEq)]
#[cfg_attr(test, derive(Debug))]
pub enum Key {
Fn{num: isize, mods: KeyMod},
Sym{sym: KeySym, mods: KeyMod},
Unicode{codepoint: char, mods: KeyMod},
}
#[derive(Clone, Copy, Eq, Hash, PartialEq)]
#[cfg_attr(test, derive(Debug))]
pub enum KeySym
{
Unknown = -1,
None = 0,
/* Special names in C0 */
Backspace,
Tab,
Enter,
Escape,
/* Special names in G0 */
Space,
Del,
/* Special keys */
Up,
Down,
Left,
Right,
Begin,
Find,
Insert,
Delete,
Select,
Pageup,
Pagedown,
Home,
End,
/* Special keys from terminfo */
Cancel,
Clear,
Close,
Command,
Copy,
Exit,
Help,
Mark,
Message,
Move,
Open,
Options,
Print,
Redo,
Reference,
Refresh,
Replace,
Restart,
Resume,
Save,
Suspend,
Undo,
/* Numeric keypad special keys */
KP0,
KP1,
KP2,
KP3,
KP4,
KP5,
KP6,
KP7,
KP8,
KP9,
KPEnter,
KPPlus,
KPMinus,
KPMult,
KPDiv,
KPComma,
KPPeriod,
KPEquals,
/* et cetera ad nauseum */
NSyms,
}
bitflags! {
flags KeyMod: u8 {
const MOD_NONE = 0,
const MOD_SHIFT = 1 << 0,
const MOD_ALT = 1 << 1,
const MOD_CTRL = 1 << 2,
}
}
#[allow(dead_code)] // as the name suggests, this function should not be called
fn totally_useless_function_just_to_suppress_warnings_about_dead_code() {
let mut a = MOD_NONE;
a.toggle(MOD_NONE); a.remove(MOD_NONE); a.intersects(MOD_NONE);
a.is_all(); KeyMod::from_bits_truncate(0); KeyMod::from_bits(0);
a.insert(MOD_NONE); a.contains(MOD_NONE);
}
|
use {Euclid, Point};
use point::Euclidean;
/// Clustering via the *k*-means algorithm (aka Lloyd's algorithm).
///
/// > *k*-means clustering aims to partition *n* observations into *k*
/// clusters in which each observation belongs to the cluster with the
/// nearest mean, serving as a prototype of the cluster.<sup><a
/// href="https://en.wikipedia.org/wiki/K-means_clustering">wikipedia</a></sup>
///
/// This is a heuristic, iterative approximation to the true optimal
/// assignment. The parameters used to control the approximation can
/// be set via `KmeansBuilder`.
///
/// # Examples
///
/// ```rust
/// use cogset::{Euclid, Kmeans};
///
/// let data = [Euclid([0.0, 0.0]),
/// Euclid([1.0, 0.5]),
/// Euclid([0.2, 0.2]),
/// Euclid([0.3, 0.8]),
/// Euclid([0.0, 1.0])];
/// let k = 3;
///
/// let kmeans = Kmeans::new(&data, k);
///
/// println!("{:?}", kmeans.clusters());
/// ```
pub struct Kmeans<T> {
    // assignments[i] is the index (into `centres`) of point i's cluster.
    assignments: Vec<usize>,
    // The k cluster means.
    centres: Vec<Euclid<T>>,
    // Number of Lloyd iterations performed.
    iterations: usize,
    // Whether the tolerance criterion was met before the iteration cap.
    converged: bool,
}
impl<T> Kmeans<T>
    where Euclid<T>: Point + Euclidean + Clone
{
    /// Run k-means on `data` with the default settings.
    pub fn new(data: &[Euclid<T>], k: usize) -> Kmeans<T> {
        KmeansBuilder::new().kmeans(data, k)
    }
    /// Retrieve the means and the clusters themselves that this
    /// *k*-means instance computed.
    ///
    /// The clusters are represented by vectors of indexes into the
    /// original data.
    pub fn clusters(&self) -> Vec<(Euclid<T>, Vec<usize>)> {
        // One (centre, members) pair per centre; bucket every point index
        // by its recorded assignment.
        let mut ret = self.centres.iter().cloned().map(|c| (c, vec![])).collect::<Vec<_>>();
        for (idx, &assign) in self.assignments.iter().enumerate() {
            ret[assign].1.push(idx);
        }
        ret
    }
    /// Return whether the algorithm converged, and how many steps
    /// that took.
    ///
    /// `Ok` is returned if the algorithm did meet the tolerance
    /// criterion, and `Err` if it reached the iteration limit
    /// instead.
    pub fn converged(&self) -> Result<usize, usize> {
        if self.converged {
            Ok(self.iterations)
        } else {
            Err(self.iterations)
        }
    }
}
/// Default cap on Lloyd iterations (see `KmeansBuilder::max_iter`).
const DEFAULT_MAX_ITER: usize = 100;
/// Default convergence tolerance (see `KmeansBuilder::tolerance`).
const DEFAULT_TOL: f64 = 1e-6;
/// A builder for *k*-means to provide control over parameters for the
/// algorithm.
///
/// This allows one to tweak settings like the tolerance and the
/// number of iterations.
///
/// # Examples
///
/// ```rust
/// use cogset::{Euclid, KmeansBuilder};
///
/// let data = [Euclid([0.0, 0.0]),
/// Euclid([1.0, 0.5]),
/// Euclid([0.2, 0.2]),
/// Euclid([0.3, 0.8]),
/// Euclid([0.0, 1.0])];
///
/// let k = 3;
///
/// // we want the means extra precise.
/// let tol = 1e-12;
/// let kmeans = KmeansBuilder::new().tolerance(tol).kmeans(&data, k);
///
/// println!("{:?}", kmeans.clusters());
/// ```
pub struct KmeansBuilder {
    // Convergence tolerance on the change in objective between iterations.
    tol: f64,
    // Upper bound on the number of Lloyd iterations.
    max_iter: usize,
}
impl KmeansBuilder {
    /// Create a default `KmeansBuilder`
    pub fn new() -> KmeansBuilder {
        KmeansBuilder {
            tol: DEFAULT_TOL,
            max_iter: DEFAULT_MAX_ITER,
        }
    }
    /// Set the tolerance used to decide if the iteration has
    /// converged to `tol`.
    pub fn tolerance(self, tol: f64) -> KmeansBuilder {
        KmeansBuilder { tol: tol, .. self }
    }
    /// Set the maximum number of iterations to run before aborting to
    /// `max_iter`.
    pub fn max_iter(self, max_iter: usize) -> KmeansBuilder {
        KmeansBuilder { max_iter: max_iter, .. self }
    }
    /// Run *k*-means with the given settings.
    ///
    /// This is functionally identical to `Kmeans::new`, other than
    /// the internal parameters differing.
    ///
    /// # Panics
    ///
    /// Panics unless `2 <= k && k < data.len()`.
    pub fn kmeans<T>(self, data: &[Euclid<T>], k: usize) -> Kmeans<T>
        where Euclid<T>: Point + Euclidean + Clone
    {
        assert!(2 <= k && k < data.len());
        let n = data.len();
        // `!0` is a deliberately-invalid assignment; the first
        // `update_assignments` call below overwrites every slot.
        let mut assignments = vec![!0; n];
        let mut costs = vec![0.0; n];
        // Seed the centres with the first k data points.
        let mut centres = data.iter().take(k).cloned().collect::<Vec<_>>();
        let mut counts = vec![0; k];
        // NOTE: the two calls below previously read `¢res` — mojibake
        // (HTML-entity corruption) of `&centres`; repaired.
        update_assignments(data, &mut assignments, &mut counts, &mut costs, &centres);
        // Objective = total point-to-centre cost; convergence is declared
        // when it changes by less than `tol` between iterations.
        let mut objective = costs.iter().fold(0.0, |a, b| a + *b);
        let mut converged = false;
        let mut iter = 0;
        while iter < self.max_iter {
            update_centres(data, &assignments, &counts, &mut centres);
            update_assignments(data, &mut assignments, &mut counts, &mut costs, &centres);
            let new_objective = costs.iter().fold(0.0, |a, b| a + *b);
            if (new_objective - objective).abs() < self.tol {
                converged = true;
                break
            }
            objective = new_objective;
            iter += 1
        }
        Kmeans {
            assignments: assignments,
            centres: centres,
            iterations: iter,
            converged: converged,
        }
    }
}
/// Assign every point to its nearest centre, recording each point's cost
/// (distance to that centre) and the size of every cluster.
fn update_assignments<T>(data: &[Euclid<T>],
                         assignments: &mut [usize], counts: &mut [usize], costs: &mut [f64],
                         centres: &[Euclid<T>])
    where Euclid<T>: Point + Euclidean + Clone
{
    use std::f64::INFINITY as INF;
    // Cluster sizes are recomputed from scratch below.
    for place in counts.iter_mut() { *place = 0 }
    for ((point, assign), cost) in data.iter().zip(assignments.iter_mut()).zip(costs.iter_mut()) {
        // Linear scan for the nearest centre.
        let mut min_dist = INF;
        let mut index = 0;
        for (i, c) in centres.iter().enumerate() {
            let dist = point.dist(c);
            if dist < min_dist {
                min_dist = dist;
                index = i;
            }
        }
        *cost = min_dist;
        *assign = index;
        counts[index] += 1;
    }
}
/// Recompute each centre as the mean of the points assigned to it.
fn update_centres<T>(data: &[Euclid<T>],
                     assignments: &[usize], counts: &[usize],
                     centres: &mut [Euclid<T>])
    where Euclid<T>: Point + Euclidean + Clone
{
    // Accumulate the sum of each cluster's points...
    for place in centres.iter_mut() { *place = <Euclid<T>>::zero() }
    for (point, assign) in data.iter().zip(assignments.iter()) {
        centres[*assign].add(point)
    }
    // ...then divide by the cluster size to get the mean.
    for (place, scale) in centres.iter_mut().zip(counts.iter()) {
        // NOTE(review): if a cluster ends up empty, `*scale == 0` and this
        // divides by zero, turning the centre into NaNs — confirm callers
        // guarantee non-empty clusters.
        place.scale(1.0 / *scale as f64)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use Euclid;
    use rand::{XorShiftRng,Rng};
    // Known tiny data set with an exactly-predictable clustering.
    #[test]
    fn smoke() {
        let points = [Euclid([0.0, 0.0]),
                      Euclid([1.0, 0.5]),
                      Euclid([0.2, 0.2]),
                      Euclid([0.3, 0.8]),
                      Euclid([0.0, 1.0]),
                      ];
        let res = Kmeans::new(&points, 3);
        let mut clusters = res.clusters();
        // Sort members and then clusters so the comparison is order-independent.
        for &mut (_, ref mut v) in &mut clusters {
            v.sort()
        }
        clusters.sort_by(|a, b| a.1.cmp(&b.1));
        assert_eq!(clusters,
                   [(Euclid([0.1, 0.1]), vec![0, 2]),
                    (Euclid([1.0, 0.5]), vec![1]),
                    (Euclid([0.15, 0.9]), vec![3, 4])]);
        assert_eq!(res.converged(), Ok(2));
    }
    // A tolerance far below what 2 iterations can reach must stop at the
    // iteration cap and report `Err(iterations)`.
    #[test]
    fn no_converge() {
        let mut rng = XorShiftRng::new_unseeded();
        let points = (0..100)
            .map(|_| Euclid([rng.gen()]))
            .collect::<Vec<Euclid<[f64; 1]>>>();
        let res = KmeansBuilder::new().tolerance(1e-18).max_iter(2).kmeans(&points, 4);
        assert_eq!(res.converged(), Err(2))
    }
}
kmeans: add benchmarks.
use {Euclid, Point};
use point::Euclidean;
/// Clustering via the *k*-means algorithm (aka Lloyd's algorithm).
///
/// > *k*-means clustering aims to partition *n* observations into *k*
/// clusters in which each observation belongs to the cluster with the
/// nearest mean, serving as a prototype of the cluster.<sup><a
/// href="https://en.wikipedia.org/wiki/K-means_clustering">wikipedia</a></sup>
///
/// This is a heuristic, iterative approximation to the true optimal
/// assignment. The parameters used to control the approximation can
/// be set via `KmeansBuilder`.
///
/// # Examples
///
/// ```rust
/// use cogset::{Euclid, Kmeans};
///
/// let data = [Euclid([0.0, 0.0]),
/// Euclid([1.0, 0.5]),
/// Euclid([0.2, 0.2]),
/// Euclid([0.3, 0.8]),
/// Euclid([0.0, 1.0])];
/// let k = 3;
///
/// let kmeans = Kmeans::new(&data, k);
///
/// println!("{:?}", kmeans.clusters());
/// ```
pub struct Kmeans<T> {
assignments: Vec<usize>,
centres: Vec<Euclid<T>>,
iterations: usize,
converged: bool,
}
impl<T> Kmeans<T>
    where Euclid<T>: Point + Euclidean + Clone
{
    /// Run k-means on `data` with the default settings.
    pub fn new(data: &[Euclid<T>], k: usize) -> Kmeans<T> {
        KmeansBuilder::new().kmeans(data, k)
    }
    /// Retrieve the means and the clusters themselves that this
    /// *k*-means instance computed.
    ///
    /// The clusters are represented by vectors of indexes into the
    /// original data.
    pub fn clusters(&self) -> Vec<(Euclid<T>, Vec<usize>)> {
        // One (centre, members) pair per cluster; members are filled in
        // by walking the per-point assignments.
        let mut ret = self.centres.iter().cloned().map(|c| (c, vec![])).collect::<Vec<_>>();
        for (idx, &assign) in self.assignments.iter().enumerate() {
            ret[assign].1.push(idx);
        }
        ret
    }
    /// Return whether the algorithm converged, and how many steps
    /// that took.
    ///
    /// `Ok` is returned if the algorithm did meet the tolerance
    /// criterion, and `Err` if it reached the iteration limit
    /// instead.
    pub fn converged(&self) -> Result<usize, usize> {
        if self.converged {
            Ok(self.iterations)
        } else {
            Err(self.iterations)
        }
    }
}
// Default iteration cap used by `KmeansBuilder::new`.
const DEFAULT_MAX_ITER: usize = 100;
// Default convergence tolerance (absolute change in summed cost).
const DEFAULT_TOL: f64 = 1e-6;
/// A builder for *k*-means to provide control over parameters for the
/// algorithm.
///
/// This allows one to tweak settings like the tolerance and the
/// number of iterations.
///
/// # Examples
///
/// ```rust
/// use cogset::{Euclid, KmeansBuilder};
///
/// let data = [Euclid([0.0, 0.0]),
/// Euclid([1.0, 0.5]),
/// Euclid([0.2, 0.2]),
/// Euclid([0.3, 0.8]),
/// Euclid([0.0, 1.0])];
///
/// let k = 3;
///
/// // we want the means extra precise.
/// let tol = 1e-12;
/// let kmeans = KmeansBuilder::new().tolerance(tol).kmeans(&data, k);
///
/// println!("{:?}", kmeans.clusters());
/// ```
pub struct KmeansBuilder {
    // Convergence tolerance: iteration stops once the absolute change
    // in the total point-to-centre cost drops below this.
    tol: f64,
    // Hard cap on the number of Lloyd iterations.
    max_iter: usize,
}
impl KmeansBuilder {
    /// Create a default `KmeansBuilder`
    pub fn new() -> KmeansBuilder {
        KmeansBuilder {
            tol: DEFAULT_TOL,
            max_iter: DEFAULT_MAX_ITER,
        }
    }
    /// Set the tolerance used to decide if the iteration has
    /// converged to `tol`.
    pub fn tolerance(self, tol: f64) -> KmeansBuilder {
        KmeansBuilder { tol: tol, .. self }
    }
    /// Set the maximum number of iterations to run before aborting to
    /// `max_iter`.
    pub fn max_iter(self, max_iter: usize) -> KmeansBuilder {
        KmeansBuilder { max_iter: max_iter, .. self }
    }
    /// Run *k*-means with the given settings.
    ///
    /// This is functionally identical to `Kmeans::new`, other than
    /// the internal parameters differing.
    ///
    /// # Panics
    ///
    /// Panics unless `2 <= k && k < data.len()`.
    pub fn kmeans<T>(self, data: &[Euclid<T>], k: usize) -> Kmeans<T>
        where Euclid<T>: Point + Euclidean + Clone
    {
        assert!(2 <= k && k < data.len());

        let n = data.len();
        let mut assignments = vec![!0; n]; // !0 == "not yet assigned"
        let mut costs = vec![0.0; n];
        // Seed the centres with the first k data points.
        let mut centres = data.iter().take(k).cloned().collect::<Vec<_>>();
        let mut counts = vec![0; k];

        // Fixed: the two calls below previously read `¢res` — a
        // mojibake of `&centres` (`&cent` collapsed into `¢`).
        update_assignments(data, &mut assignments, &mut counts, &mut costs, &centres);
        let mut objective = costs.iter().fold(0.0, |a, b| a + *b);

        let mut converged = false;
        let mut iter = 0;
        while iter < self.max_iter {
            // One Lloyd step: recompute means, then reassign points.
            update_centres(data, &assignments, &counts, &mut centres);
            update_assignments(data, &mut assignments, &mut counts, &mut costs, &centres);

            let new_objective = costs.iter().fold(0.0, |a, b| a + *b);
            // Converged once the objective stops moving by more than tol.
            if (new_objective - objective).abs() < self.tol {
                converged = true;
                break
            }
            objective = new_objective;
            iter += 1
        }

        Kmeans {
            assignments: assignments,
            centres: centres,
            iterations: iter,
            converged: converged,
        }
    }
}
// Assign every point to its nearest centre, recording the point's cost
// (distance to that centre) and the per-cluster membership counts.
fn update_assignments<T>(data: &[Euclid<T>],
                         assignments: &mut [usize], counts: &mut [usize], costs: &mut [f64],
                         centres: &[Euclid<T>])
    where Euclid<T>: Point + Euclidean + Clone
{
    // Membership counts are recomputed from scratch on every call.
    for count in counts.iter_mut() {
        *count = 0;
    }

    for ((point, slot), cost_slot) in data.iter().zip(assignments.iter_mut()).zip(costs.iter_mut()) {
        // Linear scan for the closest centre; the strict `<` means ties
        // go to the earliest centre.
        let mut best = 0;
        let mut best_dist = std::f64::INFINITY;
        for (i, centre) in centres.iter().enumerate() {
            let d = point.dist(centre);
            if d < best_dist {
                best_dist = d;
                best = i;
            }
        }
        *cost_slot = best_dist;
        *slot = best;
        counts[best] += 1;
    }
}
// Recompute every centre as the mean of the points assigned to it:
// zero the centres, sum assigned points in, then divide by the counts.
fn update_centres<T>(data: &[Euclid<T>],
                     assignments: &[usize], counts: &[usize],
                     centres: &mut [Euclid<T>])
    where Euclid<T>: Point + Euclidean + Clone
{
    for place in centres.iter_mut() { *place = <Euclid<T>>::zero() }
    for (point, assign) in data.iter().zip(assignments.iter()) {
        centres[*assign].add(point)
    }
    for (place, scale) in centres.iter_mut().zip(counts.iter()) {
        // NOTE(review): if a cluster ends up with zero members this
        // scales by 1.0/0 = inf, producing NaN coordinates — confirm
        // empty clusters cannot occur with the first-k seeding above.
        place.scale(1.0 / *scale as f64)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use Euclid;
    use rand::{XorShiftRng,Rng};
    // End-to-end check on a tiny 2-D data set with a known clustering.
    #[test]
    fn smoke() {
        let points = [Euclid([0.0, 0.0]),
                      Euclid([1.0, 0.5]),
                      Euclid([0.2, 0.2]),
                      Euclid([0.3, 0.8]),
                      Euclid([0.0, 1.0]),
                      ];
        let res = Kmeans::new(&points, 3);
        let mut clusters = res.clusters();
        // Sort members within each cluster, then the clusters
        // themselves, so the comparison is order-independent.
        for &mut (_, ref mut v) in &mut clusters {
            v.sort()
        }
        clusters.sort_by(|a, b| a.1.cmp(&b.1));
        assert_eq!(clusters,
                   [(Euclid([0.1, 0.1]), vec![0, 2]),
                    (Euclid([1.0, 0.5]), vec![1]),
                    (Euclid([0.15, 0.9]), vec![3, 4])]);
        assert_eq!(res.converged(), Ok(2));
    }
    // An impossibly tight tolerance with only two iterations must hit
    // the iteration cap and report Err.
    #[test]
    fn no_converge() {
        let mut rng = XorShiftRng::new_unseeded();
        let points = (0..100)
            .map(|_| Euclid([rng.gen()]))
            .collect::<Vec<Euclid<[f64; 1]>>>();
        let res = KmeansBuilder::new().tolerance(1e-18).max_iter(2).kmeans(&points, 4);
        assert_eq!(res.converged(), Err(2))
    }
}
#[cfg(all(test, feature = "unstable"))]
mod benches {
    use super::Kmeans;
    use Euclid;
    use rand::{XorShiftRng,Rng};
    use test::Bencher;
    // Generates one #[bench] per (k, n) pair: cluster n random 1-D
    // points into k clusters with the default settings.
    macro_rules! benches {
        ($($name: ident, $k: expr, $n: expr;)*) => {
            $(
                #[bench]
                fn $name(b: &mut Bencher) {
                    let k = $k;
                    let n = $n;
                    // Unseeded XorShift: deterministic input across runs.
                    let mut rng = XorShiftRng::new_unseeded();
                    let points = rng
                        .gen_iter::<f64>()
                        .take(n)
                        .map(|f| Euclid([f]))
                        .collect::<Vec<_>>();
                    b.iter(|| Kmeans::new(&points, k))
                }
            )*
        }
    }
    benches! {
        k003_n00010, 3, 10;
        k003_n00100, 3, 100;
        k003_n01000, 3, 1000;
        k003_n10000, 3, 10000;
        k010_n00100, 10, 100;
        k010_n01000, 10, 1000;
        k010_n10000, 10, 10000;
        k100_n01000, 100, 1000;
        // too slow:
        // k100_n10000, 100, 10000;
    }
}
|
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::LinkedList;
use common;
use implicit;
use explicit;
use implicit::Expr;
use common::{Id, Result};
use refined::{Base, T};
use rustproof_libsmt::backends::smtlib2::*;
use rustproof_libsmt::backends::backend::*;
use rustproof_libsmt::backends::z3::Z3;
use rustproof_libsmt::theories::integer;
use rustproof_libsmt::logics::lia::LIA;
// Like `try!` but for `Option`: unwraps `Some`, otherwise returns
// `None` from the enclosing function.
macro_rules! otry {
    ($expr:expr) => (match $expr {
        Some(val) => val,
        None => return None,
    })
}
// A typing constraint: either a well-formedness obligation on a single
// refined type, or a subtyping obligation between two refined types.
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum C {
    WellFormed(Box<Type>),
    Subtype(Box<Type>, Box<Type>),
}
// A liquid refinement: either a concrete predicate expression, or an
// as-yet-unsolved refinement variable κ.
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Liquid {
    E(implicit::Expr),
    K(Id, Box<LinkedList<Expr>>), // list of pending substitutions
}
pub type Type = T<Liquid>;
// An implication `path ⊢ lhs <: rhs` handed to the solver.
pub type Implication = (LinkedList<Expr>, Box<Type>, Box<Type>);
pub type Idx = i32; // constraint index
pub type Constraint = ((HashSet<Id>, LinkedList<Expr>), C); // Boolean valued expressions & their environments
// Candidate qualifier sets tracked for a single refinement variable κ.
#[derive(Debug, Clone)]
pub struct KInfo {
    // Every qualifier instantiated for this κ.
    all_qs: HashSet<implicit::Expr>,
    // The qualifiers still considered valid (shrinks during solving).
    curr_qs: HashSet<implicit::Expr>,
}
// Typing environment threaded through constraint generation.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Env {
    // Hindley-Milner shapes of all identifiers.
    shape: HashMap<Id, explicit::Type>,
    // Refined (liquid) types of identifiers bound so far.
    refined_env: HashMap<Id, Type>,
    // Boolean guards accumulated along the current control-flow path.
    path_constraints: LinkedList<Expr>,
}
impl Env {
    // An empty environment carrying the given HM shape map.
    fn new(shape: &HashMap<Id, explicit::Type>) -> Env {
        Env {
            shape: shape.clone(),
            refined_env: HashMap::new(),
            path_constraints: LinkedList::new(),
        }
    }
    // Look up the refined type of `s`. Panics if unbound — callers only
    // ask for identifiers they have already inserted.
    fn get(&self, s: &Id) -> Type {
        match self.refined_env.get(s) {
            Some(ty) => ty.clone(),
            // Fixed: the message previously had an unbalanced
            // parenthesis ("env.get('{}' missing").
            None => panic!("env.get('{}') missing ({:?})", s, self.in_scope()),
        }
    }
    // Bind `s` to refined type `ty`, overwriting any previous binding.
    fn insert(&mut self, s: &Id, ty: &Type) {
        self.refined_env.insert(s.clone(), ty.clone());
    }
    // Record a path guard (e.g. an `if` condition) for this branch.
    fn add_constraint(&mut self, e: &Expr) {
        self.path_constraints.push_back(e.clone())
    }
    // The set of identifiers currently bound to refined types.
    fn in_scope(&self) -> HashSet<Id> {
        // Idiom: return the collected set directly instead of binding a
        // temporary and using a `return` statement.
        self.refined_env.keys().cloned().collect()
    }
    // Snapshot the scope and path guards for embedding in a constraint.
    fn snapshot(&self) -> (HashSet<Id>, LinkedList<Expr>) {
        (self.in_scope(), self.path_constraints.clone())
    }
}
// Generator state for fresh refinement variables.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct KEnv {
    // HM shapes, used to pick base sorts for fresh refinements.
    shape: HashMap<Id, explicit::Type>,
    // Display prefix; NOTE(review): not referenced when fresh names are
    // built below ("!k{n}") — confirm it is still needed.
    env_id: String,
    // Monotonic counter for fresh κ identifiers.
    next_id: i32,
}
// The Hindley-Milner shape (unrefined type) of `expr` under `env`.
//
// Panics on `Star`/`V` (placeholders that must be gone before shape
// queries) and on applying a non-function.
fn hm_shape(env: &HashMap<Id, explicit::Type>, expr: &Expr) -> explicit::Type {
    use implicit::Expr::*;
    use explicit::Type::*;
    match *expr {
        Var(ref id) => env.get(id).unwrap().clone(),
        Const(common::Const::Int(_)) => TInt,
        Const(common::Const::Bool(_)) => TBool,
        Op2(op, _, _) => explicit::opty(op),
        Fun(ref id, ref e) => TFun(box env.get(id).unwrap().clone(), box hm_shape(env, e)),
        App(ref e1, _) => {
            // An application has the result type of the applied function.
            if let TFun(_, e2) = hm_shape(env, e1) {
                *e2
            } else {
                panic!("expected TFun, not {:?}", expr);
            }
        }
        // The shape of a conditional/let/fix is the shape of its body.
        If(_, ref e2, _) => hm_shape(env, e2),
        Let(_, _, ref e2) => hm_shape(env, e2),
        Fix(_, ref e) => hm_shape(env, e),
        MkArray(_, _) => TIntArray,
        GetArray(_, _) => TInt,
        SetArray(_, _, _) => TIntArray,
        Star => panic!("star found when it shouldn't be"),
        V => panic!("v found when it shouldn't be"),
    }
}
impl KEnv {
    // Fresh-variable generator seeded with the program's HM shapes.
    fn new(shape: &HashMap<Id, explicit::Type>) -> KEnv {
        KEnv {
            shape: shape.clone(),
            env_id: String::from("κ"), // ν
            next_id: 0,
        }
    }
    // A fresh refinement `{ν : base | κ_n}` scoped to the identifiers
    // currently bound in `env`.
    fn fresh_ty(&mut self, env: &Env, ty: &explicit::Type) -> Type {
        let id = self.next_id;
        self.next_id += 1;
        let base = match *ty {
            explicit::Type::TInt => Base::Int,
            explicit::Type::TBool => Base::Bool,
            _ => panic!("FIXME: handle {:?}", ty),
        };
        let k = Liquid::K(format!("!k{}", id), box LinkedList::new());
        T::Ref(env.in_scope(), base, box k)
    }
    // A fresh refined type matching the HM shape of `expr`: function
    // literals get a dependent arrow with fresh argument and result
    // refinements; anything else gets one fresh base refinement.
    fn fresh(&mut self, env: &Env, expr: &Expr) -> Type {
        if let &Expr::Fun(ref id, ref e) = expr {
            let t1 = &self.shape.get(id).unwrap().clone();
            let fx = self.fresh_ty(env, t1);
            let f = self.fresh(env, e);
            T::Fun(id.clone(), box fx, box f)
        } else {
            let ty = hm_shape(&env.shape, expr);
            self.fresh_ty(env, &ty)
        }
    }
}
// Refined type of a literal constant: `{ν : b | ν = c}`.
// (The unused `<'a>` lifetime parameter has been removed.)
fn ty(_: &mut KEnv, _: &Env, c: &common::Const) -> Type {
    use common::Op2;
    use common::Const::*;
    use self::Liquid::E;
    let base = match *c {
        Int(_) => Base::Int,
        Bool(_) => Base::Bool,
    };
    println!("ty({:?})", base);
    // {ν : int | ν = 3 }
    let eq = E(Expr::Op2(Op2::Eq, box Expr::V, box Expr::Const(*c)));
    T::Ref(HashSet::new(), base, box eq)
}
// The base sort of a refined type, or `None` for non-`Ref` types
// (e.g. function types).
fn base(ty: &Type) -> Option<Base> {
    if let T::Ref(_, b, _) = *ty {
        Some(b)
    } else {
        None
    }
}
// Substitution of an expression for an identifier inside a refined
// type. Currently a stub: it logs and returns the type unchanged.
fn subst(_: &Id, _: &Expr, ty: &Type) -> Type {
    println!("TODO: subst");
    ty.clone()
}
// Constraint generation ("cons" in the liquid-types literature):
// returns the refined type of `expr` together with the subtyping and
// well-formedness constraints it induces.
//
// NOTE(review): the `<'a>` lifetime parameter is unused.
pub fn cons<'a>(k_env: &mut KEnv, env: &Env, expr: &Expr) -> (Type, LinkedList<Constraint>) {
    use implicit::Expr::*;
    use common::Op2::Eq;
    match *expr {
        // Variables of base type get the exact refinement {ν | ν = x};
        // non-base (function) types are taken from the environment.
        Var(ref id) => {
            let ty: Type = if let Some(b) = base(&env.get(id)) {
                let eq = Op2(Eq, box V, box Var(id.clone()));
                T::Ref(env.in_scope(), b, box Liquid::E(eq))
            } else {
                println!("{} not base -- using just env ({:?})", id, env.get(id));
                env.get(id)
            };
            (ty, LinkedList::new())
        }
        // Constants carry their exact-value refinement, no constraints.
        Const(ref c) => {
            (ty(k_env, &env, c), LinkedList::new())
        }
        // Primitive ops: refinement {ν | ν = e1 op e2} plus the operand
        // constraints (the operand types themselves are discarded).
        Op2(op, ref e1, ref e2) => {
            let (_, mut c1) = cons(k_env, env, e1);
            let (_, mut c2) = cons(k_env, env, e2);
            c1.append(&mut c2);
            let ty = explicit::opty(op);
            let base = match ty {
                explicit::Type::TInt => Base::Int,
                explicit::Type::TBool => Base::Bool,
                _ => panic!("FIXME: handle {:?}", ty),
            };
            let eq = Op2(Eq, box V, box expr.clone());
            let f = T::Ref(env.in_scope(), base, box Liquid::E(eq));
            (f, c1)
        }
        // Conditionals: each branch is checked under its path guard and
        // must be a subtype of a fresh join type f.
        If(ref e1, ref e2, ref e3) => {
            let mut env_t = env.clone();
            let mut env_f = env.clone();
            env_t.add_constraint(&e1.clone());
            env_f.add_constraint(&App(box Var(String::from("not")), e1.clone()));
            let f = k_env.fresh(&env, expr);
            // type of e1 has already been verified to be a bool by HM
            let (_, mut c1) = cons(k_env, &env, e1);
            let (f2, mut c2) = cons(k_env, &env_t, e2);
            let (f3, mut c3) = cons(k_env, &env_f, e3);
            c1.append(&mut c2);
            c1.append(&mut c3);
            // Γ ⊢ (f)
            c1.push_back((env.snapshot(), C::WellFormed(box f.clone())));
            // Γ,e1 ⊢ (f2 <: f)
            c1.push_back((env_t.snapshot(), C::Subtype(box f2.clone(), box f.clone())));
            // Γ,¬e1 ⊢ (f3 <: f)
            c1.push_back((env_f.snapshot(), C::Subtype(box f3.clone(), box f.clone())));
            (f, c1)
        }
        // Lambdas: bind a fresh argument type, then require the body's
        // type to be a subtype of a fresh (well-formed) result type.
        Fun(ref x, ref e) => {
            let mut env = env.clone();
            let fx = k_env.fresh(&env, &Var(x.clone()));
            env.insert(x, &fx);
            let f = k_env.fresh(&env, e);
            let (fe, mut c) = cons(k_env, &env, e);
            // Γ ⊢ (x:fx → f)
            c.push_back((env.snapshot(), C::WellFormed(box f.clone())));
            // Γ,x:fx ⊢ (fe <: f)
            c.push_back((env.snapshot(), C::Subtype(box fe.clone(), box f.clone())));
            (f, c)
        }
        Fix(ref x, ref e) => {
            // const w/ ∀α.(α→α)→α
            let mut env = env.clone();
            let fx = k_env.fresh(&env, e);
            env.insert(x, &fx);
            // FIXME
            cons(k_env, &env, e)
        }
        // Let: the body type (not mentioning the bound variable
        // directly) must be a subtype of a fresh well-formed f.
        Let(ref id, ref e1, ref e2) => {
            let mut env = env.clone();
            let f = k_env.fresh(&env, expr);
            let (f1, mut c1) = cons(k_env, &env, e1);
            env.insert(id, &f1);
            let (f2, mut c2) = cons(k_env, &env, e2);
            c1.append(&mut c2);
            // Γ ⊢ (f)
            c1.push_back((env.snapshot(), C::WellFormed(box f.clone())));
            // Γ,x:f1 ⊢ (f2 <: f)
            c1.push_back((env.snapshot(), C::Subtype(box f2.clone(), box f.clone())));
            (f, c1)
        }
        // Application: argument must be a subtype of the function's
        // parameter type; result is the (substituted) result type.
        App(ref e1, ref e2) => {
            let (f1, mut c1) = cons(k_env, env, e1);
            println!("## {:?}\t:\t{:?}", e1, f1);
            let (f2, mut c2) = cons(k_env, env, e2);
            c1.append(&mut c2);
            if let T::Fun(ref x, ref fx, ref f) = f1 {
                let f = subst(x, e2, f);
                // Γ ⊢ (f2 <: fx)
                c1.push_back((env.snapshot(), C::Subtype(box f2.clone(), fx.clone())));
                return (f, c1);
            } else {
                panic!("expected TFun, not {:?}", f1);
            }
            // let (x:Fx → F, C1) = Cons(Γ, e1) in
            // let (F
            // 0
            // x, C2) = Cons(Γ, e2) in
            // ([e2/x]F, C1 ∪ C2 ∪ {Γ ` F
            // 0
            // x <: Fx})
        }
        // Arrays etc. are not refined yet: give them a trivially-true
        // boolean refinement and no constraints.
        _ => {
            println!("unhandled {:?}", expr);
            (T::Ref(env.in_scope(), Base::Bool, box Liquid::E(Const(common::Const::Bool(true)))), LinkedList::new())
        }
    }
}
// Flatten compound constraints into atomic ones, keyed into `map`.
//
// Function-type subtyping splits contravariantly on the argument and
// covariantly on the result (with the bound variable added to the
// result's scope); function-type well-formedness recurses into the
// result. Anything already atomic is stored directly.
fn split(map: &mut HashMap<Idx, Constraint>, constraints: &LinkedList<Constraint>) {
    for c in constraints.iter() {
        if let &((ref scope, ref pathc), C::Subtype(box T::Fun(_, ref tx1, ref t1), box T::Fun(ref x2, ref tx2, ref t2))) = c {
            let mut contra_cs: LinkedList<Constraint> = LinkedList::new();
            contra_cs.push_back(((scope.clone(), pathc.clone()), C::Subtype(tx2.clone(), tx1.clone())));
            let mut rscope = scope.clone();
            rscope.insert(x2.clone());
            contra_cs.push_back(((rscope, pathc.clone()), C::Subtype(t1.clone(), t2.clone())));
            // recurse
            split(map, &contra_cs);
        } else if let &((ref scope, ref pathc), C::WellFormed(box T::Fun(ref id, _, ref t))) = c {
            let mut wf_cs: LinkedList<Constraint> = LinkedList::new();
            let mut scope = scope.clone();
            scope.insert(id.clone());
            wf_cs.push_back(((scope, pathc.clone()), C::WellFormed(t.clone())));
            // recurse
            split(map, &wf_cs);
        } else {
            // Fixed: the index was previously a per-call counter that
            // restarted at 1, so recursive calls silently overwrote
            // entries inserted by their callers. Keying by the current
            // map size keeps every index unique.
            let idx = map.len() as Idx + 1;
            map.insert(idx, c.clone());
        }
    }
}
// Instantiate the qualifier template `q` for the concrete variable `v`:
// every `Star` placeholder becomes `Var(v)`; all other nodes are
// rebuilt structurally. `None` is propagated from sub-terms via `otry!`.
fn replace(v: &Id, q: &implicit::Expr) -> Option<implicit::Expr> {
    use implicit::Expr as I;
    let r = match *q {
        I::Var(ref id) => I::Var(id.clone()),
        I::Const(ref c) => I::Const(*c),
        I::Op2(ref op, ref l, ref r) => I::Op2(*op, box otry!(replace(v, l)), box otry!(replace(v, r))),
        I::Fun(ref id, ref e) => I::Fun(id.clone(), box otry!(replace(v, e))),
        I::App(ref e1, ref e2) => I::App(box otry!(replace(v, e1)), box otry!(replace(v, e2))),
        I::If(ref e1, ref e2, ref e3) => I::If(box otry!(replace(v, e1)), box otry!(replace(v, e2)), box otry!(replace(v, e3))),
        I::Let(ref id, ref e1, ref e2) => I::Let(id.clone(), box otry!(replace(v, e1)), box otry!(replace(v, e2))),
        I::Fix(ref id, ref e) => I::Fix(id.clone(), box otry!(replace(v, e))),
        I::MkArray(ref sz, ref n) => I::MkArray(box otry!(replace(v, sz)), box otry!(replace(v, n))),
        I::GetArray(ref id, ref idx) => I::GetArray(box otry!(replace(v, id)), box otry!(replace(v, idx))),
        I::SetArray(ref id, ref idx, ref var) => I::SetArray(box otry!(replace(v, id)), box otry!(replace(v, idx)), box otry!(replace(v, var))),
        I::V => I::V,
        // The placeholder is the only node that actually changes.
        I::Star => I::Var(v.clone()),
    };
    Some(r)
}
// Instantiate the qualifier templates Q for a refinement variable:
// every in-scope identifier is substituted (alpha-renamed) into every
// template at the site of the well-formedness constraint.
fn qstar(_: &Id, in_scope: &HashSet<Id>, _: &HashMap<Id, explicit::Type>, qset: &[implicit::Expr]) -> HashSet<implicit::Expr> {
    let mut instantiated: HashSet<implicit::Expr> = HashSet::new();
    for template in qset.iter() {
        for var in in_scope.iter() {
            if let Some(q) = replace(var, template) {
                instantiated.insert(q);
            } else {
                println!("not used:\t{:?}", template);
            }
        }
    }
    instantiated
}
// Build the initial κ assignment: for every well-formedness constraint
// over a refinement variable, instantiate the full qualifier set Q with
// the identifiers in scope at that point.
fn build_a(constraints: &HashMap<Idx, Constraint>, env: &HashMap<Id, explicit::Type>, q: &[implicit::Expr]) -> HashMap<Id, KInfo> {
    let mut a: HashMap<Id, KInfo> = HashMap::new();
    for (_, c) in constraints.iter() {
        if let &((_, _), C::WellFormed(ref ty)) = c {
            if let &box T::Ref(ref in_scope, _, box Liquid::K(ref id, _)) = ty {
                // TODO: subst?
                let all_qs = qstar(id, in_scope, env, q);
                // Start optimistically: every instantiated qualifier is
                // a candidate until refuted by solving.
                a.insert(id.clone(), KInfo{
                    all_qs: all_qs.clone(),
                    curr_qs: all_qs,
                });
            } else {
                panic!("WellFormed with E doesn't make sense: {:?}.", ty)
            }
        }
        // TODO: track antecedents that reference each k
    }
    a
}
// Solve the collected implications for the κ assignment `a`.
//
// NOTE(review): the Z3 interaction below is a hard-coded smoke example
// (x + y > 5, x > 1, y > 1) and does not encode `constraints`; the
// function currently just prints the implications and returns `a`
// unchanged — confirm this is still intended as a placeholder.
fn solve(constraints: &LinkedList<Implication>, a: &mut HashMap<Id, KInfo>) -> Result<HashMap<Id, KInfo>> {
    for &(ref path, ref a, ref p) in constraints.iter() {
        println!("C\t{:?}\n\t\t{:?}\n\t\t\t{:?}", path, a, p);
    };
    let mut z3 = Z3::new_with_binary("./z3");
    // Defining an instance of Z3 solver
    let mut solver = SMTLib2::new(Some(LIA));
    solver.set_logic(&mut z3);
    // Defining the symbolic vars x & y
    let x = solver.new_var(Some("x"), integer::Sorts::Int);
    let y = solver.new_var(Some("y"), integer::Sorts::Int);
    // Defining the integer constants
    let int5 = solver.new_const(integer::OpCodes::Const(5));
    let int1 = solver.new_const(integer::OpCodes::Const(1));
    // Defining the assert conditions
    let cond1 = solver.assert(integer::OpCodes::Add, &[x, y]);
    let _ = solver.assert(integer::OpCodes::Gt, &[cond1, int5]);
    let _ = solver.assert(integer::OpCodes::Gt, &[x, int1]);
    let _ = solver.assert(integer::OpCodes::Gt, &[y, int1]);
    let (result, _) = solver.solve(&mut z3, false);
    match result {
        Ok(result) => {
            println!("x: {}; y: {}", result[&x], result[&y]);
        }
        Err(e) => println!("No solutions for x and y found for given set of constraints ({:?})", e),
    }
    Ok(a.clone())
}
// End-to-end liquid type inference: generate constraints for `expr`,
// split them into atomic obligations, instantiate candidate qualifiers
// for every κ, and solve. Returns the surviving qualifier set per κ.
pub fn infer(expr: &Expr, env: &HashMap<Id, explicit::Type>, q: &[implicit::Expr]) -> Result<HashMap<Id, HashSet<implicit::Expr>>> {
    let mut k_env = KEnv::new(env);
    let (_, constraint_list) = cons(&mut k_env, &Env::new(env), expr);
    // Break compound (function-type) constraints down to base ones.
    let mut constraints: HashMap<Idx, Constraint> = HashMap::new();
    split(&mut constraints, &constraint_list);
    let mut a = build_a(&constraints, env, q);
    // Only subtyping constraints are handed to the solver.
    let mut all_constraints: LinkedList<Implication> = LinkedList::new();
    for (_, c) in constraints.iter() {
        if let &((_, ref path), C::Subtype(ref p, ref e)) = c {
            all_constraints.push_back((path.clone(), p.clone(), e.clone()));
        }
    }
    let min_a = solve(&all_constraints, &mut a)?;
    let mut res = HashMap::new();
    for (k, v) in min_a {
        res.insert(k, v.curr_qs.clone());
    }
    Ok(res)
}
test implication in z3
#[cfg(test)]
use std;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::LinkedList;
use common;
use implicit;
use explicit;
use implicit::Expr;
use common::{Id, Result};
use refined::{Base, T};
use rustproof_libsmt::backends::smtlib2::*;
use rustproof_libsmt::backends::backend::*;
use rustproof_libsmt::backends::z3::Z3;
use rustproof_libsmt::theories::{core, integer};
use rustproof_libsmt::logics::lia::LIA;
// Like `try!` but for `Option`: unwraps `Some`, otherwise returns
// `None` from the enclosing function.
macro_rules! otry {
    ($expr:expr) => (match $expr {
        Some(val) => val,
        None => return None,
    })
}
// A typing constraint: either a well-formedness obligation on a single
// refined type, or a subtyping obligation between two refined types.
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum C {
    WellFormed(Box<Type>),
    Subtype(Box<Type>, Box<Type>),
}
// A liquid refinement: either a concrete predicate expression, or an
// as-yet-unsolved refinement variable κ.
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Liquid {
    E(implicit::Expr),
    K(Id, Box<LinkedList<Expr>>), // list of pending substitutions
}
pub type Type = T<Liquid>;
// An implication `path ⊢ lhs <: rhs` handed to the solver.
pub type Implication = (LinkedList<Expr>, Box<Type>, Box<Type>);
pub type Idx = i32; // constraint index
pub type Constraint = ((HashSet<Id>, LinkedList<Expr>), C); // Boolean valued expressions & their environments
// Candidate qualifier sets tracked for a single refinement variable κ.
#[derive(Debug, Clone)]
pub struct KInfo {
    // Every qualifier instantiated for this κ.
    all_qs: HashSet<implicit::Expr>,
    // The qualifiers still considered valid (shrinks during solving).
    curr_qs: HashSet<implicit::Expr>,
}
// Typing environment threaded through constraint generation.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Env {
    // Hindley-Milner shapes of all identifiers.
    shape: HashMap<Id, explicit::Type>,
    // Refined (liquid) types of identifiers bound so far.
    refined_env: HashMap<Id, Type>,
    // Boolean guards accumulated along the current control-flow path.
    path_constraints: LinkedList<Expr>,
}
impl Env {
    // An empty environment carrying the given HM shape map.
    fn new(shape: &HashMap<Id, explicit::Type>) -> Env {
        Env {
            shape: shape.clone(),
            refined_env: HashMap::new(),
            path_constraints: LinkedList::new(),
        }
    }
    // Look up the refined type of `s`. Panics if unbound — callers only
    // ask for identifiers they have already inserted.
    fn get(&self, s: &Id) -> Type {
        match self.refined_env.get(s) {
            Some(ty) => ty.clone(),
            // Fixed: the message previously had an unbalanced
            // parenthesis ("env.get('{}' missing").
            None => panic!("env.get('{}') missing ({:?})", s, self.in_scope()),
        }
    }
    // Bind `s` to refined type `ty`, overwriting any previous binding.
    fn insert(&mut self, s: &Id, ty: &Type) {
        self.refined_env.insert(s.clone(), ty.clone());
    }
    // Record a path guard (e.g. an `if` condition) for this branch.
    fn add_constraint(&mut self, e: &Expr) {
        self.path_constraints.push_back(e.clone())
    }
    // The set of identifiers currently bound to refined types.
    fn in_scope(&self) -> HashSet<Id> {
        // Idiom: return the collected set directly instead of binding a
        // temporary and using a `return` statement.
        self.refined_env.keys().cloned().collect()
    }
    // Snapshot the scope and path guards for embedding in a constraint.
    fn snapshot(&self) -> (HashSet<Id>, LinkedList<Expr>) {
        (self.in_scope(), self.path_constraints.clone())
    }
}
// Generator state for fresh refinement variables.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct KEnv {
    // HM shapes, used to pick base sorts for fresh refinements.
    shape: HashMap<Id, explicit::Type>,
    // Display prefix; NOTE(review): not referenced when fresh names are
    // built below ("!k{n}") — confirm it is still needed.
    env_id: String,
    // Monotonic counter for fresh κ identifiers.
    next_id: i32,
}
// The Hindley-Milner shape (unrefined type) of `expr` under `env`.
//
// Panics on `Star`/`V` (placeholders that must be gone before shape
// queries) and on applying a non-function.
fn hm_shape(env: &HashMap<Id, explicit::Type>, expr: &Expr) -> explicit::Type {
    use implicit::Expr::*;
    use explicit::Type::*;
    match *expr {
        Var(ref id) => env.get(id).unwrap().clone(),
        Const(common::Const::Int(_)) => TInt,
        Const(common::Const::Bool(_)) => TBool,
        Op2(op, _, _) => explicit::opty(op),
        Fun(ref id, ref e) => TFun(box env.get(id).unwrap().clone(), box hm_shape(env, e)),
        App(ref e1, _) => {
            // An application has the result type of the applied function.
            if let TFun(_, e2) = hm_shape(env, e1) {
                *e2
            } else {
                panic!("expected TFun, not {:?}", expr);
            }
        }
        // The shape of a conditional/let/fix is the shape of its body.
        If(_, ref e2, _) => hm_shape(env, e2),
        Let(_, _, ref e2) => hm_shape(env, e2),
        Fix(_, ref e) => hm_shape(env, e),
        MkArray(_, _) => TIntArray,
        GetArray(_, _) => TInt,
        SetArray(_, _, _) => TIntArray,
        Star => panic!("star found when it shouldn't be"),
        V => panic!("v found when it shouldn't be"),
    }
}
impl KEnv {
    // Fresh-variable generator seeded with the program's HM shapes.
    fn new(shape: &HashMap<Id, explicit::Type>) -> KEnv {
        KEnv {
            shape: shape.clone(),
            env_id: String::from("κ"), // ν
            next_id: 0,
        }
    }
    // A fresh refinement `{ν : base | κ_n}` scoped to the identifiers
    // currently bound in `env`.
    fn fresh_ty(&mut self, env: &Env, ty: &explicit::Type) -> Type {
        let id = self.next_id;
        self.next_id += 1;
        let base = match *ty {
            explicit::Type::TInt => Base::Int,
            explicit::Type::TBool => Base::Bool,
            _ => panic!("FIXME: handle {:?}", ty),
        };
        let k = Liquid::K(format!("!k{}", id), box LinkedList::new());
        T::Ref(env.in_scope(), base, box k)
    }
    // A fresh refined type matching the HM shape of `expr`: function
    // literals get a dependent arrow with fresh argument and result
    // refinements; anything else gets one fresh base refinement.
    fn fresh(&mut self, env: &Env, expr: &Expr) -> Type {
        if let &Expr::Fun(ref id, ref e) = expr {
            let t1 = &self.shape.get(id).unwrap().clone();
            let fx = self.fresh_ty(env, t1);
            let f = self.fresh(env, e);
            T::Fun(id.clone(), box fx, box f)
        } else {
            let ty = hm_shape(&env.shape, expr);
            self.fresh_ty(env, &ty)
        }
    }
}
// Refined type of a literal constant: `{ν : b | ν = c}`.
// (The unused `<'a>` lifetime parameter has been removed.)
fn ty(_: &mut KEnv, _: &Env, c: &common::Const) -> Type {
    use common::Op2;
    use common::Const::*;
    use self::Liquid::E;
    let base = match *c {
        Int(_) => Base::Int,
        Bool(_) => Base::Bool,
    };
    println!("ty({:?})", base);
    // {ν : int | ν = 3 }
    let eq = E(Expr::Op2(Op2::Eq, box Expr::V, box Expr::Const(*c)));
    T::Ref(HashSet::new(), base, box eq)
}
// The base sort of a refined type, or `None` for non-`Ref` types
// (e.g. function types).
fn base(ty: &Type) -> Option<Base> {
    if let T::Ref(_, b, _) = *ty {
        Some(b)
    } else {
        None
    }
}
// Substitution of an expression for an identifier inside a refined
// type. Currently a stub: it logs and returns the type unchanged.
fn subst(_: &Id, _: &Expr, ty: &Type) -> Type {
    println!("TODO: subst");
    ty.clone()
}
// Constraint generation ("cons" in the liquid-types literature):
// returns the refined type of `expr` together with the subtyping and
// well-formedness constraints it induces.
//
// NOTE(review): the `<'a>` lifetime parameter is unused.
pub fn cons<'a>(k_env: &mut KEnv, env: &Env, expr: &Expr) -> (Type, LinkedList<Constraint>) {
    use implicit::Expr::*;
    use common::Op2::Eq;
    match *expr {
        // Variables of base type get the exact refinement {ν | ν = x};
        // non-base (function) types are taken from the environment.
        Var(ref id) => {
            let ty: Type = if let Some(b) = base(&env.get(id)) {
                let eq = Op2(Eq, box V, box Var(id.clone()));
                T::Ref(env.in_scope(), b, box Liquid::E(eq))
            } else {
                println!("{} not base -- using just env ({:?})", id, env.get(id));
                env.get(id)
            };
            (ty, LinkedList::new())
        }
        // Constants carry their exact-value refinement, no constraints.
        Const(ref c) => {
            (ty(k_env, &env, c), LinkedList::new())
        }
        // Primitive ops: refinement {ν | ν = e1 op e2} plus the operand
        // constraints (the operand types themselves are discarded).
        Op2(op, ref e1, ref e2) => {
            let (_, mut c1) = cons(k_env, env, e1);
            let (_, mut c2) = cons(k_env, env, e2);
            c1.append(&mut c2);
            let ty = explicit::opty(op);
            let base = match ty {
                explicit::Type::TInt => Base::Int,
                explicit::Type::TBool => Base::Bool,
                _ => panic!("FIXME: handle {:?}", ty),
            };
            let eq = Op2(Eq, box V, box expr.clone());
            let f = T::Ref(env.in_scope(), base, box Liquid::E(eq));
            (f, c1)
        }
        // Conditionals: each branch is checked under its path guard and
        // must be a subtype of a fresh join type f.
        If(ref e1, ref e2, ref e3) => {
            let mut env_t = env.clone();
            let mut env_f = env.clone();
            env_t.add_constraint(&e1.clone());
            env_f.add_constraint(&App(box Var(String::from("not")), e1.clone()));
            let f = k_env.fresh(&env, expr);
            // type of e1 has already been verified to be a bool by HM
            let (_, mut c1) = cons(k_env, &env, e1);
            let (f2, mut c2) = cons(k_env, &env_t, e2);
            let (f3, mut c3) = cons(k_env, &env_f, e3);
            c1.append(&mut c2);
            c1.append(&mut c3);
            // Γ ⊢ (f)
            c1.push_back((env.snapshot(), C::WellFormed(box f.clone())));
            // Γ,e1 ⊢ (f2 <: f)
            c1.push_back((env_t.snapshot(), C::Subtype(box f2.clone(), box f.clone())));
            // Γ,¬e1 ⊢ (f3 <: f)
            c1.push_back((env_f.snapshot(), C::Subtype(box f3.clone(), box f.clone())));
            (f, c1)
        }
        // Lambdas: bind a fresh argument type, then require the body's
        // type to be a subtype of a fresh (well-formed) result type.
        Fun(ref x, ref e) => {
            let mut env = env.clone();
            let fx = k_env.fresh(&env, &Var(x.clone()));
            env.insert(x, &fx);
            let f = k_env.fresh(&env, e);
            let (fe, mut c) = cons(k_env, &env, e);
            // Γ ⊢ (x:fx → f)
            c.push_back((env.snapshot(), C::WellFormed(box f.clone())));
            // Γ,x:fx ⊢ (fe <: f)
            c.push_back((env.snapshot(), C::Subtype(box fe.clone(), box f.clone())));
            (f, c)
        }
        Fix(ref x, ref e) => {
            // const w/ ∀α.(α→α)→α
            let mut env = env.clone();
            let fx = k_env.fresh(&env, e);
            env.insert(x, &fx);
            // FIXME
            cons(k_env, &env, e)
        }
        // Let: the body type (not mentioning the bound variable
        // directly) must be a subtype of a fresh well-formed f.
        Let(ref id, ref e1, ref e2) => {
            let mut env = env.clone();
            let f = k_env.fresh(&env, expr);
            let (f1, mut c1) = cons(k_env, &env, e1);
            env.insert(id, &f1);
            let (f2, mut c2) = cons(k_env, &env, e2);
            c1.append(&mut c2);
            // Γ ⊢ (f)
            c1.push_back((env.snapshot(), C::WellFormed(box f.clone())));
            // Γ,x:f1 ⊢ (f2 <: f)
            c1.push_back((env.snapshot(), C::Subtype(box f2.clone(), box f.clone())));
            (f, c1)
        }
        // Application: argument must be a subtype of the function's
        // parameter type; result is the (substituted) result type.
        App(ref e1, ref e2) => {
            let (f1, mut c1) = cons(k_env, env, e1);
            println!("## {:?}\t:\t{:?}", e1, f1);
            let (f2, mut c2) = cons(k_env, env, e2);
            c1.append(&mut c2);
            if let T::Fun(ref x, ref fx, ref f) = f1 {
                let f = subst(x, e2, f);
                // Γ ⊢ (f2 <: fx)
                c1.push_back((env.snapshot(), C::Subtype(box f2.clone(), fx.clone())));
                return (f, c1);
            } else {
                panic!("expected TFun, not {:?}", f1);
            }
            // let (x:Fx → F, C1) = Cons(Γ, e1) in
            // let (F
            // 0
            // x, C2) = Cons(Γ, e2) in
            // ([e2/x]F, C1 ∪ C2 ∪ {Γ ` F
            // 0
            // x <: Fx})
        }
        // Arrays etc. are not refined yet: give them a trivially-true
        // boolean refinement and no constraints.
        _ => {
            println!("unhandled {:?}", expr);
            (T::Ref(env.in_scope(), Base::Bool, box Liquid::E(Const(common::Const::Bool(true)))), LinkedList::new())
        }
    }
}
// Flatten compound constraints into atomic ones, keyed into `map`.
//
// Function-type subtyping splits contravariantly on the argument and
// covariantly on the result (with the bound variable added to the
// result's scope); function-type well-formedness recurses into the
// result. Anything already atomic is stored directly.
fn split(map: &mut HashMap<Idx, Constraint>, constraints: &LinkedList<Constraint>) {
    for c in constraints.iter() {
        if let &((ref scope, ref pathc), C::Subtype(box T::Fun(_, ref tx1, ref t1), box T::Fun(ref x2, ref tx2, ref t2))) = c {
            let mut contra_cs: LinkedList<Constraint> = LinkedList::new();
            contra_cs.push_back(((scope.clone(), pathc.clone()), C::Subtype(tx2.clone(), tx1.clone())));
            let mut rscope = scope.clone();
            rscope.insert(x2.clone());
            contra_cs.push_back(((rscope, pathc.clone()), C::Subtype(t1.clone(), t2.clone())));
            // recurse
            split(map, &contra_cs);
        } else if let &((ref scope, ref pathc), C::WellFormed(box T::Fun(ref id, _, ref t))) = c {
            let mut wf_cs: LinkedList<Constraint> = LinkedList::new();
            let mut scope = scope.clone();
            scope.insert(id.clone());
            wf_cs.push_back(((scope, pathc.clone()), C::WellFormed(t.clone())));
            // recurse
            split(map, &wf_cs);
        } else {
            // Fixed: the index was previously a per-call counter that
            // restarted at 1, so recursive calls silently overwrote
            // entries inserted by their callers. Keying by the current
            // map size keeps every index unique.
            let idx = map.len() as Idx + 1;
            map.insert(idx, c.clone());
        }
    }
}
// Instantiate the qualifier template `q` for the concrete variable `v`:
// every `Star` placeholder becomes `Var(v)`; all other nodes are
// rebuilt structurally. `None` is propagated from sub-terms via `otry!`.
fn replace(v: &Id, q: &implicit::Expr) -> Option<implicit::Expr> {
    use implicit::Expr as I;
    let r = match *q {
        I::Var(ref id) => I::Var(id.clone()),
        I::Const(ref c) => I::Const(*c),
        I::Op2(ref op, ref l, ref r) => I::Op2(*op, box otry!(replace(v, l)), box otry!(replace(v, r))),
        I::Fun(ref id, ref e) => I::Fun(id.clone(), box otry!(replace(v, e))),
        I::App(ref e1, ref e2) => I::App(box otry!(replace(v, e1)), box otry!(replace(v, e2))),
        I::If(ref e1, ref e2, ref e3) => I::If(box otry!(replace(v, e1)), box otry!(replace(v, e2)), box otry!(replace(v, e3))),
        I::Let(ref id, ref e1, ref e2) => I::Let(id.clone(), box otry!(replace(v, e1)), box otry!(replace(v, e2))),
        I::Fix(ref id, ref e) => I::Fix(id.clone(), box otry!(replace(v, e))),
        I::MkArray(ref sz, ref n) => I::MkArray(box otry!(replace(v, sz)), box otry!(replace(v, n))),
        I::GetArray(ref id, ref idx) => I::GetArray(box otry!(replace(v, id)), box otry!(replace(v, idx))),
        I::SetArray(ref id, ref idx, ref var) => I::SetArray(box otry!(replace(v, id)), box otry!(replace(v, idx)), box otry!(replace(v, var))),
        I::V => I::V,
        // The placeholder is the only node that actually changes.
        I::Star => I::Var(v.clone()),
    };
    Some(r)
}
// Instantiate the qualifier templates Q for a refinement variable:
// every in-scope identifier is substituted (alpha-renamed) into every
// template at the site of the well-formedness constraint.
fn qstar(_: &Id, in_scope: &HashSet<Id>, _: &HashMap<Id, explicit::Type>, qset: &[implicit::Expr]) -> HashSet<implicit::Expr> {
    let mut instantiated: HashSet<implicit::Expr> = HashSet::new();
    for template in qset.iter() {
        for var in in_scope.iter() {
            if let Some(q) = replace(var, template) {
                instantiated.insert(q);
            } else {
                println!("not used:\t{:?}", template);
            }
        }
    }
    instantiated
}
// Build the initial κ assignment: for every well-formedness constraint
// over a refinement variable, instantiate the full qualifier set Q with
// the identifiers in scope at that point.
fn build_a(constraints: &HashMap<Idx, Constraint>, env: &HashMap<Id, explicit::Type>, q: &[implicit::Expr]) -> HashMap<Id, KInfo> {
    let mut a: HashMap<Id, KInfo> = HashMap::new();
    for (_, c) in constraints.iter() {
        if let &((_, _), C::WellFormed(ref ty)) = c {
            if let &box T::Ref(ref in_scope, _, box Liquid::K(ref id, _)) = ty {
                // TODO: subst?
                let all_qs = qstar(id, in_scope, env, q);
                // Start optimistically: every instantiated qualifier is
                // a candidate until refuted by solving.
                a.insert(id.clone(), KInfo{
                    all_qs: all_qs.clone(),
                    curr_qs: all_qs,
                });
            } else {
                panic!("WellFormed with E doesn't make sense: {:?}.", ty)
            }
        }
        // TODO: track antecedents that reference each k
    }
    a
}
// Whether the conjunction of all p implies the conjunction of all q.
//
// The check is validity-by-refutation: assert ¬(P ⇒ Q) and report true
// iff Z3 answers unsat.
//
// NOTE(review): the formula sent to Z3 below is still a hard-coded
// example (x ≤ y ∧ v = y ⇒ v ≥ x ∧ v ≥ y); the slice arguments are not
// yet translated into SMT terms, hence the underscore-prefixed names
// (previously `p`/`q`, which triggered unused-variable warnings).
fn implication_holds(_p: &[implicit::Expr], _q: &[implicit::Expr]) -> bool {
    let mut z3 = Z3::new_with_binary("./z3");
    let mut solver = SMTLib2::new(Some(LIA));
    solver.set_logic(&mut z3);
    // Defining the symbolic vars x & y
    let x = solver.new_var(Some("x"), integer::Sorts::Int);
    let y = solver.new_var(Some("y"), integer::Sorts::Int);
    let v = solver.new_var(Some("v"), integer::Sorts::Int);
    // Defining the integer constants
    //let int0 = solver.new_const(integer::OpCodes::Const(0));
    let p1 = solver.assert(integer::OpCodes::Lte, &[x, y]);
    let p2 = solver.assert(integer::OpCodes::Cmp, &[v, y]);
    let p_all = solver.assert(core::OpCodes::And, &[p1, p2]);
    let k1 = solver.assert(integer::OpCodes::Gte, &[v, x]);
    let k2 = solver.assert(integer::OpCodes::Gte, &[v, y]);
    let k_all = solver.assert(core::OpCodes::And, &[k1, k2]);
    let imply = solver.assert(core::OpCodes::Imply, &[p_all, k_all]);
    let _ = solver.assert(core::OpCodes::Not, &[imply]);
    let (_, sat) = solver.solve(&mut z3, false);
    // unsat(¬(P ⇒ Q)) means the implication is valid.
    match sat {
        SMTRes::Unsat(_, _) => true,
        _ => false,
    }
}
// Solve the collected implications for the κ assignment `a`.
// Currently a placeholder: dump each implication for debugging and
// return the assignment unchanged.
fn solve(constraints: &LinkedList<Implication>, a: &mut HashMap<Id, KInfo>) -> Result<HashMap<Id, KInfo>> {
    // Renamed the loop bindings so they no longer shadow the `a`
    // parameter; output is identical.
    for &(ref path, ref antecedent, ref consequent) in constraints.iter() {
        println!("C\t{:?}\n\t\t{:?}\n\t\t\t{:?}", path, antecedent, consequent);
    }
    Ok(a.clone())
}
// End-to-end liquid type inference: generate constraints for `expr`,
// split them into atomic obligations, instantiate candidate qualifiers
// for every κ, and solve. Returns the surviving qualifier set per κ.
pub fn infer(expr: &Expr, env: &HashMap<Id, explicit::Type>, q: &[implicit::Expr]) -> Result<HashMap<Id, HashSet<implicit::Expr>>> {
    let mut k_env = KEnv::new(env);
    let (_, constraint_list) = cons(&mut k_env, &Env::new(env), expr);
    // Break compound (function-type) constraints down to base ones.
    let mut constraints: HashMap<Idx, Constraint> = HashMap::new();
    split(&mut constraints, &constraint_list);
    let mut a = build_a(&constraints, env, q);
    // Only subtyping constraints are handed to the solver.
    let mut all_constraints: LinkedList<Implication> = LinkedList::new();
    for (_, c) in constraints.iter() {
        if let &((_, ref path), C::Subtype(ref p, ref e)) = c {
            all_constraints.push_back((path.clone(), p.clone(), e.clone()));
        }
    }
    let min_a = solve(&all_constraints, &mut a)?;
    let mut res = HashMap::new();
    for (k, v) in min_a {
        res.insert(k, v.curr_qs.clone());
    }
    Ok(res)
}
// Sanity check that the Z3 binary and the SMT bindings work: assert
// ¬((x ≤ y ∧ v = y) ⇒ (v ≥ x ∧ v ≥ y)) and expect unsat, i.e. the
// implication is valid.
#[test]
fn z3_works() {
    let mut z3 = Z3::new_with_binary("./z3");
    let mut solver = SMTLib2::new(Some(LIA));
    solver.set_logic(&mut z3);
    // Defining the symbolic vars x & y
    let x = solver.new_var(Some("x"), integer::Sorts::Int);
    let y = solver.new_var(Some("y"), integer::Sorts::Int);
    let v = solver.new_var(Some("v"), integer::Sorts::Int);
    //let int0 = solver.new_const(integer::OpCodes::Const(0));
    let p1 = solver.assert(integer::OpCodes::Lte, &[x, y]);
    let p2 = solver.assert(integer::OpCodes::Cmp, &[v, y]);
    let p_all = solver.assert(core::OpCodes::And, &[p1, p2]);
    let k1 = solver.assert(integer::OpCodes::Gte, &[v, x]);
    let k2 = solver.assert(integer::OpCodes::Gte, &[v, y]);
    let k_all = solver.assert(core::OpCodes::And, &[k1, k2]);
    let imply = solver.assert(core::OpCodes::Imply, &[p_all, k_all]);
    let _ = solver.assert(core::OpCodes::Not, &[imply]);
    let (_, sat) = solver.solve(&mut z3, false);
    match sat {
        SMTRes::Unsat(_, _) => {}
        _ => {
            // `die!` is a project-local macro — presumably panics with
            // the formatted message; TODO confirm.
            die!("expected unsat, not {:?}", sat);
        }
    }
}
|
//! Macro combinators
//!
//! Macros are used to make combination easier,
//! since they often do not depend on the type
//! of the data they manipulate or return.
//!
//! There is a trick to make them easier to assemble,
//! combinators are defined like this:
//!
//! ```ignore
//! macro_rules! tag (
//! ($i:expr, $inp: expr) => (
//! {
//! ...
//! }
//! );
//! );
//! ```
//!
//! But when used in other combinators, they are used
//! like this:
//!
//! ```ignore
//! named!(my_function, tag!("abcd"));
//! ```
//!
//! Internally, other combinators will rewrite
//! that call to pass the input as first argument:
//!
//! ```ignore
//! macro_rules! named (
//! ($name:ident, $submac:ident!( $($args:tt)* )) => (
//! fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<'a,&[u8], &[u8]> {
//! $submac!(i, $($args)*)
//! }
//! );
//! );
//! ```
//!
//! If you want to call a combinator directly, you can
//! do it like this:
//!
//! ```ignore
//! let res = tag!(input, "abcd");
//! ```
//!
//! Combinators must have a specific variant for
//! non-macro arguments. Example: passing a function
//! to take_while! instead of another combinator.
//!
//! ```ignore
//! macro_rules! take_while(
//! ($input:expr, $submac:ident!( $($args:tt)* )) => (
//! {
//! ...
//! }
//! );
//!
//! // wrap the function in a macro to pass it to the main implementation
//! ($input:expr, $f:expr) => (
//! take_while!($input, call!($f));
//! );
//! );
//! ```
/// Wraps a parser in a closure
///
/// With an explicit type, the closure's argument is annotated; without one,
/// the compiler infers the input type from the call site.
#[macro_export]
macro_rules! closure (
    ($ty:ty, $submac:ident!( $($args:tt)* )) => (
        |input: $ty| { $submac!(input, $($args)*) }
    );
    ($submac:ident!( $($args:tt)* )) => (
        |input| { $submac!(input, $($args)*) }
    );
);
/// Makes a function from a parser combination
///
/// The type can be set up if the compiler needs
/// more information
///
/// ```ignore
/// named!(my_function( &[u8] ) -> &[u8], tag!("abcd"));
/// // first type parameter is input, second is output
/// named!(my_function<&[u8], &[u8]>, tag!("abcd"));
/// // will have &[u8] as input type, &[u8] as output type
/// named!(my_function, tag!("abcd"));
/// // will use &[u8] as input type (use this if the compiler
/// // complains about lifetime issues)
/// named!(my_function<&[u8]>, tag!("abcd"));
/// //prefix them with 'pub' to make the functions public
/// named!(pub my_function, tag!("abcd"));
/// ```
#[macro_export]
macro_rules! named (
    // Explicit input and output types: `named!(f( I ) -> O, ...)`.
    ($name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => (
        fn $name( i: $i ) -> $crate::IResult<$i,$o> {
            $submac!(i, $($args)*)
        }
    );
    // Angle-bracket form: `named!(f<I, O>, ...)`.
    ($name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
        fn $name( i: $i ) -> $crate::IResult<$i, $o> {
            $submac!(i, $($args)*)
        }
    );
    // Output type only; input defaults to `&'a [u8]` with an explicit lifetime.
    ($name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => (
        fn $name<'a>( i: &'a[u8] ) -> $crate::IResult<&'a [u8], $o> {
            $submac!(i, $($args)*)
        }
    );
    // NOTE(review): `$life:item` looks like it was intended to capture a
    // lifetime parameter, but `item` is a very odd fragment specifier here,
    // and `IResult` is given three type arguments unlike every other arm —
    // confirm this arm is actually usable before relying on it.
    ($name:ident<$life:item,$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
        fn $name<$life>( i: $i ) -> $crate::IResult<$life, $i, $o> {
            $submac!(i, $($args)*)
        }
    );
    // Default form: both input and output are `&[u8]`.
    ($name:ident, $submac:ident!( $($args:tt)* )) => (
        fn $name( i: &[u8] ) -> $crate::IResult<&[u8], &[u8]> {
            $submac!(i, $($args)*)
        }
    );
    // `pub` variants of the arms above.
    (pub $name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => (
        pub fn $name( i: $i ) -> $crate::IResult<$i,$o> {
            $submac!(i, $($args)*)
        }
    );
    (pub $name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
        pub fn $name( i: $i ) -> $crate::IResult<$i, $o> {
            $submac!(i, $($args)*)
        }
    );
    (pub $name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => (
        pub fn $name( i: &[u8] ) -> $crate::IResult<&[u8], $o> {
            $submac!(i, $($args)*)
        }
    );
    (pub $name:ident, $submac:ident!( $($args:tt)* )) => (
        pub fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<&[u8], &[u8]> {
            $submac!(i, $($args)*)
        }
    );
);
/// Used to wrap common expressions and function as macros
///
/// `call!(input, f)` expands to `f(input)`, letting plain functions be used
/// wherever a `submac!(...)` combinator is expected.
#[macro_export]
macro_rules! call (
    // No extra arguments: `call!(i, f)` => `f(i)`.
    ($i:expr, $fun:expr) => ( $fun( $i ) );
    // Extra arguments after the input: `call!(i, f, a, b)` => `f(i, a, b)`.
    ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) );
);
/// emulate function currying: `apply!(my_function, arg1, arg2, ...)` becomes `my_function(input, arg1, arg2, ...)`
///
/// Supports up to 6 arguments
#[macro_export]
macro_rules! apply (
    // Same expansion as the multi-argument arm of `call!`.
    ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) );
);
/// Prevents backtracking if the child parser fails
///
/// This parser will do an early return instead of sending
/// its result to the parent parser.
///
/// If another `error!` combinator is present in the parent
/// chain, the error will be wrapped and another early
/// return will be made.
///
/// This makes it easy to build report on which parser failed,
/// where it failed in the input, and the chain of parsers
/// that led it there.
///
/// Additionally, the error chain contains number identifiers
/// that can be matched to provide useful error messages.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use std::collections;
/// # use nom::IResult::Error;
/// # use nom::Err::{Position,NodePosition};
/// # use nom::ErrorKind;
/// # fn main() {
///     named!(err_test, alt!(
///       tag!("abcd") |
///       preceded!(tag!("efgh"), error!(ErrorKind::Custom(42),
///           chain!(
///                  tag!("ijkl")              ~
///             res: error!(ErrorKind::Custom(128), tag!("mnop")) ,
///             || { res }
///           )
///         )
///       )
///     ));
///     let a = &b"efghblah"[..];
///     let b = &b"efghijklblah"[..];
///     let c = &b"efghijklmnop"[..];
///
///     let blah = &b"blah"[..];
///
///     let res_a = err_test(a);
///     let res_b = err_test(b);
///     let res_c = err_test(c);
///     assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
///     assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..],
///       Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah))))))
///     );
/// # }
/// ```
///
#[macro_export]
macro_rules! error (
    ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => (
        {
            // Run the child inside a closure: a `return` performed by a
            // nested `error!` inside the child's expansion then exits this
            // closure (yielding its value to the match below) instead of
            // escaping the whole enclosing function unwrapped.
            let cl = || {
                $submac!($i, $($args)*)
            };
            match cl() {
                $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
                $crate::IResult::Done(i, o)    => $crate::IResult::Done(i, o),
                $crate::IResult::Error(e)      => {
                    // Early return from the ENCLOSING function with the child
                    // error wrapped in $code and the current input position.
                    return $crate::IResult::Error($crate::Err::NodePosition($code, $i, Box::new(e)))
                }
            }
        }
    );
    // Plain-function variant: wrap in `call!` and reuse the arm above.
    ($i:expr, $code:expr, $f:expr) => (
        error!($i, $code, call!($f));
    );
);
/// Add an error if the child parser fails
///
/// While error! does an early return and avoids backtracking,
/// add_error! backtracks normally. It just provides more context
/// for an error
///
#[macro_export]
macro_rules! add_error (
    ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => (
        {
            match $submac!($i, $($args)*) {
                // Success and Incomplete pass through untouched.
                $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
                $crate::IResult::Done(i, o)    => $crate::IResult::Done(i, o),
                // Errors are wrapped with $code and the input position, but
                // returned as a value (no early return), so alternation in
                // the caller can still backtrack.
                $crate::IResult::Error(e)      => {
                    $crate::IResult::Error($crate::Err::NodePosition($code, $i, Box::new(e)))
                }
            }
        }
    );
    // Plain-function variant: wrap in `call!` and reuse the arm above.
    ($i:expr, $code:expr, $f:expr) => (
        add_error!($i, $code, call!($f));
    );
);
/// replaces a `Incomplete` returned by the child parser
/// with an `Error`
///
/// Useful at the end of input, where "needs more data" can never be satisfied.
#[macro_export]
macro_rules! complete (
    ($i:expr, $submac:ident!( $($args:tt)* )) => (
        {
            match $submac!($i, $($args)*) {
                $crate::IResult::Done(i, o)    => $crate::IResult::Done(i, o),
                $crate::IResult::Error(e)      => $crate::IResult::Error(e),
                // Incomplete becomes a hard error at the current position.
                $crate::IResult::Incomplete(_) =>  {
                    $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Complete, $i))
                },
            }
        }
    );
    // Plain-function variant: wrap in `call!` and reuse the arm above.
    ($i:expr, $f:expr) => (
        complete!($i, call!($f));
    );
);
/// `flat_map!(R -> IResult<R,S>, S -> IResult<S,T>) => R -> IResult<R, T>`
///
/// combines a parser R -> IResult<R,S> and
/// a parser S -> IResult<S,T> to return another
/// parser R -> IResult<R,T>
///
/// The second parser runs on the OUTPUT of the first; the remaining input
/// of the final result is the first parser's remaining input.
#[macro_export]
macro_rules! flat_map(
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        {
            match $submac!($i, $($args)*) {
                $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
                $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
                // Feed the first parser's output into the second parser.
                $crate::IResult::Done(i, o)                          => match $submac2!(o, $($args2)*) {
                    $crate::IResult::Error(e)                            => {
                        // Re-anchor the inner error's position to the ORIGINAL
                        // input $i, since `o` is not a suffix of it.
                        let err = match e {
                            $crate::Err::Code(k) | $crate::Err::Node(k, _) | $crate::Err::Position(k, _) | $crate::Err::NodePosition(k, _, _) => {
                                $crate::Err::Position(k, $i)
                            }
                        };
                        $crate::IResult::Error(err)
                    },
                    $crate::IResult::Incomplete($crate::Needed::Unknown)  => $crate::IResult::Incomplete($crate::Needed::Unknown),
                    $crate::IResult::Incomplete($crate::Needed::Size(ref i2)) => $crate::IResult::Incomplete($crate::Needed::Size(*i2)),
                    // Keep the OUTER remaining input `i`; discard the inner remainder.
                    $crate::IResult::Done(_, o2)                          => $crate::IResult::Done(i, o2)
                }
            }
        }
    );
    // Variants wrapping plain functions in `call!` on either side.
    ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
        flat_map!($i, $submac!($($args)*), call!($g));
    );
    ($i:expr, $f:expr, $g:expr) => (
        flat_map!($i, call!($f), call!($g));
    );
    ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
        flat_map!($i, call!($f), $submac!($($args)*));
    );
);
/// `map!(I -> IResult<I,O>, O -> P) => I -> IResult<I, P>`
/// maps a function on the result of a parser
///
/// All arms normalize their arguments into `call!(...)` form and delegate
/// to the single `map_impl!` implementation.
#[macro_export]
macro_rules! map(
    ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
        map_impl!($i, $submac!($($args)*), call!($g));
    );
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        map_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
    );
    ($i:expr, $f:expr, $g:expr) => (
        map_impl!($i, call!($f), call!($g));
    );
    ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
        map_impl!($i, call!($f), $submac!($($args)*));
    );
);
/// Internal parser, do not use directly
///
/// Worker behind `map!`: on success, applies the second combinator to the
/// produced value while keeping the remaining input unchanged.
#[macro_export]
macro_rules! map_impl(
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        {
            match $submac!($i, $($args)*) {
                // Errors and Incomplete pass through unchanged.
                $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
                $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
                // Transform only the output value.
                $crate::IResult::Done(i, o)                          => $crate::IResult::Done(i, $submac2!(o, $($args2)*))
            }
        }
    );
);
/// `map_res!(I -> IResult<I,O>, O -> Result<P>) => I -> IResult<I, P>`
/// maps a function returning a Result on the output of a parser
///
/// All arms normalize into `call!(...)` form and delegate to `map_res_impl!`.
#[macro_export]
macro_rules! map_res (
    ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
        map_res_impl!($i, $submac!($($args)*), call!($g));
    );
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        map_res_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
    );
    ($i:expr, $f:expr, $g:expr) => (
        map_res_impl!($i, call!($f), call!($g));
    );
    ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
        map_res_impl!($i, call!($f), $submac!($($args)*));
    );
);
/// Internal parser, do not use directly
///
/// Worker behind `map_res!`: a mapped `Ok` becomes `Done`; a mapped `Err`
/// becomes `ErrorKind::MapRes` at the original position (the error payload
/// is discarded).
#[macro_export]
macro_rules! map_res_impl (
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        {
            match $submac!($i, $($args)*) {
                $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
                $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
                $crate::IResult::Done(i, o)                          => match $submac2!(o, $($args2)*) {
                    Ok(output) => $crate::IResult::Done(i, output),
                    Err(_)     => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::MapRes, $i))
                }
            }
        }
    );
);
/// `map_opt!(I -> IResult<I,O>, O -> Option<P>) => I -> IResult<I, P>`
/// maps a function returning an Option on the output of a parser
///
/// All arms normalize into `call!(...)` form and delegate to `map_opt_impl!`.
#[macro_export]
macro_rules! map_opt (
    ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
        map_opt_impl!($i, $submac!($($args)*), call!($g));
    );
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        map_opt_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
    );
    ($i:expr, $f:expr, $g:expr) => (
        map_opt_impl!($i, call!($f), call!($g));
    );
    ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
        map_opt_impl!($i, call!($f), $submac!($($args)*));
    );
);
/// Internal parser, do not use directly
///
/// Worker behind `map_opt!`: a mapped `Some` becomes `Done`; `None` becomes
/// `ErrorKind::MapOpt` at the original position.
#[macro_export]
macro_rules! map_opt_impl (
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        {
            match $submac!($i, $($args)*) {
                $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
                $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
                $crate::IResult::Done(i, o)                          => match $submac2!(o, $($args2)*) {
                    Some(output) => $crate::IResult::Done(i, output),
                    None         => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::MapOpt, $i))
                }
            }
        }
    );
);
/// `expr_res!(Result<O,E>) => I -> IResult<I, O>`
/// evaluate an expression that returns a Result<T,E> and returns a IResult::Done(I,T) if Ok
///
/// See expr_opt for an example
#[macro_export]
macro_rules! expr_res (
    ($i:expr, $e:expr) => (
        {
            match $e {
                Ok(output) => $crate::IResult::Done($i, output),
                // The Err payload is discarded; only the position is reported.
                Err(_)     => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::ExprRes, $i))
            }
        }
    );
);
/// `expr_opt!(Option<O>) => I -> IResult<I, O>`
/// evaluate an expression that returns a Option<T> and returns a IResult::Done(I,T) if Some
///
/// Useful when doing computations in a chain
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{self, Done, Error};
/// # use nom::Err::Position;
/// # use nom::{be_u8,ErrorKind};
///
///  fn take_add(input:&[u8], size: u8) -> IResult<&[u8],&[u8]> {
///    chain!(input,
///      sz:     be_u8                             ~
///      length: expr_opt!(size.checked_add(sz))   ~ // checking for integer overflow (returns an Option)
///      data:   take!(length)                     ,
///      ||{ data }
///    )
///  }
/// # fn main() {
///  let arr1 = [1, 2, 3, 4, 5];
///  let r1 = take_add(&arr1[..], 1);
///  assert_eq!(r1, Done(&[4,5][..], &[2,3][..]));
///
///  let arr2 = [0xFE, 2, 3, 4, 5];
///  // size is overflowing
///  let r1 = take_add(&arr2[..], 42);
///  assert_eq!(r1, Error(Position(ErrorKind::ExprOpt,&[2,3,4,5][..])));
/// # }
/// ```
#[macro_export]
macro_rules! expr_opt (
    ($i:expr, $e:expr) => (
        {
            match $e {
                Some(output) => $crate::IResult::Done($i, output),
                None         => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::ExprOpt, $i))
            }
        }
    );
);
/// `chain!(I->IResult<I,A> ~ I->IResult<I,B> ~ ... I->IResult<I,X> , || { return O } ) => I -> IResult<I, O>`
/// chains parsers and assemble the results through a closure
/// the input type I must implement nom::InputLength
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{self, Done, Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// #[derive(PartialEq,Eq,Debug)]
/// struct B {
///   a: u8,
///   b: Option<u8>
/// }
///
/// named!(y, tag!("efgh"));
///
/// fn ret_int(i:&[u8]) -> IResult<&[u8], u8> { Done(i, 1) }
/// named!(ret_y<&[u8], u8>, map!(y, |_| 1)); // return 1 if the "efgh" tag is found
///
///  named!(z<&[u8], B>,
///    chain!(
///      tag!("abcd")  ~     // the '~' character is used as separator
///      aa: ret_int   ~     // the result of that parser will be used in the closure
///      tag!("abcd")? ~     // this parser is optional
///      bb: ret_y?    ,     // the result of that parser is an option
///                          // the last parser in the chain is followed by a ','
///      ||{B{a: aa, b: bb}}
///    )
///  );
///
/// # fn main() {
///  // the first "abcd" tag is not present, we have an error
///  let r1 = z(&b"efgh"[..]);
///  assert_eq!(r1, Error(Position(ErrorKind::Tag,&b"efgh"[..])));
///
///  // everything is present, everything is parsed
///  let r2 = z(&b"abcdabcdefgh"[..]);
///  assert_eq!(r2, Done(&b""[..], B{a: 1, b: Some(1)}));
///
///  // the second "abcd" tag is optional
///  let r3 = z(&b"abcdefgh"[..]);
///  assert_eq!(r3, Done(&b""[..], B{a: 1, b: Some(1)}));
///
///  // the result of ret_y is optional, as seen in the B structure
///  let r4 = z(&b"abcdabcdwxyz"[..]);
///  assert_eq!(r4, Done(&b"wxyz"[..], B{a: 1, b: None}));
/// # }
/// ```
#[macro_export]
macro_rules! chain (
    ($i:expr, $($rest:tt)*) => (
        {
            // InputLength must be in scope so chaining_parser! can track how
            // much input each step consumed; the recursion starts at 0.
            use $crate::InputLength;
            chaining_parser!($i, 0usize, $($rest)*)
        }
    );
);
/// Internal parser, do not use directly
///
/// Recursive worker behind `chain!`. Each arm peels one `parser ~` step (or
/// the final `parser , assemble` step) off the token stream. `$consumed`
/// accumulates how many input items the chain has eaten so far, so that an
/// `Incomplete(Needed::Size(n))` from a child can be reported relative to
/// the START of the chain instead of the current position.
///
/// Arm families (each in four flavors: bare, `$field:`, `mut $field:`, and
/// an `?` optional variant):
/// * `$e:ident ...`    — plain function, normalized via `call!` to the
///   `$submac!(...)` arm.
/// * `... ~ rest`      — middle of the chain: run, bind (if named), recurse.
/// * `... , assemble`  — last parser: run, bind (if named), then return
///   `Done(remaining, assemble())`.
/// * a terminal `($i, $consumed, $assemble)` arm for when every parser has
///   been consumed.
///
/// Fixed: the `mut $field : $e ? , $assemble` arm previously forwarded to
/// the non-`mut` arm, silently dropping the `mut` on the binding.
#[macro_export]
macro_rules! chaining_parser (
    ($i:expr, $consumed:expr, $e:ident ~ $($rest:tt)*) => (
        chaining_parser!($i, $consumed, call!($e) ~ $($rest)*);
    );
    // Unnamed step: value is discarded, only input advances.
    ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
        {
            match $submac!($i, $($args)*) {
                $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
                $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
                // Offset the needed size by what the chain already consumed.
                $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
                $crate::IResult::Done(i,_)                           => {
                    chaining_parser!(i, $consumed + (($i).input_len() - i.input_len()), $($rest)*)
                }
            }
        }
    );
    ($i:expr, $consumed:expr, $e:ident ? ~ $($rest:tt)*) => (
        chaining_parser!($i, $consumed, call!($e) ? ~ $($rest)*);
    );
    // Unnamed optional step: on Error the chain continues with the ORIGINAL
    // input; Incomplete is still propagated.
    ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
        let res = $submac!($i, $($args)*);
        if let $crate::IResult::Incomplete(inc) = res {
            match inc {
                $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
            }
        } else {
            let input = if let $crate::IResult::Done(i,_) = res {
                i
            } else {
                $i
            };
            chaining_parser!(input, $consumed + (($i).input_len() - input.input_len()), $($rest)*)
        }
    });
    ($i:expr, $consumed:expr, $field:ident : $e:ident ~ $($rest:tt)*) => (
        chaining_parser!($i, $consumed, $field: call!($e) ~ $($rest)*);
    );
    // Named step: bind the value to $field for use by later steps/assemble.
    ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
        {
            match $submac!($i, $($args)*) {
                $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
                $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
                $crate::IResult::Done(i,o)                           => {
                    let $field = o;
                    chaining_parser!(i, $consumed + (($i).input_len() - i.input_len()), $($rest)*)
                }
            }
        }
    );
    ($i:expr, $consumed:expr, mut $field:ident : $e:ident ~ $($rest:tt)*) => (
        chaining_parser!($i, $consumed, mut $field: call!($e) ~ $($rest)*);
    );
    // Named mutable step.
    ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
        match $submac!($i, $($args)*) {
            $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
            $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
            $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
            $crate::IResult::Done(i,o)                           => {
                let mut $field = o;
                chaining_parser!(i, $consumed + ($i).input_len() - i.input_len(), $($rest)*)
            }
        }
    );
    ($i:expr, $consumed:expr, $field:ident : $e:ident ? ~ $($rest:tt)*) => (
        chaining_parser!($i, $consumed, $field : call!($e) ? ~ $($rest)*);
    );
    // Named optional step: $field is bound to an Option.
    ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
        let res = $submac!($i, $($args)*);
        if let $crate::IResult::Incomplete(inc) = res {
            match inc {
                $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
            }
        } else {
            let ($field,input) = if let $crate::IResult::Done(i,o) = res {
                (Some(o),i)
            } else {
                (None,$i)
            };
            chaining_parser!(input, $consumed + ($i).input_len() - input.input_len(), $($rest)*)
        }
    });
    ($i:expr, $consumed:expr, mut $field:ident : $e:ident ? ~ $($rest:tt)*) => (
        chaining_parser!($i, $consumed, mut $field : call!($e) ? ~ $($rest)*);
    );
    // Named mutable optional step.
    ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
        let res = $submac!($i, $($args)*);
        if let $crate::IResult::Incomplete(inc) = res {
            match inc {
                $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
            }
        } else {
            let (mut $field,input) = if let $crate::IResult::Done(i,o) = res {
                (Some(o),i)
            } else {
                (None,$i)
            };
            chaining_parser!(input, $consumed + ($i).input_len() - input.input_len(), $($rest)*)
        }
    });
    // ---- Ending the chain: the last parser is followed by `, $assemble`. ----
    ($i:expr, $consumed:expr, $e:ident, $assemble:expr) => (
        chaining_parser!($i, $consumed, call!($e), $assemble);
    );
    ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ), $assemble:expr) => (
        match $submac!($i, $($args)*) {
            $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
            $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
            $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
            $crate::IResult::Done(i,_)                           => {
                $crate::IResult::Done(i, $assemble())
            }
        }
    );
    ($i:expr, $consumed:expr, $e:ident ?, $assemble:expr) => (
        chaining_parser!($i, $consumed, call!($e) ?, $assemble);
    );
    ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ?, $assemble:expr) => ({
        let res = $submac!($i, $($args)*);
        if let $crate::IResult::Incomplete(inc) = res {
            match inc {
                $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
            }
        } else {
            let input = if let $crate::IResult::Done(i,_) = res {
                i
            } else {
                $i
            };
            $crate::IResult::Done(input, $assemble())
        }
    });
    ($i:expr, $consumed:expr, $field:ident : $e:ident, $assemble:expr) => (
        chaining_parser!($i, $consumed, $field: call!($e), $assemble);
    );
    ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ), $assemble:expr) => (
        match $submac!($i, $($args)*) {
            $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
            $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
            $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
            $crate::IResult::Done(i,o)                           => {
                let $field = o;
                $crate::IResult::Done(i, $assemble())
            }
        }
    );
    ($i:expr, $consumed:expr, mut $field:ident : $e:ident, $assemble:expr) => (
        chaining_parser!($i, $consumed, mut $field: call!($e), $assemble);
    );
    ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ), $assemble:expr) => (
        match $submac!($i, $($args)*) {
            $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
            $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
            $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
            $crate::IResult::Done(i,o)                           => {
                let mut $field = o;
                $crate::IResult::Done(i, $assemble())
            }
        }
    );
    ($i:expr, $consumed:expr, $field:ident : $e:ident ? , $assemble:expr) => (
        chaining_parser!($i, $consumed, $field : call!($e) ? , $assemble);
    );
    ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? , $assemble:expr) => ({
        let res = $submac!($i, $($args)*);
        if let $crate::IResult::Incomplete(inc) = res {
            match inc {
                $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
            }
        } else {
            let ($field,input) = if let $crate::IResult::Done(i,o) = res {
                (Some(o), i)
            } else {
                (None, $i)
            };
            $crate::IResult::Done(input, $assemble())
        }
    });
    ($i:expr, $consumed:expr, mut $field:ident : $e:ident ? , $assemble:expr) => (
        // BUGFIX: forward as `mut $field` — previously the `mut` was dropped,
        // so `chain!(..., mut x: parser? , ...)` produced an immutable binding.
        chaining_parser!($i, $consumed, mut $field : call!($e) ? , $assemble);
    );
    ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ? , $assemble:expr) => ({
        let res = $submac!($i, $($args)*);
        if let $crate::IResult::Incomplete(inc) = res {
            match inc {
                $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
            }
        } else {
            let (mut $field,input) = if let $crate::IResult::Done(i,o) = res {
                (Some(o), i)
            } else {
                (None, $i)
            };
            $crate::IResult::Done(input, $assemble())
        }
    });
    // All parsers consumed: return the assembled value with the current input.
    ($i:expr, $consumed:expr, $assemble:expr) => (
        $crate::IResult::Done($i, $assemble())
    )
);
/// `alt!(I -> IResult<I,O> | I -> IResult<I,O> | ... | I -> IResult<I,O> ) => I -> IResult<I, O>`
/// try a list of parsers, return the result of the first successful one
///
/// If one of the parser returns Incomplete, alt will return Incomplete, to retry
/// once you get more input. Note that it is better for performance to know the
/// minimum size of data you need before you get into alt.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
///  named!( test, alt!( tag!( "abcd" ) | tag!( "efgh" ) ) );
///  let r1 = test(b"abcdefgh");
///  assert_eq!(r1, Done(&b"efgh"[..], &b"abcd"[..]));
///  let r2 = test(&b"efghijkl"[..]);
///  assert_eq!(r2, Done(&b"ijkl"[..], &b"efgh"[..]));
///  # }
/// ```
///
/// There is another syntax for alt allowing a block to manipulate the result:
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
///     #[derive(Debug,PartialEq,Eq)]
///     enum Tagged {
///       Abcd,
///       Efgh,
///       Took(usize)
///     }
///     named!(test<Tagged>, alt!(
///         tag!("abcd") => { |_|          Tagged::Abcd }
///       | tag!("efgh") => { |_|          Tagged::Efgh }
///       | take!(5)     => { |res: &[u8]| Tagged::Took(res.len()) } // the closure takes the result as argument if the parser is successful
///     ));
///     let r1 = test(b"abcdefgh");
///     assert_eq!(r1, Done(&b"efgh"[..], Tagged::Abcd));
///     let r2 = test(&b"efghijkl"[..]);
///     assert_eq!(r2, Done(&b"ijkl"[..], Tagged::Efgh));
///     let r3 = test(&b"mnopqrst"[..]);
///     assert_eq!(r3, Done(&b"rst"[..],  Tagged::Took(5)));
/// # }
/// ```
#[macro_export]
macro_rules! alt (
    // Thin facade: all the work happens in alt_parser!.
    ($i:expr, $($rest:tt)*) => (
        {
            alt_parser!($i, $($rest)*)
        }
    );
);
/// Internal parser, do not use directly
///
/// Recursive worker behind `alt!`. Each arm peels one alternative (plus an
/// optional `=> { closure }` result mapper) off the `|`-separated list; on
/// Error it recurses into the remaining alternatives, and the no-alternative
/// base case produces `ErrorKind::Alt`.
#[macro_export]
macro_rules! alt_parser (
    ($i:expr, $e:ident | $($rest:tt)*) => (
        alt_parser!($i, call!($e) | $($rest)*);
    );
    // One alternative followed by more: Done and Incomplete short-circuit,
    // Error falls through to the rest.
    ($i:expr, $subrule:ident!( $($args:tt)*) | $($rest:tt)*) => (
        {
            let res = $subrule!($i, $($args)*);
            match res {
                $crate::IResult::Done(_,_)     => res,
                $crate::IResult::Incomplete(_) => res,
                _                              => alt_parser!($i, $($rest)*)
            }
        }
    );
    // Alternative with a result-mapping closure, followed by more.
    ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr } | $($rest:tt)+) => (
        {
            match $subrule!( $i, $($args)* ) {
                $crate::IResult::Done(i,o)     => $crate::IResult::Done(i,$gen(o)),
                $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
                $crate::IResult::Error(_)      => {
                    alt_parser!($i, $($rest)*)
                }
            }
        }
    );
    ($i:expr, $e:ident => { $gen:expr } | $($rest:tt)*) => (
        alt_parser!($i, call!($e) => { $gen } | $($rest)*);
    );
    ($i:expr, $e:ident => { $gen:expr }) => (
        alt_parser!($i, call!($e) => { $gen });
    );
    // Last alternative with a mapping closure: on Error, fall through to the
    // base case below.
    ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr }) => (
        {
            match $subrule!( $i, $($args)* ) {
                $crate::IResult::Done(i,o)     => $crate::IResult::Done(i,$gen(o)),
                $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
                $crate::IResult::Error(_)      => {
                    alt_parser!($i)
                }
            }
        }
    );
    ($i:expr, $e:ident) => (
        alt_parser!($i, call!($e));
    );
    // Last plain alternative.
    ($i:expr, $subrule:ident!( $($args:tt)*)) => (
        {
            match $subrule!( $i, $($args)* ) {
                $crate::IResult::Done(i,o)     => $crate::IResult::Done(i,o),
                $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
                $crate::IResult::Error(_)      => {
                    alt_parser!($i)
                }
            }
        }
    );
    // Base case: every alternative failed.
    ($i:expr) => (
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Alt,$i))
    );
);
/// `switch!(I -> IResult<I,P>, P => I -> IResult<I,O> | ... | P => I -> IResult<I,O> ) => I -> IResult<I, O>`
/// choose the next parser depending on the result of the first one, if successful
///
/// A selector value matching none of the provided patterns produces an
/// `ErrorKind::Switch` error at the position after the selector parser.
#[macro_export]
macro_rules! switch (
    ($i:expr, $submac:ident!( $($args:tt)*), $($p:pat => $subrule:ident!( $($args2:tt)* ))|*) => (
        {
            match $submac!($i, $($args)*) {
                $crate::IResult::Error(e)      => $crate::IResult::Error(e),
                $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
                $crate::IResult::Done(i, o)    => {
                    // Dispatch on the selector's output value.
                    match o {
                        $($p => $subrule!(i, $($args2)*)),*,
                        _    => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Switch,i))
                    }
                }
            }
        }
    );
    // Plain-function selector: wrap it in `call!` and reuse the arm above.
    // BUGFIX: this previously expanded to `call!(e)`, referencing a literal
    // identifier `e` instead of the captured `$e`, so the arm could never
    // compile at the call site.
    ($i:expr, $e:ident, $($rest:tt)*) => (
        {
            switch!($i, call!($e), $($rest)*)
        }
    );
);
/// `opt!(I -> IResult<I,O>) => I -> IResult<I, Option<O>>`
/// make the underlying parser optional
///
/// returns an Option of the returned type. This parser never fails
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
///  named!( o<&[u8], Option<&[u8]> >, opt!( tag!( "abcd" ) ) );
///
///  let a = b"abcdef";
///  let b = b"bcdefg";
///  assert_eq!(o(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));
///  assert_eq!(o(&b[..]), Done(&b"bcdefg"[..], None));
///  # }
/// ```
#[macro_export]
macro_rules! opt(
    ($i:expr, $submac:ident!( $($args:tt)* )) => (
        {
            match $submac!($i, $($args)*) {
                $crate::IResult::Done(i,o)     => $crate::IResult::Done(i, Some(o)),
                // Both Error AND Incomplete become `Done(original_input, None)`
                // here — the child's "need more data" signal is swallowed.
                $crate::IResult::Error(_)      => $crate::IResult::Done($i, None),
                $crate::IResult::Incomplete(_) => $crate::IResult::Done($i, None)
            }
        }
    );
    // Plain-function variant: wrap in `call!` and reuse the arm above.
    ($i:expr, $f:expr) => (
        opt!($i, call!($f));
    );
);
/// `opt_res!(I -> IResult<I,O>) => I -> IResult<I, Result<O, nom::Err>>`
/// make the underlying parser optional
///
/// returns a Result, with Err containing the parsing error
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
///  named!( o<&[u8], Result<&[u8], nom::Err<&[u8]> > >, opt_res!( tag!( "abcd" ) ) );
///
///  let a = b"abcdef";
///  let b = b"bcdefg";
///  assert_eq!(o(&a[..]), Done(&b"ef"[..], Ok(&b"abcd"[..])));
///  assert_eq!(o(&b[..]), Done(&b"bcdefg"[..], Err(Position(ErrorKind::Tag, &b[..]))));
///  # }
/// ```
#[macro_export]
macro_rules! opt_res (
    ($i:expr, $submac:ident!( $($args:tt)* )) => (
        {
            match $submac!($i, $($args)*) {
                $crate::IResult::Done(i,o)     => $crate::IResult::Done(i, Ok(o)),
                // Unlike `opt!`, the error is preserved in the Ok/Err payload
                // and Incomplete is propagated instead of swallowed.
                $crate::IResult::Error(e)      => $crate::IResult::Done($i, Err(e)),
                $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
            }
        }
    );
    // Plain-function variant: wrap in `call!` and reuse the arm above.
    ($i:expr, $f:expr) => (
        opt_res!($i, call!($f));
    );
);
/// `cond!(bool, I -> IResult<I,O>) => I -> IResult<I, Option<O>>`
/// Conditional combinator
///
/// Wraps another parser and calls it if the
/// condition is met. This combinator returns
/// an Option of the return type of the child
/// parser.
///
/// This is especially useful if a parser depends
/// on the value return by a preceding parser in
/// a `chain!`.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use nom::IResult;
/// # fn main() {
///  let b = true;
///  let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8],
///    cond!( b, tag!("abcd") ))
///  );
///
///  let a = b"abcdef";
///  assert_eq!(f(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));
///
///  let b2 = false;
///  let f2:Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8],
///    cond!( b2, tag!("abcd") ))
///  );
///  assert_eq!(f2(&a[..]), Done(&b"abcdef"[..], None));
///  # }
/// ```
///
#[macro_export]
macro_rules! cond(
    ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => (
        {
            if $cond {
                match $submac!($i, $($args)*) {
                    $crate::IResult::Done(i,o)     => $crate::IResult::Done(i, Some(o)),
                    // A failing child does not fail the combinator: None.
                    $crate::IResult::Error(_)      => $crate::IResult::Done($i, None),
                    $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
                }
            } else {
                // Condition false: consume nothing, produce None.
                $crate::IResult::Done($i, None)
            }
        }
    );
    // Plain-function variant: wrap in `call!` and reuse the arm above.
    ($i:expr, $cond:expr, $f:expr) => (
        cond!($i, $cond, call!($f));
    );
);
/// `cond_reduce!(bool, I -> IResult<I,O>) => I -> IResult<I, O>`
/// Conditional combinator with error
///
/// Wraps another parser and calls it if the
/// condition is met. This combinator returns
/// an error if the condition is false
///
/// This is especially useful if a parser depends
/// on the value return by a preceding parser in
/// a `chain!`.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::{Err,ErrorKind};
/// # fn main() {
///  let b = true;
///  let f = closure!(&'static[u8],
///    cond_reduce!( b, tag!("abcd") )
///  );
///
///  let a = b"abcdef";
///  assert_eq!(f(&a[..]), Done(&b"ef"[..], &b"abcd"[..]));
///
///  let b2 = false;
///  let f2 = closure!(&'static[u8],
///    cond_reduce!( b2, tag!("abcd") )
///  );
///  assert_eq!(f2(&a[..]), Error(Err::Position(ErrorKind::CondReduce, &a[..])));
///  # }
/// ```
///
#[macro_export]
macro_rules! cond_reduce(
    ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => (
        {
            if $cond {
                // Condition true: the child's result passes through unchanged.
                match $submac!($i, $($args)*) {
                    $crate::IResult::Done(i,o)     => $crate::IResult::Done(i, o),
                    $crate::IResult::Error(e)      => $crate::IResult::Error(e),
                    $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
                }
            } else {
                // Condition false: hard error, unlike `cond!` which yields None.
                $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::CondReduce, $i))
            }
        }
    );
    // Plain-function variant: wrap in `call!` and reuse the arm above.
    ($i:expr, $cond:expr, $f:expr) => (
        cond_reduce!($i, $cond, call!($f));
    );
);
/// `peek!(I -> IResult<I,O>) => I -> IResult<I, O>`
/// returns a result without consuming the input
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!(ptag, peek!( tag!( "abcd" ) ) );
///
/// let r = ptag(&b"abcdefgh"[..]);
/// assert_eq!(r, Done(&b"abcdefgh"[..], &b"abcd"[..]));
/// # }
/// ```
#[macro_export]
macro_rules! peek(
  // Macro-invocation variant: run the sub-parser for its result only.
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        // Success: return the parsed value but hand back the ORIGINAL input
        // `$i`, so peek! never consumes anything.
        $crate::IResult::Done(_,o)     => $crate::IResult::Done($i, o),
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
      }
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  ($i:expr, $f:expr) => (
    // BUG FIX: this previously expanded to `call!(f)`, referencing a literal
    // identifier `f` instead of the captured parser expression `$f`, so
    // `peek!(input, some_fn)` failed to compile unless an item named `f`
    // happened to be in scope.
    peek!($i, call!($f));
  );
);
/// `tap!(name: I -> IResult<I,O> => { block }) => I -> IResult<I, O>`
/// allows access to the parser's result without affecting it
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use std::str;
/// # fn main() {
/// named!(ptag, tap!(res: tag!( "abcd" ) => { println!("recognized {}", str::from_utf8(res).unwrap()) } ) );
///
/// let r = ptag(&b"abcdefgh"[..]);
/// assert_eq!(r, Done(&b"efgh"[..], &b"abcd"[..]));
/// # }
/// ```
#[macro_export]
macro_rules! tap (
  // Macro-invocation variant: bind the output, run the side effect, pass through.
  ($i:expr, $name:ident : $submac:ident!( $($args:tt)* ) => $e:expr) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(i,o) => {
          // Bind the output under the caller-chosen name so `$e` can read it,
          // evaluate `$e` purely for its side effects, then return the value
          // and remaining input unchanged.
          let $name = o;
          $e;
          $crate::IResult::Done(i, $name)
        },
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
      }
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  ($i:expr, $name: ident: $f:expr => $e:expr) => (
    tap!($i, $name: call!($f) => $e);
  );
);
/// `pair!(I -> IResult<I,O>, I -> IResult<I,P>) => I -> IResult<I, (O,P)>`
/// pair(X,Y), returns (x,y)
///
#[macro_export]
macro_rules! pair(
  // Core arm: both sub-parsers are macro invocations.
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      // Run the first parser; on success, feed its leftover input to the second.
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1)   => {
          match $submac2!(i1, $($args2)*) {
            $crate::IResult::Error(a)      => $crate::IResult::Error(a),
            $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
            $crate::IResult::Done(i2,o2)   => {
              // Both parsers succeeded: return their outputs as a tuple.
              $crate::IResult::Done(i2, (o1, o2))
            }
          }
        },
      }
    }
  );
  // The remaining arms normalize plain function arguments into call!()
  // so the core arm above handles every combination.
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    pair!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    pair!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    pair!($i, call!($f), call!($g));
  );
);
/// `separated_pair!(I -> IResult<I,O>, I -> IResult<I, T>, I -> IResult<I,P>) => I -> IResult<I, (O,P)>`
/// separated_pair(X,sep,Y) returns (x,y)
#[macro_export]
macro_rules! separated_pair(
  // Stage 1 of 3: run the first parser, then hand its output and the
  // remaining tokens to separated_pair1! (which parses the separator).
  ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1)   => {
          // Carry the first output `o1` through the helper-macro chain.
          separated_pair1!(i1, o1, $($rest)*)
        }
      }
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  ($i:expr, $f:expr, $($rest:tt)+) => (
    separated_pair!($i, call!($f), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[macro_export]
macro_rules! separated_pair1(
  // Stage 2 of 3: parse (and discard) the separator, keeping the first
  // result `$res1`, then delegate to separated_pair2! for the final parser.
  ($i:expr, $res1:ident, $submac2:ident!( $($args2:tt)* ), $($rest:tt)+) => (
    {
      match $submac2!($i, $($args2)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        // The separator's output is intentionally dropped.
        $crate::IResult::Done(i2,_)    => {
          separated_pair2!(i2, $res1, $($rest)*)
        }
      }
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  ($i:expr, $res1:ident, $g:expr, $($rest:tt)+) => (
    separated_pair1!($i, $res1, call!($g), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[macro_export]
macro_rules! separated_pair2(
  // Stage 3 of 3: run the second value parser and pair its output with
  // the first result carried through the chain.
  ($i:expr, $res1:ident, $submac3:ident!( $($args3:tt)* )) => (
    {
      match $submac3!($i, $($args3)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i3,o3)   => {
          // Final result: (first value, second value).
          $crate::IResult::Done(i3, ($res1, o3))
        }
      }
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  ($i:expr, $res1:ident, $h:expr) => (
    separated_pair2!($i, $res1, call!($h));
  );
);
/// `preceded!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, O>`
/// preceded(opening, X) returns X
#[macro_export]
macro_rules! preceded(
  // Core arm: both sub-parsers are macro invocations.
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        // The prefix parser's output is intentionally dropped.
        $crate::IResult::Done(i1,_)    => {
          match $submac2!(i1, $($args2)*) {
            $crate::IResult::Error(a)      => $crate::IResult::Error(a),
            $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
            $crate::IResult::Done(i2,o2)   => {
              // Only the second parser's output is returned.
              $crate::IResult::Done(i2, o2)
            }
          }
        },
      }
    }
  );
  // The remaining arms normalize plain function arguments into call!()
  // so the core arm above handles every combination.
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    preceded!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    preceded!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    preceded!($i, call!($f), call!($g));
  );
);
/// `terminated!(I -> IResult<I,O>, I -> IResult<I,T>) => I -> IResult<I, O>`
/// terminated(X, closing) returns X
#[macro_export]
macro_rules! terminated(
  // Core arm: both sub-parsers are macro invocations.
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1)   => {
          match $submac2!(i1, $($args2)*) {
            $crate::IResult::Error(a)      => $crate::IResult::Error(a),
            $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
            // The terminator's output is dropped; the FIRST parser's
            // output `o1` is returned (mirror image of preceded!).
            $crate::IResult::Done(i2,_)    => {
              $crate::IResult::Done(i2, o1)
            }
          }
        },
      }
    }
  );
  // The remaining arms normalize plain function arguments into call!()
  // so the core arm above handles every combination.
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    terminated!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    terminated!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    terminated!($i, call!($f), call!($g));
  );
);
/// `delimited!(I -> IResult<I,T>, I -> IResult<I,O>, I -> IResult<I,U>) => I -> IResult<I, O>`
/// delimited(opening, X, closing) returns X
#[macro_export]
macro_rules! delimited(
  // Stage 1 of 3: parse (and discard) the opening delimiter, then hand the
  // remaining tokens to delimited1! (which parses the inner value).
  ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        // The opening delimiter's output is intentionally dropped.
        $crate::IResult::Done(i1,_)    => {
          delimited1!(i1, $($rest)*)
        }
      }
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  ($i:expr, $f:expr, $($rest:tt)+) => (
    delimited!($i, call!($f), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[macro_export]
macro_rules! delimited1(
  // Stage 2 of 3: parse the inner value and carry its output `o2` to
  // delimited2!, which consumes the closing delimiter.
  ($i:expr, $submac2:ident!( $($args2:tt)* ), $($rest:tt)+) => (
    {
      match $submac2!($i, $($args2)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i2,o2)   => {
          delimited2!(i2, o2, $($rest)*)
        }
      }
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  ($i:expr, $g:expr, $($rest:tt)+) => (
    delimited1!($i, call!($g), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[macro_export]
macro_rules! delimited2(
  // Stage 3 of 3: parse (and discard) the closing delimiter, then return
  // the inner value carried through the chain.
  ($i:expr, $res2:ident, $submac3:ident!( $($args3:tt)* )) => (
    {
      match $submac3!($i, $($args3)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        // The closing delimiter's output is intentionally dropped.
        $crate::IResult::Done(i3,_)    => {
          $crate::IResult::Done(i3, $res2)
        }
      }
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  ($i:expr, $res2:ident, $h:expr) => (
    delimited2!($i, $res2, call!($h));
  );
);
/// `separated_list!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// separated_list(sep, X) returns Vec<X>
#[macro_export]
macro_rules! separated_list(
  // Core arm: separator and element parsers are both macro invocations.
  ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => (
    {
      let mut res   = Vec::new();
      let mut input = $i;
      // get the first element
      match $submac!(input, $($args2)*) {
        // No first element: an empty list is still a success (contrast
        // with separated_nonempty_list!, which errors here).
        $crate::IResult::Error(_)      => $crate::IResult::Done(input, Vec::new()),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i,o)     => {
          if i.len() == input.len() {
            // The element parser consumed nothing; looping on it would never
            // terminate, so reject this as an error.
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::SeparatedList,input))
          } else {
            res.push(o);
            input = i;
            loop {
              // get the separator first
              if let $crate::IResult::Done(i2,_) = $sep!(input, $($args)*) {
                // Zero-length separator match: stop to avoid an infinite loop.
                if i2.len() == input.len() {
                  break;
                }
                input = i2;
                // get the element next
                if let $crate::IResult::Done(i3,o3) = $submac!(input, $($args2)*) {
                  // Zero-length element match: stop to avoid an infinite loop.
                  // NOTE(review): `input` has already advanced past the
                  // separator at this point, so that separator is consumed
                  // even though no element follows — confirm this trailing-
                  // separator behavior is intended.
                  if i3.len() == input.len() {
                    break;
                  }
                  res.push(o3);
                  input = i3;
                } else {
                  break;
                }
              } else {
                break;
              }
            }
            $crate::IResult::Done(input, res)
          }
        },
      }
    }
  );
  // The remaining arms normalize plain function arguments into call!()
  // so the core arm above handles every combination.
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    separated_list!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    separated_list!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    separated_list!($i, call!($f), call!($g));
  );
);
/// `separated_nonempty_list!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// separated_nonempty_list(sep, X) returns Vec<X>
#[macro_export]
macro_rules! separated_nonempty_list(
  // Core arm: separator and element parsers are both macro invocations.
  ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => (
    {
      let mut res   = Vec::new();
      let mut input = $i;
      // get the first element
      match $submac!(input, $($args2)*) {
        // At least one element is required: a failing first parse is an
        // error (contrast with separated_list!, which returns an empty Vec).
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i,o)     => {
          if i.len() == input.len() {
            // The element parser consumed nothing; looping on it would never
            // terminate, so reject this as an error.
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::SeparatedNonEmptyList,input))
          } else {
            res.push(o);
            input = i;
            loop {
              if let $crate::IResult::Done(i2,_) = $sep!(input, $($args)*) {
                // Zero-length separator match: stop to avoid an infinite loop.
                if i2.len() == input.len() {
                  break;
                }
                input = i2;
                if let $crate::IResult::Done(i3,o3) = $submac!(input, $($args2)*) {
                  // Zero-length element match: stop to avoid an infinite loop.
                  if i3.len() == input.len() {
                    break;
                  }
                  res.push(o3);
                  input = i3;
                } else {
                  break;
                }
              } else {
                break;
              }
            }
            $crate::IResult::Done(input, res)
          }
        },
      }
    }
  );
  // The remaining arms normalize plain function arguments into call!()
  // so the core arm above handles every combination.
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    separated_nonempty_list!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    separated_nonempty_list!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    separated_nonempty_list!($i, call!($f), call!($g));
  );
);
/// `many0!(I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// Applies the parser 0 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!(multi<&[u8], Vec<&[u8]> >, many0!( tag!( "abcd" ) ) );
///
/// let a = b"abcdabcdef";
/// let b = b"azerty";
///
/// let res = vec![&b"abcd"[..], &b"abcd"[..]];
/// assert_eq!(multi(&a[..]), Done(&b"ef"[..], res));
/// assert_eq!(multi(&b[..]), Done(&b"azerty"[..], Vec::new()));
/// # }
/// ```
/// 0 or more
#[macro_export]
macro_rules! many0(
  // Macro-invocation variant: apply the sub-parser repeatedly.
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      let mut res   = Vec::new();
      let mut input = $i;
      // Apply the sub-parser until it fails (or returns Incomplete — the
      // while-let treats both as "stop").
      while let $crate::IResult::Done(i,o) = $submac!(input, $($args)*) {
        // A parser that consumed nothing would loop forever; stop instead.
        if i.len() == input.len() {
          break;
        }
        res.push(o);
        input = i;
      }
      // many0! always succeeds, possibly with an empty Vec.
      $crate::IResult::Done(input, res)
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  ($i:expr, $f:expr) => (
    many0!($i, call!($f));
  );
);
/// `many1!(I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// Applies the parser 1 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done, Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!(multi<&[u8], Vec<&[u8]> >, many1!( tag!( "abcd" ) ) );
///
/// let a = b"abcdabcdef";
/// let b = b"azerty";
///
/// let res = vec![&b"abcd"[..], &b"abcd"[..]];
/// assert_eq!(multi(&a[..]), Done(&b"ef"[..], res));
/// assert_eq!(multi(&b[..]), Error(Position(ErrorKind::Many1,&b[..])));
/// # }
/// ```
#[macro_export]
macro_rules! many1(
  // Macro-invocation variant: apply the sub-parser repeatedly, requiring
  // at least one success.
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      let mut res   = Vec::new();
      let mut input = $i;
      // Same accumulation loop as many0!.
      while let $crate::IResult::Done(i,o) = $submac!(input, $($args)*) {
        // A parser that consumed nothing would loop forever; stop instead.
        if i.len() == input.len() {
          break;
        }
        res.push(o);
        input = i;
      }
      if res.is_empty() {
        // Zero matches is an error for many1!, reported at the original input.
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Many1,$i))
      } else {
        $crate::IResult::Done(input, res)
      }
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  ($i:expr, $f:expr) => (
    many1!($i, call!($f));
  );
);
/// `count!(I -> IResult<I,O>, nb) => I -> IResult<I, Vec<O>>`
/// Applies the child parser a specified number of times
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!(counter< Vec<&[u8]> >, count!( tag!( "abcd" ), 2 ) );
///
/// let a = b"abcdabcdabcdef";
/// let b = b"abcdefgh";
/// let res = vec![&b"abcd"[..], &b"abcd"[..]];
///
/// assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
/// assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
/// # }
/// ```
///
#[macro_export]
macro_rules! count(
  // Macro-invocation variant: apply the sub-parser exactly $count times.
  ($i:expr, $submac:ident!( $($args:tt)* ), $count: expr) => (
    {
      let mut input = $i;
      let mut res   = Vec::with_capacity($count);
      let mut cnt: usize = 0;
      let mut err = false;
      loop {
        // Collected the requested number of elements: done.
        if cnt == $count {
          break
        }
        match $submac!(input, $($args)*) {
          $crate::IResult::Done(i,o) => {
            res.push(o);
            input = i;
            cnt = cnt + 1;
          },
          $crate::IResult::Error(_)  => {
            // Remember the failure; the error is rebuilt below so it points
            // at the ORIGINAL input rather than the failing element.
            err = true;
            break;
          },
          $crate::IResult::Incomplete(_) => {
            // NOTE(review): the sub-parser's size hint is discarded here, so
            // the macro reports Needed::Unknown below — confirm that losing
            // the more precise Needed::Size is acceptable.
            break;
          }
        }
      }
      if err {
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Count,$i))
      } else if cnt == $count {
        $crate::IResult::Done(input, res)
      } else {
        // Ran out of input before reaching $count elements.
        $crate::IResult::Incomplete($crate::Needed::Unknown)
      }
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  ($i:expr, $f:expr, $count: expr) => (
    count!($i, call!($f), $count);
  );
);
/// `count_fixed!(O, I -> IResult<I,O>, nb) => I -> IResult<I, [O; nb]>`
/// Applies the child parser a fixed number of times and returns a fixed size array
/// The type must be specified and it must be `Copy`
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!(counter< [&[u8]; 2] >, count_fixed!( &[u8], tag!( "abcd" ), 2 ) );
/// // can omit the type specifier if returning slices
/// // named!(counter< [&[u8]; 2] >, count_fixed!( tag!( "abcd" ), 2 ) );
///
/// let a = b"abcdabcdabcdef";
/// let b = b"abcdefgh";
/// let res = [&b"abcd"[..], &b"abcd"[..]];
///
/// assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
/// assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
/// # }
/// ```
///
#[macro_export]
macro_rules! count_fixed (
  // Macro-invocation variant: apply the sub-parser exactly $count times,
  // storing the results in a fixed-size array [$typ; $count].
  ($i:expr, $typ:ty, $submac:ident!( $($args:tt)* ), $count: expr) => (
    {
      let mut input = $i;
      // `$typ` must be Copy, and thus having no destructor, this is panic safe
      let mut res: [$typ; $count] = unsafe{[::std::mem::uninitialized(); $count as usize]};
      let mut cnt: usize = 0;
      let mut err = false;
      loop {
        // Filled every slot of the array: done.
        if cnt == $count {
          break
        }
        match $submac!(input, $($args)*) {
          $crate::IResult::Done(i,o) => {
            res[cnt] = o;
            input = i;
            cnt = cnt + 1;
          },
          $crate::IResult::Error(_)  => {
            // Remember the failure; the error is rebuilt below so it points
            // at the ORIGINAL input rather than the failing element.
            err = true;
            break;
          },
          $crate::IResult::Incomplete(_) => {
            break;
          }
        }
      }
      if err {
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Count,$i))
      } else if cnt == $count {
        $crate::IResult::Done(input, res)
      } else {
        // Ran out of input before filling the array.
        $crate::IResult::Incomplete($crate::Needed::Unknown)
      }
    }
  );
  // Expression variant: normalize to call!() and delegate to the arm above.
  // CHANGED: `$f:ident` -> `$f:expr` for consistency with count!, many0!, etc.
  // Every previously valid invocation (a bare identifier) still matches, and
  // parser paths such as `module::parser` now work too.
  ($i:expr, $typ: ty, $f:expr, $count: expr) => (
    count_fixed!($i, $typ, call!($f), $count);
  );
);
/// `length_value!(I -> IResult<I, nb>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// gets a number from the first parser, then applies the second parser that many times
#[macro_export]
macro_rules! length_value(
  // Two-argument arm: element byte size is unknown, so the Incomplete hint
  // uses the element parser's own Needed::Size when it provides one.
  ($i:expr, $f:expr, $g:expr) => (
    {
      match $f($i) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,nb)   => {
          // Bytes consumed by the length parser itself, needed to report an
          // accurate total size on Incomplete.
          let length_token = $i.len() - i1.len();
          let mut input    = i1;
          let mut res      = Vec::new();
          let mut err      = false;
          let mut inc      = $crate::Needed::Unknown;
          loop {
            // Parsed the announced number of elements: done.
            if res.len() == nb as usize {
              break;
            }
            match $g(input) {
              $crate::IResult::Done(i2,o2) => {
                res.push(o2);
                input = i2;
              },
              $crate::IResult::Error(_)      => {
                err = true;
                // BUG FIX: without this break, a failing element parser left
                // `input` untouched and the loop re-ran the same failing
                // parse forever (infinite loop).
                break;
              },
              $crate::IResult::Incomplete(a) => {
                inc = a;
                break;
              }
            }
          }
          if err {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::LengthValue,$i))
          } else if res.len() < nb as usize {
            // Not enough elements: estimate the total input size required,
            // assuming each element needs what the last Incomplete reported.
            match inc {
              $crate::Needed::Unknown      => $crate::IResult::Incomplete($crate::Needed::Unknown),
              $crate::Needed::Size(length) => $crate::IResult::Incomplete($crate::Needed::Size(length_token + nb as usize * length))
            }
          } else {
            $crate::IResult::Done(input, res)
          }
        }
      }
    }
  );
  // Three-argument arm: the caller supplies a fixed per-element byte size
  // `$length`, used for the Needed::Size hint instead of the element
  // parser's own estimate.
  ($i:expr, $f:expr, $g:expr, $length:expr) => (
    {
      match $f($i) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,nb)   => {
          // Bytes consumed by the length parser itself (see arm above).
          let length_token = $i.len() - i1.len();
          let mut input    = i1;
          let mut res      = Vec::new();
          let mut err      = false;
          let mut inc      = $crate::Needed::Unknown;
          loop {
            if res.len() == nb as usize {
              break;
            }
            match $g(input) {
              $crate::IResult::Done(i2,o2) => {
                res.push(o2);
                input = i2;
              },
              $crate::IResult::Error(_)      => {
                err = true;
                // BUG FIX: same missing break as in the arm above — a failing
                // element parser previously caused an infinite loop.
                break;
              },
              $crate::IResult::Incomplete(a) => {
                inc = a;
                break;
              }
            }
          }
          if err {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::LengthValue,$i))
          } else if res.len() < nb as usize {
            match inc {
              $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
              $crate::Needed::Size(_) => $crate::IResult::Incomplete($crate::Needed::Size(length_token + nb as usize * $length))
            }
          } else {
            $crate::IResult::Done(input, res)
          }
        }
      }
    }
  );
);
#[cfg(test)]
mod tests {
use internal::{Needed,IResult,Err};
use internal::IResult::*;
use internal::Err::*;
use util::ErrorKind;
// reproduce the tag and take macros, because of module import order
macro_rules! tag (
($i:expr, $inp: expr) => (
{
#[inline(always)]
fn as_bytes<T: $crate::AsBytes>(b: &T) -> &[u8] {
b.as_bytes()
}
let expected = $inp;
let bytes = as_bytes(&expected);
let res : $crate::IResult<&[u8],&[u8]> = if bytes.len() > $i.len() {
$crate::IResult::Incomplete($crate::Needed::Size(bytes.len()))
} else if &$i[0..bytes.len()] == bytes {
$crate::IResult::Done(&$i[bytes.len()..], &$i[0..bytes.len()])
} else {
$crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Tag, $i))
};
res
}
);
);
macro_rules! take(
($i:expr, $count:expr) => (
{
let cnt = $count as usize;
let res:$crate::IResult<&[u8],&[u8]> = if $i.len() < cnt {
$crate::IResult::Incomplete($crate::Needed::Size(cnt))
} else {
$crate::IResult::Done(&$i[cnt..],&$i[0..cnt])
};
res
}
);
);
mod pub_named_mod {
named!(pub tst, tag!("abcd"));
}
#[test]
fn pub_named_test() {
let a = &b"abcd"[..];
let res = pub_named_mod::tst(a);
assert_eq!(res, Done(&b""[..], a));
}
#[test]
fn apply_test() {
fn sum2(a:u8, b:u8) -> u8 { a + b }
fn sum3(a:u8, b:u8, c:u8) -> u8 { a + b + c }
let a = apply!(1, sum2, 2);
let b = apply!(1, sum3, 2, 3);
assert_eq!(a, 3);
assert_eq!(b, 6);
}
#[derive(PartialEq,Eq,Debug)]
struct B {
a: u8,
b: u8
}
#[test]
fn chain2() {
fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
named!(f<&[u8],B>,
chain!(
tag!("abcd") ~
tag!("abcd")? ~
aa: ret_int1 ~
tag!("efgh") ~
bb: ret_int2 ~
tag!("efgh") ,
||{B{a: aa, b: bb}}
)
);
let r = f(&b"abcdabcdefghefghX"[..]);
assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));
let r2 = f(&b"abcdefghefghX"[..]);
assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
}
#[test]
fn nested_chain() {
fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
named!(f<&[u8],B>,
chain!(
chain!(
tag!("abcd") ~
tag!("abcd")? ,
|| {}
) ~
aa: ret_int1 ~
tag!("efgh") ~
bb: ret_int2 ~
tag!("efgh") ,
||{B{a: aa, b: bb}}
)
);
let r = f(&b"abcdabcdefghefghX"[..]);
assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));
let r2 = f(&b"abcdefghefghX"[..]);
assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
}
#[derive(PartialEq,Eq,Debug)]
struct C {
a: u8,
b: Option<u8>
}
#[test]
fn chain_mut() {
fn ret_b1_2(i:&[u8]) -> IResult<&[u8], B> { Done(i,B{a:1,b:2}) };
named!(f<&[u8],B>,
chain!(
tag!("abcd") ~
tag!("abcd")? ~
tag!("efgh") ~
mut bb: ret_b1_2 ~
tag!("efgh") ,
||{
bb.b = 3;
bb
}
)
);
let r = f(&b"abcdabcdefghefghX"[..]);
assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 3}));
}
#[test]
fn chain_opt() {
named!(y, tag!("efgh"));
fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
named!(ret_y<&[u8], u8>, map!(y, |_| 2));
named!(f<&[u8],C>,
chain!(
tag!("abcd") ~
aa: ret_int1 ~
bb: ret_y? ,
||{C{a: aa, b: bb}}
)
);
let r = f(&b"abcdefghX"[..]);
assert_eq!(r, Done(&b"X"[..], C{a: 1, b: Some(2)}));
let r2 = f(&b"abcdWXYZ"[..]);
assert_eq!(r2, Done(&b"WXYZ"[..], C{a: 1, b: None}));
let r3 = f(&b"abcdX"[..]);
assert_eq!(r3, Incomplete(Needed::Size(8)));
}
use util::{error_to_list, add_error_pattern, print_error};
fn error_to_string<P>(e: &Err<P>) -> &'static str {
let v:Vec<ErrorKind> = error_to_list(e);
// do it this way if you can use slice patterns
/*
match &v[..] {
[ErrorKind::Custom(42), ErrorKind::Tag] => "missing `ijkl` tag",
[ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] => "missing `mnop` tag after `ijkl`",
_ => "unrecognized error"
}
*/
if &v[..] == [ErrorKind::Custom(42),ErrorKind::Tag] {
"missing `ijkl` tag"
} else if &v[..] == [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] {
"missing `mnop` tag after `ijkl`"
} else {
"unrecognized error"
}
}
// do it this way if you can use box patterns
/*use std::str;
fn error_to_string(e:Err) -> String
match e {
NodePosition(ErrorKind::Custom(42), i1, box Position(ErrorKind::Tag, i2)) => {
format!("missing `ijkl` tag, found '{}' instead", str::from_utf8(i2).unwrap())
},
NodePosition(ErrorKind::Custom(42), i1, box NodePosition(ErrorKind::Custom(128), i2, box Position(ErrorKind::Tag, i3))) => {
format!("missing `mnop` tag after `ijkl`, found '{}' instead", str::from_utf8(i3).unwrap())
},
_ => "unrecognized error".to_string()
}
}*/
use std::collections;
#[test]
fn err() {
named!(err_test, alt!(
tag!("abcd") |
preceded!(tag!("efgh"), error!(ErrorKind::Custom(42),
chain!(
tag!("ijkl") ~
res: error!(ErrorKind::Custom(128), tag!("mnop")) ,
|| { res }
)
)
)
));
let a = &b"efghblah"[..];
let b = &b"efghijklblah"[..];
let c = &b"efghijklmnop"[..];
let blah = &b"blah"[..];
let res_a = err_test(a);
let res_b = err_test(b);
let res_c = err_test(c);
assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..], Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah)))))));
assert_eq!(res_c, Done(&b""[..], &b"mnop"[..]));
// Merr-like error matching
let mut err_map = collections::HashMap::new();
assert!(add_error_pattern(&mut err_map, err_test(&b"efghpouet"[..]), "missing `ijkl` tag"));
assert!(add_error_pattern(&mut err_map, err_test(&b"efghijklpouet"[..]), "missing `mnop` tag after `ijkl`"));
let res_a2 = res_a.clone();
match res_a {
Error(e) => {
assert_eq!(error_to_list(&e), [ErrorKind::Custom(42), ErrorKind::Tag]);
assert_eq!(error_to_string(&e), "missing `ijkl` tag");
assert_eq!(err_map.get(&error_to_list(&e)), Some(&"missing `ijkl` tag"));
},
_ => panic!()
};
let res_b2 = res_b.clone();
match res_b {
Error(e) => {
assert_eq!(error_to_list(&e), [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag]);
assert_eq!(error_to_string(&e), "missing `mnop` tag after `ijkl`");
assert_eq!(err_map.get(&error_to_list(&e)), Some(&"missing `mnop` tag after `ijkl`"));
},
_ => panic!()
};
print_error(a, res_a2);
print_error(b, res_b2);
}
#[test]
fn add_err() {
named!(err_test,
preceded!(tag!("efgh"), add_error!(ErrorKind::Custom(42),
chain!(
tag!("ijkl") ~
res: add_error!(ErrorKind::Custom(128), tag!("mnop")) ,
|| { res }
)
)
));
let a = &b"efghblah"[..];
let b = &b"efghijklblah"[..];
let c = &b"efghijklmnop"[..];
let blah = &b"blah"[..];
let res_a = err_test(a);
let res_b = err_test(b);
let res_c = err_test(c);
assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..], Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah)))))));
assert_eq!(res_c, Done(&b""[..], &b"mnop"[..]));
}
#[test]
fn complete() {
named!(err_test,
chain!(
tag!("ijkl") ~
res: complete!(tag!("mnop")) ,
|| { res }
)
);
let a = &b"ijklmn"[..];
let res_a = err_test(a);
assert_eq!(res_a, Error(Position(ErrorKind::Complete, &b"mn"[..])));
}
#[test]
fn alt() {
fn work(input: &[u8]) -> IResult<&[u8],&[u8], &'static str> {
Done(&b""[..], input)
}
#[allow(unused_variables)]
fn dont_work(input: &[u8]) -> IResult<&[u8],&[u8],&'static str> {
Error(Code(ErrorKind::Custom("abcd")))
}
fn work2(input: &[u8]) -> IResult<&[u8],&[u8], &'static str> {
Done(input, &b""[..])
}
fn alt1(i:&[u8]) -> IResult<&[u8],&[u8], &'static str> {
alt!(i, dont_work | dont_work)
}
fn alt2(i:&[u8]) -> IResult<&[u8],&[u8], &'static str> {
alt!(i, dont_work | work)
}
fn alt3(i:&[u8]) -> IResult<&[u8],&[u8], &'static str> {
alt!(i, dont_work | dont_work | work2 | dont_work)
}
//named!(alt1, alt!(dont_work | dont_work));
//named!(alt2, alt!(dont_work | work));
//named!(alt3, alt!(dont_work | dont_work | work2 | dont_work));
let a = &b"abcd"[..];
assert_eq!(alt1(a), Error(Position(ErrorKind::Alt, a)));
assert_eq!(alt2(a), Done(&b""[..], a));
assert_eq!(alt3(a), Done(a, &b""[..]));
named!(alt4, alt!(tag!("abcd") | tag!("efgh")));
let b = &b"efgh"[..];
assert_eq!(alt4(a), Done(&b""[..], a));
assert_eq!(alt4(b), Done(&b""[..], b));
// test the alternative syntax
named!(alt5<bool>, alt!(tag!("abcd") => { |_| false } | tag!("efgh") => { |_| true }));
assert_eq!(alt5(a), Done(&b""[..], false));
assert_eq!(alt5(b), Done(&b""[..], true));
}
#[test]
fn alt_incomplete() {
named!(alt1, alt!(tag!("a") | tag!("bc") | tag!("def")));
let a = &b""[..];
assert_eq!(alt1(a), Incomplete(Needed::Size(1)));
let a = &b"b"[..];
assert_eq!(alt1(a), Incomplete(Needed::Size(2)));
let a = &b"bcd"[..];
assert_eq!(alt1(a), Done(&b"d"[..], &b"bc"[..]));
let a = &b"cde"[..];
assert_eq!(alt1(a), Error(Position(ErrorKind::Alt, a)));
let a = &b"de"[..];
assert_eq!(alt1(a), Incomplete(Needed::Size(3)));
let a = &b"defg"[..];
assert_eq!(alt1(a), Done(&b"g"[..], &b"def"[..]));
}
#[test]
fn switch() {
named!(sw,
switch!(take!(4),
b"abcd" => take!(2) |
b"efgh" => take!(4)
)
);
let a = &b"abcdefgh"[..];
assert_eq!(sw(a), Done(&b"gh"[..], &b"ef"[..]));
let b = &b"efghijkl"[..];
assert_eq!(sw(b), Done(&b""[..], &b"ijkl"[..]));
let c = &b"afghijkl"[..];
assert_eq!(sw(c), Error(Position(ErrorKind::Switch, &b"ijkl"[..])));
}
#[test]
fn opt() {
named!(o<&[u8],Option<&[u8]> >, opt!(tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"bcdefg"[..];
assert_eq!(o(a), Done(&b"ef"[..], Some(&b"abcd"[..])));
assert_eq!(o(b), Done(&b"bcdefg"[..], None));
}
#[test]
fn opt_res() {
named!(o<&[u8], Result<&[u8], Err<&[u8]>> >, opt_res!(tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"bcdefg"[..];
assert_eq!(o(a), Done(&b"ef"[..], Ok(&b"abcd"[..])));
assert_eq!(o(b), Done(&b"bcdefg"[..], Err(Position(ErrorKind::Tag, b))));
}
#[test]
fn cond() {
let b = true;
let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b, tag!("abcd") ) ));
let a = b"abcdef";
assert_eq!(f(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));
let b2 = false;
let f2: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b2, tag!("abcd") ) ));
//let f2 = closure!(&'static [u8], cond!( b2, tag!("abcd") ) );
assert_eq!(f2(&a[..]), Done(&b"abcdef"[..], None));
}
#[test]
fn cond_wrapping() {
// Test that cond!() will wrap a given identifier in the call!() macro.
named!(silly, tag!("foo"));
let b = true;
//let f = closure!(&'static [u8], cond!( b, silly ) );
let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b, silly ) ));
assert_eq!(f(b"foobar"), Done(&b"bar"[..], Some(&b"foo"[..])));
}
#[test]
fn peek() {
named!(ptag<&[u8],&[u8]>, peek!(tag!("abcd")));
let r1 = ptag(&b"abcdefgh"[..]);
assert_eq!(r1, Done(&b"abcdefgh"[..], &b"abcd"[..]));
let r1 = ptag(&b"efgh"[..]);
assert_eq!(r1, Error(Position(ErrorKind::Tag,&b"efgh"[..])));
}
#[test]
fn pair() {
named!(p<&[u8],(&[u8], &[u8])>, pair!(tag!("abcd"), tag!("efgh")));
let r1 = p(&b"abcdefghijkl"[..]);
assert_eq!(r1, Done(&b"ijkl"[..], (&b"abcd"[..], &b"efgh"[..])));
}
#[test]
fn separated_pair() {
named!(p<&[u8],(&[u8], &[u8])>, separated_pair!(tag!("abcd"), tag!(","), tag!("efgh")));
let r1 = p(&b"abcd,efghijkl"[..]);
assert_eq!(r1, Done(&b"ijkl"[..], (&b"abcd"[..], &b"efgh"[..])));
}
#[test]
fn preceded() {
named!(p<&[u8], &[u8]>, preceded!(tag!("abcd"), tag!("efgh")));
let r1 = p(&b"abcdefghijkl"[..]);
assert_eq!(r1, Done(&b"ijkl"[..], &b"efgh"[..]));
}
#[test]
fn terminated() {
named!(p<&[u8], &[u8]>, terminated!(tag!("abcd"), tag!("efgh")));
let r1 = p(&b"abcdefghijkl"[..]);
assert_eq!(r1, Done(&b"ijkl"[..], &b"abcd"[..]));
}
#[test]
fn delimited() {
named!(p<&[u8], &[u8]>, delimited!(tag!("abcd"), tag!("efgh"), tag!("ij")));
let r1 = p(&b"abcdefghijkl"[..]);
assert_eq!(r1, Done(&b"kl"[..], &b"efgh"[..]));
}
#[test]
fn separated_list() {
named!(multi<&[u8],Vec<&[u8]> >, separated_list!(tag!(","), tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"abcd,abcdef"[..];
let c = &b"azerty"[..];
let res1 = vec![&b"abcd"[..]];
assert_eq!(multi(a), Done(&b"ef"[..], res1));
let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(multi(b), Done(&b"ef"[..], res2));
assert_eq!(multi(c), Done(&b"azerty"[..], Vec::new()));
}
#[test]
fn separated_nonempty_list() {
named!(multi<&[u8],Vec<&[u8]> >, separated_nonempty_list!(tag!(","), tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"abcd,abcdef"[..];
let c = &b"azerty"[..];
let res1 = vec![&b"abcd"[..]];
assert_eq!(multi(a), Done(&b"ef"[..], res1));
let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(multi(b), Done(&b"ef"[..], res2));
assert_eq!(multi(c), Error(Position(ErrorKind::Tag,c)));
}
#[test]
fn many0() {
named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"abcdabcdef"[..];
let c = &b"azerty"[..];
let res1 = vec![&b"abcd"[..]];
assert_eq!(multi(a), Done(&b"ef"[..], res1));
let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(multi(b), Done(&b"ef"[..], res2));
assert_eq!(multi(c), Done(&b"azerty"[..], Vec::new()));
}
#[cfg(feature = "nightly")]
use test::Bencher;
#[cfg(feature = "nightly")]
#[bench]
fn many0_bench(b: &mut Bencher) {
named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd")));
b.iter(|| {
multi(&b"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"[..])
});
}
#[test]
fn many1() {
named!(multi<&[u8],Vec<&[u8]> >, many1!(tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"abcdabcdef"[..];
let c = &b"azerty"[..];
let res1 = vec![&b"abcd"[..]];
assert_eq!(multi(a), Done(&b"ef"[..], res1));
let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(multi(b), Done(&b"ef"[..], res2));
assert_eq!(multi(c), Error(Position(ErrorKind::Many1,c)));
}
#[test]
fn infinite_many() {
fn tst(input: &[u8]) -> IResult<&[u8], &[u8]> {
println!("input: {:?}", input);
Error(Position(ErrorKind::Custom(0),input))
}
// should not go into an infinite loop
named!(multi0<&[u8],Vec<&[u8]> >, many0!(tst));
let a = &b"abcdef"[..];
assert_eq!(multi0(a), Done(a, Vec::new()));
named!(multi1<&[u8],Vec<&[u8]> >, many1!(tst));
let a = &b"abcdef"[..];
assert_eq!(multi1(a), Error(Position(ErrorKind::Many1,a)));
}
#[test]
fn count() {
fn counter(input: &[u8]) -> IResult<&[u8], Vec<&[u8]>> {
let size: usize = 2;
count!(input, tag!( "abcd" ), size )
}
let a = b"abcdabcdabcdef";
let b = b"abcdefgh";
let res = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
}
#[test]
fn count_zero() {
  fn counter(input: &[u8]) -> IResult<&[u8], Vec<&[u8]>> {
    let how_many: usize = 0;
    count!(input, tag!( "abcd" ), how_many )
  }

  let data = b"abcdabcdabcdef";
  let expected: Vec<&[u8]> = Vec::new();
  // a count of zero consumes nothing and yields an empty vector
  assert_eq!(counter(&data[..]), Done(&b"abcdabcdabcdef"[..], expected));
}
#[test]
fn count_fixed() {
  // count_fixed! fills a fixed-size array instead of a Vec; the element
  // type is spelled out explicitly in this variant.
  fn counter(input:&[u8]) -> IResult<&[u8], [&[u8]; 2], () > {
    count_fixed!(input, &[u8], tag!( "abcd" ), 2 )
  }

  let enough  = b"abcdabcdabcdef";
  let too_few = b"abcdefgh";

  assert_eq!(counter(&enough[..]), Done(&b"abcdef"[..], [&b"abcd"[..], &b"abcd"[..]]));
  assert_eq!(counter(&too_few[..]), Error(Position(ErrorKind::Count, &too_few[..])));
}
use nom::{le_u16,eof};
#[allow(dead_code)]
/// Compile-time regression check: `count_fixed!` must be usable inside
/// `chain!` when its result is discarded. Never called at runtime.
pub fn compile_count_fixed(input: &[u8]) -> IResult<&[u8], ()> {
  chain!(input,
    tag!("abcd") ~
    count_fixed!( u16, le_u16, 4 ) ~
    eof ,
    || { () }
  )
}
#[test]
fn count_fixed_no_type() {
  // same as count_fixed above; exercises the macro without a named! wrapper
  fn counter(input:&[u8]) -> IResult<&[u8], [&[u8]; 2], () > {
    count_fixed!(input, &[u8], tag!( "abcd" ), 2 )
  }

  let enough  = b"abcdabcdabcdef";
  let too_few = b"abcdefgh";

  assert_eq!(counter(&enough[..]), Done(&b"abcdef"[..], [&b"abcd"[..], &b"abcd"[..]]));
  assert_eq!(counter(&too_few[..]), Error(Position(ErrorKind::Count, &too_few[..])));
}
use nom::{be_u8,be_u16};
#[test]
fn length_value_test() {
  // first byte gives the number of big-endian u16 values that follow
  named!(tst1<&[u8], Vec<u16> >, length_value!(be_u8, be_u16));
  // same, with the element byte size (2) passed explicitly
  named!(tst2<&[u8], Vec<u16> >, length_value!(be_u8, be_u16, 2));

  let i1 = vec![0, 5, 6];
  let i2 = vec![1, 5, 6, 3];
  let i3 = vec![2, 5, 6, 3];
  let i4 = vec![2, 5, 6, 3, 4, 5, 7];
  let i5 = vec![3, 5, 6, 3, 4, 5];

  let r1: Vec<u16> = Vec::new();
  let r2: Vec<u16> = vec![1286];
  let r4: Vec<u16> = vec![1286, 772];
  assert_eq!(tst1(&i1), IResult::Done(&i1[1..], r1));
  assert_eq!(tst1(&i2), IResult::Done(&i2[3..], r2));
  assert_eq!(tst1(&i3), IResult::Incomplete(Needed::Size(5)));
  assert_eq!(tst1(&i4), IResult::Done(&i4[5..], r4));
  assert_eq!(tst1(&i5), IResult::Incomplete(Needed::Size(7)));

  let r6: Vec<u16> = Vec::new();
  let r7: Vec<u16> = vec![1286];
  let r9: Vec<u16> = vec![1286, 772];
  assert_eq!(tst2(&i1), IResult::Done(&i1[1..], r6));
  assert_eq!(tst2(&i2), IResult::Done(&i2[3..], r7));
  assert_eq!(tst2(&i3), IResult::Incomplete(Needed::Size(5)));
  assert_eq!(tst2(&i4), IResult::Done(&i4[5..], r9));
  // BUG FIX: this assertion previously re-ran `tst1(&i5)` (copy-paste),
  // leaving tst2's Incomplete path untested. 1 length byte + 3 * 2 = 7.
  assert_eq!(tst2(&i5), IResult::Incomplete(Needed::Size(7)));
}
#[test]
fn chain_incomplete() {
  // 4 + 8 bytes are required but only 8 are available: chain! must
  // report the total size needed from the start of the input.
  let res = chain!(&b"abcdefgh"[..],
    first:  take!(4) ~
    second: take!(8),
    || { (first, second) }
  );
  assert_eq!(res, IResult::Incomplete(Needed::Size(12)));
}
}
// remove a warning
//! Macro combinators
//!
//! Macros are used to make combination easier,
//! since they often do not depend on the type
//! of the data they manipulate or return.
//!
//! There is a trick to make them easier to assemble,
//! combinators are defined like this:
//!
//! ```ignore
//! macro_rules! tag (
//! ($i:expr, $inp: expr) => (
//! {
//! ...
//! }
//! );
//! );
//! ```
//!
//! But when used in other combinators, are Used
//! like this:
//!
//! ```ignore
//! named!(my_function, tag!("abcd"));
//! ```
//!
//! Internally, other combinators will rewrite
//! that call to pass the input as first argument:
//!
//! ```ignore
//! macro_rules! named (
//! ($name:ident, $submac:ident!( $($args:tt)* )) => (
//! fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<'a,&[u8], &[u8]> {
//! $submac!(i, $($args)*)
//! }
//! );
//! );
//! ```
//!
//! If you want to call a combinator directly, you can
//! do it like this:
//!
//! ```ignore
//! let res = { tag!(input, "abcd"); }
//! ```
//!
//! Combinators must have a specific variant for
//! non-macro arguments. Example: passing a function
//! to take_while! instead of another combinator.
//!
//! ```ignore
//! macro_rules! take_while(
//! ($input:expr, $submac:ident!( $($args:tt)* )) => (
//! {
//! ...
//! }
//! );
//!
//! // wrap the function in a macro to pass it to the main implementation
//! ($input:expr, $f:expr) => (
//! take_while!($input, call!($f));
//! );
//! );
//! ```
/// Wraps a parser in a closure
///
/// Produces a closure value that runs the given parser on its argument.
/// The optional first argument pins the input type when inference needs help.
#[macro_export]
macro_rules! closure (
  // explicit input type
  ($ty:ty, $submac:ident!( $($args:tt)* )) => (
    |i: $ty| { $submac!(i, $($args)*) }
  );
  // input type left to inference
  ($submac:ident!( $($args:tt)* )) => (
    |i| { $submac!(i, $($args)*) }
  );
);
/// Makes a function from a parser combination
///
/// The type can be set up if the compiler needs
/// more information
///
/// ```ignore
/// named!(my_function( &[u8] ) -> &[u8], tag!("abcd"));
/// // first type parameter is input, second is output
/// named!(my_function<&[u8], &[u8]>, tag!("abcd"));
/// // will have &[u8] as input type, &[u8] as output type
/// named!(my_function, tag!("abcd"));
/// // will use &[u8] as input type (use this if the compiler
/// // complains about lifetime issues)
/// named!(my_function<&[u8]>, tag!("abcd"));
/// // prefix them with 'pub' to make the functions public
/// named!(pub my_function, tag!("abcd"));
/// ```
#[macro_export]
macro_rules! named (
  // fn-like syntax: explicit input and output types
  ($name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => (
    fn $name( i: $i ) -> $crate::IResult<$i,$o> {
      $submac!(i, $($args)*)
    }
  );
  // angle-bracket syntax: explicit input and output types
  ($name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
    fn $name( i: $i ) -> $crate::IResult<$i, $o> {
      $submac!(i, $($args)*)
    }
  );
  // output type only; input defaults to &'a [u8] with an explicit lifetime
  ($name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => (
    fn $name<'a>( i: &'a[u8] ) -> $crate::IResult<&'a [u8], $o> {
      $submac!(i, $($args)*)
    }
  );
  // NOTE(review): `$life:item` looks wrong for a lifetime parameter, and
  // `IResult<$life, $i, $o>` does not match IResult's two type parameters;
  // this arm appears unusable as written — confirm before relying on it.
  ($name:ident<$life:item,$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
    fn $name<$life>( i: $i ) -> $crate::IResult<$life, $i, $o> {
      $submac!(i, $($args)*)
    }
  );
  // no types: both input and output default to &[u8]
  ($name:ident, $submac:ident!( $($args:tt)* )) => (
    fn $name( i: &[u8] ) -> $crate::IResult<&[u8], &[u8]> {
      $submac!(i, $($args)*)
    }
  );
  // public variants of the rules above
  (pub $name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => (
    pub fn $name( i: $i ) -> $crate::IResult<$i,$o> {
      $submac!(i, $($args)*)
    }
  );
  (pub $name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
    pub fn $name( i: $i ) -> $crate::IResult<$i, $o> {
      $submac!(i, $($args)*)
    }
  );
  (pub $name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => (
    pub fn $name( i: &[u8] ) -> $crate::IResult<&[u8], $o> {
      $submac!(i, $($args)*)
    }
  );
  (pub $name:ident, $submac:ident!( $($args:tt)* )) => (
    pub fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<&[u8], &[u8]> {
      $submac!(i, $($args)*)
    }
  );
);
/// Used to wrap common expressions and function as macros
///
/// Rewrites a plain function (plus optional extra arguments) into the
/// `parser(input, args...)` call shape that the other combinators expect.
#[macro_export]
macro_rules! call (
  ($i:expr, $fun:expr) => ( $fun( $i ) );
  ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) );
);
/// emulate function currying: `apply!(my_function, arg1, arg2, ...)` becomes `my_function(input, arg1, arg2, ...)`
///
/// Supports up to 6 arguments
#[macro_export]
macro_rules! apply (
  // identical to call!'s multi-argument rule; kept as a separate, clearly
  // named entry point for the currying use case
  ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) );
);
/// Prevents backtracking if the child parser fails
///
/// This parser will do an early return instead of sending
/// its result to the parent parser.
///
/// If another `error!` combinator is present in the parent
/// chain, the error will be wrapped and another early
/// return will be made.
///
/// This makes it easy to build report on which parser failed,
/// where it failed in the input, and the chain of parsers
/// that led it there.
///
/// Additionally, the error chain contains number identifiers
/// that can be matched to provide useful error messages.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use std::collections;
/// # use nom::IResult::Error;
/// # use nom::Err::{Position,NodePosition};
/// # use nom::ErrorKind;
/// # fn main() {
///     named!(err_test, alt!(
///       tag!("abcd") |
///       preceded!(tag!("efgh"), error!(ErrorKind::Custom(42),
///           chain!(
///                  tag!("ijkl")              ~
///             res: error!(ErrorKind::Custom(128), tag!("mnop")) ,
///             || { res }
///           )
///         )
///       )
///     ));
///     let a = &b"efghblah"[..];
///     let b = &b"efghijklblah"[..];
///     let c = &b"efghijklmnop"[..];
///
///     let blah = &b"blah"[..];
///
///     let res_a = err_test(a);
///     let res_b = err_test(b);
///     let res_c = err_test(c);
///     assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
///     assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..],
///       Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah))))))
///     );
/// # }
/// ```
///
#[macro_export]
macro_rules! error (
  ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => (
    {
      // Running the child inside a closure confines any early `return`
      // emitted by a nested `error!` to that closure, so its error value
      // surfaces here and gets wrapped below.
      let cl = || {
        $submac!($i, $($args)*)
      };
      match cl() {
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Done(i, o)    => $crate::IResult::Done(i, o),
        $crate::IResult::Error(e)      => {
          // early return from the *enclosing* function: no backtracking
          return $crate::IResult::Error($crate::Err::NodePosition($code, $i, Box::new(e)))
        }
      }
    }
  );
  // plain function variant
  ($i:expr, $code:expr, $f:expr) => (
    error!($i, $code, call!($f));
  );
);
/// Add an error if the child parser fails
///
/// While error! does an early return and avoids backtracking,
/// add_error! backtracks normally. It just provides more context
/// for an error
///
#[macro_export]
macro_rules! add_error (
  ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Done(i, o)    => $crate::IResult::Done(i, o),
        $crate::IResult::Error(e)      => {
          // no early return here: the wrapped error is a normal value,
          // so enclosing combinators (e.g. alt!) can still backtrack
          $crate::IResult::Error($crate::Err::NodePosition($code, $i, Box::new(e)))
        }
      }
    }
  );
  // plain function variant
  ($i:expr, $code:expr, $f:expr) => (
    add_error!($i, $code, call!($f));
  );
);
/// replaces a `Incomplete` returned by the child parser
/// with an `Error`
///
/// Use this when the input is known to be complete, so that "needs more
/// data" becomes a hard failure instead of a request for more input.
#[macro_export]
macro_rules! complete (
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(i, o)    => $crate::IResult::Done(i, o),
        $crate::IResult::Error(e)      => $crate::IResult::Error(e),
        $crate::IResult::Incomplete(_) => {
          // the needed size is discarded: there will never be more input
          $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Complete, $i))
        },
      }
    }
  );
  // plain function variant
  ($i:expr, $f:expr) => (
    complete!($i, call!($f));
  );
);
/// `flat_map!(R -> IResult<R,S>, S -> IResult<S,T>) => R -> IResult<R, T>`
///
/// combines a parser R -> IResult<R,S> and
/// a parser S -> IResult<S,T> to return another
/// parser R -> IResult<R,T>
///
/// The second parser runs on the *output* of the first; the remaining
/// input of the combined parser is whatever the first parser left over.
#[macro_export]
macro_rules! flat_map(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
        $crate::IResult::Done(i, o)                          => match $submac2!(o, $($args2)*) {
          $crate::IResult::Error(e)                                 => {
            // re-anchor the inner parser's error on the *outer* input,
            // since its own position points into the intermediate value
            let err = match e {
              $crate::Err::Code(k) | $crate::Err::Node(k, _) | $crate::Err::Position(k, _) | $crate::Err::NodePosition(k, _, _) => {
                $crate::Err::Position(k, $i)
              }
            };
            $crate::IResult::Error(err)
          },
          $crate::IResult::Incomplete($crate::Needed::Unknown)      => $crate::IResult::Incomplete($crate::Needed::Unknown),
          $crate::IResult::Incomplete($crate::Needed::Size(ref i2)) => $crate::IResult::Incomplete($crate::Needed::Size(*i2)),
          // keep the first parser's remaining input, return the second's output
          $crate::IResult::Done(_, o2)                              => $crate::IResult::Done(i, o2)
        }
      }
    }
  );
  // function/macro argument permutations, all funneled into the rule above
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    flat_map!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $g:expr) => (
    flat_map!($i, call!($f), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    flat_map!($i, call!($f), $submac!($($args)*));
  );
);
/// `map!(I -> IResult<I,O>, O -> P) => I -> IResult<I, P>`
/// maps a function on the result of a parser
///
/// Each rule normalizes one argument combination (function vs macro)
/// before dispatching to map_impl!.
#[macro_export]
macro_rules! map(
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    map_impl!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    map_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    map_impl!($i, call!($f), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    map_impl!($i, call!($f), $submac!($($args)*));
  );
);
/// Internal parser, do not use directly
///
/// Runs the parser, then applies the second macro to the produced value
/// (infallibly); errors and Incomplete pass through untouched.
#[macro_export]
macro_rules! map_impl(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
        $crate::IResult::Done(i, o)                          => $crate::IResult::Done(i, $submac2!(o, $($args2)*))
      }
    }
  );
);
/// `map_res!(I -> IResult<I,O>, O -> Result<P>) => I -> IResult<I, P>`
/// maps a function returning a Result on the output of a parser
///
/// Each rule normalizes one argument combination (function vs macro)
/// before dispatching to map_res_impl!.
#[macro_export]
macro_rules! map_res (
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    map_res_impl!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    map_res_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    map_res_impl!($i, call!($f), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    map_res_impl!($i, call!($f), $submac!($($args)*));
  );
);
/// Internal parser, do not use directly
///
/// Like map_impl!, but the mapping returns a Result: Err is turned into
/// a MapRes parse error positioned at the original input.
#[macro_export]
macro_rules! map_res_impl (
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
        $crate::IResult::Done(i, o)                          => match $submac2!(o, $($args2)*) {
          // the original error value is discarded; only its presence matters
          Ok(output) => $crate::IResult::Done(i, output),
          Err(_)     => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::MapRes, $i))
        }
      }
    }
  );
);
/// `map_opt!(I -> IResult<I,O>, O -> Option<P>) => I -> IResult<I, P>`
/// maps a function returning an Option on the output of a parser
///
/// Each rule normalizes one argument combination (function vs macro)
/// before dispatching to map_opt_impl!.
#[macro_export]
macro_rules! map_opt (
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    map_opt_impl!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    map_opt_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    map_opt_impl!($i, call!($f), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    map_opt_impl!($i, call!($f), $submac!($($args)*));
  );
);
/// Internal parser, do not use directly
///
/// Like map_impl!, but the mapping returns an Option: None is turned into
/// a MapOpt parse error positioned at the original input.
#[macro_export]
macro_rules! map_opt_impl (
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
        $crate::IResult::Done(i, o)                          => match $submac2!(o, $($args2)*) {
          Some(output) => $crate::IResult::Done(i, output),
          None         => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::MapOpt, $i))
        }
      }
    }
  );
);
/// `expr_res!(Result<E,O>) => I -> IResult<I, O>`
/// evaluate an expression that returns a Result<T,E> and returns a IResult::Done(I,T) if Ok
///
/// The input is passed through untouched; only the expression's success
/// or failure is translated into a parse result.
///
/// See expr_opt for an example
#[macro_export]
macro_rules! expr_res (
  ($i:expr, $e:expr) => (
    {
      match $e {
        Ok(output) => $crate::IResult::Done($i, output),
        // the Err payload is discarded; the error is positioned at $i
        Err(_)     => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::ExprRes, $i))
      }
    }
  );
);
/// `expr_opt!(Option<O>) => I -> IResult<I, O>`
/// evaluate an expression that returns a Option<T> and returns a IResult::Done(I,T) if Some
///
/// Useful when doing computations in a chain
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{self, Done, Error};
/// # use nom::Err::Position;
/// # use nom::{be_u8,ErrorKind};
///
///  fn take_add(input:&[u8], size: u8) -> IResult<&[u8],&[u8]> {
///    chain!(input,
///      sz:     be_u8                             ~
///      length: expr_opt!(size.checked_add(sz))   ~ // checking for integer overflow (returns an Option)
///      data:   take!(length)                     ,
///      ||{ data }
///    )
///  }
/// # fn main() {
///  let arr1 = [1, 2, 3, 4, 5];
///  let r1 = take_add(&arr1[..], 1);
///  assert_eq!(r1, Done(&[4,5][..], &[2,3][..]));
///
///  let arr2 = [0xFE, 2, 3, 4, 5];
///  // size is overflowing
///  let r1 = take_add(&arr2[..], 42);
///  assert_eq!(r1, Error(Position(ErrorKind::ExprOpt,&[2,3,4,5][..])));
/// # }
/// ```
#[macro_export]
macro_rules! expr_opt (
  ($i:expr, $e:expr) => (
    {
      match $e {
        Some(output) => $crate::IResult::Done($i, output),
        None         => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::ExprOpt, $i))
      }
    }
  );
);
/// `chain!(I->IResult<I,A> ~ I->IResult<I,B> ~ ... I->IResult<I,X> , || { return O } ) => I -> IResult<I, O>`
/// chains parsers and assemble the results through a closure
/// the input type I must implement nom::InputLength
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{self, Done, Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// #[derive(PartialEq,Eq,Debug)]
/// struct B {
///   a: u8,
///   b: Option<u8>
/// }
///
/// named!(y, tag!("efgh"));
///
/// fn ret_int(i:&[u8]) -> IResult<&[u8], u8> { Done(i, 1) }
/// named!(ret_y<&[u8], u8>, map!(y, |_| 1)); // return 1 if the "efgh" tag is found
///
/// named!(z<&[u8], B>,
///   chain!(
///     tag!("abcd")  ~     // the '~' character is used as separator
///     aa: ret_int   ~     // the result of that parser will be used in the closure
///     tag!("abcd")? ~     // this parser is optional
///     bb: ret_y?    ,     // the result of that parser is an option
///                         // the last parser in the chain is followed by a ','
///     ||{B{a: aa, b: bb}}
///   )
/// );
///
/// # fn main() {
/// // the first "abcd" tag is not present, we have an error
/// let r1 = z(&b"efgh"[..]);
/// assert_eq!(r1, Error(Position(ErrorKind::Tag,&b"efgh"[..])));
///
/// // everything is present, everything is parsed
/// let r2 = z(&b"abcdabcdefgh"[..]);
/// assert_eq!(r2, Done(&b""[..], B{a: 1, b: Some(1)}));
///
/// // the second "abcd" tag is optional
/// let r3 = z(&b"abcdefgh"[..]);
/// assert_eq!(r3, Done(&b""[..], B{a: 1, b: Some(1)}));
///
/// // the result of ret_y is optional, as seen in the B structure
/// let r4 = z(&b"abcdabcdwxyz"[..]);
/// assert_eq!(r4, Done(&b"wxyz"[..], B{a: 1, b: None}));
/// # }
/// ```
#[macro_export]
macro_rules! chain (
  // thin entry point: all the work happens in chaining_parser!, with the
  // consumed-byte counter seeded at zero
  ($i:expr, $($rest:tt)*) => (
    {
      //use $crate::InputLength;
      chaining_parser!($i, 0usize, $($rest)*)
    }
  );
);
/// Internal parser, do not use directly
///
/// Implements `chain!` by recursing over its argument list one parser at a
/// time. `$consumed` accumulates how many bytes the chain has already eaten,
/// so that an `Incomplete(Needed::Size(..))` from a child parser can be
/// rebased onto the start of the whole chain's input.
///
/// Arm groups, in order: discarded result (`p ~`), optional discarded
/// (`p? ~`), named (`f: p ~`), mutable named (`mut f: p ~`), optional named
/// (`f: p? ~`), optional mutable named (`mut f: p? ~`), then the same six
/// shapes terminated by `, $assemble` instead of `~`, and finally the bare
/// `$assemble` closure. Each `$e:ident` arm just wraps the function in
/// `call!` and re-dispatches.
#[macro_export]
macro_rules! chaining_parser (
  ($i:expr, $consumed:expr, $e:ident ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, call!($e) ~ $($rest)*);
  );

  // parser whose result is discarded
  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
    {
      use $crate::InputLength;
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        // rebase the needed size on the chain's original input
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        $crate::IResult::Done(i,_)                           => {
          chaining_parser!(i, $consumed + (($i).input_len() - i.input_len()), $($rest)*)
        }
      }
    }
  );

  ($i:expr, $consumed:expr, $e:ident ? ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, call!($e) ? ~ $($rest)*);
  );

  // optional parser, result discarded: an Error simply consumes nothing
  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
    {
      use $crate::InputLength;
      let res = $submac!($i, $($args)*);
      if let $crate::IResult::Incomplete(inc) = res {
        match inc {
          $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
          $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        }
      } else {
        let input = if let $crate::IResult::Done(i,_) = res {
          i
        } else {
          $i
        };
        chaining_parser!(input, $consumed + (($i).input_len() - input.input_len()), $($rest)*)
      }
    }
  });

  ($i:expr, $consumed:expr, $field:ident : $e:ident ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, $field: call!($e) ~ $($rest)*);
  );

  // parser whose result is bound to $field for later rules and the closure
  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
    {
      use $crate::InputLength;
      match  $submac!($i, $($args)*) {
        $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        $crate::IResult::Done(i,o)                           => {
          let $field = o;
          chaining_parser!(i, $consumed + (($i).input_len() - i.input_len()), $($rest)*)
        }
      }
    }
  );

  ($i:expr, $consumed:expr, mut $field:ident : $e:ident ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, mut $field: call!($e) ~ $($rest)*);
  );

  // as above, but the binding is mutable
  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
    {
      use $crate::InputLength;
      match  $submac!($i, $($args)*) {
        $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        $crate::IResult::Done(i,o)                           => {
          let mut $field = o;
          chaining_parser!(i, $consumed + ($i).input_len() - i.input_len(), $($rest)*)
        }
      }
    }
  );

  ($i:expr, $consumed:expr, $field:ident : $e:ident ? ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, $field : call!($e) ? ~ $($rest)*);
  );

  // optional named parser: $field becomes an Option, None on failure
  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
    {
      use $crate::InputLength;
      let res = $submac!($i, $($args)*);
      if let $crate::IResult::Incomplete(inc) = res {
        match inc {
          $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
          $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        }
      } else {
        let ($field,input) = if let $crate::IResult::Done(i,o) = res {
          (Some(o),i)
        } else {
          (None,$i)
        };
        chaining_parser!(input, $consumed + ($i).input_len() - input.input_len(), $($rest)*)
      }
    }
  });

  ($i:expr, $consumed:expr, mut $field:ident : $e:ident ? ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, mut $field : call!($e) ? ~ $($rest)*);
  );

  // optional named parser with a mutable binding
  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
    {
      use $crate::InputLength;
      let res = $submac!($i, $($args)*);
      if let $crate::IResult::Incomplete(inc) = res {
        match inc {
          $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
          $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        }
      } else {
        let (mut $field,input) = if let $crate::IResult::Done(i,o) = res {
          (Some(o),i)
        } else {
          (None,$i)
        };
        chaining_parser!(input, $consumed + ($i).input_len() - input.input_len(), $($rest)*)
      }
    }
  });

  // ending the chain
  ($i:expr, $consumed:expr, $e:ident, $assemble:expr) => (
    chaining_parser!($i, $consumed, call!($e), $assemble);
  );

  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ), $assemble:expr) => (
    match $submac!($i, $($args)*) {
      $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
      $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
      $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      $crate::IResult::Done(i,_)                           => {
        $crate::IResult::Done(i, $assemble())
      }
    }
  );

  ($i:expr, $consumed:expr, $e:ident ?, $assemble:expr) => (
    chaining_parser!($i, $consumed, call!($e) ?, $assemble);
  );

  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ?, $assemble:expr) => ({
    let res = $submac!($i, $($args)*);
    if let $crate::IResult::Incomplete(inc) = res {
      match inc {
        $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      }
    } else {
      let input = if let $crate::IResult::Done(i,_) = res {
        i
      } else {
        $i
      };
      $crate::IResult::Done(input, $assemble())
    }
  });

  ($i:expr, $consumed:expr, $field:ident : $e:ident, $assemble:expr) => (
    chaining_parser!($i, $consumed, $field: call!($e), $assemble);
  );

  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ), $assemble:expr) => (
    match $submac!($i, $($args)*) {
      $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
      $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
      $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      $crate::IResult::Done(i,o)                           => {
        let $field = o;
        $crate::IResult::Done(i, $assemble())
      }
    }
  );

  ($i:expr, $consumed:expr, mut $field:ident : $e:ident, $assemble:expr) => (
    chaining_parser!($i, $consumed, mut $field: call!($e), $assemble);
  );

  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ), $assemble:expr) => (
    match $submac!($i, $($args)*) {
      $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
      $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
      $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      $crate::IResult::Done(i,o)                           => {
        let mut $field = o;
        $crate::IResult::Done(i, $assemble())
      }
    }
  );

  ($i:expr, $consumed:expr, $field:ident : $e:ident ? , $assemble:expr) => (
    chaining_parser!($i, $consumed, $field : call!($e) ? , $assemble);
  );

  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? , $assemble:expr) => ({
    let res = $submac!($i, $($args)*);
    if let $crate::IResult::Incomplete(inc) = res {
      match inc {
        $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      }
    } else {
      let ($field,input) = if let $crate::IResult::Done(i,o) = res {
        (Some(o), i)
      } else {
        (None, $i)
      };
      $crate::IResult::Done(input, $assemble())
    }
  });

  // BUG FIX: this arm previously delegated WITHOUT `mut`
  // (`$field : call!($e) ? , $assemble`), so `mut f: parser? ,` right
  // before the closure produced an immutable binding. It now forwards
  // to the `mut` rule below, matching the `mut ... ? ~` arm above.
  ($i:expr, $consumed:expr, mut $field:ident : $e:ident ? , $assemble:expr) => (
    chaining_parser!($i, $consumed, mut $field : call!($e) ? , $assemble);
  );

  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ? , $assemble:expr) => ({
    let res = $submac!($i, $($args)*);
    if let $crate::IResult::Incomplete(inc) = res {
      match inc {
        $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      }
    } else {
      let (mut $field,input) = if let $crate::IResult::Done(i,o) = res {
        (Some(o), i)
      } else {
        (None, $i)
      };
      $crate::IResult::Done(input, $assemble())
    }
  });

  // empty chain: just run the assembling closure on the untouched input
  ($i:expr, $consumed:expr, $assemble:expr) => (
    $crate::IResult::Done($i, $assemble())
  )
);
/// `alt!(I -> IResult<I,O> | I -> IResult<I,O> | ... | I -> IResult<I,O> ) => I -> IResult<I, O>`
/// try a list of parsers, return the result of the first successful one
///
/// If one of the parser returns Incomplete, alt will return Incomplete, to retry
/// once you get more input. Note that it is better for performance to know the
/// minimum size of data you need before you get into alt.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
///  named!( test, alt!( tag!( "abcd" ) | tag!( "efgh" ) ) );
///  let r1 = test(b"abcdefgh");
///  assert_eq!(r1, Done(&b"efgh"[..], &b"abcd"[..]));
///  let r2 = test(&b"efghijkl"[..]);
///  assert_eq!(r2, Done(&b"ijkl"[..], &b"efgh"[..]));
///  # }
/// ```
///
/// There is another syntax for alt allowing a block to manipulate the result:
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
///     #[derive(Debug,PartialEq,Eq)]
///     enum Tagged {
///       Abcd,
///       Efgh,
///       Took(usize)
///     }
///     named!(test<Tagged>, alt!(
///         tag!("abcd") => { |_|          Tagged::Abcd }
///       | tag!("efgh") => { |_|          Tagged::Efgh }
///       | take!(5)     => { |res: &[u8]| Tagged::Took(res.len()) } // the closure takes the result as argument if the parser is successful
///     ));
///     let r1 = test(b"abcdefgh");
///     assert_eq!(r1, Done(&b"efgh"[..], Tagged::Abcd));
///     let r2 = test(&b"efghijkl"[..]);
///     assert_eq!(r2, Done(&b"ijkl"[..], Tagged::Efgh));
///     let r3 = test(&b"mnopqrst"[..]);
///     assert_eq!(r3, Done(&b"rst"[..],  Tagged::Took(5)));
/// # }
/// ```
#[macro_export]
macro_rules! alt (
  // thin entry point: the recursion over alternatives lives in alt_parser!
  ($i:expr, $($rest:tt)*) => (
    {
      alt_parser!($i, $($rest)*)
    }
  );
);
/// Internal parser, do not use directly
///
/// Recurses over the `|`-separated alternatives of `alt!`: tries each
/// branch on the *original* input; Done and Incomplete stop the search,
/// Error falls through to the next branch. The empty rule at the bottom
/// is reached when every branch failed.
#[macro_export]
macro_rules! alt_parser (
  ($i:expr, $e:ident | $($rest:tt)*) => (
    alt_parser!($i, call!($e) | $($rest)*);
  );

  // plain branch followed by more alternatives
  ($i:expr, $subrule:ident!( $($args:tt)*) | $($rest:tt)*) => (
    {
      let res = $subrule!($i, $($args)*);
      match res {
        $crate::IResult::Done(_,_)     => res,
        $crate::IResult::Incomplete(_) => res,
        // Error: backtrack to $i and try the next alternative
        _                              => alt_parser!($i, $($rest)*)
      }
    }
  );

  // branch with a `=> { closure }` result transformer, more alternatives follow
  ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr } | $($rest:tt)+) => (
    {
      match $subrule!( $i, $($args)* ) {
        $crate::IResult::Done(i,o)     => $crate::IResult::Done(i,$gen(o)),
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Error(_)      => {
          alt_parser!($i, $($rest)*)
        }
      }
    }
  );

  ($i:expr, $e:ident => { $gen:expr } | $($rest:tt)*) => (
    alt_parser!($i, call!($e) => { $gen } | $($rest)*);
  );

  ($i:expr, $e:ident => { $gen:expr }) => (
    alt_parser!($i, call!($e) => { $gen });
  );

  // last branch with a result transformer
  ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr }) => (
    {
      match $subrule!( $i, $($args)* ) {
        $crate::IResult::Done(i,o)     => $crate::IResult::Done(i,$gen(o)),
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Error(_)      => {
          alt_parser!($i)
        }
      }
    }
  );

  ($i:expr, $e:ident) => (
    alt_parser!($i, call!($e));
  );

  // last plain branch
  ($i:expr, $subrule:ident!( $($args:tt)*)) => (
    {
      match $subrule!( $i, $($args)* ) {
        $crate::IResult::Done(i,o)     => $crate::IResult::Done(i,o),
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Error(_)      => {
          alt_parser!($i)
        }
      }
    }
  );

  // all alternatives exhausted
  ($i:expr) => (
    $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Alt,$i))
  );
);
/// `switch!(I -> IResult<I,P>, P => I -> IResult<I,O> | ... | P => I -> IResult<I,O> ) => I -> IResult<I, O>`
/// choose the next parser depending on the result of the first one, if successful
///
/// The selector's output is pattern-matched against the `P =>` arms; an
/// unmatched value produces a `Switch` error at the remaining input.
#[macro_export]
macro_rules! switch (
  ($i:expr, $submac:ident!( $($args:tt)*), $($p:pat => $subrule:ident!( $($args2:tt)* ))|*) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e)      => $crate::IResult::Error(e),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i, o)    => {
          match o {
            $($p => $subrule!(i, $($args2)*)),*,
            _    => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Switch,i))
          }
        }
      }
    }
  );
  // selector given as a plain function: wrap it in call! and re-dispatch.
  // BUG FIX: this previously expanded to `call!(e)` — a literal identifier
  // `e` instead of the captured `$e` — so the rule only compiled if a
  // variable named `e` happened to be in scope at the call site.
  ($i:expr, $e:ident, $($rest:tt)*) => (
    {
      switch!($i, call!($e), $($rest)*)
    }
  );
);
/// `opt!(I -> IResult<I,O>) => I -> IResult<I, Option<O>>`
/// make the underlying parser optional
///
/// returns an Option of the returned type. This parser never fails
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
///  named!( o<&[u8], Option<&[u8]> >, opt!( tag!( "abcd" ) ) );
///
///  let a = b"abcdef";
///  let b = b"bcdefg";
///  assert_eq!(o(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));
///  assert_eq!(o(&b[..]), Done(&b"bcdefg"[..], None));
///  # }
/// ```
#[macro_export]
macro_rules! opt(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(i,o)     => $crate::IResult::Done(i, Some(o)),
        // both Error and Incomplete become a successful None on the
        // original, unconsumed input
        $crate::IResult::Error(_)      => $crate::IResult::Done($i, None),
        $crate::IResult::Incomplete(_) => $crate::IResult::Done($i, None)
      }
    }
  );
  // plain function variant
  ($i:expr, $f:expr) => (
    opt!($i, call!($f));
  );
);
/// `opt_res!(I -> IResult<I,O>) => I -> IResult<I, Result<nom::Err,O>>`
/// make the underlying parser optional
///
/// returns a Result, with Err containing the parsing error
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!( o<&[u8], Result<&[u8], nom::Err<&[u8]> > >, opt_res!( tag!( "abcd" ) ) );
///
/// let a = b"abcdef";
/// let b = b"bcdefg";
/// assert_eq!(o(&a[..]), Done(&b"ef"[..], Ok(&b"abcd"[..])));
/// assert_eq!(o(&b[..]), Done(&b"bcdefg"[..], Err(Position(ErrorKind::Tag, &b[..]))));
/// # }
/// ```
#[macro_export]
macro_rules! opt_res (
  // Like `opt!`, but keeps the error value: success maps to `Ok(o)`, a hard
  // error is swallowed and surfaced as `Err(e)` in the output (consuming
  // nothing), and `Incomplete` is propagated since more data may still help.
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(i,o)     => $crate::IResult::Done(i, Ok(o)),
        // note: the original input `$i` is returned untouched on error
        $crate::IResult::Error(e)      => $crate::IResult::Done($i, Err(e)),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $f:expr) => (
    opt_res!($i, call!($f));
  );
);
/// `cond!(bool, I -> IResult<I,O>) => I -> IResult<I, Option<O>>`
/// Conditional combinator
///
/// Wraps another parser and calls it if the
/// condition is met. This combinator returns
/// an Option of the return type of the child
/// parser.
///
/// This is especially useful if a parser depends
/// on the value returned by a preceding parser in
/// a `chain!`.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use nom::IResult;
/// # fn main() {
/// let b = true;
/// let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8],
/// cond!( b, tag!("abcd") ))
/// );
///
/// let a = b"abcdef";
/// assert_eq!(f(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));
///
/// let b2 = false;
/// let f2:Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8],
/// cond!( b2, tag!("abcd") ))
/// );
/// assert_eq!(f2(&a[..]), Done(&b"abcdef"[..], None));
/// # }
/// ```
///
#[macro_export]
macro_rules! cond(
  ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => (
    {
      // Only run the child parser when the runtime condition holds;
      // otherwise succeed immediately with `None`, consuming nothing.
      if $cond {
        match $submac!($i, $($args)*) {
          $crate::IResult::Done(i,o)     => $crate::IResult::Done(i, Some(o)),
          // a failing child is not an error for `cond!` itself
          $crate::IResult::Error(_)      => $crate::IResult::Done($i, None),
          $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
        }
      } else {
        $crate::IResult::Done($i, None)
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $cond:expr, $f:expr) => (
    cond!($i, $cond, call!($f));
  );
);
/// `cond_reduce!(bool, I -> IResult<I,O>) => I -> IResult<I, O>`
/// Conditional combinator with error
///
/// Wraps another parser and calls it if the
/// condition is met. This combinator returns
/// an error if the condition is false
///
/// This is especially useful if a parser depends
/// on the value returned by a preceding parser in
/// a `chain!`.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::{Err,ErrorKind};
/// # fn main() {
/// let b = true;
/// let f = closure!(&'static[u8],
/// cond_reduce!( b, tag!("abcd") )
/// );
///
/// let a = b"abcdef";
/// assert_eq!(f(&a[..]), Done(&b"ef"[..], &b"abcd"[..]));
///
/// let b2 = false;
/// let f2 = closure!(&'static[u8],
/// cond_reduce!( b2, tag!("abcd") )
/// );
/// assert_eq!(f2(&a[..]), Error(Err::Position(ErrorKind::CondReduce, &a[..])));
/// # }
/// ```
///
#[macro_export]
macro_rules! cond_reduce(
  ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => (
    {
      // Like `cond!`, but a false condition is a hard parse error
      // (ErrorKind::CondReduce) instead of a successful `None`, and the
      // child's result is returned unwrapped.
      if $cond {
        match $submac!($i, $($args)*) {
          $crate::IResult::Done(i,o)     => $crate::IResult::Done(i, o),
          $crate::IResult::Error(e)      => $crate::IResult::Error(e),
          $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
        }
      } else {
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::CondReduce, $i))
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $cond:expr, $f:expr) => (
    cond_reduce!($i, $cond, call!($f));
  );
);
/// `peek!(I -> IResult<I,O>) => I -> IResult<I, O>`
/// returns a result without consuming the input
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!(ptag, peek!( tag!( "abcd" ) ) );
///
/// let r = ptag(&b"abcdefgh"[..]);
/// assert_eq!(r, Done(&b"abcdefgh"[..], &b"abcd"[..]));
/// # }
/// ```
#[macro_export]
macro_rules! peek(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      // Run the child parser but discard how much it consumed: on success
      // the ORIGINAL input `$i` is returned alongside the child's output.
      // Errors and Incomplete are propagated unchanged.
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(_,o)     => $crate::IResult::Done($i, o),
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
      }
    }
  );
  ($i:expr, $f:expr) => (
    // BUG FIX: the parser expression must be interpolated as `$f`, not the
    // literal identifier `f`; with hygiene, `call!(f)` referenced a
    // nonexistent name and this arm could never expand to working code.
    peek!($i, call!($f));
  );
);
/// `tap!(name: I -> IResult<I,O> => { block }) => I -> IResult<I, O>`
/// allows access to the parser's result without affecting it
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use std::str;
/// # fn main() {
/// named!(ptag, tap!(res: tag!( "abcd" ) => { println!("recognized {}", str::from_utf8(res).unwrap()) } ) );
///
/// let r = ptag(&b"abcdefgh"[..]);
/// assert_eq!(r, Done(&b"efgh"[..], &b"abcd"[..]));
/// # }
/// ```
#[macro_export]
macro_rules! tap (
  ($i:expr, $name:ident : $submac:ident!( $($args:tt)* ) => $e:expr) => (
    {
      // On success, bind the child's output to `$name`, evaluate `$e` for
      // its side effects (e.g. logging), then pass the output through
      // unchanged. Errors and Incomplete are propagated as-is.
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(i,o) => {
          let $name = o;
          $e;
          $crate::IResult::Done(i, $name)
        },
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $name: ident: $f:expr => $e:expr) => (
    tap!($i, $name: call!($f) => $e);
  );
);
/// `pair!(I -> IResult<I,O>, I -> IResult<I,P>) => I -> IResult<I, (O,P)>`
/// pair(X,Y), returns (x,y)
///
#[macro_export]
macro_rules! pair(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      // Run the two parsers in sequence; on success, return a tuple of
      // both outputs with the remaining input from the second parser.
      // Any failure of either parser aborts the whole combinator.
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1)   => {
          match $submac2!(i1, $($args2)*) {
            $crate::IResult::Error(a)      => $crate::IResult::Error(a),
            $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
            $crate::IResult::Done(i2,o2)   => {
              $crate::IResult::Done(i2, (o1, o2))
            }
          }
        },
      }
    }
  );
  // the remaining arms normalize plain functions into `call!` form
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    pair!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    pair!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    pair!($i, call!($f), call!($g));
  );
);
/// `separated_pair!(I -> IResult<I,O>, I -> IResult<I, T>, I -> IResult<I,P>) => I -> IResult<I, (O,P)>`
/// separated_pair(X,sep,Y) returns (x,y)
#[macro_export]
macro_rules! separated_pair(
  // Stage 1 of 3: run the first parser, then hand its output and the
  // remaining tokens to `separated_pair1!` (separator + second element).
  ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1)   => {
          separated_pair1!(i1, o1, $($rest)*)
        }
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $f:expr, $($rest:tt)+) => (
    separated_pair!($i, call!($f), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[macro_export]
macro_rules! separated_pair1(
  // Stage 2 of 3: parse and discard the separator, carrying the first
  // element's result (`$res1`) forward to `separated_pair2!`.
  ($i:expr, $res1:ident, $submac2:ident!( $($args2:tt)* ), $($rest:tt)+) => (
    {
      match $submac2!($i, $($args2)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        // separator output is intentionally ignored
        $crate::IResult::Done(i2,_)    => {
          separated_pair2!(i2, $res1, $($rest)*)
        }
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $res1:ident, $g:expr, $($rest:tt)+) => (
    separated_pair1!($i, $res1, call!($g), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[macro_export]
macro_rules! separated_pair2(
  // Stage 3 of 3: parse the second element and pair it with the first
  // element's result carried through the previous stages.
  ($i:expr, $res1:ident, $submac3:ident!( $($args3:tt)* )) => (
    {
      match $submac3!($i, $($args3)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i3,o3)   => {
          $crate::IResult::Done(i3, ($res1, o3))
        }
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $res1:ident, $h:expr) => (
    separated_pair2!($i, $res1, call!($h));
  );
);
/// `preceded!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, O>`
/// preceded(opening, X) returns X
#[macro_export]
macro_rules! preceded(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      // Run both parsers in sequence, discarding the first parser's output
      // and keeping only the second's.
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,_)    => {
          match $submac2!(i1, $($args2)*) {
            $crate::IResult::Error(a)      => $crate::IResult::Error(a),
            $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
            $crate::IResult::Done(i2,o2)   => {
              $crate::IResult::Done(i2, o2)
            }
          }
        },
      }
    }
  );
  // the remaining arms normalize plain functions into `call!` form
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    preceded!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    preceded!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    preceded!($i, call!($f), call!($g));
  );
);
/// `terminated!(I -> IResult<I,O>, I -> IResult<I,T>) => I -> IResult<I, O>`
/// terminated(X, closing) returns X
#[macro_export]
macro_rules! terminated(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      // Run both parsers in sequence, keeping the first parser's output and
      // discarding the trailing/closing parser's output.
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1)   => {
          match $submac2!(i1, $($args2)*) {
            $crate::IResult::Error(a)      => $crate::IResult::Error(a),
            $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
            $crate::IResult::Done(i2,_)    => {
              $crate::IResult::Done(i2, o1)
            }
          }
        },
      }
    }
  );
  // the remaining arms normalize plain functions into `call!` form
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    terminated!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    terminated!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    terminated!($i, call!($f), call!($g));
  );
);
/// `delimited!(I -> IResult<I,T>, I -> IResult<I,O>, I -> IResult<I,U>) => I -> IResult<I, O>`
/// delimited(opening, X, closing) returns X
#[macro_export]
macro_rules! delimited(
  // Stage 1 of 3: parse and discard the opening delimiter, then hand the
  // remaining tokens to `delimited1!` (inner value + closing delimiter).
  ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,_)    => {
          delimited1!(i1, $($rest)*)
        }
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $f:expr, $($rest:tt)+) => (
    delimited!($i, call!($f), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[macro_export]
macro_rules! delimited1(
  // Stage 2 of 3: parse the inner value and carry its output forward to
  // `delimited2!`, which consumes the closing delimiter.
  ($i:expr, $submac2:ident!( $($args2:tt)* ), $($rest:tt)+) => (
    {
      match $submac2!($i, $($args2)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i2,o2)   => {
          delimited2!(i2, o2, $($rest)*)
        }
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $g:expr, $($rest:tt)+) => (
    delimited1!($i, call!($g), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[macro_export]
macro_rules! delimited2(
  // Stage 3 of 3: parse and discard the closing delimiter, returning the
  // inner value (`$res2`) captured by the previous stage.
  ($i:expr, $res2:ident, $submac3:ident!( $($args3:tt)* )) => (
    {
      match $submac3!($i, $($args3)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i3,_)    => {
          $crate::IResult::Done(i3, $res2)
        }
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $res2:ident, $h:expr) => (
    delimited2!($i, $res2, call!($h));
  );
);
/// `separated_list!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// separated_list(sep, X) returns Vec<X>
#[macro_export]
macro_rules! separated_list(
  ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => (
    {
      let mut res   = Vec::new();
      let mut input = $i;
      // get the first element; if it fails, an empty list is a valid result
      match $submac!(input, $($args2)*) {
        $crate::IResult::Error(_)      => $crate::IResult::Done(input, Vec::new()),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i,o)     => {
          if i.len() == input.len() {
            // element parser consumed nothing: bail out rather than loop forever
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::SeparatedList,input))
          } else {
            res.push(o);
            input = i;
            // then alternate separator / element until either stops matching
            // or stops consuming input (zero-length match guards below)
            loop {
              // get the separator first
              if let $crate::IResult::Done(i2,_) = $sep!(input, $($args)*) {
                if i2.len() == input.len() {
                  break;
                }
                input = i2;
                // get the element next
                if let $crate::IResult::Done(i3,o3) = $submac!(input, $($args2)*) {
                  if i3.len() == input.len() {
                    break;
                  }
                  res.push(o3);
                  input = i3;
                } else {
                  break;
                }
              } else {
                break;
              }
            }
            $crate::IResult::Done(input, res)
          }
        },
      }
    }
  );
  // the remaining arms normalize plain functions into `call!` form
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    separated_list!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    separated_list!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    separated_list!($i, call!($f), call!($g));
  );
);
/// `separated_nonempty_list!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// separated_nonempty_list(sep, X) returns Vec<X>
#[macro_export]
macro_rules! separated_nonempty_list(
  ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => (
    {
      let mut res   = Vec::new();
      let mut input = $i;
      // get the first element; unlike `separated_list!`, failure here is a
      // hard error because at least one element is required
      match $submac!(input, $($args2)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i,o)     => {
          if i.len() == input.len() {
            // element parser consumed nothing: bail out rather than loop forever
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::SeparatedNonEmptyList,input))
          } else {
            res.push(o);
            input = i;
            // alternate separator / element until either stops matching or
            // stops consuming input (zero-length match guards below)
            loop {
              if let $crate::IResult::Done(i2,_) = $sep!(input, $($args)*) {
                if i2.len() == input.len() {
                  break;
                }
                input = i2;
                if let $crate::IResult::Done(i3,o3) = $submac!(input, $($args2)*) {
                  if i3.len() == input.len() {
                    break;
                  }
                  res.push(o3);
                  input = i3;
                } else {
                  break;
                }
              } else {
                break;
              }
            }
            $crate::IResult::Done(input, res)
          }
        },
      }
    }
  );
  // the remaining arms normalize plain functions into `call!` form
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    separated_nonempty_list!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    separated_nonempty_list!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    separated_nonempty_list!($i, call!($f), call!($g));
  );
);
/// `many0!(I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// Applies the parser 0 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!(multi<&[u8], Vec<&[u8]> >, many0!( tag!( "abcd" ) ) );
///
/// let a = b"abcdabcdef";
/// let b = b"azerty";
///
/// let res = vec![&b"abcd"[..], &b"abcd"[..]];
/// assert_eq!(multi(&a[..]), Done(&b"ef"[..], res));
/// assert_eq!(multi(&b[..]), Done(&b"azerty"[..], Vec::new()));
/// # }
/// ```
/// 0 or more
#[macro_export]
macro_rules! many0(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      // Apply the child parser repeatedly until it fails, collecting the
      // outputs. Zero matches is a success (empty Vec on untouched input).
      let mut res   = Vec::new();
      let mut input = $i;
      while let $crate::IResult::Done(i,o) = $submac!(input, $($args)*) {
        // guard against zero-length matches, which would loop forever
        if i.len() == input.len() {
          break;
        }
        res.push(o);
        input = i;
      }
      $crate::IResult::Done(input, res)
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $f:expr) => (
    many0!($i, call!($f));
  );
);
/// `many1!(I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// Applies the parser 1 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done, Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!(multi<&[u8], Vec<&[u8]> >, many1!( tag!( "abcd" ) ) );
///
/// let a = b"abcdabcdef";
/// let b = b"azerty";
///
/// let res = vec![&b"abcd"[..], &b"abcd"[..]];
/// assert_eq!(multi(&a[..]), Done(&b"ef"[..], res));
/// assert_eq!(multi(&b[..]), Error(Position(ErrorKind::Many1,&b[..])));
/// # }
/// ```
#[macro_export]
macro_rules! many1(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      // Same accumulation loop as `many0!`, but zero successful matches is
      // reported as ErrorKind::Many1 on the original input.
      let mut res   = Vec::new();
      let mut input = $i;
      while let $crate::IResult::Done(i,o) = $submac!(input, $($args)*) {
        // guard against zero-length matches, which would loop forever
        if i.len() == input.len() {
          break;
        }
        res.push(o);
        input = i;
      }
      if res.is_empty() {
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Many1,$i))
      } else {
        $crate::IResult::Done(input, res)
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $f:expr) => (
    many1!($i, call!($f));
  );
);
/// `count!(I -> IResult<I,O>, nb) => I -> IResult<I, Vec<O>>`
/// Applies the child parser a specified number of times
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!(counter< Vec<&[u8]> >, count!( tag!( "abcd" ), 2 ) );
///
/// let a = b"abcdabcdabcdef";
/// let b = b"abcdefgh";
/// let res = vec![&b"abcd"[..], &b"abcd"[..]];
///
/// assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
/// assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
/// # }
/// ```
///
#[macro_export]
macro_rules! count(
  ($i:expr, $submac:ident!( $($args:tt)* ), $count: expr) => (
    {
      // Apply the child parser exactly `$count` times, collecting outputs.
      // Any child error makes the whole combinator fail with
      // ErrorKind::Count on the ORIGINAL input; a child Incomplete stops
      // the loop and is reported as Incomplete(Needed::Unknown) below.
      let mut input = $i;
      let mut res   = Vec::with_capacity($count);
      let mut cnt: usize = 0;
      let mut err = false;
      loop {
        if cnt == $count {
          break
        }
        match $submac!(input, $($args)*) {
          $crate::IResult::Done(i,o) => {
            res.push(o);
            input = i;
            cnt = cnt + 1;
          },
          $crate::IResult::Error(_)  => {
            err = true;
            break;
          },
          $crate::IResult::Incomplete(_) => {
            break;
          }
        }
      }
      if err {
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Count,$i))
      } else if cnt == $count {
        $crate::IResult::Done(input, res)
      } else {
        // loop stopped early on Incomplete before reaching `$count` items
        $crate::IResult::Incomplete($crate::Needed::Unknown)
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $f:expr, $count: expr) => (
    count!($i, call!($f), $count);
  );
);
/// `count_fixed!(O, I -> IResult<I,O>, nb) => I -> IResult<I, [O; nb]>`
/// Applies the child parser a fixed number of times and returns a fixed size array
/// The type must be specified and it must be `Copy`
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!(counter< [&[u8]; 2] >, count_fixed!( &[u8], tag!( "abcd" ), 2 ) );
/// // can omit the type specifier if returning slices
/// // named!(counter< [&[u8]; 2] >, count_fixed!( tag!( "abcd" ), 2 ) );
///
/// let a = b"abcdabcdabcdef";
/// let b = b"abcdefgh";
/// let res = [&b"abcd"[..], &b"abcd"[..]];
///
/// assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
/// assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
/// # }
/// ```
///
#[macro_export]
macro_rules! count_fixed (
  ($i:expr, $typ:ty, $submac:ident!( $($args:tt)* ), $count: expr) => (
    {
      // Like `count!`, but fills a fixed-size array `[$typ; $count]`
      // instead of a Vec; `$typ` must be `Copy`.
      let mut input = $i;
      // `$typ` must be Copy, and thus having no destructor, this is panic safe
      // NOTE(review): `mem::uninitialized` is deprecated/UB-prone in modern
      // Rust — `MaybeUninit` is the replacement if this crate's minimum
      // Rust version ever allows it.
      let mut res: [$typ; $count] = unsafe{[::std::mem::uninitialized(); $count as usize]};
      let mut cnt: usize = 0;
      let mut err = false;
      loop {
        if cnt == $count {
          break
        }
        match $submac!(input, $($args)*) {
          $crate::IResult::Done(i,o) => {
            res[cnt] = o;
            input = i;
            cnt = cnt + 1;
          },
          $crate::IResult::Error(_)  => {
            err = true;
            break;
          },
          $crate::IResult::Incomplete(_) => {
            break;
          }
        }
      }
      if err {
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Count,$i))
      } else if cnt == $count {
        $crate::IResult::Done(input, res)
      } else {
        // loop stopped early on Incomplete before filling the array
        $crate::IResult::Incomplete($crate::Needed::Unknown)
      }
    }
  );
  // plain-function form: wrap in `call!` and defer to the macro arm above
  ($i:expr, $typ: ty, $f:ident, $count: expr) => (
    count_fixed!($i, $typ, call!($f), $count);
  );
);
/// `length_value!(I -> IResult<I, nb>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// gets a number from the first parser, then applies the second parser that many times
#[macro_export]
macro_rules! length_value(
  ($i:expr, $f:expr, $g:expr) => (
    {
      // First parser `$f` yields an element count `nb`; the second parser
      // `$g` is then applied `nb` times, accumulating results in a Vec.
      match $f($i) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,nb)   => {
          // bytes consumed by the length token, used in Needed estimates
          let length_token = $i.len() - i1.len();
          let mut input    = i1;
          let mut res      = Vec::new();
          let mut err      = false;
          let mut inc      = $crate::Needed::Unknown;
          loop {
            if res.len() == nb as usize {
              break;
            }
            match $g(input) {
              $crate::IResult::Done(i2,o2) => {
                res.push(o2);
                input = i2;
              },
              $crate::IResult::Error(_)      => {
                err = true;
                // BUG FIX: without this `break` the loop re-parsed the same
                // failing input forever (infinite loop on element error)
                break;
              },
              $crate::IResult::Incomplete(a) => {
                inc = a;
                break;
              }
            }
          }
          if err {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::LengthValue,$i))
          } else if res.len() < nb as usize {
            // not enough elements: estimate how many bytes are still needed
            match inc {
              $crate::Needed::Unknown      => $crate::IResult::Incomplete($crate::Needed::Unknown),
              $crate::Needed::Size(length) => $crate::IResult::Incomplete($crate::Needed::Size(length_token + nb as usize * length))
            }
          } else {
            $crate::IResult::Done(input, res)
          }
        }
      }
    }
  );
  // Variant taking an explicit per-element byte size `$length`, used in
  // place of the child's reported `Needed::Size` for the missing-bytes
  // estimate.
  ($i:expr, $f:expr, $g:expr, $length:expr) => (
    {
      match $f($i) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,nb)   => {
          let length_token = $i.len() - i1.len();
          let mut input    = i1;
          let mut res      = Vec::new();
          let mut err      = false;
          let mut inc      = $crate::Needed::Unknown;
          loop {
            if res.len() == nb as usize {
              break;
            }
            match $g(input) {
              $crate::IResult::Done(i2,o2) => {
                res.push(o2);
                input = i2;
              },
              $crate::IResult::Error(_)      => {
                err = true;
                // BUG FIX: same missing `break` as in the arm above
                break;
              },
              $crate::IResult::Incomplete(a) => {
                inc = a;
                break;
              }
            }
          }
          if err {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::LengthValue,$i))
          } else if res.len() < nb as usize {
            match inc {
              $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
              $crate::Needed::Size(_) => $crate::IResult::Incomplete($crate::Needed::Size(length_token + nb as usize * $length))
            }
          } else {
            $crate::IResult::Done(input, res)
          }
        }
      }
    }
  );
);
#[cfg(test)]
mod tests {
use internal::{Needed,IResult,Err};
use internal::IResult::*;
use internal::Err::*;
use util::ErrorKind;
// reproduce the tag and take macros, because of module import order
macro_rules! tag (
($i:expr, $inp: expr) => (
{
#[inline(always)]
fn as_bytes<T: $crate::AsBytes>(b: &T) -> &[u8] {
b.as_bytes()
}
let expected = $inp;
let bytes = as_bytes(&expected);
let res : $crate::IResult<&[u8],&[u8]> = if bytes.len() > $i.len() {
$crate::IResult::Incomplete($crate::Needed::Size(bytes.len()))
} else if &$i[0..bytes.len()] == bytes {
$crate::IResult::Done(&$i[bytes.len()..], &$i[0..bytes.len()])
} else {
$crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Tag, $i))
};
res
}
);
);
macro_rules! take(
($i:expr, $count:expr) => (
{
let cnt = $count as usize;
let res:$crate::IResult<&[u8],&[u8]> = if $i.len() < cnt {
$crate::IResult::Incomplete($crate::Needed::Size(cnt))
} else {
$crate::IResult::Done(&$i[cnt..],&$i[0..cnt])
};
res
}
);
);
  // Checks that `named!(pub ...)` produces a `pub` function visible from
  // outside the defining module.
  mod pub_named_mod {
    named!(pub tst, tag!("abcd"));
  }
#[test]
fn pub_named_test() {
let a = &b"abcd"[..];
let res = pub_named_mod::tst(a);
assert_eq!(res, Done(&b""[..], a));
}
#[test]
fn apply_test() {
fn sum2(a:u8, b:u8) -> u8 { a + b }
fn sum3(a:u8, b:u8, c:u8) -> u8 { a + b + c }
let a = apply!(1, sum2, 2);
let b = apply!(1, sum3, 2, 3);
assert_eq!(a, 3);
assert_eq!(b, 6);
}
#[derive(PartialEq,Eq,Debug)]
struct B {
a: u8,
b: u8
}
#[test]
fn chain2() {
fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
named!(f<&[u8],B>,
chain!(
tag!("abcd") ~
tag!("abcd")? ~
aa: ret_int1 ~
tag!("efgh") ~
bb: ret_int2 ~
tag!("efgh") ,
||{B{a: aa, b: bb}}
)
);
let r = f(&b"abcdabcdefghefghX"[..]);
assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));
let r2 = f(&b"abcdefghefghX"[..]);
assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
}
#[test]
fn nested_chain() {
fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
named!(f<&[u8],B>,
chain!(
chain!(
tag!("abcd") ~
tag!("abcd")? ,
|| {}
) ~
aa: ret_int1 ~
tag!("efgh") ~
bb: ret_int2 ~
tag!("efgh") ,
||{B{a: aa, b: bb}}
)
);
let r = f(&b"abcdabcdefghefghX"[..]);
assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));
let r2 = f(&b"abcdefghefghX"[..]);
assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
}
#[derive(PartialEq,Eq,Debug)]
struct C {
a: u8,
b: Option<u8>
}
#[test]
fn chain_mut() {
fn ret_b1_2(i:&[u8]) -> IResult<&[u8], B> { Done(i,B{a:1,b:2}) };
named!(f<&[u8],B>,
chain!(
tag!("abcd") ~
tag!("abcd")? ~
tag!("efgh") ~
mut bb: ret_b1_2 ~
tag!("efgh") ,
||{
bb.b = 3;
bb
}
)
);
let r = f(&b"abcdabcdefghefghX"[..]);
assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 3}));
}
#[test]
fn chain_opt() {
named!(y, tag!("efgh"));
fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
named!(ret_y<&[u8], u8>, map!(y, |_| 2));
named!(f<&[u8],C>,
chain!(
tag!("abcd") ~
aa: ret_int1 ~
bb: ret_y? ,
||{C{a: aa, b: bb}}
)
);
let r = f(&b"abcdefghX"[..]);
assert_eq!(r, Done(&b"X"[..], C{a: 1, b: Some(2)}));
let r2 = f(&b"abcdWXYZ"[..]);
assert_eq!(r2, Done(&b"WXYZ"[..], C{a: 1, b: None}));
let r3 = f(&b"abcdX"[..]);
assert_eq!(r3, Incomplete(Needed::Size(8)));
}
use util::{error_to_list, add_error_pattern, print_error};
  // Maps a nom error chain to a human-readable message by comparing the
  // flattened list of ErrorKinds; used by the `err` test below.
  fn error_to_string<P>(e: &Err<P>) -> &'static str {
    let v:Vec<ErrorKind> = error_to_list(e);
    // do it this way if you can use slice patterns
    /*
      match &v[..] {
        [ErrorKind::Custom(42), ErrorKind::Tag]                         => "missing `ijkl` tag",
        [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] => "missing `mnop` tag after `ijkl`",
        _            => "unrecognized error"
      }
    */
    if &v[..] == [ErrorKind::Custom(42),ErrorKind::Tag] {
      "missing `ijkl` tag"
    } else if &v[..] == [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] {
      "missing `mnop` tag after `ijkl`"
    } else {
      "unrecognized error"
    }
  }
// do it this way if you can use box patterns
/*use std::str;
fn error_to_string(e:Err) -> String
match e {
NodePosition(ErrorKind::Custom(42), i1, box Position(ErrorKind::Tag, i2)) => {
format!("missing `ijkl` tag, found '{}' instead", str::from_utf8(i2).unwrap())
},
NodePosition(ErrorKind::Custom(42), i1, box NodePosition(ErrorKind::Custom(128), i2, box Position(ErrorKind::Tag, i3))) => {
format!("missing `mnop` tag after `ijkl`, found '{}' instead", str::from_utf8(i3).unwrap())
},
_ => "unrecognized error".to_string()
}
}*/
use std::collections;
#[test]
fn err() {
named!(err_test, alt!(
tag!("abcd") |
preceded!(tag!("efgh"), error!(ErrorKind::Custom(42),
chain!(
tag!("ijkl") ~
res: error!(ErrorKind::Custom(128), tag!("mnop")) ,
|| { res }
)
)
)
));
let a = &b"efghblah"[..];
let b = &b"efghijklblah"[..];
let c = &b"efghijklmnop"[..];
let blah = &b"blah"[..];
let res_a = err_test(a);
let res_b = err_test(b);
let res_c = err_test(c);
assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..], Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah)))))));
assert_eq!(res_c, Done(&b""[..], &b"mnop"[..]));
// Merr-like error matching
let mut err_map = collections::HashMap::new();
assert!(add_error_pattern(&mut err_map, err_test(&b"efghpouet"[..]), "missing `ijkl` tag"));
assert!(add_error_pattern(&mut err_map, err_test(&b"efghijklpouet"[..]), "missing `mnop` tag after `ijkl`"));
let res_a2 = res_a.clone();
match res_a {
Error(e) => {
assert_eq!(error_to_list(&e), [ErrorKind::Custom(42), ErrorKind::Tag]);
assert_eq!(error_to_string(&e), "missing `ijkl` tag");
assert_eq!(err_map.get(&error_to_list(&e)), Some(&"missing `ijkl` tag"));
},
_ => panic!()
};
let res_b2 = res_b.clone();
match res_b {
Error(e) => {
assert_eq!(error_to_list(&e), [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag]);
assert_eq!(error_to_string(&e), "missing `mnop` tag after `ijkl`");
assert_eq!(err_map.get(&error_to_list(&e)), Some(&"missing `mnop` tag after `ijkl`"));
},
_ => panic!()
};
print_error(a, res_a2);
print_error(b, res_b2);
}
#[test]
fn add_err() {
named!(err_test,
preceded!(tag!("efgh"), add_error!(ErrorKind::Custom(42),
chain!(
tag!("ijkl") ~
res: add_error!(ErrorKind::Custom(128), tag!("mnop")) ,
|| { res }
)
)
));
let a = &b"efghblah"[..];
let b = &b"efghijklblah"[..];
let c = &b"efghijklmnop"[..];
let blah = &b"blah"[..];
let res_a = err_test(a);
let res_b = err_test(b);
let res_c = err_test(c);
assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..], Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah)))))));
assert_eq!(res_c, Done(&b""[..], &b"mnop"[..]));
}
#[test]
fn complete() {
named!(err_test,
chain!(
tag!("ijkl") ~
res: complete!(tag!("mnop")) ,
|| { res }
)
);
let a = &b"ijklmn"[..];
let res_a = err_test(a);
assert_eq!(res_a, Error(Position(ErrorKind::Complete, &b"mn"[..])));
}
#[test]
fn alt() {
fn work(input: &[u8]) -> IResult<&[u8],&[u8], &'static str> {
Done(&b""[..], input)
}
#[allow(unused_variables)]
fn dont_work(input: &[u8]) -> IResult<&[u8],&[u8],&'static str> {
Error(Code(ErrorKind::Custom("abcd")))
}
fn work2(input: &[u8]) -> IResult<&[u8],&[u8], &'static str> {
Done(input, &b""[..])
}
fn alt1(i:&[u8]) -> IResult<&[u8],&[u8], &'static str> {
alt!(i, dont_work | dont_work)
}
fn alt2(i:&[u8]) -> IResult<&[u8],&[u8], &'static str> {
alt!(i, dont_work | work)
}
fn alt3(i:&[u8]) -> IResult<&[u8],&[u8], &'static str> {
alt!(i, dont_work | dont_work | work2 | dont_work)
}
//named!(alt1, alt!(dont_work | dont_work));
//named!(alt2, alt!(dont_work | work));
//named!(alt3, alt!(dont_work | dont_work | work2 | dont_work));
let a = &b"abcd"[..];
assert_eq!(alt1(a), Error(Position(ErrorKind::Alt, a)));
assert_eq!(alt2(a), Done(&b""[..], a));
assert_eq!(alt3(a), Done(a, &b""[..]));
named!(alt4, alt!(tag!("abcd") | tag!("efgh")));
let b = &b"efgh"[..];
assert_eq!(alt4(a), Done(&b""[..], a));
assert_eq!(alt4(b), Done(&b""[..], b));
// test the alternative syntax
named!(alt5<bool>, alt!(tag!("abcd") => { |_| false } | tag!("efgh") => { |_| true }));
assert_eq!(alt5(a), Done(&b""[..], false));
assert_eq!(alt5(b), Done(&b""[..], true));
}
#[test]
fn alt_incomplete() {
named!(alt1, alt!(tag!("a") | tag!("bc") | tag!("def")));
let a = &b""[..];
assert_eq!(alt1(a), Incomplete(Needed::Size(1)));
let a = &b"b"[..];
assert_eq!(alt1(a), Incomplete(Needed::Size(2)));
let a = &b"bcd"[..];
assert_eq!(alt1(a), Done(&b"d"[..], &b"bc"[..]));
let a = &b"cde"[..];
assert_eq!(alt1(a), Error(Position(ErrorKind::Alt, a)));
let a = &b"de"[..];
assert_eq!(alt1(a), Incomplete(Needed::Size(3)));
let a = &b"defg"[..];
assert_eq!(alt1(a), Done(&b"g"[..], &b"def"[..]));
}
#[test]
fn switch() {
named!(sw,
switch!(take!(4),
b"abcd" => take!(2) |
b"efgh" => take!(4)
)
);
let a = &b"abcdefgh"[..];
assert_eq!(sw(a), Done(&b"gh"[..], &b"ef"[..]));
let b = &b"efghijkl"[..];
assert_eq!(sw(b), Done(&b""[..], &b"ijkl"[..]));
let c = &b"afghijkl"[..];
assert_eq!(sw(c), Error(Position(ErrorKind::Switch, &b"ijkl"[..])));
}
// opt!: a matched inner parser yields Some(value); a failed one consumes nothing and yields None.
#[test]
fn opt() {
named!(o<&[u8],Option<&[u8]> >, opt!(tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"bcdefg"[..];
assert_eq!(o(a), Done(&b"ef"[..], Some(&b"abcd"[..])));
assert_eq!(o(b), Done(&b"bcdefg"[..], None));
}
// opt_res!: like opt! but the value is a Result carrying the inner parser's error on failure.
#[test]
fn opt_res() {
named!(o<&[u8], Result<&[u8], Err<&[u8]>> >, opt_res!(tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"bcdefg"[..];
assert_eq!(o(a), Done(&b"ef"[..], Ok(&b"abcd"[..])));
assert_eq!(o(b), Done(&b"bcdefg"[..], Err(Position(ErrorKind::Tag, b))));
}
// cond!: the inner parser runs only when the boolean is true; otherwise input passes through untouched.
#[test]
fn cond() {
let b = true;
let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b, tag!("abcd") ) ));
let a = b"abcdef";
assert_eq!(f(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));
let b2 = false;
let f2: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b2, tag!("abcd") ) ));
//let f2 = closure!(&'static [u8], cond!( b2, tag!("abcd") ) );
assert_eq!(f2(&a[..]), Done(&b"abcdef"[..], None));
}
#[test]
fn cond_wrapping() {
// Test that cond!() will wrap a given identifier in the call!() macro.
named!(silly, tag!("foo"));
let b = true;
//let f = closure!(&'static [u8], cond!( b, silly ) );
let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b, silly ) ));
assert_eq!(f(b"foobar"), Done(&b"bar"[..], Some(&b"foo"[..])));
}
// peek!: returns the inner parser's value without consuming any input.
#[test]
fn peek() {
named!(ptag<&[u8],&[u8]>, peek!(tag!("abcd")));
let r1 = ptag(&b"abcdefgh"[..]);
assert_eq!(r1, Done(&b"abcdefgh"[..], &b"abcd"[..]));
let r1 = ptag(&b"efgh"[..]);
assert_eq!(r1, Error(Position(ErrorKind::Tag,&b"efgh"[..])));
}
// sequence combinators: pair / separated_pair / preceded / terminated / delimited.
#[test]
fn pair() {
named!(p<&[u8],(&[u8], &[u8])>, pair!(tag!("abcd"), tag!("efgh")));
let r1 = p(&b"abcdefghijkl"[..]);
assert_eq!(r1, Done(&b"ijkl"[..], (&b"abcd"[..], &b"efgh"[..])));
}
#[test]
fn separated_pair() {
named!(p<&[u8],(&[u8], &[u8])>, separated_pair!(tag!("abcd"), tag!(","), tag!("efgh")));
let r1 = p(&b"abcd,efghijkl"[..]);
assert_eq!(r1, Done(&b"ijkl"[..], (&b"abcd"[..], &b"efgh"[..])));
}
#[test]
fn preceded() {
named!(p<&[u8], &[u8]>, preceded!(tag!("abcd"), tag!("efgh")));
let r1 = p(&b"abcdefghijkl"[..]);
assert_eq!(r1, Done(&b"ijkl"[..], &b"efgh"[..]));
}
#[test]
fn terminated() {
named!(p<&[u8], &[u8]>, terminated!(tag!("abcd"), tag!("efgh")));
let r1 = p(&b"abcdefghijkl"[..]);
assert_eq!(r1, Done(&b"ijkl"[..], &b"abcd"[..]));
}
#[test]
fn delimited() {
named!(p<&[u8], &[u8]>, delimited!(tag!("abcd"), tag!("efgh"), tag!("ij")));
let r1 = p(&b"abcdefghijkl"[..]);
assert_eq!(r1, Done(&b"kl"[..], &b"efgh"[..]));
}
// list combinators: separated_list! allows zero matches, separated_nonempty_list! requires one.
#[test]
fn separated_list() {
named!(multi<&[u8],Vec<&[u8]> >, separated_list!(tag!(","), tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"abcd,abcdef"[..];
let c = &b"azerty"[..];
let res1 = vec![&b"abcd"[..]];
assert_eq!(multi(a), Done(&b"ef"[..], res1));
let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(multi(b), Done(&b"ef"[..], res2));
assert_eq!(multi(c), Done(&b"azerty"[..], Vec::new()));
}
#[test]
fn separated_nonempty_list() {
named!(multi<&[u8],Vec<&[u8]> >, separated_nonempty_list!(tag!(","), tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"abcd,abcdef"[..];
let c = &b"azerty"[..];
let res1 = vec![&b"abcd"[..]];
assert_eq!(multi(a), Done(&b"ef"[..], res1));
let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(multi(b), Done(&b"ef"[..], res2));
assert_eq!(multi(c), Error(Position(ErrorKind::Tag,c)));
}
// repetition: many0! succeeds with an empty Vec when nothing matches; many1! errors instead.
#[test]
fn many0() {
named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"abcdabcdef"[..];
let c = &b"azerty"[..];
let res1 = vec![&b"abcd"[..]];
assert_eq!(multi(a), Done(&b"ef"[..], res1));
let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(multi(b), Done(&b"ef"[..], res2));
assert_eq!(multi(c), Done(&b"azerty"[..], Vec::new()));
}
#[cfg(feature = "nightly")]
use test::Bencher;
#[cfg(feature = "nightly")]
#[bench]
fn many0_bench(b: &mut Bencher) {
named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd")));
b.iter(|| {
multi(&b"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"[..])
});
}
#[test]
fn many1() {
named!(multi<&[u8],Vec<&[u8]> >, many1!(tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"abcdabcdef"[..];
let c = &b"azerty"[..];
let res1 = vec![&b"abcd"[..]];
assert_eq!(multi(a), Done(&b"ef"[..], res1));
let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(multi(b), Done(&b"ef"[..], res2));
assert_eq!(multi(c), Error(Position(ErrorKind::Many1,c)));
}
// many0!/many1! must terminate even when the inner parser never consumes input.
#[test]
fn infinite_many() {
fn tst(input: &[u8]) -> IResult<&[u8], &[u8]> {
println!("input: {:?}", input);
Error(Position(ErrorKind::Custom(0),input))
}
// should not go into an infinite loop
named!(multi0<&[u8],Vec<&[u8]> >, many0!(tst));
let a = &b"abcdef"[..];
assert_eq!(multi0(a), Done(a, Vec::new()));
named!(multi1<&[u8],Vec<&[u8]> >, many1!(tst));
let a = &b"abcdef"[..];
assert_eq!(multi1(a), Error(Position(ErrorKind::Many1,a)));
}
// count!: applies the parser exactly `size` times; fewer matches is an ErrorKind::Count.
#[test]
fn count() {
fn counter(input: &[u8]) -> IResult<&[u8], Vec<&[u8]>> {
let size: usize = 2;
count!(input, tag!( "abcd" ), size )
}
let a = b"abcdabcdabcdef";
let b = b"abcdefgh";
let res = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
}
// count! with size 0 consumes nothing and returns an empty Vec.
#[test]
fn count_zero() {
fn counter(input: &[u8]) -> IResult<&[u8], Vec<&[u8]>> {
let size: usize = 0;
count!(input, tag!( "abcd" ), size )
}
let a = b"abcdabcdabcdef";
let res: Vec<&[u8]> = Vec::new();
assert_eq!(counter(&a[..]), Done(&b"abcdabcdabcdef"[..], res));
}
// count_fixed!: like count! but fills a fixed-size array instead of a Vec.
#[test]
fn count_fixed() {
//named!(counter< [&[u8]; 2], u32 >, count_fixed!( &[u8], tag!( "abcd" ), 2 ) );
fn counter(input:&[u8]) -> IResult<&[u8], [&[u8]; 2], () > {
count_fixed!(input, &[u8], tag!( "abcd" ), 2 )
}
let a = b"abcdabcdabcdef";
let b = b"abcdefgh";
let res = [&b"abcd"[..], &b"abcd"[..]];
assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
}
use nom::{le_u16,eof};
// compile-only check: count_fixed! composes inside chain! with a non-slice element type.
#[allow(dead_code)]
pub fn compile_count_fixed(input: &[u8]) -> IResult<&[u8], ()> {
chain!(input,
tag!("abcd") ~
count_fixed!( u16, le_u16, 4 ) ~
eof ,
|| { () }
)
}
#[test]
fn count_fixed_no_type() {
//named!(counter< [&[u8]; 2], u32 >, count_fixed!( &[u8], tag!( "abcd" ), 2 ) );
fn counter(input:&[u8]) -> IResult<&[u8], [&[u8]; 2], () > {
count_fixed!(input, &[u8], tag!( "abcd" ), 2 )
}
let a = b"abcdabcdabcdef";
let b = b"abcdefgh";
let res = [&b"abcd"[..], &b"abcd"[..]];
assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
}
use nom::{be_u8,be_u16};
// length_value!: first parser yields an element count, second is applied that many times.
#[test]
fn length_value_test() {
named!(tst1<&[u8], Vec<u16> >, length_value!(be_u8, be_u16));
named!(tst2<&[u8], Vec<u16> >, length_value!(be_u8, be_u16, 2));
let i1 = vec![0, 5, 6];
let i2 = vec![1, 5, 6, 3];
let i3 = vec![2, 5, 6, 3];
let i4 = vec![2, 5, 6, 3, 4, 5, 7];
let i5 = vec![3, 5, 6, 3, 4, 5];
let r1: Vec<u16> = Vec::new();
let r2: Vec<u16> = vec![1286];
let r4: Vec<u16> = vec![1286, 772];
assert_eq!(tst1(&i1), IResult::Done(&i1[1..], r1));
assert_eq!(tst1(&i2), IResult::Done(&i2[3..], r2));
assert_eq!(tst1(&i3), IResult::Incomplete(Needed::Size(5)));
assert_eq!(tst1(&i4), IResult::Done(&i4[5..], r4));
assert_eq!(tst1(&i5), IResult::Incomplete(Needed::Size(7)));
let r6: Vec<u16> = Vec::new();
let r7: Vec<u16> = vec![1286];
let r9: Vec<u16> = vec![1286, 772];
assert_eq!(tst2(&i1), IResult::Done(&i1[1..], r6));
assert_eq!(tst2(&i2), IResult::Done(&i2[3..], r7));
assert_eq!(tst2(&i3), IResult::Incomplete(Needed::Size(5)));
assert_eq!(tst2(&i4), IResult::Done(&i4[5..], r9));
// NOTE(review): this duplicates the tst1(&i5) assertion above; it looks like a
// copy-paste slip and was probably meant to be tst2(&i5) — confirm before changing.
assert_eq!(tst1(&i5), IResult::Incomplete(Needed::Size(7)));
}
// chain!: Incomplete from a later step must report the total bytes needed (4 + 8 = 12).
#[test]
fn chain_incomplete() {
let res = chain!(&b"abcdefgh"[..],
a: take!(4) ~
b: take!(8),
||{(a,b )}
);
assert_eq!(res, IResult::Incomplete(Needed::Size(12)));
}
}
// ---- boundary between concatenated file revisions (extraction artifact) ----
extern crate collections;
use std::fmt::Debug;
use internal::*;
use internal::IResult::*;
/// wraps a parser macro invocation in a closure, optionally fixing the input type
#[macro_export]
macro_rules! closure (
($ty:ty, $submac:ident!( $($args:tt)* )) => (
|i: $ty| { $submac!(i, $($args)*) }
);
($submac:ident!( $($args:tt)* )) => (
|i| { $submac!(i, $($args)*) }
);
);
/// generates a named parser function from a parser macro invocation;
/// the input/output types default to `&[u8]` when omitted
#[macro_export]
macro_rules! named (
// fully explicit signature: named!(name(input_ty) -> output_ty, parser!(...))
($name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => (
fn $name( i: $i ) -> $o {
$submac!(i, $($args)*)
}
);
// input/output types given, wrapped in IResult
($name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
fn $name( i: $i ) -> IResult<$i, $o> {
$submac!(i, $($args)*)
}
);
// default byte-slice in, byte-slice out
($name:ident, $submac:ident!( $($args:tt)* )) => (
fn $name( i: &[u8] ) -> IResult<&[u8], &[u8]> {
$submac!(i, $($args)*)
}
);
);
/// applies a plain function as a parser; used by other macros to
/// uniformly accept either a function name or a nested macro invocation
#[macro_export]
macro_rules! call (
($i:expr, $fun:expr) => ( $fun( $i ) );
);
/// declares a byte array as a suite to recognize
///
/// consumes the recognized characters
///
/// ```ignore
/// tag!(x "abcd");
/// let r = Done((), b"abcdabcdefgh").flat_map(x);
/// assert_eq!(r, Done(b"efgh", b"abcd"));
/// ```
#[macro_export]
macro_rules! tag (
($i:expr, $inp: expr) => (
{
// monomorphization helper: lets the caller pass &str, &[u8], byte literals, etc.
#[inline(always)]
fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
b.as_bytes()
}
let expected = $inp;
let bytes = as_bytes(&expected);
if bytes.len() > $i.len() {
// not enough input to decide: report how many bytes would be needed
Incomplete(Needed::Size(bytes.len() as u32))
} else if &$i[0..bytes.len()] == bytes {
// match: remaining input first, consumed prefix second
Done(&$i[bytes.len()..], &$i[0..bytes.len()])
} else {
Error(0)
}
}
);
);
/// transforms the output of a parser with a function or parser macro;
/// errors and Incomplete are passed through unchanged
#[macro_export]
macro_rules! map(
($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
{
match $submac!($i, $($args)*) {
Error(ref e) => Error(*e),
Incomplete(Needed::Unknown) => Incomplete(Needed::Unknown),
Incomplete(Needed::Size(i)) => Incomplete(Needed::Size(i)),
Done(i, o) => Done(i, $submac2!(o, $($args2)*))
}
}
);
// the remaining rules normalize plain functions into call!() so the first rule applies
($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
map!($i, $submac!($($args)*), call!($g));
);
($i:expr, $f:expr, $g:expr) => (
map!($i, call!($f), call!($g));
);
($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
map!($i, call!($f), $submac!($($args)*));
);
);
/// like map! but the transformation returns a Result; Err becomes a parse Error
#[macro_export]
macro_rules! map_res(
($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
{
match $submac!($i, $($args)*) {
Error(ref e) => Error(*e),
Incomplete(Needed::Unknown) => Incomplete(Needed::Unknown),
Incomplete(Needed::Size(i)) => Incomplete(Needed::Size(i)),
Done(i, o) => match $submac2!(o, $($args2)*) {
Ok(output) => Done(i, output),
Err(_) => Error(0)
}
}
}
);
($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
map_res!($i, $submac!($($args)*), call!($g));
);
($i:expr, $f:expr, $g:expr) => (
map_res!($i, call!($f), call!($g));
);
($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
map_res!($i, call!($f), $submac!($($args)*));
);
);
/// like map! but the transformation returns an Option; None becomes a parse Error
#[macro_export]
macro_rules! map_opt(
($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
{
match $submac!($i, $($args)*) {
Error(ref e) => Error(*e),
Incomplete(Needed::Unknown) => Incomplete(Needed::Unknown),
Incomplete(Needed::Size(i)) => Incomplete(Needed::Size(i)),
Done(i, o) => match $submac2!(o, $($args2)*) {
Some(output) => Done(i, output),
None => Error(0)
}
}
}
);
($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
map_opt!($i, $submac!($($args)*), call!($g));
);
($i:expr, $f:expr, $g:expr) => (
map_opt!($i, call!($f), call!($g));
);
($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
map_opt!($i, call!($f), $submac!($($args)*));
);
);
/// chains parsers and assemble the results through a closure
///
/// ```ignore
/// #[derive(PartialEq,Eq,Debug)]
/// struct B {
/// a: u8,
/// b: Option<u8>
/// }
///
/// tag!(x "abcd");
/// tag!(y "efgh");
///
/// fn ret_int(i:&[u8]) -> IResult<&[u8], u8> { Done(i, 1) };
/// fn ret_y(i:&[u8]) -> IResult<&[u8], u8> { y(i).map(|_| 1) }; // return 1 if the "efgh" tag is found
///
/// chain!(z<&[u8], u8>,
/// x ~
/// aa: ret_int ~ // the result of that parser will be used in the closure
/// x? ~ // this parser is optional
/// bb: ret_y? , // the result of that parser is an option
/// ||{B{a: aa, b: bb}}
/// );
///
/// // the first "abcd" tag is not present, we have an error
/// let r1 = z(b"efgh");
/// assert_eq!(r1, Error(0));
///
/// // everything is present, everything is parsed
/// let r2 = z(b"abcdabcdefgh");
/// assert_eq!(r2, Done(b"", B{a: 1, b: Some(1)}));
///
/// // the second "abcd" tag is optional
/// let r3 = z(b"abcdefgh");
/// assert_eq!(r3, Done(b"", B{a: 1, b: Some(1)}));
///
/// // the result of ret_y is optional, as seen in the B structure
/// let r4 = z(b"abcdabcd");
/// assert_eq!(r4, Done(b"", B{a: 1, b: None}));
/// ```
#[macro_export]
macro_rules! chain (
($i:expr, $($rest:tt)*) => (
chaining_parser!($i, $($rest)*)
);
);
/// recursive implementation behind chain!: peels one step per expansion.
/// Rule families, in matching order (order is significant — more specific
/// patterns like `field: parser ?` must be tried before plain `parser`):
/// - `e ~ rest` / `submac!(...) ~ rest`: run a step, discard its value
/// - `e ? ~ rest`: optional step — Error falls through to the rest with the
///   ORIGINAL input, Incomplete still propagates
/// - `field: e ~ rest`: bind the step's value to `field` for the closure
/// - `field: e ? ~ rest`: bind `Some(value)` or `None`
/// - the `, $assemble` variants terminate the chain and invoke the closure
#[macro_export]
macro_rules! chaining_parser (
($i:expr, $e:ident ~ $($rest:tt)*) => (
chaining_parser!($i, call!($e) ~ $($rest)*);
);
($i:expr, $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
match $submac!($i, $($args)*) {
IResult::Error(e) => IResult::Error(e),
IResult::Incomplete(i) => IResult::Incomplete(i),
IResult::Done(i,_) => {
chaining_parser!(i, $($rest)*)
}
}
);
($i:expr, $e:ident ? ~ $($rest:tt)*) => (
chaining_parser!($i, call!($e) ? ~ $($rest)*);
);
($i:expr, $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => (
match $submac!($i, $($args)*) {
IResult::Incomplete(i) => IResult::Incomplete(i),
IResult::Error(_) => {
chaining_parser!($i, $($rest)*)
},
IResult::Done(i,_) => {
chaining_parser!(i, $($rest)*)
}
}
);
($i:expr, $field:ident : $e:ident ~ $($rest:tt)*) => (
chaining_parser!($i, $field: call!($e) ~ $($rest)*);
);
($i:expr, $field:ident : $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
match $submac!($i, $($args)*) {
IResult::Error(e) => IResult::Error(e),
IResult::Incomplete(i) => IResult::Incomplete(i),
IResult::Done(i,o) => {
let $field = o;
chaining_parser!(i, $($rest)*)
}
}
);
($i:expr, $field:ident : $e:ident ? ~ $($rest:tt)*) => (
chaining_parser!($i, $field : call!($e) ? ~ $($rest)*);
);
($i:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => (
match $submac!($i, $($args)*) {
IResult::Incomplete(i) => IResult::Incomplete(i),
IResult::Error(_) => {
let $field = None;
chaining_parser!($i, $($rest)*)
},
IResult::Done(i,o) => {
let $field = Some(o);
chaining_parser!(i, $($rest)*)
}
}
);
// ending the chain
($i:expr, $e:ident, $assemble:expr) => (
chaining_parser!($i, call!($e), $assemble);
);
($i:expr, $submac:ident!( $($args:tt)* ), $assemble:expr) => (
match $submac!($i, $($args)*) {
IResult::Error(e) => IResult::Error(e),
IResult::Incomplete(i) => IResult::Incomplete(i),
IResult::Done(i,_) => {
IResult::Done(i, $assemble())
}
}
);
($i:expr, $e:ident ?, $assemble:expr) => (
chaining_parser!($i, call!($e) ?, $assemble);
);
($i:expr, $submac:ident!( $($args:tt)* ) ?, $assemble:expr) => (
match $submac!($i, $($args)*) {
IResult::Incomplete(i) => IResult::Incomplete(i),
IResult::Error(_) => {
IResult::Done($i, $assemble())
},
IResult::Done(i,_) => {
IResult::Done(i, $assemble())
}
}
);
($i:expr, $field:ident : $e:ident, $assemble:expr) => (
chaining_parser!($i, $field: call!($e), $assemble);
);
($i:expr, $field:ident : $submac:ident!( $($args:tt)* ), $assemble:expr) => (
match $submac!($i, $($args)*) {
IResult::Error(e) => IResult::Error(e),
IResult::Incomplete(i) => IResult::Incomplete(i),
IResult::Done(i,o) => {
let $field = o;
IResult::Done(i, $assemble())
}
}
);
($i:expr, $field:ident : $e:ident ? , $assemble:expr) => (
chaining_parser!($i, $field : call!($e) ? , $assemble);
);
($i:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? , $assemble:expr) => (
match $submac!($i, $($args)*) {
IResult::Incomplete(i) => IResult::Incomplete(i),
IResult::Error(_) => {
let $field = None;
IResult::Done($i, $assemble())
},
IResult::Done(i,o) => {
let $field = Some(o);
IResult::Done(i, $assemble())
}
}
);
// base case: no parsers left, just build the result from the bound fields
($i:expr, $assemble:expr) => (
IResult::Done($i, $assemble())
)
);
/// try a list of parser, return the result of the first successful one
///
/// Incomplete results are ignored
///
/// ```ignore
/// tag!(x "abcd");
/// tag!(y "efgh");
/// named!(test, alt!(x | y));
/// let r1 = test(b"abcdefgh"));
/// assert_eq!(r1, Done(b"efgh", b"abcd"));
/// let r2 = test(b"efghijkl"));
/// assert_eq!(r2, Done(b"ijkl", b"efgh"));
/// ```
#[macro_export]
macro_rules! alt (
($i:expr, $($rest:tt)*) => (
{
alt_parser!($i, $($rest)*)
}
);
);
/// recursive implementation behind alt!: tries alternatives left to right.
/// Note that Incomplete is treated like Error here (the next alternative is
/// tried with the original, unconsumed input).
#[macro_export]
macro_rules! alt_parser (
($i:expr, $e:ident | $($rest:tt)*) => (
alt_parser!($i, call!($e) | $($rest)*);
);
($i:expr, $submac:ident!( $($args:tt)*) | $($rest:tt)*) => (
{
match $submac!($i, $($args)*) {
IResult::Error(_) => alt_parser!($i, $($rest)*),
IResult::Incomplete(_) => alt_parser!($i, $($rest)*),
IResult::Done(i,o) => IResult::Done(i,o)
}
}
);
($i:expr, $e:ident) => (
alt_parser!($i, call!($e));
);
($i:expr, $submac:ident!( $($args:tt)*)) => (
match $submac!($i, $($args)*) {
IResult::Error(_) => alt_parser!($i),
IResult::Incomplete(_) => alt_parser!($i),
IResult::Done(i,o) => IResult::Done(i,o)
}
);
// base case: every alternative failed
($i:expr) => (
IResult::Error(1)
);
);
/// returns the longest list of bytes that do not appear in the provided array
///
/// ```ignore
/// is_not!(not_space b" \t\r\n");
/// let r = not_space(b"abcdefgh\nijkl"));
/// assert_eq!(r, Done(b"\nijkl", b"abcdefgh"));
/// ```
#[macro_export]
macro_rules! is_not(
($input:expr, $arr:expr) => (
{
#[inline(always)]
fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
b.as_bytes()
}
let expected = $arr;
let bytes = as_bytes(&expected);
// stop at the first byte that IS in the forbidden set
for idx in 0..$input.len() {
for &i in bytes.iter() {
if $input[idx] == i {
return IResult::Done(&$input[idx..], &$input[0..idx])
}
}
}
// no forbidden byte found: the whole input matches
IResult::Done(b"", $input)
}
);
);
/// returns the longest list of bytes that appear in the provided array
///
/// ```ignore
/// is_a!(abcd b"abcd");
/// let r1 = abcd(b"aaaaefgh"));
/// assert_eq!(r1, Done(b"efgh", b"aaaa"));
///
/// let r2 = abcd(b"dcbaefgh"));
/// assert_eq!(r2, Done(b"efgh", b"dcba"));
/// ```
#[macro_export]
macro_rules! is_a(
($input:expr, $arr:expr) => (
{
#[inline(always)]
fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
b.as_bytes()
}
let expected = $arr;
let bytes = as_bytes(&expected);
// stop at the first byte that is NOT in the accepted set
for idx in 0..$input.len() {
let mut res = false;
for &i in bytes.iter() {
if $input[idx] == i {
res = true;
break;
}
}
if !res {
return IResult::Done(&$input[idx..], &$input[0..idx])
}
}
// every byte was accepted: the whole input matches
IResult::Done(b"", $input)
}
);
);
/// returns the longest list of bytes until the provided parser fails
///
/// ```ignore
/// filter!(alpha is_alphanumeric);
/// let r = alpha(b"abcd\nefgh"));
/// assert_eq!(r, Done(b"\nefgh", b"abcd"));
/// ```
#[macro_export]
macro_rules! filter(
// FIX: this rule previously expanded to `filter!($i, call!($f))`, referencing
// metavariables `$i`/`$f` that the pattern (`$input`/`$arr`) never bound, so
// any `filter!(input, predicate_fn)` invocation failed to compile.
($i:expr, $f:expr) => (
filter!($i, call!($f));
);
($input:expr, $submac:ident!( $($args:tt)* )) => (
{
// consume bytes while the predicate holds; split at the first failure
for idx in 0..$input.len() {
if !$submac!($input[idx], $($args)*) {
return IResult::Done(&$input[idx..], &$input[0..idx])
}
}
// predicate held for every byte: consume the whole input
IResult::Done(b"", $input)
}
);
);
/// make the underlying parser optional
///
/// returns an Option of the returned type
///
/// ```ignore
/// tag!(x "abcd");
/// opt!(o<&[u8],&[u8]> x);
///
/// let a = b"abcdef";
/// let b = b"bcdefg";
/// assert_eq!(o(a), Done(b"ef", Some(b"abcd")));
/// assert_eq!(o(b), Done(b"bcdefg", None));
/// ```
#[macro_export]
macro_rules! opt(
($i:expr, $submac:ident!( $($args:tt)* )) => (
{
match $submac!($i, $($args)*) {
IResult::Done(i,o) => IResult::Done(i, Some(o)),
// failure is not fatal: consume nothing and yield None
IResult::Error(_) => IResult::Done($i, None),
IResult::Incomplete(i) => IResult::Incomplete(i)
}
}
);
// FIX: this expansion was `opt($i, call($f))` — both macro `!`s were missing,
// so passing a plain function to opt! tried to call nonexistent functions
// `opt` and `call` and failed to compile.
($i:expr, $f:expr) => (
opt!($i, call!($f));
);
);
/// returns a result without consuming the input
///
/// the embedded parser may return Incomplete
///
/// ```ignore
/// tag!(x "abcd");
/// peek!(ptag<&[u8], &[u8]> x);
/// let r = ptag(b"abcdefgh"));
/// assert_eq!(r, Done(b"abcdefgh", b"abcd"));
/// ```
#[macro_export]
macro_rules! peek(
($i:expr, $submac:ident!( $($args:tt)* )) => (
{
match $submac!($i, $($args)*) {
// on success, hand back the ORIGINAL input so nothing is consumed
IResult::Done(i,o) => IResult::Done($i, o),
IResult::Error(a) => IResult::Error(a),
IResult::Incomplete(i) => IResult::Incomplete(i)
}
}
);
// FIX: the expansion was `call!(f)` (missing `$`), which referenced a
// nonexistent identifier `f` instead of the captured parser `$f`.
($i:expr, $f:expr) => (
peek!($i, call!($f));
);
);
/// Applies the parser 0 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```ignore
/// tag!(x "abcd");
/// many0!(multi<&[u8],&[u8]> x);
///
/// let a = b"abcdabcdef";
/// let b = b"azerty";
///
/// let res = vec![b"abcd", b"abcd"];
/// assert_eq!(multi(a), Done(b"ef", res));
/// assert_eq!(multi(b), Done(b"azerty", Vec::new()));
/// ```
/// 0 or more
#[macro_export]
macro_rules! many0(
($i:expr, $submac:ident!( $($args:tt)* )) => (
{
// begin: consumed-byte offset into $i; remaining: bytes left after last match
let mut begin = 0;
let mut remaining = $i.len();
let mut res = Vec::new();
loop {
match $submac!(&$i[begin..], $($args)*) {
IResult::Done(i,o) => {
res.push(o);
begin += remaining - i.len();
remaining = i.len();
},
// Error or Incomplete ends the repetition without failing
_ => {
break;
}
}
}
IResult::Done(&$i[begin..], res)
}
);
($i:expr, $f:expr) => (
many0!($i, call!($f));
);
);
/// Applies the parser 0 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```ignore
/// tag!(x "abcd");
/// many1!(multi<&[u8],&[u8]> x);
///
/// let a = b"abcdabcdef";
/// let b = b"azerty";
///
/// let res = vec![b"abcd", b"abcd"];
/// assert_eq!(multi(a), Done(b"ef", res));
/// assert_eq!(multi(b), Error(0));
/// ```
#[macro_export]
macro_rules! many1(
($i:expr, $submac:ident!( $($args:tt)* )) => (
{
let mut begin = 0;
let mut remaining = $i.len();
let mut res = Vec::new();
loop {
match $submac!(&$i[begin..], $($args)*) {
IResult::Done(i,o) => {
res.push(o);
begin += remaining - i.len();
remaining = i.len();
},
_ => {
break;
}
}
}
// unlike many0!, zero consumed bytes is an error
if begin == 0 {
IResult::Error(0)
} else {
IResult::Done(&$i[begin..], res)
}
}
);
($i:expr, $f:expr) => (
many1!($i, call!($f));
);
);
/// generates a parser consuming the specified number of bytes
///
/// ```ignore
/// take!(take5 5);
///
/// let a = b"abcdefgh";
///
/// assert_eq!(take5(a), Done(b"fgh", b"abcde"));
/// ```
#[macro_export]
macro_rules! take(
($i:expr, $count:expr) => (
{
if $i.len() < $count {
// not enough bytes yet: report the total needed
Incomplete(Needed::Size($count))
} else {
Done(&$i[$count..],&$i[0..$count])
}
}
);
);
/// generates a parser consuming bytes until the specified byte sequence is found
#[macro_export]
macro_rules! take_until(
($i:expr, $inp:expr) => (
{
#[inline(always)]
fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
b.as_bytes()
}
let expected = $inp;
let bytes = as_bytes(&expected);
for idx in 0..$i.len() {
if idx + bytes.len() > $i.len() {
// the needle cannot fit in the remaining input
return Incomplete(Needed::Size((idx + bytes.len()) as u32))
}
if &$i[idx..idx + bytes.len()] == bytes {
// NOTE(review): this branch is unreachable — the check above already
// returned Incomplete whenever idx + bytes.len() > $i.len().
if idx + bytes.len() > $i.len() {
return Done(b"", &$i[0..idx])
} else {
// consume the prefix AND the needle itself
return Done(&$i[(idx + bytes.len())..], &$i[0..idx])
}
}
}
return Error(0)
}
);
);
/// like take_until! but leaves the matched byte sequence in the remaining input
#[macro_export]
macro_rules! take_until_and_leave(
($i:expr, $inp:expr) => (
{
#[inline(always)]
fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
b.as_bytes()
}
let expected = $inp;
let bytes = as_bytes(&expected);
for idx in 0..$i.len() {
if idx + bytes.len() > $i.len() {
return Incomplete(Needed::Size((idx + bytes.len()) as u32))
}
if &$i[idx..idx+bytes.len()] == bytes {
// the needle stays at the start of the remaining input
return Done(&$i[idx..], &$i[0..idx])
}
}
return Error(0)
}
);
);
/// consumes bytes until one of the bytes in the provided set is found (the
/// matching byte itself is consumed)
#[macro_export]
macro_rules! take_until_either(
($i:expr, $inp:expr) => (
{
#[inline(always)]
fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
b.as_bytes()
}
let expected = $inp;
let bytes = as_bytes(&expected);
for idx in 0..$i.len() {
// NOTE(review): idx < $i.len() inside this loop, so idx + 1 > $i.len()
// can never hold — this Incomplete branch appears unreachable.
if idx + 1 > $i.len() {
return Incomplete(Needed::Size(1 + idx as u32))
}
for &t in bytes.iter() {
if $i[idx] == t {
// NOTE(review): also unreachable for the same reason as above.
if idx + 1 > $i.len() {
return Done(b"", &$i[0..idx])
} else {
return Done(&$i[(idx+1)..], &$i[0..idx])
}
}
}
}
return Error(0)
}
);
);
/// like take_until_either! but leaves the matching byte in the remaining input
#[macro_export]
macro_rules! take_until_either_and_leave(
($i:expr, $inp:expr) => (
{
#[inline(always)]
fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
b.as_bytes()
}
let expected = $inp;
let bytes = as_bytes(&expected);
for idx in 0..$i.len() {
// NOTE(review): unreachable guard (idx < $i.len() in this loop).
if idx + 1 > $i.len() {
return Incomplete(Needed::Size(1 + idx as u32))
}
for &t in bytes.iter() {
if $i[idx] == t {
return Done(&$i[idx..], &$i[0..idx])
}
}
}
return Error(0)
}
);
);
/// generates a named parser that first reads an element count with `$f`, then
/// applies `$g` that many times, collecting the results in a Vec. The optional
/// trailing `$length` is the known byte size of one `$g` element, used only to
/// compute a better Needed::Size on Incomplete.
#[macro_export]
macro_rules! length_value(
($name:ident<$i:ty,$o:ty> $f:ident $g:ident) => (
fn $name(input:$i) -> IResult<$i, Vec<$o>> {
match $f(input) {
Error(a) => Error(a),
Incomplete(i) => Incomplete(i),
Done(i1,nb) => {
// bytes consumed by the count parser; used when reporting Incomplete
let length_token = input.len() - i1.len();
let mut begin = 0;
let mut remaining = i1.len();
let mut res: Vec<$o> = Vec::new();
loop {
if res.len() == nb as usize {
return Done(&i1[begin..], res);
}
match $g(&i1[begin..]) {
Done(i2,o2) => {
res.push(o2);
let parsed = remaining - i2.len();
begin += parsed;
remaining = i2.len();
if begin >= i1.len() {
// ran out of input before reading `nb` elements; estimate the
// total size from the last element's parsed width
return Incomplete(Needed::Size((length_token + nb as usize * parsed) as u32));
}
},
Error(a) => return Error(a),
Incomplete(Needed::Unknown) => {
return Incomplete(Needed::Unknown)
},
Incomplete(Needed::Size(a)) => {
return Incomplete(Needed::Size(length_token as u32 + a * nb as u32))
}
}
}
}
}
}
);
// variant with a statically known per-element byte length
($name:ident<$i:ty,$o:ty> $f:ident $g:ident $length:expr) => (
fn $name(input:$i) -> IResult<$i, Vec<$o>> {
match $f(input) {
Error(a) => Error(a),
Incomplete(i) => Incomplete(i),
Done(i1,nb) => {
let length_token = input.len() - i1.len();
let mut begin = 0;
let mut remaining = i1.len();
let mut res: Vec<$o> = Vec::new();
loop {
if res.len() == nb as usize {
return Done(&i1[begin..], res);
}
match $g(&i1[begin..]) {
Done(i2,o2) => {
res.push(o2);
let parsed = remaining - i2.len();
begin += parsed;
remaining = i2.len();
if begin >= i1.len() {
return Incomplete(Needed::Size((length_token + nb as usize * $length) as u32));
}
},
Error(a) => return Error(a),
Incomplete(Needed::Unknown) => {
return Incomplete(Needed::Unknown)
},
Incomplete(Needed::Size(_)) => {
return Incomplete(Needed::Size(length_token as u32 + $length * nb as u32))
}
}
}
}
}
}
);
);
#[cfg(test)]
mod tests {
use super::*;
use map::*;
use internal::Needed;
use internal::IResult;
use internal::IResult::*;
// is_a!: consumes the longest prefix made only of bytes from the given set.
#[test]
fn is_a() {
named!(a_or_b, is_a!(&b"ab"[..]));
let a = &b"abcd"[..];
assert_eq!(a_or_b(a), Done(&b"cd"[..], &b"ab"[..]));
let b = &b"bcde"[..];
assert_eq!(a_or_b(b), Done(&b"cde"[..], &b"b"[..]));
let c = &b"cdef"[..];
assert_eq!(a_or_b(c), Done(&b"cdef"[..], &b""[..]));
let d = &b"bacdef"[..];
assert_eq!(a_or_b(d), Done(&b"cdef"[..], &b"ba"[..]));
}
#[derive(PartialEq,Eq,Debug)]
struct B {
a: u8,
b: u8
}
// chain!: sequencing with field bindings and an optional middle tag.
#[test]
fn chain2() {
fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
named!(f<&[u8],B>,
chain!(
tag!("abcd") ~
tag!("abcd")? ~
aa: ret_int1 ~
tag!("efgh") ~
bb: ret_int2 ~
tag!("efgh") ,
||{B{a: aa, b: bb}}
)
);
let r = f(&b"abcdabcdefghefghX"[..]);
assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));
let r2 = f(&b"abcdefghefghX"[..]);
assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
}
// chain! invocations can nest as steps of an outer chain!.
#[test]
fn nested_chain() {
fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
named!(f<&[u8],B>,
chain!(
chain!(
tag!("abcd") ~
tag!("abcd")? ,
|| {}
) ~
aa: ret_int1 ~
tag!("efgh") ~
bb: ret_int2 ~
tag!("efgh") ,
||{B{a: aa, b: bb}}
)
);
let r = f(&b"abcdabcdefghefghX"[..]);
assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));
let r2 = f(&b"abcdefghefghX"[..]);
assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
}
#[derive(PartialEq,Eq,Debug)]
struct C {
a: u8,
b: Option<u8>
}
// chain! with a trailing optional field: binds Some(v) or None.
#[test]
fn chain_opt() {
named!(y, tag!("efgh"));
fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
fn ret_y(i:&[u8]) -> IResult<&[u8], u8> {
y(i).map(|_| 2)
};
named!(f<&[u8],C>,
chain!(
tag!("abcd") ~
aa: ret_int1 ~
bb: ret_y? ,
||{C{a: aa, b: bb}}
)
);
let r = f(&b"abcdefghX"[..]);
assert_eq!(r, Done(&b"X"[..], C{a: 1, b: Some(2)}));
let r2 = f(&b"abcdWXYZ"[..]);
assert_eq!(r2, Done(&b"WXYZ"[..], C{a: 1, b: None}));
let r3 = f(&b"abcdX"[..]);
assert_eq!(r3, Incomplete(Needed::Size(4)));
}
// alt!: first successful alternative wins; all-fail yields Error(1).
#[test]
fn alt() {
fn work(input: &[u8]) -> IResult<&[u8],&[u8]> {
Done(&b""[..], input)
}
#[allow(unused_variables)]
fn dont_work(input: &[u8]) -> IResult<&[u8],&[u8]> {
Error(3)
}
fn work2(input: &[u8]) -> IResult<&[u8],&[u8]> {
Done(input, &b""[..])
}
named!(alt1, alt!(dont_work | dont_work));
named!(alt2, alt!(dont_work | work));
named!(alt3, alt!(dont_work | dont_work | work2 | dont_work));
let a = &b"abcd"[..];
assert_eq!(alt1(a), Error(1));
assert_eq!(alt2(a), Done(&b""[..], a));
assert_eq!(alt3(a), Done(a, &b""[..]));
named!(alt4, alt!(tag!("abcd") | tag!("efgh")));
let b = &b"efgh"[..];
assert_eq!(alt4(a), Done(&b""[..], a));
assert_eq!(alt4(b), Done(&b""[..], b));
}
// opt!: Some on match, None (nothing consumed) on failure.
#[test]
fn opt() {
named!(o<&[u8],Option<&[u8]> >, opt!(tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"bcdefg"[..];
assert_eq!(o(a), Done(&b"ef"[..], Some(&b"abcd"[..])));
assert_eq!(o(b), Done(&b"bcdefg"[..], None));
}
// peek!: returns the value without consuming input; failures propagate.
#[test]
fn peek() {
named!(ptag<&[u8],&[u8]>, peek!(tag!("abcd")));
let r1 = ptag(&b"abcdefgh"[..]);
assert_eq!(r1, Done(&b"abcdefgh"[..], &b"abcd"[..]));
let r1 = ptag(&b"efgh"[..]);
assert_eq!(r1, Error(0));
}
// many0!: zero matches succeed with an empty Vec.
#[test]
fn many0() {
named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"abcdabcdef"[..];
let c = &b"azerty"[..];
let res1 = vec![&b"abcd"[..]];
assert_eq!(multi(a), Done(&b"ef"[..], res1));
let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(multi(b), Done(&b"ef"[..], res2));
assert_eq!(multi(c), Done(&b"azerty"[..], Vec::new()));
}
// many1!: zero matches are an error.
#[test]
fn many1() {
named!(multi<&[u8],Vec<&[u8]> >, many1!(tag!("abcd")));
let a = &b"abcdef"[..];
let b = &b"abcdabcdef"[..];
let c = &b"azerty"[..];
let res1 = vec![&b"abcd"[..]];
assert_eq!(multi(a), Done(&b"ef"[..], res1));
let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
assert_eq!(multi(b), Done(&b"ef"[..], res2));
assert_eq!(multi(c), Error(0));
}
// take_until!: consumes up to and including the needle; missing needle is Incomplete.
#[test]
fn take_until_test() {
named!(x, take_until!("efgh"));
let r = x(&b"abcdabcdefghijkl"[..]);
assert_eq!(r, Done(&b"ijkl"[..], &b"abcdabcd"[..]));
println!("Done 1\n");
let r2 = x(&b"abcdabcdefgh"[..]);
assert_eq!(r2, Done(&b""[..], &b"abcdabcd"[..]));
println!("Done 2\n");
let r3 = x(&b"abcefg"[..]);
assert_eq!(r3, Incomplete(Needed::Size(7)));
}
use nom::{be_u8,be_u16};
// length_value!: count-prefixed element lists, with and without a fixed element size.
#[test]
fn length_value_test() {
length_value!(tst1<&[u8], u16 > be_u8 be_u16);
length_value!(tst2<&[u8], u16 > be_u8 be_u16 2);
let i1 = vec![0, 5, 6];
let i2 = vec![1, 5, 6, 3];
let i3 = vec![2, 5, 6, 3];
let i4 = vec![2, 5, 6, 3, 4, 5, 7];
let i5 = vec![3, 5, 6, 3, 4, 5];
let r1: Vec<u16> = Vec::new();
let r2: Vec<u16> = vec![1286];
let r4: Vec<u16> = vec![1286, 772];
assert_eq!(tst1(&i1), IResult::Done(&i1[1..], r1));
assert_eq!(tst1(&i2), IResult::Done(&i2[3..], r2));
assert_eq!(tst1(&i3), IResult::Incomplete(Needed::Size(5)));
assert_eq!(tst1(&i4), IResult::Done(&i4[5..], r4));
assert_eq!(tst1(&i5), IResult::Incomplete(Needed::Size(7)));
let r6: Vec<u16> = Vec::new();
let r7: Vec<u16> = vec![1286];
let r9: Vec<u16> = vec![1286, 772];
assert_eq!(tst2(&i1), IResult::Done(&i1[1..], r6));
assert_eq!(tst2(&i2), IResult::Done(&i2[3..], r7));
assert_eq!(tst2(&i3), IResult::Incomplete(Needed::Size(5)));
assert_eq!(tst2(&i4), IResult::Done(&i4[5..], r9));
// NOTE(review): duplicates the tst1(&i5) assertion above; probably meant
// tst2(&i5) — confirm before changing.
assert_eq!(tst1(&i5), IResult::Incomplete(Needed::Size(7)));
}
}
// alt parsers can embed a closure now
extern crate collections;
use std::fmt::Debug;
use internal::*;
use internal::IResult::*;
/// wraps a parser macro invocation in a closure, optionally fixing the input type
#[macro_export]
macro_rules! closure (
($ty:ty, $submac:ident!( $($args:tt)* )) => (
|i: $ty| { $submac!(i, $($args)*) }
);
($submac:ident!( $($args:tt)* )) => (
|i| { $submac!(i, $($args)*) }
);
);
/// generates a named parser function; input/output types default to `&[u8]`
#[macro_export]
macro_rules! named (
($name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => (
fn $name( i: $i ) -> $o {
$submac!(i, $($args)*)
}
);
($name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
fn $name( i: $i ) -> IResult<$i, $o> {
$submac!(i, $($args)*)
}
);
($name:ident, $submac:ident!( $($args:tt)* )) => (
fn $name( i: &[u8] ) -> IResult<&[u8], &[u8]> {
$submac!(i, $($args)*)
}
);
);
/// applies a plain function as a parser (uniform shim for ident arguments)
#[macro_export]
macro_rules! call (
($i:expr, $fun:expr) => ( $fun( $i ) );
);
/// declares a byte array as a suite to recognize
///
/// consumes the recognized characters
///
/// ```ignore
/// tag!(x "abcd");
/// let r = Done((), b"abcdabcdefgh").flat_map(x);
/// assert_eq!(r, Done(b"efgh", b"abcd"));
/// ```
#[macro_export]
macro_rules! tag (
($i:expr, $inp: expr) => (
{
// accepts &str, &[u8], byte literals, etc.
#[inline(always)]
fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
b.as_bytes()
}
let expected = $inp;
let bytes = as_bytes(&expected);
if bytes.len() > $i.len() {
// too little input to decide
Incomplete(Needed::Size(bytes.len() as u32))
} else if &$i[0..bytes.len()] == bytes {
Done(&$i[bytes.len()..], &$i[0..bytes.len()])
} else {
Error(0)
}
}
);
);
/// transforms the output of a parser; errors and Incomplete pass through unchanged
#[macro_export]
macro_rules! map(
($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
{
match $submac!($i, $($args)*) {
Error(ref e) => Error(*e),
Incomplete(Needed::Unknown) => Incomplete(Needed::Unknown),
Incomplete(Needed::Size(i)) => Incomplete(Needed::Size(i)),
Done(i, o) => Done(i, $submac2!(o, $($args2)*))
}
}
);
// normalize plain functions into call!() so the first rule applies
($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
map!($i, $submac!($($args)*), call!($g));
);
($i:expr, $f:expr, $g:expr) => (
map!($i, call!($f), call!($g));
);
($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
map!($i, call!($f), $submac!($($args)*));
);
);
/// like map! but the transformation returns a Result; Err becomes a parse Error
#[macro_export]
macro_rules! map_res(
($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
{
match $submac!($i, $($args)*) {
Error(ref e) => Error(*e),
Incomplete(Needed::Unknown) => Incomplete(Needed::Unknown),
Incomplete(Needed::Size(i)) => Incomplete(Needed::Size(i)),
Done(i, o) => match $submac2!(o, $($args2)*) {
Ok(output) => Done(i, output),
Err(_) => Error(0)
}
}
}
);
($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
map_res!($i, $submac!($($args)*), call!($g));
);
($i:expr, $f:expr, $g:expr) => (
map_res!($i, call!($f), call!($g));
);
($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
map_res!($i, call!($f), $submac!($($args)*));
);
);
/// like map! but the transformation returns an Option; None becomes a parse Error
#[macro_export]
macro_rules! map_opt(
($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
{
match $submac!($i, $($args)*) {
Error(ref e) => Error(*e),
Incomplete(Needed::Unknown) => Incomplete(Needed::Unknown),
Incomplete(Needed::Size(i)) => Incomplete(Needed::Size(i)),
Done(i, o) => match $submac2!(o, $($args2)*) {
Some(output) => Done(i, output),
None => Error(0)
}
}
}
);
($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
map_opt!($i, $submac!($($args)*), call!($g));
);
($i:expr, $f:expr, $g:expr) => (
map_opt!($i, call!($f), call!($g));
);
($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
map_opt!($i, call!($f), $submac!($($args)*));
);
);
/// chains parsers and assemble the results through a closure
///
/// ```ignore
/// #[derive(PartialEq,Eq,Debug)]
/// struct B {
///     a: u8,
///     b: Option<u8>
/// }
///
/// tag!(x "abcd");
/// tag!(y "efgh");
///
/// fn ret_int(i:&[u8]) -> IResult<&[u8], u8> { Done(i, 1) };
/// fn ret_y(i:&[u8]) -> IResult<&[u8], u8> { y(i).map(|_| 1) }; // return 1 if the "efgh" tag is found
///
/// chain!(z<&[u8], u8>,
///     x            ~
///     aa: ret_int  ~     // the result of that parser will be used in the closure
///     x?           ~     // this parser is optional
///     bb: ret_y?   ,     // the result of that parser is an option
///     ||{B{a: aa, b: bb}}
/// );
///
/// // the first "abcd" tag is not present, we have an error
/// let r1 = z(b"efgh");
/// assert_eq!(r1, Error(0));
///
/// // everything is present, everything is parsed
/// let r2 = z(b"abcdabcdefgh");
/// assert_eq!(r2, Done(b"", B{a: 1, b: Some(1)}));
///
/// // the second "abcd" tag is optional
/// let r3 = z(b"abcdefgh");
/// assert_eq!(r3, Done(b"", B{a: 1, b: Some(1)}));
///
/// // the result of ret_y is optional, as seen in the B structure
/// let r4 = z(b"abcdabcd");
/// assert_eq!(r4, Done(b"", B{a: 1, b: None}));
/// ```
// `chain!` is only a thin entry point; all the recursive token munching is
// delegated to `chaining_parser!` below.
#[macro_export]
macro_rules! chain (
    ($i:expr, $($rest:tt)*) => (
        chaining_parser!($i, $($rest)*)
    );
);
/// Recursive helper for `chain!`. Each arm peels one parser step off the
/// token stream:
///   * `parser ~ rest`          — run the parser, discard its output
///   * `field: parser ~ rest`   — run the parser, bind its output to `field`
///   * a trailing `?` makes the step optional (errors are ignored; for
///     bound fields the binding becomes an `Option`)
///   * `parser, closure` / `field: parser, closure` — final step; the
///     closure assembles the overall result.
/// `ident`-only steps are normalized to `call!` form before matching.
#[macro_export]
macro_rules! chaining_parser (
    ($i:expr, $e:ident ~ $($rest:tt)*) => (
        chaining_parser!($i, call!($e) ~ $($rest)*);
    );
    // mandatory step, output discarded; errors/incomplete abort the chain
    ($i:expr, $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
        match $submac!($i, $($args)*) {
            IResult::Error(e) => IResult::Error(e),
            IResult::Incomplete(i) => IResult::Incomplete(i),
            IResult::Done(i,_) => {
                // continue on the remaining input `i`
                chaining_parser!(i, $($rest)*)
            }
        }
    );
    ($i:expr, $e:ident ? ~ $($rest:tt)*) => (
        chaining_parser!($i, call!($e) ? ~ $($rest)*);
    );
    // optional step: on error, continue with the ORIGINAL input `$i`
    ($i:expr, $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => (
        match $submac!($i, $($args)*) {
            IResult::Incomplete(i) => IResult::Incomplete(i),
            IResult::Error(_) => {
                chaining_parser!($i, $($rest)*)
            },
            IResult::Done(i,_) => {
                chaining_parser!(i, $($rest)*)
            }
        }
    );
    ($i:expr, $field:ident : $e:ident ~ $($rest:tt)*) => (
        chaining_parser!($i, $field: call!($e) ~ $($rest)*);
    );
    // mandatory step with a named binding for the parser output
    ($i:expr, $field:ident : $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
        match $submac!($i, $($args)*) {
            IResult::Error(e) => IResult::Error(e),
            IResult::Incomplete(i) => IResult::Incomplete(i),
            IResult::Done(i,o) => {
                let $field = o;
                chaining_parser!(i, $($rest)*)
            }
        }
    );
    ($i:expr, $field:ident : $e:ident ? ~ $($rest:tt)*) => (
        chaining_parser!($i, $field : call!($e) ? ~ $($rest)*);
    );
    // optional step with a binding: the field becomes an `Option`
    ($i:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => (
        match $submac!($i, $($args)*) {
            IResult::Incomplete(i) => IResult::Incomplete(i),
            IResult::Error(_) => {
                let $field = None;
                chaining_parser!($i, $($rest)*)
            },
            IResult::Done(i,o) => {
                let $field = Some(o);
                chaining_parser!(i, $($rest)*)
            }
        }
    );
    // ending the chain
    ($i:expr, $e:ident, $assemble:expr) => (
        chaining_parser!($i, call!($e), $assemble);
    );
    ($i:expr, $submac:ident!( $($args:tt)* ), $assemble:expr) => (
        match $submac!($i, $($args)*) {
            IResult::Error(e) => IResult::Error(e),
            IResult::Incomplete(i) => IResult::Incomplete(i),
            IResult::Done(i,_) => {
                IResult::Done(i, $assemble())
            }
        }
    );
    ($i:expr, $e:ident ?, $assemble:expr) => (
        chaining_parser!($i, call!($e) ?, $assemble);
    );
    // optional final step without binding
    ($i:expr, $submac:ident!( $($args:tt)* ) ?, $assemble:expr) => (
        match $submac!($i, $($args)*) {
            IResult::Incomplete(i) => IResult::Incomplete(i),
            IResult::Error(_) => {
                IResult::Done($i, $assemble())
            },
            IResult::Done(i,_) => {
                IResult::Done(i, $assemble())
            }
        }
    );
    ($i:expr, $field:ident : $e:ident, $assemble:expr) => (
        chaining_parser!($i, $field: call!($e), $assemble);
    );
    // final step with a binding used inside the assembly closure
    ($i:expr, $field:ident : $submac:ident!( $($args:tt)* ), $assemble:expr) => (
        match $submac!($i, $($args)*) {
            IResult::Error(e) => IResult::Error(e),
            IResult::Incomplete(i) => IResult::Incomplete(i),
            IResult::Done(i,o) => {
                let $field = o;
                IResult::Done(i, $assemble())
            }
        }
    );
    ($i:expr, $field:ident : $e:ident ? , $assemble:expr) => (
        chaining_parser!($i, $field : call!($e) ? , $assemble);
    );
    // optional final step with a binding; errors yield `None` for the field
    ($i:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? , $assemble:expr) => (
        match $submac!($i, $($args)*) {
            IResult::Incomplete(i) => IResult::Incomplete(i),
            IResult::Error(_) => {
                let $field = None;
                IResult::Done($i, $assemble())
            },
            IResult::Done(i,o) => {
                let $field = Some(o);
                IResult::Done(i, $assemble())
            }
        }
    );
    // base case: no parsers left, just build the result
    ($i:expr, $assemble:expr) => (
        IResult::Done($i, $assemble())
    )
);
/// try a list of parser, return the result of the first successful one
///
/// Incomplete results are ignored
///
/// ```ignore
/// tag!(x "abcd");
/// tag!(y "efgh");
/// named!(test, alt!(x | y));
/// let r1 = test(b"abcdefgh"));
/// assert_eq!(r1, Done(b"efgh", b"abcd"));
/// let r2 = test(b"efghijkl"));
/// assert_eq!(r2, Done(b"ijkl", b"efgh"));
/// ```
// `alt!` is only a thin entry point; the alternative-by-alternative
// recursion happens in `alt_parser!` below.
#[macro_export]
macro_rules! alt (
    ($i:expr, $($rest:tt)*) => (
        {
            alt_parser!($i, $($rest)*)
        }
    );
);
/// Recursive helper for `alt!`. Tries each `|`-separated alternative in
/// order, always restarting from the original input `$i`. An optional
/// `=> { closure }` maps the alternative's output. If every alternative
/// fails, the base case returns `Error(1)`.
#[macro_export]
macro_rules! alt_parser (
    ($i:expr, $e:ident | $($rest:tt)*) => (
        alt_parser!($i, call!($e) | $($rest)*);
    );
    // non-final alternative: both Error and Incomplete fall through to the
    // next alternative (see the "Incomplete results are ignored" doc above)
    ($i:expr, $submac:ident!( $($args:tt)*) | $($rest:tt)*) => (
        {
            match $submac!($i, $($args)*) {
                IResult::Error(_) => alt_parser!($i, $($rest)*),
                IResult::Incomplete(_) => alt_parser!($i, $($rest)*),
                IResult::Done(i,o) => IResult::Done(i,o)
            }
        }
    );
    ($i:expr, $e:ident => { $gen:expr } | $($rest:tt)*) => (
        alt_parser!($i, call!($e) => { $gen } | $($rest)*);
    );
    // non-final alternative with an output-mapping closure `$gen`
    ($i:expr, $subrule:ident!( $args:tt ) => { $gen:expr } | $($rest:tt)+) => (
        match $subrule!( $i, $args ) {
            IResult::Error(_) => alt!( $i, $($rest)+ ),
            IResult::Incomplete(_) => alt!( $i, $($rest)+ ),
            IResult::Done(i,o) => IResult::Done(i, $gen( o ))
        }
    );
    ($i:expr, $e:ident => { $gen:expr }) => (
        alt_parser!($i, call!($e) => { $gen });
    );
    // last alternative with a mapping closure
    // NOTE(review): unlike the non-final arms, this one propagates
    // Incomplete instead of turning it into a failure — confirm intended.
    ($i:expr, $subrule:ident!( $args:tt ) => { $gen:expr }) => (
        match $subrule!( $i, $args ) {
            IResult::Incomplete(x) => IResult::Incomplete(x),
            IResult::Error(e) => IResult::Error(e),
            IResult::Done(i,o) => IResult::Done(i, $gen( o )),
        }
    );
    ($i:expr, $e:ident) => (
        alt_parser!($i, call!($e));
    );
    // last alternative: failure falls through to the error base case
    ($i:expr, $submac:ident!( $($args:tt)*)) => (
        match $submac!($i, $($args)*) {
            IResult::Error(_) => alt_parser!($i),
            IResult::Incomplete(_) => alt_parser!($i),
            IResult::Done(i,o) => IResult::Done(i,o)
        }
    );
    // base case: every alternative failed
    ($i:expr) => (
        IResult::Error(1)
    );
);
/// returns the longest list of bytes that do not appear in the provided array
///
/// ```ignore
/// is_not!(not_space b" \t\r\n");
/// let r = not_space(b"abcdefgh\nijkl"));
/// assert_eq!(r, Done(b"\nijkl", b"abcdefgh"));
/// ```
#[macro_export]
macro_rules! is_not(
    ($input:expr, $arr:expr) => (
        {
            // monomorphic shim so `$arr` may be any AsBytes (str, &[u8], ...)
            #[inline(always)]
            fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
                b.as_bytes()
            }
            let expected = $arr;
            let bytes = as_bytes(&expected);
            // stop at the first input byte that IS in the forbidden set
            for idx in 0..$input.len() {
                for &i in bytes.iter() {
                    if $input[idx] == i {
                        return IResult::Done(&$input[idx..], &$input[0..idx])
                    }
                }
            }
            // no forbidden byte found: the whole input matches
            IResult::Done(b"", $input)
        }
    );
);
/// returns the longest list of bytes that appear in the provided array
///
/// ```ignore
/// is_a!(abcd b"abcd");
/// let r1 = abcd(b"aaaaefgh"));
/// assert_eq!(r1, Done(b"efgh", b"aaaa"));
///
/// let r2 = abcd(b"dcbaefgh"));
/// assert_eq!(r2, Done(b"efgh", b"dcba"));
/// ```
#[macro_export]
macro_rules! is_a(
    ($input:expr, $arr:expr) => (
        {
            // monomorphic shim so `$arr` may be any AsBytes (str, &[u8], ...)
            #[inline(always)]
            fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
                b.as_bytes()
            }
            let expected = $arr;
            let bytes = as_bytes(&expected);
            // stop at the first input byte that is NOT in the accepted set
            for idx in 0..$input.len() {
                let mut res = false;
                for &i in bytes.iter() {
                    if $input[idx] == i {
                        res = true;
                        break;
                    }
                }
                if !res {
                    return IResult::Done(&$input[idx..], &$input[0..idx])
                }
            }
            // every byte was accepted: consume the whole input
            IResult::Done(b"", $input)
        }
    );
);
/// returns the longest list of bytes until the provided parser fails
///
/// ```ignore
/// filter!(alpha is_alphanumeric);
/// let r = alpha(b"abcd\nefgh"));
/// assert_eq!(r, Done(b"\nefgh", b"abcd"));
/// ```
#[macro_export]
macro_rules! filter(
    // Function form: wrap the predicate in `call!` and reuse the macro arm.
    // Fixed: the original expansion was `filter!($i, call!($f))`, referring
    // to metavariables `$i`/`$f` that this arm never binds, so any use of
    // the function form failed to compile.
    ($input:expr, $f:expr) => (
        filter!($input, call!($f));
    );
    ($input:expr, $submac:ident!( $($args:tt)* )) => (
        {
            // scan bytes until the predicate rejects one; everything before
            // the failing index is the parsed output
            for idx in 0..$input.len() {
                if !$submac!($input[idx], $($args)*) {
                    return IResult::Done(&$input[idx..], &$input[0..idx])
                }
            }
            // the predicate accepted every byte: consume the whole input
            IResult::Done(b"", $input)
        }
    );
);
/// make the underlying parser optional
///
/// returns an Option of the returned type
///
/// ```ignore
/// tag!(x "abcd");
/// opt!(o<&[u8],&[u8]> x);
///
/// let a = b"abcdef";
/// let b = b"bcdefg";
/// assert_eq!(o(a), Done(b"ef", Some(b"abcd")));
/// assert_eq!(o(b), Done(b"bcdefg", None));
/// ```
#[macro_export]
macro_rules! opt(
    ($i:expr, $submac:ident!( $($args:tt)* )) => (
        {
            match $submac!($i, $($args)*) {
                // success is wrapped in Some; failure becomes Done(None)
                // on the ORIGINAL input (nothing is consumed)
                IResult::Done(i,o) => IResult::Done($i, Some(o)),
                IResult::Error(_) => IResult::Done($i, None),
                // Incomplete still propagates: more data might succeed
                IResult::Incomplete(i) => IResult::Incomplete(i)
            }
        }
    );
    // Function form. Fixed: the original expanded to `opt($i, call($f))`,
    // missing the `!` on both macro invocations, so the function form
    // failed to compile.
    ($i:expr, $f:expr) => (
        opt!($i, call!($f));
    );
);
/// returns a result without consuming the input
///
/// the embedded parser may return Incomplete
///
/// ```ignore
/// tag!(x "abcd");
/// peek!(ptag<&[u8], &[u8]> x);
/// let r = ptag(b"abcdefgh"));
/// assert_eq!(r, Done(b"abcdefgh", b"abcd"));
/// ```
#[macro_export]
macro_rules! peek(
    ($i:expr, $submac:ident!( $($args:tt)* )) => (
        {
            match $submac!($i, $($args)*) {
                // on success, return the output but hand back the ORIGINAL
                // input `$i` so nothing is consumed
                IResult::Done(i,o) => IResult::Done($i, o),
                IResult::Error(a) => IResult::Error(a),
                IResult::Incomplete(i) => IResult::Incomplete(i)
            }
        }
    );
    // Function form. Fixed: the original expanded to `call!(f)` — a bare
    // identifier `f` instead of the bound metavariable `$f` — so the
    // function form only compiled if the caller happened to have an `f`
    // in scope.
    ($i:expr, $f:expr) => (
        peek!($i, call!($f));
    );
);
/// Applies the parser 0 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```ignore
/// tag!(x "abcd");
/// many0!(multi<&[u8],&[u8]> x);
///
/// let a = b"abcdabcdef";
/// let b = b"azerty";
///
/// let res = vec![b"abcd", b"abcd"];
/// assert_eq!(multi(a), Done(b"ef", res));
/// assert_eq!(multi(b), Done(b"azerty", Vec::new()));
/// ```
/// 0 or more
#[macro_export]
macro_rules! many0(
    ($i:expr, $submac:ident!( $($args:tt)* )) => (
        {
            // `begin` tracks how much of `$i` has been consumed so far;
            // `remaining` is the length left before the current iteration
            let mut begin = 0;
            let mut remaining = $i.len();
            let mut res = Vec::new();
            loop {
                match $submac!(&$i[begin..], $($args)*) {
                    IResult::Done(i,o) => {
                        res.push(o);
                        // advance by however many bytes the sub-parser ate
                        begin += remaining - i.len();
                        remaining = i.len();
                    },
                    // any Error or Incomplete simply stops the repetition
                    _ => {
                        break;
                    }
                }
            }
            // zero successful iterations is fine: returns an empty Vec
            IResult::Done(&$i[begin..], res)
        }
    );
    ($i:expr, $f:expr) => (
        many0!($i, call!($f));
    );
);
/// Applies the parser 0 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```ignore
/// tag!(x "abcd");
/// many1!(multi<&[u8],&[u8]> x);
///
/// let a = b"abcdabcdef";
/// let b = b"azerty";
///
/// let res = vec![b"abcd", b"abcd"];
/// assert_eq!(multi(a), Done(b"ef", res));
/// assert_eq!(multi(b), Error(0));
/// ```
#[macro_export]
macro_rules! many1(
    ($i:expr, $submac:ident!( $($args:tt)* )) => (
        {
            // same consumption-tracking scheme as `many0!`
            let mut begin = 0;
            let mut remaining = $i.len();
            let mut res = Vec::new();
            loop {
                match $submac!(&$i[begin..], $($args)*) {
                    IResult::Done(i,o) => {
                        res.push(o);
                        begin += remaining - i.len();
                        remaining = i.len();
                    },
                    _ => {
                        break;
                    }
                }
            }
            // unlike `many0!`, at least one match is required.
            // NOTE(review): emptiness is judged by `begin == 0` (bytes
            // consumed), not `res.is_empty()` — a sub-parser that succeeds
            // without consuming input would still be reported as Error(0);
            // confirm that is intended.
            if begin == 0 {
                IResult::Error(0)
            } else {
                IResult::Done(&$i[begin..], res)
            }
        }
    );
    ($i:expr, $f:expr) => (
        many1!($i, call!($f));
    );
);
/// generates a parser consuming the specified number of bytes
///
/// ```ignore
/// take!(take5 5);
///
/// let a = b"abcdefgh";
///
/// assert_eq!(take5(a), Done(b"fgh", b"abcde"));
/// ```
#[macro_export]
macro_rules! take(
    ($i:expr, $count:expr) => (
        {
            // not enough bytes yet: report how many are needed in total
            if $i.len() < $count {
                Incomplete(Needed::Size($count))
            } else {
                // split off exactly `$count` bytes as the output
                Done(&$i[$count..],&$i[0..$count])
            }
        }
    );
);
/// generates a parser consuming bytes until the specified byte sequence is found
///
/// On success the matched sequence itself is consumed and the bytes before
/// it are returned as the output. If the input ends before the sequence
/// could fit, `Incomplete` is returned; if the sequence never matches,
/// `Error(0)`.
#[macro_export]
macro_rules! take_until(
    ($i:expr, $inp:expr) => (
        {
            // monomorphic shim so `$inp` may be any AsBytes (str, &[u8], ...)
            #[inline(always)]
            fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
                b.as_bytes()
            }
            let expected = $inp;
            let bytes = as_bytes(&expected);
            for idx in 0..$i.len() {
                // not enough input left to hold the needle: ask for more
                if idx + bytes.len() > $i.len() {
                    return Incomplete(Needed::Size((idx + bytes.len()) as u32))
                }
                if &$i[idx..idx + bytes.len()] == bytes {
                    // Needle found: consume it and return the prefix.
                    // The guard above guarantees `idx + bytes.len() <= $i.len()`,
                    // so this slice is always valid; the original re-check of
                    // that condition here was unreachable and has been removed
                    // (when the needle ends exactly at the end of input, the
                    // remaining slice below is simply empty).
                    return Done(&$i[(idx + bytes.len())..], &$i[0..idx])
                }
            }
            return Error(0)
        }
    );
);
/// Like `take_until!`, but does NOT consume the matched sequence: the
/// remaining input starts at the first byte of the match.
#[macro_export]
macro_rules! take_until_and_leave(
    ($i:expr, $inp:expr) => (
        {
            // monomorphic shim so `$inp` may be any AsBytes (str, &[u8], ...)
            #[inline(always)]
            fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
                b.as_bytes()
            }
            let expected = $inp;
            let bytes = as_bytes(&expected);
            for idx in 0..$i.len() {
                // not enough input left to hold the needle: ask for more
                if idx + bytes.len() > $i.len() {
                    return Incomplete(Needed::Size((idx + bytes.len()) as u32))
                }
                if &$i[idx..idx+bytes.len()] == bytes {
                    // leave the needle in the remaining input
                    return Done(&$i[idx..], &$i[0..idx])
                }
            }
            return Error(0)
        }
    );
);
/// Consumes bytes until any single byte from `$inp` is found; the matching
/// byte itself is consumed and the bytes before it are returned as the
/// output. Returns `Error(0)` if no candidate byte appears.
#[macro_export]
macro_rules! take_until_either(
    ($i:expr, $inp:expr) => (
        {
            // monomorphic shim so `$inp` may be any AsBytes (str, &[u8], ...)
            #[inline(always)]
            fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
                b.as_bytes()
            }
            let expected = $inp;
            let bytes = as_bytes(&expected);
            // `idx < $i.len()` always holds inside the loop, so the original
            // `idx + 1 > $i.len()` guards (Incomplete return and the inner
            // re-check before slicing) could never fire and were removed.
            for idx in 0..$i.len() {
                for &t in bytes.iter() {
                    if $i[idx] == t {
                        // consume the matched byte, return the prefix
                        return Done(&$i[(idx+1)..], &$i[0..idx])
                    }
                }
            }
            return Error(0)
        }
    );
);
/// Like `take_until_either!`, but does NOT consume the matching byte: the
/// remaining input starts at the matched byte.
#[macro_export]
macro_rules! take_until_either_and_leave(
    ($i:expr, $inp:expr) => (
        {
            // monomorphic shim so `$inp` may be any AsBytes (str, &[u8], ...)
            #[inline(always)]
            fn as_bytes<T: $crate::util::AsBytes>(b: &T) -> &[u8] {
                b.as_bytes()
            }
            let expected = $inp;
            let bytes = as_bytes(&expected);
            // `idx < $i.len()` always holds inside the loop, so the original
            // `idx + 1 > $i.len()` Incomplete guard could never fire and was
            // removed.
            for idx in 0..$i.len() {
                for &t in bytes.iter() {
                    if $i[idx] == t {
                        // leave the matched byte in the remaining input
                        return Done(&$i[idx..], &$i[0..idx])
                    }
                }
            }
            return Error(0)
        }
    );
);
/// Generates a named parser `$name` that first runs `$f` to read a count
/// `nb`, then applies `$g` exactly `nb` times, collecting the outputs into
/// a `Vec<$o>`.
///
/// Two forms:
///   * `length_value!(name<I,O> f g)` — the size of one `$g` item is taken
///     from the first successful parse (used to size Incomplete hints)
///   * `length_value!(name<I,O> f g len)` — the per-item size is the fixed
///     expression `$length`
#[macro_export]
macro_rules! length_value(
    ($name:ident<$i:ty,$o:ty> $f:ident $g:ident) => (
        fn $name(input:$i) -> IResult<$i, Vec<$o>> {
            match $f(input) {
                Error(a) => Error(a),
                Incomplete(i) => Incomplete(i),
                Done(i1,nb) => {
                    // bytes consumed by the count parser, used when sizing
                    // Incomplete hints below
                    let length_token = input.len() - i1.len();
                    let mut begin = 0;
                    let mut remaining = i1.len();
                    let mut res: Vec<$o> = Vec::new();
                    loop {
                        // collected the requested number of items: done
                        if res.len() == nb as usize {
                            return Done(&i1[begin..], res);
                        }
                        match $g(&i1[begin..]) {
                            Done(i2,o2) => {
                                res.push(o2);
                                let parsed = remaining - i2.len();
                                begin += parsed;
                                remaining = i2.len();
                                // ran out of input before `nb` items:
                                // estimate total need from the last item size
                                if begin >= i1.len() {
                                    return Incomplete(Needed::Size((length_token + nb as usize * parsed) as u32));
                                }
                            },
                            Error(a) => return Error(a),
                            Incomplete(Needed::Unknown) => {
                                return Incomplete(Needed::Unknown)
                            },
                            Incomplete(Needed::Size(a)) => {
                                return Incomplete(Needed::Size(length_token as u32 + a * nb as u32))
                            }
                        }
                    }
                }
            }
        }
    );
    // fixed-size variant: `$length` is the known size of one `$g` item
    ($name:ident<$i:ty,$o:ty> $f:ident $g:ident $length:expr) => (
        fn $name(input:$i) -> IResult<$i, Vec<$o>> {
            match $f(input) {
                Error(a) => Error(a),
                Incomplete(i) => Incomplete(i),
                Done(i1,nb) => {
                    let length_token = input.len() - i1.len();
                    let mut begin = 0;
                    let mut remaining = i1.len();
                    let mut res: Vec<$o> = Vec::new();
                    loop {
                        if res.len() == nb as usize {
                            return Done(&i1[begin..], res);
                        }
                        match $g(&i1[begin..]) {
                            Done(i2,o2) => {
                                res.push(o2);
                                let parsed = remaining - i2.len();
                                begin += parsed;
                                remaining = i2.len();
                                if begin >= i1.len() {
                                    return Incomplete(Needed::Size((length_token + nb as usize * $length) as u32));
                                }
                            },
                            Error(a) => return Error(a),
                            Incomplete(Needed::Unknown) => {
                                return Incomplete(Needed::Unknown)
                            },
                            Incomplete(Needed::Size(_)) => {
                                return Incomplete(Needed::Size(length_token as u32 + $length * nb as u32))
                            }
                        }
                    }
                }
            }
        }
    );
);
// Unit tests for the parser combinator macros defined above.
#[cfg(test)]
mod tests {
    use super::*;
    use map::*;
    use internal::Needed;
    use internal::IResult;
    use internal::IResult::*;
    // `is_a!` consumes the longest run of accepted bytes (possibly empty)
    #[test]
    fn is_a() {
        named!(a_or_b, is_a!(&b"ab"[..]));
        let a = &b"abcd"[..];
        assert_eq!(a_or_b(a), Done(&b"cd"[..], &b"ab"[..]));
        let b = &b"bcde"[..];
        assert_eq!(a_or_b(b), Done(&b"cde"[..], &b"b"[..]));
        let c = &b"cdef"[..];
        assert_eq!(a_or_b(c), Done(&b"cdef"[..], &b""[..]));
        let d = &b"bacdef"[..];
        assert_eq!(a_or_b(d), Done(&b"cdef"[..], &b"ba"[..]));
    }
    #[derive(PartialEq,Eq,Debug)]
    struct B {
        a: u8,
        b: u8
    }
    // `chain!` with discarded, optional, and bound steps
    #[test]
    fn chain2() {
        fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
        fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
        named!(f<&[u8],B>,
            chain!(
                tag!("abcd") ~
                tag!("abcd")? ~
                aa: ret_int1 ~
                tag!("efgh") ~
                bb: ret_int2 ~
                tag!("efgh") ,
                ||{B{a: aa, b: bb}}
            )
        );
        let r = f(&b"abcdabcdefghefghX"[..]);
        assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));
        let r2 = f(&b"abcdefghefghX"[..]);
        assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
    }
    // a `chain!` may appear as a step inside another `chain!`
    #[test]
    fn nested_chain() {
        fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
        fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
        named!(f<&[u8],B>,
            chain!(
                chain!(
                    tag!("abcd") ~
                    tag!("abcd")? ,
                    || {}
                ) ~
                aa: ret_int1 ~
                tag!("efgh") ~
                bb: ret_int2 ~
                tag!("efgh") ,
                ||{B{a: aa, b: bb}}
            )
        );
        let r = f(&b"abcdabcdefghefghX"[..]);
        assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));
        let r2 = f(&b"abcdefghefghX"[..]);
        assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
    }
    #[derive(PartialEq,Eq,Debug)]
    struct C {
        a: u8,
        b: Option<u8>
    }
    // optional bound step (`field: parser?`) yields an Option field
    #[test]
    fn chain_opt() {
        named!(y, tag!("efgh"));
        fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
        fn ret_y(i:&[u8]) -> IResult<&[u8], u8> {
            y(i).map(|_| 2)
        };
        named!(f<&[u8],C>,
            chain!(
                tag!("abcd") ~
                aa: ret_int1 ~
                bb: ret_y? ,
                ||{C{a: aa, b: bb}}
            )
        );
        let r = f(&b"abcdefghX"[..]);
        assert_eq!(r, Done(&b"X"[..], C{a: 1, b: Some(2)}));
        let r2 = f(&b"abcdWXYZ"[..]);
        assert_eq!(r2, Done(&b"WXYZ"[..], C{a: 1, b: None}));
        let r3 = f(&b"abcdX"[..]);
        assert_eq!(r3, Incomplete(Needed::Size(4)));
    }
    // `alt!` returns the first successful alternative, Error(1) if none
    #[test]
    fn alt() {
        fn work(input: &[u8]) -> IResult<&[u8],&[u8]> {
            Done(&b""[..], input)
        }
        #[allow(unused_variables)]
        fn dont_work(input: &[u8]) -> IResult<&[u8],&[u8]> {
            Error(3)
        }
        fn work2(input: &[u8]) -> IResult<&[u8],&[u8]> {
            Done(input, &b""[..])
        }
        named!(alt1, alt!(dont_work | dont_work));
        named!(alt2, alt!(dont_work | work));
        named!(alt3, alt!(dont_work | dont_work | work2 | dont_work));
        let a = &b"abcd"[..];
        assert_eq!(alt1(a), Error(1));
        assert_eq!(alt2(a), Done(&b""[..], a));
        assert_eq!(alt3(a), Done(a, &b""[..]));
        named!(alt4, alt!(tag!("abcd") | tag!("efgh")));
        let b = &b"efgh"[..];
        assert_eq!(alt4(a), Done(&b""[..], a));
        assert_eq!(alt4(b), Done(&b""[..], b));
    }
    // `opt!` wraps success in Some and turns failure into Done(None)
    #[test]
    fn opt() {
        named!(o<&[u8],Option<&[u8]> >, opt!(tag!("abcd")));
        let a = &b"abcdef"[..];
        let b = &b"bcdefg"[..];
        assert_eq!(o(a), Done(&b"ef"[..], Some(&b"abcd"[..])));
        assert_eq!(o(b), Done(&b"bcdefg"[..], None));
    }
    // `peek!` reports the output without consuming the input
    #[test]
    fn peek() {
        named!(ptag<&[u8],&[u8]>, peek!(tag!("abcd")));
        let r1 = ptag(&b"abcdefgh"[..]);
        assert_eq!(r1, Done(&b"abcdefgh"[..], &b"abcd"[..]));
        let r1 = ptag(&b"efgh"[..]);
        assert_eq!(r1, Error(0));
    }
    #[test]
    fn many0() {
        named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd")));
        let a = &b"abcdef"[..];
        let b = &b"abcdabcdef"[..];
        let c = &b"azerty"[..];
        let res1 = vec![&b"abcd"[..]];
        assert_eq!(multi(a), Done(&b"ef"[..], res1));
        let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
        assert_eq!(multi(b), Done(&b"ef"[..], res2));
        // zero matches is still a success for many0
        assert_eq!(multi(c), Done(&b"azerty"[..], Vec::new()));
    }
    #[test]
    fn many1() {
        named!(multi<&[u8],Vec<&[u8]> >, many1!(tag!("abcd")));
        let a = &b"abcdef"[..];
        let b = &b"abcdabcdef"[..];
        let c = &b"azerty"[..];
        let res1 = vec![&b"abcd"[..]];
        assert_eq!(multi(a), Done(&b"ef"[..], res1));
        let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
        assert_eq!(multi(b), Done(&b"ef"[..], res2));
        // zero matches is an error for many1
        assert_eq!(multi(c), Error(0));
    }
    #[test]
    fn take_until_test() {
        named!(x, take_until!("efgh"));
        let r = x(&b"abcdabcdefghijkl"[..]);
        assert_eq!(r, Done(&b"ijkl"[..], &b"abcdabcd"[..]));
        println!("Done 1\n");
        // needle at the very end: remaining input is empty
        let r2 = x(&b"abcdabcdefgh"[..]);
        assert_eq!(r2, Done(&b""[..], &b"abcdabcd"[..]));
        println!("Done 2\n");
        // partial needle at end of input: Incomplete with total size needed
        let r3 = x(&b"abcefg"[..]);
        assert_eq!(r3, Incomplete(Needed::Size(7)));
    }
    use nom::{be_u8,be_u16};
    #[test]
    fn length_value_test() {
        // tst1 infers item size from the first parse, tst2 uses fixed size 2
        length_value!(tst1<&[u8], u16 > be_u8 be_u16);
        length_value!(tst2<&[u8], u16 > be_u8 be_u16 2);
        let i1 = vec![0, 5, 6];
        let i2 = vec![1, 5, 6, 3];
        let i3 = vec![2, 5, 6, 3];
        let i4 = vec![2, 5, 6, 3, 4, 5, 7];
        let i5 = vec![3, 5, 6, 3, 4, 5];
        let r1: Vec<u16> = Vec::new();
        let r2: Vec<u16> = vec![1286];
        let r4: Vec<u16> = vec![1286, 772];
        assert_eq!(tst1(&i1), IResult::Done(&i1[1..], r1));
        assert_eq!(tst1(&i2), IResult::Done(&i2[3..], r2));
        assert_eq!(tst1(&i3), IResult::Incomplete(Needed::Size(5)));
        assert_eq!(tst1(&i4), IResult::Done(&i4[5..], r4));
        assert_eq!(tst1(&i5), IResult::Incomplete(Needed::Size(7)));
        let r6: Vec<u16> = Vec::new();
        let r7: Vec<u16> = vec![1286];
        let r9: Vec<u16> = vec![1286, 772];
        assert_eq!(tst2(&i1), IResult::Done(&i1[1..], r6));
        assert_eq!(tst2(&i2), IResult::Done(&i2[3..], r7));
        assert_eq!(tst2(&i3), IResult::Incomplete(Needed::Size(5)));
        assert_eq!(tst2(&i4), IResult::Done(&i4[5..], r9));
        // NOTE(review): this last line re-tests tst1, not tst2 — possibly a
        // copy-paste slip; confirm whether tst2 was intended here.
        assert_eq!(tst1(&i5), IResult::Incomplete(Needed::Size(7)));
    }
}
|
// Copyright 2014 The Algebra Developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Implements a marker trait `$M` for a `;`-separated list of types, each
/// optionally carrying a `where` clause, e.g.
/// `impl_marker!(MyTrait; u32; Wrapper<T> where T: Clone;)`.
/// Internally this is a token-tree muncher: `@rec` scans one type at a
/// time, `@where_rec` collects an optional where clause, and `@para_rec`
/// splits off the generic parameter list for generic types.
#[macro_export]
macro_rules! impl_marker(
    // Finds the generic parameters of the type and implements the trait for it
    // NOTE(review): `$R` is spliced twice — once as the impl's parameter
    // list and once after the type constructor — relying on the collected
    // tokens to contain the closing `>`; confirm against call sites.
    (@para_rec
        [$M:ty, ($($G:tt)+), ($($F:tt)*)]
        (< $($R:tt)*)
    ) => {
        impl< $($R)* $M for $($F)*< $($R)*
            where $($G)+
        {}
    };
    // Munches some token trees for searching generic parameters of the type
    (@para_rec
        [$M:ty, ($($G:tt)+), ($($F:tt)*)]
        ($C:tt $($R:tt)*)
    ) => {
        impl_marker!(@para_rec
            [$M, ($($G)+), ($($F)* $C)]
            ($($R)*)
        );
    };
    // Handles the trailing separator after where clause
    (@where_rec
        [$M:ty, ($($P:tt)+), ($($G:tt)+)]
        ($(;)*)
    ) => {
        impl_marker!(@para_rec
            [$M, ($($G)+), ()]
            ($($P)+)
        );
    };
    // Implements the trait for the generic type and continues searching other types
    (@where_rec
        [$M:ty, ($($P:tt)+), ($($G:tt)+)]
        (; $($R:tt)+)
    ) => {
        impl_marker!(@para_rec
            [$M, ($($G)+), ()]
            ($($P)+)
        );
        impl_marker!(@rec
            [$M, ()]
            ($($R)+)
        );
    };
    // Munches some token trees for searching the end of the where clause
    (@where_rec
        [$M:ty, ($($P:tt)+), ($($F:tt)*)]
        ($C:tt $($R:tt)*)
    ) => {
        impl_marker!(@where_rec
            [$M, ($($P)+), ($($F)* $C)]
            ($($R)*)
        );
    };
    // Handles the trailing separator for non-generic type and implements the trait
    (@rec
        [$M:ty, ($($F:tt)*)]
        ($(;)*)
    ) => {
        impl $M for $($F)* { }
    };
    // Implements the trait for the non-generic type and continues searching other types
    (@rec
        [$M:ty, ($($F:tt)*)]
        (; $($R:tt)+)
    ) => {
        impl $M for $($F)* { }
        impl_marker!(@rec
            [$M, ()]
            ($($R)+)
        );
    };
    // Detects that there is indeed a where clause for the type and tries to find where it ends.
    (@rec
        [$M:ty, ($($F:tt)+)]
        (where $($G:tt)+)
    ) => {
        impl_marker!(@where_rec
            [$M, ($($F)+), ()]
            ($($G)+)
        );
    };
    // Munches some token trees for detecting if we have where clause or not
    (@rec
        [$M:ty, ($($F:tt)*)]
        ($C:tt $($R:tt)*)
    ) => {
        impl_marker!(@rec
            [$M, ($($F)* $C)]
            ($($R)*)
        );
    };
    // Entry point to the macro
    ($M:ty; $($R:tt)+) => {
        impl_marker!(@rec
            [$M, ()]
            ($($R)+)
        );
    };
);
/// Implements `Identity<$M>` for each listed type, returning the constant
/// `$V` from `identity()` (e.g. 0 for additive, 1 for multiplicative).
macro_rules! impl_ident {
    ($M:ty; $V:expr; $($T:ty),* $(,)*) => {
        $(impl Identity<$M> for $T { #[inline] fn identity() -> $T {$V} })+
    }
}
/// Implements `ApproxEq` for each listed numeric type with default
/// epsilon `$V`; equality holds when |self - b| <= epsilon.
macro_rules! impl_approx_eq {
    ($V:expr; $($T:ty),* $(,)*) => {
        $(impl ApproxEq for $T {
            type Eps = $T;
            #[inline]
            fn default_epsilon() -> Self::Eps { $V }
            #[inline]
            fn approx_eq_eps(&self, b: &$T, epsilon: &$T) -> bool {
                // compute |self - b| without needing a signed/abs operation,
                // so this also works for unsigned types
                if self < b {
                    *b - *self <= *epsilon
                } else {
                    *self - *b <= *epsilon
                }
            }
        })+
    }
}
Improved macro variable names.
// Copyright 2014 The Algebra Developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Implements a marker trait `$tra1t` for a `;`-separated list of types,
/// each optionally carrying a `where` clause. Token-tree muncher: `@rec`
/// scans one type at a time, `@where_rec` collects an optional where
/// clause, and `@para_rec` splits off the generic parameter list.
#[macro_export]
macro_rules! impl_marker(
    // Finds the generic parameters of the type and implements the trait for it
    // NOTE(review): `$params` is spliced twice — once as the impl's
    // parameter list and once after the type constructor — relying on the
    // collected tokens to contain the closing `>`; confirm at call sites.
    (@para_rec
        [$tra1t:ty, ($($clause:tt)+), ($($type_constr:tt)*)]
        (< $($params:tt)*)
    ) => {
        impl< $($params)* $tra1t for $($type_constr)*< $($params)*
            where $($clause)+
        {}
    };
    // Munches some token trees for searching generic parameters of the type
    (@para_rec
        [$tra1t:ty, ($($clause:tt)+), ($($prev:tt)*)]
        ($cur:tt $($rest:tt)*)
    ) => {
        impl_marker!(@para_rec
            [$tra1t, ($($clause)+), ($($prev)* $cur)]
            ($($rest)*)
        );
    };
    // Handles the trailing separator after where clause
    (@where_rec
        [$tra1t:ty, ($($typ3:tt)+), ($($clause:tt)+)]
        ($(;)*)
    ) => {
        impl_marker!(@para_rec
            [$tra1t, ($($clause)+), ()]
            ($($typ3)+)
        );
    };
    // Implements the trait for the generic type and continues searching other types
    (@where_rec
        [$tra1t:ty, ($($typ3:tt)+), ($($clause:tt)+)]
        (; $($rest:tt)+)
    ) => {
        impl_marker!(@para_rec
            [$tra1t, ($($clause)+), ()]
            ($($typ3)+)
        );
        impl_marker!(@rec
            [$tra1t, ()]
            ($($rest)+)
        );
    };
    // Munches some token trees for searching the end of the where clause
    (@where_rec
        [$tra1t:ty, ($($typ3:tt)+), ($($prev:tt)*)]
        ($cur:tt $($rest:tt)*)
    ) => {
        impl_marker!(@where_rec
            [$tra1t, ($($typ3)+), ($($prev)* $cur)]
            ($($rest)*)
        );
    };
    // Handles the trailing separator for non-generic type and implements the trait
    (@rec
        [$tra1t:ty, ($($typ3:tt)*)]
        ($(;)*)
    ) => {
        impl $tra1t for $($typ3)* { }
    };
    // Implements the trait for the non-generic type and continues searching other types
    (@rec
        [$tra1t:ty, ($($typ3:tt)*)]
        (; $($rest:tt)+)
    ) => {
        impl $tra1t for $($typ3)* { }
        impl_marker!(@rec
            [$tra1t, ()]
            ($($rest)+)
        );
    };
    // Detects that there is indeed a where clause for the type and tries to find where it ends.
    (@rec
        [$tra1t:ty, ($($prev:tt)+)]
        (where $($rest:tt)+)
    ) => {
        impl_marker!(@where_rec
            [$tra1t, ($($prev)+), ()]
            ($($rest)+)
        );
    };
    // Munches some token trees for detecting if we have where clause or not
    (@rec
        [$tra1t:ty, ($($prev:tt)*)]
        ($cur:tt $($rest:tt)*)
    ) => {
        impl_marker!(@rec
            [$tra1t, ($($prev)* $cur)]
            ($($rest)*)
        );
    };
    // Entry point to the macro
    ($tra1t:ty; $($rest:tt)+) => {
        impl_marker!(@rec
            [$tra1t, ()]
            ($($rest)+)
        );
    };
);
/// Implements `Identity<$M>` for each listed type, returning the constant
/// `$V` from `identity()` (e.g. 0 for additive, 1 for multiplicative).
macro_rules! impl_ident {
    ($M:ty; $V:expr; $($T:ty),* $(,)*) => {
        $(impl Identity<$M> for $T { #[inline] fn identity() -> $T {$V} })+
    }
}
/// Implements `ApproxEq` for each listed numeric type with default
/// epsilon `$V`; equality holds when |self - b| <= epsilon.
macro_rules! impl_approx_eq {
    ($V:expr; $($T:ty),* $(,)*) => {
        $(impl ApproxEq for $T {
            type Eps = $T;
            #[inline]
            fn default_epsilon() -> Self::Eps { $V }
            #[inline]
            fn approx_eq_eps(&self, b: &$T, epsilon: &$T) -> bool {
                // compute |self - b| without needing a signed/abs operation,
                // so this also works for unsigned types
                if self < b {
                    *b - *self <= *epsilon
                } else {
                    *self - *b <= *epsilon
                }
            }
        })+
    }
}
|
//! Warning: extreme macros
/// This macro creates structs that implement `ToTable` and `FromTable`.
///
/// # Examples:
///
/// ```rust
/// lua_convertible! {
///     #[derive(Debug)]
///     // #[attribute]
///     struct Point {
///         x: i32,
///         y: i32
///     }
/// }
/// ```
#[macro_export]
macro_rules! lua_convertible {
    ( $(#[$attr:meta])*
      struct $name:ident { $($fname:ident : $ftype:ty),+ } ) => {
        // emit the struct itself, forwarding any attributes/derives
        $(#[$attr])*
        pub struct $name {
            $($fname: $ftype),+
        }
        // serialization: each field becomes a (name, value) pair in a
        // Lua array-of-pairs
        impl $crate::convert::ToTable for $name {
            fn to_table(self) -> hlua::any::AnyLuaValue {
                hlua::any::AnyLuaValue::LuaArray(vec![
                    $( (hlua::any::AnyLuaValue::LuaString(stringify!($fname).to_string()),
                        self.$fname.to_table()) ),+
                ])
            }
        }
        // deserialization: read each field by name, threading the decoder
        impl $crate::convert::FromTable for $name {
            #[allow(unused_variables)]
            fn from_table(decoder: $crate::convert::LuaDecoder) ->
                $crate::convert::ConvertResult<$name> {
                $( let (decoder, $fname) =
                    try!(decoder.read_field(stringify!($fname).to_string())); )+
                Ok($name {
                    $( $fname: $fname ),+
                })
            }
        }
    }
}
/// Creates a struct and implements `ToJson` and `Decodable` from
/// rustc_serialize.
#[macro_export]
macro_rules! json_convertible {
    ( $(#[$attr:meta])*
      struct $name:ident { $($fname:ident : $ftype:ty),+ } ) => {
        // emit the struct itself, forwarding any attributes/derives
        $(#[$attr])*
        pub struct $name {
            $($fname: $ftype),+
        }
        // serialization: each field is inserted by name into a JSON object
        impl ::rustc_serialize::json::ToJson for $name {
            fn to_json(&self) -> ::rustc_serialize::json::Json {
                let mut tree = ::std::collections::BTreeMap::new();
                $( tree.insert(stringify!($fname).to_string(),
                               self.$fname.to_json()); )+
                ::rustc_serialize::json::Json::Object(tree)
            }
        }
        // deserialization via rustc_serialize's struct-field protocol
        impl ::rustc_serialize::Decodable for $name {
            fn decode<D: ::rustc_serialize::Decoder>(d: &mut D) -> Result<$name, D::Error> {
                $( let $fname = try!(d.read_struct_field(
                    stringify!($fname), 0usize,
                    |f| ::rustc_serialize::Decodable::decode(f))); )+
                Ok($name {
                    $( $fname: $fname ),+
                })
            }
        }
    }
}
/// Create a keypress using fewer keystrokes. Provides a custom panic method.
// NOTE(review): `concat!` only accepts literals, so this macro works when
// callers pass string literals for `$modifier`/`$key` (as the tests do) —
// confirm no call site passes computed expressions.
#[macro_export]
macro_rules! keypress {
    ($modifier:expr, $key:expr) => {
        $crate::keys::KeyPress::from_key_names(&[$modifier],
                                               $key)
            .expect(concat!("Unable to create keypress from macro with ",
                            $modifier, " and ", $key))
    };
}
/// Return from a test method if DUMMY_RUSTWLC is defined.
// NOTE(review): this macro only exists under #[cfg(test)], and cfg!(test)
// is always true there — so as written it ALWAYS returns early, regardless
// of DUMMY_RUSTWLC. Looks like a placeholder; confirm the intended
// condition (e.g. a `dummy-rustwlc` feature flag).
#[cfg(test)]
macro_rules! require_rustwlc {
    () => {
        if cfg!(test) {
            return;
        }
    }
}
/// Create a dbus interface object.
///
/// Given the path and name of a dbus interface
/// and a series of methods with type ipc::DBusResult, this macro
/// generates a big function `setup(&mut DBusFactory) -> DBusObjPath`
/// which will call the `Factory`'s `add_*` methods properly.
///
/// Currently limited to one dbus interface per invocation and requires setting
/// the name of the outputs.
macro_rules! dbus_interface {
    ( path: $obj_path:expr; name: $obj_name:expr;
      $(fn $fn_name:ident($($in_name:ident : $in_ty:ty),*)
          -> $out_name:ident : DBusResult< $out_ty_inner:ty > { $($inner:tt)* })+ ) => {
        // NOTE(review): `#[warn(dead_code)]` here and below was probably
        // meant to be `#[allow(dead_code)]` — confirm.
        #[warn(dead_code)]
        pub fn setup(factory: &mut $crate::ipc::DBusFactory) -> $crate::ipc::DBusObjPath {
            return factory.object_path($obj_path, ()).introspectable()
                .add(factory.interface($obj_name, ())
                    $(
                        // register one dbus method per declared fn
                        .add_m(factory.method(stringify!($fn_name), (),
                            move |msg| {
                                // pull each declared argument off the message
                                let mut args_iter = msg.msg.iter_init();
                                $(
                                    let $in_name: $in_ty = args_iter.read::<$in_ty>()
                                        .expect("oopslol");
                                )*
                                // dispatch to the generated free function below
                                let result = $fn_name($($in_name),*);
                                match result {
                                    Ok(value) => {
                                        let dbus_return = msg.msg.method_return().append(value);
                                        return Ok(vec![dbus_return])
                                    },
                                    Err(err) => {
                                        return Err(err)
                                    }
                                }
                            }).outarg::<$out_ty_inner, _>(stringify!($out_name))
                    )
                )*
            );
        }
        // emit each method body as a standalone function used by `setup`
        $(
            #[allow(non_snake_case)]
            #[warn(dead_code)]
            fn $fn_name( $($in_name: $in_ty),* )
                -> $crate::ipc::DBusResult<$out_ty_inner> {
                $($inner)*
            }
        )*
    };
}
// Unit tests for the conversion and keypress macros above.
#[cfg(test)]
mod tests {
    use super::super::convert::{ToTable, FromTable, LuaDecoder};
    use hlua;
    use rustc_serialize::Decodable;
    use rustc_serialize::json::{Decoder, ToJson};
    // exercise the struct-generating macros at module level
    lua_convertible! {
        #[derive(Debug, Clone, PartialEq)]
        struct Point {
            x: f32,
            y: f32
        }
    }
    json_convertible! {
        #[derive(Debug, Clone, PartialEq)]
        struct Rectangle {
            height: u32,
            width: u32
        }
    }
    #[test]
    fn require_rustwlc() {
        require_rustwlc!();
        // If we're here we can use rustwlc.
        // If we tried to get a view or something it'd fail though.
        let _ = keypress!("Ctrl", "p");
    }
    // round-trip a struct through the Lua table representation
    #[test]
    fn lua_convertible() {
        let point = Point { x: 0f32, y: 0f32 };
        let lua_point = point.clone().to_table();
        let maybe_point = Point::from_table(LuaDecoder::new(lua_point));
        let parsed_point = maybe_point.expect("Unable to parse point!");
        assert_eq!(parsed_point, point);
    }
    // round-trip a struct through the JSON representation
    #[test]
    fn json_convertible() {
        let rect = Rectangle { height: 1u32, width: 2u32 };
        let json_rect = rect.to_json();
        let maybe_rect = Rectangle::decode(&mut Decoder::new(json_rect));
        let parsed_rect = maybe_rect.expect("Unable to parse rectangle!");
        assert_eq!(parsed_rect, rect);
    }
    // macro-built keypress must equal (and hash like) the hand-built one
    #[test]
    fn keypress() {
        require_rustwlc!();
        use super::super::keys::KeyPress;
        use std::hash::{SipHasher, Hash};
        let press = KeyPress::from_key_names(&["Ctrl"], "p")
            .expect("Unable to construct regular keypress");
        let press_macro = keypress!("Ctrl", "p");
        let mut hasher = SipHasher::new();
        assert!(press.hash(&mut hasher) == press_macro.hash(&mut hasher),
                "Hashes do not match");
        assert_eq!(press, press_macro);
    }
}
Fixed macros so that commands take input
//! Warning: extreme macros
/// This macro creates structs that implement `ToTable` and `FromTable`.
///
/// # Examples:
///
/// ```rust
/// lua_convertible! {
/// #[derive(Debug)]
/// // #[attribute]
/// struct Point {
/// x: i32,
/// y: i32
/// }
/// }
/// ```
#[macro_export]
macro_rules! lua_convertible {
    // Matches a (possibly attributed) struct definition and re-emits it as a
    // `pub` struct together with `ToTable`/`FromTable` impls.
    ( $(#[$attr:meta])*
      struct $name:ident { $($fname:ident : $ftype:ty),+ } ) => {
        $(#[$attr])*
        pub struct $name {
            $($fname: $ftype),+
        }
        impl $crate::convert::ToTable for $name {
            // Serializes the struct as a Lua array of (field-name, value)
            // pairs; each field must itself implement ToTable.
            fn to_table(self) -> hlua::any::AnyLuaValue {
                hlua::any::AnyLuaValue::LuaArray(vec![
                    $( (hlua::any::AnyLuaValue::LuaString(stringify!($fname).to_string()),
                        self.$fname.to_table()) ),+
                ])
            }
        }
        impl $crate::convert::FromTable for $name {
            #[allow(unused_variables)]
            fn from_table(decoder: $crate::convert::LuaDecoder) ->
                $crate::convert::ConvertResult<$name> {
                // Each read_field call threads the decoder forward by
                // shadowing it, so fields are consumed in declaration order.
                $( let (decoder, $fname) =
                    try!(decoder.read_field(stringify!($fname).to_string())); )+
                Ok($name {
                    $( $fname: $fname ),+
                })
            }
        }
    }
}
/// Creates a struct and implements `ToJson` and `Decodeable` from
/// rustc_serialize.
#[macro_export]
macro_rules! json_convertible {
    // Matches a (possibly attributed) struct definition and re-emits it as a
    // `pub` struct together with rustc_serialize ToJson/Decodable impls.
    ( $(#[$attr:meta])*
      struct $name:ident { $($fname:ident : $ftype:ty),+ } ) => {
        $(#[$attr])*
        pub struct $name {
            $($fname: $ftype),+
        }
        impl ::rustc_serialize::json::ToJson for $name {
            // Serializes as a JSON object keyed by field name.
            fn to_json(&self) -> ::rustc_serialize::json::Json {
                let mut tree = ::std::collections::BTreeMap::new();
                $( tree.insert(stringify!($fname).to_string(),
                               self.$fname.to_json()); )+
                ::rustc_serialize::json::Json::Object(tree)
            }
        }
        impl ::rustc_serialize::Decodable for $name {
            fn decode<D: ::rustc_serialize::Decoder>(d: &mut D) -> Result<$name, D::Error> {
                // NOTE(review): the field index passed to read_struct_field is
                // always 0usize rather than the field's position — confirm the
                // decoders in use ignore it (the JSON decoder does).
                $( let $fname = try!(d.read_struct_field(
                    stringify!($fname), 0usize,
                    |f| ::rustc_serialize::Decodable::decode(f))); )+
                Ok($name {
                    $( $fname: $fname ),+
                })
            }
        }
    }
}
/// Create a keypress using fewer keystrokes. Provides a custom panic method.
#[macro_export]
macro_rules! keypress {
    // `keypress!("Ctrl", "p")` builds a KeyPress, panicking with a message
    // naming both keys on failure. Because of the concat! below, both
    // arguments must be string *literals*, not runtime values.
    ($modifier:expr, $key:expr) => {
        $crate::keys::KeyPress::from_key_names(&[$modifier],
                                               $key)
            .expect(concat!("Unable to create keypress from macro with ",
                            $modifier, " and ", $key))
    };
}
/// Return from a test method if DUMMY_RUSTWLC is defined.
///
/// NOTE(review): the expansion checks `cfg!(test)`, which is always true in
/// the `#[cfg(test)]` code this macro is restricted to — so it returns
/// unconditionally, not only when DUMMY_RUSTWLC is set. Confirm the intended
/// condition (likely a feature flag such as `cfg!(feature = "dummy")`).
#[cfg(test)]
macro_rules! require_rustwlc {
    () => {
        if cfg!(test) {
            return;
        }
    }
}
/// Create a dbus interface object.
///
/// Given the path and name of a dbus interface
/// and a series of methods with type ipc::DBusResult, this macro
/// generates a big function `setup(&mut DBusFactory) -> DBusObjPath`
/// which will call the `Factory`'s `add_*` methods properly.
///
/// Each declared method is also emitted as a free function holding the
/// user-provided body; `setup` wires those functions to D-Bus, reading the
/// declared input arguments off each incoming message in order.
///
/// Currently limited to one dbus interface per invocation and requires setting
/// the name of the outputs.
macro_rules! dbus_interface {
    ( path: $obj_path:expr; name: $obj_name:expr;
      $(fn $fn_name:ident($($in_name:ident : $in_ty:ty),*)
        -> $out_name:ident : DBusResult< $out_ty_inner:ty > { $($inner:tt)* })+ ) => {
        // `allow` here (the original `#[warn(dead_code)]` was a no-op, since
        // warn is already the default lint level) so generated-but-unused
        // setup functions do not produce warnings.
        #[allow(dead_code)]
        #[allow(unused_mut)]
        pub fn setup(factory: &mut $crate::ipc::DBusFactory) -> $crate::ipc::DBusObjPath {
            return factory.object_path($obj_path, ()).introspectable()
                .add(factory.interface($obj_name, ())
                    $(
                        .add_m(factory.method(stringify!($fn_name), (),
                            move |msg| {
                                // Read each declared input argument from the
                                // message, in declaration order.
                                let mut args_iter = msg.msg.iter_init();
                                $(
                                    let $in_name: $in_ty = try!(args_iter.read::<$in_ty>());
                                )*
                                let result = $fn_name($($in_name),*);
                                match result {
                                    Ok(value) => {
                                        let dbus_return = msg.msg.method_return().append(value);
                                        return Ok(vec![dbus_return])
                                    },
                                    Err(err) => {
                                        return Err(err)
                                    }
                                }
                            })
                            // Declare the output and every input so the
                            // interface introspects correctly.
                            .outarg::<$out_ty_inner, _>(stringify!($out_name))
                            $(
                                .inarg::<$in_ty, _>(stringify!($in_name))
                            )*
                        )
                    )*
                );
        }
        // One free function per declared method, holding the user's body.
        $(
            #[allow(non_snake_case)]
            #[allow(dead_code)]
            fn $fn_name( $($in_name: $in_ty),* )
                -> $crate::ipc::DBusResult<$out_ty_inner> {
                $($inner)*
            }
        )*
    };
}
#[cfg(test)]
mod tests {
    use super::super::convert::{ToTable, FromTable, LuaDecoder};
    use hlua;
    use rustc_serialize::Decodable;
    use rustc_serialize::json::{Decoder, ToJson};

    // Compile-time exercise of the struct-generating macros: each invocation
    // expands to a struct definition plus the conversion-trait impls.
    lua_convertible! {
        #[derive(Debug, Clone, PartialEq)]
        struct Point {
            x: f32,
            y: f32
        }
    }
    json_convertible! {
        #[derive(Debug, Clone, PartialEq)]
        struct Rectangle {
            height: u32,
            width: u32
        }
    }

    #[test]
    fn require_rustwlc() {
        // NOTE(review): require_rustwlc! expands to `if cfg!(test) { return; }`,
        // which is always true in a #[cfg(test)] module, so the keypress line
        // below appears unreachable — confirm the intended guard condition.
        require_rustwlc!();
        // If we're here we can use rustwlc.
        // If we tried to get a view or something it'd fail though.
        let _ = keypress!("Ctrl", "p");
    }

    // Round-trips a Point through its Lua table representation.
    #[test]
    fn lua_convertible() {
        let point = Point { x: 0f32, y: 0f32 };
        let lua_point = point.clone().to_table();
        let maybe_point = Point::from_table(LuaDecoder::new(lua_point));
        let parsed_point = maybe_point.expect("Unable to parse point!");
        assert_eq!(parsed_point, point);
    }

    // Round-trips a Rectangle through its JSON representation.
    #[test]
    fn json_convertible() {
        let rect = Rectangle { height: 1u32, width: 2u32 };
        let json_rect = rect.to_json();
        let maybe_rect = Rectangle::decode(&mut Decoder::new(json_rect));
        let parsed_rect = maybe_rect.expect("Unable to parse rectangle!");
        assert_eq!(parsed_rect, rect);
    }

    // The macro-built keypress must equal one constructed manually.
    #[test]
    fn keypress() {
        require_rustwlc!();
        use super::super::keys::KeyPress;
        use std::hash::{SipHasher, Hash};
        let press = KeyPress::from_key_names(&["Ctrl"], "p")
            .expect("Unable to construct regular keypress");
        let press_macro = keypress!("Ctrl", "p");
        let mut hasher = SipHasher::new();
        // NOTE(review): `Hash::hash` returns `()`, so this assertion compares
        // `() == ()` and always passes — it does not actually compare hashes.
        // The assert_eq! below is the real check; confirm and fix upstream.
        assert!(press.hash(&mut hasher) == press_macro.hash(&mut hasher),
                "Hashes do not match");
        assert_eq!(press, press_macro);
    }
}
|
/// Generates conversions between a Rust wrapper type and its raw native
/// handle, plus (for the `&Name = Alias` form) identity-based equality.
macro_rules! native_ref(
    // Reference-wrapper form: `&$name` is assumed layout-compatible with the
    // raw `$alias` handle type. Equality is identity (same address), not
    // structural.
    (&$name:ident = $alias:ty) => (
        impl Eq for $name {}
        impl PartialEq<$name> for $name {
            fn eq(&self, other: &$name) -> bool {
                // Identity comparison via safe pointer casts (replaces the
                // old `mem::transmute::<_, isize>` trick — no unsafe needed).
                self as *const $name == other as *const $name
            }
        }
        impl<'a> PartialEq<$name> for &'a $name {
            fn eq(&self, other: &$name) -> bool {
                // BUG FIX: here `self` is a `&&$name`; the previous code
                // transmuted it directly to an integer, so it compared the
                // address of the reference slot with `other`'s object address
                // and was effectively always false. Deref once so both sides
                // are object addresses.
                *self as *const $name == other as *const $name
            }
        }
        impl<'a> From<&'a $name> for $alias {
            fn from(ty: &'a $name) -> $alias {
                use std::mem;
                // SAFETY: relies on the macro's contract that `&$name` has the
                // same representation as the raw `$alias` handle — confirm at
                // each invocation site.
                unsafe { mem::transmute(ty) }
            }
        }
        impl<'a> From<&'a mut $name> for $alias {
            fn from(ty: &'a mut $name) -> $alias {
                use std::mem;
                // SAFETY: same layout-compatibility contract as above.
                unsafe { mem::transmute(ty) }
            }
        }
        impl<'a> From<$alias> for &'a $name {
            fn from(ty: $alias) -> &'a $name {
                use std::mem;
                // SAFETY: same contract; additionally the handle must be
                // non-null and valid for the caller-chosen lifetime 'a.
                unsafe { mem::transmute(ty) }
            }
        }
        impl<'a> From<$alias> for &'a mut $name {
            fn from(ty: $alias) -> &'a mut $name {
                use std::mem;
                // SAFETY: same contract, plus exclusive access for 'a.
                unsafe { mem::transmute(ty) }
            }
        }
    );
    // Pointer-field form: `$name` stores a raw pointer in `$field`.
    ($name:ident, $field:ident: $pointer_ty:ty) => (
        impl<'a> From<&'a mut $name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a mut $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a> From<&'a $name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl From<$name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl From<$pointer_ty> for $name {
            /// Convert from a native pointer
            fn from(ptr: $pointer_ty) -> $name {
                $name {
                    $field: ptr
                }
            }
        }
    );
    // Pointer-field form with extra fields initialised from expressions.
    ($name:ident, $field:ident: $pointer_ty:ty, $($ofield:ident = $expr:expr),*) => (
        impl<'a> From<&'a mut $name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a mut $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a> From<&'a $name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl From<$name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl From<$pointer_ty> for $name {
            /// Convert from a native pointer
            fn from(ptr: $pointer_ty) -> $name {
                $name {
                    $field: ptr,
                    $($ofield: $expr),*
                }
            }
        }
    );
    // Generic variant of the previous arm.
    ($name:ident<$ty:ident>, $field:ident: $pointer_ty:ty, $($ofield:ident = $expr:expr),*) => (
        impl<'a, $ty> From<&'a mut $name<$ty>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a mut $name<$ty>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a, $ty> From<&'a $name<$ty>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a $name<$ty>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<$ty> From<$name<$ty>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: $name<$ty>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<$ty> From<$pointer_ty> for $name<$ty> {
            /// Convert from a native pointer
            fn from(ptr: $pointer_ty) -> $name<$ty> {
                $name {
                    $field: ptr,
                    $($ofield: $expr),*
                }
            }
        }
    );
    // Lifetime-carrying wrapper; fills in a PhantomData marker field.
    (contra $name:ident, $field:ident: $pointer_ty:ty) => (
        impl<'a, 'b> From<&'a mut $name<'b>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a mut $name<'b>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a, 'b> From<&'a $name<'b>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a $name<'b>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a> From<$name<'a>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: $name<'a>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a> From<$pointer_ty> for $name<'a> {
            /// Convert from a native pointer
            fn from(ptr: $pointer_ty) -> $name<'a> {
                $name {
                    $field: ptr,
                    marker: PhantomData
                }
            }
        }
    )
);
// Implements `fmt::Debug` for `$ty` by calling the native routine
// `core::$func` on the handle and printing the returned C string.
macro_rules! to_str(
    ($ty:ty, $func:ident) => (
        impl fmt::Debug for $ty {
            fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                fmt.write_str(unsafe {
                    // SAFETY: assumes `core::$func` returns a valid,
                    // NUL-terminated string for this handle — confirm per use.
                    let c_str = core::$func(self.into());
                    util::to_str(c_str)
                })
            }
        }
    );
);
// Implements `GetContext` for `$ty` by delegating to the native accessor
// `core::$func` and converting the raw result into `&Context`.
macro_rules! get_context(
    ($ty:ty, $func:ident) => (
        impl GetContext for $ty {
            fn get_context(&self) -> &Context {
                // SAFETY: assumes the context returned by `core::$func`
                // is valid and outlives `self` — TODO confirm.
                unsafe { core::$func(self.into()) }.into()
            }
        }
    );
);
// Implements `Deref<Target = $to>` for `$ty` via a reference transmute;
// the two types must share representation.
macro_rules! deref(
    ($ty:ty, $to:ty) => (
        impl Deref for $ty {
            type Target = $to;
            fn deref(&self) -> &$to {
                // SAFETY: relies on `$ty` and `$to` being layout-compatible —
                // confirm at each invocation site.
                unsafe { mem::transmute(self) }
            }
        }
    );
);
// Implements `cbox::DisposeRef` for `$ty`: `$func` frees the raw `$ref_ty`
// pointer when the owning CBox is dropped.
macro_rules! dispose(
    ($ty:ty, $ref_ty:ty, $func:expr) => (
        impl ::cbox::DisposeRef for $ty {
            type RefTo = $ref_ty;
            #[inline(always)]
            unsafe fn dispose(ptr: *mut $ref_ty) {
                $func(ptr)
            }
        }
    );
);
Fix permissions on macros.rs
/// Generates conversions between a Rust wrapper type and its raw native
/// handle, plus (for the `&Name = Alias` form) identity-based equality.
macro_rules! native_ref(
    // Reference-wrapper form: `&$name` is assumed layout-compatible with the
    // raw `$alias` handle type. Equality is identity (same address), not
    // structural.
    (&$name:ident = $alias:ty) => (
        impl Eq for $name {}
        impl PartialEq<$name> for $name {
            fn eq(&self, other: &$name) -> bool {
                // Identity comparison via safe pointer casts (replaces the
                // old `mem::transmute::<_, isize>` trick — no unsafe needed).
                self as *const $name == other as *const $name
            }
        }
        impl<'a> PartialEq<$name> for &'a $name {
            fn eq(&self, other: &$name) -> bool {
                // BUG FIX: here `self` is a `&&$name`; the previous code
                // transmuted it directly to an integer, so it compared the
                // address of the reference slot with `other`'s object address
                // and was effectively always false. Deref once so both sides
                // are object addresses.
                *self as *const $name == other as *const $name
            }
        }
        impl<'a> From<&'a $name> for $alias {
            fn from(ty: &'a $name) -> $alias {
                use std::mem;
                // SAFETY: relies on the macro's contract that `&$name` has the
                // same representation as the raw `$alias` handle — confirm at
                // each invocation site.
                unsafe { mem::transmute(ty) }
            }
        }
        impl<'a> From<&'a mut $name> for $alias {
            fn from(ty: &'a mut $name) -> $alias {
                use std::mem;
                // SAFETY: same layout-compatibility contract as above.
                unsafe { mem::transmute(ty) }
            }
        }
        impl<'a> From<$alias> for &'a $name {
            fn from(ty: $alias) -> &'a $name {
                use std::mem;
                // SAFETY: same contract; additionally the handle must be
                // non-null and valid for the caller-chosen lifetime 'a.
                unsafe { mem::transmute(ty) }
            }
        }
        impl<'a> From<$alias> for &'a mut $name {
            fn from(ty: $alias) -> &'a mut $name {
                use std::mem;
                // SAFETY: same contract, plus exclusive access for 'a.
                unsafe { mem::transmute(ty) }
            }
        }
    );
    // Pointer-field form: `$name` stores a raw pointer in `$field`.
    ($name:ident, $field:ident: $pointer_ty:ty) => (
        impl<'a> From<&'a mut $name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a mut $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a> From<&'a $name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl From<$name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl From<$pointer_ty> for $name {
            /// Convert from a native pointer
            fn from(ptr: $pointer_ty) -> $name {
                $name {
                    $field: ptr
                }
            }
        }
    );
    // Pointer-field form with extra fields initialised from expressions.
    ($name:ident, $field:ident: $pointer_ty:ty, $($ofield:ident = $expr:expr),*) => (
        impl<'a> From<&'a mut $name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a mut $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a> From<&'a $name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl From<$name> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: $name) -> $pointer_ty {
                thing.$field
            }
        }
        impl From<$pointer_ty> for $name {
            /// Convert from a native pointer
            fn from(ptr: $pointer_ty) -> $name {
                $name {
                    $field: ptr,
                    $($ofield: $expr),*
                }
            }
        }
    );
    // Generic variant of the previous arm.
    ($name:ident<$ty:ident>, $field:ident: $pointer_ty:ty, $($ofield:ident = $expr:expr),*) => (
        impl<'a, $ty> From<&'a mut $name<$ty>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a mut $name<$ty>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a, $ty> From<&'a $name<$ty>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a $name<$ty>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<$ty> From<$name<$ty>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: $name<$ty>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<$ty> From<$pointer_ty> for $name<$ty> {
            /// Convert from a native pointer
            fn from(ptr: $pointer_ty) -> $name<$ty> {
                $name {
                    $field: ptr,
                    $($ofield: $expr),*
                }
            }
        }
    );
    // Lifetime-carrying wrapper; fills in a PhantomData marker field.
    (contra $name:ident, $field:ident: $pointer_ty:ty) => (
        impl<'a, 'b> From<&'a mut $name<'b>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a mut $name<'b>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a, 'b> From<&'a $name<'b>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: &'a $name<'b>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a> From<$name<'a>> for $pointer_ty {
            /// Convert into a native pointer
            fn from(thing: $name<'a>) -> $pointer_ty {
                thing.$field
            }
        }
        impl<'a> From<$pointer_ty> for $name<'a> {
            /// Convert from a native pointer
            fn from(ptr: $pointer_ty) -> $name<'a> {
                $name {
                    $field: ptr,
                    marker: PhantomData
                }
            }
        }
    )
);
// Implements `fmt::Debug` for `$ty` by calling the native routine
// `core::$func` on the handle and printing the returned C string.
macro_rules! to_str(
    ($ty:ty, $func:ident) => (
        impl fmt::Debug for $ty {
            fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                fmt.write_str(unsafe {
                    // SAFETY: assumes `core::$func` returns a valid,
                    // NUL-terminated string for this handle — confirm per use.
                    let c_str = core::$func(self.into());
                    util::to_str(c_str)
                })
            }
        }
    );
);
// Implements `GetContext` for `$ty` by delegating to the native accessor
// `core::$func` and converting the raw result into `&Context`.
macro_rules! get_context(
    ($ty:ty, $func:ident) => (
        impl GetContext for $ty {
            fn get_context(&self) -> &Context {
                // SAFETY: assumes the context returned by `core::$func`
                // is valid and outlives `self` — TODO confirm.
                unsafe { core::$func(self.into()) }.into()
            }
        }
    );
);
// Implements `Deref<Target = $to>` for `$ty` via a reference transmute;
// the two types must share representation.
macro_rules! deref(
    ($ty:ty, $to:ty) => (
        impl Deref for $ty {
            type Target = $to;
            fn deref(&self) -> &$to {
                // SAFETY: relies on `$ty` and `$to` being layout-compatible —
                // confirm at each invocation site.
                unsafe { mem::transmute(self) }
            }
        }
    );
);
// Implements `cbox::DisposeRef` for `$ty`: `$func` frees the raw `$ref_ty`
// pointer when the owning CBox is dropped.
macro_rules! dispose(
    ($ty:ty, $ref_ty:ty, $func:expr) => (
        impl ::cbox::DisposeRef for $ty {
            type RefTo = $ref_ty;
            #[inline(always)]
            unsafe fn dispose(ptr: *mut $ref_ty) {
                $func(ptr)
            }
        }
    );
);
|
//! Macro combinators
//!
//! Macros are used to make combination easier,
//! since they often do not depend on the type
//! of the data they manipulate or return.
//!
//! There is a trick to make them easier to assemble,
//! combinators are defined like this:
//!
//! ```ignore
//! macro_rules! tag (
//! ($i:expr, $inp: expr) => (
//! {
//! ...
//! }
//! );
//! );
//! ```
//!
//! But when used in other combinators, they are used
//! like this:
//!
//! ```ignore
//! named!(my_function, tag!("abcd"));
//! ```
//!
//! Internally, other combinators will rewrite
//! that call to pass the input as first argument:
//!
//! ```ignore
//! macro_rules! named (
//! ($name:ident, $submac:ident!( $($args:tt)* )) => (
//! fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<'a,&[u8], &[u8]> {
//! $submac!(i, $($args)*)
//! }
//! );
//! );
//! ```
//!
//! If you want to call a combinator directly, you can
//! do it like this:
//!
//! ```ignore
//! let res = { tag!(input, "abcd"); }
//! ```
//!
//! Combinators must have a specific variant for
//! non-macro arguments. Example: passing a function
//! to take_while! instead of another combinator.
//!
//! ```ignore
//! macro_rules! take_while(
//! ($input:expr, $submac:ident!( $($args:tt)* )) => (
//! {
//! ...
//! }
//! );
//!
//! // wrap the function in a macro to pass it to the main implementation
//! ($input:expr, $f:expr) => (
//! take_while!($input, call!($f));
//! );
//! );
//! ```
/// Wraps a parser in a closure
#[macro_export]
macro_rules! closure (
    // Explicitly typed input, e.g. `closure!(&[u8], tag!("ab"))`.
    ($ty:ty, $submac:ident!( $($args:tt)* )) => (
        |i: $ty| { $submac!(i, $($args)*) }
    );
    // Input type left to inference.
    ($submac:ident!( $($args:tt)* )) => (
        |i| { $submac!(i, $($args)*) }
    );
);
/// Makes a function from a parser combination
///
/// The type can be set up if the compiler needs
/// more information
///
/// ```ignore
/// named!(my_function( &[u8] ) -> &[u8], tag!("abcd"));
/// // first type parameter is input, second is output
/// named!(my_function<&[u8], &[u8]>, tag!("abcd"));
/// // will have &[u8] as input type, &[u8] as output type
/// named!(my_function, tag!("abcd"));
/// // will use &[u8] as input type (use this if the compiler
/// // complains about lifetime issues
/// named!(my_function<&[u8]>, tag!("abcd"));
/// //prefix them with 'pub' to make the functions public
/// named!(pub my_function, tag!("abcd"));
/// ```
#[macro_export]
macro_rules! named (
    // named!(name( InTy ) -> OutTy, parser): explicit fn-style signature.
    ($name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => (
        fn $name( i: $i ) -> $crate::IResult<$i,$o> {
            $submac!(i, $($args)*)
        }
    );
    // named!(name<InTy, OutTy>, parser).
    ($name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
        fn $name( i: $i ) -> $crate::IResult<$i, $o> {
            $submac!(i, $($args)*)
        }
    );
    // named!(name<OutTy>, parser): input defaults to &'a [u8].
    ($name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => (
        fn $name<'a>( i: &'a[u8] ) -> $crate::IResult<&'a [u8], $o> {
            $submac!(i, $($args)*)
        }
    );
    // NOTE(review): `$life:item` looks like the wrong fragment specifier for
    // a lifetime parameter, so this arm may never match as intended — confirm.
    ($name:ident<$life:item,$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
        fn $name<$life>( i: $i ) -> $crate::IResult<$life, $i, $o> {
            $submac!(i, $($args)*)
        }
    );
    // named!(name, parser): both input and output default to &[u8].
    ($name:ident, $submac:ident!( $($args:tt)* )) => (
        fn $name( i: &[u8] ) -> $crate::IResult<&[u8], &[u8]> {
            $submac!(i, $($args)*)
        }
    );
    // `pub` variants of the arms above.
    (pub $name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => (
        pub fn $name( i: $i ) -> $crate::IResult<$i,$o> {
            $submac!(i, $($args)*)
        }
    );
    (pub $name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
        pub fn $name( i: $i ) -> $crate::IResult<$i, $o> {
            $submac!(i, $($args)*)
        }
    );
    (pub $name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => (
        pub fn $name( i: &[u8] ) -> $crate::IResult<&[u8], $o> {
            $submac!(i, $($args)*)
        }
    );
    (pub $name:ident, $submac:ident!( $($args:tt)* )) => (
        pub fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<&[u8], &[u8]> {
            $submac!(i, $($args)*)
        }
    );
);
/// Used to wrap common expressions and function as macros
#[macro_export]
macro_rules! call (
    // `call!(i, f)` → `f(i)`; extra arguments are forwarded after the input.
    ($i:expr, $fun:expr) => ( $fun( $i ) );
    ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) );
);
/// emulate function currying: `apply!(my_function, arg1, arg2, ...)` becomes `my_function(input, arg1, arg2, ...)`
///
/// Supports up to 6 arguments
#[macro_export]
macro_rules! apply (
    // `apply!(i, f, a, b)` → `f(i, a, b)`: currying over the input argument.
    ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) );
);
/// Prevents backtracking if the child parser fails
///
/// This parser will do an early return instead of sending
/// its result to the parent parser.
///
/// If another `error!` combinator is present in the parent
/// chain, the error will be wrapped and another early
/// return will be made.
///
/// This makes it easy to build a report on which parser failed,
/// where it failed in the input, and the chain of parsers
/// that led it there.
///
/// Additionally, the error chain contains number identifiers
/// that can be matched to provide useful error messages.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use std::collections;
/// # use nom::IResult::Error;
/// # use nom::Err::{Position,NodePosition};
/// # use nom::ErrorKind;
/// # fn main() {
/// named!(err_test, alt!(
/// tag!("abcd") |
/// preceded!(tag!("efgh"), error!(ErrorKind::Custom(42),
/// chain!(
/// tag!("ijkl") ~
/// res: error!(ErrorKind::Custom(128), tag!("mnop")) ,
/// || { res }
/// )
/// )
/// )
/// ));
/// let a = &b"efghblah"[..];
/// let b = &b"efghijklblah"[..];
/// let c = &b"efghijklmnop"[..];
///
/// let blah = &b"blah"[..];
///
/// let res_a = err_test(a);
/// let res_b = err_test(b);
/// let res_c = err_test(c);
/// assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
/// assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..],
/// Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah))))))
/// );
/// # }
/// ```
///
#[macro_export]
macro_rules! error (
    ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => (
        {
            // The closure forms a boundary: any early `return` performed by a
            // nested `error!` inside the child parser exits `cl`, not the
            // enclosing parser function, so its result can be re-wrapped here.
            let cl = || {
                $submac!($i, $($args)*)
            };
            match cl() {
                $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
                $crate::IResult::Done(i, o) => $crate::IResult::Done(i, o),
                $crate::IResult::Error(e) => {
                    // Early return from the *enclosing* parser function,
                    // wrapping the child error with $code and the current
                    // input position — this is what prevents backtracking.
                    return $crate::IResult::Error($crate::Err::NodePosition($code, $i, Box::new(e)))
                }
            }
        }
    );
    // Plain-function form: wrap in call! and reuse the arm above.
    ($i:expr, $code:expr, $f:expr) => (
        error!($i, $code, call!($f));
    );
);
/// Add an error if the child parser fails
///
/// While error! does an early return and avoids backtracking,
/// add_error! backtracks normally. It just provides more context
/// for an error
///
#[macro_export]
macro_rules! add_error (
    ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => (
        {
            // Like error!, but without the early return: the wrapped error is
            // produced as a value, so parent combinators can still backtrack.
            match $submac!($i, $($args)*) {
                $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
                $crate::IResult::Done(i, o) => $crate::IResult::Done(i, o),
                $crate::IResult::Error(e) => {
                    $crate::IResult::Error($crate::Err::NodePosition($code, $i, Box::new(e)))
                }
            }
        }
    );
    // Plain-function form: wrap in call! and reuse the arm above.
    ($i:expr, $code:expr, $f:expr) => (
        add_error!($i, $code, call!($f));
    );
);
/// Translate a parser result from `IResult<I,O,u32>` to `IResult<I,O,E>` with a custom error type
///
#[macro_export]
macro_rules! fix_error (
    ($i:expr, $t:ty, $submac:ident!( $($args:tt)* )) => (
        {
            // Collapse every child error into `ErrorKind::Fix` typed with $t,
            // keeping position information when the original error had some.
            match $submac!($i, $($args)*) {
                $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
                $crate::IResult::Done(i, o) => $crate::IResult::Done(i, o),
                // Position-less custom errors.
                $crate::IResult::Error($crate::Err::Code(ErrorKind::Custom(_))) |
                $crate::IResult::Error($crate::Err::Node(ErrorKind::Custom(_), _))=> {
                    let e: ErrorKind<$t> = ErrorKind::Fix;
                    $crate::IResult::Error($crate::Err::Code(e))
                },
                // Custom errors carrying a position `p`.
                $crate::IResult::Error($crate::Err::Position(ErrorKind::Custom(_), p)) |
                $crate::IResult::Error($crate::Err::NodePosition(ErrorKind::Custom(_), p, _)) => {
                    let e: ErrorKind<$t> = ErrorKind::Fix;
                    $crate::IResult::Error($crate::Err::Position(e, p))
                },
                // Non-custom, position-less errors.
                $crate::IResult::Error($crate::Err::Code(_)) |
                $crate::IResult::Error($crate::Err::Node(_, _))=> {
                    let e: ErrorKind<$t> = ErrorKind::Fix;
                    $crate::IResult::Error($crate::Err::Code(e))
                },
                // Non-custom errors carrying a position `p`.
                $crate::IResult::Error($crate::Err::Position(_, p)) |
                $crate::IResult::Error($crate::Err::NodePosition(_, p, _)) => {
                    let e: ErrorKind<$t> = ErrorKind::Fix;
                    $crate::IResult::Error($crate::Err::Position(e, p))
                },
            }
        }
    );
    // Plain-function form: wrap in call! and reuse the arm above.
    ($i:expr, $t:ty, $f:expr) => (
        fix_error!($i, $t, call!($f));
    );
);
/// replaces a `Incomplete` returned by the child parser
/// with an `Error`
///
#[macro_export]
macro_rules! complete (
    ($i:expr, $submac:ident!( $($args:tt)* )) => (
        {
            // Turn "needs more input" into a hard error: useful when the
            // caller knows `$i` is the entire input.
            match $submac!($i, $($args)*) {
                $crate::IResult::Done(i, o) => $crate::IResult::Done(i, o),
                $crate::IResult::Error(e) => $crate::IResult::Error(e),
                $crate::IResult::Incomplete(_) => {
                    $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Complete, $i))
                },
            }
        }
    );
    // Plain-function form: wrap in call! and reuse the arm above.
    ($i:expr, $f:expr) => (
        complete!($i, call!($f));
    );
);
/// `flat_map!(R -> IResult<R,S>, S -> IResult<S,T>) => R -> IResult<R, T>`
///
/// combines a parser R -> IResult<R,S> and
/// a parser S -> IResult<S,T> to return another
/// parser R -> IResult<R,T>
#[macro_export]
macro_rules! flat_map(
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        {
            // Run the first parser on $i, then run the second parser on the
            // *output* of the first.
            match $submac!($i, $($args)*) {
                $crate::IResult::Error(e) => $crate::IResult::Error(e),
                $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
                $crate::IResult::Done(i, o) => match $submac2!(o, $($args2)*) {
                    $crate::IResult::Error(e) => {
                        // Re-anchor the second parser's error on the original
                        // input so its position refers to `$i`.
                        let err = match e {
                            $crate::Err::Code(k) | $crate::Err::Node(k, _) | $crate::Err::Position(k, _) | $crate::Err::NodePosition(k, _, _) => {
                                $crate::Err::Position(k, $i)
                            }
                        };
                        $crate::IResult::Error(err)
                    },
                    $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
                    $crate::IResult::Incomplete($crate::Needed::Size(ref i2)) => $crate::IResult::Incomplete($crate::Needed::Size(*i2)),
                    // Keep the first parser's remaining input `i`: the second
                    // parser consumed the first one's output, not `$i`.
                    $crate::IResult::Done(_, o2) => $crate::IResult::Done(i, o2)
                }
            }
        }
    );
    // The remaining arms normalize plain functions into call!() form.
    ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
        flat_map!($i, $submac!($($args)*), call!($g));
    );
    ($i:expr, $f:expr, $g:expr) => (
        flat_map!($i, call!($f), call!($g));
    );
    ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
        flat_map!($i, call!($f), $submac!($($args)*));
    );
);
/// `map!(I -> IResult<I,O>, O -> P) => I -> IResult<I, P>`
/// maps a function on the result of a parser
#[macro_export]
macro_rules! map(
    // Each arm normalizes its arguments into the two-submacro form handled by
    // map_impl!, wrapping plain functions with call!.
    ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
        map_impl!($i, $submac!($($args)*), call!($g));
    );
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        map_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
    );
    ($i:expr, $f:expr, $g:expr) => (
        map_impl!($i, call!($f), call!($g));
    );
    ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
        map_impl!($i, call!($f), $submac!($($args)*));
    );
);
/// Internal parser, do not use directly
#[doc(hidden)]
#[macro_export]
macro_rules! map_impl(
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        {
            // Run the parser, then apply the mapper to the output value only;
            // Error and Incomplete pass through untouched.
            match $submac!($i, $($args)*) {
                $crate::IResult::Error(e) => $crate::IResult::Error(e),
                $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
                $crate::IResult::Done(i, o) => $crate::IResult::Done(i, $submac2!(o, $($args2)*))
            }
        }
    );
);
/// `map_res!(I -> IResult<I,O>, O -> Result<P>) => I -> IResult<I, P>`
/// maps a function returning a Result on the output of a parser
#[macro_export]
macro_rules! map_res (
    // Each arm normalizes its arguments into the two-submacro form handled by
    // map_res_impl!, wrapping plain functions with call!.
    ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
        map_res_impl!($i, $submac!($($args)*), call!($g));
    );
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        map_res_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
    );
    ($i:expr, $f:expr, $g:expr) => (
        map_res_impl!($i, call!($f), call!($g));
    );
    ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
        map_res_impl!($i, call!($f), $submac!($($args)*));
    );
);
/// Internal parser, do not use directly
#[doc(hidden)]
#[macro_export]
macro_rules! map_res_impl (
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        {
            // Run the parser, then apply a Result-returning mapper: Ok keeps
            // the value, Err becomes a MapRes error anchored at `$i`.
            match $submac!($i, $($args)*) {
                $crate::IResult::Error(e) => $crate::IResult::Error(e),
                $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
                $crate::IResult::Done(i, o) => match $submac2!(o, $($args2)*) {
                    Ok(output) => $crate::IResult::Done(i, output),
                    Err(_) => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::MapRes, $i))
                }
            }
        }
    );
);
/// `map_res!(I -> IResult<I,O>, O -> Option<P>) => I -> IResult<I, P>`
/// maps a function returning an Option on the output of a parser
#[macro_export]
macro_rules! map_opt (
    // Each arm normalizes its arguments into the two-submacro form handled by
    // map_opt_impl!, wrapping plain functions with call!.
    ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
        map_opt_impl!($i, $submac!($($args)*), call!($g));
    );
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        map_opt_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
    );
    ($i:expr, $f:expr, $g:expr) => (
        map_opt_impl!($i, call!($f), call!($g));
    );
    ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
        map_opt_impl!($i, call!($f), $submac!($($args)*));
    );
);
/// Internal parser, do not use directly
#[doc(hidden)]
#[macro_export]
macro_rules! map_opt_impl (
    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
        {
            // Run the parser, then apply an Option-returning mapper: Some
            // keeps the value, None becomes a MapOpt error anchored at `$i`.
            match $submac!($i, $($args)*) {
                $crate::IResult::Error(e) => $crate::IResult::Error(e),
                $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
                $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
                $crate::IResult::Done(i, o) => match $submac2!(o, $($args2)*) {
                    Some(output) => $crate::IResult::Done(i, output),
                    None => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::MapOpt, $i))
                }
            }
        }
    );
);
/// `expr_res!(Result<E,O>) => I -> IResult<I, O>`
/// evaluate an expression that returns a Result<T,E> and returns an IResult::Done(I,T) if Ok
///
/// See expr_opt for an example
#[macro_export]
macro_rules! expr_res (
    ($i:expr, $e:expr) => (
        {
            // Lift a plain `Result` into a parser result on input `$i`;
            // the Err payload is discarded and replaced by ErrorKind::ExprRes.
            match $e {
                Ok(output) => $crate::IResult::Done($i, output),
                Err(_) => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::ExprRes, $i))
            }
        }
    );
);
/// `expr_opt!(Option<O>) => I -> IResult<I, O>`
/// evaluate an expression that returns an Option<T> and returns an IResult::Done(I,T) if Some
///
/// Useful when doing computations in a chain
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{self, Done, Error};
/// # use nom::Err::Position;
/// # use nom::{be_u8,ErrorKind};
///
/// fn take_add(input:&[u8], size: u8) -> IResult<&[u8],&[u8]> {
/// chain!(input,
/// sz: be_u8 ~
/// length: expr_opt!(size.checked_add(sz)) ~ // checking for integer overflow (returns an Option)
/// data: take!(length) ,
/// ||{ data }
/// )
/// }
/// # fn main() {
/// let arr1 = [1, 2, 3, 4, 5];
/// let r1 = take_add(&arr1[..], 1);
/// assert_eq!(r1, Done(&[4,5][..], &[2,3][..]));
///
/// let arr2 = [0xFE, 2, 3, 4, 5];
/// // size is overflowing
/// let r1 = take_add(&arr2[..], 42);
/// assert_eq!(r1, Error(Position(ErrorKind::ExprOpt,&[2,3,4,5][..])));
/// # }
/// ```
#[macro_export]
macro_rules! expr_opt (
    ($i:expr, $e:expr) => (
        {
            // Lift a plain `Option` into a parser result on input `$i`;
            // None becomes an ErrorKind::ExprOpt error.
            match $e {
                Some(output) => $crate::IResult::Done($i, output),
                None => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::ExprOpt, $i))
            }
        }
    );
);
/// `chain!(I->IResult<I,A> ~ I->IResult<I,B> ~ ... I->IResult<I,X> , || { return O } ) => I -> IResult<I, O>`
/// chains parsers and assemble the results through a closure
/// the input type I must implement nom::InputLength
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{self, Done, Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// #[derive(PartialEq,Eq,Debug)]
/// struct B {
/// a: u8,
/// b: Option<u8>
/// }
///
/// named!(y, tag!("efgh"));
///
/// fn ret_int(i:&[u8]) -> IResult<&[u8], u8> { Done(i, 1) }
/// named!(ret_y<&[u8], u8>, map!(y, |_| 1)); // return 1 if the "efgh" tag is found
///
/// named!(z<&[u8], B>,
/// chain!(
/// tag!("abcd") ~
/// aa: ret_int ~ // the result of that parser will be used in the closure
/// tag!("abcd")? ~ // this parser is optional
/// bb: ret_y? , // the result of that parser is an option
/// ||{B{a: aa, b: bb}}
/// )
/// );
///
/// # fn main() {
/// // the first "abcd" tag is not present, we have an error
/// let r1 = z(&b"efgh"[..]);
/// assert_eq!(r1, Error(Position(ErrorKind::Tag,&b"efgh"[..])));
///
/// // everything is present, everything is parsed
/// let r2 = z(&b"abcdabcdefgh"[..]);
/// assert_eq!(r2, Done(&b""[..], B{a: 1, b: Some(1)}));
///
/// // the second "abcd" tag is optional
/// let r3 = z(&b"abcdefgh"[..]);
/// assert_eq!(r3, Done(&b""[..], B{a: 1, b: Some(1)}));
///
/// // the result of ret_y is optional, as seen in the B structure
/// let r4 = z(&b"abcdabcdwxyz"[..]);
/// assert_eq!(r4, Done(&b"wxyz"[..], B{a: 1, b: None}));
/// # }
/// ```
#[macro_export]
macro_rules! chain (
    ($i:expr, $($rest:tt)*) => (
        {
            //use $crate::InputLength;
            // Delegate to the accumulator form of the parser chain, starting
            // with zero bytes consumed.
            chaining_parser!($i, 0usize, $($rest)*)
        }
    );
);
/// Internal parser, do not use directly
#[doc(hidden)]
#[macro_export]
macro_rules! chaining_parser (
  // `f ~ ...`: plain function parser, result discarded
  ($i:expr, $consumed:expr, $e:ident ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, call!($e) ~ $($rest)*);
  );
  // `submac!(...) ~ ...`: macro parser, result discarded; the consumed byte
  // count is accumulated so Incomplete sizes stay relative to the original input
  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
    {
      use $crate::InputLength;
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e)      => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        $crate::IResult::Done(i,_)     => {
          chaining_parser!(i, $consumed + (($i).input_len() - i.input_len()), $($rest)*)
        }
      }
    }
  );
  // `f? ~ ...`: optional parser, result discarded; an error continues from
  // the original input
  ($i:expr, $consumed:expr, $e:ident ? ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, call!($e) ? ~ $($rest)*);
  );
  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
    {
      use $crate::InputLength;
      let res = $submac!($i, $($args)*);
      if let $crate::IResult::Incomplete(inc) = res {
        match inc {
          $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
          $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        }
      } else {
        let input = if let $crate::IResult::Done(i,_) = res {
          i
        } else {
          $i
        };
        chaining_parser!(input, $consumed + (($i).input_len() - input.input_len()), $($rest)*)
      }
    }
  });
  // `name: f ~ ...`: the parser's output is bound to $field for later rules
  // and the final closure
  ($i:expr, $consumed:expr, $field:ident : $e:ident ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, $field: call!($e) ~ $($rest)*);
  );
  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
    {
      use $crate::InputLength;
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e)      => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        $crate::IResult::Done(i,o)     => {
          let $field = o;
          chaining_parser!(i, $consumed + (($i).input_len() - i.input_len()), $($rest)*)
        }
      }
    }
  );
  // `mut name: f ~ ...`: same as above but with a mutable binding
  ($i:expr, $consumed:expr, mut $field:ident : $e:ident ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, mut $field: call!($e) ~ $($rest)*);
  );
  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
    {
      use $crate::InputLength;
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e)      => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        $crate::IResult::Done(i,o)     => {
          let mut $field = o;
          chaining_parser!(i, $consumed + ($i).input_len() - i.input_len(), $($rest)*)
        }
      }
    }
  );
  // `name: f? ~ ...`: optional named field, bound as an Option
  ($i:expr, $consumed:expr, $field:ident : $e:ident ? ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, $field : call!($e) ? ~ $($rest)*);
  );
  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
    {
      use $crate::InputLength;
      let res = $submac!($i, $($args)*);
      if let $crate::IResult::Incomplete(inc) = res {
        match inc {
          $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
          $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        }
      } else {
        let ($field,input) = if let $crate::IResult::Done(i,o) = res {
          (Some(o),i)
        } else {
          (None,$i)
        };
        chaining_parser!(input, $consumed + ($i).input_len() - input.input_len(), $($rest)*)
      }
    }
  });
  // `mut name: f? ~ ...`
  ($i:expr, $consumed:expr, mut $field:ident : $e:ident ? ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, mut $field : call!($e) ? ~ $($rest)*);
  );
  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
    {
      use $crate::InputLength;
      let res = $submac!($i, $($args)*);
      if let $crate::IResult::Incomplete(inc) = res {
        match inc {
          $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
          $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        }
      } else {
        let (mut $field,input) = if let $crate::IResult::Done(i,o) = res {
          (Some(o),i)
        } else {
          (None,$i)
        };
        chaining_parser!(input, $consumed + ($i).input_len() - input.input_len(), $($rest)*)
      }
    }
  });
  // ending the chain
  ($i:expr, $consumed:expr, $e:ident, $assemble:expr) => (
    chaining_parser!($i, $consumed, call!($e), $assemble);
  );
  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ), $assemble:expr) => (
    match $submac!($i, $($args)*) {
      $crate::IResult::Error(e)      => $crate::IResult::Error(e),
      $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
      $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      $crate::IResult::Done(i,_)     => {
        $crate::IResult::Done(i, $assemble())
      }
    }
  );
  ($i:expr, $consumed:expr, $e:ident ?, $assemble:expr) => (
    chaining_parser!($i, $consumed, call!($e) ?, $assemble);
  );
  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ?, $assemble:expr) => ({
    let res = $submac!($i, $($args)*);
    if let $crate::IResult::Incomplete(inc) = res {
      match inc {
        $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      }
    } else {
      let input = if let $crate::IResult::Done(i,_) = res {
        i
      } else {
        $i
      };
      $crate::IResult::Done(input, $assemble())
    }
  });
  ($i:expr, $consumed:expr, $field:ident : $e:ident, $assemble:expr) => (
    chaining_parser!($i, $consumed, $field: call!($e), $assemble);
  );
  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ), $assemble:expr) => (
    match $submac!($i, $($args)*) {
      $crate::IResult::Error(e)      => $crate::IResult::Error(e),
      $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
      $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      $crate::IResult::Done(i,o)     => {
        let $field = o;
        $crate::IResult::Done(i, $assemble())
      }
    }
  );
  ($i:expr, $consumed:expr, mut $field:ident : $e:ident, $assemble:expr) => (
    chaining_parser!($i, $consumed, mut $field: call!($e), $assemble);
  );
  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ), $assemble:expr) => (
    match $submac!($i, $($args)*) {
      $crate::IResult::Error(e)      => $crate::IResult::Error(e),
      $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
      $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      $crate::IResult::Done(i,o)     => {
        let mut $field = o;
        $crate::IResult::Done(i, $assemble())
      }
    }
  );
  ($i:expr, $consumed:expr, $field:ident : $e:ident ? , $assemble:expr) => (
    chaining_parser!($i, $consumed, $field : call!($e) ? , $assemble);
  );
  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? , $assemble:expr) => ({
    let res = $submac!($i, $($args)*);
    if let $crate::IResult::Incomplete(inc) = res {
      match inc {
        $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      }
    } else {
      let ($field,input) = if let $crate::IResult::Done(i,o) = res {
        (Some(o), i)
      } else {
        (None, $i)
      };
      $crate::IResult::Done(input, $assemble())
    }
  });
  ($i:expr, $consumed:expr, mut $field:ident : $e:ident ? , $assemble:expr) => (
    // fixed: this rule previously expanded to `$field : call!($e) ? , ...`
    // without the `mut` qualifier, so a trailing `mut name: f?` field ended
    // up with an immutable binding
    chaining_parser!($i, $consumed, mut $field : call!($e) ? , $assemble);
  );
  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ? , $assemble:expr) => ({
    let res = $submac!($i, $($args)*);
    if let $crate::IResult::Incomplete(inc) = res {
      match inc {
        $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      }
    } else {
      let (mut $field,input) = if let $crate::IResult::Done(i,o) = res {
        (Some(o), i)
      } else {
        (None, $i)
      };
      $crate::IResult::Done(input, $assemble())
    }
  });
  // no parsers left: build the final value from the accumulated bindings
  ($i:expr, $consumed:expr, $assemble:expr) => (
    $crate::IResult::Done($i, $assemble())
  )
);
/// `alt!(I -> IResult<I,O> | I -> IResult<I,O> | ... | I -> IResult<I,O> ) => I -> IResult<I, O>`
/// try a list of parsers, return the result of the first successful one
///
/// If one of the parser returns Incomplete, alt will return Incomplete, to retry
/// once you get more input. Note that it is better for performance to know the
/// minimum size of data you need before you get into alt.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!( test, alt!( tag!( "abcd" ) | tag!( "efgh" ) ) );
/// let r1 = test(b"abcdefgh");
/// assert_eq!(r1, Done(&b"efgh"[..], &b"abcd"[..]));
/// let r2 = test(&b"efghijkl"[..]);
/// assert_eq!(r2, Done(&b"ijkl"[..], &b"efgh"[..]));
/// # }
/// ```
///
/// There is another syntax for alt allowing a block to manipulate the result:
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// #[derive(Debug,PartialEq,Eq)]
/// enum Tagged {
/// Abcd,
/// Efgh,
/// Took(usize)
/// }
/// named!(test<Tagged>, alt!(
/// tag!("abcd") => { |_| Tagged::Abcd }
/// | tag!("efgh") => { |_| Tagged::Efgh }
/// | take!(5) => { |res: &[u8]| Tagged::Took(res.len()) } // the closure takes the result as argument if the parser is successful
/// ));
/// let r1 = test(b"abcdefgh");
/// assert_eq!(r1, Done(&b"efgh"[..], Tagged::Abcd));
/// let r2 = test(&b"efghijkl"[..]);
/// assert_eq!(r2, Done(&b"ijkl"[..], Tagged::Efgh));
/// let r3 = test(&b"mnopqrst"[..]);
/// assert_eq!(r3, Done(&b"rst"[..], Tagged::Took(5)));
/// # }
/// ```
#[macro_export]
macro_rules! alt (
  ($i:expr, $($rest:tt)*) => (
    {
      // delegate to the internal recursive implementation
      alt_parser!($i, $($rest)*)
    }
  );
);
/// Internal parser, do not use directly
#[doc(hidden)]
#[macro_export]
macro_rules! alt_parser (
  // plain function branch: wrap the identifier in call!() and recurse
  ($i:expr, $e:ident | $($rest:tt)*) => (
    alt_parser!($i, call!($e) | $($rest)*);
  );
  // macro branch: return on success or incomplete input, otherwise try the
  // remaining alternatives on the original input
  ($i:expr, $subrule:ident!( $($args:tt)*) | $($rest:tt)*) => (
    {
      let res = $subrule!($i, $($args)*);
      match res {
        $crate::IResult::Done(_,_)     => res,
        $crate::IResult::Incomplete(_) => res,
        _                              => alt_parser!($i, $($rest)*)
      }
    }
  );
  // macro branch with a `=> { closure }` mapping applied to the output
  ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr } | $($rest:tt)+) => (
    {
      match $subrule!( $i, $($args)* ) {
        $crate::IResult::Done(i,o)     => $crate::IResult::Done(i,$gen(o)),
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Error(_)      => {
          alt_parser!($i, $($rest)*)
        }
      }
    }
  );
  // function forms with a mapping closure normalize to the macro forms
  ($i:expr, $e:ident => { $gen:expr } | $($rest:tt)*) => (
    alt_parser!($i, call!($e) => { $gen } | $($rest)*);
  );
  ($i:expr, $e:ident => { $gen:expr }) => (
    alt_parser!($i, call!($e) => { $gen });
  );
  // last branch with a mapping closure: an error here exhausts the
  // alternatives and falls through to the empty rule below
  ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr }) => (
    {
      match $subrule!( $i, $($args)* ) {
        $crate::IResult::Done(i,o)     => $crate::IResult::Done(i,$gen(o)),
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Error(_)      => {
          alt_parser!($i)
        }
      }
    }
  );
  // last branch without mapping
  ($i:expr, $e:ident) => (
    alt_parser!($i, call!($e));
  );
  ($i:expr, $subrule:ident!( $($args:tt)*)) => (
    {
      match $subrule!( $i, $($args)* ) {
        $crate::IResult::Done(i,o)     => $crate::IResult::Done(i,o),
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Error(_)      => {
          alt_parser!($i)
        }
      }
    }
  );
  // every alternative failed: report an Alt error at the original position
  ($i:expr) => (
    $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Alt,$i))
  );
);
/// `switch!(I -> IResult<I,P>, P => I -> IResult<I,O> | ... | P => I -> IResult<I,O> ) => I -> IResult<I, O>`
/// choose the next parser depending on the result of the first one, if successful
///
#[macro_export]
macro_rules! switch (
  ($i:expr, $submac:ident!( $($args:tt)*), $($p:pat => $subrule:ident!( $($args2:tt)* ))|*) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e)      => $crate::IResult::Error(e),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i, o)    => {
          // dispatch on the first parser's output
          match o {
            $($p => $subrule!(i, $($args2)*)),*,
            // no branch matched the produced value
            _    => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Switch,i))
          }
        }
      }
    }
  );
  ($i:expr, $e:ident, $($rest:tt)*) => (
    {
      // fixed: this shorthand previously expanded to `call!(e)`, referencing
      // a literal identifier `e` instead of the matched parser `$e`
      switch!($i, call!($e), $($rest)*)
    }
  );
);
/// `opt!(I -> IResult<I,O>) => I -> IResult<I, Option<O>>`
/// make the underlying parser optional
///
/// returns an Option of the returned type. This parser never fails
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!( o<&[u8], Option<&[u8]> >, opt!( tag!( "abcd" ) ) );
///
/// let a = b"abcdef";
/// let b = b"bcdefg";
/// assert_eq!(o(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));
/// assert_eq!(o(&b[..]), Done(&b"bcdefg"[..], None));
/// # }
/// ```
#[macro_export]
macro_rules! opt(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      // a successful child parse yields Some(output); any failure or
      // incomplete input falls back to the untouched input with None,
      // so this combinator never fails
      if let $crate::IResult::Done(i,o) = $submac!($i, $($args)*) {
        $crate::IResult::Done(i, Some(o))
      } else {
        $crate::IResult::Done($i, None)
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $f:expr) => (
    opt!($i, call!($f));
  );
);
/// `opt_res!(I -> IResult<I,O>) => I -> IResult<I, Result<O, nom::Err>>`
/// make the underlying parser optional
///
/// returns a Result, with Err containing the parsing error
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!( o<&[u8], Result<&[u8], nom::Err<&[u8]> > >, opt_res!( tag!( "abcd" ) ) );
///
/// let a = b"abcdef";
/// let b = b"bcdefg";
/// assert_eq!(o(&a[..]), Done(&b"ef"[..], Ok(&b"abcd"[..])));
/// assert_eq!(o(&b[..]), Done(&b"bcdefg"[..], Err(Position(ErrorKind::Tag, &b[..]))));
/// # }
/// ```
#[macro_export]
macro_rules! opt_res (
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      // unlike opt!, the parse error is preserved in the Err payload, and
      // Incomplete is propagated instead of being turned into a success
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(i,o)     => $crate::IResult::Done(i, Ok(o)),
        $crate::IResult::Error(e)      => $crate::IResult::Done($i, Err(e)),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $f:expr) => (
    opt_res!($i, call!($f));
  );
);
/// `cond!(bool, I -> IResult<I,O>) => I -> IResult<I, Option<O>>`
/// Conditional combinator
///
/// Wraps another parser and calls it if the
/// condition is met. This combinator returns
/// an Option of the return type of the child
/// parser.
///
/// This is especially useful if a parser depends
/// on the value return by a preceding parser in
/// a `chain!`.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use nom::IResult;
/// # fn main() {
/// let b = true;
/// let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8],
/// cond!( b, tag!("abcd") ))
/// );
///
/// let a = b"abcdef";
/// assert_eq!(f(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));
///
/// let b2 = false;
/// let f2:Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8],
/// cond!( b2, tag!("abcd") ))
/// );
/// assert_eq!(f2(&a[..]), Done(&b"abcdef"[..], None));
/// # }
/// ```
///
#[macro_export]
macro_rules! cond(
  ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => (
    {
      if $cond {
        // condition met: run the child parser; its error is swallowed and
        // mapped to None (input untouched), but Incomplete is propagated
        match $submac!($i, $($args)*) {
          $crate::IResult::Done(i,o)     => $crate::IResult::Done(i, Some(o)),
          $crate::IResult::Error(_)      => $crate::IResult::Done($i, None),
          $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
        }
      } else {
        // condition not met: succeed without consuming anything
        $crate::IResult::Done($i, None)
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $cond:expr, $f:expr) => (
    cond!($i, $cond, call!($f));
  );
);
/// `cond_reduce!(bool, I -> IResult<I,O>) => I -> IResult<I, O>`
/// Conditional combinator with error
///
/// Wraps another parser and calls it if the
/// condition is met. This combinator returns
/// an error if the condition is false
///
/// This is especially useful if a parser depends
/// on the value return by a preceding parser in
/// a `chain!`.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::{Err,ErrorKind};
/// # fn main() {
/// let b = true;
/// let f = closure!(&'static[u8],
/// cond_reduce!( b, tag!("abcd") )
/// );
///
/// let a = b"abcdef";
/// assert_eq!(f(&a[..]), Done(&b"ef"[..], &b"abcd"[..]));
///
/// let b2 = false;
/// let f2 = closure!(&'static[u8],
/// cond_reduce!( b2, tag!("abcd") )
/// );
/// assert_eq!(f2(&a[..]), Error(Err::Position(ErrorKind::CondReduce, &a[..])));
/// # }
/// ```
///
#[macro_export]
macro_rules! cond_reduce(
  ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => (
    {
      // when the condition holds, the child parser's result (Done, Error or
      // Incomplete) is forwarded untouched; the original code rebuilt each
      // variant identically, which is the same as returning the result
      if $cond {
        $submac!($i, $($args)*)
      } else {
        // condition not met: fail with a CondReduce error at this position
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::CondReduce, $i))
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $cond:expr, $f:expr) => (
    cond_reduce!($i, $cond, call!($f));
  );
);
/// `peek!(I -> IResult<I,O>) => I -> IResult<I, O>`
/// returns a result without consuming the input
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!(ptag, peek!( tag!( "abcd" ) ) );
///
/// let r = ptag(&b"abcdefgh"[..]);
/// assert_eq!(r, Done(&b"abcdefgh"[..], &b"abcd"[..]));
/// # }
/// ```
#[macro_export]
macro_rules! peek(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      // run the child parser but return the *original* input on success,
      // so nothing is consumed
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(_,o)     => $crate::IResult::Done($i, o),
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
      }
    }
  );
  ($i:expr, $f:expr) => (
    // fixed: this shorthand previously expanded to `call!(f)`, referencing a
    // literal identifier `f` instead of the matched expression `$f`
    peek!($i, call!($f));
  );
);
/// `tap!(name: I -> IResult<I,O> => { block }) => I -> IResult<I, O>`
/// allows access to the parser's result without affecting it
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use std::str;
/// # fn main() {
/// named!(ptag, tap!(res: tag!( "abcd" ) => { println!("recognized {}", str::from_utf8(res).unwrap()) } ) );
///
/// let r = ptag(&b"abcdefgh"[..]);
/// assert_eq!(r, Done(&b"efgh"[..], &b"abcd"[..]));
/// # }
/// ```
#[macro_export]
macro_rules! tap (
  ($i:expr, $name:ident : $submac:ident!( $($args:tt)* ) => $e:expr) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(i,o) => {
          // bind the output under the caller-chosen name, evaluate the side
          // effect expression, then return the output unchanged
          let $name = o;
          $e;
          $crate::IResult::Done(i, $name)
        },
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $name: ident: $f:expr => $e:expr) => (
    tap!($i, $name: call!($f) => $e);
  );
);
/// `pair!(I -> IResult<I,O>, I -> IResult<I,P>) => I -> IResult<I, (O,P)>`
/// pair(X,Y), returns (x,y)
///
#[macro_export]
macro_rules! pair(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      // run the first parser, then the second on the remaining input,
      // returning both outputs as a tuple
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1)   => {
          match $submac2!(i1, $($args2)*) {
            $crate::IResult::Error(a)      => $crate::IResult::Error(a),
            $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
            $crate::IResult::Done(i2,o2)   => {
              $crate::IResult::Done(i2, (o1, o2))
            }
          }
        },
      }
    }
  );
  // expression/macro argument permutations all normalize to the macro form
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    pair!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    pair!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    pair!($i, call!($f), call!($g));
  );
);
/// `separated_pair!(I -> IResult<I,O>, I -> IResult<I, T>, I -> IResult<I,P>) => I -> IResult<I, (O,P)>`
/// separated_pair(X,sep,Y) returns (x,y)
#[macro_export]
macro_rules! separated_pair(
  ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => (
    {
      // parse the first element, then hand its output and the remaining
      // arguments (separator, second element) to separated_pair1!
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1)   => {
          separated_pair1!(i1, o1, $($rest)*)
        }
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $f:expr, $($rest:tt)+) => (
    separated_pair!($i, call!($f), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[doc(hidden)]
#[macro_export]
macro_rules! separated_pair1(
  ($i:expr, $res1:ident, $submac2:ident!( $($args2:tt)* ), $($rest:tt)+) => (
    {
      // parse and discard the separator, keeping the first result around
      match $submac2!($i, $($args2)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i2,_)    => {
          separated_pair2!(i2, $res1, $($rest)*)
        }
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $res1:ident, $g:expr, $($rest:tt)+) => (
    separated_pair1!($i, $res1, call!($g), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[doc(hidden)]
#[macro_export]
macro_rules! separated_pair2(
  ($i:expr, $res1:ident, $submac3:ident!( $($args3:tt)* )) => (
    {
      // parse the second element and pair it with the saved first result
      match $submac3!($i, $($args3)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i3,o3)   => {
          $crate::IResult::Done(i3, ($res1, o3))
        }
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $res1:ident, $h:expr) => (
    separated_pair2!($i, $res1, call!($h));
  );
);
/// `preceded!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, O>`
/// preceded(opening, X) returns X
#[macro_export]
macro_rules! preceded(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        // the prefix matched: its output is dropped, and the second parser's
        // result is forwarded as-is (the original code rebuilt each variant
        // of the second result identically, which is the same thing)
        $crate::IResult::Done(i1,_)    => $submac2!(i1, $($args2)*),
      }
    }
  );
  // expression/macro argument permutations all normalize to the macro form
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    preceded!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    preceded!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    preceded!($i, call!($f), call!($g));
  );
);
/// `terminated!(I -> IResult<I,O>, I -> IResult<I,T>) => I -> IResult<I, O>`
/// terminated(X, closing) returns X
#[macro_export]
macro_rules! terminated(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1)   => {
          // the first output is kept; the closing parser's output is dropped
          match $submac2!(i1, $($args2)*) {
            $crate::IResult::Error(a)      => $crate::IResult::Error(a),
            $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
            $crate::IResult::Done(i2,_)    => {
              $crate::IResult::Done(i2, o1)
            }
          }
        },
      }
    }
  );
  // expression/macro argument permutations all normalize to the macro form
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    terminated!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    terminated!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    terminated!($i, call!($f), call!($g));
  );
);
/// `delimited!(I -> IResult<I,T>, I -> IResult<I,O>, I -> IResult<I,U>) => I -> IResult<I, O>`
/// delimited(opening, X, closing) returns X
#[macro_export]
macro_rules! delimited(
  ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => (
    {
      // parse and discard the opening delimiter, then let delimited1!
      // handle the inner value and the closing delimiter
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,_)    => {
          delimited1!(i1, $($rest)*)
        }
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $f:expr, $($rest:tt)+) => (
    delimited!($i, call!($f), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[doc(hidden)]
#[macro_export]
macro_rules! delimited1(
  ($i:expr, $submac2:ident!( $($args2:tt)* ), $($rest:tt)+) => (
    {
      // parse the inner value and pass it along for delimited2! to return
      // once the closing delimiter has been consumed
      match $submac2!($i, $($args2)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i2,o2)   => {
          delimited2!(i2, o2, $($rest)*)
        }
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $g:expr, $($rest:tt)+) => (
    delimited1!($i, call!($g), $($rest)*);
  );
);
/// Internal parser, do not use directly
#[doc(hidden)]
#[macro_export]
macro_rules! delimited2(
  ($i:expr, $res2:ident, $submac3:ident!( $($args3:tt)* )) => (
    {
      // consume the closing delimiter and return the saved inner value
      match $submac3!($i, $($args3)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i3,_)    => {
          $crate::IResult::Done(i3, $res2)
        }
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $res2:ident, $h:expr) => (
    delimited2!($i, $res2, call!($h));
  );
);
/// `separated_list!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// separated_list(sep, X) returns Vec<X>
#[macro_export]
macro_rules! separated_list(
  ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => (
    {
      let mut res = Vec::new();
      let mut input = $i;
      // get the first element; failure to find one yields an empty list
      match $submac!(input, $($args2)*) {
        $crate::IResult::Error(_)      => $crate::IResult::Done(input, Vec::new()),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i,o)     => {
          // a first element that consumed nothing would loop forever below
          if i.len() == input.len() {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::SeparatedList,input))
          } else {
            res.push(o);
            input = i;
            loop {
              // get the separator first; stop if it fails or consumes nothing
              if let $crate::IResult::Done(i2,_) = $sep!(input, $($args)*) {
                if i2.len() == input.len() {
                  break;
                }
                input = i2;
                // get the element next; stop if it fails or consumes nothing
                if let $crate::IResult::Done(i3,o3) = $submac!(input, $($args2)*) {
                  if i3.len() == input.len() {
                    break;
                  }
                  res.push(o3);
                  input = i3;
                } else {
                  break;
                }
              } else {
                break;
              }
            }
            $crate::IResult::Done(input, res)
          }
        },
      }
    }
  );
  // expression/macro argument permutations all normalize to the macro form
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    separated_list!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    separated_list!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    separated_list!($i, call!($f), call!($g));
  );
);
/// `separated_nonempty_list!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// separated_nonempty_list(sep, X) returns Vec<X>
#[macro_export]
macro_rules! separated_nonempty_list(
  ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => (
    {
      let mut res = Vec::new();
      let mut input = $i;
      // get the first element; unlike separated_list!, its error propagates
      // because at least one element is required
      match $submac!(input, $($args2)*) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i,o)     => {
          // a first element that consumed nothing would loop forever below
          if i.len() == input.len() {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::SeparatedNonEmptyList,input))
          } else {
            res.push(o);
            input = i;
            loop {
              // separator, then element; stop when either fails or consumes
              // nothing
              if let $crate::IResult::Done(i2,_) = $sep!(input, $($args)*) {
                if i2.len() == input.len() {
                  break;
                }
                input = i2;
                if let $crate::IResult::Done(i3,o3) = $submac!(input, $($args2)*) {
                  if i3.len() == input.len() {
                    break;
                  }
                  res.push(o3);
                  input = i3;
                } else {
                  break;
                }
              } else {
                break;
              }
            }
            $crate::IResult::Done(input, res)
          }
        },
      }
    }
  );
  // expression/macro argument permutations all normalize to the macro form
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    separated_nonempty_list!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    separated_nonempty_list!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    separated_nonempty_list!($i, call!($f), call!($g));
  );
);
/// `many0!(I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// Applies the parser 0 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!(multi<&[u8], Vec<&[u8]> >, many0!( tag!( "abcd" ) ) );
///
/// let a = b"abcdabcdef";
/// let b = b"azerty";
///
/// let res = vec![&b"abcd"[..], &b"abcd"[..]];
/// assert_eq!(multi(&a[..]), Done(&b"ef"[..], res));
/// assert_eq!(multi(&b[..]), Done(&b"azerty"[..], Vec::new()));
/// # }
/// ```
/// 0 or more
#[macro_export]
macro_rules! many0(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      let mut res = Vec::new();
      let mut input = $i;
      // apply the child parser until it stops returning Done; the length
      // check guards against an infinite loop when a match consumes nothing
      while let $crate::IResult::Done(i,o) = $submac!(input, $($args)*) {
        if i.len() == input.len() {
          break;
        }
        res.push(o);
        input = i;
      }
      // zero matches is a success: the remaining input is returned unchanged
      $crate::IResult::Done(input, res)
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $f:expr) => (
    many0!($i, call!($f));
  );
);
/// `many1!(I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// Applies the parser 1 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done, Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!(multi<&[u8], Vec<&[u8]> >, many1!( tag!( "abcd" ) ) );
///
/// let a = b"abcdabcdef";
/// let b = b"azerty";
///
/// let res = vec![&b"abcd"[..], &b"abcd"[..]];
/// assert_eq!(multi(&a[..]), Done(&b"ef"[..], res));
/// assert_eq!(multi(&b[..]), Error(Position(ErrorKind::Many1,&b[..])));
/// # }
/// ```
#[macro_export]
macro_rules! many1(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      let mut res = Vec::new();
      let mut input = $i;
      // same accumulation loop as many0!: stop when the child parser fails,
      // returns Incomplete, or consumes no input
      while let $crate::IResult::Done(i,o) = $submac!(input, $($args)*) {
        if i.len() == input.len() {
          break;
        }
        res.push(o);
        input = i;
      }
      // unlike many0!, at least one element is required
      if res.is_empty() {
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Many1,$i))
      } else {
        $crate::IResult::Done(input, res)
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $f:expr) => (
    many1!($i, call!($f));
  );
);
/// `count!(I -> IResult<I,O>, nb) => I -> IResult<I, Vec<O>>`
/// Applies the child parser a specified number of times
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!(counter< Vec<&[u8]> >, count!( tag!( "abcd" ), 2 ) );
///
/// let a = b"abcdabcdabcdef";
/// let b = b"abcdefgh";
/// let res = vec![&b"abcd"[..], &b"abcd"[..]];
///
/// assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
/// assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
/// # }
/// ```
///
#[macro_export]
macro_rules! count(
  ($i:expr, $submac:ident!( $($args:tt)* ), $count: expr) => (
    {
      let mut input = $i;
      let mut res = Vec::with_capacity($count);
      let mut cnt: usize = 0;
      let mut err = false;
      loop {
        if cnt == $count {
          break
        }
        match $submac!(input, $($args)*) {
          $crate::IResult::Done(i,o) => {
            res.push(o);
            input = i;
            cnt = cnt + 1;
          },
          $crate::IResult::Error(_)  => {
            // remember the failure; it is reported after the loop at the
            // position of the original input
            err = true;
            break;
          },
          $crate::IResult::Incomplete(_) => {
            break;
          }
        }
      }
      if err {
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Count,$i))
      } else if cnt == $count {
        $crate::IResult::Done(input, res)
      } else {
        // stopped early on Incomplete: total needed size is unknown
        $crate::IResult::Incomplete($crate::Needed::Unknown)
      }
    }
  );
  // expression form: wrap in call!()
  ($i:expr, $f:expr, $count: expr) => (
    count!($i, call!($f), $count);
  );
);
/// `count_fixed!(O, I -> IResult<I,O>, nb) => I -> IResult<I, [O; nb]>`
/// Applies the child parser a fixed number of times and returns a fixed size array
/// The type must be specified and it must be `Copy`
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!(counter< [&[u8]; 2] >, count_fixed!( &[u8], tag!( "abcd" ), 2 ) );
/// // note: the element type must always be specified, and it must be `Copy`
///
/// let a = b"abcdabcdabcdef";
/// let b = b"abcdefgh";
/// let res = [&b"abcd"[..], &b"abcd"[..]];
///
/// assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
/// assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
/// # }
/// ```
///
#[macro_export]
macro_rules! count_fixed (
  ($i:expr, $typ:ty, $submac:ident!( $($args:tt)* ), $count: expr) => (
    {
      let mut input = $i;
      // SAFETY: `$typ` must be Copy, and thus having no destructor, this is
      // panic safe; every slot up to `cnt` is written before being read
      let mut res: [$typ; $count] = unsafe{[::std::mem::uninitialized(); $count as usize]};
      let mut cnt: usize = 0;
      let mut err = false;
      loop {
        if cnt == $count {
          break
        }
        match $submac!(input, $($args)*) {
          $crate::IResult::Done(i,o) => {
            res[cnt] = o;
            input = i;
            cnt = cnt + 1;
          },
          $crate::IResult::Error(_)  => {
            // remember the failure; reported after the loop at the original
            // input position
            err = true;
            break;
          },
          $crate::IResult::Incomplete(_) => {
            break;
          }
        }
      }
      if err {
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Count,$i))
      } else if cnt == $count {
        $crate::IResult::Done(input, res)
      } else {
        // stopped early on Incomplete: total needed size is unknown
        $crate::IResult::Incomplete($crate::Needed::Unknown)
      }
    }
  );
  // shorthand form: was `$f:ident`, which rejected paths and arbitrary
  // expressions; `$f:expr` matches the equivalent shorthand of `count!`
  ($i:expr, $typ:ty, $f:expr, $count: expr) => (
    count_fixed!($i, $typ, call!($f), $count);
  );
);
/// `length_value!(I -> IResult<I, nb>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// gets a number from the first parser, then applies the second parser that many times
#[macro_export]
macro_rules! length_value(
  ($i:expr, $f:expr, $g:expr) => (
    {
      match $f($i) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,nb)   => {
          // bytes consumed by the length parser itself
          let length_token     = $i.len() - i1.len();
          let mut input        = i1;
          let mut res          = Vec::new();
          let mut err          = false;
          let mut inc          = $crate::Needed::Unknown;
          loop {
            if res.len() == nb as usize {
              break;
            }
            match $g(input) {
              $crate::IResult::Done(i2,o2) => {
                res.push(o2);
                input = i2;
              },
              $crate::IResult::Error(_)      => {
                err = true;
                // fixed: `break` was missing here, so an element parse error
                // retried the same input forever (infinite loop)
                break;
              },
              $crate::IResult::Incomplete(a) => {
                inc = a;
                break;
              }
            }
          }
          if err {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::LengthValue,$i))
          } else if res.len() < nb as usize {
            // not enough elements: estimate the total size still needed
            match inc {
              $crate::Needed::Unknown      => $crate::IResult::Incomplete($crate::Needed::Unknown),
              $crate::Needed::Size(length) => $crate::IResult::Incomplete($crate::Needed::Size(length_token + nb as usize * length))
            }
          } else {
            $crate::IResult::Done(input, res)
          }
        }
      }
    }
  );
  // variant with an explicit per-element byte length used for the
  // Incomplete size estimation
  ($i:expr, $f:expr, $g:expr, $length:expr) => (
    {
      match $f($i) {
        $crate::IResult::Error(a)      => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,nb)   => {
          let length_token     = $i.len() - i1.len();
          let mut input        = i1;
          let mut res          = Vec::new();
          let mut err          = false;
          let mut inc          = $crate::Needed::Unknown;
          loop {
            if res.len() == nb as usize {
              break;
            }
            match $g(input) {
              $crate::IResult::Done(i2,o2) => {
                res.push(o2);
                input = i2;
              },
              $crate::IResult::Error(_)      => {
                err = true;
                // fixed: `break` was missing here as well; prevents an
                // infinite retry loop on error
                break;
              },
              $crate::IResult::Incomplete(a) => {
                inc = a;
                break;
              }
            }
          }
          if err {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::LengthValue,$i))
          } else if res.len() < nb as usize {
            match inc {
              $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
              $crate::Needed::Size(_) => $crate::IResult::Incomplete($crate::Needed::Size(length_token + nb as usize * $length))
            }
          } else {
            $crate::IResult::Done(input, res)
          }
        }
      }
    }
  );
);
// Unit tests for the combinator macros. `tag!` and `take!` are redefined
// locally because of module import order inside the crate.
#[cfg(test)]
mod tests {
  use internal::{Needed,IResult,Err};
  use internal::IResult::*;
  use internal::Err::*;
  use util::ErrorKind;

  // reproduce the tag and take macros, because of module import order
  macro_rules! tag (
    ($i:expr, $inp: expr) => (
      {
        #[inline(always)]
        fn as_bytes<T: $crate::AsBytes>(b: &T) -> &[u8] {
          b.as_bytes()
        }

        let expected = $inp;
        let bytes = as_bytes(&expected);

        let res : $crate::IResult<&[u8],&[u8]> = if bytes.len() > $i.len() {
          $crate::IResult::Incomplete($crate::Needed::Size(bytes.len()))
        } else if &$i[0..bytes.len()] == bytes {
          $crate::IResult::Done(&$i[bytes.len()..], &$i[0..bytes.len()])
        } else {
          $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Tag, $i))
        };
        res
      }
    );
  );

  macro_rules! take(
    ($i:expr, $count:expr) => (
      {
        let cnt = $count as usize;
        let res:$crate::IResult<&[u8],&[u8]> = if $i.len() < cnt {
          $crate::IResult::Incomplete($crate::Needed::Size(cnt))
        } else {
          $crate::IResult::Done(&$i[cnt..],&$i[0..cnt])
        };
        res
      }
    );
  );

  mod pub_named_mod {
    named!(pub tst, tag!("abcd"));
  }

  #[test]
  fn pub_named_test() {
    let a = &b"abcd"[..];
    let res = pub_named_mod::tst(a);
    assert_eq!(res, Done(&b""[..], a));
  }

  #[test]
  fn apply_test() {
    fn sum2(a:u8, b:u8)       -> u8 { a + b }
    fn sum3(a:u8, b:u8, c:u8) -> u8 { a + b + c }
    let a = apply!(1, sum2, 2);
    let b = apply!(1, sum3, 2, 3);

    assert_eq!(a, 3);
    assert_eq!(b, 6);
  }

  #[derive(PartialEq,Eq,Debug)]
  struct B {
    a: u8,
    b: u8
  }

  #[test]
  fn chain2() {
    fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
    fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
    named!(f<&[u8],B>,
      chain!(
        tag!("abcd")  ~
        tag!("abcd")? ~
        aa: ret_int1  ~
        tag!("efgh")  ~
        bb: ret_int2  ~
        tag!("efgh")  ,
        ||{B{a: aa, b: bb}}
      )
    );

    let r = f(&b"abcdabcdefghefghX"[..]);
    assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));

    let r2 = f(&b"abcdefghefghX"[..]);
    assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
  }

  #[test]
  fn nested_chain() {
    fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
    fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
    named!(f<&[u8],B>,
      chain!(
        chain!(
          tag!("abcd")  ~
          tag!("abcd")? ,
          || {}
        )            ~
        aa: ret_int1 ~
        tag!("efgh") ~
        bb: ret_int2 ~
        tag!("efgh") ,
        ||{B{a: aa, b: bb}}
      )
    );

    let r = f(&b"abcdabcdefghefghX"[..]);
    assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));

    let r2 = f(&b"abcdefghefghX"[..]);
    assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
  }

  #[derive(PartialEq,Eq,Debug)]
  struct C {
    a: u8,
    b: Option<u8>
  }

  #[test]
  fn chain_mut() {
    fn ret_b1_2(i:&[u8]) -> IResult<&[u8], B> { Done(i,B{a:1,b:2}) };
    named!(f<&[u8],B>,
      chain!(
        tag!("abcd")     ~
        tag!("abcd")?    ~
        tag!("efgh")     ~
        mut bb: ret_b1_2 ~
        tag!("efgh")     ,
        ||{
          bb.b = 3;
          bb
        }
      )
    );

    let r = f(&b"abcdabcdefghefghX"[..]);
    assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 3}));
  }

  #[test]
  fn chain_opt() {
    named!(y, tag!("efgh"));
    fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
    named!(ret_y<&[u8], u8>, map!(y, |_| 2));

    named!(f<&[u8],C>,
      chain!(
        tag!("abcd") ~
        aa: ret_int1 ~
        bb: ret_y?   ,
        ||{C{a: aa, b: bb}}
      )
    );

    let r = f(&b"abcdefghX"[..]);
    assert_eq!(r, Done(&b"X"[..], C{a: 1, b: Some(2)}));

    let r2 = f(&b"abcdWXYZ"[..]);
    assert_eq!(r2, Done(&b"WXYZ"[..], C{a: 1, b: None}));

    let r3 = f(&b"abcdX"[..]);
    assert_eq!(r3, Incomplete(Needed::Size(8)));
  }

  use util::{error_to_list, add_error_pattern, print_error};

  // maps an error chain to a human-readable message, Merr-style
  fn error_to_string<P>(e: &Err<P>) -> &'static str {
    let v:Vec<ErrorKind> = error_to_list(e);
    // do it this way if you can use slice patterns
    /*
    match &v[..] {
      [ErrorKind::Custom(42), ErrorKind::Tag]                         => "missing `ijkl` tag",
      [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] => "missing `mnop` tag after `ijkl`",
      _            => "unrecognized error"
    }
    */
    if &v[..] == [ErrorKind::Custom(42),ErrorKind::Tag] {
      "missing `ijkl` tag"
    } else if &v[..] == [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] {
      "missing `mnop` tag after `ijkl`"
    } else {
      "unrecognized error"
    }
  }

  // do it this way if you can use box patterns
  /*use std::str;
  fn error_to_string(e:Err) -> String
    match e {
      NodePosition(ErrorKind::Custom(42), i1, box Position(ErrorKind::Tag, i2)) => {
        format!("missing `ijkl` tag, found '{}' instead", str::from_utf8(i2).unwrap())
      },
      NodePosition(ErrorKind::Custom(42), i1, box NodePosition(ErrorKind::Custom(128), i2, box Position(ErrorKind::Tag, i3))) => {
        format!("missing `mnop` tag after `ijkl`, found '{}' instead", str::from_utf8(i3).unwrap())
      },
      _ => "unrecognized error".to_string()
    }
  }*/

  use std::collections;

  #[test]
  fn err() {
    named!(err_test, alt!(
      tag!("abcd") |
      preceded!(tag!("efgh"), error!(ErrorKind::Custom(42),
          chain!(
                 tag!("ijkl")              ~
            res: error!(ErrorKind::Custom(128), tag!("mnop")) ,
            || { res }
          )
        )
      )
    ));
    let a = &b"efghblah"[..];
    let b = &b"efghijklblah"[..];
    let c = &b"efghijklmnop"[..];

    let blah = &b"blah"[..];

    let res_a = err_test(a);
    let res_b = err_test(b);
    let res_c = err_test(c);
    assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
    assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..], Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah)))))));
    assert_eq!(res_c, Done(&b""[..], &b"mnop"[..]));

    // Merr-like error matching
    let mut err_map = collections::HashMap::new();
    assert!(add_error_pattern(&mut err_map, err_test(&b"efghpouet"[..]), "missing `ijkl` tag"));
    assert!(add_error_pattern(&mut err_map, err_test(&b"efghijklpouet"[..]), "missing `mnop` tag after `ijkl`"));

    let res_a2 = res_a.clone();
    match res_a {
      Error(e) => {
        assert_eq!(error_to_list(&e), [ErrorKind::Custom(42), ErrorKind::Tag]);
        assert_eq!(error_to_string(&e), "missing `ijkl` tag");
        assert_eq!(err_map.get(&error_to_list(&e)), Some(&"missing `ijkl` tag"));
      },
      _ => panic!()
    };

    let res_b2 = res_b.clone();
    match res_b {
      Error(e) => {
        assert_eq!(error_to_list(&e), [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag]);
        assert_eq!(error_to_string(&e), "missing `mnop` tag after `ijkl`");
        assert_eq!(err_map.get(&error_to_list(&e)), Some(&"missing `mnop` tag after `ijkl`"));
      },
      _ => panic!()
    };

    print_error(a, res_a2);
    print_error(b, res_b2);
  }

  #[test]
  fn add_err() {
    named!(err_test,
      preceded!(tag!("efgh"), add_error!(ErrorKind::Custom(42),
          chain!(
                 tag!("ijkl")              ~
            res: add_error!(ErrorKind::Custom(128), tag!("mnop")) ,
            || { res }
          )
        )
      )
    );
    let a = &b"efghblah"[..];
    let b = &b"efghijklblah"[..];
    let c = &b"efghijklmnop"[..];

    let blah = &b"blah"[..];

    let res_a = err_test(a);
    let res_b = err_test(b);
    let res_c = err_test(c);
    assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
    assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..], Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah)))))));
    assert_eq!(res_c, Done(&b""[..], &b"mnop"[..]));
  }

  #[test]
  fn complete() {
    named!(err_test,
      chain!(
             tag!("ijkl")              ~
        res: complete!(tag!("mnop")) ,
        || { res }
      )
    );
    let a = &b"ijklmn"[..];

    let res_a = err_test(a);
    assert_eq!(res_a, Error(Position(ErrorKind::Complete, &b"mn"[..])));
  }

  #[test]
  fn alt() {
    fn work(input: &[u8]) -> IResult<&[u8],&[u8], &'static str> {
      Done(&b""[..], input)
    }

    #[allow(unused_variables)]
    fn dont_work(input: &[u8]) -> IResult<&[u8],&[u8],&'static str> {
      Error(Code(ErrorKind::Custom("abcd")))
    }

    fn work2(input: &[u8]) -> IResult<&[u8],&[u8], &'static str> {
      Done(input, &b""[..])
    }

    fn alt1(i:&[u8]) -> IResult<&[u8],&[u8], &'static str> {
      alt!(i, dont_work | dont_work)
    }
    fn alt2(i:&[u8]) -> IResult<&[u8],&[u8], &'static str> {
      alt!(i, dont_work | work)
    }
    fn alt3(i:&[u8]) -> IResult<&[u8],&[u8], &'static str> {
      alt!(i, dont_work | dont_work | work2 | dont_work)
    }
    //named!(alt1, alt!(dont_work | dont_work));
    //named!(alt2, alt!(dont_work | work));
    //named!(alt3, alt!(dont_work | dont_work | work2 | dont_work));

    let a = &b"abcd"[..];
    assert_eq!(alt1(a), Error(Position(ErrorKind::Alt, a)));
    assert_eq!(alt2(a), Done(&b""[..], a));
    assert_eq!(alt3(a), Done(a, &b""[..]));

    named!(alt4, alt!(tag!("abcd") | tag!("efgh")));
    let b = &b"efgh"[..];
    assert_eq!(alt4(a), Done(&b""[..], a));
    assert_eq!(alt4(b), Done(&b""[..], b));

    // test the alternative syntax
    named!(alt5<bool>, alt!(tag!("abcd") => { |_| false } | tag!("efgh") => { |_| true }));
    assert_eq!(alt5(a), Done(&b""[..], false));
    assert_eq!(alt5(b), Done(&b""[..], true));
  }

  #[test]
  fn alt_incomplete() {
    named!(alt1, alt!(tag!("a") | tag!("bc") | tag!("def")));

    let a = &b""[..];
    assert_eq!(alt1(a), Incomplete(Needed::Size(1)));
    let a = &b"b"[..];
    assert_eq!(alt1(a), Incomplete(Needed::Size(2)));
    let a = &b"bcd"[..];
    assert_eq!(alt1(a), Done(&b"d"[..], &b"bc"[..]));
    let a = &b"cde"[..];
    assert_eq!(alt1(a), Error(Position(ErrorKind::Alt, a)));
    let a = &b"de"[..];
    assert_eq!(alt1(a), Incomplete(Needed::Size(3)));
    let a = &b"defg"[..];
    assert_eq!(alt1(a), Done(&b"g"[..], &b"def"[..]));
  }

  #[test]
  fn switch() {
    named!(sw,
      switch!(take!(4),
        b"abcd" => take!(2) |
        b"efgh" => take!(4)
      )
    );

    let a = &b"abcdefgh"[..];
    assert_eq!(sw(a), Done(&b"gh"[..], &b"ef"[..]));

    let b = &b"efghijkl"[..];
    assert_eq!(sw(b), Done(&b""[..], &b"ijkl"[..]));
    let c = &b"afghijkl"[..];
    assert_eq!(sw(c), Error(Position(ErrorKind::Switch, &b"ijkl"[..])));
  }

  #[test]
  fn opt() {
    named!(o<&[u8],Option<&[u8]> >, opt!(tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"bcdefg"[..];
    assert_eq!(o(a), Done(&b"ef"[..], Some(&b"abcd"[..])));
    assert_eq!(o(b), Done(&b"bcdefg"[..], None));
  }

  #[test]
  fn opt_res() {
    named!(o<&[u8], Result<&[u8], Err<&[u8]>> >, opt_res!(tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"bcdefg"[..];
    assert_eq!(o(a), Done(&b"ef"[..], Ok(&b"abcd"[..])));
    assert_eq!(o(b), Done(&b"bcdefg"[..], Err(Position(ErrorKind::Tag, b))));
  }

  #[test]
  fn cond() {
    let b = true;
    let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b, tag!("abcd") ) ));

    let a = b"abcdef";
    assert_eq!(f(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));

    let b2 = false;
    let f2: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b2, tag!("abcd") ) ));
    //let f2 = closure!(&'static [u8], cond!( b2, tag!("abcd") ) );
    assert_eq!(f2(&a[..]), Done(&b"abcdef"[..], None));
  }

  #[test]
  fn cond_wrapping() {
    // Test that cond!() will wrap a given identifier in the call!() macro.
    named!(silly, tag!("foo"));

    let b = true;
    //let f = closure!(&'static [u8], cond!( b, silly ) );
    let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b, silly ) ));
    assert_eq!(f(b"foobar"), Done(&b"bar"[..], Some(&b"foo"[..])));
  }

  #[test]
  fn peek() {
    named!(ptag<&[u8],&[u8]>, peek!(tag!("abcd")));

    let r1 = ptag(&b"abcdefgh"[..]);
    assert_eq!(r1, Done(&b"abcdefgh"[..], &b"abcd"[..]));

    let r1 = ptag(&b"efgh"[..]);
    assert_eq!(r1, Error(Position(ErrorKind::Tag,&b"efgh"[..])));
  }

  #[test]
  fn pair() {
    named!(p<&[u8],(&[u8], &[u8])>, pair!(tag!("abcd"), tag!("efgh")));

    let r1 = p(&b"abcdefghijkl"[..]);
    assert_eq!(r1, Done(&b"ijkl"[..], (&b"abcd"[..], &b"efgh"[..])));
  }

  #[test]
  fn separated_pair() {
    named!(p<&[u8],(&[u8], &[u8])>, separated_pair!(tag!("abcd"), tag!(","), tag!("efgh")));

    let r1 = p(&b"abcd,efghijkl"[..]);
    assert_eq!(r1, Done(&b"ijkl"[..], (&b"abcd"[..], &b"efgh"[..])));
  }

  #[test]
  fn preceded() {
    named!(p<&[u8], &[u8]>, preceded!(tag!("abcd"), tag!("efgh")));

    let r1 = p(&b"abcdefghijkl"[..]);
    assert_eq!(r1, Done(&b"ijkl"[..], &b"efgh"[..]));
  }

  #[test]
  fn terminated() {
    named!(p<&[u8], &[u8]>, terminated!(tag!("abcd"), tag!("efgh")));

    let r1 = p(&b"abcdefghijkl"[..]);
    assert_eq!(r1, Done(&b"ijkl"[..], &b"abcd"[..]));
  }

  #[test]
  fn delimited() {
    named!(p<&[u8], &[u8]>, delimited!(tag!("abcd"), tag!("efgh"), tag!("ij")));

    let r1 = p(&b"abcdefghijkl"[..]);
    assert_eq!(r1, Done(&b"kl"[..], &b"efgh"[..]));
  }

  #[test]
  fn separated_list() {
    named!(multi<&[u8],Vec<&[u8]> >, separated_list!(tag!(","), tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"abcd,abcdef"[..];
    let c = &b"azerty"[..];

    let res1 = vec![&b"abcd"[..]];
    assert_eq!(multi(a), Done(&b"ef"[..], res1));
    let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
    assert_eq!(multi(b), Done(&b"ef"[..], res2));
    assert_eq!(multi(c), Done(&b"azerty"[..], Vec::new()));
  }

  #[test]
  fn separated_nonempty_list() {
    named!(multi<&[u8],Vec<&[u8]> >, separated_nonempty_list!(tag!(","), tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"abcd,abcdef"[..];
    let c = &b"azerty"[..];

    let res1 = vec![&b"abcd"[..]];
    assert_eq!(multi(a), Done(&b"ef"[..], res1));
    let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
    assert_eq!(multi(b), Done(&b"ef"[..], res2));
    assert_eq!(multi(c), Error(Position(ErrorKind::Tag,c)));
  }

  #[test]
  fn many0() {
    named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"abcdabcdef"[..];
    let c = &b"azerty"[..];

    let res1 = vec![&b"abcd"[..]];
    assert_eq!(multi(a), Done(&b"ef"[..], res1));
    let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
    assert_eq!(multi(b), Done(&b"ef"[..], res2));
    assert_eq!(multi(c), Done(&b"azerty"[..], Vec::new()));
  }

  #[cfg(feature = "nightly")]
  use test::Bencher;

  #[cfg(feature = "nightly")]
  #[bench]
  fn many0_bench(b: &mut Bencher) {
    named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd")));
    b.iter(|| {
      multi(&b"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"[..])
    });
  }

  #[test]
  fn many1() {
    named!(multi<&[u8],Vec<&[u8]> >, many1!(tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"abcdabcdef"[..];
    let c = &b"azerty"[..];
    let res1 = vec![&b"abcd"[..]];
    assert_eq!(multi(a), Done(&b"ef"[..], res1));
    let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
    assert_eq!(multi(b), Done(&b"ef"[..], res2));
    assert_eq!(multi(c), Error(Position(ErrorKind::Many1,c)));
  }

  #[test]
  fn infinite_many() {
    fn tst(input: &[u8]) -> IResult<&[u8], &[u8]> {
      println!("input: {:?}", input);
      Error(Position(ErrorKind::Custom(0),input))
    }

    // should not go into an infinite loop
    named!(multi0<&[u8],Vec<&[u8]> >, many0!(tst));
    let a = &b"abcdef"[..];
    assert_eq!(multi0(a), Done(a, Vec::new()));

    named!(multi1<&[u8],Vec<&[u8]> >, many1!(tst));
    let a = &b"abcdef"[..];
    assert_eq!(multi1(a), Error(Position(ErrorKind::Many1,a)));
  }

  #[test]
  fn count() {
    fn counter(input: &[u8]) -> IResult<&[u8], Vec<&[u8]>> {
      let size: usize = 2;
      count!(input, tag!( "abcd" ), size )
    }

    let a = b"abcdabcdabcdef";
    let b = b"abcdefgh";
    let res = vec![&b"abcd"[..], &b"abcd"[..]];

    assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
    assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
  }

  #[test]
  fn count_zero() {
    fn counter(input: &[u8]) -> IResult<&[u8], Vec<&[u8]>> {
      let size: usize = 0;
      count!(input, tag!( "abcd" ), size )
    }

    let a = b"abcdabcdabcdef";
    let res: Vec<&[u8]> = Vec::new();

    assert_eq!(counter(&a[..]), Done(&b"abcdabcdabcdef"[..], res));
  }

  #[test]
  fn count_fixed() {
    //named!(counter< [&[u8]; 2], u32 >, count_fixed!( &[u8], tag!( "abcd" ), 2 ) );
    fn counter(input:&[u8]) -> IResult<&[u8], [&[u8]; 2], () > {
      count_fixed!(input, &[u8], tag!( "abcd" ), 2 )
    }

    let a = b"abcdabcdabcdef";
    let b = b"abcdefgh";
    let res = [&b"abcd"[..], &b"abcd"[..]];

    assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
    assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
  }

  use nom::{le_u16,eof};
  #[allow(dead_code)]
  pub fn compile_count_fixed(input: &[u8]) -> IResult<&[u8], ()> {
    chain!(input,
      tag!("abcd")                   ~
      count_fixed!( u16, le_u16, 4 ) ~
      eof                            ,
      || { () }
    )
  }

  #[test]
  fn count_fixed_no_type() {
    //named!(counter< [&[u8]; 2], u32 >, count_fixed!( &[u8], tag!( "abcd" ), 2 ) );
    // NOTE(review): this is currently identical to count_fixed above and does
    // not actually exercise the type-less form — confirm intent.
    fn counter(input:&[u8]) -> IResult<&[u8], [&[u8]; 2], () > {
      count_fixed!(input, &[u8], tag!( "abcd" ), 2 )
    }

    let a = b"abcdabcdabcdef";
    let b = b"abcdefgh";
    let res = [&b"abcd"[..], &b"abcd"[..]];

    assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
    assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
  }

  use nom::{be_u8,be_u16};
  #[test]
  fn length_value_test() {
    named!(tst1<&[u8], Vec<u16> >, length_value!(be_u8, be_u16));
    named!(tst2<&[u8], Vec<u16> >, length_value!(be_u8, be_u16, 2));

    let i1 = vec![0, 5, 6];
    let i2 = vec![1, 5, 6, 3];
    let i3 = vec![2, 5, 6, 3];
    let i4 = vec![2, 5, 6, 3, 4, 5, 7];
    let i5 = vec![3, 5, 6, 3, 4, 5];

    let r1: Vec<u16> = Vec::new();
    let r2: Vec<u16> = vec![1286];
    let r4: Vec<u16> = vec![1286, 772];
    assert_eq!(tst1(&i1), IResult::Done(&i1[1..], r1));
    assert_eq!(tst1(&i2), IResult::Done(&i2[3..], r2));
    assert_eq!(tst1(&i3), IResult::Incomplete(Needed::Size(5)));
    assert_eq!(tst1(&i4), IResult::Done(&i4[5..], r4));
    assert_eq!(tst1(&i5), IResult::Incomplete(Needed::Size(7)));

    let r6: Vec<u16> = Vec::new();
    let r7: Vec<u16> = vec![1286];
    let r9: Vec<u16> = vec![1286, 772];
    assert_eq!(tst2(&i1), IResult::Done(&i1[1..], r6));
    assert_eq!(tst2(&i2), IResult::Done(&i2[3..], r7));
    assert_eq!(tst2(&i3), IResult::Incomplete(Needed::Size(5)));
    assert_eq!(tst2(&i4), IResult::Done(&i4[5..], r9));
    // FIX: was `tst1(&i5)` (copy-paste from the block above), leaving the
    // fixed-length variant untested on incomplete input; Size(7) = 1 + 3 * 2
    assert_eq!(tst2(&i5), IResult::Incomplete(Needed::Size(7)));
  }

  #[test]
  fn chain_incomplete() {
    let res = chain!(&b"abcdefgh"[..],
      a: take!(4) ~
      b: take!(8),
      ||{(a,b )}
    );

    assert_eq!(res, IResult::Incomplete(Needed::Size(12)));
  }
}
// more docs
//! Macro combinators
//!
//! Macros are used to make combination easier,
//! since they often do not depend on the type
//! of the data they manipulate or return.
//!
//! There is a trick to make them easier to assemble,
//! combinators are defined like this:
//!
//! ```ignore
//! macro_rules! tag (
//! ($i:expr, $inp: expr) => (
//! {
//! ...
//! }
//! );
//! );
//! ```
//!
//! But when they are used in other combinators, they are
//! called like this:
//!
//! ```ignore
//! named!(my_function, tag!("abcd"));
//! ```
//!
//! Internally, other combinators will rewrite
//! that call to pass the input as first argument:
//!
//! ```ignore
//! macro_rules! named (
//! ($name:ident, $submac:ident!( $($args:tt)* )) => (
//! fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<'a,&[u8], &[u8]> {
//! $submac!(i, $($args)*)
//! }
//! );
//! );
//! ```
//!
//! If you want to call a combinator directly, you can
//! do it like this:
//!
//! ```ignore
//! let res = { tag!(input, "abcd"); }
//! ```
//!
//! Combinators must have a specific variant for
//! non-macro arguments. Example: passing a function
//! to take_while! instead of another combinator.
//!
//! ```ignore
//! macro_rules! take_while(
//! ($input:expr, $submac:ident!( $($args:tt)* )) => (
//! {
//! ...
//! }
//! );
//!
//! // wrap the function in a macro to pass it to the main implementation
//! ($input:expr, $f:expr) => (
//! take_while!($input, call!($f));
//! );
//! );
//! ```
/// Wraps a parser in a closure
///
/// With an explicit input type, `closure!(&[u8], tag!("abcd"))` expands to
/// `|i: &[u8]| { tag!(i, "abcd") }`; without one, the argument type is inferred.
#[macro_export]
macro_rules! closure (
  ($ty:ty, $submac:ident!( $($args:tt)* )) => (
    |i: $ty| { $submac!(i, $($args)*) }
  );
  ($submac:ident!( $($args:tt)* )) => (
    |i| { $submac!(i, $($args)*) }
  );
);
/// Makes a function from a parser combination
///
/// The type can be set up if the compiler needs
/// more information
///
/// ```ignore
/// named!(my_function( &[u8] ) -> &[u8], tag!("abcd"));
/// // first type parameter is input, second is output
/// named!(my_function<&[u8], &[u8]>, tag!("abcd"));
/// // will have &[u8] as input type, &[u8] as output type
/// named!(my_function, tag!("abcd"));
/// // will use &[u8] as input type (use this if the compiler
/// // complains about lifetime issues
/// named!(my_function<&[u8]>, tag!("abcd"));
/// //prefix them with 'pub' to make the functions public
/// named!(pub my_function, tag!("abcd"));
/// ```
#[macro_export]
macro_rules! named (
    // explicit input and output types, arrow syntax
    ($name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => (
        fn $name( i: $i ) -> $crate::IResult<$i,$o> {
            $submac!(i, $($args)*)
        }
    );
    // explicit input and output types, angle-bracket syntax
    ($name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
        fn $name( i: $i ) -> $crate::IResult<$i, $o> {
            $submac!(i, $($args)*)
        }
    );
    // output type only; input defaults to &[u8] with an explicit lifetime
    ($name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => (
        fn $name<'a>( i: &'a[u8] ) -> $crate::IResult<&'a [u8], $o> {
            $submac!(i, $($args)*)
        }
    );
    // caller-provided lifetime plus input and output types
    ($name:ident<$life:item,$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
        fn $name<$life>( i: $i ) -> $crate::IResult<$life, $i, $o> {
            $submac!(i, $($args)*)
        }
    );
    // no types at all: &[u8] -> &[u8]
    ($name:ident, $submac:ident!( $($args:tt)* )) => (
        fn $name( i: &[u8] ) -> $crate::IResult<&[u8], &[u8]> {
            $submac!(i, $($args)*)
        }
    );
    // `pub` variants of the arms above
    (pub $name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => (
        pub fn $name( i: $i ) -> $crate::IResult<$i,$o> {
            $submac!(i, $($args)*)
        }
    );
    (pub $name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => (
        pub fn $name( i: $i ) -> $crate::IResult<$i, $o> {
            $submac!(i, $($args)*)
        }
    );
    (pub $name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => (
        pub fn $name( i: &[u8] ) -> $crate::IResult<&[u8], $o> {
            $submac!(i, $($args)*)
        }
    );
    (pub $name:ident, $submac:ident!( $($args:tt)* )) => (
        pub fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<&[u8], &[u8]> {
            $submac!(i, $($args)*)
        }
    );
);
/// Used to wrap common expressions and function as macros
///
/// `call!(f)` expands to `f(input)`; extra arguments are forwarded after the input.
#[macro_export]
macro_rules! call (
  ($i:expr, $fun:expr) => ( $fun( $i ) );
  ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) );
);

/// emulate function currying: `apply!(my_function, arg1, arg2, ...)` becomes `my_function(input, arg1, arg2, ...)`
///
/// Accepts any number of extra arguments (the expansion is variadic)
#[macro_export]
macro_rules! apply (
  ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) );
);
/// Prevents backtracking if the child parser fails
///
/// This parser will do an early return instead of sending
/// its result to the parent parser.
///
/// If another `error!` combinator is present in the parent
/// chain, the error will be wrapped and another early
/// return will be made.
///
/// This makes it easy to build report on which parser failed,
/// where it failed in the input, and the chain of parsers
/// that led it there.
///
/// Additionally, the error chain contains number identifiers
/// that can be matched to provide useful error messages.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use std::collections;
/// # use nom::IResult::Error;
/// # use nom::Err::{Position,NodePosition};
/// # use nom::ErrorKind;
/// # fn main() {
///     named!(err_test, alt!(
///       tag!("abcd") |
///       preceded!(tag!("efgh"), error!(ErrorKind::Custom(42),
///           chain!(
///                  tag!("ijkl")              ~
///             res: error!(ErrorKind::Custom(128), tag!("mnop")) ,
///             || { res }
///           )
///         )
///       )
///     ));
///     let a = &b"efghblah"[..];
///     let b = &b"efghijklblah"[..];
///     let c = &b"efghijklmnop"[..];
///
///     let blah = &b"blah"[..];
///
///     let res_a = err_test(a);
///     let res_b = err_test(b);
///     let res_c = err_test(c);
///     assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
///     assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..],
///       Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah))))))
///     );
/// # }
/// ```
///
#[macro_export]
macro_rules! error (
  ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => (
    {
      // run the child parser inside a closure so the match below owns the result
      let cl = || {
        $submac!($i, $($args)*)
      };

      match cl() {
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Done(i, o)    => $crate::IResult::Done(i, o),
        $crate::IResult::Error(e)      => {
          // early `return` from the *enclosing function*: this is what
          // prevents backtracking in the parent combinator chain
          return $crate::IResult::Error($crate::Err::NodePosition($code, $i, Box::new(e)))
        }
      }
    }
  );
  ($i:expr, $code:expr, $f:expr) => (
    error!($i, $code, call!($f));
  );
);
/// Add an error if the child parser fails
///
/// While error! does an early return and avoids backtracking,
/// add_error! backtracks normally. It just provides more context
/// for an error
///
#[macro_export]
macro_rules! add_error (
  ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Done(i, o)    => $crate::IResult::Done(i, o),
        $crate::IResult::Error(e)      => {
          // wrap the child error without an early return, so alternatives
          // in the parent chain can still be tried
          $crate::IResult::Error($crate::Err::NodePosition($code, $i, Box::new(e)))
        }
      }
    }
  );
  ($i:expr, $code:expr, $f:expr) => (
    add_error!($i, $code, call!($f));
  );
);


/// translate parser result from IResult<I,O,u32> to IResult<I,O,E> with a custom type
///
/// Custom error codes are collapsed to `ErrorKind::Fix`; the error position is
/// preserved when the original error carried one.
// NOTE(review): `ErrorKind` is used unqualified here (unlike the `$crate::`
// paths elsewhere), so it must be in scope at the expansion site — confirm.
#[macro_export]
macro_rules! fix_error (
  ($i:expr, $t:ty, $submac:ident!( $($args:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Done(i, o)    => $crate::IResult::Done(i, o),
        // custom error without a position: replace with a position-less Fix
        $crate::IResult::Error($crate::Err::Code(ErrorKind::Custom(_))) |
          $crate::IResult::Error($crate::Err::Node(ErrorKind::Custom(_), _))=> {
          let e: ErrorKind<$t> = ErrorKind::Fix;
          $crate::IResult::Error($crate::Err::Code(e))
        },
        // custom error with a position: keep the position
        $crate::IResult::Error($crate::Err::Position(ErrorKind::Custom(_), p)) |
          $crate::IResult::Error($crate::Err::NodePosition(ErrorKind::Custom(_), p, _)) => {
          let e: ErrorKind<$t> = ErrorKind::Fix;
          $crate::IResult::Error($crate::Err::Position(e, p))
        },
        // non-custom errors are also collapsed to Fix with the new error type
        $crate::IResult::Error($crate::Err::Code(_)) |
          $crate::IResult::Error($crate::Err::Node(_, _))=> {
          let e: ErrorKind<$t> = ErrorKind::Fix;
          $crate::IResult::Error($crate::Err::Code(e))
        },
        $crate::IResult::Error($crate::Err::Position(_, p)) |
          $crate::IResult::Error($crate::Err::NodePosition(_, p, _)) => {
          let e: ErrorKind<$t> = ErrorKind::Fix;
          $crate::IResult::Error($crate::Err::Position(e, p))
        },
      }
    }
  );
  ($i:expr, $t:ty, $f:expr) => (
    fix_error!($i, $t, call!($f));
  );
);
/// replaces a `Incomplete` returned by the child parser
/// with an `Error`
///
/// Useful at the end of the input: an incomplete match there
/// can never be completed, so it is reported as `ErrorKind::Complete`.
#[macro_export]
macro_rules! complete (
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(i, o)    => $crate::IResult::Done(i, o),
        $crate::IResult::Error(e)      => $crate::IResult::Error(e),
        $crate::IResult::Incomplete(_) =>  {
          $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Complete, $i))
        },
      }
    }
  );
  ($i:expr, $f:expr) => (
    complete!($i, call!($f));
  );
);

/// `flat_map!(R -> IResult<R,S>, S -> IResult<S,T>) => R -> IResult<R, T>`
///
/// combines a parser R -> IResult<R,S> and
/// a parser S -> IResult<S,T> to return another
/// parser R -> IResult<R,T>
#[macro_export]
macro_rules! flat_map(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e)                            => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
        $crate::IResult::Done(i, o)                          => match $submac2!(o, $($args2)*) {
          $crate::IResult::Error(e)                                 => {
            // re-anchor the second parser's error on the original input
            let err = match e {
              $crate::Err::Code(k) | $crate::Err::Node(k, _) | $crate::Err::Position(k, _) | $crate::Err::NodePosition(k, _, _) => {
                $crate::Err::Position(k, $i)
              }
            };
            $crate::IResult::Error(err)
          },
          $crate::IResult::Incomplete($crate::Needed::Unknown)      => $crate::IResult::Incomplete($crate::Needed::Unknown),
          $crate::IResult::Incomplete($crate::Needed::Size(ref i2)) => $crate::IResult::Incomplete($crate::Needed::Size(*i2)),
          // the remaining input is the first parser's remainder `i`
          $crate::IResult::Done(_, o2)                              => $crate::IResult::Done(i, o2)
        }
      }
    }
  );
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    flat_map!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $g:expr) => (
    flat_map!($i, call!($f), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    flat_map!($i, call!($f), $submac!($($args)*));
  );
);
/// `map!(I -> IResult<I,O>, O -> P) => I -> IResult<I, P>`
/// maps a function on the result of a parser
#[macro_export]
macro_rules! map(
($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
map_impl!($i, $submac!($($args)*), call!($g));
);
($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
map_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
);
($i:expr, $f:expr, $g:expr) => (
map_impl!($i, call!($f), call!($g));
);
($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
map_impl!($i, call!($f), $submac!($($args)*));
);
);
/// Internal parser, do not use directly
#[doc(hidden)]
#[macro_export]
macro_rules! map_impl(
($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
{
match $submac!($i, $($args)*) {
$crate::IResult::Error(e) => $crate::IResult::Error(e),
$crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
$crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
$crate::IResult::Done(i, o) => $crate::IResult::Done(i, $submac2!(o, $($args2)*))
}
}
);
);
/// `map_res!(I -> IResult<I,O>, O -> Result<P>) => I -> IResult<I, P>`
/// maps a function returning a Result on the output of a parser
#[macro_export]
macro_rules! map_res (
($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
map_res_impl!($i, $submac!($($args)*), call!($g));
);
($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
map_res_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
);
($i:expr, $f:expr, $g:expr) => (
map_res_impl!($i, call!($f), call!($g));
);
($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
map_res_impl!($i, call!($f), $submac!($($args)*));
);
);
/// Internal parser, do not use directly
#[doc(hidden)]
#[macro_export]
macro_rules! map_res_impl (
($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
{
match $submac!($i, $($args)*) {
$crate::IResult::Error(e) => $crate::IResult::Error(e),
$crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
$crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
$crate::IResult::Done(i, o) => match $submac2!(o, $($args2)*) {
Ok(output) => $crate::IResult::Done(i, output),
Err(_) => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::MapRes, $i))
}
}
}
);
);
/// `map_res!(I -> IResult<I,O>, O -> Option<P>) => I -> IResult<I, P>`
/// maps a function returning an Option on the output of a parser
#[macro_export]
macro_rules! map_opt (
($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
map_opt_impl!($i, $submac!($($args)*), call!($g));
);
($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
map_opt_impl!($i, $submac!($($args)*), $submac2!($($args2)*));
);
($i:expr, $f:expr, $g:expr) => (
map_opt_impl!($i, call!($f), call!($g));
);
($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
map_opt_impl!($i, call!($f), $submac!($($args)*));
);
);
/// Internal parser, do not use directly
///
/// expansion target for `map_opt!`: runs the parser, then applies the
/// `Option`-returning mapper to its output
#[doc(hidden)]
#[macro_export]
macro_rules! map_opt_impl (
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e) => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size(i)),
        // a None from the mapper becomes a MapOpt error at the original input
        $crate::IResult::Done(i, o) => match $submac2!(o, $($args2)*) {
          Some(output) => $crate::IResult::Done(i, output),
          None => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::MapOpt, $i))
        }
      }
    }
  );
);
/// `expr_res!(Result<O,E>) => I -> IResult<I, O>`
/// evaluate an expression that returns a Result<T,E> and returns a IResult::Done(I,T) if Ok
///
/// on `Err`, produces an `ErrorKind::ExprRes` error at the current input;
/// no input is consumed either way
///
/// See expr_opt for an example
#[macro_export]
macro_rules! expr_res (
  ($i:expr, $e:expr) => (
    {
      match $e {
        Ok(output) => $crate::IResult::Done($i, output),
        Err(_) => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::ExprRes, $i))
      }
    }
  );
);
/// `expr_opt!(Option<O>) => I -> IResult<I, O>`
/// evaluate an expression that returns a Option<T> and returns a IResult::Done(I,T) if Some
///
/// on `None`, produces an `ErrorKind::ExprOpt` error at the current input;
/// no input is consumed either way
///
/// Useful when doing computations in a chain
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{self, Done, Error};
/// # use nom::Err::Position;
/// # use nom::{be_u8,ErrorKind};
///
/// fn take_add(input:&[u8], size: u8) -> IResult<&[u8],&[u8]> {
/// chain!(input,
/// sz: be_u8 ~
/// length: expr_opt!(size.checked_add(sz)) ~ // checking for integer overflow (returns an Option)
/// data: take!(length) ,
/// ||{ data }
/// )
/// }
/// # fn main() {
/// let arr1 = [1, 2, 3, 4, 5];
/// let r1 = take_add(&arr1[..], 1);
/// assert_eq!(r1, Done(&[4,5][..], &[2,3][..]));
///
/// let arr2 = [0xFE, 2, 3, 4, 5];
/// // size is overflowing
/// let r1 = take_add(&arr2[..], 42);
/// assert_eq!(r1, Error(Position(ErrorKind::ExprOpt,&[2,3,4,5][..])));
/// # }
/// ```
#[macro_export]
macro_rules! expr_opt (
  ($i:expr, $e:expr) => (
    {
      match $e {
        Some(output) => $crate::IResult::Done($i, output),
        None => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::ExprOpt, $i))
      }
    }
  );
);
/// `chain!(I->IResult<I,A> ~ I->IResult<I,B> ~ ... I->IResult<I,X> , || { return O } ) => I -> IResult<I, O>`
/// chains parsers and assemble the results through a closure
/// the input type I must implement nom::InputLength
/// this combinator will count how much data is consumed by every child parser and take it into account if
/// there is not enough data
///
/// the actual work is delegated to `chaining_parser!`, which threads the
/// consumed byte count through each step
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{self, Done, Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// #[derive(PartialEq,Eq,Debug)]
/// struct B {
/// a: u8,
/// b: Option<u8>
/// }
///
/// named!(y, tag!("efgh"));
///
/// fn ret_int(i:&[u8]) -> IResult<&[u8], u8> { Done(i, 1) }
/// named!(ret_y<&[u8], u8>, map!(y, |_| 1)); // return 1 if the "efgh" tag is found
///
/// named!(z<&[u8], B>,
/// chain!(
/// tag!("abcd") ~ // the '~' character is used as separator
/// aa: ret_int ~ // the result of that parser will be used in the closure
/// tag!("abcd")? ~ // this parser is optional
/// bb: ret_y? , // the result of that parser is an option
/// // the last parser in the chain is followed by a ','
/// ||{B{a: aa, b: bb}}
/// )
/// );
///
/// # fn main() {
/// // the first "abcd" tag is not present, we have an error
/// let r1 = z(&b"efgh"[..]);
/// assert_eq!(r1, Error(Position(ErrorKind::Tag,&b"efgh"[..])));
///
/// // everything is present, everything is parsed
/// let r2 = z(&b"abcdabcdefgh"[..]);
/// assert_eq!(r2, Done(&b""[..], B{a: 1, b: Some(1)}));
///
/// // the second "abcd" tag is optional
/// let r3 = z(&b"abcdefgh"[..]);
/// assert_eq!(r3, Done(&b""[..], B{a: 1, b: Some(1)}));
///
/// // the result of ret_y is optional, as seen in the B structure
/// let r4 = z(&b"abcdabcdwxyz"[..]);
/// assert_eq!(r4, Done(&b"wxyz"[..], B{a: 1, b: None}));
/// # }
/// ```
#[macro_export]
macro_rules! chain (
  ($i:expr, $($rest:tt)*) => (
    {
      //use $crate::InputLength;
      // start the recursion with a consumed-byte counter of 0
      chaining_parser!($i, 0usize, $($rest)*)
    }
  );
);
/// Internal parser, do not use directly
///
/// Recursive expansion target for `chain!`. The `$consumed` accumulator tracks
/// how many bytes the preceding parsers consumed, so that an
/// `Incomplete(Needed::Size(n))` from a child parser is reported relative to
/// the start of the whole chain. Rule variants cover: unnamed / named
/// (`field:`) / mutable (`mut field:`) bindings, each in mandatory (`~`) and
/// optional (`?`) form, plus the terminal rules ending in `, $assemble`.
#[doc(hidden)]
#[macro_export]
macro_rules! chaining_parser (
  // bare function form forwards to the macro form
  ($i:expr, $consumed:expr, $e:ident ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, call!($e) ~ $($rest)*);
  );
  // unnamed mandatory step: discard the output, recurse on the rest
  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
    {
      use $crate::InputLength;
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e) => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        $crate::IResult::Done(i,_) => {
          chaining_parser!(i, $consumed + (($i).input_len() - i.input_len()), $($rest)*)
        }
      }
    }
  );
  ($i:expr, $consumed:expr, $e:ident ? ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, call!($e) ? ~ $($rest)*);
  );
  // unnamed optional step: Error leaves the input untouched, Incomplete still aborts
  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
    {
      use $crate::InputLength;
      let res = $submac!($i, $($args)*);
      if let $crate::IResult::Incomplete(inc) = res {
        match inc {
          $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
          $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        }
      } else {
        let input = if let $crate::IResult::Done(i,_) = res {
          i
        } else {
          $i
        };
        chaining_parser!(input, $consumed + (($i).input_len() - input.input_len()), $($rest)*)
      }
    }
  });
  ($i:expr, $consumed:expr, $field:ident : $e:ident ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, $field: call!($e) ~ $($rest)*);
  );
  // named mandatory step: bind the output to $field for later steps/closure
  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
    {
      use $crate::InputLength;
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e) => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        $crate::IResult::Done(i,o) => {
          let $field = o;
          chaining_parser!(i, $consumed + (($i).input_len() - i.input_len()), $($rest)*)
        }
      }
    }
  );
  ($i:expr, $consumed:expr, mut $field:ident : $e:ident ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, mut $field: call!($e) ~ $($rest)*);
  );
  // mutable named mandatory step
  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ~ $($rest:tt)*) => (
    {
      use $crate::InputLength;
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e) => $crate::IResult::Error(e),
        $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        $crate::IResult::Done(i,o) => {
          let mut $field = o;
          chaining_parser!(i, $consumed + ($i).input_len() - i.input_len(), $($rest)*)
        }
      }
    }
  );
  ($i:expr, $consumed:expr, $field:ident : $e:ident ? ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, $field : call!($e) ? ~ $($rest)*);
  );
  // named optional step: binds Some(o) on success, None on error
  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
    {
      use $crate::InputLength;
      let res = $submac!($i, $($args)*);
      if let $crate::IResult::Incomplete(inc) = res {
        match inc {
          $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
          $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        }
      } else {
        let ($field,input) = if let $crate::IResult::Done(i,o) = res {
          (Some(o),i)
        } else {
          (None,$i)
        };
        chaining_parser!(input, $consumed + ($i).input_len() - input.input_len(), $($rest)*)
      }
    }
  });
  ($i:expr, $consumed:expr, mut $field:ident : $e:ident ? ~ $($rest:tt)*) => (
    chaining_parser!($i, $consumed, mut $field : call!($e) ? ~ $($rest)*);
  );
  // mutable named optional step
  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ? ~ $($rest:tt)*) => ({
    {
      use $crate::InputLength;
      let res = $submac!($i, $($args)*);
      if let $crate::IResult::Incomplete(inc) = res {
        match inc {
          $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
          $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
        }
      } else {
        let (mut $field,input) = if let $crate::IResult::Done(i,o) = res {
          (Some(o),i)
        } else {
          (None,$i)
        };
        chaining_parser!(input, $consumed + ($i).input_len() - input.input_len(), $($rest)*)
      }
    }
  });
  // ending the chain
  ($i:expr, $consumed:expr, $e:ident, $assemble:expr) => (
    chaining_parser!($i, $consumed, call!($e), $assemble);
  );
  // unnamed terminal step: run the last parser, then call the assembly closure
  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ), $assemble:expr) => (
    match $submac!($i, $($args)*) {
      $crate::IResult::Error(e) => $crate::IResult::Error(e),
      $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
      $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      $crate::IResult::Done(i,_) => {
        $crate::IResult::Done(i, $assemble())
      }
    }
  );
  ($i:expr, $consumed:expr, $e:ident ?, $assemble:expr) => (
    chaining_parser!($i, $consumed, call!($e) ?, $assemble);
  );
  // unnamed optional terminal step
  ($i:expr, $consumed:expr, $submac:ident!( $($args:tt)* ) ?, $assemble:expr) => ({
    let res = $submac!($i, $($args)*);
    if let $crate::IResult::Incomplete(inc) = res {
      match inc {
        $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      }
    } else {
      let input = if let $crate::IResult::Done(i,_) = res {
        i
      } else {
        $i
      };
      $crate::IResult::Done(input, $assemble())
    }
  });
  ($i:expr, $consumed:expr, $field:ident : $e:ident, $assemble:expr) => (
    chaining_parser!($i, $consumed, $field: call!($e), $assemble);
  );
  // named terminal step
  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ), $assemble:expr) => (
    match $submac!($i, $($args)*) {
      $crate::IResult::Error(e) => $crate::IResult::Error(e),
      $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
      $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      $crate::IResult::Done(i,o) => {
        let $field = o;
        $crate::IResult::Done(i, $assemble())
      }
    }
  );
  ($i:expr, $consumed:expr, mut $field:ident : $e:ident, $assemble:expr) => (
    chaining_parser!($i, $consumed, mut $field: call!($e), $assemble);
  );
  // mutable named terminal step
  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ), $assemble:expr) => (
    match $submac!($i, $($args)*) {
      $crate::IResult::Error(e) => $crate::IResult::Error(e),
      $crate::IResult::Incomplete($crate::Needed::Unknown) => $crate::IResult::Incomplete($crate::Needed::Unknown),
      $crate::IResult::Incomplete($crate::Needed::Size(i)) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      $crate::IResult::Done(i,o) => {
        let mut $field = o;
        $crate::IResult::Done(i, $assemble())
      }
    }
  );
  ($i:expr, $consumed:expr, $field:ident : $e:ident ? , $assemble:expr) => (
    chaining_parser!($i, $consumed, $field : call!($e) ? , $assemble);
  );
  // named optional terminal step
  ($i:expr, $consumed:expr, $field:ident : $submac:ident!( $($args:tt)* ) ? , $assemble:expr) => ({
    let res = $submac!($i, $($args)*);
    if let $crate::IResult::Incomplete(inc) = res {
      match inc {
        $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      }
    } else {
      let ($field,input) = if let $crate::IResult::Done(i,o) = res {
        (Some(o), i)
      } else {
        (None, $i)
      };
      $crate::IResult::Done(input, $assemble())
    }
  });
  ($i:expr, $consumed:expr, mut $field:ident : $e:ident ? , $assemble:expr) => (
    // FIX: forward with `mut` — the previous expansion dropped the `mut`
    // qualifier, so `mut field: parser? ,` with a bare function silently
    // produced an immutable binding
    chaining_parser!($i, $consumed, mut $field : call!($e) ? , $assemble);
  );
  // mutable named optional terminal step
  ($i:expr, $consumed:expr, mut $field:ident : $submac:ident!( $($args:tt)* ) ? , $assemble:expr) => ({
    let res = $submac!($i, $($args)*);
    if let $crate::IResult::Incomplete(inc) = res {
      match inc {
        $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
        $crate::Needed::Size(i) => $crate::IResult::Incomplete($crate::Needed::Size($consumed + i)),
      }
    } else {
      let (mut $field,input) = if let $crate::IResult::Done(i,o) = res {
        (Some(o), i)
      } else {
        (None, $i)
      };
      $crate::IResult::Done(input, $assemble())
    }
  });
  // empty chain: just call the assembly closure on the untouched input
  ($i:expr, $consumed:expr, $assemble:expr) => (
    $crate::IResult::Done($i, $assemble())
  )
);
/// `alt!(I -> IResult<I,O> | I -> IResult<I,O> | ... | I -> IResult<I,O> ) => I -> IResult<I, O>`
/// try a list of parsers, return the result of the first successful one
///
/// If one of the parser returns Incomplete, alt will return Incomplete, to retry
/// once you get more input. Note that it is better for performance to know the
/// minimum size of data you need before you get into alt.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!( test, alt!( tag!( "abcd" ) | tag!( "efgh" ) ) );
/// let r1 = test(b"abcdefgh");
/// assert_eq!(r1, Done(&b"efgh"[..], &b"abcd"[..]));
/// let r2 = test(&b"efghijkl"[..]);
/// assert_eq!(r2, Done(&b"ijkl"[..], &b"efgh"[..]));
/// # }
/// ```
///
/// There is another syntax for alt allowing a block to manipulate the result:
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// #[derive(Debug,PartialEq,Eq)]
/// enum Tagged {
/// Abcd,
/// Efgh,
/// Took(usize)
/// }
/// named!(test<Tagged>, alt!(
/// tag!("abcd") => { |_| Tagged::Abcd }
/// | tag!("efgh") => { |_| Tagged::Efgh }
/// | take!(5) => { |res: &[u8]| Tagged::Took(res.len()) } // the closure takes the result as argument if the parser is successful
/// ));
/// let r1 = test(b"abcdefgh");
/// assert_eq!(r1, Done(&b"efgh"[..], Tagged::Abcd));
/// let r2 = test(&b"efghijkl"[..]);
/// assert_eq!(r2, Done(&b"ijkl"[..], Tagged::Efgh));
/// let r3 = test(&b"mnopqrst"[..]);
/// assert_eq!(r3, Done(&b"rst"[..], Tagged::Took(5)));
/// # }
/// ```
#[macro_export]
macro_rules! alt (
  ($i:expr, $($rest:tt)*) => (
    {
      // delegate to the recursive helper, which tries alternatives in order
      alt_parser!($i, $($rest)*)
    }
  );
);
/// Internal parser, do not use directly
///
/// Recursive expansion target for `alt!`: tries each `|`-separated
/// alternative in order; Done or Incomplete short-circuits, Error falls
/// through to the next alternative. The empty-rule base case produces an
/// `ErrorKind::Alt` error when every alternative has failed.
#[doc(hidden)]
#[macro_export]
macro_rules! alt_parser (
  // bare function alternative forwards to the macro form
  ($i:expr, $e:ident | $($rest:tt)*) => (
    alt_parser!($i, call!($e) | $($rest)*);
  );
  // macro alternative with more alternatives remaining
  ($i:expr, $subrule:ident!( $($args:tt)*) | $($rest:tt)*) => (
    {
      let res = $subrule!($i, $($args)*);
      match res {
        $crate::IResult::Done(_,_) => res,
        $crate::IResult::Incomplete(_) => res,
        _ => alt_parser!($i, $($rest)*)
      }
    }
  );
  // alternative with a result-transforming block, more alternatives remaining
  ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr } | $($rest:tt)+) => (
    {
      match $subrule!( $i, $($args)* ) {
        $crate::IResult::Done(i,o) => $crate::IResult::Done(i,$gen(o)),
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Error(_) => {
          alt_parser!($i, $($rest)*)
        }
      }
    }
  );
  ($i:expr, $e:ident => { $gen:expr } | $($rest:tt)*) => (
    alt_parser!($i, call!($e) => { $gen } | $($rest)*);
  );
  ($i:expr, $e:ident => { $gen:expr }) => (
    alt_parser!($i, call!($e) => { $gen });
  );
  // last alternative with a result-transforming block
  ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr }) => (
    {
      match $subrule!( $i, $($args)* ) {
        $crate::IResult::Done(i,o) => $crate::IResult::Done(i,$gen(o)),
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Error(_) => {
          alt_parser!($i)
        }
      }
    }
  );
  ($i:expr, $e:ident) => (
    alt_parser!($i, call!($e));
  );
  // last alternative
  ($i:expr, $subrule:ident!( $($args:tt)*)) => (
    {
      match $subrule!( $i, $($args)* ) {
        $crate::IResult::Done(i,o) => $crate::IResult::Done(i,o),
        $crate::IResult::Incomplete(x) => $crate::IResult::Incomplete(x),
        $crate::IResult::Error(_) => {
          alt_parser!($i)
        }
      }
    }
  );
  // base case: every alternative failed
  ($i:expr) => (
    $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Alt,$i))
  );
);
/// `switch!(I -> IResult<I,P>, P => I -> IResult<I,O> | ... | P => I -> IResult<I,O> ) => I -> IResult<I, O>`
/// choose the next parser depending on the result of the first one, if successful
///
/// if the first parser's output matches none of the provided patterns, this
/// produces an `ErrorKind::Switch` error at the remaining input
#[macro_export]
macro_rules! switch (
  ($i:expr, $submac:ident!( $($args:tt)*), $($p:pat => $subrule:ident!( $($args2:tt)* ))|*) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(e) => $crate::IResult::Error(e),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i, o) => {
          match o {
            $($p => $subrule!(i, $($args2)*)),*,
            _ => $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Switch,i))
          }
        }
      }
    }
  );
  ($i:expr, $e:ident, $($rest:tt)*) => (
    {
      // FIX: was `call!(e)`, which referenced a nonexistent identifier `e`
      // instead of the captured metavariable `$e`, breaking the
      // bare-function form of switch!
      switch!($i, call!($e), $($rest)*)
    }
  );
);
/// `opt!(I -> IResult<I,O>) => I -> IResult<I, Option<O>>`
/// make the underlying parser optional
///
/// returns an Option of the returned type. This parser never fails
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!( o<&[u8], Option<&[u8]> >, opt!( tag!( "abcd" ) ) );
///
/// let a = b"abcdef";
/// let b = b"bcdefg";
/// assert_eq!(o(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));
/// assert_eq!(o(&b[..]), Done(&b"bcdefg"[..], None));
/// # }
/// ```
#[macro_export]
macro_rules! opt(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(i,o) => $crate::IResult::Done(i, Some(o)),
        // Error and Incomplete both become Done(input, None): the original
        // input is returned untouched
        $crate::IResult::Error(_) => $crate::IResult::Done($i, None),
        $crate::IResult::Incomplete(_) => $crate::IResult::Done($i, None)
      }
    }
  );
  ($i:expr, $f:expr) => (
    opt!($i, call!($f));
  );
);
/// `opt_res!(I -> IResult<I,O>) => I -> IResult<I, Result<O, nom::Err>>`
/// make the underlying parser optional
///
/// returns a Result, with Err containing the parsing error
///
/// unlike `opt!`, Incomplete is passed through unchanged
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
/// named!( o<&[u8], Result<&[u8], nom::Err<&[u8]> > >, opt_res!( tag!( "abcd" ) ) );
///
/// let a = b"abcdef";
/// let b = b"bcdefg";
/// assert_eq!(o(&a[..]), Done(&b"ef"[..], Ok(&b"abcd"[..])));
/// assert_eq!(o(&b[..]), Done(&b"bcdefg"[..], Err(Position(ErrorKind::Tag, &b[..]))));
/// # }
/// ```
#[macro_export]
macro_rules! opt_res (
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(i,o) => $crate::IResult::Done(i, Ok(o)),
        // an error is captured in the Ok-result, input left untouched
        $crate::IResult::Error(e) => $crate::IResult::Done($i, Err(e)),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
      }
    }
  );
  ($i:expr, $f:expr) => (
    opt_res!($i, call!($f));
  );
);
/// `cond!(bool, I -> IResult<I,O>) => I -> IResult<I, Option<O>>`
/// Conditional combinator
///
/// Wraps another parser and calls it if the
/// condition is met. This combinator returns
/// an Option of the return type of the child
/// parser.
///
/// This is especially useful if a parser depends
/// on the value return by a preceding parser in
/// a `chain!`.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use nom::IResult;
/// # fn main() {
/// let b = true;
/// let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8],
/// cond!( b, tag!("abcd") ))
/// );
///
/// let a = b"abcdef";
/// assert_eq!(f(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));
///
/// let b2 = false;
/// let f2:Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8],
/// cond!( b2, tag!("abcd") ))
/// );
/// assert_eq!(f2(&a[..]), Done(&b"abcdef"[..], None));
/// # }
/// ```
///
#[macro_export]
macro_rules! cond(
  ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => (
    {
      if $cond {
        match $submac!($i, $($args)*) {
          $crate::IResult::Done(i,o) => $crate::IResult::Done(i, Some(o)),
          // child errors are swallowed: condition was met but parse failed
          $crate::IResult::Error(_) => $crate::IResult::Done($i, None),
          $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
        }
      } else {
        // condition not met: succeed with None, consuming nothing
        $crate::IResult::Done($i, None)
      }
    }
  );
  ($i:expr, $cond:expr, $f:expr) => (
    cond!($i, $cond, call!($f));
  );
);
/// `cond_reduce!(bool, I -> IResult<I,O>) => I -> IResult<I, O>`
/// Conditional combinator with error
///
/// Wraps another parser and calls it if the
/// condition is met. This combinator returns
/// an error if the condition is false
///
/// This is especially useful if a parser depends
/// on the value return by a preceding parser in
/// a `chain!`.
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::{Err,ErrorKind};
/// # fn main() {
/// let b = true;
/// let f = closure!(&'static[u8],
/// cond_reduce!( b, tag!("abcd") )
/// );
///
/// let a = b"abcdef";
/// assert_eq!(f(&a[..]), Done(&b"ef"[..], &b"abcd"[..]));
///
/// let b2 = false;
/// let f2 = closure!(&'static[u8],
/// cond_reduce!( b2, tag!("abcd") )
/// );
/// assert_eq!(f2(&a[..]), Error(Err::Position(ErrorKind::CondReduce, &a[..])));
/// # }
/// ```
///
#[macro_export]
macro_rules! cond_reduce(
  ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => (
    {
      if $cond {
        // condition met: the child parser's result is passed through as-is
        match $submac!($i, $($args)*) {
          $crate::IResult::Done(i,o) => $crate::IResult::Done(i, o),
          $crate::IResult::Error(e) => $crate::IResult::Error(e),
          $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
        }
      } else {
        // condition not met: fail with CondReduce (contrast with cond!)
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::CondReduce, $i))
      }
    }
  );
  ($i:expr, $cond:expr, $f:expr) => (
    cond_reduce!($i, $cond, call!($f));
  );
);
/// `peek!(I -> IResult<I,O>) => I -> IResult<I, O>`
/// returns a result without consuming the input
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!(ptag, peek!( tag!( "abcd" ) ) );
///
/// let r = ptag(&b"abcdefgh"[..]);
/// assert_eq!(r, Done(&b"abcdefgh"[..], &b"abcd"[..]));
/// # }
/// ```
#[macro_export]
macro_rules! peek(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        // on success, return the output but keep the ORIGINAL input
        $crate::IResult::Done(_,o) => $crate::IResult::Done($i, o),
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
      }
    }
  );
  ($i:expr, $f:expr) => (
    // FIX: was `call!(f)`, which referenced a nonexistent identifier `f`
    // instead of the captured metavariable `$f`, breaking the expression
    // form of peek!
    peek!($i, call!($f));
  );
);
/// `tap!(name: I -> IResult<I,O> => { block }) => I -> IResult<I, O>`
/// allows access to the parser's result without affecting it
///
/// the expression `$e` is evaluated for its side effects only; the parser's
/// output is then returned unchanged
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # use std::str;
/// # fn main() {
/// named!(ptag, tap!(res: tag!( "abcd" ) => { println!("recognized {}", str::from_utf8(res).unwrap()) } ) );
///
/// let r = ptag(&b"abcdefgh"[..]);
/// assert_eq!(r, Done(&b"efgh"[..], &b"abcd"[..]));
/// # }
/// ```
#[macro_export]
macro_rules! tap (
  ($i:expr, $name:ident : $submac:ident!( $($args:tt)* ) => $e:expr) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Done(i,o) => {
          // bind the output under the user-chosen name, run the tap
          // expression, then return the output untouched
          let $name = o;
          $e;
          $crate::IResult::Done(i, $name)
        },
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i)
      }
    }
  );
  ($i:expr, $name: ident: $f:expr => $e:expr) => (
    tap!($i, $name: call!($f) => $e);
  );
);
/// `pair!(I -> IResult<I,O>, I -> IResult<I,P>) => I -> IResult<I, (O,P)>`
/// pair(X,Y), returns (x,y)
///
/// applies the two parsers in sequence and returns both outputs as a tuple
#[macro_export]
macro_rules! pair(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1) => {
          // second parser continues on the remaining input
          match $submac2!(i1, $($args2)*) {
            $crate::IResult::Error(a) => $crate::IResult::Error(a),
            $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
            $crate::IResult::Done(i2,o2) => {
              $crate::IResult::Done(i2, (o1, o2))
            }
          }
        },
      }
    }
  );
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    pair!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    pair!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    pair!($i, call!($f), call!($g));
  );
);
/// `separated_pair!(I -> IResult<I,O>, I -> IResult<I, T>, I -> IResult<I,P>) => I -> IResult<I, (O,P)>`
/// separated_pair(X,sep,Y) returns (x,y)
///
/// the separator's output is discarded; implemented as a 3-stage cascade
/// through separated_pair1!/separated_pair2!
#[macro_export]
macro_rules! separated_pair(
  ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1) => {
          // first output captured, hand off to the separator stage
          separated_pair1!(i1, o1, $($rest)*)
        }
      }
    }
  );
  ($i:expr, $f:expr, $($rest:tt)+) => (
    separated_pair!($i, call!($f), $($rest)*);
  );
);
/// Internal parser, do not use directly
///
/// second stage of `separated_pair!`: parses the separator and discards
/// its output, carrying the first result forward
#[doc(hidden)]
#[macro_export]
macro_rules! separated_pair1(
  ($i:expr, $res1:ident, $submac2:ident!( $($args2:tt)* ), $($rest:tt)+) => (
    {
      match $submac2!($i, $($args2)*) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i2,_) => {
          separated_pair2!(i2, $res1, $($rest)*)
        }
      }
    }
  );
  ($i:expr, $res1:ident, $g:expr, $($rest:tt)+) => (
    separated_pair1!($i, $res1, call!($g), $($rest)*);
  );
);
/// Internal parser, do not use directly
///
/// final stage of `separated_pair!`: parses the second element and pairs
/// it with the first result
#[doc(hidden)]
#[macro_export]
macro_rules! separated_pair2(
  ($i:expr, $res1:ident, $submac3:ident!( $($args3:tt)* )) => (
    {
      match $submac3!($i, $($args3)*) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i3,o3) => {
          $crate::IResult::Done(i3, ($res1, o3))
        }
      }
    }
  );
  ($i:expr, $res1:ident, $h:expr) => (
    separated_pair2!($i, $res1, call!($h));
  );
);
/// `preceded!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, O>`
/// preceded(opening, X) returns X
///
/// the first parser's output is discarded; only the second one's is returned
#[macro_export]
macro_rules! preceded(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,_) => {
          match $submac2!(i1, $($args2)*) {
            $crate::IResult::Error(a) => $crate::IResult::Error(a),
            $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
            $crate::IResult::Done(i2,o2) => {
              $crate::IResult::Done(i2, o2)
            }
          }
        },
      }
    }
  );
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    preceded!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    preceded!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    preceded!($i, call!($f), call!($g));
  );
);
/// `terminated!(I -> IResult<I,O>, I -> IResult<I,T>) => I -> IResult<I, O>`
/// terminated(X, closing) returns X
///
/// the second parser's output is discarded; only the first one's is returned
#[macro_export]
macro_rules! terminated(
  ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,o1) => {
          match $submac2!(i1, $($args2)*) {
            $crate::IResult::Error(a) => $crate::IResult::Error(a),
            $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
            $crate::IResult::Done(i2,_) => {
              $crate::IResult::Done(i2, o1)
            }
          }
        },
      }
    }
  );
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    terminated!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    terminated!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    terminated!($i, call!($f), call!($g));
  );
);
/// `delimited!(I -> IResult<I,T>, I -> IResult<I,O>, I -> IResult<I,U>) => I -> IResult<I, O>`
/// delimited(opening, X, closing) returns X
///
/// the opening and closing delimiters' outputs are discarded; implemented
/// as a 3-stage cascade through delimited1!/delimited2!
#[macro_export]
macro_rules! delimited(
  ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => (
    {
      match $submac!($i, $($args)*) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,_) => {
          delimited1!(i1, $($rest)*)
        }
      }
    }
  );
  ($i:expr, $f:expr, $($rest:tt)+) => (
    delimited!($i, call!($f), $($rest)*);
  );
);
/// Internal parser, do not use directly
///
/// second stage of `delimited!`: parses the inner element and keeps its
/// output for the final stage
#[doc(hidden)]
#[macro_export]
macro_rules! delimited1(
  ($i:expr, $submac2:ident!( $($args2:tt)* ), $($rest:tt)+) => (
    {
      match $submac2!($i, $($args2)*) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i2,o2) => {
          delimited2!(i2, o2, $($rest)*)
        }
      }
    }
  );
  ($i:expr, $g:expr, $($rest:tt)+) => (
    delimited1!($i, call!($g), $($rest)*);
  );
);
/// Internal parser, do not use directly
///
/// final stage of `delimited!`: parses the closing delimiter, discards its
/// output, and returns the inner element's result
#[doc(hidden)]
#[macro_export]
macro_rules! delimited2(
  ($i:expr, $res2:ident, $submac3:ident!( $($args3:tt)* )) => (
    {
      match $submac3!($i, $($args3)*) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i3,_) => {
          $crate::IResult::Done(i3, $res2)
        }
      }
    }
  );
  ($i:expr, $res2:ident, $h:expr) => (
    delimited2!($i, $res2, call!($h));
  );
);
/// `separated_list!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// separated_list(sep, X) returns Vec<X>
///
/// if the first element fails to parse, succeeds with an empty Vec; stops
/// (without error) as soon as the separator or an element fails
#[macro_export]
macro_rules! separated_list(
  ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => (
    {
      let mut res = Vec::new();
      let mut input = $i;
      // get the first element
      match $submac!(input, $($args2)*) {
        $crate::IResult::Error(_) => $crate::IResult::Done(input, Vec::new()),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i,o) => {
          // a parser that consumes nothing would loop forever: reject it
          if i.len() == input.len() {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::SeparatedList,input))
          } else {
            res.push(o);
            input = i;
            loop {
              // get the separator first
              if let $crate::IResult::Done(i2,_) = $sep!(input, $($args)*) {
                if i2.len() == input.len() {
                  break;
                }
                input = i2;
                // get the element next
                if let $crate::IResult::Done(i3,o3) = $submac!(input, $($args2)*) {
                  if i3.len() == input.len() {
                    break;
                  }
                  res.push(o3);
                  input = i3;
                } else {
                  break;
                }
              } else {
                break;
              }
            }
            $crate::IResult::Done(input, res)
          }
        },
      }
    }
  );
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    separated_list!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    separated_list!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    separated_list!($i, call!($f), call!($g));
  );
);
/// `separated_nonempty_list!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// separated_nonempty_list(sep, X) returns Vec<X>
///
/// unlike `separated_list!`, fails if the first element cannot be parsed
#[macro_export]
macro_rules! separated_nonempty_list(
  ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => (
    {
      let mut res = Vec::new();
      let mut input = $i;
      // get the first element; its failure is the whole parser's failure
      match $submac!(input, $($args2)*) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i,o) => {
          // a parser that consumes nothing would loop forever: reject it
          if i.len() == input.len() {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::SeparatedNonEmptyList,input))
          } else {
            res.push(o);
            input = i;
            loop {
              if let $crate::IResult::Done(i2,_) = $sep!(input, $($args)*) {
                if i2.len() == input.len() {
                  break;
                }
                input = i2;
                if let $crate::IResult::Done(i3,o3) = $submac!(input, $($args2)*) {
                  if i3.len() == input.len() {
                    break;
                  }
                  res.push(o3);
                  input = i3;
                } else {
                  break;
                }
              } else {
                break;
              }
            }
            $crate::IResult::Done(input, res)
          }
        },
      }
    }
  );
  ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => (
    separated_nonempty_list!($i, $submac!($($args)*), call!($g));
  );
  ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => (
    separated_nonempty_list!($i, call!($f), $submac!($($args)*));
  );
  ($i:expr, $f:expr, $g:expr) => (
    separated_nonempty_list!($i, call!($f), call!($g));
  );
);
/// `many0!(I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// Applies the parser 0 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::Done;
/// # fn main() {
/// named!(multi<&[u8], Vec<&[u8]> >, many0!( tag!( "abcd" ) ) );
///
/// let a = b"abcdabcdef";
/// let b = b"azerty";
///
/// let res = vec![&b"abcd"[..], &b"abcd"[..]];
/// assert_eq!(multi(&a[..]), Done(&b"ef"[..], res));
/// assert_eq!(multi(&b[..]), Done(&b"azerty"[..], Vec::new()));
/// # }
/// ```
/// 0 or more
#[macro_export]
macro_rules! many0(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      let mut res = Vec::new();
      let mut input = $i;
      while let $crate::IResult::Done(i,o) = $submac!(input, $($args)*) {
        // stop if the child parser consumed nothing, to avoid looping forever
        if i.len() == input.len() {
          break;
        }
        res.push(o);
        input = i;
      }
      // never fails: zero matches yield an empty Vec
      $crate::IResult::Done(input, res)
    }
  );
  ($i:expr, $f:expr) => (
    many0!($i, call!($f));
  );
);
/// `many1!(I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// Applies the parser 1 or more times and returns the list of results in a Vec
///
/// the embedded parser may return Incomplete
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done, Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
///  named!(multi<&[u8], Vec<&[u8]> >, many1!( tag!( "abcd" ) ) );
///
///  let a = b"abcdabcdef";
///  let b = b"azerty";
///
///  let res = vec![&b"abcd"[..], &b"abcd"[..]];
///  assert_eq!(multi(&a[..]), Done(&b"ef"[..], res));
///  assert_eq!(multi(&b[..]), Error(Position(ErrorKind::Many1,&b[..])));
/// # }
/// ```
#[macro_export]
macro_rules! many1(
  ($i:expr, $submac:ident!( $($args:tt)* )) => (
    {
      let mut res = Vec::new();
      let mut input = $i;
      while let $crate::IResult::Done(i,o) = $submac!(input, $($args)*) {
        // same zero-progress guard as many0!, to avoid infinite loops
        if i.len() == input.len() {
          break;
        }
        res.push(o);
        input = i;
      }
      // unlike many0!, an empty result is an error (at least one match required)
      if res.is_empty() {
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Many1,$i))
      } else {
        $crate::IResult::Done(input, res)
      }
    }
  );
  // plain function argument: wrap it in call! and delegate to the main arm
  ($i:expr, $f:expr) => (
    many1!($i, call!($f));
  );
);
/// `count!(I -> IResult<I,O>, nb) => I -> IResult<I, Vec<O>>`
/// Applies the child parser a specified number of times
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
///  named!(counter< Vec<&[u8]> >, count!( tag!( "abcd" ), 2 ) );
///
///  let a = b"abcdabcdabcdef";
///  let b = b"abcdefgh";
///  let res = vec![&b"abcd"[..], &b"abcd"[..]];
///
///  assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
///  assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
/// # }
/// ```
///
#[macro_export]
macro_rules! count(
  ($i:expr, $submac:ident!( $($args:tt)* ), $count: expr) => (
    {
      let mut input = $i;
      // the final length is known up front, so reserve it once
      let mut res = Vec::with_capacity($count);
      let mut cnt: usize = 0;
      let mut err = false;
      loop {
        if cnt == $count {
          break
        }
        match $submac!(input, $($args)*) {
          $crate::IResult::Done(i,o) => {
            res.push(o);
            input = i;
            cnt = cnt + 1;
          },
          $crate::IResult::Error(_)  => {
            err = true;
            break;
          },
          $crate::IResult::Incomplete(_) => {
            break;
          }
        }
      }
      if err {
        // errors are reported at the position of the original input
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Count,$i))
      } else if cnt == $count {
        $crate::IResult::Done(input, res)
      } else {
        // the child's Incomplete size is discarded, so the total need is unknown
        $crate::IResult::Incomplete($crate::Needed::Unknown)
      }
    }
  );
  // plain function argument: wrap it in call! and delegate to the main arm
  ($i:expr, $f:expr, $count: expr) => (
    count!($i, call!($f), $count);
  );
);
/// `count_fixed!(O, I -> IResult<I,O>, nb) => I -> IResult<I, [O; nb]>`
/// Applies the child parser a fixed number of times and returns a fixed size array
/// The type must be specified and it must be `Copy`
///
/// ```
/// # #[macro_use] extern crate nom;
/// # use nom::IResult::{Done,Error};
/// # use nom::Err::Position;
/// # use nom::ErrorKind;
/// # fn main() {
///  named!(counter< [&[u8]; 2] >, count_fixed!( &[u8], tag!( "abcd" ), 2 ) );
///  // can omit the type specifier if returning slices
///  // named!(counter< [&[u8]; 2] >, count_fixed!( tag!( "abcd" ), 2 ) );
///
///  let a = b"abcdabcdabcdef";
///  let b = b"abcdefgh";
///  let res = [&b"abcd"[..], &b"abcd"[..]];
///
///  assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
///  assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
/// # }
/// ```
///
#[macro_export]
macro_rules! count_fixed (
  ($i:expr, $typ:ty, $submac:ident!( $($args:tt)* ), $count: expr) => (
    {
      let mut input = $i;
      // `$typ` must be Copy, and thus having no destructor, this is panic safe
      // (no partially-initialized element is ever dropped)
      let mut res: [$typ; $count] = unsafe{[::std::mem::uninitialized(); $count as usize]};
      let mut cnt: usize = 0;
      let mut err = false;
      loop {
        if cnt == $count {
          break
        }
        match $submac!(input, $($args)*) {
          $crate::IResult::Done(i,o) => {
            res[cnt] = o;
            input = i;
            cnt = cnt + 1;
          },
          $crate::IResult::Error(_)  => {
            err = true;
            break;
          },
          $crate::IResult::Incomplete(_) => {
            break;
          }
        }
      }
      if err {
        $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Count,$i))
      } else if cnt == $count {
        $crate::IResult::Done(input, res)
      } else {
        $crate::IResult::Incomplete($crate::Needed::Unknown)
      }
    }
  );
  // NOTE(review): this arm matches $f as an ident (not expr), unlike the
  // expr-based fallback arms of count!/many0! — verify arbitrary expressions
  // are intentionally unsupported here
  ($i:expr, $typ: ty, $f:ident, $count: expr) => (
    count_fixed!($i, $typ, call!($f), $count);
  );
);
/// `length_value!(I -> IResult<I, nb>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>`
/// gets a number from the first parser, then applies the second parser that many times
///
/// The optional third argument is the byte size of one element; it is used to
/// compute a precise `Needed::Size` when the input turns out to be incomplete.
#[macro_export]
macro_rules! length_value(
  ($i:expr, $f:expr, $g:expr) => (
    {
      match $f($i) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,nb) => {
          // number of bytes consumed by the length parser itself
          let length_token = $i.len() - i1.len();
          let mut input = i1;
          let mut res = Vec::new();
          let mut err = false;
          let mut inc = $crate::Needed::Unknown;
          loop {
            if res.len() == nb as usize {
              break;
            }
            match $g(input) {
              $crate::IResult::Done(i2,o2) => {
                res.push(o2);
                input = i2;
              },
              $crate::IResult::Error(_) => {
                err = true;
                // stop on the first element error; without this break the same
                // input would be re-parsed forever (infinite loop)
                break;
              },
              $crate::IResult::Incomplete(a) => {
                inc = a;
                break;
              }
            }
          }
          if err {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::LengthValue,$i))
          } else if res.len() < nb as usize {
            // incomplete: estimate the total need from the child's reported size
            match inc {
              $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
              $crate::Needed::Size(length) => $crate::IResult::Incomplete($crate::Needed::Size(length_token + nb as usize * length))
            }
          } else {
            $crate::IResult::Done(input, res)
          }
        }
      }
    }
  );
  ($i:expr, $f:expr, $g:expr, $length:expr) => (
    {
      match $f($i) {
        $crate::IResult::Error(a) => $crate::IResult::Error(a),
        $crate::IResult::Incomplete(i) => $crate::IResult::Incomplete(i),
        $crate::IResult::Done(i1,nb) => {
          // number of bytes consumed by the length parser itself
          let length_token = $i.len() - i1.len();
          let mut input = i1;
          let mut res = Vec::new();
          let mut err = false;
          let mut inc = $crate::Needed::Unknown;
          loop {
            if res.len() == nb as usize {
              break;
            }
            match $g(input) {
              $crate::IResult::Done(i2,o2) => {
                res.push(o2);
                input = i2;
              },
              $crate::IResult::Error(_) => {
                err = true;
                // stop on the first element error; without this break the same
                // input would be re-parsed forever (infinite loop)
                break;
              },
              $crate::IResult::Incomplete(a) => {
                inc = a;
                break;
              }
            }
          }
          if err {
            $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::LengthValue,$i))
          } else if res.len() < nb as usize {
            // incomplete: use the caller-supplied fixed element size $length
            match inc {
              $crate::Needed::Unknown => $crate::IResult::Incomplete($crate::Needed::Unknown),
              $crate::Needed::Size(_) => $crate::IResult::Incomplete($crate::Needed::Size(length_token + nb as usize * $length))
            }
          } else {
            $crate::IResult::Done(input, res)
          }
        }
      }
    }
  );
);
#[cfg(test)]
mod tests {
  // Tests for the multi/sequence combinator macros defined above.
  use internal::{Needed,IResult,Err};
  use internal::IResult::*;
  use internal::Err::*;
  use util::ErrorKind;

  // reproduce the tag and take macros, because of module import order
  macro_rules! tag (
    ($i:expr, $inp: expr) => (
      {
        #[inline(always)]
        fn as_bytes<T: $crate::AsBytes>(b: &T) -> &[u8] {
          b.as_bytes()
        }
        let expected = $inp;
        let bytes = as_bytes(&expected);
        let res : $crate::IResult<&[u8],&[u8]> = if bytes.len() > $i.len() {
          $crate::IResult::Incomplete($crate::Needed::Size(bytes.len()))
        } else if &$i[0..bytes.len()] == bytes {
          $crate::IResult::Done(&$i[bytes.len()..], &$i[0..bytes.len()])
        } else {
          $crate::IResult::Error($crate::Err::Position($crate::ErrorKind::Tag, $i))
        };
        res
      }
    );
  );

  macro_rules! take(
    ($i:expr, $count:expr) => (
      {
        let cnt = $count as usize;
        let res:$crate::IResult<&[u8],&[u8]> = if $i.len() < cnt {
          $crate::IResult::Incomplete($crate::Needed::Size(cnt))
        } else {
          $crate::IResult::Done(&$i[cnt..],&$i[0..cnt])
        };
        res
      }
    );
  );

  mod pub_named_mod {
    named!(pub tst, tag!("abcd"));
  }

  #[test]
  fn pub_named_test() {
    let a = &b"abcd"[..];
    let res = pub_named_mod::tst(a);
    assert_eq!(res, Done(&b""[..], a));
  }

  #[test]
  fn apply_test() {
    fn sum2(a:u8, b:u8)       -> u8 { a + b }
    fn sum3(a:u8, b:u8, c:u8) -> u8 { a + b + c }
    let a = apply!(1, sum2, 2);
    let b = apply!(1, sum3, 2, 3);

    assert_eq!(a, 3);
    assert_eq!(b, 6);
  }

  #[derive(PartialEq,Eq,Debug)]
  struct B {
    a: u8,
    b: u8
  }

  #[test]
  fn chain2() {
    fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
    fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
    named!(f<&[u8],B>,
      chain!(
        tag!("abcd")   ~
        tag!("abcd")?  ~
        aa: ret_int1   ~
        tag!("efgh")   ~
        bb: ret_int2   ~
        tag!("efgh")   ,
        ||{B{a: aa, b: bb}}
      )
    );

    let r = f(&b"abcdabcdefghefghX"[..]);
    assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));

    let r2 = f(&b"abcdefghefghX"[..]);
    assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
  }

  #[test]
  fn nested_chain() {
    fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
    fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Done(i,2) };
    named!(f<&[u8],B>,
      chain!(
        chain!(
          tag!("abcd")   ~
          tag!("abcd")?  ,
          || {}
        )              ~
        aa: ret_int1   ~
        tag!("efgh")   ~
        bb: ret_int2   ~
        tag!("efgh")   ,
        ||{B{a: aa, b: bb}}
      )
    );

    let r = f(&b"abcdabcdefghefghX"[..]);
    assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 2}));

    let r2 = f(&b"abcdefghefghX"[..]);
    assert_eq!(r2, Done(&b"X"[..], B{a: 1, b: 2}));
  }

  #[derive(PartialEq,Eq,Debug)]
  struct C {
    a: u8,
    b: Option<u8>
  }

  #[test]
  fn chain_mut() {
    fn ret_b1_2(i:&[u8]) -> IResult<&[u8], B> { Done(i,B{a:1,b:2}) };
    named!(f<&[u8],B>,
      chain!(
        tag!("abcd")     ~
        tag!("abcd")?    ~
        tag!("efgh")     ~
        mut bb: ret_b1_2 ~
        tag!("efgh")   ,
        ||{
          bb.b = 3;
          bb
        }
      )
    );

    let r = f(&b"abcdabcdefghefghX"[..]);
    assert_eq!(r, Done(&b"X"[..], B{a: 1, b: 3}));
  }

  #[test]
  fn chain_opt() {
    named!(y, tag!("efgh"));
    fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Done(i,1) };
    named!(ret_y<&[u8], u8>, map!(y, |_| 2));

    named!(f<&[u8],C>,
      chain!(
        tag!("abcd") ~
        aa: ret_int1 ~
        bb: ret_y?   ,
        ||{C{a: aa, b: bb}}
      )
    );

    let r = f(&b"abcdefghX"[..]);
    assert_eq!(r, Done(&b"X"[..], C{a: 1, b: Some(2)}));

    let r2 = f(&b"abcdWXYZ"[..]);
    assert_eq!(r2, Done(&b"WXYZ"[..], C{a: 1, b: None}));

    let r3 = f(&b"abcdX"[..]);
    assert_eq!(r3, Incomplete(Needed::Size(8)));
  }

  use util::{error_to_list, add_error_pattern, print_error};

  // maps a chain of nested error kinds to a human-readable description
  fn error_to_string<P>(e: &Err<P>) -> &'static str {
    let v:Vec<ErrorKind> = error_to_list(e);
    // do it this way if you can use slice patterns
    /*
    match &v[..] {
      [ErrorKind::Custom(42), ErrorKind::Tag]                         => "missing `ijkl` tag",
      [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] => "missing `mnop` tag after `ijkl`",
      _            => "unrecognized error"
    }
    */
    if &v[..] == [ErrorKind::Custom(42),ErrorKind::Tag] {
      "missing `ijkl` tag"
    } else if &v[..] == [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] {
      "missing `mnop` tag after `ijkl`"
    } else {
      "unrecognized error"
    }
  }

  // do it this way if you can use box patterns
  /*use std::str;
  fn error_to_string(e:Err) -> String
    match e {
      NodePosition(ErrorKind::Custom(42), i1, box Position(ErrorKind::Tag, i2)) => {
        format!("missing `ijkl` tag, found '{}' instead", str::from_utf8(i2).unwrap())
      },
      NodePosition(ErrorKind::Custom(42), i1, box NodePosition(ErrorKind::Custom(128), i2,  box Position(ErrorKind::Tag, i3))) => {
        format!("missing `mnop` tag after `ijkl`, found '{}' instead", str::from_utf8(i3).unwrap())
      },
      _ => "unrecognized error".to_string()
    }
  }*/
  use std::collections;

  #[test]
  fn err() {
    named!(err_test, alt!(
      tag!("abcd") |
      preceded!(tag!("efgh"), error!(ErrorKind::Custom(42),
          chain!(
                 tag!("ijkl")              ~
            res: error!(ErrorKind::Custom(128), tag!("mnop")) ,
            || { res }
          )
        )
      )
    ));
    let a = &b"efghblah"[..];
    let b = &b"efghijklblah"[..];
    let c = &b"efghijklmnop"[..];

    let blah = &b"blah"[..];

    let res_a = err_test(a);
    let res_b = err_test(b);
    let res_c = err_test(c);
    assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
    assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..], Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah)))))));
    assert_eq!(res_c, Done(&b""[..], &b"mnop"[..]));

    // Merr-like error matching
    let mut err_map = collections::HashMap::new();
    assert!(add_error_pattern(&mut err_map, err_test(&b"efghpouet"[..]), "missing `ijkl` tag"));
    assert!(add_error_pattern(&mut err_map, err_test(&b"efghijklpouet"[..]), "missing `mnop` tag after `ijkl`"));

    let res_a2 = res_a.clone();
    match res_a {
      Error(e) => {
        assert_eq!(error_to_list(&e), [ErrorKind::Custom(42), ErrorKind::Tag]);
        assert_eq!(error_to_string(&e), "missing `ijkl` tag");
        assert_eq!(err_map.get(&error_to_list(&e)), Some(&"missing `ijkl` tag"));
      },
      _ => panic!()
    };

    let res_b2 = res_b.clone();
    match res_b {
      Error(e) => {
        assert_eq!(error_to_list(&e), [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag]);
        assert_eq!(error_to_string(&e), "missing `mnop` tag after `ijkl`");
        assert_eq!(err_map.get(&error_to_list(&e)), Some(&"missing `mnop` tag after `ijkl`"));
      },
      _ => panic!()
    };

    print_error(a, res_a2);
    print_error(b, res_b2);
  }

  #[test]
  fn add_err() {
    named!(err_test,
      preceded!(tag!("efgh"), add_error!(ErrorKind::Custom(42),
          chain!(
                 tag!("ijkl")              ~
            res: add_error!(ErrorKind::Custom(128), tag!("mnop")) ,
            || { res }
          )
        )
    ));
    let a = &b"efghblah"[..];
    let b = &b"efghijklblah"[..];
    let c = &b"efghijklmnop"[..];

    let blah = &b"blah"[..];

    let res_a = err_test(a);
    let res_b = err_test(b);
    let res_c = err_test(c);
    assert_eq!(res_a, Error(NodePosition(ErrorKind::Custom(42), blah, Box::new(Position(ErrorKind::Tag, blah)))));
    assert_eq!(res_b, Error(NodePosition(ErrorKind::Custom(42), &b"ijklblah"[..], Box::new(NodePosition(ErrorKind::Custom(128), blah, Box::new(Position(ErrorKind::Tag, blah)))))));
    assert_eq!(res_c, Done(&b""[..], &b"mnop"[..]));
  }

  #[test]
  fn complete() {
    named!(err_test,
      chain!(
             tag!("ijkl")              ~
        res: complete!(tag!("mnop")) ,
        || { res }
      )
    );
    let a = &b"ijklmn"[..];

    let res_a = err_test(a);
    assert_eq!(res_a, Error(Position(ErrorKind::Complete, &b"mn"[..])));
  }

  #[test]
  fn alt() {
    fn work(input: &[u8]) -> IResult<&[u8],&[u8], &'static str> {
      Done(&b""[..], input)
    }

    #[allow(unused_variables)]
    fn dont_work(input: &[u8]) -> IResult<&[u8],&[u8],&'static str> {
      Error(Code(ErrorKind::Custom("abcd")))
    }

    fn work2(input: &[u8]) -> IResult<&[u8],&[u8], &'static str> {
      Done(input, &b""[..])
    }

    fn alt1(i:&[u8]) ->  IResult<&[u8],&[u8], &'static str> {
      alt!(i, dont_work | dont_work)
    }
    fn alt2(i:&[u8]) ->  IResult<&[u8],&[u8], &'static str> {
      alt!(i, dont_work | work)
    }
    fn alt3(i:&[u8]) ->  IResult<&[u8],&[u8], &'static str> {
      alt!(i, dont_work | dont_work | work2 | dont_work)
    }
    //named!(alt1, alt!(dont_work | dont_work));
    //named!(alt2, alt!(dont_work | work));
    //named!(alt3, alt!(dont_work | dont_work | work2 | dont_work));

    let a = &b"abcd"[..];
    assert_eq!(alt1(a), Error(Position(ErrorKind::Alt, a)));
    assert_eq!(alt2(a), Done(&b""[..], a));
    assert_eq!(alt3(a), Done(a, &b""[..]));

    named!(alt4, alt!(tag!("abcd") | tag!("efgh")));
    let b = &b"efgh"[..];
    assert_eq!(alt4(a), Done(&b""[..], a));
    assert_eq!(alt4(b), Done(&b""[..], b));

    // test the alternative syntax
    named!(alt5<bool>, alt!(tag!("abcd") => { |_| false } | tag!("efgh") => { |_| true }));
    assert_eq!(alt5(a), Done(&b""[..], false));
    assert_eq!(alt5(b), Done(&b""[..], true));
  }

  #[test]
  fn alt_incomplete() {
    named!(alt1, alt!(tag!("a") | tag!("bc") | tag!("def")));

    let a = &b""[..];
    assert_eq!(alt1(a), Incomplete(Needed::Size(1)));
    let a = &b"b"[..];
    assert_eq!(alt1(a), Incomplete(Needed::Size(2)));
    let a = &b"bcd"[..];
    assert_eq!(alt1(a), Done(&b"d"[..], &b"bc"[..]));
    let a = &b"cde"[..];
    assert_eq!(alt1(a), Error(Position(ErrorKind::Alt, a)));
    let a = &b"de"[..];
    assert_eq!(alt1(a), Incomplete(Needed::Size(3)));
    let a = &b"defg"[..];
    assert_eq!(alt1(a), Done(&b"g"[..], &b"def"[..]));
  }

  #[test]
  fn switch() {
    named!(sw,
      switch!(take!(4),
        b"abcd" => take!(2) |
        b"efgh" => take!(4)
      )
    );

    let a = &b"abcdefgh"[..];
    assert_eq!(sw(a), Done(&b"gh"[..], &b"ef"[..]));

    let b = &b"efghijkl"[..];
    assert_eq!(sw(b), Done(&b""[..], &b"ijkl"[..]));
    let c = &b"afghijkl"[..];
    assert_eq!(sw(c), Error(Position(ErrorKind::Switch, &b"ijkl"[..])));
  }

  #[test]
  fn opt() {
    named!(o<&[u8],Option<&[u8]> >, opt!(tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"bcdefg"[..];
    assert_eq!(o(a), Done(&b"ef"[..], Some(&b"abcd"[..])));
    assert_eq!(o(b), Done(&b"bcdefg"[..], None));
  }

  #[test]
  fn opt_res() {
    named!(o<&[u8], Result<&[u8], Err<&[u8]>> >, opt_res!(tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"bcdefg"[..];
    assert_eq!(o(a), Done(&b"ef"[..], Ok(&b"abcd"[..])));
    assert_eq!(o(b), Done(&b"bcdefg"[..], Err(Position(ErrorKind::Tag, b))));
  }

  #[test]
  fn cond() {
    let b = true;
    let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b, tag!("abcd") ) ));

    let a = b"abcdef";
    assert_eq!(f(&a[..]), Done(&b"ef"[..], Some(&b"abcd"[..])));

    let b2 = false;
    let f2: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b2, tag!("abcd") ) ));
    //let f2 = closure!(&'static [u8], cond!( b2, tag!("abcd") ) );

    assert_eq!(f2(&a[..]), Done(&b"abcdef"[..], None));
  }

  #[test]
  fn cond_wrapping() {
    // Test that cond!() will wrap a given identifier in the call!() macro.
    named!(silly, tag!("foo"));

    let b = true;
    //let f = closure!(&'static [u8], cond!( b, silly ) );
    let f: Box<Fn(&'static [u8]) -> IResult<&[u8],Option<&[u8]>, &str>> = Box::new(closure!(&'static [u8], cond!( b, silly ) ));
    assert_eq!(f(b"foobar"), Done(&b"bar"[..], Some(&b"foo"[..])));
  }

  #[test]
  fn peek() {
    named!(ptag<&[u8],&[u8]>, peek!(tag!("abcd")));

    let r1 = ptag(&b"abcdefgh"[..]);
    assert_eq!(r1, Done(&b"abcdefgh"[..], &b"abcd"[..]));

    let r1 = ptag(&b"efgh"[..]);
    assert_eq!(r1, Error(Position(ErrorKind::Tag,&b"efgh"[..])));
  }

  #[test]
  fn pair() {
    named!(p<&[u8],(&[u8], &[u8])>, pair!(tag!("abcd"), tag!("efgh")));

    let r1 = p(&b"abcdefghijkl"[..]);
    assert_eq!(r1, Done(&b"ijkl"[..], (&b"abcd"[..], &b"efgh"[..])));
  }

  #[test]
  fn separated_pair() {
    named!(p<&[u8],(&[u8], &[u8])>, separated_pair!(tag!("abcd"), tag!(","), tag!("efgh")));

    let r1 = p(&b"abcd,efghijkl"[..]);
    assert_eq!(r1, Done(&b"ijkl"[..], (&b"abcd"[..], &b"efgh"[..])));
  }

  #[test]
  fn preceded() {
    named!(p<&[u8], &[u8]>, preceded!(tag!("abcd"), tag!("efgh")));

    let r1 = p(&b"abcdefghijkl"[..]);
    assert_eq!(r1, Done(&b"ijkl"[..], &b"efgh"[..]));
  }

  #[test]
  fn terminated() {
    named!(p<&[u8], &[u8]>, terminated!(tag!("abcd"), tag!("efgh")));

    let r1 = p(&b"abcdefghijkl"[..]);
    assert_eq!(r1, Done(&b"ijkl"[..], &b"abcd"[..]));
  }

  #[test]
  fn delimited() {
    named!(p<&[u8], &[u8]>, delimited!(tag!("abcd"), tag!("efgh"), tag!("ij")));

    let r1 = p(&b"abcdefghijkl"[..]);
    assert_eq!(r1, Done(&b"kl"[..], &b"efgh"[..]));
  }

  #[test]
  fn separated_list() {
    named!(multi<&[u8],Vec<&[u8]> >, separated_list!(tag!(","), tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"abcd,abcdef"[..];
    let c = &b"azerty"[..];

    let res1 = vec![&b"abcd"[..]];
    assert_eq!(multi(a), Done(&b"ef"[..], res1));
    let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
    assert_eq!(multi(b), Done(&b"ef"[..], res2));
    assert_eq!(multi(c), Done(&b"azerty"[..], Vec::new()));
  }

  #[test]
  fn separated_nonempty_list() {
    named!(multi<&[u8],Vec<&[u8]> >, separated_nonempty_list!(tag!(","), tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"abcd,abcdef"[..];
    let c = &b"azerty"[..];

    let res1 = vec![&b"abcd"[..]];
    assert_eq!(multi(a), Done(&b"ef"[..], res1));
    let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
    assert_eq!(multi(b), Done(&b"ef"[..], res2));
    assert_eq!(multi(c), Error(Position(ErrorKind::Tag,c)));
  }

  #[test]
  fn many0() {
    named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"abcdabcdef"[..];
    let c = &b"azerty"[..];

    let res1 = vec![&b"abcd"[..]];
    assert_eq!(multi(a), Done(&b"ef"[..], res1));
    let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
    assert_eq!(multi(b), Done(&b"ef"[..], res2));
    assert_eq!(multi(c), Done(&b"azerty"[..], Vec::new()));
  }

  #[cfg(feature = "nightly")]
  use test::Bencher;

  #[cfg(feature = "nightly")]
  #[bench]
  fn many0_bench(b: &mut Bencher) {
    named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd")));
    b.iter(|| {
      multi(&b"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"[..])
    });
  }

  #[test]
  fn many1() {
    named!(multi<&[u8],Vec<&[u8]> >, many1!(tag!("abcd")));

    let a = &b"abcdef"[..];
    let b = &b"abcdabcdef"[..];
    let c = &b"azerty"[..];
    let res1 = vec![&b"abcd"[..]];
    assert_eq!(multi(a), Done(&b"ef"[..], res1));
    let res2 = vec![&b"abcd"[..], &b"abcd"[..]];
    assert_eq!(multi(b), Done(&b"ef"[..], res2));
    assert_eq!(multi(c), Error(Position(ErrorKind::Many1,c)));
  }

  #[test]
  fn infinite_many() {
    fn tst(input: &[u8]) -> IResult<&[u8], &[u8]> {
      println!("input: {:?}", input);
      Error(Position(ErrorKind::Custom(0),input))
    }

    // should not go into an infinite loop
    named!(multi0<&[u8],Vec<&[u8]> >, many0!(tst));
    let a = &b"abcdef"[..];
    assert_eq!(multi0(a), Done(a, Vec::new()));

    named!(multi1<&[u8],Vec<&[u8]> >, many1!(tst));
    let a = &b"abcdef"[..];
    assert_eq!(multi1(a), Error(Position(ErrorKind::Many1,a)));
  }

  #[test]
  fn count() {
    fn counter(input: &[u8]) -> IResult<&[u8], Vec<&[u8]>> {
      let size: usize = 2;
      count!(input, tag!( "abcd" ), size )
    }

    let a = b"abcdabcdabcdef";
    let b = b"abcdefgh";
    let res = vec![&b"abcd"[..], &b"abcd"[..]];

    assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
    assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
  }

  #[test]
  fn count_zero() {
    fn counter(input: &[u8]) -> IResult<&[u8], Vec<&[u8]>> {
      let size: usize = 0;
      count!(input, tag!( "abcd" ), size )
    }

    let a = b"abcdabcdabcdef";
    let res: Vec<&[u8]> = Vec::new();

    assert_eq!(counter(&a[..]), Done(&b"abcdabcdabcdef"[..], res));
  }

  #[test]
  fn count_fixed() {
    //named!(counter< [&[u8]; 2], u32 >, count_fixed!( &[u8], tag!( "abcd" ), 2 ) );
    fn counter(input:&[u8]) -> IResult<&[u8], [&[u8]; 2], () > {
      count_fixed!(input, &[u8], tag!( "abcd" ), 2 )
    }

    let a = b"abcdabcdabcdef";
    let b = b"abcdefgh";
    let res = [&b"abcd"[..], &b"abcd"[..]];

    assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
    assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
  }

  use nom::{le_u16,eof};
  #[allow(dead_code)]
  pub fn compile_count_fixed(input: &[u8]) -> IResult<&[u8], ()> {
    chain!(input,
      tag!("abcd")                   ~
      count_fixed!( u16, le_u16, 4 ) ~
      eof                            ,
      || { () }
    )
  }

  #[test]
  fn count_fixed_no_type() {
    //named!(counter< [&[u8]; 2], u32 >, count_fixed!( &[u8], tag!( "abcd" ), 2 ) );
    fn counter(input:&[u8]) -> IResult<&[u8], [&[u8]; 2], () > {
      count_fixed!(input, &[u8], tag!( "abcd" ), 2 )
    }

    let a = b"abcdabcdabcdef";
    let b = b"abcdefgh";
    let res = [&b"abcd"[..], &b"abcd"[..]];

    assert_eq!(counter(&a[..]), Done(&b"abcdef"[..], res));
    assert_eq!(counter(&b[..]), Error(Position(ErrorKind::Count, &b[..])));
  }

  use nom::{be_u8,be_u16};
  #[test]
  fn length_value_test() {
    named!(tst1<&[u8], Vec<u16> >, length_value!(be_u8, be_u16));
    named!(tst2<&[u8], Vec<u16> >, length_value!(be_u8, be_u16, 2));

    let i1 = vec![0, 5, 6];
    let i2 = vec![1, 5, 6, 3];
    let i3 = vec![2, 5, 6, 3];
    let i4 = vec![2, 5, 6, 3, 4, 5, 7];
    let i5 = vec![3, 5, 6, 3, 4, 5];

    let r1: Vec<u16> = Vec::new();
    let r2: Vec<u16> = vec![1286];
    let r4: Vec<u16> = vec![1286, 772];
    assert_eq!(tst1(&i1), IResult::Done(&i1[1..], r1));
    assert_eq!(tst1(&i2), IResult::Done(&i2[3..], r2));
    assert_eq!(tst1(&i3), IResult::Incomplete(Needed::Size(5)));
    assert_eq!(tst1(&i4), IResult::Done(&i4[5..], r4));
    assert_eq!(tst1(&i5), IResult::Incomplete(Needed::Size(7)));

    let r6: Vec<u16> = Vec::new();
    let r7: Vec<u16> = vec![1286];
    let r9: Vec<u16> = vec![1286, 772];
    assert_eq!(tst2(&i1), IResult::Done(&i1[1..], r6));
    assert_eq!(tst2(&i2), IResult::Done(&i2[3..], r7));
    assert_eq!(tst2(&i3), IResult::Incomplete(Needed::Size(5)));
    assert_eq!(tst2(&i4), IResult::Done(&i4[5..], r9));
    assert_eq!(tst1(&i5), IResult::Incomplete(Needed::Size(7)));
  }

  #[test]
  fn chain_incomplete() {
    let res = chain!(&b"abcdefgh"[..],
      a: take!(4) ~
      b: take!(8),
      ||{(a,b )}
    );

    assert_eq!(res, IResult::Incomplete(Needed::Size(12)));
  }
}
|
pub use src::typedefs::{vec2,vec3,vec4};
pub use src::typedefs::{mat2x2,mat2x3,mat2x4};
pub use src::typedefs::{mat3x2,mat3x3,mat3x4};
pub use src::typedefs::{mat4x2,mat4x3,mat4x4};
use src::vector::{TVec2,TVec2MulRHS};
use src::vector::{TVec3,TVec3MulRHS};
use src::vector::{TVec4,TVec4MulRHS};
use src::scalar::{S,SMulRHS,SDivRHS};
use std::fmt;
/// Column access for matrix types; `ColVec` is the fixed-size column vector type.
trait Columns<ColVec> {
    /// Number of columns in the matrix.
    fn num_cols(&self) -> uint;
    /// Returns a copy of column `i` (the impls `assert!` that `i` is in range).
    fn col(&self, i: uint) -> ColVec;
}
/// Row access for matrix types; `RowVec` is the fixed-size row vector type.
trait Rows<RowVec> {
    /// Number of rows in the matrix.
    fn num_rows(&self) -> uint;
    /// Returns a copy of row `j` (the impls `assert!` that `j` is in range).
    fn row(&self, j: uint) -> RowVec;
}
/// Matrix inversion. Implemented only for square matrix types.
trait Invertible {
    // (should this be Result<Self> ?)
    // (also, should this take self by value?)
    fn inverse(&self) -> Self;
}
/// Element-wise map: applies `f` to each element of a matrix of `T`, yielding
/// `SelfU` (the same matrix shape over `U`). Uses old-Rust unboxed closure syntax.
trait Mappable<T,U,SelfU> {
    fn map(&self, |&T| -> U) -> SelfU;
}
/// Free-function wrapper around `Invertible::inverse`, mirroring GLM's `inverse()`.
pub fn inverse<Mat:Invertible>(mat: &Mat) -> Mat {
    mat.inverse()
}
// All the matrix representations use column-major storage, with GLM's
// "columns x rows" naming convention. For example, [[a, b, c], [d, e, f]]
// holds two columns of three elements each, and should be read as the
// 2-column by 3-row matrix:
//
//    ( a d |
//    | b e |
//    | c f )
// Each TMatCxR stores C columns of R elements (column-major), so
// `elems[col][row]` addresses one entry.

/// 2-column x 2-row matrix.
pub struct TMat2<T> {
    elems: [[T, ..2], ..2]
}
/// 3-column x 3-row matrix.
pub struct TMat3<T> {
    elems: [[T, ..3], ..3]
}
/// 4-column x 4-row matrix.
pub struct TMat4<T> {
    elems: [[T, ..4], ..4]
}
/// 2-column x 3-row matrix.
pub struct TMat2x3<T> {
    elems: [[T, ..3], ..2]
}
/// 2-column x 4-row matrix.
pub struct TMat2x4<T> {
    elems: [[T, ..4], ..2]
}
/// 3-column x 2-row matrix.
pub struct TMat3x2<T> {
    elems: [[T, ..2], ..3]
}
/// 3-column x 4-row matrix.
pub struct TMat3x4<T> {
    elems: [[T, ..4], ..3]
}
/// 4-column x 2-row matrix.
pub struct TMat4x2<T> {
    elems: [[T, ..2], ..4]
}
/// 4-column x 3-row matrix.
pub struct TMat4x3<T> {
    elems: [[T, ..3], ..4]
}
// Maps an element count literal (2/3/4) to the matching fixed-size vector type.
macro_rules! tvec_of_len {
    ($T:ident, 2) => { TVec2<$T> };
    ($T:ident, 3) => { TVec3<$T> };
    ($T:ident, 4) => { TVec4<$T> };
}
// Generates a `Columns` impl for the given matrix type. The second literal
// (2/3/4) selects the row count, i.e. which TVecN is the column type; elements
// are cloned out of column-major storage.
macro_rules! impl_Columns_for {
    ( $TMat:ident $ncols:expr 2 ) =>
    {
        impl<T:Clone> Columns<TVec2<T>> for $TMat<T> {
            fn num_cols(&self) -> uint { $ncols }
            fn col(&self, i: uint) -> TVec2<T> {
                assert!(i < $ncols);
                TVec2 { x: self.elems[i][0].clone(),
                        y: self.elems[i][1].clone(),
                }
            }
        }
    };
    ( $TMat:ident $ncols:expr 3 ) =>
    {
        impl<T:Clone> Columns<TVec3<T>> for $TMat<T> {
            fn num_cols(&self) -> uint { $ncols }
            fn col(&self, i: uint) -> TVec3<T> {
                assert!(i < $ncols);
                TVec3 { x: self.elems[i][0].clone(),
                        y: self.elems[i][1].clone(),
                        z: self.elems[i][2].clone(),
                }
            }
        }
    };
    ( $TMat:ident $ncols:expr 4 ) =>
    {
        impl<T:Clone> Columns<TVec4<T>> for $TMat<T> {
            fn num_cols(&self) -> uint { $ncols }
            fn col(&self, i: uint) -> TVec4<T> {
                assert!(i < $ncols);
                TVec4 { x: self.elems[i][0].clone(),
                        y: self.elems[i][1].clone(),
                        z: self.elems[i][2].clone(),
                        w: self.elems[i][3].clone(),
                }
            }
        }
    };
}
// Generates a `Rows` impl for the given matrix type. The first literal (2/3/4)
// selects the column count, i.e. the row vector length; a row gathers element
// `j` from each column of the column-major storage.
macro_rules! impl_Rows_for {
    ( $TMat:ident 2 $nrows:expr ) =>
    {
        impl<T:Clone> Rows<TVec2<T>> for $TMat<T> {
            fn num_rows(&self) -> uint { $nrows }
            fn row(&self, j: uint) -> TVec2<T> {
                assert!(j < $nrows);
                TVec2 { x: self.elems[0][j].clone(),
                        y: self.elems[1][j].clone(),
                }
            }
        }
    };
    ( $TMat:ident 3 $nrows:expr ) =>
    {
        impl<T:Clone> Rows<TVec3<T>> for $TMat<T> {
            fn num_rows(&self) -> uint { $nrows }
            fn row(&self, j: uint) -> TVec3<T> {
                assert!(j < $nrows);
                TVec3 { x: self.elems[0][j].clone(),
                        y: self.elems[1][j].clone(),
                        z: self.elems[2][j].clone(),
                }
            }
        }
    };
    ( $TMat:ident 4 $nrows:expr ) =>
    {
        impl<T:Clone> Rows<TVec4<T>> for $TMat<T> {
            fn num_rows(&self) -> uint { $nrows }
            fn row(&self, j: uint) -> TVec4<T> {
                assert!(j < $nrows);
                TVec4 { x: self.elems[0][j].clone(),
                        y: self.elems[1][j].clone(),
                        z: self.elems[2][j].clone(),
                        w: self.elems[3][j].clone(),
                }
            }
        }
    };
}
// Convenience: expands to both the Columns and the Rows impl for one matrix type.
macro_rules! impl_ColRow_for {
    ( $TMat:ident $ncols:tt $nrows:tt ) =>
    {
        impl_Columns_for! ($TMat $ncols $nrows)
        impl_Rows_for!    ($TMat $ncols $nrows)
    }
}
// Instantiate Columns/Rows for every matrix shape (cols rows).
impl_ColRow_for!(TMat2   2 2)
impl_ColRow_for!(TMat2x3 2 3)
impl_ColRow_for!(TMat2x4 2 4)

impl_ColRow_for!(TMat3   3 3)
impl_ColRow_for!(TMat3x2 3 2)
impl_ColRow_for!(TMat3x4 3 4)

impl_ColRow_for!(TMat4   4 4)
impl_ColRow_for!(TMat4x2 4 2)
impl_ColRow_for!(TMat4x3 4 3)
impl<T:Num> Invertible for TMat2<T> {
fn inverse(&self) -> TMat2<T> {
#![allow(uppercase_variables)]
use std::num::One;
let m = &self.elems;
let one : T = One::one();
let OneOverDeterminant = one / (m[0][0] * m[1][1] - m[1][0] * m[0][1]);
TMat2 { elems: [[m[1][1] * OneOverDeterminant, m[0][1] * OneOverDeterminant],
[m[1][0] * OneOverDeterminant, m[0][0] * OneOverDeterminant]] }
}
}
// Generates a `fmt::Show` impl: prints the columns as slices, one per line,
// wrapped in brackets.
macro_rules! impl_Show_for {
    ($TMat:ident $ncols:expr $nrows:expr) =>
    {
        impl<T:fmt::Show> fmt::Show for $TMat<T> {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                try!(write!(f, "["));
                for u in range(0u, $ncols-1) {
                    try!(self.elems[u].as_slice().fmt(f));
                    try!(write!(f, "\n "));
                }
                // last column is printed without a trailing newline
                try!(self.elems[$ncols-1].as_slice().fmt(f));
                write!(f, "]")
            }
        }
    }
}
// Generates an `Eq` impl comparing column arrays pairwise (old-Rust `Eq` is
// the equality-method trait, akin to today's `PartialEq`).
macro_rules! impl_Eq_for {
    ($TMat:ident $ncols:expr $nrows:expr) =>
    {
        impl<T:Eq> Eq for $TMat<T> {
            fn eq(&self, rhs: &$TMat<T>) -> bool {
                for u in range(0u, $ncols) {
                    if self.elems[u] != rhs.elems[u] {
                        return false;
                    }
                }
                return true;
            }
        }
    }
}
// Convenience: expands to the Show and Eq impls for one matrix type.
macro_rules! easy_impls_for {
    ($TMat:ident $ncols:expr $nrows:expr) =>
    {
        impl_Show_for!($TMat $ncols $nrows)
        impl_Eq_for!($TMat $ncols $nrows)
    }
}
// Instantiate Show/Eq for every matrix shape (cols rows).
easy_impls_for!(TMat2   2 2)
easy_impls_for!(TMat2x3 2 3)
easy_impls_for!(TMat2x4 2 4)

easy_impls_for!(TMat3   3 3)
easy_impls_for!(TMat3x2 3 2)
easy_impls_for!(TMat3x4 3 4)

easy_impls_for!(TMat4   4 4)
easy_impls_for!(TMat4x2 4 2)
easy_impls_for!(TMat4x3 4 3)
// Element-wise map impls, written out per shape because the element count
// differs. `f` is applied column by column in storage order.
impl<T,U> Mappable<T,U,TMat2<U>> for TMat2<T> {
    fn map(&self, f: |&T| -> U) -> TMat2<U> {
        TMat2 { elems: [[f(&self.elems[0][0]), f(&self.elems[0][1])],
                        [f(&self.elems[1][0]), f(&self.elems[1][1])]] }
    }
}

impl<T,U> Mappable<T,U,TMat2x3<U>> for TMat2x3<T> {
    fn map(&self, f: |&T| -> U) -> TMat2x3<U> {
        TMat2x3 { elems: [[f(&self.elems[0][0]), f(&self.elems[0][1]), f(&self.elems[0][2])],
                          [f(&self.elems[1][0]), f(&self.elems[1][1]), f(&self.elems[1][2])]] }
    }
}

impl<T,U> Mappable<T,U,TMat2x4<U>> for TMat2x4<T> {
    fn map(&self, f: |&T| -> U) -> TMat2x4<U> {
        TMat2x4 { elems: [[f(&self.elems[0][0]), f(&self.elems[0][1]), f(&self.elems[0][2]), f(&self.elems[0][3])],
                          [f(&self.elems[1][0]), f(&self.elems[1][1]), f(&self.elems[1][2]), f(&self.elems[1][3])]] }
    }
}
// Argument-conversion traits emulating GLM's overloaded matrix constructors:
// each tuple/scalar shape that can build a matrix implements the matching trait.
pub trait Mat2x2Args { fn make(self) -> mat2x2; }
pub trait Mat2x3Args { fn make(self) -> mat2x3; }
pub trait Mat2x4Args { fn make(self) -> mat2x4; }
pub trait Mat3x2Args { fn make(self) -> mat3x2; }
pub trait Mat3x3Args { fn make(self) -> mat3x3; }
pub trait Mat3x4Args { fn make(self) -> mat3x4; }
pub trait Mat4x2Args { fn make(self) -> mat4x2; }
pub trait Mat4x3Args { fn make(self) -> mat4x3; }
pub trait Mat4x4Args { fn make(self) -> mat4x4; }
// GLM-style constructor functions; `mat2`/`mat3`/`mat4` are aliases for the
// square forms. All dispatch through the Args traits above.
pub fn mat2<Args:Mat2x2Args>(args: Args) -> mat2x2 { args.make() }
pub fn mat2x2<Args:Mat2x2Args>(args: Args) -> mat2x2 { args.make() }
pub fn mat2x3<Args:Mat2x3Args>(args: Args) -> mat2x3 { args.make() }
pub fn mat2x4<Args:Mat2x4Args>(args: Args) -> mat2x4 { args.make() }
pub fn mat3<Args:Mat3x3Args>(args: Args) -> mat3x3 { args.make() }
pub fn mat3x2<Args:Mat3x2Args>(args: Args) -> mat3x2 { args.make() }
pub fn mat3x3<Args:Mat3x3Args>(args: Args) -> mat3x3 { args.make() }
pub fn mat3x4<Args:Mat3x4Args>(args: Args) -> mat3x4 { args.make() }
pub fn mat4<Args:Mat4x4Args>(args: Args) -> mat4x4 { args.make() }
pub fn mat4x2<Args:Mat4x2Args>(args: Args) -> mat4x2 { args.make() }
pub fn mat4x3<Args:Mat4x3Args>(args: Args) -> mat4x3 { args.make() }
pub fn mat4x4<Args:Mat4x4Args>(args: Args) -> mat4x4 { args.make() }
// Generates Mat2x2Args impls for one combination of argument types. Arms:
//   `copy`        — a single scalar splatted into all four entries;
//   four idents   — four scalars, plus the partially-tupled variants;
//   `$a 2, b, c`  — a 2-vector first column plus two scalars;
//   `a, b, $c 2`  — two scalars plus a 2-vector second column;
//   `$a 2, $b 2`  — two 2-vectors, one per column.
// All entries are cast to f32 (mat2x2 is the f32 typedef).
macro_rules! impl_Mat2x2Args_for {
    ($a:ident copy) => {
        impl Mat2x2Args for $a {
            fn make(self) -> mat2x2 {
                let x = self;
                TMat2 { elems: [[x as f32,x as f32],
                                [x as f32,x as f32]] }
            }
        }
    };
    ($a:ident,$b:ident,$c:ident,$d:ident) => {
        impl Mat2x2Args for ($a,$b,$c,$d) {
            fn make(self) -> mat2x2 {
                let (a,b,c,d) = self;
                TMat2 { elems: [[a as f32,b as f32],
                                [c as f32,d as f32]] }
            }
        }
        impl Mat2x2Args for (($a,$b),$c,$d) {
            fn make(self) -> mat2x2 {
                let ((a,b),c,d) = self;
                TMat2 { elems: [[a as f32,b as f32],
                                [c as f32,d as f32]] }
            }
        }
        impl Mat2x2Args for ($a,$b,($c,$d)) {
            fn make(self) -> mat2x2 {
                let (a,b,(c,d)) = self;
                TMat2 { elems: [[a as f32,b as f32],
                                [c as f32,d as f32]] }
            }
        }
        impl Mat2x2Args for (($a,$b),($c,$d)) {
            fn make(self) -> mat2x2 {
                let ((a,b),(c,d)) = self;
                TMat2 { elems: [[a as f32,b as f32],
                                [c as f32,d as f32]] }
            }
        }
    };
    ($a:ident 2,$b:ident,$c:ident) => {
        impl Mat2x2Args for ($a,$b,$c) {
            fn make(self) -> mat2x2 {
                let (a,b,c) = self;
                TMat2 { elems: [[a.x as f32,a.y as f32],
                                [b as f32,  c as f32]] }
            }
        }
        impl Mat2x2Args for ($a,($b,$c)) {
            fn make(self) -> mat2x2 {
                let (a,(b,c)) = self;
                TMat2 { elems: [[a.x as f32,a.y as f32],
                                [b as f32,  c as f32]] }
            }
        }
    };
    ($a:ident,$b:ident,$c:ident 2) => {
        impl Mat2x2Args for ($a,$b,$c) {
            fn make(self) -> mat2x2 {
                let (a,b,c) = self;
                TMat2 { elems: [[a as f32,  b as f32],
                                [c.x as f32,c.y as f32]] }
            }
        }
        impl Mat2x2Args for (($a,$b),$c) {
            fn make(self) -> mat2x2 {
                let ((a,b),c) = self;
                TMat2 { elems: [[a as f32,  b as f32],
                                [c.x as f32,c.y as f32]] }
            }
        }
    };
    ($a:ident 2,$b:ident 2) => {
        impl Mat2x2Args for ($a,$b) {
            fn make(self) -> mat2x2 {
                let (a,b) = self;
                TMat2 { elems: [[a.x as f32,a.y as f32],
                                [b.x as f32,b.y as f32]] }
            }
        }
    };
}
// Generates `Mat2x3Args` impls for one accepted argument shape.
// Arm grammar: `T copy` = scalar splat; six plain idents = six scalars
// (plus column-tupled variants); `T 3` marks a 3-vector supplying a column.
// FIX: removed an arm that duplicated `($a:ident 3, $d,$e,$f)` token-for-token;
// macro_rules! tries arms in order, so the second identical pattern could
// never match and was dead code.
macro_rules! impl_Mat2x3Args_for {
    // scalar splat: mat2x3(x) fills every element with x
    ($a:ident copy) => {
        impl Mat2x3Args for $a {
            fn make(self) -> mat2x3 {
                let x = self;
                TMat2x3 { elems: [[x as f32,x as f32,x as f32],
                                  [x as f32,x as f32,x as f32]] }
            }
        }
    };
    // six scalars, in flat and column-tupled spellings
    ($a:ident,$b:ident,$c:ident,
     $d:ident,$e:ident,$f:ident) => {
        impl Mat2x3Args for ($a,$b,$c,
                             $d,$e,$f) {
            fn make(self) -> mat2x3 {
                let (a,b,c,d,e,f) = self;
                TMat2x3 { elems: [[a as f32,b as f32,c as f32],
                                  [d as f32,e as f32,f as f32]] }
            }
        }
        impl Mat2x3Args for (($a,$b,$c),
                             ($d,$e,$f)) {
            fn make(self) -> mat2x3 {
                let ((a,b,c),(d,e,f)) = self;
                TMat2x3 { elems: [[a as f32,b as f32,c as f32],
                                  [d as f32,e as f32,f as f32]] }
            }
        }
        impl Mat2x3Args for ($a,$b,$c,
                             ($d,$e,$f)) {
            fn make(self) -> mat2x3 {
                let (a,b,c,(d,e,f)) = self;
                TMat2x3 { elems: [[a as f32,b as f32,c as f32],
                                  [d as f32,e as f32,f as f32]] }
            }
        }
        impl Mat2x3Args for (($a,$b,$c),
                             $d,$e,$f) {
            fn make(self) -> mat2x3 {
                let ((a,b,c),d,e,f) = self;
                TMat2x3 { elems: [[a as f32,b as f32,c as f32],
                                  [d as f32,e as f32,f as f32]] }
            }
        }
    };
    // 3-vector first column + three scalars for the second
    ($a:ident 3,
     $d:ident,$e:ident,$f:ident) => {
        impl Mat2x3Args for ($a,
                             $d,$e,$f) {
            fn make(self) -> mat2x3 {
                let (a,
                     d,e,f) = self;
                TMat2x3 { elems: [[a.x as f32,a.y as f32,a.z as f32],
                                  [d as f32,e as f32,f as f32]] }
            }
        }
    };
    // three scalars for the first column + 3-vector second column
    ($a:ident,$b:ident,$c:ident,
     $d:ident 3) => {
        impl Mat2x3Args for ($a,$b,$c,
                             $d) {
            fn make(self) -> mat2x3 {
                let (a,b,c,
                     d) = self;
                TMat2x3 { elems: [[a as f32,b as f32,c as f32],
                                  [d.x as f32,d.y as f32,d.z as f32]] }
            }
        }
    };
    // two 3-vectors, one per column
    ($a:ident 3,
     $d:ident 3) => {
        impl Mat2x3Args for ($a,
                             $d) {
            fn make(self) -> mat2x3 {
                let (a,
                     d) = self;
                TMat2x3 { elems: [[a.x as f32,a.y as f32,a.z as f32],
                                  [d.x as f32,d.y as f32,d.z as f32]] }
            }
        }
    };
}
impl_Mat2x2Args_for!(f32 copy)
impl_Mat2x2Args_for!(int copy)
// Adapter: forwards one combination produced by `all_choices!` to the impl
// macro, discarding the bookkeeping tail arguments.
macro_rules! impl_Mat2x2Args_for_choice {
    ( $a:ident, $b:ident, $c:ident, $d:ident, $($ignore:ident),*) => {
        impl_Mat2x2Args_for!($a, $b, $c, $d)
    }
}
// Instantiate impls for every (int|f32)^4 scalar combination.
all_choices!( impl_Mat2x2Args_for_choice :
              todo: { (int | f32) (int | f32)
                      (int | f32) (int | f32) }
              done: { (ignored) } )
impl_Mat2x2Args_for!(vec2 2,vec2 2)
// Adapter: forwards one (int|f32)^6 combination from `all_choices!` to the
// impl macro, discarding the bookkeeping tail.
macro_rules! impl_Mat2x3Args_for_choice {
    ( $a:ident, $b:ident, $c:ident, $d:ident, $e:ident, $f:ident, $($ignore:ident),*) => {
        impl_Mat2x3Args_for!($a, $b, $c, $d, $e, $f)
    }
}
all_choices!( impl_Mat2x3Args_for_choice :
              todo: { (int | f32) (int | f32) (int | f32)
                      (int | f32) (int | f32) (int | f32) }
              done: { (ignored) } )
impl_Mat2x3Args_for!(f32 copy)
impl_Mat2x3Args_for!(int copy)
// vector+vector, then every vector+scalars / scalars+vector mixture,
// spelled out by hand (only the pure-scalar grid goes via all_choices!).
impl_Mat2x3Args_for!(vec3 3,vec3 3)
impl_Mat2x3Args_for!(vec3 3,f32,f32,f32)
impl_Mat2x3Args_for!(vec3 3,f32,f32,int)
impl_Mat2x3Args_for!(vec3 3,f32,int,f32)
impl_Mat2x3Args_for!(vec3 3,f32,int,int)
impl_Mat2x3Args_for!(vec3 3,int,f32,f32)
impl_Mat2x3Args_for!(vec3 3,int,f32,int)
impl_Mat2x3Args_for!(vec3 3,int,int,f32)
impl_Mat2x3Args_for!(vec3 3,int,int,int)
impl_Mat2x3Args_for!(f32,f32,f32,vec3 3)
impl_Mat2x3Args_for!(f32,f32,int,vec3 3)
impl_Mat2x3Args_for!(f32,int,f32,vec3 3)
impl_Mat2x3Args_for!(f32,int,int,vec3 3)
impl_Mat2x3Args_for!(int,f32,f32,vec3 3)
impl_Mat2x3Args_for!(int,f32,int,vec3 3)
impl_Mat2x3Args_for!(int,int,f32,vec3 3)
impl_Mat2x3Args_for!(int,int,int,vec3 3)
// Generates `Mat2x4Args` impls for one accepted argument shape.
// Arm grammar: `T copy` = scalar splat; eight plain idents = eight scalars
// (plus column-tupled variants); `T 4` marks a 4-vector supplying a column.
macro_rules! impl_Mat2x4Args_for {
    // scalar splat: mat2x4(x) fills every element with x
    ($a:ident copy) => {
        impl Mat2x4Args for $a {
            fn make(self) -> mat2x4 {
                let x = self;
                TMat2x4 { elems: [[x as f32,x as f32,x as f32,x as f32],
                                  [x as f32,x as f32,x as f32,x as f32]] }
            }
        }
    };
    // eight scalars, in flat and column-tupled spellings
    ($a:ident,$b:ident,$c:ident,$d:ident,
     $e:ident,$f:ident,$g:ident,$h:ident) => {
        impl Mat2x4Args for ($a,$b,$c,$d,
                             $e,$f,$g,$h) {
            fn make(self) -> mat2x4 {
                let (a,b,c,d,
                     e,f,g,h) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
        impl Mat2x4Args for (($a,$b,$c,$d),
                             $e,$f,$g,$h) {
            fn make(self) -> mat2x4 {
                let ((a,b,c,d),
                     e,f,g,h) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
        impl Mat2x4Args for ($a,$b,$c,$d,
                             ($e,$f,$g,$h)) {
            fn make(self) -> mat2x4 {
                let (a,b,c,d,
                     (e,f,g,h)) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
        impl Mat2x4Args for (($a,$b,$c,$d),
                             ($e,$f,$g,$h)) {
            fn make(self) -> mat2x4 {
                let ((a,b,c,d),
                     (e,f,g,h)) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
    };
    // 4-vector first column + four scalars for the second
    ($a:ident 4,
     $e:ident,$f:ident,$g:ident,$h:ident) => {
        impl Mat2x4Args for ($a,
                             $e,$f,$g,$h) {
            fn make(self) -> mat2x4 {
                let (a,
                     e,f,g,h) = self;
                TMat2x4 { elems: [[a.x as f32,a.y as f32,a.z as f32,a.w as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
        impl Mat2x4Args for ($a,
                             ($e,$f,$g,$h)) {
            fn make(self) -> mat2x4 {
                let (a,
                     (e,f,g,h)) = self;
                TMat2x4 { elems: [[a.x as f32,a.y as f32,a.z as f32,a.w as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
    };
    // four scalars for the first column + 4-vector second column
    ($a:ident,$b:ident,$c:ident,$d:ident,
     $e:ident 4) => {
        impl Mat2x4Args for ($a,$b,$c,$d,
                             $e) {
            fn make(self) -> mat2x4 {
                let (a,b,c,d,
                     e) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e.x as f32,e.y as f32,e.z as f32,e.w as f32]] }
            }
        }
        impl Mat2x4Args for (($a,$b,$c,$d),
                             $e) {
            fn make(self) -> mat2x4 {
                let ((a,b,c,d),
                     e) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e.x as f32,e.y as f32,e.z as f32,e.w as f32]] }
            }
        }
    };
    // two 4-vectors, one per column
    ($a:ident 4,
     $e:ident 4) => {
        impl Mat2x4Args for ($a,
                             $e) {
            fn make(self) -> mat2x4 {
                let (a,
                     e) = self;
                TMat2x4 { elems: [[a.x as f32,a.y as f32,a.z as f32,a.w as f32],
                                  [e.x as f32,e.y as f32,e.z as f32,e.w as f32]] }
            }
        }
    };
}
// Adapter: forwards one (int|f32)^8 combination from `all_choices!` to the
// impl macro, discarding the bookkeeping tail.
macro_rules! impl_Mat2x4Args_for_choice {
    ( $a:ident, $b:ident, $c:ident, $d:ident,
      $e:ident, $f:ident, $g:ident, $h:ident, $($ignore:ident),*) => {
        impl_Mat2x4Args_for!($a, $b, $c, $d, $e, $f, $g, $h)
    }
}
impl_Mat2x4Args_for!(f32 copy)
impl_Mat2x4Args_for!(int copy)
all_choices!(impl_Mat2x4Args_for_choice :
             todo: { (int | f32) (int | f32) (int | f32) (int | f32)
                     (int | f32) (int | f32) (int | f32) (int | f32) }
             done: { (ignored) } )
impl_Mat2x4Args_for!(vec4 4,vec4 4)
// Wire up `*` and `/` for each matrix type via double dispatch:
// `a * b` becomes `b.rev_mul(&a)` through the generated `*MulRHS` trait,
// letting the right-hand type select the overload (matrix, vector, scalar).
double_dispatch_T!{Mul for TMat2 mul via TMat2x2MulRHS rev_mul}
double_dispatch_T!{Div for TMat2 div via TMat2x2DivRHS rev_div}
double_dispatch_T!{Mul for TMat2x3 mul via TMat2x3MulRHS rev_mul}
double_dispatch_T!{Div for TMat2x3 div via TMat2x3DivRHS rev_div}
double_dispatch_T!{Mul for TMat2x4 mul via TMat2x4MulRHS rev_mul}
double_dispatch_T!{Div for TMat2x4 div via TMat2x4DivRHS rev_div}
double_dispatch_T!{Mul for TMat3 mul via TMat3x3MulRHS rev_mul}
double_dispatch_T!{Div for TMat3 div via TMat3x3DivRHS rev_div}
double_dispatch_T!{Mul for TMat3x2 mul via TMat3x2MulRHS rev_mul}
double_dispatch_T!{Div for TMat3x2 div via TMat3x2DivRHS rev_div}
double_dispatch_T!{Mul for TMat3x4 mul via TMat3x4MulRHS rev_mul}
double_dispatch_T!{Div for TMat3x4 div via TMat3x4DivRHS rev_div}
double_dispatch_T!{Mul for TMat4 mul via TMat4x4MulRHS rev_mul}
double_dispatch_T!{Div for TMat4 div via TMat4x4DivRHS rev_div}
double_dispatch_T!{Mul for TMat4x2 mul via TMat4x2MulRHS rev_mul}
double_dispatch_T!{Div for TMat4x2 div via TMat4x2DivRHS rev_div}
double_dispatch_T!{Mul for TMat4x3 mul via TMat4x3MulRHS rev_mul}
double_dispatch_T!{Div for TMat4x3 div via TMat4x3DivRHS rev_div}
impl<T:Num> TMat2x2MulRHS<T,TMat2<T>> for TMat2<T> {
    // Product of two 2x2 matrices; `lhs` is the left operand of `*`,
    // `self` the right. Each output element is a two-term dot product.
    fn rev_mul(&self, lhs: &TMat2<T>) -> TMat2<T> {
        let a = &lhs.elems;
        let b = &self.elems;
        TMat2 { elems: [[a[0][0] * b[0][0] + a[0][1] * b[1][0],
                         a[0][0] * b[0][1] + a[0][1] * b[1][1]],
                        [a[1][0] * b[0][0] + a[1][1] * b[1][0],
                         a[1][0] * b[0][1] + a[1][1] * b[1][1]]] }
    }
}
// mat2x3 * mat3x2 -> mat2x2: each output element is a three-term dot product
// over the shared dimension; `lhs` is the left operand of `*`.
impl<T:Num> TMat2x3MulRHS<T,TMat2<T>> for TMat3x2<T> {
    fn rev_mul(&self, lhs: &TMat2x3<T>) -> TMat2<T> {
        let l = &lhs.elems;
        let r = &self.elems;
        let c00 = l[0][0] * r[0][0] + l[0][1] * r[1][0] + l[0][2] * r[2][0];
        let c01 = l[0][0] * r[0][1] + l[0][1] * r[1][1] + l[0][2] * r[2][1];
        let c10 = l[1][0] * r[0][0] + l[1][1] * r[1][0] + l[1][2] * r[2][0];
        let c11 = l[1][0] * r[0][1] + l[1][1] * r[1][1] + l[1][2] * r[2][1];
        TMat2 { elems: [[c00, c01],
                        [c10, c11]] }
    }
}
impl<T:Num> TMat2x3MulRHS<T,TVec3<T>> for TVec2<T> {
    // mat2x3 * vec2 -> vec3 (column-major storage: elems[col][row]).
    fn rev_mul(&self, lhs: &TMat2x3<T>) -> TVec3<T> {
        let m = &lhs.elems;
        TVec3{ x: m[0][0] * self.x + m[1][0] * self.y,
               y: m[0][1] * self.x + m[1][1] * self.y,
               z: m[0][2] * self.x + m[1][2] * self.y }
    }
}
impl<T:Num> TMat2x2MulRHS<T,TVec2<T>> for TVec2<T> {
    // mat2 * vec2 -> vec2 (column-major storage: elems[col][row]).
    fn rev_mul(&self, lhs: &TMat2<T>) -> TVec2<T> {
        let m = &lhs.elems;
        TVec2{ x: m[0][0] * self.x + m[1][0] * self.y,
               y: m[0][1] * self.x + m[1][1] * self.y }
    }
}
impl<T:Num> TVec2MulRHS<T,TVec2<T>> for TMat2<T> {
    // vec2 * mat2 -> vec2 (row vector times matrix).
    fn rev_mul(&self, lhs: &TVec2<T>) -> TVec2<T> {
        let m = &self.elems;
        TVec2{ x: lhs.x * m[0][0] + lhs.y * m[0][1],
               y: lhs.x * m[1][0] + lhs.y * m[1][1] }
    }
}
impl<T:Num> TVec3MulRHS<T,TVec2<T>> for TMat2x3<T> {
    // vec3 * mat2x3 -> vec2 (row vector times matrix).
    fn rev_mul(&self, lhs: &TVec3<T>) -> TVec2<T> {
        let m = &self.elems;
        TVec2{ x: lhs.x * m[0][0] + lhs.y * m[0][1] + lhs.z * m[0][2],
               y: lhs.x * m[1][0] + lhs.y * m[1][1] + lhs.z * m[1][2] }
    }
}
impl<T:Num> TVec3MulRHS<T,TVec2<T>> for TMat3x2<T> {
    // vec3 * mat3x2 -> vec2 (row vector times matrix).
    fn rev_mul(&self, lhs: &TVec3<T>) -> TVec2<T> {
        let m = &self.elems;
        TVec2{ x: lhs.x * m[0][0] + lhs.y * m[1][0] + lhs.z * m[2][0],
               y: lhs.x * m[0][1] + lhs.y * m[1][1] + lhs.z * m[2][1] }
    }
}
// Scalar <-> matrix `*` and `/` overloads via the double-dispatch traits,
// each implemented element-wise through `Mappable::map`. `S<T>` is the
// scalar newtype used to disambiguate operand order.
// NOTE(review): hand-written per type; the destructuring style is mixed
// (`let S(ref f) = *self` vs `let &S(ref f) = lhs`) — a good candidate for
// macro-factoring.
impl<T:Num> TMat2x2MulRHS<T,TMat2<T>> for S<T> {
    // mat2 * scalar
    fn rev_mul(&self, lhs: &TMat2<T>) -> TMat2<T> {
        let S(ref f) = *self;
        lhs.map(|x|*x * *f)
    }
}
impl<T:Num> TMat2x3MulRHS<T,TMat2x3<T>> for S<T> {
    // mat2x3 * scalar
    fn rev_mul(&self, lhs: &TMat2x3<T>) -> TMat2x3<T> {
        let S(ref f) = *self;
        lhs.map(|x|*x * *f)
    }
}
impl<T:Num> TMat2x2DivRHS<T,TMat2<T>> for S<T> {
    // mat2 / scalar
    fn rev_div(&self, lhs: &TMat2<T>) -> TMat2<T> {
        let S(ref f) = *self;
        lhs.map(|x|*x / *f)
    }
}
impl<T:Num> TMat2x3DivRHS<T,TMat2x3<T>> for S<T> {
    // mat2x3 / scalar
    fn rev_div(&self, lhs: &TMat2x3<T>) -> TMat2x3<T> {
        let S(ref f) = *self;
        lhs.map(|x|*x / *f)
    }
}
impl<T:Num> SMulRHS<T,TMat2<T>> for TMat2<T> {
    // scalar * mat2
    fn rev_mul(&self, lhs: &S<T>) -> TMat2<T> {
        let S(ref f) = *lhs;
        self.map(|x|*f * *x)
    }
}
impl<T:Num> SMulRHS<T,TMat2x3<T>> for TMat2x3<T> {
    // scalar * mat2x3
    fn rev_mul(&self, lhs: &S<T>) -> TMat2x3<T> {
        let S(ref f) = *lhs;
        self.map(|x|*f * *x)
    }
}
impl<T:Num> SDivRHS<T,TMat2<T>> for TMat2<T> {
    // scalar / mat2 (element-wise reciprocal scaled by the scalar)
    fn rev_div(&self, lhs: &S<T>) -> TMat2<T> {
        let S(ref f) = *lhs;
        self.map(|x|*f / *x)
    }
}
impl<T:Num> SDivRHS<T,TMat2x3<T>> for TMat2x3<T> {
    // scalar / mat2x3
    fn rev_div(&self, lhs: &S<T>) -> TMat2x3<T> {
        let S(ref f) = *lhs;
        self.map(|x|*f / *x)
    }
}
impl<T:Num> TMat2x4MulRHS<T,TVec4<T>> for TVec2<T> {
    // mat2x4 * vec2 -> vec4
    fn rev_mul(&self, lhs: &TMat2x4<T>) -> TVec4<T> {
        TVec4 { x: lhs.elems[0][0] * self.x + lhs.elems[1][0] * self.y,
                y: lhs.elems[0][1] * self.x + lhs.elems[1][1] * self.y,
                z: lhs.elems[0][2] * self.x + lhs.elems[1][2] * self.y,
                w: lhs.elems[0][3] * self.x + lhs.elems[1][3] * self.y, }
    }
}
impl<T:Num> TVec4MulRHS<T,TVec2<T>> for TMat2x4<T> {
    // vec4 * mat2x4 -> vec2
    fn rev_mul(&self, lhs: &TVec4<T>) -> TVec2<T> {
        TVec2 { x: (lhs.x * self.elems[0][0] + lhs.y * self.elems[0][1] +
                    lhs.z * self.elems[0][2] + lhs.w * self.elems[0][3]),
                y: (lhs.x * self.elems[1][0] + lhs.y * self.elems[1][1] +
                    lhs.z * self.elems[1][2] + lhs.w * self.elems[1][3]),
        }
    }
}
impl<T:Num> SMulRHS<T,TMat2x4<T>> for TMat2x4<T> {
    // scalar * mat2x4
    fn rev_mul(&self, lhs: &S<T>) -> TMat2x4<T> {
        let &S(ref f) = lhs;
        self.map(|x|*f * *x)
    }
}
impl<T:Num> SDivRHS<T,TMat2x4<T>> for TMat2x4<T> {
    // scalar / mat2x4
    fn rev_div(&self, lhs: &S<T>) -> TMat2x4<T> {
        let &S(ref f) = lhs;
        self.map(|x|*f / *x)
    }
}
impl<T:Num> TMat2x4MulRHS<T,TMat2x4<T>> for S<T> {
    // mat2x4 * scalar
    fn rev_mul(&self, lhs: &TMat2x4<T>) -> TMat2x4<T> {
        let &S(ref f) = self;
        lhs.map(|x|*x * *f)
    }
}
impl<T:Num> TMat2x4DivRHS<T,TMat2x4<T>> for S<T> {
    // mat2x4 / scalar
    fn rev_div(&self, lhs: &TMat2x4<T>) -> TMat2x4<T> {
        let &S(ref f) = self;
        lhs.map(|x|*x / *f)
    }
}
}
#[cfg(test)]
mod mat2x2_tests {
    #![allow(uppercase_variables)]
    use super::{mat2x2,mat2,mat2x3};
    use super::{inverse};
    use super::{Rows,Columns};
    use src::operators::{EpsilonEq};
    use src::typedefs::{vec2};
    use src::vector::{vec2,vec3};
    use src::scalar::S;
    #[test]
    fn basics() {
        // Column-major layout: each inner tuple is a column, so rows are
        // gathered across columns and columns read back verbatim.
        let m123_456 = mat2x3(((1,2,3),
                               (4,5,6)));
        assert_eq!(m123_456.row(0), vec2((1,4)));
        assert_eq!(m123_456.row(1), vec2((2,5)));
        assert_eq!(m123_456.row(2), vec2((3,6)));
        assert_eq!(m123_456.col(0), vec3((1,2,3)));
        assert_eq!(m123_456.col(1), vec3((4,5,6)));
    }
    #[test]
    fn test_operators() {
        // Type-checks every mat2 overload (mat*vec, vec*mat, scalar ops);
        // with all-ones operands `m * S(1.0)` must be the identity on m.
        let l = mat2x2(1.0f32);
        let m = mat2x2(1.0f32);
        let u = vec2(1.0f32);
        let v = vec2(1.0f32);
        let x = S(1.0f32);
        let a : vec2 = m * u;
        let b : vec2 = v * m;
        let n : mat2x2 = x / m;
        let o : mat2x2 = m / x;
        let p : mat2x2 = x * m;
        let q : mat2x2 = m * x;
        let _ = (a,b,n,o,p);
        assert_eq!(m, q);
        assert_eq!(m, l);
    }
    #[test]
    fn test_inverse() {
        // M * inverse(M) should be the identity (within epsilon).
        let Matrix = mat2((1, 2, 3, 4));
        let Inverse = inverse(&Matrix);
        let Identity = Matrix * Inverse;
        assert!(Identity.row(0).epsilon_eq(&vec2((1f32, 0f32)), &vec2(0.01f32)));
        assert!(Identity.row(1).epsilon_eq(&vec2((0f32, 1f32)), &vec2(0.01f32)));
    }
    #[test]
    fn test_ctr() {
        // All constructor argument shapes for the same values must agree.
        let m0 = mat2x2((vec2((0,1)),
                         vec2((2, 3))));
        let m1 = mat2x2((0, 1,
                         2, 3));
        let m2 = mat2x2(((0, 1),
                         (2, 3)));
        assert_eq!(m0, m2);
        assert_eq!(m1, m2);
    }
}
#[cfg(test)]
mod mat2x3_tests {
    use super::{mat2x3};
    use src::scalar::{S};
    use src::typedefs::{vec2,vec3};
    use src::vector::{vec2,vec3};
    #[test]
    fn test_operators() {
        // Type-checks every mat2x3 overload; identity checks via S(1.0).
        let l = mat2x3(1.0f32);
        let m = mat2x3(1.0f32);
        let u = vec2(1.0f32);
        let v = vec3(1.0f32);
        let x = S(1.0f32);
        let a : vec3 = m * u;
        let b : vec2 = v * m;
        let n : mat2x3 = x / m;
        let o : mat2x3 = m / x;
        let p : mat2x3 = x * m;
        let q : mat2x3 = m * x;
        let _ = (a,b,n,o,p);
        assert_eq!(m, q);
        assert_eq!(m, l);
    }
    #[test]
    fn test_ctr() {
        // All constructor argument shapes for the same values must agree.
        let m0 = mat2x3((vec3((0, 1, 2)),
                         vec3((3, 4, 5))));
        let m1 = mat2x3((0, 1, 2,
                         3, 4, 5));
        let m2 = mat2x3(((0, 1, 2),
                         (3, 4, 5)));
        assert_eq!(m0, m2);
        assert_eq!(m1, m2);
    }
}
#[cfg(test)]
mod mat2x4_tests {
    use super::{mat2x4};
    use src::scalar::S;
    use src::typedefs::{vec2,vec4};
    use src::vector::{vec2,vec4};
    #[test]
    fn test_operators() {
        // Type-checks every mat2x4 overload; identity checks via S(1.0).
        let l = mat2x4(1.0f32);
        let m = mat2x4(1.0f32);
        let u = vec2(1.0f32);
        let v = vec4(1.0f32);
        let x = S(1.0f32);
        let a : vec4 = m * u;
        let b : vec2 = v * m;
        let n : mat2x4 = x / m;
        let o : mat2x4 = m / x;
        let p : mat2x4 = x * m;
        let q : mat2x4 = m * x;
        let _ = (a,b,n,o,p);
        assert_eq!(m, q);
        assert_eq!(m, l);
    }
    #[test]
    fn test_ctr() {
        // All constructor argument shapes for the same values must agree.
        let m0 = mat2x4((vec4((0, 1, 2, 3)),
                         vec4((4, 5, 6, 7))));
        let m1 = mat2x4((0, 1, 2, 3,
                         4, 5, 6, 7));
        let m2 = mat2x4(((0, 1, 2, 3),
                         (4, 5, 6, 7)));
        assert_eq!(m0, m2);
        assert_eq!(m1, m2);
    }
}
Cleaned up code a bit, mostly by macro-factoring binop overloads on scalars.
pub use src::typedefs::{vec2,vec3,vec4};
pub use src::typedefs::{mat2x2,mat2x3,mat2x4};
pub use src::typedefs::{mat3x2,mat3x3,mat3x4};
pub use src::typedefs::{mat4x2,mat4x3,mat4x4};
use src::vector::{TVec2,TVec2MulRHS};
use src::vector::{TVec3,TVec3MulRHS};
use src::vector::{TVec4,TVec4MulRHS};
use src::scalar::{S,SMulRHS,SDivRHS};
use std::fmt;
/// Column access for a matrix whose columns have type `ColVec`.
trait Columns<ColVec> {
    fn num_cols(&self) -> uint;
    fn col(&self, i: uint) -> ColVec;
}
/// Row access for a matrix whose rows have type `RowVec`.
trait Rows<RowVec> {
    fn num_rows(&self) -> uint;
    fn row(&self, j: uint) -> RowVec;
}
/// Square matrices that have a (mathematical) inverse.
trait Invertible {
    // (should this be Result<Self> ?)
    // (also, should this take self by value?)
    fn inverse(&self) -> Self;
}
/// Element-wise map from a matrix over `T` to `SelfU`, the same shape over `U`.
trait Mappable<T,U,SelfU> {
    fn map(&self, |&T| -> U) -> SelfU;
}
/// Free-function spelling of `Invertible::inverse`, GLSL style.
pub fn inverse<M:Invertible>(m: &M) -> M { m.inverse() }
// All the matrix representations use column-major order:
// namely, [[a, b, c], [d, e, f]]
// should be read as corresponding to the 2 x 3 matrix:
//
// ( a d |
// | b e |
// | c f )
// `elems[col][row]` throughout (see the column-major note above);
// TMatCxR means C columns of R elements each.
pub struct TMat2<T> {
    elems: [[T, ..2], ..2]
}
pub struct TMat3<T> {
    elems: [[T, ..3], ..3]
}
pub struct TMat4<T> {
    elems: [[T, ..4], ..4]
}
pub struct TMat2x3<T> {
    elems: [[T, ..3], ..2]
}
pub struct TMat2x4<T> {
    elems: [[T, ..4], ..2]
}
pub struct TMat3x2<T> {
    elems: [[T, ..2], ..3]
}
pub struct TMat3x4<T> {
    elems: [[T, ..4], ..3]
}
pub struct TMat4x2<T> {
    elems: [[T, ..2], ..4]
}
pub struct TMat4x3<T> {
    elems: [[T, ..3], ..4]
}
// Maps a column/row length (2|3|4) to the corresponding TVec type.
// NOTE(review): appears unused in this file — confirm before removing.
macro_rules! tvec_of_len {
    ($T:ident, 2) => { TVec2<$T> };
    ($T:ident, 3) => { TVec3<$T> };
    ($T:ident, 4) => { TVec4<$T> };
}
// Implements `Columns` for a matrix type; the trailing literal (2|3|4)
// selects the column-vector type, `$ncols` the bounds check / count.
// Columns are contiguous in `elems`, so a column is one inner array.
macro_rules! impl_Columns_for {
    ( $TMat:ident $ncols:expr 2 ) =>
    {
        impl<T:Clone> Columns<TVec2<T>> for $TMat<T> {
            fn num_cols(&self) -> uint { $ncols }
            fn col(&self, i: uint) -> TVec2<T> {
                assert!(i < $ncols);
                TVec2 { x: self.elems[i][0].clone(),
                        y: self.elems[i][1].clone(),
                }
            }
        }
    };
    ( $TMat:ident $ncols:expr 3 ) =>
    {
        impl<T:Clone> Columns<TVec3<T>> for $TMat<T> {
            fn num_cols(&self) -> uint { $ncols }
            fn col(&self, i: uint) -> TVec3<T> {
                assert!(i < $ncols);
                TVec3 { x: self.elems[i][0].clone(),
                        y: self.elems[i][1].clone(),
                        z: self.elems[i][2].clone(),
                }
            }
        }
    };
    ( $TMat:ident $ncols:expr 4 ) =>
    {
        impl<T:Clone> Columns<TVec4<T>> for $TMat<T> {
            fn num_cols(&self) -> uint { $ncols }
            fn col(&self, i: uint) -> TVec4<T> {
                assert!(i < $ncols);
                TVec4 { x: self.elems[i][0].clone(),
                        y: self.elems[i][1].clone(),
                        z: self.elems[i][2].clone(),
                        w: self.elems[i][3].clone(),
                }
            }
        }
    };
}
// Implements `Rows` for a matrix type; the literal after the type (2|3|4)
// selects the row-vector type, `$nrows` the bounds check / count.
// Rows are strided: element j of every column array.
macro_rules! impl_Rows_for {
    ( $TMat:ident 2 $nrows:expr ) =>
    {
        impl<T:Clone> Rows<TVec2<T>> for $TMat<T> {
            fn num_rows(&self) -> uint { $nrows }
            fn row(&self, j: uint) -> TVec2<T> {
                assert!(j < $nrows);
                TVec2 { x: self.elems[0][j].clone(),
                        y: self.elems[1][j].clone(),
                }
            }
        }
    };
    ( $TMat:ident 3 $nrows:expr ) =>
    {
        impl<T:Clone> Rows<TVec3<T>> for $TMat<T> {
            fn num_rows(&self) -> uint { $nrows }
            fn row(&self, j: uint) -> TVec3<T> {
                assert!(j < $nrows);
                TVec3 { x: self.elems[0][j].clone(),
                        y: self.elems[1][j].clone(),
                        z: self.elems[2][j].clone(),
                }
            }
        }
    };
    ( $TMat:ident 4 $nrows:expr ) =>
    {
        impl<T:Clone> Rows<TVec4<T>> for $TMat<T> {
            fn num_rows(&self) -> uint { $nrows }
            fn row(&self, j: uint) -> TVec4<T> {
                assert!(j < $nrows);
                TVec4 { x: self.elems[0][j].clone(),
                        y: self.elems[1][j].clone(),
                        z: self.elems[2][j].clone(),
                        w: self.elems[3][j].clone(),
                }
            }
        }
    };
}
// Convenience: implement both `Columns` and `Rows` in one invocation.
macro_rules! impl_ColRow_for {
    ( $TMat:ident $ncols:tt $nrows:tt ) =>
    {
        impl_Columns_for! ($TMat $ncols $nrows)
        impl_Rows_for! ($TMat $ncols $nrows)
    }
}
impl_ColRow_for!(TMat2 2 2)
impl_ColRow_for!(TMat2x3 2 3)
impl_ColRow_for!(TMat2x4 2 4)
impl_ColRow_for!(TMat3 3 3)
impl_ColRow_for!(TMat3x2 3 2)
impl_ColRow_for!(TMat3x4 3 4)
impl_ColRow_for!(TMat4 4 4)
impl_ColRow_for!(TMat4x2 4 2)
impl_ColRow_for!(TMat4x3 4 3)
impl<T:Num> Invertible for TMat2<T> {
fn inverse(&self) -> TMat2<T> {
#![allow(uppercase_variables)]
use std::num::One;
let m = &self.elems;
let one : T = One::one();
let OneOverDeterminant = one / (m[0][0] * m[1][1] - m[1][0] * m[0][1]);
TMat2 { elems: [[m[1][1] * OneOverDeterminant, m[0][1] * OneOverDeterminant],
[m[1][0] * OneOverDeterminant, m[0][0] * OneOverDeterminant]] }
}
}
// Implements `fmt::Show`: prints columns one per line, bracketed, e.g.
// "[[a, b]\n  [c, d]]". `$nrows` is unused but kept for call-site symmetry.
macro_rules! impl_Show_for {
    ($TMat:ident $ncols:expr $nrows:expr) =>
    {
        impl<T:fmt::Show> fmt::Show for $TMat<T> {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                try!(write!(f, "["));
                for u in range(0u, $ncols-1) {
                    try!(self.elems[u].as_slice().fmt(f));
                    try!(write!(f, "\n "));
                }
                try!(self.elems[$ncols-1].as_slice().fmt(f));
                write!(f, "]")
            }
        }
    }
}
// Implements `Eq` by comparing column arrays; short-circuits on the first
// mismatch. `$nrows` is unused but kept for call-site symmetry.
macro_rules! impl_Eq_for {
    ($TMat:ident $ncols:expr $nrows:expr) =>
    {
        impl<T:Eq> Eq for $TMat<T> {
            fn eq(&self, rhs: &$TMat<T>) -> bool {
                for u in range(0u, $ncols) {
                    if self.elems[u] != rhs.elems[u] {
                        return false;
                    }
                }
                return true;
            }
        }
    }
}
// Bundles the boilerplate trait impls (Show + Eq) for one matrix type.
macro_rules! easy_impls_for {
    ($TMat:ident $ncols:expr $nrows:expr) =>
    {
        impl_Show_for!($TMat $ncols $nrows)
        impl_Eq_for!($TMat $ncols $nrows)
    }
}
easy_impls_for!(TMat2 2 2)
easy_impls_for!(TMat2x3 2 3)
easy_impls_for!(TMat2x4 2 4)
easy_impls_for!(TMat3 3 3)
easy_impls_for!(TMat3x2 3 2)
easy_impls_for!(TMat3x4 3 4)
easy_impls_for!(TMat4 4 4)
easy_impls_for!(TMat4x2 4 2)
easy_impls_for!(TMat4x3 4 3)
// Element-wise `map` for the 2-column matrix types; applies `f` to each
// element in column-major order and rebuilds the same shape over `U`.
impl<T,U> Mappable<T,U,TMat2<U>> for TMat2<T> {
    fn map(&self, f: |&T| -> U) -> TMat2<U> {
        TMat2 { elems: [[f(&self.elems[0][0]), f(&self.elems[0][1])],
                        [f(&self.elems[1][0]), f(&self.elems[1][1])]] }
    }
}
impl<T,U> Mappable<T,U,TMat2x3<U>> for TMat2x3<T> {
    fn map(&self, f: |&T| -> U) -> TMat2x3<U> {
        TMat2x3 { elems: [[f(&self.elems[0][0]), f(&self.elems[0][1]), f(&self.elems[0][2])],
                          [f(&self.elems[1][0]), f(&self.elems[1][1]), f(&self.elems[1][2])]] }
    }
}
impl<T,U> Mappable<T,U,TMat2x4<U>> for TMat2x4<T> {
    fn map(&self, f: |&T| -> U) -> TMat2x4<U> {
        TMat2x4 { elems: [[f(&self.elems[0][0]), f(&self.elems[0][1]), f(&self.elems[0][2]), f(&self.elems[0][3])],
                          [f(&self.elems[1][0]), f(&self.elems[1][1]), f(&self.elems[1][2]), f(&self.elems[1][3])]] }
    }
}
// Argument-bundle traits: each `MatNxMArgs` impl (generated by the macros
// below) teaches the corresponding `matNxM` constructor how to build a
// matrix from one accepted argument shape (scalar splat, scalars, vectors).
pub trait Mat2x2Args { fn make(self) -> mat2x2; }
pub trait Mat2x3Args { fn make(self) -> mat2x3; }
pub trait Mat2x4Args { fn make(self) -> mat2x4; }
pub trait Mat3x2Args { fn make(self) -> mat3x2; }
pub trait Mat3x3Args { fn make(self) -> mat3x3; }
pub trait Mat3x4Args { fn make(self) -> mat3x4; }
pub trait Mat4x2Args { fn make(self) -> mat4x2; }
pub trait Mat4x3Args { fn make(self) -> mat4x3; }
pub trait Mat4x4Args { fn make(self) -> mat4x4; }
// GLSL-style constructors; `mat2`/`mat3`/`mat4` alias the square forms.
pub fn mat2<Args:Mat2x2Args>(args: Args) -> mat2x2 { args.make() }
pub fn mat2x2<Args:Mat2x2Args>(args: Args) -> mat2x2 { args.make() }
pub fn mat2x3<Args:Mat2x3Args>(args: Args) -> mat2x3 { args.make() }
pub fn mat2x4<Args:Mat2x4Args>(args: Args) -> mat2x4 { args.make() }
pub fn mat3<Args:Mat3x3Args>(args: Args) -> mat3x3 { args.make() }
pub fn mat3x2<Args:Mat3x2Args>(args: Args) -> mat3x2 { args.make() }
pub fn mat3x3<Args:Mat3x3Args>(args: Args) -> mat3x3 { args.make() }
pub fn mat3x4<Args:Mat3x4Args>(args: Args) -> mat3x4 { args.make() }
pub fn mat4<Args:Mat4x4Args>(args: Args) -> mat4x4 { args.make() }
pub fn mat4x2<Args:Mat4x2Args>(args: Args) -> mat4x2 { args.make() }
pub fn mat4x3<Args:Mat4x3Args>(args: Args) -> mat4x3 { args.make() }
pub fn mat4x4<Args:Mat4x4Args>(args: Args) -> mat4x4 { args.make() }
// Generates `Mat2x2Args` impls for one accepted argument shape.
// Arm grammar: `T copy` = scalar splat into all four elements;
// four plain idents = four scalars (plus the partially-tupled variants);
// `T 2` marks a 2-vector argument that supplies a whole column.
macro_rules! impl_Mat2x2Args_for {
    // scalar splat: mat2(x) fills every element with x
    ($a:ident copy) => {
        impl Mat2x2Args for $a {
            fn make(self) -> mat2x2 {
                let x = self;
                TMat2 { elems: [[x as f32,x as f32],
                                [x as f32,x as f32]] }
            }
        }
    };
    // four scalars, in flat and column-tupled spellings
    ($a:ident,$b:ident,$c:ident,$d:ident) => {
        impl Mat2x2Args for ($a,$b,$c,$d) {
            fn make(self) -> mat2x2 {
                let (a,b,c,d) = self;
                TMat2 { elems: [[a as f32,b as f32],
                                [c as f32,d as f32]] }
            }
        }
        impl Mat2x2Args for (($a,$b),$c,$d) {
            fn make(self) -> mat2x2 {
                let ((a,b),c,d) = self;
                TMat2 { elems: [[a as f32,b as f32],
                                [c as f32,d as f32]] }
            }
        }
        impl Mat2x2Args for ($a,$b,($c,$d)) {
            fn make(self) -> mat2x2 {
                let (a,b,(c,d)) = self;
                TMat2 { elems: [[a as f32,b as f32],
                                [c as f32,d as f32]] }
            }
        }
        impl Mat2x2Args for (($a,$b),($c,$d)) {
            fn make(self) -> mat2x2 {
                let ((a,b),(c,d)) = self;
                TMat2 { elems: [[a as f32,b as f32],
                                [c as f32,d as f32]] }
            }
        }
    };
    // 2-vector first column + two scalars for the second
    ($a:ident 2,$b:ident,$c:ident) => {
        impl Mat2x2Args for ($a,$b,$c) {
            fn make(self) -> mat2x2 {
                let (a,b,c) = self;
                TMat2 { elems: [[a.x as f32,a.y as f32],
                                [b as f32, c as f32]] }
            }
        }
        impl Mat2x2Args for ($a,($b,$c)) {
            fn make(self) -> mat2x2 {
                let (a,(b,c)) = self;
                TMat2 { elems: [[a.x as f32,a.y as f32],
                                [b as f32, c as f32]] }
            }
        }
    };
    // two scalars for the first column + 2-vector second column
    ($a:ident,$b:ident,$c:ident 2) => {
        impl Mat2x2Args for ($a,$b,$c) {
            fn make(self) -> mat2x2 {
                let (a,b,c) = self;
                TMat2 { elems: [[a as f32, b as f32],
                                [c.x as f32,c.y as f32]] }
            }
        }
        impl Mat2x2Args for (($a,$b),$c) {
            fn make(self) -> mat2x2 {
                let ((a,b),c) = self;
                TMat2 { elems: [[a as f32, b as f32],
                                [c.x as f32,c.y as f32]] }
            }
        }
    };
    // two 2-vectors, one per column
    ($a:ident 2,$b:ident 2) => {
        impl Mat2x2Args for ($a,$b) {
            fn make(self) -> mat2x2 {
                let (a,b) = self;
                TMat2 { elems: [[a.x as f32,a.y as f32],
                                [b.x as f32,b.y as f32]] }
            }
        }
    };
}
// Generates `Mat2x3Args` impls for one accepted argument shape.
// Arm grammar: `T copy` = scalar splat; six plain idents = six scalars
// (plus column-tupled variants); `T 3` marks a 3-vector supplying a column.
// FIX: removed an arm that duplicated `($a:ident 3, $d,$e,$f)` token-for-token;
// macro_rules! tries arms in order, so the second identical pattern could
// never match and was dead code.
macro_rules! impl_Mat2x3Args_for {
    // scalar splat: mat2x3(x) fills every element with x
    ($a:ident copy) => {
        impl Mat2x3Args for $a {
            fn make(self) -> mat2x3 {
                let x = self;
                TMat2x3 { elems: [[x as f32,x as f32,x as f32],
                                  [x as f32,x as f32,x as f32]] }
            }
        }
    };
    // six scalars, in flat and column-tupled spellings
    ($a:ident,$b:ident,$c:ident,
     $d:ident,$e:ident,$f:ident) => {
        impl Mat2x3Args for ($a,$b,$c,
                             $d,$e,$f) {
            fn make(self) -> mat2x3 {
                let (a,b,c,d,e,f) = self;
                TMat2x3 { elems: [[a as f32,b as f32,c as f32],
                                  [d as f32,e as f32,f as f32]] }
            }
        }
        impl Mat2x3Args for (($a,$b,$c),
                             ($d,$e,$f)) {
            fn make(self) -> mat2x3 {
                let ((a,b,c),(d,e,f)) = self;
                TMat2x3 { elems: [[a as f32,b as f32,c as f32],
                                  [d as f32,e as f32,f as f32]] }
            }
        }
        impl Mat2x3Args for ($a,$b,$c,
                             ($d,$e,$f)) {
            fn make(self) -> mat2x3 {
                let (a,b,c,(d,e,f)) = self;
                TMat2x3 { elems: [[a as f32,b as f32,c as f32],
                                  [d as f32,e as f32,f as f32]] }
            }
        }
        impl Mat2x3Args for (($a,$b,$c),
                             $d,$e,$f) {
            fn make(self) -> mat2x3 {
                let ((a,b,c),d,e,f) = self;
                TMat2x3 { elems: [[a as f32,b as f32,c as f32],
                                  [d as f32,e as f32,f as f32]] }
            }
        }
    };
    // 3-vector first column + three scalars for the second
    ($a:ident 3,
     $d:ident,$e:ident,$f:ident) => {
        impl Mat2x3Args for ($a,
                             $d,$e,$f) {
            fn make(self) -> mat2x3 {
                let (a,
                     d,e,f) = self;
                TMat2x3 { elems: [[a.x as f32,a.y as f32,a.z as f32],
                                  [d as f32,e as f32,f as f32]] }
            }
        }
    };
    // three scalars for the first column + 3-vector second column
    ($a:ident,$b:ident,$c:ident,
     $d:ident 3) => {
        impl Mat2x3Args for ($a,$b,$c,
                             $d) {
            fn make(self) -> mat2x3 {
                let (a,b,c,
                     d) = self;
                TMat2x3 { elems: [[a as f32,b as f32,c as f32],
                                  [d.x as f32,d.y as f32,d.z as f32]] }
            }
        }
    };
    // two 3-vectors, one per column
    ($a:ident 3,
     $d:ident 3) => {
        impl Mat2x3Args for ($a,
                             $d) {
            fn make(self) -> mat2x3 {
                let (a,
                     d) = self;
                TMat2x3 { elems: [[a.x as f32,a.y as f32,a.z as f32],
                                  [d.x as f32,d.y as f32,d.z as f32]] }
            }
        }
    };
}
impl_Mat2x2Args_for!(f32 copy)
impl_Mat2x2Args_for!(int copy)
// Adapter: forwards one combination produced by `all_choices!` to the impl
// macro, discarding the bookkeeping tail arguments.
macro_rules! impl_Mat2x2Args_for_choice {
    ( $a:ident, $b:ident, $c:ident, $d:ident, $($ignore:ident),*) => {
        impl_Mat2x2Args_for!($a, $b, $c, $d)
    }
}
// Instantiate impls for every (int|f32)^4 scalar combination.
all_choices!( impl_Mat2x2Args_for_choice :
              todo: { (int | f32) (int | f32)
                      (int | f32) (int | f32) }
              done: { (ignored) } )
impl_Mat2x2Args_for!(vec2 2,vec2 2)
macro_rules! impl_Mat2x3Args_for_choice {
    ( $a:ident, $b:ident, $c:ident, $d:ident, $e:ident, $f:ident, $($ignore:ident),*) => {
        impl_Mat2x3Args_for!($a, $b, $c, $d, $e, $f)
    }
}
all_choices!( impl_Mat2x3Args_for_choice :
              todo: { (int | f32) (int | f32) (int | f32)
                      (int | f32) (int | f32) (int | f32) }
              done: { (ignored) } )
impl_Mat2x3Args_for!(f32 copy)
impl_Mat2x3Args_for!(int copy)
// vector+vector, then every vector+scalars / scalars+vector mixture.
impl_Mat2x3Args_for!(vec3 3,vec3 3)
impl_Mat2x3Args_for!(vec3 3,f32,f32,f32)
impl_Mat2x3Args_for!(vec3 3,f32,f32,int)
impl_Mat2x3Args_for!(vec3 3,f32,int,f32)
impl_Mat2x3Args_for!(vec3 3,f32,int,int)
impl_Mat2x3Args_for!(vec3 3,int,f32,f32)
impl_Mat2x3Args_for!(vec3 3,int,f32,int)
impl_Mat2x3Args_for!(vec3 3,int,int,f32)
impl_Mat2x3Args_for!(vec3 3,int,int,int)
impl_Mat2x3Args_for!(f32,f32,f32,vec3 3)
impl_Mat2x3Args_for!(f32,f32,int,vec3 3)
impl_Mat2x3Args_for!(f32,int,f32,vec3 3)
impl_Mat2x3Args_for!(f32,int,int,vec3 3)
impl_Mat2x3Args_for!(int,f32,f32,vec3 3)
impl_Mat2x3Args_for!(int,f32,int,vec3 3)
impl_Mat2x3Args_for!(int,int,f32,vec3 3)
impl_Mat2x3Args_for!(int,int,int,vec3 3)
// Generates `Mat2x4Args` impls for one accepted argument shape.
// Arm grammar: `T copy` = scalar splat; eight plain idents = eight scalars
// (plus column-tupled variants); `T 4` marks a 4-vector supplying a column.
macro_rules! impl_Mat2x4Args_for {
    // scalar splat: mat2x4(x) fills every element with x
    ($a:ident copy) => {
        impl Mat2x4Args for $a {
            fn make(self) -> mat2x4 {
                let x = self;
                TMat2x4 { elems: [[x as f32,x as f32,x as f32,x as f32],
                                  [x as f32,x as f32,x as f32,x as f32]] }
            }
        }
    };
    // eight scalars, in flat and column-tupled spellings
    ($a:ident,$b:ident,$c:ident,$d:ident,
     $e:ident,$f:ident,$g:ident,$h:ident) => {
        impl Mat2x4Args for ($a,$b,$c,$d,
                             $e,$f,$g,$h) {
            fn make(self) -> mat2x4 {
                let (a,b,c,d,
                     e,f,g,h) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
        impl Mat2x4Args for (($a,$b,$c,$d),
                             $e,$f,$g,$h) {
            fn make(self) -> mat2x4 {
                let ((a,b,c,d),
                     e,f,g,h) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
        impl Mat2x4Args for ($a,$b,$c,$d,
                             ($e,$f,$g,$h)) {
            fn make(self) -> mat2x4 {
                let (a,b,c,d,
                     (e,f,g,h)) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
        impl Mat2x4Args for (($a,$b,$c,$d),
                             ($e,$f,$g,$h)) {
            fn make(self) -> mat2x4 {
                let ((a,b,c,d),
                     (e,f,g,h)) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
    };
    // 4-vector first column + four scalars for the second
    ($a:ident 4,
     $e:ident,$f:ident,$g:ident,$h:ident) => {
        impl Mat2x4Args for ($a,
                             $e,$f,$g,$h) {
            fn make(self) -> mat2x4 {
                let (a,
                     e,f,g,h) = self;
                TMat2x4 { elems: [[a.x as f32,a.y as f32,a.z as f32,a.w as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
        impl Mat2x4Args for ($a,
                             ($e,$f,$g,$h)) {
            fn make(self) -> mat2x4 {
                let (a,
                     (e,f,g,h)) = self;
                TMat2x4 { elems: [[a.x as f32,a.y as f32,a.z as f32,a.w as f32],
                                  [e as f32,f as f32,g as f32,h as f32]] }
            }
        }
    };
    // four scalars for the first column + 4-vector second column
    ($a:ident,$b:ident,$c:ident,$d:ident,
     $e:ident 4) => {
        impl Mat2x4Args for ($a,$b,$c,$d,
                             $e) {
            fn make(self) -> mat2x4 {
                let (a,b,c,d,
                     e) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e.x as f32,e.y as f32,e.z as f32,e.w as f32]] }
            }
        }
        impl Mat2x4Args for (($a,$b,$c,$d),
                             $e) {
            fn make(self) -> mat2x4 {
                let ((a,b,c,d),
                     e) = self;
                TMat2x4 { elems: [[a as f32,b as f32,c as f32,d as f32],
                                  [e.x as f32,e.y as f32,e.z as f32,e.w as f32]] }
            }
        }
    };
    // two 4-vectors, one per column
    ($a:ident 4,
     $e:ident 4) => {
        impl Mat2x4Args for ($a,
                             $e) {
            fn make(self) -> mat2x4 {
                let (a,
                     e) = self;
                TMat2x4 { elems: [[a.x as f32,a.y as f32,a.z as f32,a.w as f32],
                                  [e.x as f32,e.y as f32,e.z as f32,e.w as f32]] }
            }
        }
    };
}
// Adapter: forwards one (int|f32)^8 combination from `all_choices!` to the
// impl macro, discarding the bookkeeping tail.
macro_rules! impl_Mat2x4Args_for_choice {
    ( $a:ident, $b:ident, $c:ident, $d:ident,
      $e:ident, $f:ident, $g:ident, $h:ident, $($ignore:ident),*) => {
        impl_Mat2x4Args_for!($a, $b, $c, $d, $e, $f, $g, $h)
    }
}
impl_Mat2x4Args_for!(f32 copy)
impl_Mat2x4Args_for!(int copy)
all_choices!(impl_Mat2x4Args_for_choice :
             todo: { (int | f32) (int | f32) (int | f32) (int | f32)
                     (int | f32) (int | f32) (int | f32) (int | f32) }
             done: { (ignored) } )
impl_Mat2x4Args_for!(vec4 4,vec4 4)
// Wire up `*` and `/` for each matrix type via double dispatch:
// `a * b` becomes `b.rev_mul(&a)` through the generated `*MulRHS` trait,
// letting the right-hand type select the overload (matrix, vector, scalar).
double_dispatch_T!{Mul for TMat2 mul via TMat2x2MulRHS rev_mul}
double_dispatch_T!{Div for TMat2 div via TMat2x2DivRHS rev_div}
double_dispatch_T!{Mul for TMat2x3 mul via TMat2x3MulRHS rev_mul}
double_dispatch_T!{Div for TMat2x3 div via TMat2x3DivRHS rev_div}
double_dispatch_T!{Mul for TMat2x4 mul via TMat2x4MulRHS rev_mul}
double_dispatch_T!{Div for TMat2x4 div via TMat2x4DivRHS rev_div}
double_dispatch_T!{Mul for TMat3 mul via TMat3x3MulRHS rev_mul}
double_dispatch_T!{Div for TMat3 div via TMat3x3DivRHS rev_div}
double_dispatch_T!{Mul for TMat3x2 mul via TMat3x2MulRHS rev_mul}
double_dispatch_T!{Div for TMat3x2 div via TMat3x2DivRHS rev_div}
double_dispatch_T!{Mul for TMat3x4 mul via TMat3x4MulRHS rev_mul}
double_dispatch_T!{Div for TMat3x4 div via TMat3x4DivRHS rev_div}
double_dispatch_T!{Mul for TMat4 mul via TMat4x4MulRHS rev_mul}
double_dispatch_T!{Div for TMat4 div via TMat4x4DivRHS rev_div}
double_dispatch_T!{Mul for TMat4x2 mul via TMat4x2MulRHS rev_mul}
double_dispatch_T!{Div for TMat4x2 div via TMat4x2DivRHS rev_div}
double_dispatch_T!{Mul for TMat4x3 mul via TMat4x3MulRHS rev_mul}
double_dispatch_T!{Div for TMat4x3 div via TMat4x3DivRHS rev_div}
// Factors out the four scalar<->matrix overloads per matrix type:
// mat*scalar, mat/scalar, scalar*mat, scalar/mat — all element-wise
// via `Mappable::map`. `S<T>` is the scalar newtype that disambiguates
// operand order in the double-dispatch scheme.
macro_rules! impl_Scalar_MulDiv_for {
    ($TMat:ident $TMatMulRHS:ident $TMatDivRHS:ident) => {
        impl<T:Num> $TMatMulRHS<T,$TMat<T>> for S<T> {
            // mat * scalar
            fn rev_mul(&self, lhs: &$TMat<T>) -> $TMat<T> {
                let S(ref f) = *self;
                lhs.map(|x|*x * *f)
            }
        }
        impl<T:Num> $TMatDivRHS<T,$TMat<T>> for S<T> {
            // mat / scalar
            fn rev_div(&self, lhs: &$TMat<T>) -> $TMat<T> {
                let S(ref f) = *self;
                lhs.map(|x|*x / *f)
            }
        }
        impl<T:Num> SMulRHS<T,$TMat<T>> for $TMat<T> {
            // scalar * mat
            fn rev_mul(&self, lhs: &S<T>) -> $TMat<T> {
                let S(ref f) = *lhs;
                self.map(|x|*f * *x)
            }
        }
        impl<T:Num> SDivRHS<T,$TMat<T>> for $TMat<T> {
            // scalar / mat (element-wise)
            fn rev_div(&self, lhs: &S<T>) -> $TMat<T> {
                let S(ref f) = *lhs;
                self.map(|x|*f / *x)
            }
        }
    }
}
impl_Scalar_MulDiv_for!(TMat2 TMat2x2MulRHS TMat2x2DivRHS)
impl_Scalar_MulDiv_for!(TMat2x3 TMat2x3MulRHS TMat2x3DivRHS)
impl_Scalar_MulDiv_for!(TMat2x4 TMat2x4MulRHS TMat2x4DivRHS)
impl<T:Num> TMat2x2MulRHS<T,TMat2<T>> for TMat2<T> {
    /// Standard 2x2 matrix product `lhs * self` (double dispatch puts the
    /// right operand in `self`). Each entry is the dot product of a row of
    /// `lhs` with a column of `self`.
    fn rev_mul(&self, lhs: &TMat2<T>) -> TMat2<T> {
        let a = &lhs.elems;
        let b = &self.elems;
        TMat2 { elems: [[a[0][0] * b[0][0] + a[0][1] * b[1][0],
                         a[0][0] * b[0][1] + a[0][1] * b[1][1]],
                        [a[1][0] * b[0][0] + a[1][1] * b[1][0],
                         a[1][0] * b[0][1] + a[1][1] * b[1][1]]] }
    }
}
impl<T:Num> TMat2x3MulRHS<T,TMat2<T>> for TMat3x2<T> {
    /// 2x3 * 3x2 matrix product `lhs * self`, yielding a 2x2 result.
    fn rev_mul(&self, lhs: &TMat2x3<T>) -> TMat2<T> {
        let a = &lhs.elems;
        let b = &self.elems;
        TMat2 { elems: [[a[0][0] * b[0][0] + a[0][1] * b[1][0] + a[0][2] * b[2][0],
                         a[0][0] * b[0][1] + a[0][1] * b[1][1] + a[0][2] * b[2][1]],
                        [a[1][0] * b[0][0] + a[1][1] * b[1][0] + a[1][2] * b[2][0],
                         a[1][0] * b[0][1] + a[1][1] * b[1][1] + a[1][2] * b[2][1]]] }
    }
}
impl<T:Num> TMat2x3MulRHS<T,TVec3<T>> for TVec2<T> {
    /// Matrix * column-vector product `lhs * self`, yielding a 3-vector:
    /// each output component dots the columns' i-th entries with `self`.
    fn rev_mul(&self, lhs: &TMat2x3<T>) -> TVec3<T> {
        let m = &lhs.elems;
        let out_x = m[0][0] * self.x + m[1][0] * self.y;
        let out_y = m[0][1] * self.x + m[1][1] * self.y;
        let out_z = m[0][2] * self.x + m[1][2] * self.y;
        TVec3 { x: out_x, y: out_y, z: out_z }
    }
}
impl<T:Num> TMat2x2MulRHS<T,TVec2<T>> for TVec2<T> {
    /// Matrix * column-vector product `lhs * self` for the 2x2 case.
    fn rev_mul(&self, lhs: &TMat2<T>) -> TVec2<T> {
        let m = &lhs.elems;
        let out_x = m[0][0] * self.x + m[1][0] * self.y;
        let out_y = m[0][1] * self.x + m[1][1] * self.y;
        TVec2 { x: out_x, y: out_y }
    }
}
impl<T:Num> TVec2MulRHS<T,TVec2<T>> for TMat2<T> {
    /// Row-vector * matrix product `lhs * self`: dots `lhs` with each
    /// stored column of `self`.
    fn rev_mul(&self, lhs: &TVec2<T>) -> TVec2<T> {
        let m = &self.elems;
        TVec2 { x: lhs.x * m[0][0] + lhs.y * m[0][1],
                y: lhs.x * m[1][0] + lhs.y * m[1][1] }
    }
}
impl<T:Num> TVec3MulRHS<T,TVec2<T>> for TMat2x3<T> {
    /// Row-vector * matrix product `lhs * self` (3-vector times 2x3),
    /// dotting `lhs` with each of the two stored columns.
    fn rev_mul(&self, lhs: &TVec3<T>) -> TVec2<T> {
        let m = &self.elems;
        TVec2 { x: lhs.x * m[0][0] + lhs.y * m[0][1] + lhs.z * m[0][2],
                y: lhs.x * m[1][0] + lhs.y * m[1][1] + lhs.z * m[1][2] }
    }
}
impl<T:Num> TVec3MulRHS<T,TVec2<T>> for TMat3x2<T> {
    /// Matrix-transposed orientation of the 3-vector product: `lhs * self`
    /// for a 3x2 matrix, combining the three columns' matching entries.
    fn rev_mul(&self, lhs: &TVec3<T>) -> TVec2<T> {
        let m = &self.elems;
        TVec2 { x: lhs.x * m[0][0] + lhs.y * m[1][0] + lhs.z * m[2][0],
                y: lhs.x * m[0][1] + lhs.y * m[1][1] + lhs.z * m[2][1] }
    }
}
impl<T:Num> TMat2x4MulRHS<T,TVec4<T>> for TVec2<T> {
    /// Matrix * column-vector product `lhs * self` (2x4 times 2-vector),
    /// producing a 4-vector.
    fn rev_mul(&self, lhs: &TMat2x4<T>) -> TVec4<T> {
        let m = &lhs.elems;
        let out_x = m[0][0] * self.x + m[1][0] * self.y;
        let out_y = m[0][1] * self.x + m[1][1] * self.y;
        let out_z = m[0][2] * self.x + m[1][2] * self.y;
        let out_w = m[0][3] * self.x + m[1][3] * self.y;
        TVec4 { x: out_x, y: out_y, z: out_z, w: out_w }
    }
}
impl<T:Num> TVec4MulRHS<T,TVec2<T>> for TMat2x4<T> {
    /// Row-vector * matrix product `lhs * self`: dots the 4-vector with
    /// each of the two stored columns of the 2x4 matrix.
    fn rev_mul(&self, lhs: &TVec4<T>) -> TVec2<T> {
        let m = &self.elems;
        let out_x = lhs.x * m[0][0] + lhs.y * m[0][1] + lhs.z * m[0][2] + lhs.w * m[0][3];
        let out_y = lhs.x * m[1][0] + lhs.y * m[1][1] + lhs.z * m[1][2] + lhs.w * m[1][3];
        TVec2 { x: out_x, y: out_y }
    }
}
#[cfg(test)]
mod mat2x2_tests {
    #![allow(uppercase_variables)]
    use super::{mat2x2,mat2,mat2x3};
    use super::{inverse};
    use super::{Rows,Columns};
    use src::operators::{EpsilonEq};
    use src::typedefs::{vec2};
    use src::vector::{vec2,vec3};
    use src::scalar::S;
    // Row/column accessors: `row(i)` gathers the i-th component of every
    // stored column; `col(j)` returns the j-th stored column unchanged.
    #[test]
    fn basics() {
        let m123_456 = mat2x3(((1,2,3),
                               (4,5,6)));
        assert_eq!(m123_456.row(0), vec2((1,4)));
        assert_eq!(m123_456.row(1), vec2((2,5)));
        assert_eq!(m123_456.row(2), vec2((3,6)));
        assert_eq!(m123_456.col(0), vec3((1,2,3)));
        assert_eq!(m123_456.col(1), vec3((4,5,6)));
    }
    // Smoke-test every overload combination: mat*vec, vec*mat, and the
    // scalar wrapper `S` on either side of `*` and `/`.
    #[test]
    fn test_operators() {
        let l = mat2x2(1.0f32);
        let m = mat2x2(1.0f32);
        let u = vec2(1.0f32);
        let v = vec2(1.0f32);
        let x = S(1.0f32);
        let a : vec2 = m * u;
        let b : vec2 = v * m;
        let n : mat2x2 = x / m;
        let o : mat2x2 = m / x;
        let p : mat2x2 = x * m;
        let q : mat2x2 = m * x;
        let _ = (a,b,n,o,p);
        assert_eq!(m, q);
        assert_eq!(m, l);
    }
    // `inverse` composed with the original must give the identity matrix,
    // up to a floating-point tolerance of 0.01 per entry.
    #[test]
    fn test_inverse() {
        let Matrix = mat2((1, 2, 3, 4));
        let Inverse = inverse(&Matrix);
        let Identity = Matrix * Inverse;
        assert!(Identity.row(0).epsilon_eq(&vec2((1f32, 0f32)), &vec2(0.01f32)));
        assert!(Identity.row(1).epsilon_eq(&vec2((0f32, 1f32)), &vec2(0.01f32)));
    }
    // All three constructor argument forms (column vectors, flat scalars,
    // nested tuples) must build the same matrix.
    #[test]
    fn test_ctr() {
        let m0 = mat2x2((vec2((0,1)),
                         vec2((2, 3))));
        let m1 = mat2x2((0, 1,
                         2, 3));
        let m2 = mat2x2(((0, 1),
                         (2, 3)));
        assert_eq!(m0, m2);
        assert_eq!(m1, m2);
    }
}
#[cfg(test)]
mod mat2x3_tests {
    use super::{mat2x3};
    use src::scalar::{S};
    use src::typedefs::{vec2,vec3};
    use src::vector::{vec2,vec3};
    // Smoke-test the non-square overloads: mat2x3 * vec2 -> vec3,
    // vec3 * mat2x3 -> vec2, and scalar `S` on either side of `*` and `/`.
    #[test]
    fn test_operators() {
        let l = mat2x3(1.0f32);
        let m = mat2x3(1.0f32);
        let u = vec2(1.0f32);
        let v = vec3(1.0f32);
        let x = S(1.0f32);
        let a : vec3 = m * u;
        let b : vec2 = v * m;
        let n : mat2x3 = x / m;
        let o : mat2x3 = m / x;
        let p : mat2x3 = x * m;
        let q : mat2x3 = m * x;
        let _ = (a,b,n,o,p);
        assert_eq!(m, q);
        assert_eq!(m, l);
    }
    // All three constructor argument forms (column vectors, flat scalars,
    // nested tuples) must agree.
    #[test]
    fn test_ctr() {
        let m0 = mat2x3((vec3((0, 1, 2)),
                         vec3((3, 4, 5))));
        let m1 = mat2x3((0, 1, 2,
                         3, 4, 5));
        let m2 = mat2x3(((0, 1, 2),
                         (3, 4, 5)));
        assert_eq!(m0, m2);
        assert_eq!(m1, m2);
    }
}
#[cfg(test)]
mod mat2x4_tests {
    use super::{mat2x4};
    use src::scalar::S;
    use src::typedefs::{vec2,vec4};
    use src::vector::{vec2,vec4};
    // Smoke-test the 2x4 overloads: mat2x4 * vec2 -> vec4,
    // vec4 * mat2x4 -> vec2, and scalar `S` on either side of `*` and `/`.
    #[test]
    fn test_operators() {
        let l = mat2x4(1.0f32);
        let m = mat2x4(1.0f32);
        let u = vec2(1.0f32);
        let v = vec4(1.0f32);
        let x = S(1.0f32);
        let a : vec4 = m * u;
        let b : vec2 = v * m;
        let n : mat2x4 = x / m;
        let o : mat2x4 = m / x;
        let p : mat2x4 = x * m;
        let q : mat2x4 = m * x;
        let _ = (a,b,n,o,p);
        assert_eq!(m, q);
        assert_eq!(m, l);
    }
    // All three constructor argument forms (column vectors, flat scalars,
    // nested tuples) must agree.
    #[test]
    fn test_ctr() {
        let m0 = mat2x4((vec4((0, 1, 2, 3)),
                         vec4((4, 5, 6, 7))));
        let m1 = mat2x4((0, 1, 2, 3,
                         4, 5, 6, 7));
        let m2 = mat2x4(((0, 1, 2, 3),
                         (4, 5, 6, 7)));
        assert_eq!(m0, m2);
        assert_eq!(m1, m2);
    }
}
|
use byteorder::{ByteOrder, NativeEndian, ReadBytesExt, WriteBytesExt};
use std::collections::{btree_map, BTreeMap, HashMap};
use std::collections::Bound::{Included, Excluded};
use std::mem;
use std::ptr;
use error::{EvalError, EvalResult};
use primval::PrimVal;
/// The dynamic memory of the interpreted program: byte-addressed
/// allocations indexed by `AllocId`.
pub struct Memory {
    /// Map from raw allocation id (`AllocId.0`) to its backing allocation.
    alloc_map: HashMap<u64, Allocation>,
    /// Next id handed out by `allocate`; monotonically increasing.
    next_id: u64,
    /// Size of a pointer in bytes. Currently the host's pointer size.
    pub pointer_size: usize,
}
/// Opaque handle naming one allocation inside `Memory`.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct AllocId(u64);
/// A single chunk of interpreter memory.
#[derive(Debug)]
pub struct Allocation {
    /// The raw bytes backing this allocation.
    pub bytes: Box<[u8]>,
    /// Byte offsets at which a pointer is stored, mapped to the allocation
    /// that pointer targets; the bytes at the offset hold the pointee offset.
    pub relocations: BTreeMap<usize, AllocId>,
    // TODO(tsion): undef mask
}
/// A pointer value: an allocation plus a byte offset into it.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Pointer {
    /// The allocation this pointer points into.
    pub alloc_id: AllocId,
    /// Byte offset from the start of that allocation.
    pub offset: usize,
}
/// Placement of one field within an aggregate: where it starts and how big it is.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct FieldRepr {
    /// Byte offset of the field from the start of the aggregate.
    pub offset: usize,
    /// Size of the field in bytes.
    pub size: usize,
}
/// How a value of some type is laid out in interpreter memory.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Repr {
    /// Representation for a non-aggregate type such as a boolean, integer, character or pointer.
    Primitive {
        size: usize
    },
    /// The representation for aggregate types including structs, enums, and tuples.
    Aggregate {
        /// The size of the discriminant (an integer). Should be between 0 and 8. Always 0 for
        /// structs and tuples.
        discr_size: usize,
        /// The size of the entire aggregate, including the discriminant.
        size: usize,
        /// The representations of the contents of each variant.
        variants: Vec<Vec<FieldRepr>>,
    },
    /// Fixed-length array: `length` elements of `elem_size` bytes each.
    Array {
        elem_size: usize,
        /// Number of elements.
        length: usize,
    },
}
impl Memory {
    /// Creates an empty program memory with no allocations.
    pub fn new() -> Self {
        Memory {
            alloc_map: HashMap::new(),
            next_id: 0,
            // TODO(tsion): Should this be host's or target's usize?
            pointer_size: mem::size_of::<usize>(),
        }
    }
    /// Creates a fresh, zero-filled allocation of `size` bytes and returns
    /// a pointer to its first byte.
    pub fn allocate(&mut self, size: usize) -> Pointer {
        let id = AllocId(self.next_id);
        let alloc = Allocation {
            bytes: vec![0; size].into_boxed_slice(),
            relocations: BTreeMap::new(),
        };
        self.alloc_map.insert(self.next_id, alloc);
        self.next_id += 1;
        Pointer {
            alloc_id: id,
            offset: 0,
        }
    }
    /// Resolves `id` to its allocation, or `DanglingPointerDeref` if it was
    /// never allocated.
    pub fn get(&self, id: AllocId) -> EvalResult<&Allocation> {
        self.alloc_map.get(&id.0).ok_or(EvalError::DanglingPointerDeref)
    }
    /// Mutable counterpart of `get`.
    pub fn get_mut(&mut self, id: AllocId) -> EvalResult<&mut Allocation> {
        self.alloc_map.get_mut(&id.0).ok_or(EvalError::DanglingPointerDeref)
    }
    /// Borrows `size` bytes starting at `ptr`, bounds-checked but without
    /// inspecting relocations or undef bytes.
    fn get_bytes_unchecked(&self, ptr: Pointer, size: usize) -> EvalResult<&[u8]> {
        let alloc = try!(self.get(ptr.alloc_id));
        if ptr.offset + size > alloc.bytes.len() {
            return Err(EvalError::PointerOutOfBounds);
        }
        Ok(&alloc.bytes[ptr.offset..ptr.offset + size])
    }
    /// Mutable counterpart of `get_bytes_unchecked`.
    fn get_bytes_unchecked_mut(&mut self, ptr: Pointer, size: usize) -> EvalResult<&mut [u8]> {
        let alloc = try!(self.get_mut(ptr.alloc_id));
        if ptr.offset + size > alloc.bytes.len() {
            return Err(EvalError::PointerOutOfBounds);
        }
        Ok(&mut alloc.bytes[ptr.offset..ptr.offset + size])
    }
    /// Borrows `size` bytes at `ptr` for reading as plain data; fails with
    /// `ReadPointerAsBytes` if the range overlaps any stored pointer.
    fn get_bytes(&self, ptr: Pointer, size: usize) -> EvalResult<&[u8]> {
        if try!(self.relocations(ptr, size)).count() != 0 {
            return Err(EvalError::ReadPointerAsBytes);
        }
        // TODO(tsion): Track and check for undef bytes.
        self.get_bytes_unchecked(ptr, size)
    }
    /// Mutably borrows `size` bytes at `ptr`; relocations overlapping the
    /// range are dropped first, since their pointer bytes are about to be
    /// overwritten with plain data.
    fn get_bytes_mut(&mut self, ptr: Pointer, size: usize) -> EvalResult<&mut [u8]> {
        try!(self.clear_relocations(ptr, size));
        self.get_bytes_unchecked_mut(ptr, size)
    }
    /// Returns all relocations overlapping the `size` bytes at `ptr`. A
    /// relocation's key is the offset of its *first* byte, so the search
    /// start is widened left by `pointer_size - 1` to catch pointers that
    /// begin before the range but extend into it.
    fn relocations(&self, ptr: Pointer, size: usize)
        -> EvalResult<btree_map::Range<usize, AllocId>>
    {
        let start = ptr.offset.saturating_sub(self.pointer_size - 1);
        // Bug fix: the end bound must be `ptr.offset + size`, not
        // `start + size`. The latter shifted the whole window left, missing
        // relocations that begin in the last `pointer_size - 1` bytes of the
        // range and making zero-sized edge queries always come back empty.
        let end = ptr.offset + size;
        Ok(try!(self.get(ptr.alloc_id)).relocations.range(Included(&start), Excluded(&end)))
    }
    /// Removes every relocation overlapping `size` bytes at `ptr`.
    fn clear_relocations(&mut self, ptr: Pointer, size: usize) -> EvalResult<()> {
        let keys: Vec<_> = try!(self.relocations(ptr, size)).map(|(&k, _)| k).collect();
        let alloc = try!(self.get_mut(ptr.alloc_id));
        for k in keys {
            alloc.relocations.remove(&k);
        }
        Ok(())
    }
    /// Fails with `ReadPointerAsBytes` if a relocation straddles either edge
    /// of the `size`-byte range at `ptr`.
    fn check_relocation_edges(&self, ptr: Pointer, size: usize) -> EvalResult<()> {
        let overlapping_start = try!(self.relocations(ptr, 0)).count();
        let overlapping_end = try!(self.relocations(ptr.offset(size as isize), 0)).count();
        if overlapping_start + overlapping_end != 0 {
            return Err(EvalError::ReadPointerAsBytes);
        }
        Ok(())
    }
    /// Copies the relocations inside `size` bytes at `src` to the matching
    /// positions at `dest`.
    fn copy_relocations(&mut self, src: Pointer, dest: Pointer, size: usize) -> EvalResult<()> {
        let relocations: Vec<_> = try!(self.relocations(src, size))
            .map(|(&offset, &alloc_id)| {
                // Update relocation offsets for the new positions in the destination allocation.
                // NOTE(review): this subtraction can underflow when a relocation
                // starts before `src.offset` and `dest.offset < src.offset`;
                // callers are expected to have run `check_relocation_edges` first.
                (offset + dest.offset - src.offset, alloc_id)
            })
            .collect();
        try!(self.get_mut(dest.alloc_id)).relocations.extend(relocations);
        Ok(())
    }
    /// Copies `size` bytes (and their relocations) from `src` to `dest`.
    pub fn copy(&mut self, src: Pointer, dest: Pointer, size: usize) -> EvalResult<()> {
        // TODO(tsion): Track and check for undef bytes.
        try!(self.check_relocation_edges(src, size));
        let src_bytes = try!(self.get_bytes_unchecked_mut(src, size)).as_mut_ptr();
        let dest_bytes = try!(self.get_bytes_mut(dest, size)).as_mut_ptr();
        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        unsafe {
            if src.alloc_id == dest.alloc_id {
                ptr::copy(src_bytes, dest_bytes, size);
            } else {
                ptr::copy_nonoverlapping(src_bytes, dest_bytes, size);
            }
        }
        self.copy_relocations(src, dest, size)
    }
    /// Overwrites bytes at `ptr` with the contents of `src`.
    pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<()> {
        self.get_bytes_mut(ptr, src.len()).map(|dest| dest.clone_from_slice(src))
    }
    /// Reads a pointer: the stored offset bytes plus the relocation recorded
    /// at exactly `ptr.offset`. Fails with `ReadBytesAsPointer` when no
    /// relocation is present there.
    pub fn read_ptr(&self, ptr: Pointer) -> EvalResult<Pointer> {
        let size = self.pointer_size;
        let offset = try!(self.get_bytes_unchecked(ptr, size))
            .read_uint::<NativeEndian>(size).unwrap() as usize;
        let alloc = try!(self.get(ptr.alloc_id));
        match alloc.relocations.get(&ptr.offset) {
            Some(&alloc_id) => Ok(Pointer { alloc_id: alloc_id, offset: offset }),
            None => Err(EvalError::ReadBytesAsPointer),
        }
    }
    /// Stores `ptr` at `dest`: its offset as raw bytes plus a relocation
    /// entry recording the target allocation.
    pub fn write_ptr(&mut self, dest: Pointer, ptr: Pointer) -> EvalResult<()> {
        {
            let size = self.pointer_size;
            let mut bytes = try!(self.get_bytes_mut(dest, size));
            bytes.write_uint::<NativeEndian>(ptr.offset as u64, size).unwrap();
        }
        try!(self.get_mut(dest.alloc_id)).relocations.insert(dest.offset, ptr.alloc_id);
        Ok(())
    }
    /// Dispatches a primitive value to the appropriately sized write.
    pub fn write_primval(&mut self, ptr: Pointer, val: PrimVal) -> EvalResult<()> {
        let pointer_size = self.pointer_size;
        match val {
            PrimVal::Bool(b) => self.write_bool(ptr, b),
            PrimVal::I8(n) => self.write_int(ptr, n as i64, 1),
            PrimVal::I16(n) => self.write_int(ptr, n as i64, 2),
            PrimVal::I32(n) => self.write_int(ptr, n as i64, 4),
            PrimVal::I64(n) => self.write_int(ptr, n as i64, 8),
            PrimVal::U8(n) => self.write_uint(ptr, n as u64, 1),
            PrimVal::U16(n) => self.write_uint(ptr, n as u64, 2),
            PrimVal::U32(n) => self.write_uint(ptr, n as u64, 4),
            PrimVal::U64(n) => self.write_uint(ptr, n as u64, 8),
            PrimVal::IntegerPtr(n) => self.write_uint(ptr, n as u64, pointer_size),
            PrimVal::AbstractPtr(_p) => unimplemented!(),
        }
    }
    /// Reads one byte as a bool; any value other than 0 or 1 is `InvalidBool`.
    pub fn read_bool(&self, ptr: Pointer) -> EvalResult<bool> {
        let bytes = try!(self.get_bytes(ptr, 1));
        match bytes[0] {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err(EvalError::InvalidBool),
        }
    }
    /// Writes a bool as a single 0/1 byte.
    pub fn write_bool(&mut self, ptr: Pointer, b: bool) -> EvalResult<()> {
        self.get_bytes_mut(ptr, 1).map(|bytes| bytes[0] = b as u8)
    }
    /// Reads a signed integer of `size` bytes in native endianness.
    pub fn read_int(&self, ptr: Pointer, size: usize) -> EvalResult<i64> {
        self.get_bytes(ptr, size).map(|mut b| b.read_int::<NativeEndian>(size).unwrap())
    }
    /// Writes a signed integer of `size` bytes in native endianness.
    pub fn write_int(&mut self, ptr: Pointer, n: i64, size: usize) -> EvalResult<()> {
        self.get_bytes_mut(ptr, size).map(|mut b| b.write_int::<NativeEndian>(n, size).unwrap())
    }
    /// Reads an unsigned integer of `size` bytes in native endianness.
    pub fn read_uint(&self, ptr: Pointer, size: usize) -> EvalResult<u64> {
        self.get_bytes(ptr, size).map(|mut b| b.read_uint::<NativeEndian>(size).unwrap())
    }
    /// Writes an unsigned integer of `size` bytes in native endianness.
    pub fn write_uint(&mut self, ptr: Pointer, n: u64, size: usize) -> EvalResult<()> {
        self.get_bytes_mut(ptr, size).map(|mut b| b.write_uint::<NativeEndian>(n, size).unwrap())
    }
    /// Reads a pointer-sized signed integer.
    pub fn read_isize(&self, ptr: Pointer) -> EvalResult<i64> {
        self.read_int(ptr, self.pointer_size)
    }
    /// Writes a pointer-sized signed integer.
    pub fn write_isize(&mut self, ptr: Pointer, n: i64) -> EvalResult<()> {
        let size = self.pointer_size;
        self.write_int(ptr, n, size)
    }
    /// Reads a pointer-sized unsigned integer.
    pub fn read_usize(&self, ptr: Pointer) -> EvalResult<u64> {
        self.read_uint(ptr, self.pointer_size)
    }
    /// Writes a pointer-sized unsigned integer.
    pub fn write_usize(&mut self, ptr: Pointer, n: u64) -> EvalResult<()> {
        let size = self.pointer_size;
        self.write_uint(ptr, n, size)
    }
}
impl Pointer {
    /// Returns a pointer into the same allocation, displaced by `i` bytes
    /// (negative `i` moves backwards; arithmetic goes through `isize`).
    pub fn offset(self, i: isize) -> Self {
        let new_offset = (self.offset as isize + i) as usize;
        Pointer { alloc_id: self.alloc_id, offset: new_offset }
    }
}
impl Repr {
    /// Total size in bytes of a value with this representation.
    pub fn size(&self) -> usize {
        match self {
            &Repr::Primitive { size } => size,
            &Repr::Aggregate { size, .. } => size,
            &Repr::Array { elem_size, length } => elem_size * length,
        }
    }
}
Reorganize memory methods.
use byteorder::{ByteOrder, NativeEndian, ReadBytesExt, WriteBytesExt};
use std::collections::{btree_map, BTreeMap, HashMap};
use std::collections::Bound::{Included, Excluded};
use std::mem;
use std::ptr;
use error::{EvalError, EvalResult};
use primval::PrimVal;
/// Opaque handle naming one allocation inside `Memory`.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct AllocId(u64);
/// A single chunk of interpreter memory.
#[derive(Debug)]
pub struct Allocation {
    /// The raw bytes backing this allocation.
    pub bytes: Box<[u8]>,
    /// Byte offsets at which a pointer is stored, mapped to the allocation
    /// that pointer targets; the bytes at the offset hold the pointee offset.
    pub relocations: BTreeMap<usize, AllocId>,
    // TODO(tsion): undef mask
}
/// A pointer value: an allocation plus a byte offset into it.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Pointer {
    /// The allocation this pointer points into.
    pub alloc_id: AllocId,
    /// Byte offset from the start of that allocation.
    pub offset: usize,
}
impl Pointer {
    /// Returns a pointer into the same allocation, displaced by `i` bytes
    /// (negative `i` moves backwards; arithmetic goes through `isize`).
    pub fn offset(self, i: isize) -> Self {
        let new_offset = (self.offset as isize + i) as usize;
        Pointer { alloc_id: self.alloc_id, offset: new_offset }
    }
}
/// Placement of one field within an aggregate: where it starts and how big it is.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct FieldRepr {
    /// Byte offset of the field from the start of the aggregate.
    pub offset: usize,
    /// Size of the field in bytes.
    pub size: usize,
}
/// How a value of some type is laid out in interpreter memory.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Repr {
    /// Representation for a non-aggregate type such as a boolean, integer, character or pointer.
    Primitive {
        size: usize
    },
    /// The representation for aggregate types including structs, enums, and tuples.
    Aggregate {
        /// The size of the discriminant (an integer). Should be between 0 and 8. Always 0 for
        /// structs and tuples.
        discr_size: usize,
        /// The size of the entire aggregate, including the discriminant.
        size: usize,
        /// The representations of the contents of each variant.
        variants: Vec<Vec<FieldRepr>>,
    },
    /// Fixed-length array: `length` elements of `elem_size` bytes each.
    Array {
        elem_size: usize,
        /// Number of elements.
        length: usize,
    },
}
impl Repr {
    /// Total size in bytes of a value with this representation.
    pub fn size(&self) -> usize {
        match self {
            &Repr::Primitive { size } => size,
            &Repr::Aggregate { size, .. } => size,
            &Repr::Array { elem_size, length } => elem_size * length,
        }
    }
}
/// The dynamic memory of the interpreted program: byte-addressed
/// allocations indexed by `AllocId`.
pub struct Memory {
    /// Map from raw allocation id (`AllocId.0`) to its backing allocation.
    alloc_map: HashMap<u64, Allocation>,
    /// Next id handed out by `allocate`; monotonically increasing.
    next_id: u64,
    /// Size of a pointer in bytes. Currently the host's pointer size.
    pub pointer_size: usize,
}
impl Memory {
    /// Creates an empty program memory with no allocations.
    pub fn new() -> Self {
        Memory {
            alloc_map: HashMap::new(),
            next_id: 0,
            // TODO(tsion): Should this be host's or target's usize?
            pointer_size: mem::size_of::<usize>(),
        }
    }
    /// Creates a fresh, zero-filled allocation of `size` bytes and returns
    /// a pointer to its first byte.
    pub fn allocate(&mut self, size: usize) -> Pointer {
        let id = AllocId(self.next_id);
        let alloc = Allocation {
            bytes: vec![0; size].into_boxed_slice(),
            relocations: BTreeMap::new(),
        };
        self.alloc_map.insert(self.next_id, alloc);
        self.next_id += 1;
        Pointer {
            alloc_id: id,
            offset: 0,
        }
    }
    ////////////////////////////////////////////////////////////////////////////////
    // Allocation accessors
    ////////////////////////////////////////////////////////////////////////////////
    /// Resolves `id` to its allocation, or `DanglingPointerDeref` if it was
    /// never allocated.
    pub fn get(&self, id: AllocId) -> EvalResult<&Allocation> {
        self.alloc_map.get(&id.0).ok_or(EvalError::DanglingPointerDeref)
    }
    /// Mutable counterpart of `get`.
    pub fn get_mut(&mut self, id: AllocId) -> EvalResult<&mut Allocation> {
        self.alloc_map.get_mut(&id.0).ok_or(EvalError::DanglingPointerDeref)
    }
    ////////////////////////////////////////////////////////////////////////////////
    // Byte accessors
    ////////////////////////////////////////////////////////////////////////////////
    /// Borrows `size` bytes starting at `ptr`, bounds-checked but without
    /// inspecting relocations or undef bytes.
    fn get_bytes_unchecked(&self, ptr: Pointer, size: usize) -> EvalResult<&[u8]> {
        let alloc = try!(self.get(ptr.alloc_id));
        if ptr.offset + size > alloc.bytes.len() {
            return Err(EvalError::PointerOutOfBounds);
        }
        Ok(&alloc.bytes[ptr.offset..ptr.offset + size])
    }
    /// Mutable counterpart of `get_bytes_unchecked`.
    fn get_bytes_unchecked_mut(&mut self, ptr: Pointer, size: usize) -> EvalResult<&mut [u8]> {
        let alloc = try!(self.get_mut(ptr.alloc_id));
        if ptr.offset + size > alloc.bytes.len() {
            return Err(EvalError::PointerOutOfBounds);
        }
        Ok(&mut alloc.bytes[ptr.offset..ptr.offset + size])
    }
    /// Borrows `size` bytes at `ptr` for reading as plain data; fails with
    /// `ReadPointerAsBytes` if the range overlaps any stored pointer.
    fn get_bytes(&self, ptr: Pointer, size: usize) -> EvalResult<&[u8]> {
        if try!(self.relocations(ptr, size)).count() != 0 {
            return Err(EvalError::ReadPointerAsBytes);
        }
        // TODO(tsion): Track and check for undef bytes.
        self.get_bytes_unchecked(ptr, size)
    }
    /// Mutably borrows `size` bytes at `ptr`; relocations overlapping the
    /// range are dropped first, since their pointer bytes are about to be
    /// overwritten with plain data.
    fn get_bytes_mut(&mut self, ptr: Pointer, size: usize) -> EvalResult<&mut [u8]> {
        try!(self.clear_relocations(ptr, size));
        self.get_bytes_unchecked_mut(ptr, size)
    }
    ////////////////////////////////////////////////////////////////////////////////
    // Reading and writing
    ////////////////////////////////////////////////////////////////////////////////
    /// Copies `size` bytes (and their relocations) from `src` to `dest`.
    pub fn copy(&mut self, src: Pointer, dest: Pointer, size: usize) -> EvalResult<()> {
        // TODO(tsion): Track and check for undef bytes.
        try!(self.check_relocation_edges(src, size));
        let src_bytes = try!(self.get_bytes_unchecked_mut(src, size)).as_mut_ptr();
        let dest_bytes = try!(self.get_bytes_mut(dest, size)).as_mut_ptr();
        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        unsafe {
            if src.alloc_id == dest.alloc_id {
                ptr::copy(src_bytes, dest_bytes, size);
            } else {
                ptr::copy_nonoverlapping(src_bytes, dest_bytes, size);
            }
        }
        self.copy_relocations(src, dest, size)
    }
    /// Overwrites bytes at `ptr` with the contents of `src`.
    pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<()> {
        self.get_bytes_mut(ptr, src.len()).map(|dest| dest.clone_from_slice(src))
    }
    /// Reads a pointer: the stored offset bytes plus the relocation recorded
    /// at exactly `ptr.offset`. Fails with `ReadBytesAsPointer` when no
    /// relocation is present there.
    pub fn read_ptr(&self, ptr: Pointer) -> EvalResult<Pointer> {
        let size = self.pointer_size;
        let offset = try!(self.get_bytes_unchecked(ptr, size))
            .read_uint::<NativeEndian>(size).unwrap() as usize;
        let alloc = try!(self.get(ptr.alloc_id));
        match alloc.relocations.get(&ptr.offset) {
            Some(&alloc_id) => Ok(Pointer { alloc_id: alloc_id, offset: offset }),
            None => Err(EvalError::ReadBytesAsPointer),
        }
    }
    /// Stores `ptr` at `dest`: its offset as raw bytes plus a relocation
    /// entry recording the target allocation.
    pub fn write_ptr(&mut self, dest: Pointer, ptr: Pointer) -> EvalResult<()> {
        {
            let size = self.pointer_size;
            let mut bytes = try!(self.get_bytes_mut(dest, size));
            bytes.write_uint::<NativeEndian>(ptr.offset as u64, size).unwrap();
        }
        try!(self.get_mut(dest.alloc_id)).relocations.insert(dest.offset, ptr.alloc_id);
        Ok(())
    }
    /// Dispatches a primitive value to the appropriately sized write.
    pub fn write_primval(&mut self, ptr: Pointer, val: PrimVal) -> EvalResult<()> {
        let pointer_size = self.pointer_size;
        match val {
            PrimVal::Bool(b) => self.write_bool(ptr, b),
            PrimVal::I8(n) => self.write_int(ptr, n as i64, 1),
            PrimVal::I16(n) => self.write_int(ptr, n as i64, 2),
            PrimVal::I32(n) => self.write_int(ptr, n as i64, 4),
            PrimVal::I64(n) => self.write_int(ptr, n as i64, 8),
            PrimVal::U8(n) => self.write_uint(ptr, n as u64, 1),
            PrimVal::U16(n) => self.write_uint(ptr, n as u64, 2),
            PrimVal::U32(n) => self.write_uint(ptr, n as u64, 4),
            PrimVal::U64(n) => self.write_uint(ptr, n as u64, 8),
            PrimVal::IntegerPtr(n) => self.write_uint(ptr, n as u64, pointer_size),
            PrimVal::AbstractPtr(_p) => unimplemented!(),
        }
    }
    /// Reads one byte as a bool; any value other than 0 or 1 is `InvalidBool`.
    pub fn read_bool(&self, ptr: Pointer) -> EvalResult<bool> {
        let bytes = try!(self.get_bytes(ptr, 1));
        match bytes[0] {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err(EvalError::InvalidBool),
        }
    }
    /// Writes a bool as a single 0/1 byte.
    pub fn write_bool(&mut self, ptr: Pointer, b: bool) -> EvalResult<()> {
        self.get_bytes_mut(ptr, 1).map(|bytes| bytes[0] = b as u8)
    }
    /// Reads a signed integer of `size` bytes in native endianness.
    pub fn read_int(&self, ptr: Pointer, size: usize) -> EvalResult<i64> {
        self.get_bytes(ptr, size).map(|mut b| b.read_int::<NativeEndian>(size).unwrap())
    }
    /// Writes a signed integer of `size` bytes in native endianness.
    pub fn write_int(&mut self, ptr: Pointer, n: i64, size: usize) -> EvalResult<()> {
        self.get_bytes_mut(ptr, size).map(|mut b| b.write_int::<NativeEndian>(n, size).unwrap())
    }
    /// Reads an unsigned integer of `size` bytes in native endianness.
    pub fn read_uint(&self, ptr: Pointer, size: usize) -> EvalResult<u64> {
        self.get_bytes(ptr, size).map(|mut b| b.read_uint::<NativeEndian>(size).unwrap())
    }
    /// Writes an unsigned integer of `size` bytes in native endianness.
    pub fn write_uint(&mut self, ptr: Pointer, n: u64, size: usize) -> EvalResult<()> {
        self.get_bytes_mut(ptr, size).map(|mut b| b.write_uint::<NativeEndian>(n, size).unwrap())
    }
    /// Reads a pointer-sized signed integer.
    pub fn read_isize(&self, ptr: Pointer) -> EvalResult<i64> {
        self.read_int(ptr, self.pointer_size)
    }
    /// Writes a pointer-sized signed integer.
    pub fn write_isize(&mut self, ptr: Pointer, n: i64) -> EvalResult<()> {
        let size = self.pointer_size;
        self.write_int(ptr, n, size)
    }
    /// Reads a pointer-sized unsigned integer.
    pub fn read_usize(&self, ptr: Pointer) -> EvalResult<u64> {
        self.read_uint(ptr, self.pointer_size)
    }
    /// Writes a pointer-sized unsigned integer.
    pub fn write_usize(&mut self, ptr: Pointer, n: u64) -> EvalResult<()> {
        let size = self.pointer_size;
        self.write_uint(ptr, n, size)
    }
    ////////////////////////////////////////////////////////////////////////////////
    // Relocations
    ////////////////////////////////////////////////////////////////////////////////
    /// Returns all relocations overlapping the `size` bytes at `ptr`. A
    /// relocation's key is the offset of its *first* byte, so the search
    /// start is widened left by `pointer_size - 1` to catch pointers that
    /// begin before the range but extend into it.
    fn relocations(&self, ptr: Pointer, size: usize)
        -> EvalResult<btree_map::Range<usize, AllocId>>
    {
        let start = ptr.offset.saturating_sub(self.pointer_size - 1);
        // Bug fix: the end bound must be `ptr.offset + size`, not
        // `start + size`. The latter shifted the whole window left, missing
        // relocations that begin in the last `pointer_size - 1` bytes of the
        // range and making zero-sized edge queries always come back empty.
        let end = ptr.offset + size;
        Ok(try!(self.get(ptr.alloc_id)).relocations.range(Included(&start), Excluded(&end)))
    }
    /// Removes every relocation overlapping `size` bytes at `ptr`.
    fn clear_relocations(&mut self, ptr: Pointer, size: usize) -> EvalResult<()> {
        let keys: Vec<_> = try!(self.relocations(ptr, size)).map(|(&k, _)| k).collect();
        let alloc = try!(self.get_mut(ptr.alloc_id));
        for k in keys {
            alloc.relocations.remove(&k);
        }
        Ok(())
    }
    /// Fails with `ReadPointerAsBytes` if a relocation straddles either edge
    /// of the `size`-byte range at `ptr`.
    fn check_relocation_edges(&self, ptr: Pointer, size: usize) -> EvalResult<()> {
        let overlapping_start = try!(self.relocations(ptr, 0)).count();
        let overlapping_end = try!(self.relocations(ptr.offset(size as isize), 0)).count();
        if overlapping_start + overlapping_end != 0 {
            return Err(EvalError::ReadPointerAsBytes);
        }
        Ok(())
    }
    /// Copies the relocations inside `size` bytes at `src` to the matching
    /// positions at `dest`.
    fn copy_relocations(&mut self, src: Pointer, dest: Pointer, size: usize) -> EvalResult<()> {
        let relocations: Vec<_> = try!(self.relocations(src, size))
            .map(|(&offset, &alloc_id)| {
                // Update relocation offsets for the new positions in the destination allocation.
                // NOTE(review): this subtraction can underflow when a relocation
                // starts before `src.offset` and `dest.offset < src.offset`;
                // callers are expected to have run `check_relocation_edges` first.
                (offset + dest.offset - src.offset, alloc_id)
            })
            .collect();
        try!(self.get_mut(dest.alloc_id)).relocations.extend(relocations);
        Ok(())
    }
}
|
#![allow(dead_code)]
use errors::*;
use libc::{c_int, c_uint};
use libmodbus_sys as ffi;
use std::io::Error;
/// Modbus protocol exceptions
///
/// Documentation source: https://en.wikipedia.org/wiki/Modbus#Main_Modbus_exception_codes
///
/// Each variant's discriminant equals the raw exception code carried on the wire.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Exception {
    /// (1) Illegal Function - Function code received in the query is not recognized or allowed by slave
    IllegalFunction = 1,
    /// (2) Illegal Data Address - Data address of some or all the required entities are not allowed or do not exist in
    /// slave
    IllegalDataAddress = 2,
    /// (3) Illegal Data Value - Value is not accepted by slave
    IllegalDataValue = 3,
    /// (4) Slave Device Failure - Unrecoverable error occurred while slave was attempting to perform requested action
    SlaveOrServerFailure = 4,
    /// (5) Acknowledge - Slave has accepted request and is processing it, but a long duration of time is required.
    /// This response is returned to prevent a timeout error from occurring in the master. Master can next issue a Poll
    /// Program Complete message to determine whether processing is completed
    Acknowledge = 5,
    /// (6) Slave Device Busy - Slave is engaged in processing a long-duration command. Master should retry later
    SlaveDeviceBusy = 6,
    /// (7) Negative Acknowledge - Slave cannot perform the programming functions. Master should request diagnostic or
    /// error information from slave
    NegativeAcknowledge = 7,
    /// (8) Memory Parity Error - Slave detected a parity error in memory. Master can retry the request, but service
    /// may be required on the slave device
    MemoryParity = 8,
    /// (9) Not defined
    NotDefined = 9,
    /// (10) Gateway Path Unavailable - Specialized for Modbus gateways. Indicates a misconfigured gateway
    GatewayPath = 10,
    /// (11) Gateway Target Device Failed to Respond - Specialized for Modbus gateways. Sent when slave fails to respond
    GatewayTarget = 11,
}
/// Modbus function codes
///
/// Documentation source: https://en.wikipedia.org/wiki/Modbus#Supported_function_codes
///
/// Each variant's discriminant is the decimal function code; the doc comment
/// gives the hexadecimal form used on the wire.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum FunctionCode {
    /// 0x01 Read Coils
    ReadCoils = 1,
    /// 0x02 Read Discrete Inputs
    ReadDiscreteInputs = 2,
    /// 0x03 Read Multiple Holding Registers
    ReadHoldingRegisters = 3,
    /// 0x04 Read Input Registers
    ReadInputRegisters = 4,
    /// 0x05 Write Single Coil
    WriteSingleCoil = 5,
    /// 0x06 Write Single Holding Register
    WriteSingleRegister = 6,
    /// 0x07 Read Exception Status
    ReadExceptionStatus = 7,
    /// 0x0F (15) Write Multiple Coils
    WriteMultipleCoils = 15,
    /// 0x10 (16) Write Multiple Holding Registers
    WriteMultipleRegisters = 16,
    /// 0x11 (17) Report Slave ID
    ReportSlaveId = 17,
    /// 0x16 (22) Mask Write Register
    MaskWriteRegister = 22,
    /// 0x17 (23) Read/Write Multiple Registers
    WriteAndReadRegisters = 23,
}
/// Error-recovery strategies accepted by libmodbus.
#[derive(Debug, Copy, Clone)]
pub enum ErrorRecoveryMode {
    /// Maps to `MODBUS_ERROR_RECOVERY_LINK`.
    Link,
    /// Maps to `MODBUS_ERROR_RECOVERY_PROTOCOL`.
    Protocol,
}
impl ErrorRecoveryMode {
fn as_raw(&self) -> ffi::modbus_error_recovery_mode {
use ErrorRecoveryMode::*;
match *self {
Link => ffi::modbus_error_recovery_mode_MODBUS_ERROR_RECOVERY_LINK,
Protocol => ffi::modbus_error_recovery_mode_MODBUS_ERROR_RECOVERY_PROTOCOL,
}
}
}
/// Timeout struct
///
/// * The value of **usec** argument must be in the range 0 to 999999.
// For use with timeout methods such as get_byte_timeout and set_byte_timeout
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Timeout {
    /// Whole seconds of the timeout.
    pub sec: u32,
    /// Additional microseconds; must be in 0..=999999.
    pub usec: u32,
}
/// Safe interface for [libmodbus](http://libmodbus.org)
///
/// The different parts of libmodbus are implemented as traits. The modules of this crate contains these
/// traits and a implementation with a, hopefully safe, interface.
///
pub struct Modbus {
    /// Raw libmodbus context handle passed to every FFI call.
    /// NOTE(review): presumably freed via `modbus_free` in a `Drop` impl
    /// elsewhere in this crate — confirm before relying on it.
    pub ctx: *mut ffi::modbus_t,
}
impl Modbus {
    // Constants
    // Protocol limits and defaults re-exported from libmodbus, cast to Rust types.
    pub const ENOBASE: u32 = ffi::MODBUS_ENOBASE;
    pub const MAX_ADU_LENGTH: usize = ffi::MODBUS_MAX_ADU_LENGTH as usize;
    pub const MAX_PDU_LENGTH: usize = ffi::MODBUS_MAX_PDU_LENGTH as usize;
    pub const MAX_READ_BITS: usize = ffi::MODBUS_MAX_READ_BITS as usize;
    pub const MAX_READ_REGISTERS: usize = ffi::MODBUS_MAX_READ_REGISTERS as usize;
    pub const MAX_WR_READ_REGISTERS: usize = ffi::MODBUS_MAX_WR_READ_REGISTERS as usize;
    pub const MAX_WR_WRITE_REGISTERS: usize = ffi::MODBUS_MAX_WR_WRITE_REGISTERS as usize;
    pub const MAX_WRITE_BITS: usize = ffi::MODBUS_MAX_WRITE_BITS as usize;
    pub const MAX_WRITE_REGISTERS: usize = ffi::MODBUS_MAX_WRITE_REGISTERS as usize;
    pub const RTU_MAX_ADU_LENGTH: usize = ffi::MODBUS_RTU_MAX_ADU_LENGTH as usize;
    pub const TCP_DEFAULT_PORT: u32 = ffi::MODBUS_TCP_DEFAULT_PORT;
    pub const TCP_MAX_ADU_LENGTH: u32 = ffi::MODBUS_TCP_MAX_ADU_LENGTH;
    pub const TCP_SLAVE: u32 = ffi::MODBUS_TCP_SLAVE;
/// `connect` - establish a Modbus connection
///
/// The [`connect()`](#method.connect) function shall establish a connection to a Modbus server,
/// a network or a bus.
///
/// # Return value
///
/// The function return an OK Result if successful. Otherwise it contains an Error.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP};
///
/// // create server
/// let mut server = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// // create client
/// let client = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// // start server in listen mode
/// let _ = server.tcp_listen(1).unwrap();
///
/// assert!(client.connect().is_ok())
/// ```
pub fn connect(&self) -> Result<()> {
unsafe {
match ffi::modbus_connect(self.ctx) {
-1 => bail!(Error::last_os_error()),
0 => Ok(()),
_ => panic!("libmodbus API incompatible response"),
}
}
}
/// `flush` - flush non-transmitted data
///
/// The [`flush()`](#method.flush) function shall discard data received but not read to the socket or file
/// descriptor associated to the context ctx.
///
/// # Return value
///
/// The function return an OK Result if successful. Otherwise it contains an Error.
///
/// # Examples
///
/// ```
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert!(modbus.flush().is_ok());
/// ```
pub fn flush(&self) -> Result<()> {
unsafe {
match ffi::modbus_flush(self.ctx) {
-1 => bail!(Error::last_os_error()),
0 => Ok(()),
_ => panic!("libmodbus API incompatible response"),
}
}
}
/// `set_slave` - set slave number in the context
///
/// The [`set_slave()`](#method.set_slave) function shall set the slave number in the libmodbus context.
/// The behavior depends of network and the role of the device:
///
/// RTU
/// Define the slave ID of the remote device to talk in master mode or set the internal slave ID in slave mode.
/// According to the protocol, a Modbus device must only accept message holding its slave number or the special
/// broadcast number.
/// TCP
/// The slave number is only required in TCP if the message must reach a device on a serial network.
/// Some not compliant devices or software (such as modpoll) uses the slave ID as unit identifier,
/// that’s incorrect (cf page 23 of Modbus Messaging Implementation Guide v1.0b) but without the slave value,
/// the faulty remote device or software drops the requests!
/// The special value MODBUS_TCP_SLAVE (0xFF) can be used in TCP mode to restore the default value.
/// The broadcast address is MODBUS_BROADCAST_ADDRESS.
/// This special value must be use when you want all Modbus devices of the network receive the request.
///
/// # Return value
///
/// The function return an OK Result if successful. Otherwise it contains an Error.
///
/// # Parameters
///
/// * `slave` - new slave ID
///
/// # Examples
///
/// ```
/// use libmodbus_rs::{Modbus, ModbusRTU};
///
/// const YOUR_DEVICE_ID: u8 = 1;
/// let mut modbus = Modbus::new_rtu("/dev/ttyUSB0", 115200, 'N', 8, 1).unwrap();
///
/// assert!(modbus.set_slave(YOUR_DEVICE_ID).is_ok());
/// ```
pub fn set_slave(&mut self, slave: u8) -> Result<()> {
unsafe {
match ffi::modbus_set_slave(self.ctx, slave as c_int) {
-1 => bail!(Error::last_os_error()),
0 => Ok(()),
_ => panic!("libmodbus API incompatible response"),
}
}
}
/// `set_debug` - set debug flag of the context
///
/// Enables or disables libmodbus's verbose tracing. When enabled, the raw
/// bytes of every Modbus message are printed to stdout/stderr, e.g.:
///
/// ```bash
/// [00][14][00][00][00][06][12][03][00][6B][00][03]
/// Waiting for a confirmation…
/// <00><14><00><00><00><09><12><03><06><02><2B><00><00><00><00>
/// ```
///
/// Debugging is off by default.
///
/// # Return value
///
/// `Ok(())` on success, otherwise an `Err` carrying the OS error.
///
/// # Parameters
///
/// * `flag` - `true` or `false`, enables or disables debug mode
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP};
///
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert!(modbus.set_debug(true).is_ok());
/// ```
pub fn set_debug(&mut self, flag: bool) -> Result<()> {
    // C truthiness: 1 enables, 0 disables.
    let raw_flag: c_int = if flag { 1 } else { 0 };
    let rc = unsafe { ffi::modbus_set_debug(self.ctx, raw_flag) };
    match rc {
        -1 => bail!(Error::last_os_error()),
        0 => Ok(()),
        _ => panic!("libmodbus API incompatible response"),
    }
}
/// `get_byte_timeout` - get timeout between bytes
///
/// Returns the [`Timeout`](struct.Timeout.html) interval allowed between two
/// consecutive bytes of the same message.
///
/// # Return value
///
/// `Ok(Timeout)` on success, otherwise an `Err` carrying the OS error.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP, Timeout};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert_eq!(modbus.get_byte_timeout().unwrap(), Timeout { sec: 0, usec: 500000 });
/// ```
pub fn get_byte_timeout(&self) -> Result<Timeout> {
    let (mut sec, mut usec) = (0, 0);
    let rc = unsafe { ffi::modbus_get_byte_timeout(self.ctx, &mut sec, &mut usec) };
    match rc {
        -1 => bail!(Error::last_os_error()),
        0 => Ok(Timeout { sec, usec }),
        _ => panic!("libmodbus API incompatible response"),
    }
}
/// `set_byte_timeout` - set timeout between bytes
///
/// The [`set_byte_timeout()`](#method.set_byte_timeout) function shall set the timeout interval between two
/// consecutive bytes of the same message.
/// The timeout is an upper bound on the amount of time elapsed before select() returns, if the time elapsed is
/// longer than the defined timeout,
/// an ETIMEDOUT error will be raised by the function waiting for a response.
///
/// The value of **usec** argument must be in the range 0 to 999999.
///
/// If both **sec** and **usec** are zero, this timeout will not be used at all. In this case,
/// [`set_byte_timeout()`](#method.set_byte_timeout)
/// governs the entire handling of the response, the full confirmation response must be received before expiration
/// of the response timeout.
/// When a byte timeout is set, the response timeout is only used to wait for until the first byte of the response.
///
/// # Return value
///
/// The function return an OK Result if successful. Otherwise it contains an Error.
///
/// # Parameters
///
/// * `timeout` - Timeout struct with `sec` and `usec`
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP, Timeout};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// let timeout = Timeout { sec: 1, usec: 500000 };
/// assert!(modbus.set_byte_timeout(timeout).is_ok());
/// ```
pub fn set_byte_timeout(&mut self, timeout: Timeout) -> Result<()> {
unsafe {
match ffi::modbus_set_byte_timeout(self.ctx, timeout.sec, timeout.usec) {
-1 => bail!(Error::last_os_error()),
0 => Ok(()),
_ => panic!("libmodbus API incompatible response"),
}
}
}
/// `get_response_timeout` - get timeout for response
///
/// Returns the [`Timeout`](struct.Timeout.html) interval used to wait for
/// a response.
///
/// # Return value
///
/// `Ok(Timeout)` on success, otherwise an `Err` carrying the OS error.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP, Timeout};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert_eq!(modbus.get_response_timeout().unwrap(), Timeout { sec: 0, usec: 500000 });
/// ```
pub fn get_response_timeout(&self) -> Result<Timeout> {
    let (mut sec, mut usec) = (0, 0);
    let rc = unsafe { ffi::modbus_get_response_timeout(self.ctx, &mut sec, &mut usec) };
    match rc {
        -1 => bail!(Error::last_os_error()),
        0 => Ok(Timeout { sec, usec }),
        _ => panic!("libmodbus API incompatible response"),
    }
}
/// `set_response_timeout` - set timeout for response
///
/// The [`set_response_timeout()`](#method.set_response_timeout) function shall set the timeout interval used to
/// wait for a response.
/// When a byte timeout is set, if elapsed time for the first byte of response is longer than the given timeout,
/// an ETIMEDOUT error will be raised by the function waiting for a response. When byte timeout is disabled,
/// the full confirmation response must be received before expiration of the response timeout.
///
///
/// If the [`Timeout`](struct.Timeout.html) members are both **sec** and **usec** are zero,
/// this timeout will not be used at all. In this case, [`set_response_timeout()`](#method.set_response_timeout)
/// governs the entire handling of the response, the full confirmation response must be received before expiration
/// of the response timeout.
/// When a byte timeout is set, the response timeout is only used to wait for until the first byte of the response.
///
/// # Return value
///
/// The function return an OK Result if successful. Otherwise it contains an Error.
///
/// # Parameters
///
/// * [`Timeout`](struct.Timeout.html) - Timeout
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP, Timeout};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// let timeout = Timeout { sec: 1, usec: 500000 };
/// assert!(modbus.set_response_timeout(timeout).is_ok());
/// ```
pub fn set_response_timeout(&mut self, timeout: Timeout) -> Result<()> {
unsafe {
match ffi::modbus_set_response_timeout(self.ctx, timeout.sec, timeout.usec) {
-1 => bail!(Error::last_os_error()),
0 => Ok(()),
_ => panic!("libmodbus API incompatible response"),
}
}
}
/// `set_error_recovery` - set the error recovery mode
///
/// Selects how libmodbus reacts when the connection fails or an unexpected
/// byte is received. By default there is no recovery and the caller must
/// handle errors itself.
///
/// * `ErrorRecoveryMode::Link`: reconnect after a delay derived from the
///   response timeout ([`set_response_timeout()`](#method.set_response_timeout)).
///   Send calls retry the close/connect loop until success; select/read calls
///   retry once. Flush requests may also be issued (e.g. on select timeout).
///   A reconnection attempt can hang for several seconds if the network to
///   the remote target is down.
/// * `ErrorRecoveryMode::Protocol`: sleep (for the response-timeout delay)
///   and flush to clean up an ongoing exchange after an invalid message
///   length, wrong TID, or unexpected function code.
///
/// The modes are mask values and therefore complementary. Enabling error
/// recovery for a slave/server is not recommended.
///
/// # Return value
///
/// `Ok(())` on success, otherwise an `Err` carrying the OS error.
///
/// # Parameters
///
/// * `flags` - optional slice of [`ErrorRecoveryMode`](enum.ErrorRecoveryMode.html)
///   values to combine; `None` disables recovery
///
/// # Examples
///
/// ```rust,no_run
/// use libmodbus_rs::{Modbus, ModbusTCP, ErrorRecoveryMode};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert!(modbus.set_error_recovery(Some(&[ErrorRecoveryMode::Link, ErrorRecoveryMode::Protocol])).is_ok());
/// ```
pub fn set_error_recovery(&mut self, flags: Option<&[ErrorRecoveryMode]>) -> Result<()> {
    // OR all requested modes into one raw bitmask; start from NONE.
    let mut raw = ffi::modbus_error_recovery_mode_MODBUS_ERROR_RECOVERY_NONE;
    if let Some(modes) = flags {
        for mode in modes {
            raw = raw | mode.as_raw();
        }
    }
    let rc = unsafe { ffi::modbus_set_error_recovery(self.ctx, raw) };
    match rc {
        -1 => bail!(Error::last_os_error()),
        0 => Ok(()),
        _ => panic!("libmodbus API incompatible response"),
    }
}
// TODO: Add examples from: http://zzeroo.github.io/libmodbus-rs/libmodbus/modbus_set_socket.html
/// `set_socket` - set socket of the context
///
/// The [`set_socket()`](#method.set_socket) function shall set the socket or file descriptor in the libmodbus
/// context.
/// This function is useful for managing multiple client connections to the same server.
///
/// # Return values
///
/// The function return an OK Result if successful. Otherwise it contains an Error.
///
/// # Parameters
///
/// * `socket` - socket or file descriptor to store in the context
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert!(modbus.set_socket(1337).is_ok());
/// ```
pub fn set_socket(&mut self, socket: i32) -> Result<()> {
    unsafe {
        match ffi::modbus_set_socket(self.ctx, socket) {
            -1 => bail!(Error::last_os_error()),
            0 => Ok(()),
            // Consistent with every other wrapper in this impl: any other
            // return value means the linked libmodbus ABI does not match
            // our bindings (was `unreachable!()`, which carried no message).
            _ => panic!("libmodbus API incompatible response"),
        }
    }
}
/// `get_socket` - set socket of the context
///
/// The [`get_socket()`](#method.get_socket) function shall return the current socket or file descriptor of the
/// libmodbus context.
///
/// # Return value
///
/// The function returns a Result containing the current socket or file descriptor of the context if successful.
/// Otherwise it contains an Error.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// let _ = modbus.set_socket(1337).unwrap();
/// assert_eq!(modbus.get_socket().unwrap(), 1337);
/// ```
pub fn get_socket(&self) -> Result<i32> {
unsafe {
match ffi::modbus_get_socket(self.ctx) {
-1 => bail!(Error::last_os_error()),
socket => Ok(socket),
}
}
}
/// `get_header_length` - retrieve the current header length
///
/// The [`get_header_length()`](#method.get_header_length) function shall retrieve the current header length from
/// the backend (e.g. 7 bytes for the TCP MBAP header, as the example shows).
/// This function is convenient to manipulate a message and so it is limited to low-level operations.
///
/// # Return values
///
/// The header length as integer value.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// assert_eq!(modbus.get_header_length(), 7);
/// ```
pub fn get_header_length(&self) -> i32 {
    // Plain accessor on the context; libmodbus reports no error for this call.
    unsafe { ffi::modbus_get_header_length(self.ctx) }
}
/// `reply_exception` - send an exception reponse
///
/// The modbus_reply_exception() function shall send an exception response based on the exception_code in argument.
///
/// The libmodbus provides the following exception codes:
///
/// * Modbus::Exception::IllegalFunction (1)
/// * Modbus::Exception::IllegalDataAddress (2)
/// * Modbus::Exception::IllegalDataValue (3)
/// * Modbus::Exception::SlaveOrServerFailure (4)
/// * Modbus::Exception::Acknowledge (5)
/// * Modbus::Exception::SlaveDeviceBusy (6)
/// * Modbus::Exception::NegativeAcknowledge (7)
/// * Modbus::Exception::MemoryParity (8)
/// * Modbus::Exception::NotDefined (9)
/// * Modbus::Exception::GatewayPath (10)
/// * Modbus::Exception::GatewayTarget (11)
///
/// The initial request `request` is required to build a valid response.
///
/// # Return value
///
/// The function returns the length of the response sent if successful, or an Error.
///
/// # Parameters
///
/// * `request` - initial request, required to build a valid response
/// * `exception_code` - Exception Code
///
/// # Examples
///
/// ```rust,no_run
/// use libmodbus_rs::{Modbus, ModbusClient, ModbusTCP};
/// let modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// use libmodbus_rs::Exception;
///
/// let request: Vec<u8> = vec![0x01];
/// assert_eq!(modbus.reply_exception(&request, Exception::Acknowledge as u32).unwrap(), 9);
/// ```
pub fn reply_exception(&self, request: &[u8], exception_code: u32) -> Result<i32> {
unsafe {
match ffi::modbus_reply_exception(self.ctx, request.as_ptr(), exception_code) {
-1 => bail!(Error::last_os_error()),
len => Ok(len),
}
}
}
/// `close` - close a Modbus connection
///
/// The [`close()`](#method.close) function shall close the connection established with the backend set in the
/// context.
///
/// **It should not be necessary to call this function manually, because Rust's `Drop` trait handles that for you!**
///
/// # Examples
///
/// ```
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// modbus.close();
/// ```
pub fn close(&self) {
    // Closes the socket/descriptor; the context itself stays allocated
    // (see `free()`), so the connection could be re-established later.
    unsafe {
        ffi::modbus_close(self.ctx);
    }
}
/// `free` - free a libmodbus context
///
/// The [`free()`](#method.free) function shall free an allocated modbus_t structure.
///
/// **It should not be necessary to call this function manually, because Rust's `Drop` trait handles that for you!**
///
/// Calling it more than once (or calling it and then dropping the `Modbus`)
/// is safe: the context pointer is nulled after the first release, so the
/// subsequent calls are no-ops instead of a double free.
///
/// # Examples
///
/// ```
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// modbus.free();
/// ```
pub fn free(&mut self) {
    if !self.ctx.is_null() {
        unsafe {
            ffi::modbus_free(self.ctx);
        }
        // Null the pointer so a second call — in particular the one made by
        // `Drop` after a manual `free()` — does not free the context twice.
        self.ctx = std::ptr::null_mut();
    }
}
}
/// `set_bits_from_byte` - set many bits from a single byte value
///
/// Writes all 8 bits of `value` into `dest`, one destination byte per bit
/// (libmodbus stores each coil/bit in its own `u8`), starting at `index`.
///
/// # Panics
///
/// Panics if `dest` is shorter than `index + 8` elements; the underlying C
/// function would otherwise write out of bounds.
///
/// # Parameters
///
/// * `dest` - destination slice (one `u8` per bit)
/// * `index` - starting position where the bits are written
/// * `value` - byte whose 8 bits are written to `dest` starting at `index`
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// let mut dest = vec![0u8; 10];
/// set_bits_from_byte(&mut dest, 2, 0b1111_1111);
/// assert_eq!(dest, [0u8, 0, 1, 1, 1, 1, 1, 1, 1, 1]);
/// ```
pub fn set_bits_from_byte(dest: &mut [u8], index: u32, value: u8) {
    // NOTE(review): sibling helpers take `index` as u32/u16/u8 inconsistently;
    // kept as-is for backward compatibility.
    let start = index as usize;
    // The C side unconditionally writes 8 bytes (one per bit) — bounds-check here.
    assert!(
        dest.len() >= start + 8,
        "set_bits_from_byte: dest (len {}) too short for index {} + 8 bits",
        dest.len(),
        index
    );
    unsafe { ffi::modbus_set_bits_from_byte(dest.as_mut_ptr(), index as c_int, value) }
}
/// `set_bits_from_bytes` - set many bits from an array of bytes
///
/// Reads `num_bit` bits from `bytes` (starting at its first byte, least
/// significant bit first) and writes them into `dest`, one destination byte
/// per bit, starting at position `index`.
///
/// # Panics
///
/// Panics if `dest` is shorter than `index + num_bit` elements, or if `bytes`
/// holds fewer than `num_bit` bits; the underlying C function would otherwise
/// access memory out of bounds.
///
/// # Parameters
///
/// * `dest` - destination slice (one `u8` per bit)
/// * `index` - starting position where the bits are written
/// * `num_bit` - how many bits to write
/// * `bytes` - source bytes whose bits are copied into `dest`
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// let mut dest = vec![0u8; 4];
/// set_bits_from_bytes(&mut dest, 0, 2, &[0b0000_1111]);
/// assert_eq!(dest, [1u8, 1, 0, 0]);
/// ```
pub fn set_bits_from_bytes(dest: &mut [u8], index: u16, num_bit: u16, bytes: &[u8]) {
    let start = index as usize;
    let n = num_bit as usize;
    // The C side writes one dest byte per bit and reads ceil(n/8) source bytes.
    assert!(
        dest.len() >= start + n,
        "set_bits_from_bytes: dest (len {}) too short for index {} + {} bits",
        dest.len(),
        index,
        num_bit
    );
    assert!(
        bytes.len() * 8 >= n,
        "set_bits_from_bytes: bytes (len {}) holds fewer than {} bits",
        bytes.len(),
        num_bit
    );
    unsafe { ffi::modbus_set_bits_from_bytes(dest.as_mut_ptr(), index as c_int, num_bit as c_uint, bytes.as_ptr()) }
}
/// `get_byte_from_bits` - get the value from many bits
///
/// Reads `num_bit` bits from `src` (one source byte per bit) starting at
/// `index` and packs them into a single returned byte. To obtain a full byte,
/// set `num_bit` to 8; values above 8 cannot fit in the `u8` result.
///
/// # Return value
///
/// A byte containing the bits read.
///
/// # Panics
///
/// Panics if `src` is shorter than `index + num_bit` elements; the underlying
/// C function would otherwise read out of bounds.
///
/// # Parameters
///
/// * `src` - bits source (one `u8` per bit)
/// * `index` - starting position of the read
/// * `num_bit` - number of bits (at most 8) to pack into the result
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// assert_eq!(get_byte_from_bits(&[1u8, 1, 1, 1, 1, 1, 1, 1], 0, 8), 255);
/// ```
pub fn get_byte_from_bits(src: &[u8], index: u8, num_bit: u8) -> u8 {
    let start = index as usize;
    let n = num_bit as usize;
    // The C side reads `num_bit` source bytes — bounds-check here.
    assert!(
        src.len() >= start + n,
        "get_byte_from_bits: src (len {}) too short for index {} + {} bits",
        src.len(),
        index,
        num_bit
    );
    unsafe { ffi::modbus_get_byte_from_bits(src.as_ptr(), index as c_int, num_bit as c_uint) }
}
/// `get_float_abcd` - get a float value from 2 registers in `ABCD` byte order
///
/// The [`get_float_abcd()`](#method.get_float_abcd) function shall get a float from 4 bytes in usual Modbus format.
/// The `src` array must contain two `u16` values; for example, if the first word is set to `0x0020` and the
/// second to `0xF147`, the float value will be read as `123456.0`.
///
/// # Return value
///
/// The function shall return a float.
///
/// # Parameters
///
/// * `src` - array of two `u16` registers (the fixed size makes the FFI read safe)
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
/// assert_eq!(get_float_abcd(&[0x0020, 0xF147]), 123456.0);
/// ```
pub fn get_float_abcd(src: &[u16; 2]) -> f32 {
    unsafe { ffi::modbus_get_float_abcd(src.as_ptr()) }
}
/// `set_float_abcd` - set a float value in 2 registers using `ABCD` byte order
///
/// Encodes `src` as 4 bytes in the usual Modbus (`ABCD`) word order and
/// stores them in the first two words of `dest`.
///
/// # Panics
///
/// Panics if `dest` holds fewer than two `u16` values; the underlying C
/// function would otherwise write out of bounds.
///
/// # Parameters
///
/// * `src` - float value to encode (`f32`)
/// * `dest` - slice receiving the two encoded `u16` registers
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// let mut dest = vec![0; 2];
/// set_float_abcd(123456.0, &mut dest);
/// assert_eq!(&dest, &[0x0020, 0xF147]);
/// ```
pub fn set_float_abcd(src: f32, dest: &mut [u16]) {
    // &mut [u16; 2] is not usable here (callers pass slices), so enforce the
    // length at runtime — the C side unconditionally writes two words.
    assert!(dest.len() >= 2, "set_float_abcd: dest must hold at least 2 registers");
    unsafe { ffi::modbus_set_float_abcd(src, dest.as_mut_ptr()) }
}
/// `get_float_badc` - get a float value from 2 registers in `BADC` byte order
///
/// The [`get_float_badc()`](#method.get_float_badc) function shall get a float from 4 bytes with swapped bytes (`BADC`
/// instead of `ABCD`).
/// The `src` array must contain two `u16` values; for example, if the first word is set to `0x2000` and the second to
/// `0x47F1`, the float value will be read as `123456.0`.
///
/// # Return value
///
/// The function shall return a float.
///
/// # Parameters
///
/// * `src` - array of two `u16` registers (the fixed size makes the FFI read safe)
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// assert_eq!(get_float_badc(&[0x2000, 0x47F1]), 123456.0);
/// ```
pub fn get_float_badc(src: &[u16; 2]) -> f32 {
    unsafe { ffi::modbus_get_float_badc(src.as_ptr()) }
}
/// `set_float_badc` - set a float value in 2 registers using `BADC` byte order
///
/// Encodes `src` as 4 bytes in swapped-byte Modbus format (`BADC` instead of
/// `ABCD`) and stores them in the first two words of `dest`.
///
/// # Panics
///
/// Panics if `dest` holds fewer than two `u16` values; the underlying C
/// function would otherwise write out of bounds.
///
/// # Parameters
///
/// * `src` - float value to encode (`f32`)
/// * `dest` - slice receiving the two encoded `u16` registers
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// let mut dest = vec![0; 2];
/// set_float_badc(123456.0, &mut dest);
/// assert_eq!(&dest, &[0x2000, 0x47F1]);
/// ```
pub fn set_float_badc(src: f32, dest: &mut [u16]) {
    // &mut [u16; 2] is not usable here (callers pass slices), so enforce the
    // length at runtime — the C side unconditionally writes two words.
    assert!(dest.len() >= 2, "set_float_badc: dest must hold at least 2 registers");
    unsafe { ffi::modbus_set_float_badc(src, dest.as_mut_ptr()) }
}
/// `get_float_cdab` - get a float value from 2 registers in `CDAB` byte order
///
/// The [`get_float_cdab()`](#method.get_float_cdab) function shall get a float from 4 bytes with swapped words (`CDAB`
/// instead of `ABCD`).
/// The `src` array must contain two `u16` values; for example, if the first word is set to `0xF147` and the second to
/// `0x0020`, the float value will be read as `123456.0`.
///
/// # Return value
///
/// The function shall return a float.
///
/// # Parameters
///
/// * `src` - array of two `u16` registers (the fixed size makes the FFI read safe)
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// assert_eq!(get_float_cdab(&[0xF147, 0x0020]), 123456.0);
/// ```
pub fn get_float_cdab(src: &[u16; 2]) -> f32 {
    unsafe { ffi::modbus_get_float_cdab(src.as_ptr()) }
}
/// `set_float_cdab` - set a float value in 2 registers using `CDAB` byte order
///
/// Encodes `src` as 4 bytes in swapped-word Modbus format (`CDAB` instead of
/// `ABCD`) and stores them in the first two words of `dest`.
///
/// # Panics
///
/// Panics if `dest` holds fewer than two `u16` values; the underlying C
/// function would otherwise write out of bounds.
///
/// # Parameters
///
/// * `src` - float value to encode (`f32`)
/// * `dest` - slice receiving the two encoded `u16` registers
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// let mut dest = vec![0; 2];
/// set_float_cdab(123456.0, &mut dest);
/// assert_eq!(&dest, &[0xF147, 0x0020]);
/// ```
pub fn set_float_cdab(src: f32, dest: &mut [u16]) {
    // &mut [u16; 2] is not usable here (callers pass slices), so enforce the
    // length at runtime — the C side unconditionally writes two words.
    assert!(dest.len() >= 2, "set_float_cdab: dest must hold at least 2 registers");
    unsafe { ffi::modbus_set_float_cdab(src, dest.as_mut_ptr()) }
}
/// `get_float_dcba` - get a float value from 2 registers in `DCBA` byte order
///
/// The [`get_float_dcba()`](#method.get_float_dcba) function shall get a float from 4 bytes with fully swapped bytes
/// (`DCBA` instead of `ABCD`).
/// The `src` array must contain two `u16` values; for example, if the first word is set to `0x47F1` and the second to
/// `0x2000`, the float value will be read as `123456.0`.
///
/// # Return value
///
/// The function shall return a float.
///
/// # Parameters
///
/// * `src` - array of two `u16` registers (the fixed size makes the FFI read safe)
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// assert_eq!(get_float_dcba(&[0x47F1, 0x2000]), 123456.0);
/// ```
pub fn get_float_dcba(src: &[u16; 2]) -> f32 {
    unsafe { ffi::modbus_get_float_dcba(src.as_ptr()) }
}
/// `set_float_dcba` - set a float value in 2 registers using `DCBA` byte order
///
/// Encodes `src` as 4 bytes in fully swapped Modbus format (`DCBA` instead of
/// `ABCD`) and stores them in the first two words of `dest`.
///
/// # Panics
///
/// Panics if `dest` holds fewer than two `u16` values; the underlying C
/// function would otherwise write out of bounds.
///
/// # Parameters
///
/// * `src` - float value to encode (`f32`)
/// * `dest` - slice receiving the two encoded `u16` registers
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// let mut dest = vec![0; 2];
/// set_float_dcba(123456.0, &mut dest);
/// assert_eq!(&dest, &[0x47F1, 0x2000]);
/// ```
pub fn set_float_dcba(src: f32, dest: &mut [u16]) {
    // &mut [u16; 2] is not usable here (callers pass slices), so enforce the
    // length at runtime — the C side unconditionally writes two words.
    assert!(dest.len() >= 2, "set_float_dcba: dest must hold at least 2 registers");
    unsafe { ffi::modbus_set_float_dcba(src, dest.as_mut_ptr()) }
}
impl Drop for Modbus {
    // Close the connection and release the libmodbus context when the wrapper
    // goes out of scope, so callers normally never need to call
    // `close()`/`free()` themselves.
    fn drop(&mut self) {
        self.close();
        self.free();
    }
}
// fix parameter size
#![allow(dead_code)]
use errors::*;
use libc::{c_int, c_uint};
use libmodbus_sys as ffi;
use std::io::Error;
/// Modbus protocol exceptions
///
/// The numeric value of each variant is the on-wire Modbus exception code.
///
/// Documentation source: https://en.wikipedia.org/wiki/Modbus#Main_Modbus_exception_codes
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Exception {
    /// (1) Illegal Function - Function code received in the query is not recognized or allowed by slave
    IllegalFunction = 1,
    /// (2) Illegal Data Address - Data address of some or all the required entities are not allowed or do not exist in
    /// slave
    IllegalDataAddress = 2,
    /// (3) Illegal Data Value - Value is not accepted by slave
    IllegalDataValue = 3,
    /// (4) Slave Device Failure - Unrecoverable error occurred while slave was attempting to perform requested action
    SlaveOrServerFailure = 4,
    /// (5) Acknowledge - Slave has accepted request and is processing it, but a long duration of time is required.
    /// This response is returned to prevent a timeout error from occurring in the master. Master can next issue a Poll
    /// Program Complete message to determine whether processing is completed
    Acknowledge = 5,
    /// (6) Slave Device Busy - Slave is engaged in processing a long-duration command. Master should retry later
    SlaveDeviceBusy = 6,
    /// (7) Negative Acknowledge - Slave cannot perform the programming functions. Master should request diagnostic or
    /// error information from slave
    NegativeAcknowledge = 7,
    /// (8) Memory Parity Error - Slave detected a parity error in memory. Master can retry the request, but service
    /// may be required on the slave device
    MemoryParity = 8,
    /// (9) Not defined
    NotDefined = 9,
    /// (10) Gateway Path Unavailable - Specialized for Modbus gateways. Indicates a misconfigured gateway
    GatewayPath = 10,
    /// (11) Gateway Target Device Failed to Respond - Specialized for Modbus gateways. Sent when slave fails to respond
    GatewayTarget = 11,
}
/// Modbus function codes
///
/// The numeric value of each variant is the on-wire Modbus function code.
///
/// Documentation source: https://en.wikipedia.org/wiki/Modbus#Supported_function_codes
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FunctionCode {
    /// 0x01 Read Coils
    ReadCoils = 1,
    /// 0x02 Read Discrete Inputs
    ReadDiscreteInputs = 2,
    /// 0x03 Read Multiple Holding Registers
    ReadHoldingRegisters = 3,
    /// 0x04 Read Input Registers
    ReadInputRegisters = 4,
    /// 0x05 Write Single Coil
    WriteSingleCoil = 5,
    /// 0x06 Write Single Holding Register
    WriteSingleRegister = 6,
    /// 0x07 Read Exception Status
    ReadExceptionStatus = 7,
    /// 0x0F (15) Write Multiple Coils
    WriteMultipleCoils = 15,
    /// 0x10 (16) Write Multiple Holding Registers
    WriteMultipleRegisters = 16,
    /// 0x11 (17) Report Slave ID
    ReportSlaveId = 17,
    /// 0x16 (22) Mask Write Register
    MaskWriteRegister = 22,
    /// 0x17 (23) Read/Write Multiple Registers
    WriteAndReadRegisters = 23,
}
/// Error recovery modes accepted by `Modbus::set_error_recovery`.
///
/// The modes are mask values on the C side and so are complementary: both can
/// be enabled at once.
// PartialEq/Eq/Hash added for consistency with the other public enums.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum ErrorRecoveryMode {
    /// Reconnect automatically when the link fails.
    Link,
    /// Sleep-and-flush clean-up after an unexpected protocol response.
    Protocol,
}
impl ErrorRecoveryMode {
fn as_raw(&self) -> ffi::modbus_error_recovery_mode {
use ErrorRecoveryMode::*;
match *self {
Link => ffi::modbus_error_recovery_mode_MODBUS_ERROR_RECOVERY_LINK,
Protocol => ffi::modbus_error_recovery_mode_MODBUS_ERROR_RECOVERY_PROTOCOL,
}
}
}
/// Timeout struct
///
/// Used with the timeout accessors such as `get_byte_timeout`,
/// `set_byte_timeout`, `get_response_timeout` and `set_response_timeout`.
///
/// * The value of **usec** must be in the range 0 to 999999.
/// * `Timeout::default()` is `{ sec: 0, usec: 0 }`, which libmodbus treats
///   as "timeout disabled" in the setters that document that behavior.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub struct Timeout {
    // Seconds component of the interval.
    pub sec: u32,
    // Microseconds component; must stay within 0..=999_999.
    pub usec: u32,
}
/// Safe interface for [libmodbus](http://libmodbus.org)
///
/// The different parts of libmodbus are implemented as traits. The modules of this crate contains these
/// traits and a implementation with a, hopefully safe, interface.
///
pub struct Modbus {
    // Raw pointer to the underlying `modbus_t` context, owned by this struct
    // and released in `free()`/`Drop`.
    // NOTE(review): the field is `pub`, so external code can mutate or alias
    // the raw pointer and break the wrapper's ownership assumptions — consider
    // making it private in a future breaking release.
    pub ctx: *mut ffi::modbus_t,
}
impl Modbus {
    // Constants
    // Protocol limits and defaults re-exported from `libmodbus_sys` as
    // associated constants, so users need not depend on the sys crate.
    pub const ENOBASE: u32 = ffi::MODBUS_ENOBASE;
    pub const MAX_ADU_LENGTH: usize = ffi::MODBUS_MAX_ADU_LENGTH as usize;
    pub const MAX_PDU_LENGTH: usize = ffi::MODBUS_MAX_PDU_LENGTH as usize;
    pub const MAX_READ_BITS: usize = ffi::MODBUS_MAX_READ_BITS as usize;
    pub const MAX_READ_REGISTERS: usize = ffi::MODBUS_MAX_READ_REGISTERS as usize;
    pub const MAX_WR_READ_REGISTERS: usize = ffi::MODBUS_MAX_WR_READ_REGISTERS as usize;
    pub const MAX_WR_WRITE_REGISTERS: usize = ffi::MODBUS_MAX_WR_WRITE_REGISTERS as usize;
    pub const MAX_WRITE_BITS: usize = ffi::MODBUS_MAX_WRITE_BITS as usize;
    pub const MAX_WRITE_REGISTERS: usize = ffi::MODBUS_MAX_WRITE_REGISTERS as usize;
    pub const RTU_MAX_ADU_LENGTH: usize = ffi::MODBUS_RTU_MAX_ADU_LENGTH as usize;
    pub const TCP_DEFAULT_PORT: u32 = ffi::MODBUS_TCP_DEFAULT_PORT;
    pub const TCP_MAX_ADU_LENGTH: u32 = ffi::MODBUS_TCP_MAX_ADU_LENGTH;
    pub const TCP_SLAVE: u32 = ffi::MODBUS_TCP_SLAVE;
/// `connect` - establish a Modbus connection
///
/// Establishes a connection to a Modbus server, a network or a bus.
///
/// # Return value
///
/// `Ok(())` on success, otherwise an `Err` carrying the OS error.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP};
///
/// // create server
/// let mut server = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// // create client
/// let client = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// // start server in listen mode
/// let _ = server.tcp_listen(1).unwrap();
///
/// assert!(client.connect().is_ok())
/// ```
pub fn connect(&self) -> Result<()> {
    let rc = unsafe { ffi::modbus_connect(self.ctx) };
    match rc {
        -1 => bail!(Error::last_os_error()),
        0 => Ok(()),
        // Anything else means the linked libmodbus ABI does not match our bindings.
        _ => panic!("libmodbus API incompatible response"),
    }
}
/// `flush` - flush non-transmitted data
///
/// Discards data received but not yet read from the socket or file descriptor
/// associated with this context.
///
/// # Return value
///
/// `Ok(())` on success, otherwise an `Err` carrying the OS error.
///
/// # Examples
///
/// ```
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert!(modbus.flush().is_ok());
/// ```
pub fn flush(&self) -> Result<()> {
    let rc = unsafe { ffi::modbus_flush(self.ctx) };
    match rc {
        -1 => bail!(Error::last_os_error()),
        0 => Ok(()),
        _ => panic!("libmodbus API incompatible response"),
    }
}
/// `set_slave` - set slave number in the context
///
/// The [`set_slave()`](#method.set_slave) function shall set the slave number in the libmodbus context.
/// The behavior depends of network and the role of the device:
///
/// RTU
/// Define the slave ID of the remote device to talk in master mode or set the internal slave ID in slave mode.
/// According to the protocol, a Modbus device must only accept message holding its slave number or the special
/// broadcast number.
/// TCP
/// The slave number is only required in TCP if the message must reach a device on a serial network.
/// Some not compliant devices or software (such as modpoll) uses the slave ID as unit identifier,
/// that’s incorrect (cf page 23 of Modbus Messaging Implementation Guide v1.0b) but without the slave value,
/// the faulty remote device or software drops the requests!
/// The special value MODBUS_TCP_SLAVE (0xFF) can be used in TCP mode to restore the default value.
/// The broadcast address is MODBUS_BROADCAST_ADDRESS.
/// This special value must be use when you want all Modbus devices of the network receive the request.
///
/// # Return value
///
/// The function return an OK Result if successful. Otherwise it contains an Error.
///
/// # Parameters
///
/// * `slave` - new slave ID
///
/// # Examples
///
/// ```
/// use libmodbus_rs::{Modbus, ModbusRTU};
///
/// const YOUR_DEVICE_ID: u8 = 1;
/// let mut modbus = Modbus::new_rtu("/dev/ttyUSB0", 115200, 'N', 8, 1).unwrap();
///
/// assert!(modbus.set_slave(YOUR_DEVICE_ID).is_ok());
/// ```
pub fn set_slave(&mut self, slave: u8) -> Result<()> {
unsafe {
match ffi::modbus_set_slave(self.ctx, slave as c_int) {
-1 => bail!(Error::last_os_error()),
0 => Ok(()),
_ => panic!("libmodbus API incompatible response"),
}
}
}
/// `set_debug` - set debug flag of the context
///
/// The [`set_debug()`](#method.set_debug) function shall set the debug flag of the modbus_t context by using the
/// argument flag.
/// By default, the boolean flag is set to FALSE. When the flag value is set to TRUE, many verbose messages are
/// displayed on stdout and stderr.
/// For example, this flag is useful to display the bytes of the Modbus messages.
///
/// ```bash
/// [00][14][00][00][00][06][12][03][00][6B][00][03]
/// Waiting for a confirmation…
/// <00><14><00><00><00><09><12><03><06><02><2B><00><00><00><00>
/// ```
///
/// # Return value
///
/// The function return an OK Result if successful. Otherwise it contains an Error.
///
/// # Parameters
///
/// * `flag` - `true` or `false`, enables or disables debug mode
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP};
///
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert!(modbus.set_debug(true).is_ok());
/// ```
pub fn set_debug(&mut self, flag: bool) -> Result<()> {
unsafe {
// libmodbus returns -1 on failure and 0 on success.
match ffi::modbus_set_debug(self.ctx, flag as c_int) {
-1 => bail!(Error::last_os_error()),
0 => Ok(()),
_ => panic!("libmodbus API incompatible response"),
}
}
}
/// `get_byte_timeout` - get timeout between bytes
///
/// Returns the timeout interval used between two consecutive bytes of the
/// same message, wrapped in a [`Timeout`](struct.Timeout.html).
///
/// # Return value
///
/// A Result holding the current byte [`Timeout`](struct.Timeout.html) on
/// success, otherwise an Error.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP, Timeout};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert_eq!(modbus.get_byte_timeout().unwrap(), Timeout { sec: 0, usec: 500000 });
/// ```
pub fn get_byte_timeout(&self) -> Result<Timeout> {
let mut timeout = Timeout { sec: 0, usec: 0 };
// Keep the unsafe region limited to the FFI call itself.
let rc = unsafe { ffi::modbus_get_byte_timeout(self.ctx, &mut timeout.sec, &mut timeout.usec) };
match rc {
0 => Ok(timeout),
-1 => bail!(Error::last_os_error()),
_ => panic!("libmodbus API incompatible response"),
}
}
/// `set_byte_timeout` - set timeout between bytes
///
/// Sets the timeout interval between two consecutive bytes of the same
/// message. If more time than this elapses between bytes, an ETIMEDOUT error
/// is raised by the function waiting for a response.
///
/// The `usec` member of the [`Timeout`](struct.Timeout.html) must lie in the
/// range 0 to 999999.
///
/// When both `sec` and `usec` are zero, the byte timeout is disabled and the
/// response timeout alone governs the whole response: the full confirmation
/// must arrive before the response timeout expires. With a byte timeout set,
/// the response timeout only bounds the wait for the first byte.
///
/// # Return value
///
/// An OK Result on success, otherwise an Error.
///
/// # Parameters
///
/// * `timeout` - Timeout struct with `sec` and `usec`
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP, Timeout};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// let timeout = Timeout { sec: 1, usec: 500000 };
/// assert!(modbus.set_byte_timeout(timeout).is_ok());
/// ```
pub fn set_byte_timeout(&mut self, timeout: Timeout) -> Result<()> {
// Keep the unsafe region limited to the FFI call itself.
let rc = unsafe { ffi::modbus_set_byte_timeout(self.ctx, timeout.sec, timeout.usec) };
match rc {
0 => Ok(()),
-1 => bail!(Error::last_os_error()),
_ => panic!("libmodbus API incompatible response"),
}
}
/// `get_response_timeout` - get timeout for response
///
/// Returns the timeout interval the library uses while waiting for a
/// response, wrapped in a [`Timeout`](struct.Timeout.html).
///
/// # Return value
///
/// A Result holding the current response [`Timeout`](struct.Timeout.html) on
/// success, otherwise an Error.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP, Timeout};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert_eq!(modbus.get_response_timeout().unwrap(), Timeout { sec: 0, usec: 500000 });
/// ```
pub fn get_response_timeout(&self) -> Result<Timeout> {
let mut timeout = Timeout { sec: 0, usec: 0 };
// Keep the unsafe region limited to the FFI call itself.
let rc = unsafe { ffi::modbus_get_response_timeout(self.ctx, &mut timeout.sec, &mut timeout.usec) };
match rc {
0 => Ok(timeout),
-1 => bail!(Error::last_os_error()),
_ => panic!("libmodbus API incompatible response"),
}
}
/// `set_response_timeout` - set timeout for response
///
/// Sets the timeout interval used while waiting for a response. When a byte
/// timeout is active, exceeding this interval before the first byte of the
/// response arrives raises an ETIMEDOUT error. When the byte timeout is
/// disabled, the full confirmation response must be received before this
/// timeout expires.
///
/// If both `sec` and `usec` of the [`Timeout`](struct.Timeout.html) are zero,
/// this timeout is not used at all.
///
/// # Return value
///
/// An OK Result on success, otherwise an Error.
///
/// # Parameters
///
/// * `timeout` - [`Timeout`](struct.Timeout.html) with `sec` and `usec`
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP, Timeout};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// let timeout = Timeout { sec: 1, usec: 500000 };
/// assert!(modbus.set_response_timeout(timeout).is_ok());
/// ```
pub fn set_response_timeout(&mut self, timeout: Timeout) -> Result<()> {
// Keep the unsafe region limited to the FFI call itself.
let rc = unsafe { ffi::modbus_set_response_timeout(self.ctx, timeout.sec, timeout.usec) };
match rc {
0 => Ok(()),
-1 => bail!(Error::last_os_error()),
_ => panic!("libmodbus API incompatible response"),
}
}
/// `set_error_recovery` - set the error recovery mode
///
/// The [`set_error_recovery()`](#method.set_error_recovery) function shall set the error recovery mode to apply
/// when the connection fails or the byte received is not expected.
///
/// By default there is no error recovery so the application is responsible for controlling the error values
/// returned by libmodbus functions and for handling them if necessary.
///
/// When `ErrorRecoveryMode::Link` is set, the library will attempt a reconnection after a delay defined by
/// response timeout ([`set_response_timeout()`](#method.set_response_timeout)) of the libmodbus context.
/// This mode will try an infinite close/connect loop until success on send call and will just try one time to
/// re-establish the connection on select/read calls (if the connection was down, the values to read are certainly
/// not available any more after reconnection, except for slave/server).
/// This mode will also run flush requests after a delay based on the current response timeout in some situations
/// (eg. timeout of select call).
/// The reconnection attempt can hang for several seconds if the network to the remote target unit is down.
///
/// When `ErrorRecoveryMode::Protocol` is set, a sleep and flush sequence will be used to clean up the ongoing
/// communication, this can occur when the message length is invalid, the TID is wrong or the received function
/// code is not the expected one.
/// The response timeout delay will be used to sleep.
///
/// The modes are mask values and so they are complementary.
///
/// It’s not recommended to enable error recovery for slave/server.
///
/// # Return value
///
/// The function return an OK Result if successful. Otherwise it contains an Error.
///
/// # Parameters
///
/// * [`ErrorRecoveryMode`](struct.ErrorRecoveryMode.html) - error recovery modes to enable; `None` disables recovery
///
/// # Examples
///
/// ```rust,no_run
/// use libmodbus_rs::{Modbus, ModbusTCP, ErrorRecoveryMode};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert!(modbus.set_error_recovery(Some(&[ErrorRecoveryMode::Link, ErrorRecoveryMode::Protocol])).is_ok());
/// ```
pub fn set_error_recovery(&mut self, flags: Option<&[ErrorRecoveryMode]>) -> Result<()> {
// OR all requested modes into one bitmask; `None` yields the NONE mask,
// which switches error recovery off.
let flags = flags.unwrap_or(&[])
.iter()
.fold(ffi::modbus_error_recovery_mode_MODBUS_ERROR_RECOVERY_NONE, |acc, v| acc | v.as_raw());
unsafe {
match ffi::modbus_set_error_recovery(self.ctx, flags) {
-1 => bail!(Error::last_os_error()),
0 => Ok(()),
_ => panic!("libmodbus API incompatible response"),
}
}
}
// TODO: Add examples from: http://zzeroo.github.io/libmodbus-rs/libmodbus/modbus_set_socket.html
/// `set_socket` - set socket of the context
///
/// The [`set_socket()`](#method.set_socket) function shall set the socket or file descriptor in the libmodbus
/// context.
/// This function is useful for managing multiple client connections to the same server.
///
/// # Return values
///
/// The function return an OK Result if successful. Otherwise it contains an Error.
///
/// # Parameters
///
/// * `socket` - socket or file descriptor to store in the context
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
///
/// assert!(modbus.set_socket(1337).is_ok());
/// ```
pub fn set_socket(&mut self, socket: i32) -> Result<()> {
unsafe {
match ffi::modbus_set_socket(self.ctx, socket) {
-1 => bail!(Error::last_os_error()),
0 => Ok(()),
// Consistent with the sibling wrappers: any other return value
// means the C API changed underneath us.
_ => panic!("libmodbus API incompatible response"),
}
}
}
/// `get_socket` - set socket of the context
///
/// The [`get_socket()`](#method.get_socket) function shall return the current socket or file descriptor of the
/// libmodbus context.
///
/// # Return value
///
/// The function returns a Result containing the current socket or file descriptor of the context if successful.
/// Otherwise it contains an Error.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// let _ = modbus.set_socket(1337).unwrap();
/// assert_eq!(modbus.get_socket().unwrap(), 1337);
/// ```
pub fn get_socket(&self) -> Result<i32> {
unsafe {
match ffi::modbus_get_socket(self.ctx) {
-1 => bail!(Error::last_os_error()),
socket => Ok(socket),
}
}
}
/// `get_header_length` - retrieve the current header length
///
/// The [`get_header_length()`](#method.get_header_length) function shall retrieve the current header length from
/// the backend.
/// This function is convenient to manipulate a message and so it's limited to low-level operations.
///
/// # Return values
///
/// The header length as integer value.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// assert_eq!(modbus.get_header_length(), 7);
/// ```
pub fn get_header_length(&self) -> i32 {
unsafe { ffi::modbus_get_header_length(self.ctx) }
}
/// `reply_exception` - send an exception response
///
/// The modbus_reply_exception() function shall send an exception response based on the exception_code in argument.
///
/// The libmodbus provides the following exception codes:
///
/// * Modbus::Exception::IllegalFunction (1)
/// * Modbus::Exception::IllegalDataAddress (2)
/// * Modbus::Exception::IllegalDataValue (3)
/// * Modbus::Exception::SlaveOrServerFailure (4)
/// * Modbus::Exception::Acknowledge (5)
/// * Modbus::Exception::SlaveDeviceBusy (6)
/// * Modbus::Exception::NegativeAcknowledge (7)
/// * Modbus::Exception::MemoryParity (8)
/// * Modbus::Exception::NotDefined (9)
/// * Modbus::Exception::GatewayPath (10)
/// * Modbus::Exception::GatewayTarget (11)
///
/// The initial request `request` is required to build a valid response.
///
/// # Return value
///
/// The function returns the length of the response sent if successful, or an Error.
///
/// # Parameters
///
/// * `request` - initial request, required to build a valid response
/// * `exception_code` - Exception Code
///
/// # Examples
///
/// ```rust,no_run
/// use libmodbus_rs::{Modbus, ModbusClient, ModbusTCP};
/// let modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// use libmodbus_rs::Exception;
///
/// let request: Vec<u8> = vec![0x01];
/// assert_eq!(modbus.reply_exception(&request, Exception::Acknowledge as u32).unwrap(), 9);
/// ```
pub fn reply_exception(&self, request: &[u8], exception_code: u32) -> Result<i32> {
unsafe {
// On success libmodbus returns the number of bytes sent.
match ffi::modbus_reply_exception(self.ctx, request.as_ptr(), exception_code) {
-1 => bail!(Error::last_os_error()),
len => Ok(len),
}
}
}
/// `close` - close a Modbus connection
///
/// The [`close()`](#method.close) function shall close the connection established with the backend set in the
/// context.
///
/// **It should not be necessary to call this function, because Rust's `Drop` trait handles that for you!**
///
/// # Examples
///
/// ```
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// modbus.close();
/// ```
pub fn close(&self) {
unsafe {
ffi::modbus_close(self.ctx);
}
}
/// `free` - free a libmodbus context
///
/// The [`free()`](#method.free) function shall free an allocated modbus_t structure.
///
/// **It should not be necessary to call this function, because Rust's `Drop` trait handles that for you!**
///
/// # Examples
///
/// ```
/// use libmodbus_rs::{Modbus, ModbusTCP};
/// let mut modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// modbus.free();
/// ```
pub fn free(&mut self) {
unsafe {
ffi::modbus_free(self.ctx);
}
}
}
/// `set_bits_from_byte` - set many bits from a single byte value
///
/// The [`set_bits_from_byte()`](#method.set_bits_from_byte) function shall set many bits from a single byte.
/// All 8 bits from the byte value will be written to `dest` array starting at `index` position.
///
/// # Parameters
///
/// * `dest` - destination slice
/// * `index` - starting position where the bit should written
/// * `value` - set many bits from a single byte. All 8 bits from the byte `value` will be written to `dest` slice
/// starting at `index` position.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusMapping, ModbusTCP};
/// use libmodbus_rs::prelude::*;
///
/// let modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// let modbus_mapping = ModbusMapping::new(5, 5, 5, 5).unwrap();
/// // before
/// assert_eq!(modbus_mapping.get_input_bits_mut(), [0u8, 0, 0, 0, 0]);
///
/// set_bits_from_byte(modbus_mapping.get_input_bits_mut(), 2, 0b1111_1111);
///
/// // after
/// assert_eq!(modbus_mapping.get_input_bits_mut(), [0u8, 0, 1, 1, 1]);
/// ```
pub fn set_bits_from_byte(dest: &mut [u8], index: u32, value: u8) {
// NOTE(review): the C side writes up to 8 entries past `index`; the caller
// must ensure `dest` is large enough — no bounds check happens here.
unsafe { ffi::modbus_set_bits_from_byte(dest.as_mut_ptr(), index as c_int, value) }
}
/// `set_bits_from_bytes` - set many bits from an array of bytes
///
/// The [`set_bits_from_bytes()`](#method.set_bits_from_bytes) function shall set many bits from an array of bytes.
/// `num_bit` bits read from the `bytes` slice will be written to the `dest` array starting at `index` position.
///
/// # Parameters
///
/// * `dest` - destination slice
/// * `index` - starting position where the bit should written
/// * `num_bit` - how many bits should written
/// * `bytes` - All the bits of the `bytes` parameter, read from the first position of the vec `bytes` are written as
/// bits in the `dest` vec,
/// starting at position `index`
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusMapping, ModbusTCP};
/// use libmodbus_rs::prelude::*;
///
/// let modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// let modbus_mapping = ModbusMapping::new(5, 5, 5, 5).unwrap();
///
/// // before
/// assert_eq!(modbus_mapping.get_input_bits_mut(), [0u8, 0, 0, 0, 0]);
///
/// set_bits_from_bytes(modbus_mapping.get_input_bits_mut(), 0, 2, &[0b0000_1111]);
///
/// // after
/// assert_eq!(modbus_mapping.get_input_bits_mut(), [1u8, 1, 0, 0, 0]);
/// ```
pub fn set_bits_from_bytes(dest: &mut [u8], index: u16, num_bit: u16, bytes: &[u8]) {
// NOTE(review): no bounds check — the caller must ensure `dest` can hold
// `index + num_bit` entries and `bytes` provides `num_bit` bits.
unsafe { ffi::modbus_set_bits_from_bytes(dest.as_mut_ptr(), index as c_int, num_bit as c_uint, bytes.as_ptr()) }
}
/// `get_byte_from_bits` - get the value from many bits
///
/// The [`get_byte_from_bits()`](#method.get_byte_from_bits) function shall extract a value from many bits.
/// All `num_bit` bits from `src` at position `index` will be read as a single value. To obtain a full byte, set `num_bit` to 8.
///
/// # Return value
///
/// The function shall return a byte containing the bits read.
///
/// # Parameters
///
/// * `src` - bits source
/// * `index` - starting position where the bit will be read
/// * `num_bit` - All `num_bit` bits from `src` at position `index` will be read as a single value. To obtain a full
/// byte, set `num_bit` to 8.
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::{Modbus, ModbusMapping, ModbusTCP};
/// use libmodbus_rs::prelude::*;
///
/// let modbus = Modbus::new_tcp("127.0.0.1", 1502).unwrap();
/// let modbus_mapping = ModbusMapping::new(5, 5, 5, 5).unwrap();
///
/// assert_eq!(get_byte_from_bits(&[0b1111_1111], 0 ,8), 255);
/// ```
pub fn get_byte_from_bits(src: &[u8], index: u8, num_bit: u16) -> u8 {
// NOTE(review): no bounds check — the caller must ensure `src` holds at
// least `index + num_bit` entries.
unsafe { ffi::modbus_get_byte_from_bits(src.as_ptr(), index as c_int, num_bit as c_uint) }
}
/// `get_float_abcd` - get a float value from 2 registers in `ABCD` byte order
///
/// The [`get_float_abcd()`](#method.get_float_abcd) function shall get a float from 4 bytes in usual Modbus format.
/// The `src` slice must contain two `u16` values, for example, if the first word is set to `0x0020` and the
/// second to `0xF147`, the float value will be read as `123456.0`.
///
/// # Return value
///
/// The function shall return a float.
///
/// # Parameters
///
/// * `src` - slice of two `u16` values
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
/// assert_eq!(get_float_abcd(&[0x0020, 0xF147]), 123456.0);
/// ```
pub fn get_float_abcd(src: &[u16; 2]) -> f32 {
unsafe { ffi::modbus_get_float_abcd(src.as_ptr()) }
}
/// `set_float_abcd` - set a float value in 2 registers using `ABCD` byte order
///
/// The [`set_float_abcd()`](#method.set_float_abcd) function shall set a float to 4 bytes in usual Modbus format.
/// The `dest` slice must contain two `u16` values to be able to store the full result of the conversion.
///
/// # Parameters
///
/// * `src` - float to 4 bytes (`f32`)
/// * `dest` - slice must contain two `u16` values
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// let mut dest = vec![0; 2];
/// set_float_abcd(123456.0, &mut dest);
/// assert_eq!(&dest, &[0x0020, 0xF147]);
/// ```
pub fn set_float_abcd(src: f32, dest: &mut [u16]) {
// &mut [u16; 2] is not working here
// NOTE(review): the C side writes two registers unconditionally — `dest`
// must have length >= 2.
unsafe { ffi::modbus_set_float_abcd(src, dest.as_mut_ptr()) }
}
/// `get_float_badc` - get a float value from 2 registers in `BADC` byte order
///
/// The [`get_float_badc()`](#method.get_float_badc) function shall get a float from 4 bytes with swapped bytes (`BADC`
/// instead of `ABCD`).
/// The `src` slice must contain two `u16` values, for example, if the first word is set to `0x2000` and the second to
/// `0x47F1`, the float value will be read as `123456.0`.
///
/// # Return value
///
/// The function shall return a float.
///
/// # Parameters
///
/// * `src` - slice of two `u16` values
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// assert_eq!(get_float_badc(&[0x2000, 0x47F1]), 123456.0);
/// ```
pub fn get_float_badc(src: &[u16; 2]) -> f32 {
unsafe { ffi::modbus_get_float_badc(src.as_ptr()) }
}
/// `set_float_badc` - set a float value in 2 registers using `BADC` byte order
///
/// The [`set_float_badc()`](#method.set_float_badc) function shall set a float to 4 bytes in swapped bytes Modbus
/// format (`BADC` instead of `ABCD`).
/// The dest slice must contain two `u16` values to be able to store the full result of the conversion.
///
/// # Parameters
///
/// * `src` - float to 4 bytes (`f32`)
/// * `dest` - slice must contain two `u16` values
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// let mut dest = vec![0; 2];
/// set_float_badc(123456.0, &mut dest);
/// assert_eq!(&dest, &[0x2000, 0x47F1]);
/// ```
pub fn set_float_badc(src: f32, dest: &mut [u16]) {
// &mut [u16; 2] is not working here
// NOTE(review): the C side writes two registers unconditionally — `dest`
// must have length >= 2.
unsafe { ffi::modbus_set_float_badc(src, dest.as_mut_ptr()) }
}
/// `get_float_cdab` - get a float value from 2 registers in `CDAB` byte order
///
/// The [`get_float_cdab()`](#method.get_float_cdab) function shall get a float from 4 bytes with swapped bytes (`CDAB`
/// instead of `ABCD`).
/// The `src` slice must contain two `u16` values, for example, if the first word is set to `0xF147` and the second to
/// `0x0020`, the float value will be read as `123456.0`.
///
/// # Return value
///
/// The function shall return a float.
///
/// # Parameters
///
/// * `src` - slice of two `u16` values
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// assert_eq!(get_float_cdab(&[0xF147, 0x0020]), 123456.0);
/// ```
pub fn get_float_cdab(src: &[u16; 2]) -> f32 {
unsafe { ffi::modbus_get_float_cdab(src.as_ptr()) }
}
/// `set_float_cdab` - set a float value in 2 registers using `CDAB` byte order
///
/// The [`set_float_cdab()`](#method.set_float_cdab) function shall set a float to 4 bytes in swapped bytes Modbus
/// format (`CDAB` instead of `ABCD`).
/// The `dest` slice must contain two `u16` values to be able to store the full result of the conversion.
///
/// # Parameters
///
/// * `src` - float to 4 bytes (`f32`)
/// * `dest` - slice must contain two `u16` values
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// let mut dest = vec![0; 2];
/// set_float_cdab(123456.0, &mut dest);
/// assert_eq!(&dest, &[0xF147, 0x0020]);
/// ```
pub fn set_float_cdab(src: f32, dest: &mut [u16]) {
// &mut [u16; 2] is not working here
// NOTE(review): the C side writes two registers unconditionally — `dest`
// must have length >= 2.
unsafe { ffi::modbus_set_float_cdab(src, dest.as_mut_ptr()) }
}
/// `get_float_dcba` - get a float value from 2 registers in `DCBA` byte order
///
/// The [`get_float_dcba()`](#method.get_float_dcba) function shall get a float from 4 bytes with swapped bytes (`DCBA`
/// instead of `ABCD`).
/// The `src` slice must contain two `u16` values, for example, if the first word is set to `0x47F1` and the second to
/// `0x2000`, the float value will be read as `123456.0`.
///
/// # Return value
///
/// The function shall return a float.
///
/// # Parameters
///
/// * `src` - slice of two `u16` values
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// assert_eq!(get_float_dcba(&[0x47F1, 0x2000]), 123456.0);
/// ```
pub fn get_float_dcba(src: &[u16; 2]) -> f32 {
unsafe { ffi::modbus_get_float_dcba(src.as_ptr()) }
}
/// `set_float_dcba` - set a float value in 2 registers using `DCBA` byte order
///
/// The [`set_float_dcba()`](#method.set_float_dcba) function shall set a float to 4 bytes in swapped bytes Modbus
/// format (`DCBA` instead of `ABCD`).
/// The `dest` slice must contain two `u16` values to be able to store the full result of the conversion.
///
/// # Parameters
///
/// * `src` - float to 4 bytes (`f32`)
/// * `dest` - slice must contain two `u16` values
///
/// # Examples
///
/// ```rust
/// use libmodbus_rs::prelude::*;
///
/// let mut dest = vec![0; 2];
/// set_float_dcba(123456.0, &mut dest);
/// assert_eq!(&dest, &[0x47F1, 0x2000]);
/// ```
pub fn set_float_dcba(src: f32, dest: &mut [u16]) {
// &mut [u16; 2] is not working here
// NOTE(review): the C side writes two registers unconditionally — `dest`
// must have length >= 2.
unsafe { ffi::modbus_set_float_dcba(src, dest.as_mut_ptr()) }
}
impl Drop for Modbus {
// Closes the connection and then frees the libmodbus context when the
// wrapper goes out of scope, mirroring the close()/free() order that the
// libmodbus C examples use.
fn drop(&mut self) {
self.close();
self.free();
}
}
|
use std::hash::{Hash};
use std::collections::HashSet;
use std::fmt;
use std::slice;
use std::iter;
use test;
// FIXME: These aren't stable, so a public wrapper of node/edge indices
// should be lifetimed just like pointers.
/// Index of a node in an `OGraph`. Stable across additions, but removal of a
/// node may relocate (renumber) one other node.
#[deriving(Copy, Clone, Show, PartialEq, PartialOrd, Eq, Hash)]
pub struct NodeIndex(uint);
/// Index of an edge in an `OGraph`. Stable across additions, but removal of
/// an edge may relocate (renumber) one other edge.
#[deriving(Copy, Clone, Show, PartialEq, PartialOrd, Eq, Hash)]
pub struct EdgeIndex(uint);
/// Sentinel (uint::MAX) marking the end of an intrusive edge list.
pub const EdgeEnd: EdgeIndex = EdgeIndex(::std::uint::MAX);
//const InvalidNode: NodeIndex = NodeIndex(::std::uint::MAX);
/// Edge-list direction: index into the `next`/`node` arrays stored on
/// `Node` and `Edge` (0 = outgoing, 1 = incoming).
#[deriving(Copy, Clone, Show, PartialEq)]
enum Dir {
Out = 0,
In = 1
}
// Both directions, in the order used for the per-node/per-edge arrays.
const DIRECTIONS: [Dir, ..2] = [Dir::Out, Dir::In];
/// Graph node holding user data **N** plus the heads of its two intrusive
/// edge lists.
#[deriving(Show)]
pub struct Node<N> {
pub data: N,
/// Next edge in outgoing and incoming edge lists.
next: [EdgeIndex, ..2],
}
impl<N> Node<N>
{
/// Heads of this node's outgoing (index 0) and incoming (index 1)
/// edge lists; `EdgeEnd` marks an empty list.
pub fn next_edges(&self) -> [EdgeIndex, ..2]
{
self.next
}
}
/// Graph edge holding user data **E**, the links of the two intrusive edge
/// lists it participates in, and its endpoint nodes.
#[deriving(Show, Copy)]
pub struct Edge<E> {
pub data: E,
/// Next edge in outgoing and incoming edge lists.
next: [EdgeIndex, ..2],
/// Start and End node index
node: [NodeIndex, ..2],
}
impl<E> Edge<E>
{
/// Successors of this edge in the outgoing (index 0) and incoming
/// (index 1) edge lists; `EdgeEnd` marks the end of a list.
pub fn next_edges(&self) -> [EdgeIndex, ..2]
{
self.next
}
/// Node this edge starts from.
pub fn source(&self) -> NodeIndex
{
self.node[0]
}
/// Node this edge points to.
pub fn target(&self) -> NodeIndex
{
self.node[1]
}
}
/// **OGraph\<N, E\>** is a directed graph using an adjacency list representation.
///
/// The graph maintains unique indices for nodes and edges, so both node and edge
/// data may be accessed mutably.
///
/// Based upon the graph implementation in rustc.
///
/// **NodeIndex** and **EdgeIndex** are types that act as references to nodes and edges,
/// but these are only stable across certain operations. Adding to the graph keeps
/// all indices stable, but removing a node will force another node to shift its index.
///
/// Removing an edge also shifts the index of another edge.
//#[deriving(Show)]
pub struct OGraph<N, E> {
// Flat arenas; NodeIndex/EdgeIndex are positions into these vectors.
nodes: Vec<Node<N>>,
edges: Vec<Edge<E>>,
}
impl<N: fmt::Show, E: fmt::Show> fmt::Show for OGraph<N, E>
{
// Debug rendering: one line per node, then one line per edge.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for n in self.nodes.iter() {
try!(writeln!(f, "{}", n));
}
for n in self.edges.iter() {
try!(writeln!(f, "{}", n));
}
Ok(())
}
}
/// Result of borrowing two slice positions mutably at once: both when the
/// indices are distinct and in bounds, one when they coincide, none when
/// either is out of bounds.
pub enum Pair<'a, T: 'a> {
Both(&'a mut T, &'a mut T),
One(&'a mut T),
None,
}
/// Obtain mutable references to two elements of **slc** at once.
/// Returns `Pair::One` when `a == b`, `Pair::None` when either index is out
/// of bounds, and `Pair::Both` otherwise.
pub fn index_twice<T>(slc: &mut [T], a: uint, b: uint) -> Pair<T>
{
if a == b {
slc.get_mut(a).map_or(Pair::None, Pair::One)
} else {
if a >= slc.len() || b >= slc.len() {
Pair::None
} else {
// safe because a, b are in bounds and distinct
// (the raw-pointer round trip erases the overlapping &mut borrow
// that the borrow checker would otherwise reject)
unsafe {
let ar = &mut *(slc.unsafe_mut(a) as *mut _);
let br = &mut *(slc.unsafe_mut(b) as *mut _);
Pair::Both(ar, br)
}
}
}
}
/// Iterate over an edge list.
/// Follows the intrusive `next[d]` chain starting at **fst**, calling **f**
/// with each edge's index and a mutable reference; stops when **f** returns
/// false or the `EdgeEnd` sentinel is reached.
fn walk_edge_list<E, F: FnMut(EdgeIndex, &mut Edge<E>) -> bool>(
fst: EdgeIndex, edges: &mut [Edge<E>], d: Dir, mut f: F)
{
let k = d as uint;
let mut cur = fst;
loop {
match edges.get_mut(cur.0) {
None => {
// Only the sentinel should ever fall outside the edge arena.
debug_assert!(cur == EdgeEnd);
break;
}
Some(curedge) => {
if !f(cur, curedge) {
break;
}
// Capture the successor before the next lookup.
cur = curedge.next[k];
}
}
}
}
impl<N, E> OGraph<N, E>
//where N: fmt::Show
{
/// Create a new, empty OGraph.
pub fn new() -> OGraph<N, E>
{
OGraph{nodes: Vec::new(), edges: Vec::new()}
}
/// Return the number of nodes (vertices) in the graph.
pub fn node_count(&self) -> uint
{
self.nodes.len()
}
/// Add a node with weight **data** to the graph and return its index.
pub fn add_node(&mut self, data: N) -> NodeIndex
{
// A fresh node starts with both edge lists empty.
let node = Node{data: data, next: [EdgeEnd, EdgeEnd]};
let node_idx = NodeIndex(self.nodes.len());
self.nodes.push(node);
node_idx
}
/// Access node data for node **a**, or **None** if it doesn't exist.
pub fn node(&self, a: NodeIndex) -> Option<&N>
{
self.nodes.get(a.0).map(|n| &n.data)
}
/// Access node data for node **a** mutably, or **None** if it doesn't exist.
pub fn node_mut(&mut self, a: NodeIndex) -> Option<&mut N>
{
self.nodes.get_mut(a.0).map(|n| &mut n.data)
}
/// Return an iterator of all neighbors that have an edge from **a** to them.
///
/// Produces an empty iterator if the node doesn't exist.
///
/// Iterator element type is **NodeIndex**.
pub fn neighbors(&self, a: NodeIndex) -> Neighbors<N, E>
{
Neighbors{
graph: self,
// Start at the head of a's outgoing (next[0]) edge list.
next: match self.nodes.get(a.0) {
None => EdgeEnd,
Some(n) => n.next[0],
}
}
}
/// Return an iterator over the neighbors of node **a**, paired with their respective edge
/// weights.
///
/// Produces an empty iterator if the node doesn't exist.
///
/// Iterator element type is **(NodeIndex, &'a E)**.
pub fn edges(&self, a: NodeIndex) -> Edges<N, E>
{
Edges{
graph: self,
// Start at the head of a's outgoing (next[0]) edge list.
next: match self.nodes.get(a.0) {
None => EdgeEnd,
Some(n) => n.next[0],
}
}
}
/// Return an iterator over the edges from **a** to its neighbors, then *to* **a** from its
/// neighbors.
///
/// Produces an empty iterator if the node doesn't exist.
///
/// Iterator element type is **(NodeIndex, &'a E)**.
pub fn edges_both(&self, a: NodeIndex) -> EdgesBoth<N, E>
{
EdgesBoth{
graph: self,
// Track both list heads: outgoing first, then incoming.
next: match self.nodes.get(a.0) {
None => [EdgeEnd, EdgeEnd],
Some(n) => n.next,
}
}
}
/// Return an iterator over nodes that have an edge to **a**.
///
/// Produces an empty iterator if the node doesn't exist.
///
/// Iterator element type is **(NodeIndex, &'a E)**.
pub fn in_edges(&self, a: NodeIndex) -> EdgesIn<N, E>
{
EdgesIn{
graph: self,
// Start at the head of a's incoming (next[1]) edge list.
next: match self.nodes.get(a.0) {
None => EdgeEnd,
Some(n) => n.next[1],
}
}
}
/// Add an edge from **a** to **b** to the graph, with its edge weight.
/// The new edge is pushed onto the front of both endpoints' edge lists.
///
/// **Panics** if any of the nodes don't exist.
pub fn add_edge(&mut self, a: NodeIndex, b: NodeIndex, data: E) -> EdgeIndex
{
let edge_idx = EdgeIndex(self.edges.len());
match index_twice(self.nodes[mut], a.0, b.0) {
Pair::None => panic!("NodeIndices out of bounds"),
Pair::One(an) => {
// Self loop: a == b, so the same node heads both lists.
let edge = Edge {
data: data,
node: [a, b],
next: an.next,
};
an.next[0] = edge_idx;
an.next[1] = edge_idx;
self.edges.push(edge);
}
Pair::Both(an, bn) => {
// a and b are different indices
let edge = Edge {
data: data,
node: [a, b],
next: [an.next[0], bn.next[1]],
};
an.next[0] = edge_idx;
bn.next[1] = edge_idx;
self.edges.push(edge);
}
}
edge_idx
}
/// Remove **a** from the graph if it exists, and return its data value.
/// If it doesn't exist in the graph, return **None**.
///
/// Uses swap_remove internally, so the node that previously held the highest
/// index is relocated to **a** and all edges touching it are re-pointed.
pub fn remove_node(&mut self, a: NodeIndex) -> Option<N>
{
// Bail out early if the node doesn't exist.
match self.nodes.get(a.0) {
None => return None,
_ => {}
}
for d in DIRECTIONS.iter() {
let k = *d as uint;
/*
println!("Starting edge removal for k={}, node={}", k, a);
for (i, n) in self.nodes.iter().enumerate() {
println!("Node {}: Edges={}", i, n.next);
}
for (i, ed) in self.edges.iter().enumerate() {
println!("Edge {}: {}", i, ed);
}
*/
// Remove all edges from and to this node.
// remove_edge unlinks each edge from the list head, so repeatedly
// taking the current head drains the whole list.
loop {
let next = self.nodes[a.0].next[k];
if next == EdgeEnd {
break
}
let ret = self.remove_edge(next);
debug_assert!(ret.is_some());
let _ = ret;
}
}
// Use swap_remove -- only the swapped-in node is going to change
// NodeIndex, so we only have to walk its edges and update them.
let node = match self.nodes.swap_remove(a.0) {
None => return None,
Some(node) => node,
};
// Find the edge lists of the node that had to relocate.
// It may be that no node had to relocate, then we are done already.
let swap_edges = match self.nodes.get(a.0) {
None => return Some(node.data),
Some(ed) => ed.next,
};
// The swapped element's old index
let old_index = NodeIndex(self.nodes.len());
let new_index = a;
// Adjust the starts of the out edges, and ends of the in edges.
for &d in DIRECTIONS.iter() {
let k = d as uint;
walk_edge_list(swap_edges[k], self.edges[mut], d, |_, curedge| {
debug_assert!(curedge.node[k] == old_index);
curedge.node[k] = new_index;
true
});
}
Some(node.data)
}
/// Access the edge record for **e** mutably.
/// **Panics** if the edge doesn't exist.
pub fn edge_mut(&mut self, e: EdgeIndex) -> &mut Edge<E>
{
&mut self.edges[e.0]
}
/// Remove an edge and return its edge weight, or **None** if it didn't exist.
///
/// Uses swap_remove internally, so the edge that previously held the highest
/// index is relocated to **e** (see remove_edge_adjust_indices).
pub fn remove_edge(&mut self, e: EdgeIndex) -> Option<E>
{
// every edge is part of two lists,
// outgoing and incoming edges.
// Remove it from both
//debug_assert!(self.edges.get(e.0).is_some(), "No such edge: {}", e);
let (edge_node, edge_next) = match self.edges.get(e.0) {
None => return None,
Some(x) => (x.node, x.next),
};
// List out from A
// List in from B
for &d in DIRECTIONS.iter() {
let k = d as uint;
let node = match self.nodes.get_mut(edge_node[k].0) {
Some(r) => r,
None => {
debug_assert!(false, "Edge's endpoint dir={} index={} not found",
k, edge_node[k]);
return None
}
};
let fst = node.next[k];
if fst == e {
//println!("Updating first edge 0 for node {}, set to {}", edge_node[0], edge_next[0]);
// e heads the list: unlink it at the node itself.
node.next[k] = edge_next[k];
} else {
// Otherwise find e's predecessor in the list and splice around it.
walk_edge_list(fst, self.edges[mut], d, |_i, curedge| {
if curedge.next[k] == e {
curedge.next[k] = edge_next[k];
false
} else { true }
});
}
}
self.remove_edge_adjust_indices(e)
}
/// Physically remove edge **e** via swap_remove and repair the edge lists
/// that referenced the edge that was swapped into slot **e**.
fn remove_edge_adjust_indices(&mut self, e: EdgeIndex) -> Option<E>
{
    // swap_remove the edge -- only the removed edge
    // and the edge swapped into place are affected and need updating
    // indices.
    let edge = self.edges.swap_remove(e.0).unwrap();
    let swap = match self.edges.get(e.0) {
        // no element needed to be swapped.
        None => return Some(edge.data),
        Some(ed) => ed.node,
    };
    // The swapped edge used to live at the old last index.
    let swapped_e = EdgeIndex(self.edges.len());
    // List out from A
    // List in to B
    for &d in DIRECTIONS.iter() {
        let k = d as uint;
        let node = &mut self.nodes[swap[k].0];
        let fst = node.next[k];
        if fst == swapped_e {
            node.next[k] = e;
        } else {
            // Walk until the predecessor of the swapped edge is found;
            // returning false stops the walk.
            walk_edge_list(fst, self.edges[mut], d, |_i, curedge| {
                if curedge.next[k] == swapped_e {
                    curedge.next[k] = e;
                    false
                } else { true }
            });
        }
    }
    let edge_data = edge.data;
    Some(edge_data)
}
/// Lookup an edge from **a** to **b**.
///
/// Walks **a**'s outgoing edge list and returns the first edge whose
/// target is **b**, or **None** when no such edge (or no such node) exists.
pub fn find_edge(&self, a: NodeIndex, b: NodeIndex) -> Option<EdgeIndex>
{
    match self.nodes.get(a.0) {
        None => None,
        Some(node) => {
            // BUG FIX: the previous version never advanced `edix`, so it
            // looped forever whenever the first out-edge did not target b.
            let mut edix = node.next[0];
            while edix != EdgeEnd {
                let edge = &self.edges[edix.0];
                if edge.node[1] == b {
                    return Some(edix)
                }
                edix = edge.next[0];
            }
            None
        }
    }
}
/// Head of node **a**'s outgoing edge list, or **None** if **a** does not
/// exist or has no outgoing edges.
pub fn first_out_edge(&self, a: NodeIndex) -> Option<EdgeIndex>
{
    self.nodes.get(a.0).and_then(|node| {
        let head = node.next[0];
        if head == EdgeEnd { None } else { Some(head) }
    })
}
/// The edge following **e** in its source node's outgoing list,
/// or **None** at the end of the list or for an invalid index.
pub fn next_out_edge(&self, e: EdgeIndex) -> Option<EdgeIndex>
{
    self.edges.get(e.0).and_then(|edge| {
        let nxt = edge.next[0];
        if nxt == EdgeEnd { None } else { Some(nxt) }
    })
}
/// Head of node **a**'s incoming edge list, or **None** if **a** does not
/// exist or has no incoming edges.
pub fn first_in_edge(&self, a: NodeIndex) -> Option<EdgeIndex>
{
    self.nodes.get(a.0).and_then(|node| {
        let head = node.next[1];
        if head == EdgeEnd { None } else { Some(head) }
    })
}
/// The edge following **e** in its target node's incoming list,
/// or **None** at the end of the list or for an invalid index.
pub fn next_in_edge(&self, e: EdgeIndex) -> Option<EdgeIndex>
{
    self.edges.get(e.0).and_then(|edge| {
        let nxt = edge.next[1];
        if nxt == EdgeEnd { None } else { Some(nxt) }
    })
}
/// Return an iterator over the nodes without incoming edges
/// (the graph's "initial" nodes, i.e. candidates for topological ordering).
pub fn initials(&self) -> Initials<N>
{
    Initials{iter: self.nodes.iter().enumerate()}
}
}
/// Iterator over the indices of nodes that have no incoming edges.
///
/// Iterator element type is **NodeIndex**.
pub struct Initials<'a, N: 'a> {
    iter: iter::Enumerate<slice::Iter<'a, Node<N>>>,
}
impl<'a, N: 'a> Iterator<NodeIndex> for Initials<'a, N>
{
    fn next(&mut self) -> Option<NodeIndex>
    {
        loop {
            match self.iter.next() {
                None => return None,
                // next[1] is the head of the incoming list; the EdgeEnd
                // sentinel means the node has no incoming edges.
                Some((index, node)) if node.next[1] == EdgeEnd => {
                    return Some(NodeIndex(index))
                },
                _ => continue,
            }
        }
    }
}
/// Perform a topological sort of the graph.
///
/// Return a vector of nodes in topological order: each node is ordered
/// before its successors.
///
/// If the returned vec contains less than all the nodes of the graph, then
/// the graph was cyclic.
///
/// NOTE(review): because ready nodes are drawn from a HashSet, the relative
/// order of independent nodes is not deterministic across runs.
pub fn toposort<N, E>(g: &OGraph<N, E>) -> Vec<NodeIndex>
{
    let mut order = Vec::with_capacity(g.node_count());
    let mut tovisit = HashSet::new();
    let mut ordered = HashSet::new();
    // find all initial nodes
    tovisit.extend(g.initials());
    // Take an unvisited element and
    while let Some(&nix) = tovisit.iter().next() {
        tovisit.remove(&nix);
        order.push(nix);
        ordered.insert(nix);
        for neigh in g.neighbors(nix) {
            // Look at each neighbor, and those that only have incoming edges
            // from the already ordered list, they are the next to visit.
            if g.in_edges(neigh).all(|(b, _)| ordered.contains(&b)) {
                tovisit.insert(neigh);
            }
        }
    }
    order
}
/// Iterator over the neighbors of a node.
///
/// Iterator element type is **NodeIndex**.
pub struct Neighbors<'a, N: 'a, E: 'a> {
    graph: &'a OGraph<N, E>,
    // Next edge to visit in the node's outgoing list; EdgeEnd terminates.
    next: EdgeIndex,
}
impl<'a, N, E> Iterator<NodeIndex> for Neighbors<'a, N, E>
{
    fn next(&mut self) -> Option<NodeIndex>
    {
        // The EdgeEnd sentinel is out of bounds, so `get` returns None
        // both at the end of the list and for a removed/invalid edge.
        match self.graph.edges.get(self.next.0) {
            None => None,
            Some(edge) => {
                self.next = edge.next[0];
                Some(edge.node[1])
            }
        }
    }
}
/// Iterator over a node's outgoing edges, yielding the target node and a
/// reference to the edge weight.
pub struct Edges<'a, N: 'a, E: 'a> {
    graph: &'a OGraph<N, E>,
    // Next edge in the outgoing list; EdgeEnd terminates the iteration.
    next: EdgeIndex,
}
impl<'a, N, E> Iterator<(NodeIndex, &'a E)> for Edges<'a, N, E>
{
    fn next(&mut self) -> Option<(NodeIndex, &'a E)>
    {
        match self.graph.edges.get(self.next.0) {
            None => None,
            Some(edge) => {
                self.next = edge.next[0];
                Some((edge.node[1], &edge.data))
            }
        }
    }
}
/// Iterator over a node's outgoing edges followed by its incoming edges.
///
/// For outgoing edges the yielded node is the target; for incoming edges
/// it is the source.
pub struct EdgesBoth<'a, N: 'a, E: 'a> {
    graph: &'a OGraph<N, E>,
    // next[0]: position in the outgoing list, next[1]: in the incoming list.
    next: [EdgeIndex, ..2],
}
impl<'a, N, E> Iterator<(NodeIndex, &'a E)> for EdgesBoth<'a, N, E>
{
    fn next(&mut self) -> Option<(NodeIndex, &'a E)>
    {
        // First any outgoing edges
        match self.graph.edges.get(self.next[0].0) {
            None => {}
            Some(edge) => {
                self.next[0] = edge.next[0];
                return Some((edge.node[1], &edge.data))
            }
        }
        // Then incoming edges
        match self.graph.edges.get(self.next[1].0) {
            None => None,
            Some(edge) => {
                self.next[1] = edge.next[1];
                Some((edge.node[0], &edge.data))
            }
        }
    }
}
/// Iterator over a node's incoming edges, yielding the source node and a
/// reference to the edge weight.
pub struct EdgesIn<'a, N: 'a, E: 'a> {
    graph: &'a OGraph<N, E>,
    // Next edge in the incoming list; EdgeEnd terminates the iteration.
    next: EdgeIndex,
}
impl<'a, N, E> Iterator<(NodeIndex, &'a E)> for EdgesIn<'a, N, E>
{
    fn next(&mut self) -> Option<(NodeIndex, &'a E)>
    {
        match self.graph.edges.get(self.next.0) {
            None => None,
            Some(edge) => {
                self.next = edge.next[1];
                Some((edge.node[0], &edge.data))
            }
        }
    }
}
// Benchmark node insertion into a small star-shaped graph.
// NOTE(review): the name has a typo ("inser"); kept to avoid churn in
// recorded benchmark results.
#[bench]
fn bench_inser(b: &mut test::Bencher) {
    let mut og = OGraph::new();
    let fst = og.add_node(0i);
    for x in range(1, 125) {
        let n = og.add_node(x);
        og.add_edge(fst, n, ());
    }
    b.iter(|| {
        og.add_node(1)
    })
}
// Benchmark node removal from a long chain graph.
// NOTE(review): after the first `remove_node(fst)` the index `fst` refers
// to whatever node was swapped into its slot, so subsequent iterations
// remove arbitrary (but existing) nodes -- intentional-looking stress, but
// worth confirming.
#[bench]
fn bench_remove(b: &mut test::Bencher) {
    // removal is very slow in a big graph.
    // and this one doesn't even have many nodes.
    let mut og = OGraph::new();
    let fst = og.add_node(0i);
    let mut prev = fst;
    for x in range(1, 1250) {
        let n = og.add_node(x);
        og.add_edge(prev, n, ());
        prev = n;
    }
    //println!("{}", og);
    b.iter(|| {
        for _ in range(0, 100i) {
            og.remove_node(fst);
        }
    })
}
Add an edge mut iterator
use std::hash::{Hash};
use std::collections::HashSet;
use std::fmt;
use std::slice;
use std::iter;
use test;
// FIXME: These aren't stable, so a public wrapper of node/edge indices
// should be lifetimed just like pointers.
#[deriving(Copy, Clone, Show, PartialEq, PartialOrd, Eq, Hash)]
pub struct NodeIndex(uint);
#[deriving(Copy, Clone, Show, PartialEq, PartialOrd, Eq, Hash)]
pub struct EdgeIndex(uint);
/// Sentinel edge index (uint::MAX) marking the end of an edge list.
pub const EdgeEnd: EdgeIndex = EdgeIndex(::std::uint::MAX);
//const InvalidNode: NodeIndex = NodeIndex(::std::uint::MAX);
/// Edge direction; the discriminant (0 = outgoing, 1 = incoming) indexes
/// the per-node and per-edge `next`/`node` pair arrays.
#[deriving(Copy, Clone, Show, PartialEq)]
enum Dir {
    Out = 0,
    In = 1
}
const DIRECTIONS: [Dir, ..2] = [Dir::Out, Dir::In];
/// A graph node: its weight plus the heads of its two edge lists.
#[deriving(Show)]
pub struct Node<N> {
    pub data: N,
    /// Next edge in outgoing and incoming edge lists.
    next: [EdgeIndex, ..2],
}
impl<N> Node<N>
{
    /// Return the heads of this node's [outgoing, incoming] edge lists.
    pub fn next_edges(&self) -> [EdgeIndex, ..2]
    {
        self.next
    }
}
/// A graph edge: its weight, its position in two intrusive linked lists,
/// and its endpoints.
#[deriving(Show, Copy)]
pub struct Edge<E> {
    pub data: E,
    /// Next edge in outgoing and incoming edge lists.
    next: [EdgeIndex, ..2],
    /// Start and End node index
    node: [NodeIndex, ..2],
}
impl<E> Edge<E>
{
    /// Return the [next-outgoing, next-incoming] links of this edge.
    pub fn next_edges(&self) -> [EdgeIndex, ..2]
    {
        self.next
    }
    /// The node this edge starts at.
    pub fn source(&self) -> NodeIndex
    {
        self.node[0]
    }
    /// The node this edge points to.
    pub fn target(&self) -> NodeIndex
    {
        self.node[1]
    }
}
/// **OGraph\<N, E\>** is a directed graph using an adjacency list representation.
///
/// The graph maintains unique indices for nodes and edges, so both node and edge
/// data may be accessed mutably.
///
/// Based upon the graph implementation in rustc.
///
/// **NodeIndex** and **EdgeIndex** are types that act as references to nodes and edges,
/// but these are only stable across certain operations. Adding to the graph keeps
/// all indices stable, but removing a node will force another node to shift its index.
///
/// Removing an edge also shifts the index of another edge.
//#[deriving(Show)]
pub struct OGraph<N, E> {
    nodes: Vec<Node<N>>,
    edges: Vec<Edge<E>>,
}
impl<N: fmt::Show, E: fmt::Show> fmt::Show for OGraph<N, E>
{
    // Debug dump: one line per node, then one line per edge.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        for n in self.nodes.iter() {
            try!(writeln!(f, "{}", n));
        }
        for n in self.edges.iter() {
            try!(writeln!(f, "{}", n));
        }
        Ok(())
    }
}
/// Result of borrowing one or two slice elements mutably at once.
pub enum Pair<'a, T: 'a> {
    Both(&'a mut T, &'a mut T),
    One(&'a mut T),
    None,
}
/// Obtain mutable references to indices **a** and **b** of **slc**
/// simultaneously. Returns **One** when a == b, **None** when either
/// index is out of bounds.
pub fn index_twice<T>(slc: &mut [T], a: uint, b: uint) -> Pair<T>
{
    if a == b {
        slc.get_mut(a).map_or(Pair::None, Pair::One)
    } else {
        if a >= slc.len() || b >= slc.len() {
            Pair::None
        } else {
            // safe because a, b are in bounds and distinct,
            // so the two &mut never alias.
            unsafe {
                let ar = &mut *(slc.unsafe_mut(a) as *mut _);
                let br = &mut *(slc.unsafe_mut(b) as *mut _);
                Pair::Both(ar, br)
            }
        }
    }
}
impl<N, E> OGraph<N, E>
//where N: fmt::Show
{
/// Create a new OGraph.
pub fn new() -> OGraph<N, E>
{
    OGraph{nodes: Vec::new(), edges: Vec::new()}
}
/// Return the number of nodes (vertices) in the graph.
pub fn node_count(&self) -> uint
{
    self.nodes.len()
}
/// Add a node with weight **data** to the graph.
///
/// The returned index is stable until a node is removed.
pub fn add_node(&mut self, data: N) -> NodeIndex
{
    // New nodes start with empty (EdgeEnd-terminated) edge lists.
    let node = Node{data: data, next: [EdgeEnd, EdgeEnd]};
    let node_idx = NodeIndex(self.nodes.len());
    self.nodes.push(node);
    node_idx
}
/// Access node data for node **a**, or **None** if it doesn't exist.
pub fn node(&self, a: NodeIndex) -> Option<&N>
{
    self.nodes.get(a.0).map(|n| &n.data)
}
/// Access node data for node **a** mutably, or **None** if it doesn't exist.
pub fn node_mut(&mut self, a: NodeIndex) -> Option<&mut N>
{
    self.nodes.get_mut(a.0).map(|n| &mut n.data)
}
/// Return an iterator of all neighbors that have an edge from **a** to them.
///
/// Produces an empty iterator if the node doesn't exist.
///
/// Iterator element type is **NodeIndex**.
pub fn neighbors(&self, a: NodeIndex) -> Neighbors<N, E>
{
    Neighbors{
        graph: self,
        // Seed with the head of a's outgoing list (EdgeEnd == empty).
        next: match self.nodes.get(a.0) {
            None => EdgeEnd,
            Some(n) => n.next[0],
        }
    }
}
/// Return an iterator over the neighbors of node **a**, paired with their respective edge
/// weights.
///
/// Produces an empty iterator if the node doesn't exist.
///
/// Iterator element type is **(NodeIndex, &'a E)**.
pub fn edges(&self, a: NodeIndex) -> Edges<N, E>
{
    Edges{
        graph: self,
        next: match self.nodes.get(a.0) {
            None => EdgeEnd,
            Some(n) => n.next[0],
        }
    }
}
/// Return an iterator over the edges from **a** to its neighbors, then *to* **a** from its
/// neighbors.
///
/// Produces an empty iterator if the node doesn't exist.
///
/// Iterator element type is **(NodeIndex, &'a E)**.
pub fn edges_both(&self, a: NodeIndex) -> EdgesBoth<N, E>
{
    EdgesBoth{
        graph: self,
        // Seed both cursors: outgoing list head and incoming list head.
        next: match self.nodes.get(a.0) {
            None => [EdgeEnd, EdgeEnd],
            Some(n) => n.next,
        }
    }
}
/// Return an iterator over nodes that have an edge to **a**.
///
/// Produces an empty iterator if the node doesn't exist.
///
/// Iterator element type is **(NodeIndex, &'a E)**.
pub fn in_edges(&self, a: NodeIndex) -> EdgesIn<N, E>
{
    EdgesIn{
        graph: self,
        next: match self.nodes.get(a.0) {
            None => EdgeEnd,
            Some(n) => n.next[1],
        }
    }
}
/// Add an edge from **a** to **b** to the graph, with its edge weight.
///
/// The new edge is pushed onto the front of both endpoint's edge lists.
///
/// **Panics** if any of the nodes don't exist.
pub fn add_edge(&mut self, a: NodeIndex, b: NodeIndex, data: E) -> EdgeIndex
{
    let edge_idx = EdgeIndex(self.edges.len());
    match index_twice(self.nodes[mut], a.0, b.0) {
        Pair::None => panic!("NodeIndices out of bounds"),
        Pair::One(an) => {
            // Self-loop: a == b, so both list heads live on the same node.
            let edge = Edge {
                data: data,
                node: [a, b],
                next: an.next,
            };
            an.next[0] = edge_idx;
            an.next[1] = edge_idx;
            self.edges.push(edge);
        }
        Pair::Both(an, bn) => {
            // a and b are different indices
            let edge = Edge {
                data: data,
                node: [a, b],
                next: [an.next[0], bn.next[1]],
            };
            an.next[0] = edge_idx;
            bn.next[1] = edge_idx;
            self.edges.push(edge);
        }
    }
    edge_idx
}
/// Remove **a** from the graph if it exists, and return its data value.
/// If it doesn't exist in the graph, return **None**.
///
/// All edges touching **a** are removed first; the last node is then
/// swapped into **a**'s slot, so that node's index changes to **a**.
pub fn remove_node(&mut self, a: NodeIndex) -> Option<N>
{
    match self.nodes.get(a.0) {
        None => return None,
        _ => {}
    }
    for d in DIRECTIONS.iter() {
        let k = *d as uint;
        /*
        println!("Starting edge removal for k={}, node={}", k, a);
        for (i, n) in self.nodes.iter().enumerate() {
            println!("Node {}: Edges={}", i, n.next);
        }
        for (i, ed) in self.edges.iter().enumerate() {
            println!("Edge {}: {}", i, ed);
        }
        */
        // Remove all edges from and to this node.
        loop {
            // Re-read the list head each time: remove_edge rewires it.
            let next = self.nodes[a.0].next[k];
            if next == EdgeEnd {
                break
            }
            let ret = self.remove_edge(next);
            debug_assert!(ret.is_some());
            let _ = ret;
        }
    }
    // Use swap_remove -- only the swapped-in node is going to change
    // NodeIndex, so we only have to walk its edges and update them.
    let node = match self.nodes.swap_remove(a.0) {
        None => return None,
        Some(node) => node,
    };
    // Find the edge lists of the node that had to relocate.
    // It may be that no node had to relocate, then we are done already.
    let swap_edges = match self.nodes.get(a.0) {
        None => return Some(node.data),
        Some(ed) => ed.next,
    };
    // The swapped element's old index
    let old_index = NodeIndex(self.nodes.len());
    let new_index = a;
    // Adjust the starts of the out edges, and ends of the in edges.
    for &d in DIRECTIONS.iter() {
        let k = d as uint;
        for (_, curedge) in EdgesMut::new(self.edges[mut], swap_edges[k], d) {
            debug_assert!(curedge.node[k] == old_index);
            curedge.node[k] = new_index;
        }
    }
    Some(node.data)
}
/// Access the edge record for **e** mutably.
///
/// **Panics** if **e** is out of bounds.
pub fn edge_mut(&mut self, e: EdgeIndex) -> &mut Edge<E>
{
    &mut self.edges[e.0]
}
/// Remove an edge and return its edge weight, or **None** if it didn't exist.
///
/// NOTE(review): removal swaps another edge into slot **e**, so the removed
/// index — and the last edge's index — are invalidated by this call.
pub fn remove_edge(&mut self, e: EdgeIndex) -> Option<E>
{
    // every edge is part of two lists,
    // outgoing and incoming edges.
    // Remove it from both
    //debug_assert!(self.edges.get(e.0).is_some(), "No such edge: {}", e);
    let (edge_node, edge_next) = match self.edges.get(e.0) {
        None => return None,
        Some(x) => (x.node, x.next),
    };
    // List out from A
    // List in from B
    for &d in DIRECTIONS.iter() {
        let k = d as uint;
        let node = match self.nodes.get_mut(edge_node[k].0) {
            Some(r) => r,
            None => {
                debug_assert!(false, "Edge's endpoint dir={} index={} not found",
                              k, edge_node[k]);
                return None
            }
        };
        let fst = node.next[k];
        if fst == e {
            // e is the head of the list: repoint the node's head link.
            node.next[k] = edge_next[k];
        } else {
            // e is interior: splice it out at its predecessor. Each edge
            // occurs at most once per list, so stop after the first patch
            // (the old walk_edge_list closure stopped by returning false;
            // the EdgesMut refactor had lost that early exit).
            for (_i, curedge) in EdgesMut::new(self.edges[mut], fst, d) {
                if curedge.next[k] == e {
                    curedge.next[k] = edge_next[k];
                    break;
                }
            }
        }
    }
    self.remove_edge_adjust_indices(e)
}
/// Physically remove edge **e** via swap_remove and repair the edge lists
/// that referenced the edge swapped into slot **e**.
fn remove_edge_adjust_indices(&mut self, e: EdgeIndex) -> Option<E>
{
    // swap_remove the edge -- only the removed edge
    // and the edge swapped into place are affected and need updating
    // indices.
    let edge = self.edges.swap_remove(e.0).unwrap();
    let swap = match self.edges.get(e.0) {
        // no element needed to be swapped.
        None => return Some(edge.data),
        Some(ed) => ed.node,
    };
    // The swapped edge used to live at the old last index.
    let swapped_e = EdgeIndex(self.edges.len());
    // List out from A
    // List in to B
    for &d in DIRECTIONS.iter() {
        let k = d as uint;
        let node = &mut self.nodes[swap[k].0];
        let fst = node.next[k];
        if fst == swapped_e {
            node.next[k] = e;
        } else {
            // The swapped edge occurs at most once per list, so stop after
            // patching its predecessor (early exit lost in the EdgesMut
            // refactor; restored here).
            for (_i, curedge) in EdgesMut::new(self.edges[mut], fst, d) {
                if curedge.next[k] == swapped_e {
                    curedge.next[k] = e;
                    break;
                }
            }
        }
    }
    let edge_data = edge.data;
    Some(edge_data)
}
/// Lookup an edge from **a** to **b**.
///
/// Walks **a**'s outgoing edge list and returns the first edge whose
/// target is **b**, or **None** when no such edge (or no such node) exists.
pub fn find_edge(&self, a: NodeIndex, b: NodeIndex) -> Option<EdgeIndex>
{
    match self.nodes.get(a.0) {
        None => None,
        Some(node) => {
            // BUG FIX: the previous version never advanced `edix`, so it
            // looped forever whenever the first out-edge did not target b.
            let mut edix = node.next[0];
            while edix != EdgeEnd {
                let edge = &self.edges[edix.0];
                if edge.node[1] == b {
                    return Some(edix)
                }
                edix = edge.next[0];
            }
            None
        }
    }
}
/// Head of node **a**'s outgoing edge list, or **None** if **a** does not
/// exist or has no outgoing edges.
pub fn first_out_edge(&self, a: NodeIndex) -> Option<EdgeIndex>
{
    match self.nodes.get(a.0) {
        Some(node) if node.next[0] != EdgeEnd => Some(node.next[0]),
        _ => None,
    }
}
/// The edge following **e** in its source node's outgoing list,
/// or **None** at the end of the list or for an invalid index.
pub fn next_out_edge(&self, e: EdgeIndex) -> Option<EdgeIndex>
{
    match self.edges.get(e.0) {
        Some(edge) if edge.next[0] != EdgeEnd => Some(edge.next[0]),
        _ => None,
    }
}
/// Head of node **a**'s incoming edge list, or **None** if **a** does not
/// exist or has no incoming edges.
pub fn first_in_edge(&self, a: NodeIndex) -> Option<EdgeIndex>
{
    match self.nodes.get(a.0) {
        Some(node) if node.next[1] != EdgeEnd => Some(node.next[1]),
        _ => None,
    }
}
/// The edge following **e** in its target node's incoming list,
/// or **None** at the end of the list or for an invalid index.
pub fn next_in_edge(&self, e: EdgeIndex) -> Option<EdgeIndex>
{
    match self.edges.get(e.0) {
        Some(edge) if edge.next[1] != EdgeEnd => Some(edge.next[1]),
        _ => None,
    }
}
/// Return an iterator over the nodes without incoming edges
/// (the graph's "initial" nodes, i.e. candidates for topological ordering).
pub fn initials(&self) -> Initials<N>
{
    Initials{iter: self.nodes.iter().enumerate()}
}
}
/// Iterator over the indices of nodes that have no incoming edges.
///
/// Iterator element type is **NodeIndex**.
pub struct Initials<'a, N: 'a> {
    iter: iter::Enumerate<slice::Iter<'a, Node<N>>>,
}
impl<'a, N: 'a> Iterator<NodeIndex> for Initials<'a, N>
{
    fn next(&mut self) -> Option<NodeIndex>
    {
        loop {
            match self.iter.next() {
                None => return None,
                // next[1] is the head of the incoming list; the EdgeEnd
                // sentinel means the node has no incoming edges.
                Some((index, node)) if node.next[1] == EdgeEnd => {
                    return Some(NodeIndex(index))
                },
                _ => continue,
            }
        }
    }
}
/// Perform a topological sort of the graph.
///
/// Return a vector of nodes in topological order: each node is ordered
/// before its successors.
///
/// If the returned vec contains less than all the nodes of the graph, then
/// the graph was cyclic.
///
/// NOTE(review): because ready nodes are drawn from a HashSet, the relative
/// order of independent nodes is not deterministic across runs.
pub fn toposort<N, E>(g: &OGraph<N, E>) -> Vec<NodeIndex>
{
    let mut order = Vec::with_capacity(g.node_count());
    let mut tovisit = HashSet::new();
    let mut ordered = HashSet::new();
    // find all initial nodes
    tovisit.extend(g.initials());
    // Take an unvisited element and
    while let Some(&nix) = tovisit.iter().next() {
        tovisit.remove(&nix);
        order.push(nix);
        ordered.insert(nix);
        for neigh in g.neighbors(nix) {
            // Look at each neighbor, and those that only have incoming edges
            // from the already ordered list, they are the next to visit.
            if g.in_edges(neigh).all(|(b, _)| ordered.contains(&b)) {
                tovisit.insert(neigh);
            }
        }
    }
    order
}
/// Iterator over the neighbors of a node.
///
/// Iterator element type is **NodeIndex**.
pub struct Neighbors<'a, N: 'a, E: 'a> {
    graph: &'a OGraph<N, E>,
    // Next edge to visit in the node's outgoing list; EdgeEnd terminates.
    next: EdgeIndex,
}
impl<'a, N, E> Iterator<NodeIndex> for Neighbors<'a, N, E>
{
    fn next(&mut self) -> Option<NodeIndex>
    {
        // The EdgeEnd sentinel is out of bounds, so `get` returns None
        // both at the end of the list and for a removed/invalid edge.
        match self.graph.edges.get(self.next.0) {
            None => None,
            Some(edge) => {
                self.next = edge.next[0];
                Some(edge.node[1])
            }
        }
    }
}
/// Iterator over a node's outgoing edges, yielding the target node and a
/// reference to the edge weight.
pub struct Edges<'a, N: 'a, E: 'a> {
    graph: &'a OGraph<N, E>,
    // Next edge in the outgoing list; EdgeEnd terminates the iteration.
    next: EdgeIndex,
}
impl<'a, N, E> Iterator<(NodeIndex, &'a E)> for Edges<'a, N, E>
{
    fn next(&mut self) -> Option<(NodeIndex, &'a E)>
    {
        match self.graph.edges.get(self.next.0) {
            None => None,
            Some(edge) => {
                self.next = edge.next[0];
                Some((edge.node[1], &edge.data))
            }
        }
    }
}
/// Mutable walker over one of the intrusive edge lists.
///
/// Yields **(EdgeIndex, &mut Edge)** pairs, following the list in
/// direction `dir` starting from `next`.
pub struct EdgesMut<'a, E: 'a> {
    edges: &'a mut [Edge<E>],
    next: EdgeIndex,
    dir: Dir,
}
impl<'a, E> EdgesMut<'a, E>
{
    /// Create a walker over `edges` starting at list position `next`.
    fn new(edges: &mut [Edge<E>], next: EdgeIndex, dir: Dir) -> EdgesMut<E>
    {
        EdgesMut{
            edges: edges,
            next: next,
            dir: dir
        }
    }
}
impl<'a, E> Iterator<(EdgeIndex, &'a mut Edge<E>)> for EdgesMut<'a, E>
{
    fn next(&mut self) -> Option<(EdgeIndex, &'a mut Edge<E>)>
    {
        let this_index = self.next;
        let k = self.dir as uint;
        match self.edges.get_mut(self.next.0) {
            None => None,
            Some(edge) => {
                // Advance before yielding, so the caller may freely mutate
                // the yielded edge's links.
                self.next = edge.next[k];
                // We cannot in safe rust, derive a &'a mut from &self,
                // because the life of &self is shorter than 'a.
                //
                // We guarantee that this will not allow two pointers to the same
                // edge, and use unsafe to extend the life.
                //
                // See http://stackoverflow.com/a/25748645/3616050
                //
                // NOTE(review): soundness relies on the list being cycle-free
                // so no edge is visited twice -- TODO confirm that invariant.
                let long_life_edge = unsafe {
                    &mut *(edge as *mut _)
                };
                Some((this_index, long_life_edge))
            }
        }
    }
}
/// Iterator over a node's outgoing edges followed by its incoming edges.
///
/// For outgoing edges the yielded node is the target; for incoming edges
/// it is the source.
pub struct EdgesBoth<'a, N: 'a, E: 'a> {
    graph: &'a OGraph<N, E>,
    // next[0]: position in the outgoing list, next[1]: in the incoming list.
    next: [EdgeIndex, ..2],
}
impl<'a, N, E> Iterator<(NodeIndex, &'a E)> for EdgesBoth<'a, N, E>
{
    fn next(&mut self) -> Option<(NodeIndex, &'a E)>
    {
        // First any outgoing edges
        match self.graph.edges.get(self.next[0].0) {
            None => {}
            Some(edge) => {
                self.next[0] = edge.next[0];
                return Some((edge.node[1], &edge.data))
            }
        }
        // Then incoming edges
        match self.graph.edges.get(self.next[1].0) {
            None => None,
            Some(edge) => {
                self.next[1] = edge.next[1];
                Some((edge.node[0], &edge.data))
            }
        }
    }
}
/// Iterator over a node's incoming edges, yielding the source node and a
/// reference to the edge weight.
pub struct EdgesIn<'a, N: 'a, E: 'a> {
    graph: &'a OGraph<N, E>,
    // Next edge in the incoming list; EdgeEnd terminates the iteration.
    next: EdgeIndex,
}
impl<'a, N, E> Iterator<(NodeIndex, &'a E)> for EdgesIn<'a, N, E>
{
    fn next(&mut self) -> Option<(NodeIndex, &'a E)>
    {
        match self.graph.edges.get(self.next.0) {
            None => None,
            Some(edge) => {
                self.next = edge.next[1];
                Some((edge.node[0], &edge.data))
            }
        }
    }
}
// Benchmark node insertion into a small star-shaped graph.
// NOTE(review): the name has a typo ("inser"); kept to avoid churn in
// recorded benchmark results.
#[bench]
fn bench_inser(b: &mut test::Bencher) {
    let mut og = OGraph::new();
    let fst = og.add_node(0i);
    for x in range(1, 125) {
        let n = og.add_node(x);
        og.add_edge(fst, n, ());
    }
    b.iter(|| {
        og.add_node(1)
    })
}
// Benchmark node removal from a long chain graph.
// NOTE(review): after the first `remove_node(fst)` the index `fst` refers
// to whatever node was swapped into its slot, so subsequent iterations
// remove arbitrary (but existing) nodes -- intentional-looking stress, but
// worth confirming.
#[bench]
fn bench_remove(b: &mut test::Bencher) {
    // removal is very slow in a big graph.
    // and this one doesn't even have many nodes.
    let mut og = OGraph::new();
    let fst = og.add_node(0i);
    let mut prev = fst;
    for x in range(1, 1250) {
        let n = og.add_node(x);
        og.add_edge(prev, n, ());
        prev = n;
    }
    //println!("{}", og);
    b.iter(|| {
        for _ in range(0, 100i) {
            og.remove_node(fst);
        }
    })
}
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <mail@beyermatthias.de> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::collections::HashMap;
use std::ops::Drop;
use std::path::PathBuf;
use std::result::Result as RResult;
use std::sync::Arc;
use std::sync::RwLock;
use std::collections::BTreeMap;
use std::io::Read;
use std::convert::From;
use std::convert::Into;
use std::sync::Mutex;
use std::ops::Deref;
use std::ops::DerefMut;
use std::fmt::Formatter;
use std::fmt::Debug;
use std::fmt::Error as FMTError;
use toml::{Table, Value};
use regex::Regex;
use glob::glob;
use walkdir::WalkDir;
use walkdir::Iter as WalkDirIter;
use error::{ParserErrorKind, ParserError};
use error::{StoreError as SE, StoreErrorKind as SEK};
use error::MapErrInto;
use storeid::{IntoStoreId, StoreId, StoreIdIterator};
use file_abstraction::FileAbstraction;
use hook::aspect::Aspect;
use hook::error::HookErrorKind;
use hook::result::HookResult;
use hook::accessor::{ MutableHookDataAccessor,
StoreIdAccessor};
use hook::position::HookPosition;
use hook::Hook;
use libimagerror::into::IntoError;
use libimagerror::trace::trace_error;
use libimagutil::iter::FoldResult;
use libimagutil::debug_result::*;
use self::glob_store_iter::*;
/// The Result Type returned by any interaction with the store that could fail
pub type Result<T> = RResult<T, SE>;
/// Borrow state of a cached store entry.
#[derive(Debug, PartialEq)]
enum StoreEntryStatus {
    Present,
    Borrowed
}
/// A store entry, depending on the option type it is either borrowed currently
/// or not.
#[derive(Debug)]
struct StoreEntry {
    /// Identifier of the entry inside the store.
    id: StoreId,
    /// Backing file handle/abstraction for the entry's on-disk content.
    file: FileAbstraction,
    /// Whether the entry is currently handed out as a FileLockEntry.
    status: StoreEntryStatus,
}
/// An object yielded while walking the store: either an entry id or a
/// directory ("collection") path.
pub enum StoreObject {
    Id(StoreId),
    Collection(PathBuf),
}
/// Directory walker over one module's subtree of the store.
pub struct Walk {
    /// Store root, used as the base when building StoreIds.
    store_path: PathBuf,
    dirwalker: WalkDirIter,
}
impl Walk {
    /// Build a walker over `<store_path>/<mod_name>`.
    ///
    /// The clone is taken *before* pushing `mod_name`, so `store_path`
    /// keeps the store root while the walker descends into the module dir.
    fn new(mut store_path: PathBuf, mod_name: &str) -> Walk {
        let pb = store_path.clone();
        store_path.push(mod_name);
        Walk {
            store_path: pb,
            dirwalker: WalkDir::new(store_path).into_iter(),
        }
    }
}
// Expose the underlying walkdir iterator through Deref.
// NOTE(review): Deref to a non-smart-pointer target is usually discouraged;
// kept as-is since callers may rely on it.
impl ::std::ops::Deref for Walk {
    type Target = WalkDirIter;
    fn deref(&self) -> &Self::Target {
        &self.dirwalker
    }
}
impl Iterator for Walk {
    type Item = StoreObject;
    /// Yield the next directory as a Collection or the next file as an Id.
    ///
    /// Files whose path cannot be turned into a StoreId are logged and
    /// skipped; a walker error ends the iteration. Entries that are neither
    /// file nor directory (e.g. symlinks) are silently skipped.
    fn next(&mut self) -> Option<Self::Item> {
        while let Some(something) = self.dirwalker.next() {
            match something {
                Ok(next) => if next.file_type().is_dir() {
                    return Some(StoreObject::Collection(next.path().to_path_buf()))
                } else if next.file_type().is_file() {
                    let n = next.path().to_path_buf();
                    let sid = match StoreId::new(Some(self.store_path.clone()), n) {
                        Err(e) => {
                            trace_error(&e);
                            continue;
                        },
                        Ok(o) => o,
                    };
                    return Some(StoreObject::Id(sid))
                },
                Err(e) => {
                    warn!("Error in Walker");
                    debug!("{:?}", e);
                    return None;
                }
            }
        }
        return None;
    }
}
impl StoreEntry {
    /// Create a non-borrowed entry whose file content has not been loaded
    /// yet (FileAbstraction::Absent).
    fn new(id: StoreId) -> Result<StoreEntry> {
        let pb = try!(id.clone().into_pathbuf());
        Ok(StoreEntry {
            id: id,
            file: FileAbstraction::Absent(pb),
            status: StoreEntryStatus::Present,
        })
    }
    /// The entry is currently borrowed, meaning that some thread is currently
    /// mutating it
    fn is_borrowed(&self) -> bool {
        self.status == StoreEntryStatus::Borrowed
    }
    /// Parse the entry from its backing file, or return a fresh Entry if
    /// the file doesn't exist yet. Fails if the entry is already borrowed.
    fn get_entry(&mut self) -> Result<Entry> {
        if !self.is_borrowed() {
            let file = self.file.get_file_content();
            if let Err(err) = file {
                // A missing file is not an error here: the entry simply
                // hasn't been written yet.
                if err.err_type() == SEK::FileNotFound {
                    Ok(Entry::new(self.id.clone()))
                } else {
                    Err(err)
                }
            } else {
                // TODO:
                let entry = Entry::from_reader(self.id.clone(), &mut file.unwrap());
                entry
            }
        } else {
            Err(SE::new(SEK::EntryAlreadyBorrowed, None))
        }
    }
    /// Serialize `entry` back to the backing file.
    ///
    /// Only borrowed entries are written: an un-borrowed entry has no
    /// pending modifications, so the write is a silent no-op.
    fn write_entry(&mut self, entry: &Entry) -> Result<()> {
        if self.is_borrowed() {
            assert_eq!(self.id, entry.location);
            self.file.write_file_content(entry.to_str().as_bytes())
                .map_err_into(SEK::FileError)
                .map(|_| ())
        } else {
            Ok(())
        }
    }
}
/// The Store itself, through this object one can interact with IMAG's entries
pub struct Store {
    /// Root directory of the store on disk.
    location: PathBuf,
    /**
     * Configuration object of the store
     */
    configuration: Option<Value>,
    /*
     * Registered hooks, one aspect list per hook position.
     * Each list is shared behind Arc<Mutex<..>> so FileLockEntry drop
     * handlers and store methods can execute hooks concurrently.
     */
    store_unload_aspects : Arc<Mutex<Vec<Aspect>>>,
    pre_create_aspects : Arc<Mutex<Vec<Aspect>>>,
    post_create_aspects : Arc<Mutex<Vec<Aspect>>>,
    pre_retrieve_aspects : Arc<Mutex<Vec<Aspect>>>,
    post_retrieve_aspects : Arc<Mutex<Vec<Aspect>>>,
    pre_update_aspects : Arc<Mutex<Vec<Aspect>>>,
    post_update_aspects : Arc<Mutex<Vec<Aspect>>>,
    pre_delete_aspects : Arc<Mutex<Vec<Aspect>>>,
    post_delete_aspects : Arc<Mutex<Vec<Aspect>>>,
    pre_move_aspects : Arc<Mutex<Vec<Aspect>>>,
    post_move_aspects : Arc<Mutex<Vec<Aspect>>>,
    /**
     * Internal Path->File cache map
     *
     * Caches the files, so they remain flock()ed
     *
     * Could be optimized for a threadsafe HashMap
     */
    entries: Arc<RwLock<HashMap<StoreId, StoreEntry>>>,
}
impl Store {
/// Create a new Store object
///
/// Validates the configuration, creates the store directory if allowed
/// (and missing), builds the hook aspect lists for every hook position,
/// and assembles the Store with an empty entry cache.
///
/// NOTE(review): the eleven aspect-building blocks below are identical
/// except for the name-getter called; a helper would remove the
/// duplication (left untouched in this documentation-only pass).
pub fn new(location: PathBuf, store_config: Option<Value>) -> Result<Store> {
    use configuration::*;
    debug!("Validating Store configuration");
    let _ = try!(config_is_valid(&store_config).map_err_into(SEK::ConfigurationError));
    debug!("Building new Store object");
    if !location.exists() {
        // Creating the store dir implicitly must be allowed by config.
        if !config_implicit_store_create_allowed(store_config.as_ref()) {
            warn!("Implicitely creating store directory is denied");
            warn!(" -> Either because configuration does not allow it");
            warn!(" -> or because there is no configuration");
            return Err(SEK::CreateStoreDirDenied.into_error())
                .map_err_into(SEK::FileError)
                .map_err_into(SEK::IoError);
        }
        try!(FileAbstraction::create_dir_all(&location)
             .map_err_into(SEK::StorePathCreate)
             .map_dbg_err_str("Failed"));
    } else if location.is_file() {
        debug!("Store path exists as file");
        return Err(SEK::StorePathExists.into_error());
    }
    let store_unload_aspects = get_store_unload_aspect_names(&store_config)
        .into_iter().map(|n| {
            let cfg = AspectConfig::get_for(&store_config, n.clone());
            Aspect::new(n, cfg)
        }).collect();
    let pre_create_aspects = get_pre_create_aspect_names(&store_config)
        .into_iter().map(|n| {
            let cfg = AspectConfig::get_for(&store_config, n.clone());
            Aspect::new(n, cfg)
        }).collect();
    let post_create_aspects = get_post_create_aspect_names(&store_config)
        .into_iter().map(|n| {
            let cfg = AspectConfig::get_for(&store_config, n.clone());
            Aspect::new(n, cfg)
        }).collect();
    let pre_retrieve_aspects = get_pre_retrieve_aspect_names(&store_config)
        .into_iter().map(|n| {
            let cfg = AspectConfig::get_for(&store_config, n.clone());
            Aspect::new(n, cfg)
        }).collect();
    let post_retrieve_aspects = get_post_retrieve_aspect_names(&store_config)
        .into_iter().map(|n| {
            let cfg = AspectConfig::get_for(&store_config, n.clone());
            Aspect::new(n, cfg)
        }).collect();
    let pre_update_aspects = get_pre_update_aspect_names(&store_config)
        .into_iter().map(|n| {
            let cfg = AspectConfig::get_for(&store_config, n.clone());
            Aspect::new(n, cfg)
        }).collect();
    let post_update_aspects = get_post_update_aspect_names(&store_config)
        .into_iter().map(|n| {
            let cfg = AspectConfig::get_for(&store_config, n.clone());
            Aspect::new(n, cfg)
        }).collect();
    let pre_delete_aspects = get_pre_delete_aspect_names(&store_config)
        .into_iter().map(|n| {
            let cfg = AspectConfig::get_for(&store_config, n.clone());
            Aspect::new(n, cfg)
        }).collect();
    let post_delete_aspects = get_post_delete_aspect_names(&store_config)
        .into_iter().map(|n| {
            let cfg = AspectConfig::get_for(&store_config, n.clone());
            Aspect::new(n, cfg)
        }).collect();
    let pre_move_aspects = get_pre_move_aspect_names(&store_config)
        .into_iter().map(|n| {
            let cfg = AspectConfig::get_for(&store_config, n.clone());
            Aspect::new(n, cfg)
        }).collect();
    let post_move_aspects = get_post_move_aspect_names(&store_config)
        .into_iter().map(|n| {
            let cfg = AspectConfig::get_for(&store_config, n.clone());
            Aspect::new(n, cfg)
        }).collect();
    let store = Store {
        location: location.clone(),
        configuration: store_config,
        store_unload_aspects : Arc::new(Mutex::new(store_unload_aspects)),
        pre_create_aspects : Arc::new(Mutex::new(pre_create_aspects)),
        post_create_aspects : Arc::new(Mutex::new(post_create_aspects)),
        pre_retrieve_aspects : Arc::new(Mutex::new(pre_retrieve_aspects)),
        post_retrieve_aspects : Arc::new(Mutex::new(post_retrieve_aspects)),
        pre_update_aspects : Arc::new(Mutex::new(pre_update_aspects)),
        post_update_aspects : Arc::new(Mutex::new(post_update_aspects)),
        pre_delete_aspects : Arc::new(Mutex::new(pre_delete_aspects)),
        post_delete_aspects : Arc::new(Mutex::new(post_delete_aspects)),
        pre_move_aspects : Arc::new(Mutex::new(pre_move_aspects)),
        post_move_aspects : Arc::new(Mutex::new(post_move_aspects)),
        entries: Arc::new(RwLock::new(HashMap::new())),
    };
    debug!("Store building succeeded");
    debug!("------------------------");
    debug!("{:?}", store);
    debug!("------------------------");
    Ok(store)
}
/// Get the store configuration, if one was supplied at construction time.
pub fn config(&self) -> Option<&Value> {
    self.configuration.as_ref()
}
/// Verify the store.
///
/// This function is not intended to be called by normal programs but only by `imag-store`.
///
/// NOTE(review): the map closure always yields `true` (failures are only
/// logged), so `.all(|b| b)` can never return false -- the return value
/// does not reflect verification failures. Confirm whether that is
/// intended before relying on it.
#[cfg(feature = "verify")]
pub fn verify(&self) -> bool {
    info!("Header | Content length | Path");
    info!("-------+----------------+-----");
    WalkDir::new(self.location.clone())
        .into_iter()
        .map(|res| {
            match res {
                Ok(dent) => {
                    if dent.file_type().is_file() {
                        match self.get(PathBuf::from(dent.path())) {
                            Ok(Some(fle)) => {
                                let p = fle.get_location();
                                let content_len = fle.get_content().len();
                                let header = if fle.get_header().verify().is_ok() {
                                    "ok"
                                } else {
                                    "broken"
                                };
                                info!("{: >6} | {: >14} | {:?}", header, content_len, p.deref());
                            },
                            Ok(None) => {
                                info!("{: >6} | {: >14} | {:?}", "?", "couldn't load", dent.path());
                            },
                            Err(e) => {
                                debug!("{:?}", e);
                            },
                        }
                    } else {
                        info!("{: >6} | {: >14} | {:?}", "?", "<no file>", dent.path());
                    }
                },
                Err(e) => {
                    debug!("{:?}", e);
                },
            }
            true
        })
        .all(|b| b)
}
/// Creates the Entry at the given location (inside the entry)
///
/// Runs the pre-create hooks, inserts a Borrowed cache entry (failing if
/// one already exists), then runs the post-create hooks on the new
/// FileLockEntry.
pub fn create<'a, S: IntoStoreId>(&'a self, id: S) -> Result<FileLockEntry<'a>> {
    let id = try!(id.into_storeid()).with_base(self.path().clone());
    if let Err(e) = self.execute_hooks_for_id(self.pre_create_aspects.clone(), &id) {
        return Err(e)
            .map_err_into(SEK::PreHookExecuteError)
            .map_err_into(SEK::HookExecutionError)
            .map_err_into(SEK::CreateCallError)
    }
    {
        // Scope the write lock so it is released before hook execution.
        let mut hsmap = match self.entries.write() {
            Err(_) => return Err(SEK::LockPoisoned.into_error()).map_err_into(SEK::CreateCallError),
            Ok(s) => s,
        };
        if hsmap.contains_key(&id) {
            return Err(SEK::EntryAlreadyExists.into_error()).map_err_into(SEK::CreateCallError);
        }
        hsmap.insert(id.clone(), {
            // The new entry is handed out immediately, so mark it Borrowed.
            let mut se = try!(StoreEntry::new(id.clone()));
            se.status = StoreEntryStatus::Borrowed;
            se
        });
    }
    let mut fle = FileLockEntry::new(self, Entry::new(id));
    self.execute_hooks_for_mut_file(self.post_create_aspects.clone(), &mut fle)
        .map_err_into(SEK::PostHookExecuteError)
        .map_err_into(SEK::HookExecutionError)
        .map_err_into(SEK::CreateCallError)
        .map(|_| fle)
}
/// Borrow a given Entry. When the `FileLockEntry` is either `update`d or
/// dropped, the new Entry is written to disk
///
/// Implicitely creates a entry in the store if there is no entry with the id `id`. For a
/// non-implicitely-create look at `Store::get`.
pub fn retrieve<'a, S: IntoStoreId>(&'a self, id: S) -> Result<FileLockEntry<'a>> {
    let id = try!(id.into_storeid()).with_base(self.path().clone());
    if let Err(e) = self.execute_hooks_for_id(self.pre_retrieve_aspects.clone(), &id) {
        return Err(e)
            .map_err_into(SEK::PreHookExecuteError)
            .map_err_into(SEK::HookExecutionError)
            .map_err_into(SEK::RetrieveCallError)
    }
    let entry = try!({
        self.entries
            .write()
            .map_err(|_| SE::new(SEK::LockPoisoned, None))
            .and_then(|mut es| {
                // Insert a fresh cache entry if none exists, then parse
                // (or create) the Entry and mark the slot Borrowed.
                let new_se = try!(StoreEntry::new(id.clone()));
                let mut se = es.entry(id.clone()).or_insert(new_se);
                let entry = se.get_entry();
                se.status = StoreEntryStatus::Borrowed;
                entry
            })
            .map_err_into(SEK::RetrieveCallError)
    });
    let mut fle = FileLockEntry::new(self, entry);
    self.execute_hooks_for_mut_file(self.post_retrieve_aspects.clone(), &mut fle)
        .map_err_into(SEK::PostHookExecuteError)
        .map_err_into(SEK::HookExecutionError)
        .map_err_into(SEK::RetrieveCallError)
        .and(Ok(fle))
}
/// Get an entry from the store if it exists.
///
/// This executes the {pre,post}_retrieve_aspects hooks.
///
/// Returns `Ok(None)` when the id is neither cached in memory nor present on
/// the filesystem; otherwise delegates to `Store::retrieve` (which will not
/// implicitly create in that case, since the entry exists).
pub fn get<'a, S: IntoStoreId + Clone>(&'a self, id: S) -> Result<Option<FileLockEntry<'a>>> {
    let id = try!(id.into_storeid()).with_base(self.path().clone());

    // Only the read lock is needed for the existence check.
    let exists = try!(self.entries
        .read()
        .map(|map| map.contains_key(&id))
        .map_err(|_| SE::new(SEK::LockPoisoned, None))
        .map_err_into(SEK::GetCallError)
    );

    if !exists && !id.exists() {
        debug!("Does not exist in internal cache or filesystem: {:?}", id);
        return Ok(None);
    }

    self.retrieve(id).map(Some).map_err_into(SEK::GetCallError)
}
/// Iterate over all StoreIds for one module name
///
/// Builds the glob pattern `<store-path>/<mod_name>/**/*` and wraps all
/// matches in a `StoreIdIterator`.
pub fn retrieve_for_module(&self, mod_name: &str) -> Result<StoreIdIterator> {
    let mut base = self.path().clone();
    base.push(mod_name);

    base.to_str()
        .ok_or(SE::new(SEK::EncodingError, None))
        .and_then(|p| {
            let pattern = format!("{}/**/*", p);
            debug!("glob()ing with '{}'", pattern);
            glob(&pattern[..]).map_err_into(SEK::GlobError)
        })
        .map(|paths| GlobStoreIdIterator::new(paths, self.path().clone()).into())
        .map_err_into(SEK::GlobError)
        .map_err_into(SEK::RetrieveForModuleCallError)
}
/// Walk the store tree for the module.
///
/// Returns a `Walk` iterator rooted at `<store-path>/<mod_name>`.
pub fn walk<'a>(&'a self, mod_name: &str) -> Walk {
    let base = self.path().clone();
    Walk::new(base, mod_name)
}
/// Consume the `FileLockEntry` and write it to disk.
///
/// Does not change the presence status of the cached entry (that happens
/// only when the `FileLockEntry` is dropped).
pub fn update<'a>(&'a self, mut entry: FileLockEntry<'a>) -> Result<()> {
    let res = self._update(&mut entry, false);
    res.map_err_into(SEK::UpdateCallError)
}
/// Internal method to write to the filesystem store.
///
/// Runs pre-update hooks, verifies the entry's header, writes it through the
/// owning `StoreEntry`, optionally flips the cache slot back to `Present`
/// (used by the `Drop` impl of `FileLockEntry`), then runs post-update hooks.
///
/// # Assumptions
/// This method assumes that entry is dropped _right after_ the call, hence
/// it is not public.
fn _update<'a>(&'a self, mut entry: &mut FileLockEntry<'a>, modify_presence: bool) -> Result<()> {
    let _ = try!(self.execute_hooks_for_mut_file(self.pre_update_aspects.clone(), &mut entry)
        .map_err_into(SEK::PreHookExecuteError)
        .map_err_into(SEK::HookExecutionError)
        .map_err_into(SEK::UpdateCallError)
    );

    let mut hsmap = match self.entries.write() {
        Err(_) => return Err(SE::new(SEK::LockPoisoned, None)),
        Ok(e) => e,
    };

    let mut se = try!(hsmap.get_mut(&entry.location).ok_or(SE::new(SEK::IdNotFound, None)));

    // Only a FileLockEntry can reach this code path, and handing one out
    // always marks the slot Borrowed — anything else is a logic error.
    assert!(se.is_borrowed(), "Tried to update a non borrowed entry.");

    debug!("Verifying Entry");
    try!(entry.entry.verify());

    debug!("Writing Entry");
    try!(se.write_entry(&entry.entry));
    if modify_presence {
        // The FileLockEntry is being dropped: release the borrow.
        se.status = StoreEntryStatus::Present;
    }

    self.execute_hooks_for_mut_file(self.post_update_aspects.clone(), &mut entry)
        .map_err_into(SEK::PostHookExecuteError)
        .map_err_into(SEK::HookExecutionError)
        .map_err_into(SEK::UpdateCallError)
}
/// Retrieve a copy of a given entry, this cannot be used to mutate
/// the one on disk
///
/// Reads the entry fresh from disk via a temporary `StoreEntry`; refuses to
/// copy an entry that is currently borrowed (its in-memory state may be
/// newer than the file).
pub fn retrieve_copy<S: IntoStoreId>(&self, id: S) -> Result<Entry> {
    let id = try!(id.into_storeid()).with_base(self.path().clone());
    // The write lock is taken (not read) so the borrow status cannot change
    // under us while we read the file.
    let entries = match self.entries.write() {
        Err(_) => {
            return Err(SE::new(SEK::LockPoisoned, None))
                .map_err_into(SEK::RetrieveCopyCallError);
        },
        Ok(e) => e,
    };

    // if the entry is currently modified by the user, we cannot drop it
    if entries.get(&id).map(|e| e.is_borrowed()).unwrap_or(false) {
        return Err(SE::new(SEK::IdLocked, None)).map_err_into(SEK::RetrieveCopyCallError);
    }

    try!(StoreEntry::new(id)).get_entry()
}
/// Delete an entry
///
/// Runs pre-delete hooks, removes the entry from the cache and the backing
/// file from disk (cache first, so a failed file removal never leaves a
/// cached-but-deleted entry), then runs post-delete hooks.
pub fn delete<S: IntoStoreId>(&self, id: S) -> Result<()> {
    let id = try!(id.into_storeid()).with_base(self.path().clone());
    if let Err(e) = self.execute_hooks_for_id(self.pre_delete_aspects.clone(), &id) {
        return Err(e)
            .map_err_into(SEK::PreHookExecuteError)
            .map_err_into(SEK::HookExecutionError)
            .map_err_into(SEK::DeleteCallError)
    }

    { // scope the write lock so it is released before the post-delete hooks
        let mut entries = match self.entries.write() {
            Err(_) => return Err(SE::new(SEK::LockPoisoned, None))
                .map_err_into(SEK::DeleteCallError),
            Ok(e) => e,
        };

        // if the entry is currently modified by the user, we cannot drop it
        match entries.get(&id) {
            None => {
                return Err(SEK::FileNotFound.into_error()).map_err_into(SEK::DeleteCallError)
            },
            Some(e) => if e.is_borrowed() {
                return Err(SE::new(SEK::IdLocked, None)).map_err_into(SEK::DeleteCallError)
            }
        }

        // remove the entry first, then the file
        entries.remove(&id);
        let pb = try!(id.clone().with_base(self.path().clone()).into_pathbuf());
        if let Err(e) = FileAbstraction::remove_file(&pb) {
            return Err(SEK::FileError.into_error_with_cause(Box::new(e)))
                .map_err_into(SEK::DeleteCallError);
        }
    }

    self.execute_hooks_for_id(self.post_delete_aspects.clone(), &id)
        .map_err_into(SEK::PostHookExecuteError)
        .map_err_into(SEK::HookExecutionError)
        .map_err_into(SEK::DeleteCallError)
}
/// Save a copy of the Entry in another place
/// Executes the post_move_aspects for the new id
pub fn save_to(&self, entry: &FileLockEntry, new_id: StoreId) -> Result<()> {
    let remove_original = false; // keep the source entry in place
    self.save_to_other_location(entry, new_id, remove_original)
}
/// Save an Entry in another place
/// Removes the original entry
/// Executes the post_move_aspects for the new id
pub fn save_as(&self, entry: FileLockEntry, new_id: StoreId) -> Result<()> {
    let remove_original = true; // the source entry is consumed and deleted
    self.save_to_other_location(&entry, new_id, remove_original)
}
/// Copy `entry` to `new_id` on the filesystem, optionally removing the old
/// file, then run the post-move hooks for the new id.
///
/// Fails if an entry is already cached under `new_id` or the cache lock is
/// poisoned.
fn save_to_other_location(&self, entry: &FileLockEntry, new_id: StoreId, remove_old: bool)
    -> Result<()>
{
    let new_id = new_id.with_base(self.path().clone());

    { // scope the guard: the lock is only needed for the duplicate check.
        // NOTE(fix): replaced the `is_err()` + `unwrap()` pair with a single
        // `match` — the old code checked and then unwrapped the same Result.
        let hsmap = match self.entries.write() {
            Err(_) => return Err(SE::new(SEK::LockPoisoned, None)).map_err_into(SEK::MoveCallError),
            Ok(m) => m,
        };

        if hsmap.contains_key(&new_id) {
            return Err(SE::new(SEK::EntryAlreadyExists, None)).map_err_into(SEK::MoveCallError)
        }
    }

    let old_id = entry.get_location().clone();
    let old_id_as_path = try!(old_id.clone().with_base(self.path().clone()).into_pathbuf());
    let new_id_as_path = try!(new_id.clone().with_base(self.path().clone()).into_pathbuf());
    FileAbstraction::copy(&old_id_as_path, &new_id_as_path)
        .and_then(|_| {
            if remove_old {
                FileAbstraction::remove_file(&old_id_as_path)
            } else {
                Ok(())
            }
        })
        .map_err_into(SEK::FileError)
        .and_then(|_| self.execute_hooks_for_id(self.post_move_aspects.clone(), &new_id)
            .map_err_into(SEK::PostHookExecuteError)
            .map_err_into(SEK::HookExecutionError))
        .map_err_into(SEK::MoveCallError)
}
/// Move an entry without loading
///
/// This function moves an entry from one path to another.
///
/// Generally, this function shouldn't be used by library authors, if they "just" want to move
/// something around. A library for moving entries while caring about meta-data and links.
///
/// # Errors
///
/// This function returns an error in certain cases:
///
/// * If pre-move-hooks error (if they return an error which indicates that the action should be
///   aborted)
/// * If the about-to-be-moved entry is borrowed
/// * If the lock on the internal data structure cannot be aquired
/// * If the new path already exists
/// * If the about-to-be-moved entry does not exist
/// * If the FS-operation failed
/// * If the post-move-hooks error (though the operation has succeeded then).
///
/// # Warnings
///
/// This should be used with _great_ care, as moving an entry from `a` to `b` might result in
/// dangling links (see below).
///
/// ## Moving linked entries
///
/// If the entry which is moved is linked to another entry, these links get invalid (but we do
/// not detect this here). As links are always two-way-links, so `a` is not only linked to `b`,
/// but also the other way round, moving `b` to `c` results in the following scenario:
///
/// * `a` links to `b`, which does not exist anymore.
/// * `c` links to `a`, which does exist.
///
/// So the link is _partly dangling_, so to say.
///
pub fn move_by_id(&self, old_id: StoreId, new_id: StoreId) -> Result<()> {
    let new_id = new_id.with_base(self.path().clone());
    let old_id = old_id.with_base(self.path().clone());

    if let Err(e) = self.execute_hooks_for_id(self.pre_move_aspects.clone(), &old_id) {
        return Err(e)
            .map_err_into(SEK::PreHookExecuteError)
            .map_err_into(SEK::HookExecutionError)
            .map_err_into(SEK::MoveByIdCallError)
    }

    { // scope the cache lock: it is released before the post-move hooks run
        let mut hsmap = match self.entries.write() {
            Err(_) => return Err(SE::new(SEK::LockPoisoned, None)),
            Ok(m) => m,
        };

        if hsmap.contains_key(&new_id) {
            return Err(SEK::EntryAlreadyExists.into_error());
        }

        // if we do not have an entry here, we fail in `FileAbstraction::rename()` below.
        // if we have one, but it is borrowed, we really should not rename it, as this might
        // lead to strange errors
        if hsmap.get(&old_id).map(|e| e.is_borrowed()).unwrap_or(false) {
            return Err(SEK::EntryAlreadyBorrowed.into_error());
        }

        let old_id_pb = try!(old_id.clone().with_base(self.path().clone()).into_pathbuf());
        let new_id_pb = try!(new_id.clone().with_base(self.path().clone()).into_pathbuf());

        match FileAbstraction::rename(&old_id_pb, &new_id_pb) {
            Err(e) => return Err(SEK::EntryRenameError.into_error_with_cause(Box::new(e))),
            Ok(_) => {
                debug!("Rename worked on filesystem");

                // assert enforced through check hsmap.contains_key(&new_id) above.
                // Should therefor never fail
                assert!(hsmap
                        .remove(&old_id)
                        .and_then(|mut entry| {
                            entry.id = new_id.clone();
                            hsmap.insert(new_id.clone(), entry)
                        }).is_none())
            }
        }
    }

    // NOTE(fix): this is the _post_-move phase — the old code re-ran the
    // `pre_move_aspects` here even though the error is reported as a
    // `PostHookExecuteError`; run the `post_move_aspects` instead.
    self.execute_hooks_for_id(self.post_move_aspects.clone(), &new_id)
        .map_err_into(SEK::PostHookExecuteError)
        .map_err_into(SEK::HookExecutionError)
        .map_err_into(SEK::MoveByIdCallError)
}
/// Gets the path where this store is on the disk.
///
/// Borrows the store's root location; callers clone if they need ownership.
pub fn path(&self) -> &PathBuf {
    &self.location
}
/// Register a `Hook` in the aspect named `aspect_name` at the given
/// `HookPosition`.
///
/// The hook's configuration (if any) is looked up in the store configuration
/// before it is handed to the aspect. Fails with `HookRegisterError` when the
/// aspect lock is poisoned or no aspect with that name exists at this
/// position.
pub fn register_hook(&mut self,
                     position: HookPosition,
                     aspect_name: &str,
                     mut h: Box<Hook>)
    -> Result<()>
{
    debug!("Registering hook: {:?}", h);
    debug!("     in position: {:?}", position);
    debug!("     with aspect: {:?}", aspect_name);

    // Select the aspect list belonging to the requested hook position.
    let guard = match position {
        HookPosition::StoreUnload  => self.store_unload_aspects.clone(),

        HookPosition::PreCreate    => self.pre_create_aspects.clone(),
        HookPosition::PostCreate   => self.post_create_aspects.clone(),
        HookPosition::PreRetrieve  => self.pre_retrieve_aspects.clone(),
        HookPosition::PostRetrieve => self.post_retrieve_aspects.clone(),
        HookPosition::PreUpdate    => self.pre_update_aspects.clone(),
        HookPosition::PostUpdate   => self.post_update_aspects.clone(),
        HookPosition::PreDelete    => self.pre_delete_aspects.clone(),
        HookPosition::PostDelete   => self.post_delete_aspects.clone(),
    };

    let mut guard = match guard.deref().lock().map_err(|_| SE::new(SEK::LockError, None)) {
        Err(e) => return Err(SEK::HookRegisterError.into_error_with_cause(Box::new(e))),
        Ok(g) => g,
    };

    for mut aspect in guard.deref_mut() {
        if aspect.name().clone() == aspect_name.clone() {
            debug!("Trying to find configuration for hook: {:?}", h);
            // A missing configuration is not an error: the hook simply runs
            // unconfigured (the result of set_config is intentionally ignored).
            self.get_config_for_hook(h.name()).map(|config| h.set_config(config));
            debug!("Trying to register hook in aspect: {:?} <- {:?}", aspect, h);
            aspect.register_hook(h);
            return Ok(());
        }
    }

    let annfe = SEK::AspectNameNotFoundError.into_error();
    Err(SEK::HookRegisterError.into_error_with_cause(Box::new(annfe)))
}
/// Look up the configuration for hook `name` in the store configuration's
/// `[hooks]` table.
///
/// Returns `None` if there is no configuration, no `hooks` section, or the
/// section/entry has the wrong type.
fn get_config_for_hook(&self, name: &str) -> Option<&Value> {
    match self.configuration {
        Some(Value::Table(ref tabl)) => {
            debug!("Trying to head 'hooks' section from {:?}", tabl);
            // NOTE(fix): `.map(..).unwrap_or(None)` collapsed into the
            // equivalent `.and_then(..)` (clippy lint).
            tabl.get("hooks")
                .and_then(|hook_section| {
                    debug!("Found hook section: {:?}", hook_section);
                    debug!("Reading section key: {:?}", name);
                    match *hook_section {
                        Value::Table(ref tabl) => tabl.get(name),
                        _ => None
                    }
                })
        },
        _ => None,
    }
}
/// Run every hook of the given aspects with the `StoreId` as argument.
///
/// The aspect list stays locked for the whole run; a poisoned lock is
/// reported as `HookExecutionError`. `fold_defresult` short-circuits on the
/// first failing aspect.
fn execute_hooks_for_id(&self,
                        aspects: Arc<Mutex<Vec<Aspect>>>,
                        id: &StoreId)
    -> HookResult<()>
{
    match aspects.lock() {
        Err(_) => return Err(HookErrorKind::HookExecutionError.into()),
        Ok(g) => g
    }.iter().fold_defresult(|aspect| {
        debug!("[Aspect][exec]: {:?}", aspect);
        // Aspects implement StoreIdAccessor for the id-only hook flavour.
        (aspect as &StoreIdAccessor).access(id)
    }).map_err(Box::new)
        .map_err(|e| HookErrorKind::HookExecutionError.into_error_with_cause(e))
}
/// Run every hook of the given aspects with mutable access to the
/// `FileLockEntry` (hooks may modify the entry in place).
///
/// Same locking and error semantics as `execute_hooks_for_id`.
fn execute_hooks_for_mut_file(&self,
                              aspects: Arc<Mutex<Vec<Aspect>>>,
                              fle: &mut FileLockEntry)
    -> HookResult<()>
{
    match aspects.lock() {
        Err(_) => return Err(HookErrorKind::HookExecutionError.into()),
        Ok(g) => g
    }.iter().fold_defresult(|aspect| {
        debug!("[Aspect][exec]: {:?}", aspect);
        aspect.access_mut(fle)
    }).map_err(Box::new)
        .map_err(|e| HookErrorKind::HookExecutionError.into_error_with_cause(e))
}
}
impl Debug for Store {

    /// Multi-line debug dump of the store: location, configuration, all
    /// registered aspect lists and the entry cache.
    fn fmt(&self, fmt: &mut Formatter) -> RResult<(), FMTError> {
        try!(write!(fmt, " --- Store ---\n"));
        try!(write!(fmt, "\n"));
        try!(write!(fmt, " - location               : {:?}\n", self.location));
        try!(write!(fmt, " - configuration          : {:?}\n", self.configuration));
        try!(write!(fmt, " - pre_create_aspects     : {:?}\n", self.pre_create_aspects    ));
        try!(write!(fmt, " - post_create_aspects    : {:?}\n", self.post_create_aspects   ));
        try!(write!(fmt, " - pre_retrieve_aspects   : {:?}\n", self.pre_retrieve_aspects  ));
        try!(write!(fmt, " - post_retrieve_aspects  : {:?}\n", self.post_retrieve_aspects ));
        try!(write!(fmt, " - pre_update_aspects     : {:?}\n", self.pre_update_aspects    ));
        try!(write!(fmt, " - post_update_aspects    : {:?}\n", self.post_update_aspects   ));
        try!(write!(fmt, " - pre_delete_aspects     : {:?}\n", self.pre_delete_aspects    ));
        try!(write!(fmt, " - post_delete_aspects    : {:?}\n", self.post_delete_aspects   ));
        try!(write!(fmt, "\n"));
        try!(write!(fmt, "Entries:\n"));
        try!(write!(fmt, "{:?}", self.entries));
        try!(write!(fmt, "\n"));
        Ok(())
    }

}
impl Drop for Store {

    /**
     * Unlock all files on drop
     *
     * TODO: Unlock them
     */
    // Runs the store-unload hooks with a StoreId pointing at the store root.
    // Hook failures are only logged — drop() cannot return an error.
    fn drop(&mut self) {
        match StoreId::new(Some(self.location.clone()), PathBuf::from(".")) {
            Err(e) => {
                trace_error(&e);
                warn!("Cannot construct StoreId for Store to execute hooks!");
                warn!("Will close Store without executing hooks!");
            },
            Ok(store_id) => {
                if let Err(e) = self.execute_hooks_for_id(self.store_unload_aspects.clone(), &store_id) {
                    debug!("Store-load hooks execution failed. Cannot create store object.");
                    warn!("Store Unload Hook error: {:?}", e);
                }
            },
        };

        debug!("Dropping store");
    }

}
/// A struct that allows you to borrow an Entry
pub struct FileLockEntry<'a> {
    store: &'a Store, // the store the entry was borrowed from; written back on drop
    entry: Entry,     // the in-memory entry data
}
// NOTE(fix): removed the stray trailing comma in `impl<'a> FileLockEntry<'a, >`.
impl<'a> FileLockEntry<'a> {

    /// Tie `entry` to the `store` it was borrowed from.
    fn new(store: &'a Store, entry: Entry) -> FileLockEntry<'a> {
        FileLockEntry {
            store: store,
            entry: entry,
        }
    }

}
impl<'a> Debug for FileLockEntry<'a> {

    /// Debug output shows only the store location, not the entry itself.
    fn fmt(&self, fmt: &mut Formatter) -> RResult<(), FMTError> {
        let location = self.store.location.to_str().unwrap_or("Unknown Path");
        write!(fmt, "FileLockEntry(Store = {})", location)
    }

}
impl<'a> Deref for FileLockEntry<'a> {
    type Target = Entry;

    /// Immutable access to the wrapped `Entry`.
    fn deref(&self) -> &Self::Target {
        let inner = &self.entry;
        inner
    }
}
impl<'a> DerefMut for FileLockEntry<'a> {

    /// Mutable access to the wrapped `Entry`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        let inner = &mut self.entry;
        inner
    }

}
#[cfg(not(test))]
impl<'a> Drop for FileLockEntry<'a> {
    /// This will silently ignore errors, use `Store::update` if you want to catch the errors
    // Writes the entry back and releases the Borrowed status (second arg =
    // true flips the cache slot back to Present).
    fn drop(&mut self) {
        let _ = self.store._update(self, true);
    }
}
#[cfg(test)]
impl<'a> Drop for FileLockEntry<'a> {
    /// This will not silently ignore errors but prints the result of the _update() call for testing
    // Test builds trace the error so failing write-backs are visible in test output.
    fn drop(&mut self) {
        let _ = self.store._update(self, true).map_err(|e| trace_error(&e));
    }
}
/// `EntryContent` type
///
/// The content part of an entry is plain text, so a `String` alias suffices.
pub type EntryContent = String;
/// `EntryHeader`
///
/// This is basically a wrapper around `toml::Table` which provides convenience to the user of the
/// library.
#[derive(Debug, Clone)]
pub struct EntryHeader {
    header: Value, // always expected to be a Value::Table; verify() enforces this
}
/// Result type for header parsing operations.
pub type EntryResult<V> = RResult<V, ParserError>;

/// One component of a header path spec such as `"a.b.0"`:
/// either a table key (`Key`) or an array index (`Index`).
#[derive(Debug, Clone, PartialEq, Eq)]
enum Token {
    Key(String),
    Index(usize),
}
/**
 * Wrapper type around file header (TOML) object
 */
impl EntryHeader {

    /// Build a header containing only the default `[imag]` section.
    pub fn new() -> EntryHeader {
        EntryHeader {
            header: build_default_header()
        }
    }

    /// Borrow the underlying TOML value.
    pub fn header(&self) -> &Value {
        &self.header
    }

    /// Wrap a parsed TOML table (no verification here; see `parse()`).
    fn from_table(t: Table) -> EntryHeader {
        EntryHeader {
            header: Value::Table(t)
        }
    }

    /// Parse a TOML string into a verified `EntryHeader`.
    pub fn parse(s: &str) -> EntryResult<EntryHeader> {
        use toml::Parser;

        let mut parser = Parser::new(s);
        parser.parse()
            .ok_or(ParserErrorKind::TOMLParserErrors.into())
            .and_then(verify_header_consistency)
            .map(EntryHeader::from_table)
    }

    /// Verify that the header is a TOML table and structurally valid.
    pub fn verify(&self) -> Result<()> {
        match self.header {
            Value::Table(ref t) => verify_header(&t),
            _ => Err(SE::new(SEK::HeaderTypeFailure, None)),
        }
    }

    /**
     * Insert a header field by a string-spec
     *
     * ```ignore
     *      insert("something.in.a.field", Boolean(true));
     * ```
     *
     * If an array field was accessed which is _out of bounds_ of the array available, the element
     * is appended to the array.
     *
     * Inserts a Boolean in the section "something" -> "in" -> "a" -> "field"
     * A JSON equivalent would be
     *
     *  {
     *      something: {
     *          in: {
     *              a: {
     *                  field: true
     *              }
     *          }
     *      }
     *  }
     *
     * Returns true if header field was set, false if there is already a value
     */
    pub fn insert(&mut self, spec: &str, v: Value) -> Result<bool> {
        self.insert_with_sep(spec, '.', v)
    }

    pub fn insert_with_sep(&mut self, spec: &str, sep: char, v: Value) -> Result<bool> {
        let tokens = match EntryHeader::tokenize(spec, sep) {
            Err(e) => return Err(e),
            Ok(t) => t
        };

        let destination = match tokens.iter().last() {
            None => return Err(SE::new(SEK::HeaderPathSyntaxError, None)),
            Some(d) => d,
        };

        let path_to_dest = tokens[..(tokens.len() - 1)].into(); // N - 1 tokens

        // walk N-1 tokens
        let value = match EntryHeader::walk_header(&mut self.header, path_to_dest) {
            Err(e) => return Err(e),
            Ok(v) => v
        };

        // There is already an value at this place
        if EntryHeader::extract(value, destination).is_ok() {
            return Ok(false);
        }

        match *destination {
            Token::Key(ref s) => { // if the destination shall be an map key
                match *value {
                    /*
                     * Put it in there if we have a map
                     */
                    Value::Table(ref mut t) => {
                        t.insert(s.clone(), v);
                    }

                    /*
                     * Fail if there is no map here
                     */
                    _ => return Err(SE::new(SEK::HeaderPathTypeFailure, None)),
                }
            },

            Token::Index(_) => { // if the destination shall be an array
                match *value {
                    /*
                     * An in-bounds index would have been found by `extract()`
                     * above, so reaching this point means the index is out of
                     * bounds: append, as documented.
                     *
                     * NOTE(fix): the old code called `a.swap_remove(i)` when
                     * `a.len() < i`, which always panics — `swap_remove`
                     * requires an in-bounds index.
                     */
                    Value::Array(ref mut a) => a.push(v),

                    /*
                     * Fail if there is no array here
                     */
                    _ => return Err(SE::new(SEK::HeaderPathTypeFailure, None)),
                }
            },
        }

        Ok(true)
    }

    /**
     * Set a header field by a string-spec
     *
     * ```ignore
     *      set("something.in.a.field", Boolean(true));
     * ```
     *
     * Sets a Boolean in the section "something" -> "in" -> "a" -> "field"
     * A JSON equivalent would be
     *
     *  {
     *      something: {
     *          in: {
     *              a: {
     *                  field: true
     *              }
     *          }
     *      }
     *  }
     *
     * If there is already a value at this place, this value will be overridden and the old value
     * will be returned
     */
    pub fn set(&mut self, spec: &str, v: Value) -> Result<Option<Value>> {
        self.set_with_sep(spec, '.', v)
    }

    pub fn set_with_sep(&mut self, spec: &str, sep: char, v: Value) -> Result<Option<Value>> {
        let tokens = match EntryHeader::tokenize(spec, sep) {
            Err(e) => return Err(e),
            Ok(t) => t,
        };
        debug!("tokens = {:?}", tokens);

        let destination = match tokens.iter().last() {
            None => return Err(SE::new(SEK::HeaderPathSyntaxError, None)),
            Some(d) => d
        };
        debug!("destination = {:?}", destination);

        let path_to_dest = tokens[..(tokens.len() - 1)].into(); // N - 1 tokens

        // walk N-1 tokens
        let value = match EntryHeader::walk_header(&mut self.header, path_to_dest) {
            Err(e) => return Err(e),
            Ok(v) => v
        };
        debug!("walked value = {:?}", value);

        match *destination {
            Token::Key(ref s) => { // if the destination shall be an map key->value
                match *value {
                    /*
                     * Put it in there if we have a map
                     */
                    Value::Table(ref mut t) => {
                        debug!("Matched Key->Table");
                        return Ok(t.insert(s.clone(), v));
                    }

                    /*
                     * Fail if there is no map here
                     */
                    _ => {
                        debug!("Matched Key->NON-Table");
                        return Err(SE::new(SEK::HeaderPathTypeFailure, None));
                    }
                }
            },

            Token::Index(i) => { // if the destination shall be an array
                match *value {
                    Value::Array(ref mut a) => {
                        debug!("Matched Index->Array");
                        // NOTE(fix): the bounds check used to run _after_ the
                        // push, so setting index == len removed the value
                        // that was just appended and returned it as the "old"
                        // element. Check against the pre-push length instead.
                        if i < a.len() {
                            // Overwrite: push the new value, then swap it
                            // into place and return the previous element.
                            a.push(v);
                            debug!("Swap-Removing in Array {:?}[{:?}] <- {:?}", a, i, a[a.len()-1]);
                            return Ok(Some(a.swap_remove(i)));
                        }
                        // Out-of-bounds index: append; there was no old value.
                        a.push(v);
                        debug!("Appended");
                        return Ok(None);
                    },

                    /*
                     * Fail if there is no array here
                     */
                    _ => {
                        debug!("Matched Index->NON-Array");
                        return Err(SE::new(SEK::HeaderPathTypeFailure, None));
                    },
                }
            },
        }

        Ok(None)
    }

    /**
     * Read a header field by a string-spec
     *
     * ```ignore
     *      let value = read("something.in.a.field");
     * ```
     *
     * Reads a Value in the section "something" -> "in" -> "a" -> "field"
     * A JSON equivalent would be
     *
     *  {
     *      something: {
     *          in: {
     *              a: {
     *                  field: true
     *              }
     *          }
     *      }
     *  }
     *
     * If there is no a value at this place, None will be returned. This also holds true for Arrays
     * which are accessed at an index which is not yet there, even if the accessed index is much
     * larger than the array length.
     */
    pub fn read(&self, spec: &str) -> Result<Option<Value>> {
        self.read_with_sep(spec, '.')
    }

    pub fn read_with_sep(&self, spec: &str, splitchr: char) -> Result<Option<Value>> {
        let tokens = match EntryHeader::tokenize(spec, splitchr) {
            Err(e) => return Err(e),
            Ok(t) => t,
        };

        let mut header_clone = self.header.clone(); // we clone as READing is simpler this way

        // walk all tokens; a missing key simply means "no value here".
        match EntryHeader::walk_header(&mut header_clone, tokens) {
            Err(e) => match e.err_type() {
                // We cannot find the header key, as there is no path to it
                SEK::HeaderKeyNotFound => Ok(None),
                _ => Err(e),
            },
            Ok(v) => Ok(Some(v.clone())),
        }
    }

    /// Remove (and return) the value at the given string-spec, if any.
    pub fn delete(&mut self, spec: &str) -> Result<Option<Value>> {
        let tokens = match EntryHeader::tokenize(spec, '.') {
            Err(e) => return Err(e),
            Ok(t) => t
        };

        let destination = match tokens.iter().last() {
            None => return Err(SE::new(SEK::HeaderPathSyntaxError, None)),
            Some(d) => d
        };
        debug!("destination = {:?}", destination);

        let path_to_dest = tokens[..(tokens.len() - 1)].into(); // N - 1 tokens

        // walk N-1 tokens
        let mut value = match EntryHeader::walk_header(&mut self.header, path_to_dest) {
            Err(e) => return Err(e),
            Ok(v) => v
        };
        debug!("walked value = {:?}", value);

        match *destination {
            Token::Key(ref s) => { // if the destination shall be an map key->value
                match *value {
                    Value::Table(ref mut t) => {
                        debug!("Matched Key->Table, removing {:?}", s);
                        return Ok(t.remove(s));
                    },
                    _ => {
                        debug!("Matched Key->NON-Table");
                        return Err(SE::new(SEK::HeaderPathTypeFailure, None));
                    }
                }
            },

            Token::Index(i) => { // if the destination shall be an array
                match *value {
                    Value::Array(ref mut a) => {
                        // if the index is inside the array, we remove the
                        // element at this index; otherwise there is nothing
                        // to delete.
                        if a.len() > i {
                            debug!("Removing in Array {:?}[{:?}]", a, i);
                            return Ok(Some(a.remove(i)));
                        } else {
                            return Ok(None);
                        }
                    },
                    _ => {
                        debug!("Matched Index->NON-Array");
                        return Err(SE::new(SEK::HeaderPathTypeFailure, None));
                    },
                }
            },
        }

        Ok(None)
    }

    /// Split a spec like `"a.b.0"` into `Token`s; numeric components become
    /// `Token::Index`, everything else `Token::Key`.
    fn tokenize(spec: &str, splitchr: char) -> Result<Vec<Token>> {
        use std::str::FromStr;

        spec.split(splitchr)
            .map(|s| {
                usize::from_str(s)
                    .map(Token::Index)
                    .or_else(|_| Ok(Token::Key(String::from(s))))
            })
            .collect()
    }

    /// Follow `tokens` into `v`, returning a mutable reference to the value
    /// they address (or the first extraction error).
    fn walk_header(v: &mut Value, tokens: Vec<Token>) -> Result<&mut Value> {
        use std::vec::IntoIter;

        fn walk_iter<'a>(v: Result<&'a mut Value>, i: &mut IntoIter<Token>) -> Result<&'a mut Value> {
            let next = i.next();
            v.and_then(move |value| {
                if let Some(token) = next {
                    walk_iter(EntryHeader::extract(value, &token), i)
                } else {
                    Ok(value)
                }
            })
        }

        walk_iter(Ok(v), &mut tokens.into_iter())
    }

    /// Get a mutable reference to a table entry by key.
    fn extract_from_table<'a>(v: &'a mut Value, s: &str) -> Result<&'a mut Value> {
        match *v {
            Value::Table(ref mut t) => {
                t.get_mut(&s[..])
                    .ok_or(SE::new(SEK::HeaderKeyNotFound, None))
            },
            _ => Err(SE::new(SEK::HeaderPathTypeFailure, None)),
        }
    }

    /// Get a mutable reference to the `i`-th element of an array value.
    fn extract_from_array(v: &mut Value, i: usize) -> Result<&mut Value> {
        match *v {
            Value::Array(ref mut a) => {
                // NOTE(fix): the bound check used to be `a.len() < i`, which
                // let `i == a.len()` slip through and panic on the indexing
                // below.
                if i >= a.len() {
                    Err(SE::new(SEK::HeaderKeyNotFound, None))
                } else {
                    Ok(&mut a[i])
                }
            },
            _ => Err(SE::new(SEK::HeaderPathTypeFailure, None)),
        }
    }

    /// Dispatch a single `Token` to the matching extraction helper.
    fn extract<'a>(v: &'a mut Value, token: &Token) -> Result<&'a mut Value> {
        match *token {
            Token::Key(ref s)  => EntryHeader::extract_from_table(v, s),
            Token::Index(i)    => EntryHeader::extract_from_array(v, i),
        }
    }

}
impl Into<Table> for EntryHeader {

    /// Unwrap the inner TOML table.
    ///
    /// # Panics
    /// Panics if the header is not a `Value::Table` (an invariant
    /// `EntryHeader` normally upholds).
    fn into(self) -> Table {
        if let Value::Table(t) = self.header {
            t
        } else {
            panic!("EntryHeader is not a table!")
        }
    }

}
impl From<Table> for EntryHeader {

    /// Wrap an existing TOML table as a header (no verification happens here).
    fn from(t: Table) -> EntryHeader {
        let header = Value::Table(t);
        EntryHeader { header: header }
    }

}
/// Build the default header table: an `[imag]` section carrying the current
/// library version and an empty `links` array.
fn build_default_header() -> Value { // BTreeMap<String, Value>
    let mut imag_map = BTreeMap::<String, Value>::new();
    imag_map.insert(String::from("version"), Value::String(String::from(version!())));
    imag_map.insert(String::from("links"), Value::Array(vec![]));

    let mut top = BTreeMap::new();
    top.insert(String::from("imag"), Value::Table(imag_map));
    Value::Table(top)
}
/// Check the structural invariants of a header table: a `[imag]` main
/// section, a version entry inside it, and only tables at the top level.
fn verify_header(t: &Table) -> Result<()> {
    if !has_main_section(t) {
        return Err(SE::from(ParserErrorKind::MissingMainSection.into_error()));
    }
    if !has_imag_version_in_main_section(t) {
        return Err(SE::from(ParserErrorKind::MissingVersionInfo.into_error()));
    }
    if !has_only_tables(t) {
        debug!("Could not verify that it only has tables in its base table");
        return Err(SE::from(ParserErrorKind::NonTableInBaseTable.into_error()));
    }
    Ok(())
}
/// Verify a table and pass it through on success; on failure wrap the error
/// as a `HeaderInconsistency` parser error.
fn verify_header_consistency(t: Table) -> EntryResult<Table> {
    match verify_header(&t) {
        Ok(_)  => Ok(t),
        Err(e) => Err(ParserErrorKind::HeaderInconsistency.into_error_with_cause(Box::new(e))),
    }
}
/// True iff every top-level value in the header table is itself a table.
fn has_only_tables(t: &Table) -> bool {
    debug!("Verifying that table has only tables");
    t.iter().all(|(_, x)| match *x {
        Value::Table(_) => true,
        _ => false,
    })
}
/// True iff the header has an `[imag]` section and it is a table.
fn has_main_section(t: &Table) -> bool {
    // NOTE(fix): the former `contains_key` + `get` pair looked the key up
    // twice; a single `get` covers both the "absent" and "wrong type" cases.
    match t.get("imag") {
        Some(&Value::Table(_)) => true,
        _ => false,
    }
}
/// True iff `[imag].version` exists, is a string, and parses as semver.
fn has_imag_version_in_main_section(t: &Table) -> bool {
    use semver::Version;

    // NOTE(fix): this used to `unwrap()` the "imag" key and would panic when
    // the main section was missing; now it simply reports `false`. Behaviour
    // for present keys is unchanged.
    match t.get("imag") {
        Some(&Value::Table(ref sec)) => {
            match sec.get("version") {
                Some(&Value::String(ref s)) => Version::parse(&s[..]).is_ok(),
                Some(_) => false,
                None    => false,
            }
        },
        _ => false,
    }
}
/**
 * An Entry of the store
 *
 * Contains location, header and content part.
 */
#[derive(Debug, Clone)]
pub struct Entry {
    location: StoreId,    // where this entry lives inside the store
    header: EntryHeader,  // the TOML header between the `---` markers
    content: EntryContent, // the plain-text body after the header
}
impl Entry {

    /// Build an empty entry (default header, empty content) at `loc`.
    pub fn new(loc: StoreId) -> Entry {
        Entry {
            location: loc,
            header: EntryHeader::new(),
            content: EntryContent::new()
        }
    }

    /// Read the whole stream into memory and parse it as an entry.
    pub fn from_reader<S: IntoStoreId>(loc: S, file: &mut Read) -> Result<Entry> {
        let text = {
            let mut s = String::new();
            try!(file.read_to_string(&mut s));
            s
        };
        Self::from_str(loc, &text[..])
    }

    /// Parse the on-disk representation: a TOML header between `---` markers
    /// followed by the plain-text content.
    ///
    /// Fails with `MalformedEntry` when the marker structure is missing and
    /// propagates header parse errors.
    pub fn from_str<S: IntoStoreId>(loc: S, s: &str) -> Result<Entry> {
        debug!("Building entry from string");
        // (?smx): '.' matches newlines, ^/$ match line starts/ends, and
        // whitespace/comments inside the pattern are ignored.
        lazy_static! {
            static ref RE: Regex = Regex::new(r"(?smx)
                ^---$
                (?P<header>.*) # Header
                ^---$\n
                (?P<content>.*) # Content
            ").unwrap();
        }

        let matches = match RE.captures(s) {
            None => return Err(SE::new(SEK::MalformedEntry, None)),
            Some(s) => s,
        };

        let header = match matches.name("header") {
            None => return Err(SE::new(SEK::MalformedEntry, None)),
            Some(s) => s
        };

        // A missing content group simply means an empty body.
        let content = matches.name("content").unwrap_or("");

        debug!("Header and content found. Yay! Building Entry object now");
        Ok(Entry {
            location: try!(loc.into_storeid()),
            header: try!(EntryHeader::parse(header)),
            content: content.into(),
        })
    }

    /// Serialize back to the on-disk representation (inverse of `from_str`).
    pub fn to_str(&self) -> String {
        format!("---\n{header}---\n{content}",
                header  = ::toml::encode_str(&self.header.header),
                content = self.content)
    }

    /// The entry's id inside the store.
    pub fn get_location(&self) -> &StoreId {
        &self.location
    }

    /// Immutable access to the header.
    pub fn get_header(&self) -> &EntryHeader {
        &self.header
    }

    /// Mutable access to the header.
    pub fn get_header_mut(&mut self) -> &mut EntryHeader {
        &mut self.header
    }

    /// Immutable access to the content.
    pub fn get_content(&self) -> &EntryContent {
        &self.content
    }

    /// Mutable access to the content.
    pub fn get_content_mut(&mut self) -> &mut EntryContent {
        &mut self.content
    }

    /// Verify the header's structural invariants (see `EntryHeader::verify`).
    pub fn verify(&self) -> Result<()> {
        self.header.verify()
    }

}
// Iterator adapter turning `glob` match results into `StoreId`s.
mod glob_store_iter {
    use std::fmt::{Debug, Formatter};
    use std::fmt::Error as FmtError;
    use std::path::PathBuf;
    use glob::Paths;

    use storeid::StoreId;
    use storeid::StoreIdIterator;

    use error::StoreErrorKind as SEK;
    use error::MapErrInto;

    use libimagerror::trace::trace_error;

    /// Wraps a `glob::Paths` stream together with the store root, so matched
    /// paths can be converted back into store-relative `StoreId`s.
    pub struct GlobStoreIdIterator {
        store_path: PathBuf, // store root used to relativize matched paths
        paths: Paths,
    }

    impl Debug for GlobStoreIdIterator {

        // `glob::Paths` is not Debug, so only the type name is printed.
        fn fmt(&self, fmt: &mut Formatter) -> Result<(), FmtError> {
            write!(fmt, "GlobStoreIdIterator")
        }

    }

    impl Into<StoreIdIterator> for GlobStoreIdIterator {

        fn into(self) -> StoreIdIterator {
            StoreIdIterator::new(Box::new(self))
        }

    }

    impl GlobStoreIdIterator {

        pub fn new(paths: Paths, store_path: PathBuf) -> GlobStoreIdIterator {
            debug!("Create a GlobStoreIdIterator(store_path = {:?}, /* ... */)", store_path);

            GlobStoreIdIterator {
                store_path: store_path,
                paths: paths,
            }
        }

    }

    impl Iterator for GlobStoreIdIterator {
        type Item = StoreId;

        // NOTE: glob/StoreId errors are traced and swallowed (the failing
        // element is skipped via `ok()`), so iteration itself never fails.
        fn next(&mut self) -> Option<StoreId> {
            self.paths
                .next()
                .and_then(|o| {
                    debug!("GlobStoreIdIterator::next() => {:?}", o);
                    o.map_err_into(SEK::StoreIdHandlingError)
                        .and_then(|p| StoreId::from_full_path(&self.store_path, p))
                        .map_err(|e| {
                            debug!("GlobStoreIdIterator error: {:?}", e);
                            trace_error(&e);
                        }).ok()
                })
        }

    }

}
#[cfg(test)]
mod test {
extern crate env_logger;
use std::collections::BTreeMap;
use super::EntryHeader;
use super::Token;
use storeid::StoreId;
use toml::Value;
#[test]
fn test_imag_section() {
    use super::has_main_section;

    // A bare `[imag]` table is enough to satisfy has_main_section().
    let mut map = BTreeMap::new();
    map.insert("imag".into(), Value::Table(BTreeMap::new()));

    assert!(has_main_section(&map));
}
#[test]
fn test_imag_invalid_section_type() {
    use super::has_main_section;

    // An `imag` key of non-table type must be rejected.
    let mut map = BTreeMap::new();
    map.insert("imag".into(), Value::Boolean(false));

    assert!(!has_main_section(&map));
}
#[test]
fn test_imag_abscent_main_section() {
    use super::has_main_section;

    // Without an `imag` key the main section is missing.
    let mut map = BTreeMap::new();
    map.insert("not_imag".into(), Value::Boolean(false));

    assert!(!has_main_section(&map));
}
#[test]
fn test_main_section_without_version() {
    use super::has_imag_version_in_main_section;

    // An empty `[imag]` section carries no version entry.
    let mut map = BTreeMap::new();
    map.insert("imag".into(), Value::Table(BTreeMap::new()));

    assert!(!has_imag_version_in_main_section(&map));
}
#[test]
fn test_main_section_with_version() {
    use super::has_imag_version_in_main_section;

    // A valid semver string under `[imag].version` is accepted.
    let mut map = BTreeMap::new();
    let mut sub = BTreeMap::new();
    sub.insert("version".into(), Value::String("0.0.0".into()));
    map.insert("imag".into(), Value::Table(sub));

    assert!(has_imag_version_in_main_section(&map));
}
#[test]
fn test_main_section_with_version_in_wrong_type() {
    use super::has_imag_version_in_main_section;

    // A non-string `version` value must be rejected.
    let mut map = BTreeMap::new();
    let mut sub = BTreeMap::new();
    sub.insert("version".into(), Value::Boolean(false));
    map.insert("imag".into(), Value::Table(sub));

    assert!(!has_imag_version_in_main_section(&map));
}
#[test]
fn test_verification_good() {
    use super::verify_header_consistency;

    // Minimal consistent header: `[imag]` section with a semver version.
    let mut header = BTreeMap::new();
    let sub = {
        let mut sub = BTreeMap::new();
        sub.insert("version".into(), Value::String(String::from("0.0.0")));

        Value::Table(sub)
    };

    header.insert("imag".into(), sub);

    assert!(verify_header_consistency(header).is_ok());
}
#[test]
fn test_verification_invalid_versionstring() {
    use super::verify_header_consistency;

    // "000" is not valid semver, so verification must fail.
    let mut header = BTreeMap::new();
    let sub = {
        let mut sub = BTreeMap::new();
        sub.insert("version".into(), Value::String(String::from("000")));

        Value::Table(sub)
    };

    header.insert("imag".into(), sub);

    assert!(!verify_header_consistency(header).is_ok());
}
#[test]
fn test_verification_current_version() {
    use super::verify_header_consistency;

    // The version string produced by the version! macro must verify.
    let mut header = BTreeMap::new();
    let sub = {
        let mut sub = BTreeMap::new();
        sub.insert("version".into(), Value::String(String::from(version!())));

        Value::Table(sub)
    };

    header.insert("imag".into(), sub);

    assert!(verify_header_consistency(header).is_ok());
}
// Minimal serialized entry used by the round-trip tests below: a TOML header
// delimited by "---" markers, followed by the content line "Hai".
static TEST_ENTRY : &'static str = "---
[imag]
version = \"0.0.3\"
---
Hai";
#[test]
fn test_entry_from_str() {
    use super::Entry;
    use std::path::PathBuf;
    println!("{}", TEST_ENTRY);
    // Parsing TEST_ENTRY must strip the "---"-delimited header and leave
    // only the body text as content.
    let entry = Entry::from_str(StoreId::new_baseless(PathBuf::from("test/foo~1.3")).unwrap(),
                                TEST_ENTRY).unwrap();

    assert_eq!(entry.content, "Hai");
}
#[test]
fn test_entry_to_str() {
    use super::Entry;
    use std::path::PathBuf;
    println!("{}", TEST_ENTRY);
    let entry = Entry::from_str(StoreId::new_baseless(PathBuf::from("test/foo~1.3")).unwrap(),
                                TEST_ENTRY).unwrap();
    // Serializing a parsed entry must reproduce the original text exactly
    // (lossless round trip).
    let string = entry.to_str();

    assert_eq!(TEST_ENTRY, string);
}
#[test]
fn test_walk_header_simple() {
    // A single-segment path tokenizes into exactly one Key token …
    let tokens = EntryHeader::tokenize("a", '.').unwrap();
    assert!(tokens.len() == 1, "1 token was expected, {} were parsed", tokens.len());
    assert!(tokens.iter().next().unwrap() == &Token::Key(String::from("a")),
            "'a' token was expected, {:?} was parsed", tokens.iter().next());

    let mut header = BTreeMap::new();
    header.insert(String::from("a"), Value::Integer(1));

    let mut v_header = Value::Table(header);
    // … and walking the header with it yields a mutable reference to the
    // value stored under that key.
    let res = EntryHeader::walk_header(&mut v_header, tokens);
    assert_eq!(&mut Value::Integer(1), res.unwrap());
}
#[test]
fn test_walk_header_with_array() {
    // "a.0" tokenizes into a Key token followed by an Index token; walking
    // the header with it must yield the element at that array index.
    let tokens = EntryHeader::tokenize("a.0", '.').unwrap();
    // Fixed assert message grammar ("2 tokens were expected" instead of
    // "2 token was expected").
    assert!(tokens.len() == 2, "2 tokens were expected, {} were parsed", tokens.len());
    assert!(tokens.iter().next().unwrap() == &Token::Key(String::from("a")),
            "'a' token was expected, {:?} was parsed", tokens.iter().next());

    let mut header = BTreeMap::new();
    let ary = Value::Array(vec![Value::Integer(1)]);
    header.insert(String::from("a"), ary);

    let mut v_header = Value::Table(header);
    let res = EntryHeader::walk_header(&mut v_header, tokens);
    assert_eq!(&mut Value::Integer(1), res.unwrap());
}
#[test]
fn test_walk_header_extract_array() {
    // Walking with a plain key token (no index) must yield the whole array
    // stored under that key, not one of its elements.
    let tokens = EntryHeader::tokenize("a", '.').unwrap();
    assert!(tokens.len() == 1, "1 token was expected, {} were parsed", tokens.len());
    assert!(tokens.iter().next().unwrap() == &Token::Key(String::from("a")),
            "'a' token was expected, {:?} was parsed", tokens.iter().next());

    let mut table = BTreeMap::new();
    table.insert(String::from("a"), Value::Array(vec![Value::Integer(1)]));
    let mut v_header = Value::Table(table);

    let extracted = EntryHeader::walk_header(&mut v_header, tokens);
    assert_eq!(&mut Value::Array(vec![Value::Integer(1)]), extracted.unwrap());
}
/**
 * Creates a big testing header.
 *
 * JSON equivalent:
 *
 * ```json
 * {
 *   "a": {
 *     "array": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ]
 *   },
 *   "b": {
 *     "array": [ "string0", "string1", "string2", "string3", "string4",
 *                "string5", "string6", "string7", "string8" ]
 *   },
 *   "c": {
 *     "array": [ 1, "string2", 3, "string4" ]
 *   },
 *   "d": {
 *     "array": [
 *       {
 *         "d1": 1
 *       },
 *       {
 *         "d2": 2
 *       },
 *       {
 *         "d3": 3
 *       },
 *     ],
 *
 *     "something": "else",
 *
 *     "and": {
 *       "something": {
 *         "totally": "different"
 *       }
 *     }
 *   }
 * }
 * ```
 *
 * The sections "a", "b", "c", "d" are created in the respective helper functions
 * create_header_section_a, create_header_section_b, create_header_section_c and
 * create_header_section_d.
 *
 * These functions can also be used for testing.
 *
 */
fn create_header() -> Value {
    let a = create_header_section_a();
    let b = create_header_section_b();
    let c = create_header_section_c();
    let d = create_header_section_d();

    let mut header = BTreeMap::new();
    header.insert(String::from("a"), a);
    header.insert(String::from("b"), b);
    header.insert(String::from("c"), c);
    header.insert(String::from("d"), d);

    Value::Table(header)
}
/// Section "a": `{ "array": [0, 1, …, 9] }`.
fn create_header_section_a() -> Value {
    // 0..10 is exclusive 10, so the array holds ten integers 0 through 9.
    // `map(Value::Integer)` replaces the redundant closure `|x| Value::Integer(x)`.
    let a_ary = Value::Array((0..10).map(Value::Integer).collect());

    let mut a_obj = BTreeMap::new();
    a_obj.insert(String::from("array"), a_ary);

    Value::Table(a_obj)
}
/// Section "b": `{ "array": ["string0", …, "string8"] }`.
///
/// 0..9 is exclusive, so this produces NINE strings starting at "string0".
fn create_header_section_b() -> Value {
    let b_ary = Value::Array((0..9)
                             .map(|x| Value::String(format!("string{}", x)))
                             .collect());

    let mut b_obj = BTreeMap::new();
    b_obj.insert(String::from("array"), b_ary);

    Value::Table(b_obj)
}
/// Section "c": `{ "array": [1, "string2", 3, "string4"] }` — a
/// deliberately mixed-type array.
fn create_header_section_c() -> Value {
    let elements = vec![
        Value::Integer(1),
        Value::String(String::from("string2")),
        Value::Integer(3),
        Value::String(String::from("string4"))
    ];

    let mut section = BTreeMap::new();
    section.insert(String::from("array"), Value::Array(elements));

    Value::Table(section)
}
/// Section "d": an array of one-entry tables, a plain string entry, and a
/// nested table — see the JSON sketch on `create_header`.
fn create_header_section_d() -> Value {
    // "array": [ {d1: 1}, {d2: 2}, {d3: 3} ]
    let d_ary = Value::Array(
        vec![
            {
                let mut tab = BTreeMap::new();
                tab.insert(String::from("d1"), Value::Integer(1));
                tab
            },
            {
                let mut tab = BTreeMap::new();
                tab.insert(String::from("d2"), Value::Integer(2));
                tab
            },
            {
                let mut tab = BTreeMap::new();
                tab.insert(String::from("d3"), Value::Integer(3));
                tab
            },
        ].into_iter().map(Value::Table).collect());

    // "and": { "something": { "totally": "different" } }
    let and_obj = Value::Table({
        let mut tab = BTreeMap::new();
        let something_tab = Value::Table({
            let mut tab = BTreeMap::new();
            tab.insert(String::from("totally"), Value::String(String::from("different")));
            tab
        });
        tab.insert(String::from("something"), something_tab);
        tab
    });

    let mut d_obj = BTreeMap::new();
    d_obj.insert(String::from("array"), d_ary);
    d_obj.insert(String::from("something"), Value::String(String::from("else")));
    d_obj.insert(String::from("and"), and_obj);

    Value::Table(d_obj)
}
#[test]
fn test_walk_header_big_a() {
    // Extracting key "a" from the big header must yield exactly section "a".
    test_walk_header_extract_section("a", &create_header_section_a());
}
#[test]
fn test_walk_header_big_b() {
    // Extracting key "b" from the big header must yield exactly section "b".
    test_walk_header_extract_section("b", &create_header_section_b());
}
#[test]
fn test_walk_header_big_c() {
    // Extracting key "c" from the big header must yield exactly section "c".
    test_walk_header_extract_section("c", &create_header_section_c());
}
#[test]
fn test_walk_header_big_d() {
    // Extracting key "d" from the big header must yield exactly section "d".
    test_walk_header_extract_section("d", &create_header_section_d());
}
/// Shared driver for the test_walk_header_big_* tests: tokenizes `secname`,
/// walks the big header built by `create_header()`, and compares the result
/// against `expected`.
fn test_walk_header_extract_section(secname: &str, expected: &Value) {
    let tokens = EntryHeader::tokenize(secname, '.').unwrap();
    assert!(tokens.len() == 1, "1 token was expected, {} were parsed", tokens.len());
    assert!(tokens.iter().next().unwrap() == &Token::Key(String::from(secname)),
            "'{}' token was expected, {:?} was parsed", secname, tokens.iter().next());

    let mut header = create_header();
    let res = EntryHeader::walk_header(&mut header, tokens);
    assert_eq!(expected, res.unwrap());
}
#[test]
fn test_walk_header_extract_numbers() {
    // (section, array index, expected integer) triples against the big
    // header built by create_header().
    let cases = [
        ("a", 0, 0),
        ("a", 1, 1),
        ("a", 2, 2),
        ("a", 3, 3),
        ("a", 4, 4),
        ("a", 5, 5),
        ("a", 6, 6),
        ("a", 7, 7),
        ("a", 8, 8),
        ("a", 9, 9),
        ("c", 0, 1),
        ("c", 2, 3),
    ];

    for &(sec, idx, exp) in cases.iter() {
        test_extract_number(sec, idx, exp);
    }
}
/// Tokenizes "<sec>.array.<idx>", verifies the token stream is
/// Key(sec) / Key("array") / Index(idx), then walks the big header and
/// expects `Value::Integer(exp)` at that position.
fn test_extract_number(sec: &str, idx: usize, exp: i64) {
    let tokens = EntryHeader::tokenize(&format!("{}.array.{}", sec, idx)[..], '.').unwrap();
    assert!(tokens.len() == 3, "3 token was expected, {} were parsed", tokens.len());
    {
        // Inside this scope `exp` is shadowed by the expected Token value;
        // the i64 parameter is used again after the scope ends.
        let mut iter = tokens.iter();

        let tok = iter.next().unwrap();
        let exp = Token::Key(String::from(sec));
        assert!(tok == &exp, "'{}' token was expected, {:?} was parsed", sec, tok);

        let tok = iter.next().unwrap();
        let exp = Token::Key(String::from("array"));
        assert!(tok == &exp, "'array' token was expected, {:?} was parsed", tok);

        let tok = iter.next().unwrap();
        let exp = Token::Index(idx);
        assert!(tok == &exp, "'{}' token was expected, {:?} was parsed", idx, tok);
    }

    let mut header = create_header();
    let res = EntryHeader::walk_header(&mut header, tokens);
    assert_eq!(&mut Value::Integer(exp), res.unwrap());
}
#[test]
fn test_header_read() {
    let v = create_header();
    let h = match v {
        Value::Table(t) => EntryHeader::from_table(t),
        _ => panic!("create_header() doesn't return a table!"),
    };

    // Reads of existing paths yield Ok(Some(_)) with the expected variant.
    assert!(if let Ok(Some(Value::Table(_)))  = h.read("a") { true } else { false });
    assert!(if let Ok(Some(Value::Array(_)))  = h.read("a.array") { true } else { false });
    assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.array.1") { true } else { false });
    assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.array.9") { true } else { false });

    assert!(if let Ok(Some(Value::Table(_))) = h.read("c") { true } else { false });
    assert!(if let Ok(Some(Value::Array(_))) = h.read("c.array") { true } else { false });
    assert!(if let Ok(Some(Value::String(_))) = h.read("c.array.1") { true } else { false });
    // Out-of-bounds index: c.array has only 4 elements, so index 9 is None.
    assert!(if let Ok(None) = h.read("c.array.9") { true } else { false });

    // Each table in d.array holds exactly one of the keys d1/d2/d3; reading
    // one of the other keys yields Ok(None), not an error.
    assert!(if let Ok(Some(Value::Integer(_))) = h.read("d.array.0.d1") { true } else { false });
    assert!(if let Ok(None) = h.read("d.array.0.d2") { true } else { false });
    assert!(if let Ok(None) = h.read("d.array.0.d3") { true } else { false });

    assert!(if let Ok(None) = h.read("d.array.1.d1") { true } else { false });
    assert!(if let Ok(Some(Value::Integer(_))) = h.read("d.array.1.d2") { true } else { false });
    assert!(if let Ok(None) = h.read("d.array.1.d3") { true } else { false });

    assert!(if let Ok(None) = h.read("d.array.2.d1") { true } else { false });
    assert!(if let Ok(None) = h.read("d.array.2.d2") { true } else { false });
    assert!(if let Ok(Some(Value::Integer(_))) = h.read("d.array.2.d3") { true } else { false });

    assert!(if let Ok(Some(Value::String(_))) = h.read("d.something") { true } else { false });
    assert!(if let Ok(Some(Value::Table(_))) = h.read("d.and") { true } else { false });
    assert!(if let Ok(Some(Value::Table(_))) = h.read("d.and.something") { true } else { false });
    assert!(if let Ok(Some(Value::String(_))) = h.read("d.and.something.totally") { true } else { false });
}
#[test]
fn test_header_set_override() {
    let _ = env_logger::init();
    let v = create_header();
    let mut h = match v {
        Value::Table(t) => EntryHeader::from_table(t),
        _ => panic!("create_header() doesn't return a table!"),
    };

    println!("Testing index 0");
    assert_eq!(h.read("a.array.0").unwrap().unwrap(), Value::Integer(0));

    println!("Altering index 0");
    // set() on an existing path overwrites and returns the OLD value.
    assert_eq!(h.set("a.array.0", Value::Integer(42)).unwrap().unwrap(), Value::Integer(0));
    println!("Values now: {:?}", h);

    println!("Testing all indexes");
    // Only index 0 changed; all other elements keep their original values.
    assert_eq!(h.read("a.array.0").unwrap().unwrap(), Value::Integer(42));
    assert_eq!(h.read("a.array.1").unwrap().unwrap(), Value::Integer(1));
    assert_eq!(h.read("a.array.2").unwrap().unwrap(), Value::Integer(2));
    assert_eq!(h.read("a.array.3").unwrap().unwrap(), Value::Integer(3));
    assert_eq!(h.read("a.array.4").unwrap().unwrap(), Value::Integer(4));
    assert_eq!(h.read("a.array.5").unwrap().unwrap(), Value::Integer(5));
    assert_eq!(h.read("a.array.6").unwrap().unwrap(), Value::Integer(6));
    assert_eq!(h.read("a.array.7").unwrap().unwrap(), Value::Integer(7));
    assert_eq!(h.read("a.array.8").unwrap().unwrap(), Value::Integer(8));
    assert_eq!(h.read("a.array.9").unwrap().unwrap(), Value::Integer(9));
}
#[test]
fn test_header_set_new() {
    let _ = env_logger::init();
    let v = create_header();
    let mut h = match v {
        Value::Table(t) => EntryHeader::from_table(t),
        _ => panic!("create_header() doesn't return a table!"),
    };

    assert!(h.read("a.foo").is_ok());
    assert!(h.read("a.foo").unwrap().is_none());

    {
        // set() on a previously absent path creates it and returns Ok(None)
        // (there was no old value to hand back).
        let v = h.set("a.foo", Value::Integer(42));
        assert!(v.is_ok());
        assert!(v.unwrap().is_none());

        assert!(if let Ok(Some(Value::Table(_))) = h.read("a") { true } else { false });
        assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.foo") { true } else { false });
    }

    {
        // Nested destinations must be built up table by table.
        let v = h.set("new", Value::Table(BTreeMap::new()));
        assert!(v.is_ok());
        assert!(v.unwrap().is_none());

        let v = h.set("new.subset", Value::Table(BTreeMap::new()));
        assert!(v.is_ok());
        assert!(v.unwrap().is_none());

        let v = h.set("new.subset.dest", Value::Integer(1337));
        assert!(v.is_ok());
        assert!(v.unwrap().is_none());

        assert!(if let Ok(Some(Value::Table(_))) = h.read("new") { true } else { false });
        assert!(if let Ok(Some(Value::Table(_))) = h.read("new.subset") { true } else { false });
        assert!(if let Ok(Some(Value::Integer(_))) = h.read("new.subset.dest") { true } else { false });
    }
}
#[test]
fn test_header_insert_override() {
    let _ = env_logger::init();
    let v = create_header();
    let mut h = match v {
        Value::Table(t) => EntryHeader::from_table(t),
        _ => panic!("create_header() doesn't return a table!"),
    };

    println!("Testing index 0");
    assert_eq!(h.read("a.array.0").unwrap().unwrap(), Value::Integer(0));

    println!("Altering index 0");
    // Unlike set(), insert() refuses to overwrite an existing value: it
    // returns Ok(false) and leaves the header untouched.
    assert_eq!(h.insert("a.array.0", Value::Integer(42)).unwrap(), false);
    println!("...should have failed");

    println!("Testing all indexes");
    assert_eq!(h.read("a.array.0").unwrap().unwrap(), Value::Integer(0));
    assert_eq!(h.read("a.array.1").unwrap().unwrap(), Value::Integer(1));
    assert_eq!(h.read("a.array.2").unwrap().unwrap(), Value::Integer(2));
    assert_eq!(h.read("a.array.3").unwrap().unwrap(), Value::Integer(3));
    assert_eq!(h.read("a.array.4").unwrap().unwrap(), Value::Integer(4));
    assert_eq!(h.read("a.array.5").unwrap().unwrap(), Value::Integer(5));
    assert_eq!(h.read("a.array.6").unwrap().unwrap(), Value::Integer(6));
    assert_eq!(h.read("a.array.7").unwrap().unwrap(), Value::Integer(7));
    assert_eq!(h.read("a.array.8").unwrap().unwrap(), Value::Integer(8));
    assert_eq!(h.read("a.array.9").unwrap().unwrap(), Value::Integer(9));
}
#[test]
fn test_header_insert_new() {
    let _ = env_logger::init();
    let v = create_header();
    let mut h = match v {
        Value::Table(t) => EntryHeader::from_table(t),
        _ => panic!("create_header() doesn't return a table!"),
    };

    assert!(h.read("a.foo").is_ok());
    assert!(h.read("a.foo").unwrap().is_none());

    {
        // insert() on an absent path succeeds and returns Ok(true).
        let v = h.insert("a.foo", Value::Integer(42));
        assert!(v.is_ok());
        assert_eq!(v.unwrap(), true);

        assert!(if let Ok(Some(Value::Table(_))) = h.read("a") { true } else { false });
        assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.foo") { true } else { false });
    }

    {
        // Nested destinations must be built up table by table.
        let v = h.insert("new", Value::Table(BTreeMap::new()));
        assert!(v.is_ok());
        assert_eq!(v.unwrap(), true);

        let v = h.insert("new.subset", Value::Table(BTreeMap::new()));
        assert!(v.is_ok());
        assert_eq!(v.unwrap(), true);

        let v = h.insert("new.subset.dest", Value::Integer(1337));
        assert!(v.is_ok());
        assert_eq!(v.unwrap(), true);

        assert!(if let Ok(Some(Value::Table(_))) = h.read("new") { true } else { false });
        assert!(if let Ok(Some(Value::Table(_))) = h.read("new.subset") { true } else { false });
        assert!(if let Ok(Some(Value::Integer(_))) = h.read("new.subset.dest") { true } else { false });
    }
}
#[test]
fn test_header_delete() {
    let _ = env_logger::init();
    let v = create_header();
    let mut h = match v {
        Value::Table(t) => EntryHeader::from_table(t),
        _ => panic!("create_header() doesn't return a table!"),
    };

    assert!(if let Ok(Some(Value::Table(_)))  = h.read("a") { true } else { false });
    assert!(if let Ok(Some(Value::Array(_)))  = h.read("a.array") { true } else { false });
    assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.array.1") { true } else { false });
    assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.array.9") { true } else { false });

    // Deleting index 1 shifts every later element left by one, so the
    // original value 9 now lives at index 8 — deleting index 8 yields 9.
    assert!(if let Ok(Some(Value::Integer(1))) = h.delete("a.array.1") { true } else { false });
    assert!(if let Ok(Some(Value::Integer(9))) = h.delete("a.array.8") { true } else { false });
    assert!(if let Ok(Some(Value::Array(_)))  = h.delete("a.array") { true } else { false });
    assert!(if let Ok(Some(Value::Table(_)))  = h.delete("a") { true } else { false });
}
}
#[cfg(test)]
mod store_tests {
    use std::path::PathBuf;

    use super::Store;

    /// Builds a fresh store rooted at "/" with no configuration. The tests
    /// below never touch the filesystem before Store::create() is called.
    pub fn get_store() -> Store {
        Store::new(PathBuf::from("/"), None).unwrap()
    }

    #[test]
    fn test_store_instantiation() {
        let store = get_store();

        // A freshly built store has no entries and no hook aspects
        // registered for any of the hook positions.
        assert_eq!(store.location, PathBuf::from("/"));
        assert!(store.entries.read().unwrap().is_empty());

        assert!(store.store_unload_aspects.lock().unwrap().is_empty());

        assert!(store.pre_create_aspects.lock().unwrap().is_empty());
        assert!(store.post_create_aspects.lock().unwrap().is_empty());
        assert!(store.pre_retrieve_aspects.lock().unwrap().is_empty());
        assert!(store.post_retrieve_aspects.lock().unwrap().is_empty());
        assert!(store.pre_update_aspects.lock().unwrap().is_empty());
        assert!(store.post_update_aspects.lock().unwrap().is_empty());
        assert!(store.pre_delete_aspects.lock().unwrap().is_empty());
        assert!(store.post_delete_aspects.lock().unwrap().is_empty());
        assert!(store.pre_move_aspects.lock().unwrap().is_empty());
        assert!(store.post_move_aspects.lock().unwrap().is_empty());
    }

    #[test]
    fn test_store_create() {
        let store = get_store();

        // Creating entries yields verified entries whose location is the
        // given path anchored under the store root.
        for n in 1..100 {
            let s = format!("test-{}", n);
            let entry = store.create(PathBuf::from(s.clone())).unwrap();
            assert!(entry.verify().is_ok());
            let loc = entry.get_location().clone().into_pathbuf().unwrap();
            assert!(loc.starts_with("/"));
            assert!(loc.ends_with(s));
        }
    }

    #[test]
    fn test_store_get_create_get_delete_get() {
        let store = get_store();

        // Before creation: get() returns Ok(None) for every id.
        for n in 1..100 {
            let res = store.get(PathBuf::from(format!("test-{}", n)));
            assert!(match res { Ok(None) => true, _ => false, })
        }

        // Create all ids.
        for n in 1..100 {
            let s = format!("test-{}", n);
            let entry = store.create(PathBuf::from(s.clone())).unwrap();
            assert!(entry.verify().is_ok());

            let loc = entry.get_location().clone().into_pathbuf().unwrap();

            assert!(loc.starts_with("/"));
            assert!(loc.ends_with(s));
        }

        // After creation: get() returns Ok(Some(_)).
        for n in 1..100 {
            let res = store.get(PathBuf::from(format!("test-{}", n)));
            assert!(match res { Ok(Some(_)) => true, _ => false, })
        }

        for n in 1..100 {
            assert!(store.delete(PathBuf::from(format!("test-{}", n))).is_ok())
        }

        // After deletion: get() returns Ok(None) again.
        for n in 1..100 {
            let res = store.get(PathBuf::from(format!("test-{}", n)));
            assert!(match res { Ok(None) => true, _ => false, })
        }
    }

    #[test]
    fn test_store_create_twice() {
        use error::StoreErrorKind as SEK;

        let store = get_store();

        // n % 50 makes each id appear twice over the loop: the first create
        // (n < 50) succeeds, the second (n >= 50) must fail with
        // CreateCallError.
        for n in 1..100 {
            let s = format!("test-{}", n % 50);
            store.create(PathBuf::from(s.clone()))
                .map_err(|e| assert!(is_match!(e.err_type(), SEK::CreateCallError) && n >= 50))
                .ok()
                .map(|entry| {
                    assert!(entry.verify().is_ok());
                    let loc = entry.get_location().clone().into_pathbuf().unwrap();
                    assert!(loc.starts_with("/"));
                    assert!(loc.ends_with(s));
                });
        }
    }

    #[test]
    fn test_store_create_in_hm() {
        use storeid::StoreId;

        let store = get_store();

        // create() must register the entry in the internal entries map under
        // the id anchored at the store's base path.
        for n in 1..100 {
            let pb = StoreId::new_baseless(PathBuf::from(format!("test-{}", n))).unwrap();

            assert!(store.entries.read().unwrap().get(&pb).is_none());
            assert!(store.create(pb.clone()).is_ok());

            let pb = pb.with_base(store.path().clone());
            assert!(store.entries.read().unwrap().get(&pb).is_some());
        }
    }

    #[test]
    fn test_store_retrieve_in_hm() {
        use storeid::StoreId;

        let store = get_store();

        // retrieve() creates on demand, so it must register the entry in the
        // internal entries map just like create() does.
        for n in 1..100 {
            let pb = StoreId::new_baseless(PathBuf::from(format!("test-{}", n))).unwrap();

            assert!(store.entries.read().unwrap().get(&pb).is_none());
            assert!(store.retrieve(pb.clone()).is_ok());

            let pb = pb.with_base(store.path().clone());
            assert!(store.entries.read().unwrap().get(&pb).is_some());
        }
    }

    #[test]
    fn test_get_none() {
        let store = get_store();

        // get() on a non-existing id is Ok(None), not an error.
        for n in 1..100 {
            match store.get(PathBuf::from(format!("test-{}", n))) {
                Ok(None) => assert!(true),
                _        => assert!(false),
            }
        }
    }

    #[test]
    fn test_delete_none() {
        let store = get_store();

        // delete() on a non-existing id is an error.
        for n in 1..100 {
            match store.delete(PathBuf::from(format!("test-{}", n))) {
                Err(_) => assert!(true),
                _      => assert!(false),
            }
        }
    }

    // Disabled because we cannot test this by now, as we rely on glob() in
    // Store::retieve_for_module(), which accesses the filesystem and tests run in-memory, so there
    // are no files on the filesystem in this test after Store::create().
    //
    // #[test]
    // fn test_retrieve_for_module() {
    //     let pathes = vec![
    //         "foo/1", "foo/2", "foo/3", "foo/4", "foo/5",
    //         "bar/1", "bar/2", "bar/3", "bar/4", "bar/5",
    //         "bla/1", "bla/2", "bla/3", "bla/4", "bla/5",
    //         "boo/1", "boo/2", "boo/3", "boo/4", "boo/5",
    //         "glu/1", "glu/2", "glu/3", "glu/4", "glu/5",
    //     ];
    //
    //     fn test(store: &Store, modulename: &str) {
    //         use std::path::Component;
    //         use storeid::StoreId;
    //
    //         let retrieved = store.retrieve_for_module(modulename);
    //         assert!(retrieved.is_ok());
    //         let v : Vec<StoreId> = retrieved.unwrap().collect();
    //         println!("v = {:?}", v);
    //         assert!(v.len() == 5);
    //
    //         let retrieved = store.retrieve_for_module(modulename);
    //         assert!(retrieved.is_ok());
    //
    //         assert!(retrieved.unwrap().all(|e| {
    //             let first = e.components().next();
    //             assert!(first.is_some());
    //             match first.unwrap() {
    //                 Component::Normal(s) => s == modulename,
    //                 _                    => false,
    //             }
    //         }))
    //     }
    //
    //     let store = get_store();
    //     for path in pathes {
    //         assert!(store.create(PathBuf::from(path)).is_ok());
    //     }
    //
    //     test(&store, "foo");
    //     test(&store, "bar");
    //     test(&store, "bla");
    //     test(&store, "boo");
    //     test(&store, "glu");
    // }

    #[test]
    fn test_store_move_moves_in_hm() {
        use storeid::StoreId;

        let store = get_store();

        // For every even n, create "t-n" and move it to "t-(n-1)", then
        // check the entries map and get() reflect the move.
        for n in 1..100 {
            if n % 2 == 0 { // every second
                let id    = StoreId::new_baseless(PathBuf::from(format!("t-{}", n))).unwrap();
                let id_mv = StoreId::new_baseless(PathBuf::from(format!("t-{}", n - 1))).unwrap();

                {
                    assert!(store.entries.read().unwrap().get(&id).is_none());
                }

                {
                    assert!(store.create(id.clone()).is_ok());
                }

                {
                    let id_with_base = id.clone().with_base(store.path().clone());
                    assert!(store.entries.read().unwrap().get(&id_with_base).is_some());
                }

                let r = store.move_by_id(id.clone(), id_mv.clone());
                assert!(r.map_err(|e| println!("ERROR: {:?}", e)).is_ok());

                {
                    let id_mv_with_base = id_mv.clone().with_base(store.path().clone());
                    assert!(store.entries.read().unwrap().get(&id_mv_with_base).is_some());
                }

                assert!(match store.get(id.clone()) { Ok(None) => true, _ => false },
                        "Moved id ({:?}) is still there", id);
                assert!(match store.get(id_mv.clone()) { Ok(Some(_)) => true, _ => false },
                        "New id ({:?}) is not in store...", id_mv);
            }
        }
    }

}
#[cfg(test)]
mod store_hook_tests {
mod test_hook {
use hook::Hook;
use hook::accessor::HookDataAccessor;
use hook::accessor::HookDataAccessorProvider;
use hook::position::HookPosition;
use self::accessor::TestHookAccessor as DHA;
use toml::Value;
#[derive(Debug)]
pub struct TestHook {
position: HookPosition,
accessor: DHA,
}
impl TestHook {
pub fn new(pos: HookPosition, succeed: bool, error_aborting: bool) -> TestHook {
TestHook { position: pos.clone(), accessor: DHA::new(pos, succeed, error_aborting) }
}
}
impl Hook for TestHook {
fn name(&self) -> &'static str { "testhook_succeeding" }
fn set_config(&mut self, _: &Value) { }
}
impl HookDataAccessorProvider for TestHook {
fn accessor(&self) -> HookDataAccessor {
use hook::position::HookPosition as HP;
use hook::accessor::HookDataAccessor as HDA;
match self.position {
HP::StoreUnload |
HP::PreCreate |
HP::PreRetrieve |
HP::PreDelete |
HP::PostDelete => HDA::StoreIdAccess(&self.accessor),
HP::PostCreate |
HP::PostRetrieve |
HP::PreUpdate |
HP::PostUpdate => HDA::MutableAccess(&self.accessor),
}
}
}
pub mod accessor {
use hook::result::HookResult;
use hook::accessor::MutableHookDataAccessor;
use hook::accessor::NonMutableHookDataAccessor;
use hook::accessor::StoreIdAccessor;
use hook::position::HookPosition;
use store::FileLockEntry;
use storeid::StoreId;
use hook::error::HookErrorKind as HEK;
use hook::error::CustomData;
use libimagerror::into::IntoError;
#[derive(Debug)]
pub struct TestHookAccessor {
pos: HookPosition,
succeed: bool,
error_aborting: bool
}
impl TestHookAccessor {
pub fn new(position: HookPosition, succeed: bool, error_aborting: bool)
-> TestHookAccessor
{
TestHookAccessor {
pos: position,
succeed: succeed,
error_aborting: error_aborting,
}
}
}
fn get_result(succeed: bool, abort: bool) -> HookResult<()> {
println!("Generting result: succeed = {}, abort = {}", succeed, abort);
if succeed {
println!("Generating result: Ok(())");
Ok(())
} else {
if abort {
println!("Generating result: Err(_), aborting");
Err(HEK::HookExecutionError.into_error())
} else {
println!("Generating result: Err(_), not aborting");
let custom = CustomData::default().aborting(false);
Err(HEK::HookExecutionError.into_error().with_custom_data(custom))
}
}
}
impl StoreIdAccessor for TestHookAccessor {
fn access(&self, id: &StoreId) -> HookResult<()> {
get_result(self.succeed, self.error_aborting)
}
}
impl MutableHookDataAccessor for TestHookAccessor {
fn access_mut(&self, fle: &mut FileLockEntry) -> HookResult<()> {
get_result(self.succeed, self.error_aborting)
}
}
impl NonMutableHookDataAccessor for TestHookAccessor {
fn access(&self, fle: &FileLockEntry) -> HookResult<()> {
get_result(self.succeed, self.error_aborting)
}
}
}
}
use std::path::PathBuf;
use hook::position::HookPosition as HP;
use storeid::StoreId;
use store::Store;
use self::test_hook::TestHook;
fn get_store_with_config() -> Store {
use toml::Parser;
let cfg = Parser::new(mini_config()).parse().unwrap();
println!("Config parsed: {:?}", cfg);
Store::new(PathBuf::from("/"), Some(cfg.get("store").cloned().unwrap())).unwrap()
}
fn mini_config() -> &'static str {
r#"
[store]
store-unload-hook-aspects = [ "test" ]
pre-create-hook-aspects = [ "test" ]
post-create-hook-aspects = [ "test" ]
pre-move-hook-aspects = [ "test" ]
post-move-hook-aspects = [ "test" ]
pre-retrieve-hook-aspects = [ "test" ]
post-retrieve-hook-aspects = [ "test" ]
pre-update-hook-aspects = [ "test" ]
post-update-hook-aspects = [ "test" ]
pre-delete-hook-aspects = [ "test" ]
post-delete-hook-aspects = [ "test" ]
[store.aspects.test]
parallel = false
mutable_hooks = true
[store.hooks.testhook_succeeding]
aspect = "test"
"#
}
fn test_hook_execution(hook_positions: &[HP], storeid_name: &str) {
let mut store = get_store_with_config();
let pos = HP::PreCreate;
let hook = TestHook::new(pos.clone(), true, false);
println!("Registering hooks...");
for pos in hook_positions {
let hook = TestHook::new(pos.clone(), true, false);
println!("\tRegistering: {:?}", pos);
assert!(store.register_hook(pos.clone(), "test", Box::new(hook))
.map_err(|e| println!("{:?}", e))
.is_ok()
);
}
println!("... done.");
let pb = StoreId::new_baseless(PathBuf::from(storeid_name)).unwrap();
let pb_moved = StoreId::new_baseless(PathBuf::from(format!("{}-moved", storeid_name))).unwrap();
println!("Creating {:?}", pb);
assert!(store.create(pb.clone()).is_ok());
{
println!("Getting {:?} -> Some?", pb);
assert!(match store.get(pb.clone()) {
Ok(Some(_)) => true,
_ => false,
});
}
{
println!("Getting {:?} -> None?", pb_moved);
assert!(match store.get(pb_moved.clone()) {
Ok(None) => true,
_ => false,
});
}
{
println!("Moving {:?} -> {:?}", pb, pb_moved);
assert!(store.move_by_id(pb.clone(), pb_moved.clone()).map_err(|e| println!("ERROR MOVING: {:?}", e)).is_ok());
}
{
println!("Getting {:?} -> None", pb);
assert!(match store.get(pb.clone()) {
Ok(None) => true,
_ => false,
});
}
{
println!("Getting {:?} -> Some", pb_moved);
assert!(match store.get(pb_moved.clone()) {
Ok(Some(_)) => true,
_ => false,
});
}
{
println!("Getting {:?} -> Some -> updating", pb_moved);
assert!(match store.get(pb_moved.clone()).map_err(|e| println!("ERROR GETTING: {:?}", e)) {
Ok(Some(fle)) => store.update(fle).map_err(|e| println!("ERROR UPDATING: {:?}", e)).is_ok(),
_ => false,
});
}
println!("Deleting {:?}", pb_moved);
assert!(store.delete(pb_moved).is_ok());
}
#[test]
fn test_storeunload() {
test_hook_execution(&[HP::StoreUnload], "test_storeunload");
}
#[test]
fn test_precreate() {
test_hook_execution(&[HP::PreCreate], "test_precreate");
}
#[test]
fn test_postcreate() {
test_hook_execution(&[HP::PostCreate], "test_postcreate");
}
#[test]
fn test_preretrieve() {
test_hook_execution(&[HP::PreRetrieve], "test_preretrieve");
}
#[test]
fn test_postretrieve() {
test_hook_execution(&[HP::PostRetrieve], "test_postretrieve");
}
#[test]
fn test_preupdate() {
test_hook_execution(&[HP::PreUpdate], "test_preupdate");
}
#[test]
fn test_postupdate() {
test_hook_execution(&[HP::PostUpdate], "test_postupdate");
}
#[test]
fn test_predelete() {
test_hook_execution(&[HP::PreDelete], "test_predelete");
}
#[test]
fn test_postdelete() {
test_hook_execution(&[HP::PostDelete], "test_postdelete");
}
#[test]
fn test_multiple_same_position() {
let positions = [ HP::StoreUnload, HP::PreCreate, HP::PostCreate, HP::PreRetrieve,
HP::PostRetrieve, HP::PreUpdate, HP::PostUpdate, HP::PreDelete, HP::PostDelete ];
for position in positions.iter() {
for n in 2..10 {
let mut v = Vec::with_capacity(n);
for x in 0..n { v.push(position.clone()); }
test_hook_execution(&v, "test_multiple_same_position");
}
}
}
fn get_store_with_aborting_hook_at_pos(pos: HP) -> Store {
let mut store = get_store_with_config();
let hook = TestHook::new(pos.clone(), false, true);
assert!(store.register_hook(pos, "test", Box::new(hook)).map_err(|e| println!("{:?}", e)).is_ok());
store
}
fn default_test_id() -> StoreId {
StoreId::new_baseless(PathBuf::from("test")).unwrap()
}
#[test]
fn test_pre_create_error() {
let storeid = StoreId::new_baseless(PathBuf::from("test_pre_create_error")).unwrap();
let store = get_store_with_aborting_hook_at_pos(HP::PreCreate);
assert!(store.create(storeid).is_err());
}
#[test]
fn test_pre_retrieve_error() {
let storeid = StoreId::new_baseless(PathBuf::from("test_pre_retrieve_error")).unwrap();
let store = get_store_with_aborting_hook_at_pos(HP::PreRetrieve);
assert!(store.retrieve(storeid).is_err());
}
#[test]
fn test_pre_delete_error() {
let storeid = StoreId::new_baseless(PathBuf::from("test_pre_delete_error")).unwrap();
let store = get_store_with_aborting_hook_at_pos(HP::PreDelete);
assert!(store.delete(storeid).is_err());
}
#[test]
fn test_pre_update_error() {
let storeid = StoreId::new_baseless(PathBuf::from("test_pre_update_error")).unwrap();
let store = get_store_with_aborting_hook_at_pos(HP::PreUpdate);
let fle = store.create(storeid).unwrap();
assert!(store.update(fle).is_err());
}
#[test]
fn test_post_create_error() {
let store = get_store_with_aborting_hook_at_pos(HP::PostCreate);
let pb = StoreId::new_baseless(PathBuf::from("test_post_create_error")).unwrap();
assert!(store.create(pb.clone()).is_err());
// But the entry exists, as the hook fails post-create
assert!(store.entries.read().unwrap().get(&pb.with_base(store.path().clone())).is_some());
}
#[test]
fn test_post_retrieve_error() {
let store = get_store_with_aborting_hook_at_pos(HP::PostRetrieve);
let pb = StoreId::new_baseless(PathBuf::from("test_post_retrieve_error")).unwrap();
assert!(store.retrieve(pb.clone()).is_err());
// But the entry exists, as the hook fails post-retrieve
assert!(store.entries.read().unwrap().get(&pb.with_base(store.path().clone())).is_some());
}
#[test]
fn test_post_delete_error() {
let store = get_store_with_aborting_hook_at_pos(HP::PostDelete);
let pb = StoreId::new_baseless(PathBuf::from("test_post_delete_error")).unwrap();
assert!(store.create(pb.clone()).is_ok());
let pb = pb.with_base(store.path().clone());
assert!(store.entries.read().unwrap().get(&pb).is_some());
assert!(store.delete(pb.clone()).is_err());
// But the entry is removed, as we fail post-delete
assert!(store.entries.read().unwrap().get(&pb).is_none());
}
#[test]
fn test_post_update_error() {
let store = get_store_with_aborting_hook_at_pos(HP::PostUpdate);
let pb = StoreId::new_baseless(PathBuf::from("test_post_update_error")).unwrap();
let fle = store.create(pb.clone()).unwrap();
let pb = pb.with_base(store.path().clone());
assert!(store.entries.read().unwrap().get(&pb).is_some());
assert!(store.update(fle).is_err());
}
fn get_store_with_allowed_error_hook_at_pos(pos: HP) -> Store {
let mut store = get_store_with_config();
let hook = TestHook::new(pos.clone(), false, false);
assert!(store.register_hook(pos, "test", Box::new(hook)).map_err(|e| println!("{:?}", e)).is_ok());
store
}
#[test]
fn test_pre_create_allowed_error() {
let storeid = StoreId::new_baseless(PathBuf::from("test_pre_create_allowed_error")).unwrap();
let store = get_store_with_allowed_error_hook_at_pos(HP::PreCreate);
assert!(store.create(storeid).is_ok());
}
#[test]
fn test_pre_retrieve_allowed_error() {
let storeid = StoreId::new_baseless(PathBuf::from("test_pre_retrieve_allowed_error")).unwrap();
let store = get_store_with_allowed_error_hook_at_pos(HP::PreRetrieve);
assert!(store.retrieve(storeid).is_ok());
}
#[test]
fn test_pre_delete_allowed_error() {
let storeid = StoreId::new_baseless(PathBuf::from("test_pre_delete_allowed_error")).unwrap();
let store = get_store_with_allowed_error_hook_at_pos(HP::PreDelete);
assert!(store.retrieve(storeid.clone()).is_ok());
assert!(store.delete(storeid).map_err(|e| println!("{:?}", e)).is_ok());
}
// Non-aborting hook error at PreUpdate must not prevent update().
#[test]
fn test_pre_update_allowed_error() {
    let storeid = StoreId::new_baseless(PathBuf::from("test_pre_update_allowed_error")).unwrap();
    let store = get_store_with_allowed_error_hook_at_pos(HP::PreUpdate);
    let fle = store.create(storeid).unwrap();

    assert!(store.update(fle).is_ok());
}
// Non-aborting hook error at PostCreate: create() succeeds and the entry
// ends up in the cache.
#[test]
fn test_post_create_allowed_error() {
    let store = get_store_with_allowed_error_hook_at_pos(HP::PostCreate);
    // Fixture id now matches this test's name (was copy-pasted from the
    // pre-create test).
    let pb = StoreId::new_baseless(PathBuf::from("test_post_create_allowed_error")).unwrap();

    assert!(store.create(pb.clone()).is_ok());

    // The hook error is non-aborting, so the entry exists after create()
    assert!(store.entries.read().unwrap().get(&pb.with_base(store.path().clone())).is_some());
}
// Non-aborting hook error at PostRetrieve: retrieve() succeeds and the entry
// ends up in the cache.
#[test]
fn test_post_retrieve_allowed_error() {
    let store = get_store_with_allowed_error_hook_at_pos(HP::PostRetrieve);
    // Fixture id now matches this test's name (was copy-pasted from the
    // pre-retrieve test).
    let pb = StoreId::new_baseless(PathBuf::from("test_post_retrieve_allowed_error")).unwrap();

    assert!(store.retrieve(pb.clone()).is_ok());

    // The hook error is non-aborting, so the entry exists after retrieve()
    assert!(store.entries.read().unwrap().get(&pb.with_base(store.path().clone())).is_some());
}
// Non-aborting hook error at PostDelete: delete() succeeds and the entry is
// gone from the cache.
#[test]
fn test_post_delete_allowed_error() {
    let store = get_store_with_allowed_error_hook_at_pos(HP::PostDelete);
    // Fixture id now matches this test's name (was copy-pasted from the
    // pre-delete test).
    let pb = StoreId::new_baseless(PathBuf::from("test_post_delete_allowed_error")).unwrap();

    assert!(store.create(pb.clone()).is_ok());
    let pb = pb.with_base(store.path().clone());

    assert!(store.entries.read().unwrap().get(&pb).is_some());
    // the hook errors non-abortingly, so delete() still reports success
    assert!(store.delete(pb.clone()).is_ok());
    assert!(store.entries.read().unwrap().get(&pb).is_none());
}
// Non-aborting hook error at PostUpdate: update() succeeds.
#[test]
fn test_post_update_allowed_error() {
    let store = get_store_with_allowed_error_hook_at_pos(HP::PostUpdate);
    // Fixture id now matches this test's name (was copy-pasted from the
    // pre-update test).
    let pb = StoreId::new_baseless(PathBuf::from("test_post_update_allowed_error")).unwrap();
    let fle = store.create(pb.clone()).unwrap();
    let pb = pb.with_base(store.path().clone());

    assert!(store.entries.read().unwrap().get(&pb).is_some());
    assert!(store.update(fle).is_ok());
}
}
// Simplify hashmap fetching and error construction
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <mail@beyermatthias.de> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::collections::HashMap;
use std::ops::Drop;
use std::path::PathBuf;
use std::result::Result as RResult;
use std::sync::Arc;
use std::sync::RwLock;
use std::collections::BTreeMap;
use std::io::Read;
use std::convert::From;
use std::convert::Into;
use std::sync::Mutex;
use std::ops::Deref;
use std::ops::DerefMut;
use std::fmt::Formatter;
use std::fmt::Debug;
use std::fmt::Error as FMTError;
use toml::{Table, Value};
use regex::Regex;
use glob::glob;
use walkdir::WalkDir;
use walkdir::Iter as WalkDirIter;
use error::{ParserErrorKind, ParserError};
use error::{StoreError as SE, StoreErrorKind as SEK};
use error::MapErrInto;
use storeid::{IntoStoreId, StoreId, StoreIdIterator};
use file_abstraction::FileAbstraction;
use hook::aspect::Aspect;
use hook::error::HookErrorKind;
use hook::result::HookResult;
use hook::accessor::{ MutableHookDataAccessor,
StoreIdAccessor};
use hook::position::HookPosition;
use hook::Hook;
use libimagerror::into::IntoError;
use libimagerror::trace::trace_error;
use libimagutil::iter::FoldResult;
use libimagutil::debug_result::*;
use self::glob_store_iter::*;
/// The Result Type returned by any interaction with the store that could fail
/// (the error type is `StoreError`, aliased `SE` here).
pub type Result<T> = RResult<T, SE>;
/// Borrow state of a cached store entry.
#[derive(Debug, PartialEq)]
enum StoreEntryStatus {
    Present,  // in the cache, not checked out
    Borrowed  // a `FileLockEntry` for it is currently alive
}
/// A store entry, depending on the option type it is either borrowed currently
/// or not.
#[derive(Debug)]
struct StoreEntry {
    id: StoreId,              // id of the entry (with store base)
    file: FileAbstraction,    // backing file abstraction
    status: StoreEntryStatus, // whether a FileLockEntry is checked out
}
/// Element yielded by the `Walk` iterator: a file mapped to a `StoreId`, or a
/// directory ("collection") path.
pub enum StoreObject {
    Id(StoreId),
    Collection(PathBuf),
}
/// Iterator over one module's subtree of the store, yielding `StoreObject`s.
pub struct Walk {
    store_path: PathBuf,    // base path of the store (without the module)
    dirwalker: WalkDirIter, // underlying walkdir iterator
}
impl Walk {

    /// Build a `Walk` over `store_path`/`mod_name`, remembering the plain
    /// store base path so file paths can later be turned into `StoreId`s.
    fn new(mut store_path: PathBuf, mod_name: &str) -> Walk {
        let base = store_path.clone();
        store_path.push(mod_name);
        Walk {
            store_path: base,
            dirwalker: WalkDir::new(store_path).into_iter(),
        }
    }
}
impl ::std::ops::Deref for Walk {
type Target = WalkDirIter;
fn deref(&self) -> &Self::Target {
&self.dirwalker
}
}
impl Iterator for Walk {
    type Item = StoreObject;

    /// Yield the next directory as a `Collection` or the next file as an
    /// `Id`; files whose path cannot be turned into a `StoreId` are skipped.
    /// A walker error ends the iteration.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let dirent = match self.dirwalker.next() {
                None => return None,
                Some(Err(e)) => {
                    warn!("Error in Walker");
                    debug!("{:?}", e);
                    return None;
                },
                Some(Ok(d)) => d,
            };

            if dirent.file_type().is_dir() {
                return Some(StoreObject::Collection(dirent.path().to_path_buf()));
            }

            if dirent.file_type().is_file() {
                let pb = dirent.path().to_path_buf();
                match StoreId::new(Some(self.store_path.clone()), pb) {
                    Ok(sid) => return Some(StoreObject::Id(sid)),
                    Err(e) => trace_error(&e), // skip unmappable files
                }
            }
            // anything else (e.g. symlink): skip and keep walking
        }
    }
}
impl StoreEntry {

    /// Create a fresh, present, unborrowed entry backed by an absent file at
    /// the id's path.
    fn new(id: StoreId) -> Result<StoreEntry> {
        let pb = try!(id.clone().into_pathbuf());
        Ok(StoreEntry {
            id: id,
            file: FileAbstraction::Absent(pb),
            status: StoreEntryStatus::Present,
        })
    }

    /// The entry is currently borrowed, meaning that some thread is currently
    /// mutating it
    fn is_borrowed(&self) -> bool {
        self.status == StoreEntryStatus::Borrowed
    }

    /// Read the entry from its backing file. A missing file yields a fresh,
    /// empty `Entry`; a borrowed entry cannot be read.
    fn get_entry(&mut self) -> Result<Entry> {
        if self.is_borrowed() {
            return Err(SE::new(SEK::EntryAlreadyBorrowed, None));
        }

        match self.file.get_file_content() {
            Ok(mut content) => Entry::from_reader(self.id.clone(), &mut content),
            Err(err) => if err.err_type() == SEK::FileNotFound {
                Ok(Entry::new(self.id.clone()))
            } else {
                Err(err)
            },
        }
    }

    /// Persist `entry` to the backing file — but only while borrowed; an
    /// unborrowed entry has no pending changes, so this is a no-op then.
    fn write_entry(&mut self, entry: &Entry) -> Result<()> {
        if !self.is_borrowed() {
            return Ok(());
        }

        assert_eq!(self.id, entry.location);
        self.file.write_file_content(entry.to_str().as_bytes())
            .map_err_into(SEK::FileError)
            .map(|_| ())
    }
}
/// The Store itself, through this object one can interact with IMAG's entries
pub struct Store {
    location: PathBuf, // root directory of the store on disk

    /**
     * Configuration object of the store
     */
    configuration: Option<Value>,

    /*
     * Registered hooks, one aspect list per hook position
     */
    store_unload_aspects : Arc<Mutex<Vec<Aspect>>>,
    pre_create_aspects : Arc<Mutex<Vec<Aspect>>>,
    post_create_aspects : Arc<Mutex<Vec<Aspect>>>,
    pre_retrieve_aspects : Arc<Mutex<Vec<Aspect>>>,
    post_retrieve_aspects : Arc<Mutex<Vec<Aspect>>>,
    pre_update_aspects : Arc<Mutex<Vec<Aspect>>>,
    post_update_aspects : Arc<Mutex<Vec<Aspect>>>,
    pre_delete_aspects : Arc<Mutex<Vec<Aspect>>>,
    post_delete_aspects : Arc<Mutex<Vec<Aspect>>>,
    pre_move_aspects : Arc<Mutex<Vec<Aspect>>>,
    post_move_aspects : Arc<Mutex<Vec<Aspect>>>,

    /**
     * Internal Path->File cache map
     *
     * Caches the files, so they remain flock()ed
     *
     * Could be optimized for a threadsafe HashMap
     */
    entries: Arc<RwLock<HashMap<StoreId, StoreEntry>>>,
}
impl Store {
/// Create a new Store object
pub fn new(location: PathBuf, store_config: Option<Value>) -> Result<Store> {
use configuration::*;
debug!("Validating Store configuration");
let _ = try!(config_is_valid(&store_config).map_err_into(SEK::ConfigurationError));
debug!("Building new Store object");
if !location.exists() {
if !config_implicit_store_create_allowed(store_config.as_ref()) {
warn!("Implicitely creating store directory is denied");
warn!(" -> Either because configuration does not allow it");
warn!(" -> or because there is no configuration");
return Err(SEK::CreateStoreDirDenied.into_error())
.map_err_into(SEK::FileError)
.map_err_into(SEK::IoError);
}
try!(FileAbstraction::create_dir_all(&location)
.map_err_into(SEK::StorePathCreate)
.map_dbg_err_str("Failed"));
} else if location.is_file() {
debug!("Store path exists as file");
return Err(SEK::StorePathExists.into_error());
}
let store_unload_aspects = get_store_unload_aspect_names(&store_config)
.into_iter().map(|n| {
let cfg = AspectConfig::get_for(&store_config, n.clone());
Aspect::new(n, cfg)
}).collect();
let pre_create_aspects = get_pre_create_aspect_names(&store_config)
.into_iter().map(|n| {
let cfg = AspectConfig::get_for(&store_config, n.clone());
Aspect::new(n, cfg)
}).collect();
let post_create_aspects = get_post_create_aspect_names(&store_config)
.into_iter().map(|n| {
let cfg = AspectConfig::get_for(&store_config, n.clone());
Aspect::new(n, cfg)
}).collect();
let pre_retrieve_aspects = get_pre_retrieve_aspect_names(&store_config)
.into_iter().map(|n| {
let cfg = AspectConfig::get_for(&store_config, n.clone());
Aspect::new(n, cfg)
}).collect();
let post_retrieve_aspects = get_post_retrieve_aspect_names(&store_config)
.into_iter().map(|n| {
let cfg = AspectConfig::get_for(&store_config, n.clone());
Aspect::new(n, cfg)
}).collect();
let pre_update_aspects = get_pre_update_aspect_names(&store_config)
.into_iter().map(|n| {
let cfg = AspectConfig::get_for(&store_config, n.clone());
Aspect::new(n, cfg)
}).collect();
let post_update_aspects = get_post_update_aspect_names(&store_config)
.into_iter().map(|n| {
let cfg = AspectConfig::get_for(&store_config, n.clone());
Aspect::new(n, cfg)
}).collect();
let pre_delete_aspects = get_pre_delete_aspect_names(&store_config)
.into_iter().map(|n| {
let cfg = AspectConfig::get_for(&store_config, n.clone());
Aspect::new(n, cfg)
}).collect();
let post_delete_aspects = get_post_delete_aspect_names(&store_config)
.into_iter().map(|n| {
let cfg = AspectConfig::get_for(&store_config, n.clone());
Aspect::new(n, cfg)
}).collect();
let pre_move_aspects = get_pre_move_aspect_names(&store_config)
.into_iter().map(|n| {
let cfg = AspectConfig::get_for(&store_config, n.clone());
Aspect::new(n, cfg)
}).collect();
let post_move_aspects = get_post_move_aspect_names(&store_config)
.into_iter().map(|n| {
let cfg = AspectConfig::get_for(&store_config, n.clone());
Aspect::new(n, cfg)
}).collect();
let store = Store {
location: location.clone(),
configuration: store_config,
store_unload_aspects : Arc::new(Mutex::new(store_unload_aspects)),
pre_create_aspects : Arc::new(Mutex::new(pre_create_aspects)),
post_create_aspects : Arc::new(Mutex::new(post_create_aspects)),
pre_retrieve_aspects : Arc::new(Mutex::new(pre_retrieve_aspects)),
post_retrieve_aspects : Arc::new(Mutex::new(post_retrieve_aspects)),
pre_update_aspects : Arc::new(Mutex::new(pre_update_aspects)),
post_update_aspects : Arc::new(Mutex::new(post_update_aspects)),
pre_delete_aspects : Arc::new(Mutex::new(pre_delete_aspects)),
post_delete_aspects : Arc::new(Mutex::new(post_delete_aspects)),
pre_move_aspects : Arc::new(Mutex::new(pre_move_aspects)),
post_move_aspects : Arc::new(Mutex::new(post_move_aspects)),
entries: Arc::new(RwLock::new(HashMap::new())),
};
debug!("Store building succeeded");
debug!("------------------------");
debug!("{:?}", store);
debug!("------------------------");
Ok(store)
}
    /// Get the store configuration
    ///
    /// Returns `None` if the store was built without a configuration.
    pub fn config(&self) -> Option<&Value> {
        self.configuration.as_ref()
    }
/// Verify the store.
///
/// This function is not intended to be called by normal programs but only by `imag-store`.
#[cfg(feature = "verify")]
pub fn verify(&self) -> bool {
info!("Header | Content length | Path");
info!("-------+----------------+-----");
WalkDir::new(self.location.clone())
.into_iter()
.map(|res| {
match res {
Ok(dent) => {
if dent.file_type().is_file() {
match self.get(PathBuf::from(dent.path())) {
Ok(Some(fle)) => {
let p = fle.get_location();
let content_len = fle.get_content().len();
let header = if fle.get_header().verify().is_ok() {
"ok"
} else {
"broken"
};
info!("{: >6} | {: >14} | {:?}", header, content_len, p.deref());
},
Ok(None) => {
info!("{: >6} | {: >14} | {:?}", "?", "couldn't load", dent.path());
},
Err(e) => {
debug!("{:?}", e);
},
}
} else {
info!("{: >6} | {: >14} | {:?}", "?", "<no file>", dent.path());
}
},
Err(e) => {
debug!("{:?}", e);
},
}
true
})
.all(|b| b)
}
    /// Creates the Entry at the given location (inside the entry)
    ///
    /// Flow: pre-create hooks -> insert a borrowed `StoreEntry` into the
    /// cache (failing if the id already exists) -> post-create hooks on the
    /// new `FileLockEntry`.
    pub fn create<'a, S: IntoStoreId>(&'a self, id: S) -> Result<FileLockEntry<'a>> {
        let id = try!(id.into_storeid()).with_base(self.path().clone());
        if let Err(e) = self.execute_hooks_for_id(self.pre_create_aspects.clone(), &id) {
            return Err(e)
                .map_err_into(SEK::PreHookExecuteError)
                .map_err_into(SEK::HookExecutionError)
                .map_err_into(SEK::CreateCallError)
        }

        {
            // scope the write lock so it is released before the post hooks run
            let mut hsmap = match self.entries.write() {
                Err(_) => return Err(SEK::LockPoisoned.into_error()).map_err_into(SEK::CreateCallError),
                Ok(s) => s,
            };

            if hsmap.contains_key(&id) {
                return Err(SEK::EntryAlreadyExists.into_error()).map_err_into(SEK::CreateCallError);
            }
            hsmap.insert(id.clone(), {
                // mark the new entry borrowed immediately: the caller gets a
                // FileLockEntry for it below
                let mut se = try!(StoreEntry::new(id.clone()));
                se.status = StoreEntryStatus::Borrowed;
                se
            });
        }

        let mut fle = FileLockEntry::new(self, Entry::new(id));
        self.execute_hooks_for_mut_file(self.post_create_aspects.clone(), &mut fle)
            .map_err_into(SEK::PostHookExecuteError)
            .map_err_into(SEK::HookExecutionError)
            .map_err_into(SEK::CreateCallError)
            .map(|_| fle)
    }
    /// Borrow a given Entry. When the `FileLockEntry` is either `update`d or
    /// dropped, the new Entry is written to disk
    ///
    /// Implicitely creates a entry in the store if there is no entry with the id `id`. For a
    /// non-implicitely-create look at `Store::get`.
    ///
    /// Flow: pre-retrieve hooks -> get-or-insert the cache entry and mark it
    /// borrowed -> post-retrieve hooks on the resulting `FileLockEntry`.
    pub fn retrieve<'a, S: IntoStoreId>(&'a self, id: S) -> Result<FileLockEntry<'a>> {
        let id = try!(id.into_storeid()).with_base(self.path().clone());
        if let Err(e) = self.execute_hooks_for_id(self.pre_retrieve_aspects.clone(), &id) {
            return Err(e)
                .map_err_into(SEK::PreHookExecuteError)
                .map_err_into(SEK::HookExecutionError)
                .map_err_into(SEK::RetrieveCallError)
        }

        let entry = try!({
            self.entries
                .write()
                .map_err(|_| SE::new(SEK::LockPoisoned, None))
                .and_then(|mut es| {
                    // get-or-insert, then mark borrowed while the lock is held
                    let new_se = try!(StoreEntry::new(id.clone()));
                    let mut se = es.entry(id.clone()).or_insert(new_se);
                    let entry = se.get_entry();
                    se.status = StoreEntryStatus::Borrowed;
                    entry
                })
                .map_err_into(SEK::RetrieveCallError)
        });

        let mut fle = FileLockEntry::new(self, entry);
        self.execute_hooks_for_mut_file(self.post_retrieve_aspects.clone(), &mut fle)
            .map_err_into(SEK::PostHookExecuteError)
            .map_err_into(SEK::HookExecutionError)
            .map_err_into(SEK::RetrieveCallError)
            .and(Ok(fle))
    }
/// Get an entry from the store if it exists.
///
/// This executes the {pre,post}_retrieve_aspects hooks.
pub fn get<'a, S: IntoStoreId + Clone>(&'a self, id: S) -> Result<Option<FileLockEntry<'a>>> {
let id = try!(id.into_storeid()).with_base(self.path().clone());
let exists = try!(self.entries
.read()
.map(|map| map.contains_key(&id))
.map_err(|_| SE::new(SEK::LockPoisoned, None))
.map_err_into(SEK::GetCallError)
);
if !exists && !id.exists() {
debug!("Does not exist in internal cache or filesystem: {:?}", id);
return Ok(None);
}
self.retrieve(id).map(Some).map_err_into(SEK::GetCallError)
}
    /// Iterate over all StoreIds for one module name
    ///
    /// Globs `<store>/<mod_name>/**/*` and wraps the matches in a
    /// `StoreIdIterator`.
    pub fn retrieve_for_module(&self, mod_name: &str) -> Result<StoreIdIterator> {
        let mut path = self.path().clone();
        path.push(mod_name);

        path.to_str()
            .ok_or(SE::new(SEK::EncodingError, None))
            .and_then(|path| {
                let path = [ path, "/**/*" ].join("");
                debug!("glob()ing with '{}'", path);
                glob(&path[..]).map_err_into(SEK::GlobError)
            })
            .map(|paths| GlobStoreIdIterator::new(paths, self.path().clone()).into())
            .map_err_into(SEK::GlobError)
            .map_err_into(SEK::RetrieveForModuleCallError)
    }
    // Walk the store tree for the module
    //
    // Returns a `Walk` iterator over `<store>/<mod_name>`.
    pub fn walk<'a>(&'a self, mod_name: &str) -> Walk {
        Walk::new(self.path().clone(), mod_name)
    }
    /// Return the `FileLockEntry` and write to disk
    ///
    /// Consumes the `FileLockEntry`; `modify_presence` is false here because
    /// the subsequent drop of `entry` performs the presence reset itself.
    pub fn update<'a>(&'a self, mut entry: FileLockEntry<'a>) -> Result<()> {
        self._update(&mut entry, false).map_err_into(SEK::UpdateCallError)
    }
    /// Internal method to write to the filesystem store.
    ///
    /// # Assumptions
    /// This method assumes that entry is dropped _right after_ the call, hence
    /// it is not public.
    ///
    /// `modify_presence`: when true, the cache entry is switched back to
    /// `Present` after writing (used by the `FileLockEntry` drop path to
    /// release the borrow).
    fn _update<'a>(&'a self, mut entry: &mut FileLockEntry<'a>, modify_presence: bool) -> Result<()> {
        let _ = try!(self.execute_hooks_for_mut_file(self.pre_update_aspects.clone(), &mut entry)
            .map_err_into(SEK::PreHookExecuteError)
            .map_err_into(SEK::HookExecutionError)
            .map_err_into(SEK::UpdateCallError)
        );

        let mut hsmap = match self.entries.write() {
            Err(_) => return Err(SE::new(SEK::LockPoisoned, None)),
            Ok(e) => e,
        };

        // the entry must be known to the cache
        let mut se = try!(hsmap.get_mut(&entry.location).ok_or(SE::new(SEK::IdNotFound, None)));

        // updating a non-borrowed entry is a logic error, not a runtime error
        assert!(se.is_borrowed(), "Tried to update a non borrowed entry.");

        debug!("Verifying Entry");
        try!(entry.entry.verify());

        debug!("Writing Entry");
        try!(se.write_entry(&entry.entry));
        if modify_presence {
            se.status = StoreEntryStatus::Present;
        }

        self.execute_hooks_for_mut_file(self.post_update_aspects.clone(), &mut entry)
            .map_err_into(SEK::PostHookExecuteError)
            .map_err_into(SEK::HookExecutionError)
            .map_err_into(SEK::UpdateCallError)
    }
/// Retrieve a copy of a given entry, this cannot be used to mutate
/// the one on disk
pub fn retrieve_copy<S: IntoStoreId>(&self, id: S) -> Result<Entry> {
let id = try!(id.into_storeid()).with_base(self.path().clone());
let entries = match self.entries.write() {
Err(_) => {
return Err(SE::new(SEK::LockPoisoned, None))
.map_err_into(SEK::RetrieveCopyCallError);
},
Ok(e) => e,
};
// if the entry is currently modified by the user, we cannot drop it
if entries.get(&id).map(|e| e.is_borrowed()).unwrap_or(false) {
return Err(SE::new(SEK::IdLocked, None)).map_err_into(SEK::RetrieveCopyCallError);
}
try!(StoreEntry::new(id)).get_entry()
}
    /// Delete an entry
    ///
    /// Flow: pre-delete hooks -> remove the entry from the cache and its file
    /// from disk (under the write lock) -> post-delete hooks.
    pub fn delete<S: IntoStoreId>(&self, id: S) -> Result<()> {
        let id = try!(id.into_storeid()).with_base(self.path().clone());
        if let Err(e) = self.execute_hooks_for_id(self.pre_delete_aspects.clone(), &id) {
            return Err(e)
                .map_err_into(SEK::PreHookExecuteError)
                .map_err_into(SEK::HookExecutionError)
                .map_err_into(SEK::DeleteCallError)
        }

        {
            // scope the write lock so it is released before the post hooks run
            let mut entries = match self.entries.write() {
                Err(_) => return Err(SE::new(SEK::LockPoisoned, None))
                    .map_err_into(SEK::DeleteCallError),
                Ok(e) => e,
            };

            // if the entry is currently modified by the user, we cannot drop it
            match entries.get(&id) {
                None => {
                    return Err(SEK::FileNotFound.into_error()).map_err_into(SEK::DeleteCallError)
                },
                Some(e) => if e.is_borrowed() {
                    return Err(SE::new(SEK::IdLocked, None)).map_err_into(SEK::DeleteCallError)
                }
            }

            // remove the entry first, then the file
            entries.remove(&id);
            let pb = try!(id.clone().with_base(self.path().clone()).into_pathbuf());
            if let Err(e) = FileAbstraction::remove_file(&pb) {
                return Err(SEK::FileError.into_error_with_cause(Box::new(e)))
                    .map_err_into(SEK::DeleteCallError);
            }
        }

        self.execute_hooks_for_id(self.post_delete_aspects.clone(), &id)
            .map_err_into(SEK::PostHookExecuteError)
            .map_err_into(SEK::HookExecutionError)
            .map_err_into(SEK::DeleteCallError)
    }
    /// Save a copy of the Entry in another place
    /// Executes the post_move_aspects for the new id
    pub fn save_to(&self, entry: &FileLockEntry, new_id: StoreId) -> Result<()> {
        // copy-only variant: keeps the original (remove_old = false)
        self.save_to_other_location(entry, new_id, false)
    }
    /// Save an Entry in another place
    /// Removes the original entry
    /// Executes the post_move_aspects for the new id
    pub fn save_as(&self, entry: FileLockEntry, new_id: StoreId) -> Result<()> {
        // move variant: deletes the original file (remove_old = true)
        self.save_to_other_location(&entry, new_id, true)
    }
    /// Copy (and optionally remove) the entry's file to `new_id`'s location.
    ///
    /// Common implementation behind `save_to` and `save_as`. Fails if an
    /// entry already exists under `new_id`; runs the post-move hooks for the
    /// new id on success.
    fn save_to_other_location(&self, entry: &FileLockEntry, new_id: StoreId, remove_old: bool)
        -> Result<()>
    {
        let new_id = new_id.with_base(self.path().clone());
        let hsmap = try!(
            self.entries
                .write()
                .map_err(|_| SEK::LockPoisoned.into_error())
                .map_err_into(SEK::MoveCallError)
        );

        if hsmap.contains_key(&new_id) {
            return Err(SEK::EntryAlreadyExists.into_error()).map_err_into(SEK::MoveCallError)
        }

        let old_id = entry.get_location().clone();
        let old_id_as_path = try!(old_id.clone().with_base(self.path().clone()).into_pathbuf());
        let new_id_as_path = try!(new_id.clone().with_base(self.path().clone()).into_pathbuf());
        FileAbstraction::copy(&old_id_as_path, &new_id_as_path)
            .and_then(|_| {
                if remove_old {
                    FileAbstraction::remove_file(&old_id_as_path)
                } else {
                    Ok(())
                }
            })
            .map_err_into(SEK::FileError)
            .and_then(|_| self.execute_hooks_for_id(self.post_move_aspects.clone(), &new_id)
                .map_err_into(SEK::PostHookExecuteError)
                .map_err_into(SEK::HookExecutionError))
            .map_err_into(SEK::MoveCallError)
    }
/// Move an entry without loading
///
/// This function moves an entry from one path to another.
///
/// Generally, this function shouldn't be used by library authors, if they "just" want to move
/// something around. A library for moving entries while caring about meta-data and links.
///
/// # Errors
///
/// This function returns an error in certain cases:
///
/// * If pre-move-hooks error (if they return an error which indicates that the action should be
/// aborted)
/// * If the about-to-be-moved entry is borrowed
/// * If the lock on the internal data structure cannot be aquired
/// * If the new path already exists
/// * If the about-to-be-moved entry does not exist
/// * If the FS-operation failed
/// * If the post-move-hooks error (though the operation has succeeded then).
///
/// # Warnings
///
/// This should be used with _great_ care, as moving an entry from `a` to `b` might result in
/// dangling links (see below).
///
/// ## Moving linked entries
///
/// If the entry which is moved is linked to another entry, these links get invalid (but we do
/// not detect this here). As links are always two-way-links, so `a` is not only linked to `b`,
/// but also the other way round, moving `b` to `c` results in the following scenario:
///
/// * `a` links to `b`, which does not exist anymore.
/// * `c` links to `a`, which does exist.
///
/// So the link is _partly dangling_, so to say.
///
pub fn move_by_id(&self, old_id: StoreId, new_id: StoreId) -> Result<()> {
let new_id = new_id.with_base(self.path().clone());
let old_id = old_id.with_base(self.path().clone());
if let Err(e) = self.execute_hooks_for_id(self.pre_move_aspects.clone(), &old_id) {
return Err(e)
.map_err_into(SEK::PreHookExecuteError)
.map_err_into(SEK::HookExecutionError)
.map_err_into(SEK::MoveByIdCallError)
}
{
let mut hsmap = match self.entries.write() {
Err(_) => return Err(SE::new(SEK::LockPoisoned, None)),
Ok(m) => m,
};
if hsmap.contains_key(&new_id) {
return Err(SEK::EntryAlreadyExists.into_error());
}
// if we do not have an entry here, we fail in `FileAbstraction::rename()` below.
// if we have one, but it is borrowed, we really should not rename it, as this might
// lead to strange errors
if hsmap.get(&old_id).map(|e| e.is_borrowed()).unwrap_or(false) {
return Err(SEK::EntryAlreadyBorrowed.into_error());
}
let old_id_pb = try!(old_id.clone().with_base(self.path().clone()).into_pathbuf());
let new_id_pb = try!(new_id.clone().with_base(self.path().clone()).into_pathbuf());
match FileAbstraction::rename(&old_id_pb, &new_id_pb) {
Err(e) => return Err(SEK::EntryRenameError.into_error_with_cause(Box::new(e))),
Ok(_) => {
debug!("Rename worked on filesystem");
// assert enforced through check hsmap.contains_key(&new_id) above.
// Should therefor never fail
assert!(hsmap
.remove(&old_id)
.and_then(|mut entry| {
entry.id = new_id.clone();
hsmap.insert(new_id.clone(), entry)
}).is_none())
}
}
}
self.execute_hooks_for_id(self.pre_move_aspects.clone(), &new_id)
.map_err_into(SEK::PostHookExecuteError)
.map_err_into(SEK::HookExecutionError)
.map_err_into(SEK::MoveByIdCallError)
}
    /// Gets the path where this store is on the disk
    pub fn path(&self) -> &PathBuf {
        &self.location
    }
    /// Register hook `h` under the aspect named `aspect_name` at hook
    /// position `position`.
    ///
    /// If the store configuration has a `hooks.<hookname>` section, it is
    /// handed to the hook before registration. Fails with
    /// `HookRegisterError` if the aspect list lock is poisoned or no aspect
    /// with the given name exists.
    pub fn register_hook(&mut self,
                         position: HookPosition,
                         aspect_name: &str,
                         mut h: Box<Hook>)
        -> Result<()>
    {
        debug!("Registering hook: {:?}", h);
        debug!(" in position: {:?}", position);
        debug!(" with aspect: {:?}", aspect_name);

        // pick the aspect list matching the hook position.
        // NOTE(review): the Pre/PostMove aspect lists are not reachable from
        // this match — presumably `HookPosition` has no move variants (the
        // match is exhaustive); confirm against hook::position.
        let guard = match position {
            HookPosition::StoreUnload => self.store_unload_aspects.clone(),
            HookPosition::PreCreate => self.pre_create_aspects.clone(),
            HookPosition::PostCreate => self.post_create_aspects.clone(),
            HookPosition::PreRetrieve => self.pre_retrieve_aspects.clone(),
            HookPosition::PostRetrieve => self.post_retrieve_aspects.clone(),
            HookPosition::PreUpdate => self.pre_update_aspects.clone(),
            HookPosition::PostUpdate => self.post_update_aspects.clone(),
            HookPosition::PreDelete => self.pre_delete_aspects.clone(),
            HookPosition::PostDelete => self.post_delete_aspects.clone(),
        };

        let mut guard = match guard.deref().lock().map_err(|_| SE::new(SEK::LockError, None)) {
            Err(e) => return Err(SEK::HookRegisterError.into_error_with_cause(Box::new(e))),
            Ok(g) => g,
        };

        for mut aspect in guard.deref_mut() {
            if aspect.name().clone() == aspect_name.clone() {
                debug!("Trying to find configuration for hook: {:?}", h);
                self.get_config_for_hook(h.name()).map(|config| h.set_config(config));
                debug!("Trying to register hook in aspect: {:?} <- {:?}", aspect, h);
                aspect.register_hook(h);
                return Ok(());
            }
        }

        // no configured aspect carries the requested name
        let annfe = SEK::AspectNameNotFoundError.into_error();
        Err(SEK::HookRegisterError.into_error_with_cause(Box::new(annfe)))
    }
fn get_config_for_hook(&self, name: &str) -> Option<&Value> {
match self.configuration {
Some(Value::Table(ref tabl)) => {
debug!("Trying to head 'hooks' section from {:?}", tabl);
tabl.get("hooks")
.map(|hook_section| {
debug!("Found hook section: {:?}", hook_section);
debug!("Reading section key: {:?}", name);
match *hook_section {
Value::Table(ref tabl) => tabl.get(name),
_ => None
}
})
.unwrap_or(None)
},
_ => None,
}
}
fn execute_hooks_for_id(&self,
aspects: Arc<Mutex<Vec<Aspect>>>,
id: &StoreId)
-> HookResult<()>
{
match aspects.lock() {
Err(_) => return Err(HookErrorKind::HookExecutionError.into()),
Ok(g) => g
}.iter().fold_defresult(|aspect| {
debug!("[Aspect][exec]: {:?}", aspect);
(aspect as &StoreIdAccessor).access(id)
}).map_err(Box::new)
.map_err(|e| HookErrorKind::HookExecutionError.into_error_with_cause(e))
}
fn execute_hooks_for_mut_file(&self,
aspects: Arc<Mutex<Vec<Aspect>>>,
fle: &mut FileLockEntry)
-> HookResult<()>
{
match aspects.lock() {
Err(_) => return Err(HookErrorKind::HookExecutionError.into()),
Ok(g) => g
}.iter().fold_defresult(|aspect| {
debug!("[Aspect][exec]: {:?}", aspect);
aspect.access_mut(fle)
}).map_err(Box::new)
.map_err(|e| HookErrorKind::HookExecutionError.into_error_with_cause(e))
}
}
impl Debug for Store {

    /// Multi-line debug dump of the store.
    /// Note: the store_unload and pre/post_move aspect lists are not part of
    /// the output.
    fn fmt(&self, fmt: &mut Formatter) -> RResult<(), FMTError> {
        try!(write!(fmt, " --- Store ---\n"));
        try!(write!(fmt, "\n"));
        try!(write!(fmt, " - location               : {:?}\n", self.location));
        try!(write!(fmt, " - configuration          : {:?}\n", self.configuration));
        try!(write!(fmt, " - pre_create_aspects     : {:?}\n", self.pre_create_aspects ));
        try!(write!(fmt, " - post_create_aspects    : {:?}\n", self.post_create_aspects ));
        try!(write!(fmt, " - pre_retrieve_aspects   : {:?}\n", self.pre_retrieve_aspects ));
        try!(write!(fmt, " - post_retrieve_aspects  : {:?}\n", self.post_retrieve_aspects ));
        try!(write!(fmt, " - pre_update_aspects     : {:?}\n", self.pre_update_aspects ));
        try!(write!(fmt, " - post_update_aspects    : {:?}\n", self.post_update_aspects ));
        try!(write!(fmt, " - pre_delete_aspects     : {:?}\n", self.pre_delete_aspects ));
        try!(write!(fmt, " - post_delete_aspects    : {:?}\n", self.post_delete_aspects ));
        try!(write!(fmt, "\n"));
        try!(write!(fmt, "Entries:\n"));
        try!(write!(fmt, "{:?}", self.entries));
        try!(write!(fmt, "\n"));
        Ok(())
    }
}
impl Drop for Store {

    /**
     * Unlock all files on drop
     *
     * TODO: Unlock them
     */
    // Currently only executes the store-unload hooks (with a StoreId pointing
    // at the store root); if that StoreId cannot be built, hooks are skipped
    // with a warning.
    fn drop(&mut self) {
        match StoreId::new(Some(self.location.clone()), PathBuf::from(".")) {
            Err(e) => {
                trace_error(&e);
                warn!("Cannot construct StoreId for Store to execute hooks!");
                warn!("Will close Store without executing hooks!");
            },
            Ok(store_id) => {
                if let Err(e) = self.execute_hooks_for_id(self.store_unload_aspects.clone(), &store_id) {
                    debug!("Store-load hooks execution failed. Cannot create store object.");
                    warn!("Store Unload Hook error: {:?}", e);
                }
            },
        };

        debug!("Dropping store");
    }
}
/// A struct that allows you to borrow an Entry
///
/// Holds a reference back to the owning `Store` so the entry can be written
/// back when this guard is updated or dropped (see the `Drop` impls).
pub struct FileLockEntry<'a> {
    store: &'a Store, // owning store, used for write-back
    entry: Entry,     // the borrowed entry data
}
// note: the stray trailing comma in the generics (`<'a, >`) is gone
impl<'a> FileLockEntry<'a> {

    /// Wrap `entry` together with the store that owns it.
    fn new(store: &'a Store, entry: Entry) -> FileLockEntry<'a> {
        FileLockEntry {
            store: store,
            entry: entry,
        }
    }
}
impl<'a> Debug for FileLockEntry<'a> {
    /// Prints only the store location, not the entry contents.
    fn fmt(&self, fmt: &mut Formatter) -> RResult<(), FMTError> {
        write!(fmt, "FileLockEntry(Store = {})", self.store.location.to_str()
               .unwrap_or("Unknown Path"))
    }
}
// read access to the wrapped Entry
impl<'a> Deref for FileLockEntry<'a> {
    type Target = Entry;

    fn deref(&self) -> &Self::Target {
        &self.entry
    }
}
// mutable access to the wrapped Entry
impl<'a> DerefMut for FileLockEntry<'a> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.entry
    }
}
#[cfg(not(test))]
impl<'a> Drop for FileLockEntry<'a> {
    /// This will silently ignore errors, use `Store::update` if you want to catch the errors
    fn drop(&mut self) {
        // modify_presence = true releases the borrow in the store cache
        let _ = self.store._update(self, true);
    }
}
#[cfg(test)]
impl<'a> Drop for FileLockEntry<'a> {
    /// This will not silently ignore errors but prints the result of the _update() call for testing
    fn drop(&mut self) {
        let _ = self.store._update(self, true).map_err(|e| trace_error(&e));
    }
}
/// `EntryContent` type
pub type EntryContent = String;

/// `EntryHeader`
///
/// This is basically a wrapper around `toml::Table` which provides convenience to the user of the
/// library.
#[derive(Debug, Clone)]
pub struct EntryHeader {
    // expected to hold a `Value::Table`; `verify()` fails otherwise
    header: Value,
}

/// Result type for header parsing operations.
pub type EntryResult<V> = RResult<V, ParserError>;

/// One component of a parsed header path spec such as `"a.b.0"`.
#[derive(Debug, Clone, PartialEq, Eq)]
enum Token {
    Key(String),  // a map key
    Index(usize), // an array index
}
/**
* Wrapper type around file header (TOML) object
*/
impl EntryHeader {
    /// Build a header with the default structure (see `build_default_header`).
    pub fn new() -> EntryHeader {
        EntryHeader {
            header: build_default_header()
        }
    }
    /// Borrow the underlying TOML value.
    pub fn header(&self) -> &Value {
        &self.header
    }
    /// Wrap an already-parsed TOML table as a header.
    fn from_table(t: Table) -> EntryHeader {
        EntryHeader {
            header: Value::Table(t)
        }
    }
    /// Parse `s` as TOML, verify its consistency and wrap it as a header.
    ///
    /// Fails with `ParserErrorKind::TOMLParserErrors` when TOML parsing
    /// yields nothing.
    pub fn parse(s: &str) -> EntryResult<EntryHeader> {
        use toml::Parser;

        let mut parser = Parser::new(s);
        parser.parse()
            .ok_or(ParserErrorKind::TOMLParserErrors.into())
            .and_then(verify_header_consistency)
            .map(EntryHeader::from_table)
    }
    /// Check that the header is a TOML table and satisfies `verify_header`.
    pub fn verify(&self) -> Result<()> {
        match self.header {
            Value::Table(ref t) => verify_header(&t),
            _ => Err(SE::new(SEK::HeaderTypeFailure, None)),
        }
    }
/**
* Insert a header field by a string-spec
*
* ```ignore
* insert("something.in.a.field", Boolean(true));
* ```
*
* If an array field was accessed which is _out of bounds_ of the array available, the element
* is appended to the array.
*
* Inserts a Boolean in the section "something" -> "in" -> "a" -> "field"
* A JSON equivalent would be
*
* {
* something: {
* in: {
* a: {
* field: true
* }
* }
* }
* }
*
* Returns true if header field was set, false if there is already a value
*/
    /// Insert with the default separator `.` (see the documentation above).
    pub fn insert(&mut self, spec: &str, v: Value) -> Result<bool> {
        self.insert_with_sep(spec, '.', v)
    }
    /// Like `insert`, but with a caller-chosen path separator.
    ///
    /// Returns `Ok(false)` without modifying anything when a value already
    /// exists at the destination.
    pub fn insert_with_sep(&mut self, spec: &str, sep: char, v: Value) -> Result<bool> {
        let tokens = match EntryHeader::tokenize(spec, sep) {
            Err(e) => return Err(e),
            Ok(t) => t
        };

        // the last token is where the value goes
        let destination = match tokens.iter().last() {
            None => return Err(SE::new(SEK::HeaderPathSyntaxError, None)),
            Some(d) => d,
        };

        let path_to_dest = tokens[..(tokens.len() - 1)].into(); // N - 1 tokens

        // walk N-1 tokens
        let value = match EntryHeader::walk_header(&mut self.header, path_to_dest) {
            Err(e) => return Err(e),
            Ok(v) => v
        };

        // There is already an value at this place
        if EntryHeader::extract(value, destination).is_ok() {
            return Ok(false);
        }

        match *destination {
            Token::Key(ref s) => { // if the destination shall be an map key
                match *value {
                    /*
                     * Put it in there if we have a map
                     */
                    Value::Table(ref mut t) => {
                        t.insert(s.clone(), v);
                    }

                    /*
                     * Fail if there is no map here
                     */
                    _ => return Err(SE::new(SEK::HeaderPathTypeFailure, None)),
                }
            },

            Token::Index(i) => { // if the destination shall be an array
                match *value {
                    /*
                     * Put it in there if we have an array
                     */
                    Value::Array(ref mut a) => {
                        a.push(v); // push to the end of the array

                        // if the index is inside the array, we swap-remove the element at this
                        // index
                        // NOTE(review): this guard looks suspicious — the
                        // sibling `set_with_sep` uses `a.len() > i`, and with
                        // `a.len() < i` the `swap_remove(i)` below would be
                        // out of bounds (panic) whenever it executes. Confirm
                        // intent before changing.
                        if a.len() < i {
                            a.swap_remove(i);
                        }
                    },

                    /*
                     * Fail if there is no array here
                     */
                    _ => return Err(SE::new(SEK::HeaderPathTypeFailure, None)),
                }
            },
        }

        Ok(true)
    }
/**
* Set a header field by a string-spec
*
* ```ignore
* set("something.in.a.field", Boolean(true));
* ```
*
* Sets a Boolean in the section "something" -> "in" -> "a" -> "field"
* A JSON equivalent would be
*
* {
* something: {
* in: {
* a: {
* field: true
* }
* }
* }
* }
*
* If there is already a value at this place, this value will be overridden and the old value
* will be returned
*/
pub fn set(&mut self, spec: &str, v: Value) -> Result<Option<Value>> {
self.set_with_sep(spec, '.', v)
}
pub fn set_with_sep(&mut self, spec: &str, sep: char, v: Value) -> Result<Option<Value>> {
let tokens = match EntryHeader::tokenize(spec, sep) {
Err(e) => return Err(e),
Ok(t) => t,
};
debug!("tokens = {:?}", tokens);
let destination = match tokens.iter().last() {
None => return Err(SE::new(SEK::HeaderPathSyntaxError, None)),
Some(d) => d
};
debug!("destination = {:?}", destination);
let path_to_dest = tokens[..(tokens.len() - 1)].into(); // N - 1 tokens
// walk N-1 tokens
let value = match EntryHeader::walk_header(&mut self.header, path_to_dest) {
Err(e) => return Err(e),
Ok(v) => v
};
debug!("walked value = {:?}", value);
match *destination {
Token::Key(ref s) => { // if the destination shall be an map key->value
match *value {
/*
* Put it in there if we have a map
*/
Value::Table(ref mut t) => {
debug!("Matched Key->Table");
return Ok(t.insert(s.clone(), v));
}
/*
* Fail if there is no map here
*/
_ => {
debug!("Matched Key->NON-Table");
return Err(SE::new(SEK::HeaderPathTypeFailure, None));
}
}
},
Token::Index(i) => { // if the destination shall be an array
match *value {
/*
* Put it in there if we have an array
*/
Value::Array(ref mut a) => {
debug!("Matched Index->Array");
a.push(v); // push to the end of the array
// if the index is inside the array, we swap-remove the element at this
// index
if a.len() > i {
debug!("Swap-Removing in Array {:?}[{:?}] <- {:?}", a, i, a[a.len()-1]);
return Ok(Some(a.swap_remove(i)));
}
debug!("Appended");
return Ok(None);
},
/*
* Fail if there is no array here
*/
_ => {
debug!("Matched Index->NON-Array");
return Err(SE::new(SEK::HeaderPathTypeFailure, None));
},
}
},
}
Ok(None)
}
/**
* Read a header field by a string-spec
*
* ```ignore
* let value = read("something.in.a.field");
* ```
*
* Reads a Value in the section "something" -> "in" -> "a" -> "field"
* A JSON equivalent would be
*
* {
* something: {
* in: {
* a: {
* field: true
* }
* }
* }
* }
*
* If there is no a value at this place, None will be returned. This also holds true for Arrays
* which are accessed at an index which is not yet there, even if the accessed index is much
* larger than the array length.
*/
pub fn read(&self, spec: &str) -> Result<Option<Value>> {
self.read_with_sep(spec, '.')
}
pub fn read_with_sep(&self, spec: &str, splitchr: char) -> Result<Option<Value>> {
let tokens = match EntryHeader::tokenize(spec, splitchr) {
Err(e) => return Err(e),
Ok(t) => t,
};
let mut header_clone = self.header.clone(); // we clone as READing is simpler this way
// walk N-1 tokens
match EntryHeader::walk_header(&mut header_clone, tokens) {
Err(e) => match e.err_type() {
// We cannot find the header key, as there is no path to it
SEK::HeaderKeyNotFound => Ok(None),
_ => Err(e),
},
Ok(v) => Ok(Some(v.clone())),
}
}
pub fn delete(&mut self, spec: &str) -> Result<Option<Value>> {
let tokens = match EntryHeader::tokenize(spec, '.') {
Err(e) => return Err(e),
Ok(t) => t
};
let destination = match tokens.iter().last() {
None => return Err(SE::new(SEK::HeaderPathSyntaxError, None)),
Some(d) => d
};
debug!("destination = {:?}", destination);
let path_to_dest = tokens[..(tokens.len() - 1)].into(); // N - 1 tokens
// walk N-1 tokens
let mut value = match EntryHeader::walk_header(&mut self.header, path_to_dest) {
Err(e) => return Err(e),
Ok(v) => v
};
debug!("walked value = {:?}", value);
match *destination {
Token::Key(ref s) => { // if the destination shall be an map key->value
match *value {
Value::Table(ref mut t) => {
debug!("Matched Key->Table, removing {:?}", s);
return Ok(t.remove(s));
},
_ => {
debug!("Matched Key->NON-Table");
return Err(SE::new(SEK::HeaderPathTypeFailure, None));
}
}
},
Token::Index(i) => { // if the destination shall be an array
match *value {
Value::Array(ref mut a) => {
// if the index is inside the array, we swap-remove the element at this
// index
if a.len() > i {
debug!("Removing in Array {:?}[{:?}]", a, i);
return Ok(Some(a.remove(i)));
} else {
return Ok(None);
}
},
_ => {
debug!("Matched Index->NON-Array");
return Err(SE::new(SEK::HeaderPathTypeFailure, None));
},
}
},
}
Ok(None)
}
fn tokenize(spec: &str, splitchr: char) -> Result<Vec<Token>> {
use std::str::FromStr;
spec.split(splitchr)
.map(|s| {
usize::from_str(s)
.map(Token::Index)
.or_else(|_| Ok(Token::Key(String::from(s))))
})
.collect()
}
fn walk_header(v: &mut Value, tokens: Vec<Token>) -> Result<&mut Value> {
use std::vec::IntoIter;
fn walk_iter<'a>(v: Result<&'a mut Value>, i: &mut IntoIter<Token>) -> Result<&'a mut Value> {
let next = i.next();
v.and_then(move |value| {
if let Some(token) = next {
walk_iter(EntryHeader::extract(value, &token), i)
} else {
Ok(value)
}
})
}
walk_iter(Ok(v), &mut tokens.into_iter())
}
fn extract_from_table<'a>(v: &'a mut Value, s: &str) -> Result<&'a mut Value> {
match *v {
Value::Table(ref mut t) => {
t.get_mut(&s[..])
.ok_or(SE::new(SEK::HeaderKeyNotFound, None))
},
_ => Err(SE::new(SEK::HeaderPathTypeFailure, None)),
}
}
fn extract_from_array(v: &mut Value, i: usize) -> Result<&mut Value> {
match *v {
Value::Array(ref mut a) => {
if a.len() < i {
Err(SE::new(SEK::HeaderKeyNotFound, None))
} else {
Ok(&mut a[i])
}
},
_ => Err(SE::new(SEK::HeaderPathTypeFailure, None)),
}
}
fn extract<'a>(v: &'a mut Value, token: &Token) -> Result<&'a mut Value> {
match *token {
Token::Key(ref s) => EntryHeader::extract_from_table(v, s),
Token::Index(i) => EntryHeader::extract_from_array(v, i),
}
}
}
impl Into<Table> for EntryHeader {

    /// Unwrap the inner TOML table. Panics if the header is not a table,
    /// which the type's constructors are supposed to rule out.
    fn into(self) -> Table {
        if let Value::Table(t) = self.header {
            t
        } else {
            panic!("EntryHeader is not a table!")
        }
    }

}
impl From<Table> for EntryHeader {

    /// Wrap a TOML table into an `EntryHeader` without any validation.
    fn from(t: Table) -> EntryHeader {
        let header = Value::Table(t);
        EntryHeader { header: header }
    }

}
/// Build the default header: an "imag" section carrying the current version
/// string and an empty "links" array.
fn build_default_header() -> Value { // BTreeMap<String, Value>
    let mut imag_map = BTreeMap::<String, Value>::new();
    imag_map.insert(String::from("version"), Value::String(String::from(version!())));
    imag_map.insert(String::from("links"), Value::Array(vec![]));

    let mut m = BTreeMap::new();
    m.insert(String::from("imag"), Value::Table(imag_map));

    Value::Table(m)
}
/// Verify that `t` is a structurally valid entry header: it has an "imag"
/// main section carrying version information, and every top-level entry in
/// the base table is itself a table.
fn verify_header(t: &Table) -> Result<()> {
    if !has_main_section(t) {
        return Err(SE::from(ParserErrorKind::MissingMainSection.into_error()));
    }
    if !has_imag_version_in_main_section(t) {
        return Err(SE::from(ParserErrorKind::MissingVersionInfo.into_error()));
    }
    if !has_only_tables(t) {
        debug!("Could not verify that it only has tables in its base table");
        return Err(SE::from(ParserErrorKind::NonTableInBaseTable.into_error()));
    }
    Ok(())
}
/// Run `verify_header` and translate any failure into a `HeaderInconsistency`
/// parser error (boxing the cause); the table is passed through on success.
fn verify_header_consistency(t: Table) -> EntryResult<Table> {
    match verify_header(&t) {
        Ok(_)  => Ok(t),
        Err(e) => Err(ParserErrorKind::HeaderInconsistency.into_error_with_cause(Box::new(e))),
    }
}
/// Check that every top-level value in `t` is itself a table.
fn has_only_tables(t: &Table) -> bool {
    debug!("Verifying that table has only tables");
    t.iter().all(|(_, x)| match *x {
        Value::Table(_) => true,
        _ => false,
    })
}
/// Check that `t` has an "imag" key whose value is a table.
fn has_main_section(t: &Table) -> bool {
    // A single lookup suffices; the previous code did a redundant
    // `contains_key("imag")` before matching on `get("imag")`, hitting the
    // map twice for the same answer.
    match t.get("imag") {
        Some(&Value::Table(_)) => true,
        _ => false,
    }
}
/// Check that the "imag" main section carries a `version` string that parses
/// as a valid semver version.
///
/// BUGFIX: the previous code `unwrap()`ed `t.get("imag")` and panicked when
/// the main section was absent; a missing section now simply yields `false`.
fn has_imag_version_in_main_section(t: &Table) -> bool {
    use semver::Version;

    match t.get("imag") {
        Some(&Value::Table(ref sec)) => match sec.get("version") {
            Some(&Value::String(ref s)) => Version::parse(&s[..]).is_ok(),
            _ => false,
        },
        _ => false,
    }
}
/**
 * An Entry of the store
 *
 * Contains location, header and content part.
 */
#[derive(Debug, Clone)]
pub struct Entry {
    location: StoreId,   // where this entry lives inside the store
    header: EntryHeader, // parsed TOML header (the "---"-fenced block, see from_str())
    content: EntryContent, // free-form text following the header
}
impl Entry {

    /// Create an empty entry at `loc` with a default header and empty content.
    pub fn new(loc: StoreId) -> Entry {
        Entry {
            location: loc,
            header: EntryHeader::new(),
            content: EntryContent::new()
        }
    }

    /// Read everything from `file` into a string and parse it (see `from_str`).
    pub fn from_reader<S: IntoStoreId>(loc: S, file: &mut Read) -> Result<Entry> {
        let text = {
            let mut s = String::new();
            try!(file.read_to_string(&mut s));
            s
        };
        Self::from_str(loc, &text[..])
    }

    /// Parse an entry from its textual form: a header fenced by `---` lines,
    /// followed by free-form content.
    pub fn from_str<S: IntoStoreId>(loc: S, s: &str) -> Result<Entry> {
        debug!("Building entry from string");
        // Compiled once and cached across calls; `(?x)` mode ignores the
        // literal whitespace/comments inside the pattern.
        lazy_static! {
            static ref RE: Regex = Regex::new(r"(?smx)
                ^---$
                (?P<header>.*) # Header
                ^---$\n
                (?P<content>.*) # Content
            ").unwrap();
        }

        let matches = match RE.captures(s) {
            None => return Err(SE::new(SEK::MalformedEntry, None)),
            Some(s) => s,
        };

        let header = match matches.name("header") {
            None => return Err(SE::new(SEK::MalformedEntry, None)),
            Some(s) => s
        };

        // a missing content group simply means empty content
        let content = matches.name("content").unwrap_or("");

        debug!("Header and content found. Yay! Building Entry object now");
        Ok(Entry {
            location: try!(loc.into_storeid()),
            header: try!(EntryHeader::parse(header)),
            content: content.into(),
        })
    }

    /// Serialize the entry back into its `---`-fenced textual form
    /// (the inverse of `from_str`).
    pub fn to_str(&self) -> String {
        format!("---\n{header}---\n{content}",
                header  = ::toml::encode_str(&self.header.header),
                content = self.content)
    }

    /// The entry's id/location inside the store.
    pub fn get_location(&self) -> &StoreId {
        &self.location
    }

    pub fn get_header(&self) -> &EntryHeader {
        &self.header
    }

    pub fn get_header_mut(&mut self) -> &mut EntryHeader {
        &mut self.header
    }

    pub fn get_content(&self) -> &EntryContent {
        &self.content
    }

    pub fn get_content_mut(&mut self) -> &mut EntryContent {
        &mut self.content
    }

    /// Verify the entry's header (see `EntryHeader::verify`).
    pub fn verify(&self) -> Result<()> {
        self.header.verify()
    }

}
mod glob_store_iter {
    //! Iterator adapter turning `glob::Paths` results into `StoreId`s.

    use std::fmt::{Debug, Formatter};
    use std::fmt::Error as FmtError;
    use std::path::PathBuf;
    use glob::Paths;

    use storeid::StoreId;
    use storeid::StoreIdIterator;

    use error::StoreErrorKind as SEK;
    use error::MapErrInto;

    use libimagerror::trace::trace_error;

    /// Wraps a `glob::Paths` iterator together with the store's root path so
    /// glob hits can be converted into store-relative `StoreId`s.
    pub struct GlobStoreIdIterator {
        store_path: PathBuf,
        paths: Paths,
    }

    impl Debug for GlobStoreIdIterator {

        // `glob::Paths` carries no useful Debug output here; only print the type name.
        fn fmt(&self, fmt: &mut Formatter) -> Result<(), FmtError> {
            write!(fmt, "GlobStoreIdIterator")
        }

    }

    impl Into<StoreIdIterator> for GlobStoreIdIterator {

        fn into(self) -> StoreIdIterator {
            StoreIdIterator::new(Box::new(self))
        }

    }

    impl GlobStoreIdIterator {

        pub fn new(paths: Paths, store_path: PathBuf) -> GlobStoreIdIterator {
            debug!("Create a GlobStoreIdIterator(store_path = {:?}, /* ... */)", store_path);

            GlobStoreIdIterator {
                store_path: store_path,
                paths: paths,
            }
        }

    }

    impl Iterator for GlobStoreIdIterator {
        type Item = StoreId;

        // Errors from globbing or StoreId construction are traced and mapped
        // to `None`, which ends the iteration.
        fn next(&mut self) -> Option<StoreId> {
            self.paths
                .next()
                .and_then(|o| {
                    debug!("GlobStoreIdIterator::next() => {:?}", o);
                    o.map_err_into(SEK::StoreIdHandlingError)
                        .and_then(|p| StoreId::from_full_path(&self.store_path, p))
                        .map_err(|e| {
                            debug!("GlobStoreIdIterator error: {:?}", e);
                            trace_error(&e);
                        }).ok()
                })
        }

    }

}
#[cfg(test)]
mod test {
extern crate env_logger;
use std::collections::BTreeMap;
use super::EntryHeader;
use super::Token;
use storeid::StoreId;
use toml::Value;
#[test]
fn test_imag_section() {
use super::has_main_section;
let mut map = BTreeMap::new();
map.insert("imag".into(), Value::Table(BTreeMap::new()));
assert!(has_main_section(&map));
}
#[test]
fn test_imag_invalid_section_type() {
use super::has_main_section;
let mut map = BTreeMap::new();
map.insert("imag".into(), Value::Boolean(false));
assert!(!has_main_section(&map));
}
#[test]
fn test_imag_abscent_main_section() {
use super::has_main_section;
let mut map = BTreeMap::new();
map.insert("not_imag".into(), Value::Boolean(false));
assert!(!has_main_section(&map));
}
#[test]
fn test_main_section_without_version() {
use super::has_imag_version_in_main_section;
let mut map = BTreeMap::new();
map.insert("imag".into(), Value::Table(BTreeMap::new()));
assert!(!has_imag_version_in_main_section(&map));
}
#[test]
fn test_main_section_with_version() {
use super::has_imag_version_in_main_section;
let mut map = BTreeMap::new();
let mut sub = BTreeMap::new();
sub.insert("version".into(), Value::String("0.0.0".into()));
map.insert("imag".into(), Value::Table(sub));
assert!(has_imag_version_in_main_section(&map));
}
#[test]
fn test_main_section_with_version_in_wrong_type() {
use super::has_imag_version_in_main_section;
let mut map = BTreeMap::new();
let mut sub = BTreeMap::new();
sub.insert("version".into(), Value::Boolean(false));
map.insert("imag".into(), Value::Table(sub));
assert!(!has_imag_version_in_main_section(&map));
}
#[test]
fn test_verification_good() {
use super::verify_header_consistency;
let mut header = BTreeMap::new();
let sub = {
let mut sub = BTreeMap::new();
sub.insert("version".into(), Value::String(String::from("0.0.0")));
Value::Table(sub)
};
header.insert("imag".into(), sub);
assert!(verify_header_consistency(header).is_ok());
}
#[test]
fn test_verification_invalid_versionstring() {
use super::verify_header_consistency;
let mut header = BTreeMap::new();
let sub = {
let mut sub = BTreeMap::new();
sub.insert("version".into(), Value::String(String::from("000")));
Value::Table(sub)
};
header.insert("imag".into(), sub);
assert!(!verify_header_consistency(header).is_ok());
}
#[test]
fn test_verification_current_version() {
use super::verify_header_consistency;
let mut header = BTreeMap::new();
let sub = {
let mut sub = BTreeMap::new();
sub.insert("version".into(), Value::String(String::from(version!())));
Value::Table(sub)
};
header.insert("imag".into(), sub);
assert!(verify_header_consistency(header).is_ok());
}
static TEST_ENTRY : &'static str = "---
[imag]
version = \"0.0.3\"
---
Hai";
#[test]
fn test_entry_from_str() {
use super::Entry;
use std::path::PathBuf;
println!("{}", TEST_ENTRY);
let entry = Entry::from_str(StoreId::new_baseless(PathBuf::from("test/foo~1.3")).unwrap(),
TEST_ENTRY).unwrap();
assert_eq!(entry.content, "Hai");
}
#[test]
fn test_entry_to_str() {
use super::Entry;
use std::path::PathBuf;
println!("{}", TEST_ENTRY);
let entry = Entry::from_str(StoreId::new_baseless(PathBuf::from("test/foo~1.3")).unwrap(),
TEST_ENTRY).unwrap();
let string = entry.to_str();
assert_eq!(TEST_ENTRY, string);
}
#[test]
fn test_walk_header_simple() {
let tokens = EntryHeader::tokenize("a", '.').unwrap();
assert!(tokens.len() == 1, "1 token was expected, {} were parsed", tokens.len());
assert!(tokens.iter().next().unwrap() == &Token::Key(String::from("a")),
"'a' token was expected, {:?} was parsed", tokens.iter().next());
let mut header = BTreeMap::new();
header.insert(String::from("a"), Value::Integer(1));
let mut v_header = Value::Table(header);
let res = EntryHeader::walk_header(&mut v_header, tokens);
assert_eq!(&mut Value::Integer(1), res.unwrap());
}
#[test]
fn test_walk_header_with_array() {
let tokens = EntryHeader::tokenize("a.0", '.').unwrap();
assert!(tokens.len() == 2, "2 token was expected, {} were parsed", tokens.len());
assert!(tokens.iter().next().unwrap() == &Token::Key(String::from("a")),
"'a' token was expected, {:?} was parsed", tokens.iter().next());
let mut header = BTreeMap::new();
let ary = Value::Array(vec![Value::Integer(1)]);
header.insert(String::from("a"), ary);
let mut v_header = Value::Table(header);
let res = EntryHeader::walk_header(&mut v_header, tokens);
assert_eq!(&mut Value::Integer(1), res.unwrap());
}
#[test]
fn test_walk_header_extract_array() {
let tokens = EntryHeader::tokenize("a", '.').unwrap();
assert!(tokens.len() == 1, "1 token was expected, {} were parsed", tokens.len());
assert!(tokens.iter().next().unwrap() == &Token::Key(String::from("a")),
"'a' token was expected, {:?} was parsed", tokens.iter().next());
let mut header = BTreeMap::new();
let ary = Value::Array(vec![Value::Integer(1)]);
header.insert(String::from("a"), ary);
let mut v_header = Value::Table(header);
let res = EntryHeader::walk_header(&mut v_header, tokens);
assert_eq!(&mut Value::Array(vec![Value::Integer(1)]), res.unwrap());
}
    /**
     * Creates a big testing header.
     *
     * JSON equivalent:
     *
     * ```json
     * {
     *   "a": {
     *     "array": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ]
     *   },
     *   "b": {
     *     "array": [ "string0", "string1", ..., "string8" ]
     *   },
     *   "c": {
     *     "array": [ 1, "string2", 3, "string4" ]
     *   },
     *   "d": {
     *     "array": [
     *       {
     *         "d1": 1
     *       },
     *       {
     *         "d2": 2
     *       },
     *       {
     *         "d3": 3
     *       },
     *     ],
     *
     *     "something": "else",
     *
     *     "and": {
     *       "something": {
     *         "totally": "different"
     *       }
     *     }
     *   }
     * }
     * ```
     *
     * The sections "a", "b", "c", "d" are created in the respective helper functions
     * create_header_section_a, create_header_section_b, create_header_section_c and
     * create_header_section_d.
     *
     * These functions can also be used for testing.
     *
     */
    fn create_header() -> Value {
        let a = create_header_section_a();
        let b = create_header_section_b();
        let c = create_header_section_c();
        let d = create_header_section_d();

        let mut header = BTreeMap::new();
        header.insert(String::from("a"), a);
        header.insert(String::from("b"), b);
        header.insert(String::from("c"), c);
        header.insert(String::from("d"), d);

        Value::Table(header)
    }

    // section "a": ten integers 0..9
    fn create_header_section_a() -> Value {
        // 0..10 is exclusive 10
        let a_ary = Value::Array((0..10).map(|x| Value::Integer(x)).collect());

        let mut a_obj = BTreeMap::new();
        a_obj.insert(String::from("array"), a_ary);

        Value::Table(a_obj)
    }

    // section "b": nine strings "string0".."string8"
    fn create_header_section_b() -> Value {
        let b_ary = Value::Array((0..9)
                                 .map(|x| Value::String(format!("string{}", x)))
                                 .collect());

        let mut b_obj = BTreeMap::new();
        b_obj.insert(String::from("array"), b_ary);

        Value::Table(b_obj)
    }

    // section "c": mixed integers and strings
    fn create_header_section_c() -> Value {
        let c_ary = Value::Array(
            vec![
                Value::Integer(1),
                Value::String(String::from("string2")),
                Value::Integer(3),
                Value::String(String::from("string4"))
            ]);

        let mut c_obj = BTreeMap::new();
        c_obj.insert(String::from("array"), c_ary);

        Value::Table(c_obj)
    }

    // section "d": array of one-key tables plus nested sub-tables
    fn create_header_section_d() -> Value {
        let d_ary = Value::Array(
            vec![
                {
                    let mut tab = BTreeMap::new();
                    tab.insert(String::from("d1"), Value::Integer(1));
                    tab
                },
                {
                    let mut tab = BTreeMap::new();
                    tab.insert(String::from("d2"), Value::Integer(2));
                    tab
                },
                {
                    let mut tab = BTreeMap::new();
                    tab.insert(String::from("d3"), Value::Integer(3));
                    tab
                },
            ].into_iter().map(Value::Table).collect());

        let and_obj = Value::Table({
            let mut tab = BTreeMap::new();
            let something_tab = Value::Table({
                let mut tab = BTreeMap::new();
                tab.insert(String::from("totally"), Value::String(String::from("different")));
                tab
            });
            tab.insert(String::from("something"), something_tab);
            tab
        });

        let mut d_obj = BTreeMap::new();
        d_obj.insert(String::from("array"), d_ary);
        d_obj.insert(String::from("something"), Value::String(String::from("else")));
        d_obj.insert(String::from("and"), and_obj);

        Value::Table(d_obj)
    }
    // walk_header against the big header built by create_header().

    #[test]
    fn test_walk_header_big_a() {
        test_walk_header_extract_section("a", &create_header_section_a());
    }

    #[test]
    fn test_walk_header_big_b() {
        test_walk_header_extract_section("b", &create_header_section_b());
    }

    #[test]
    fn test_walk_header_big_c() {
        test_walk_header_extract_section("c", &create_header_section_c());
    }

    #[test]
    fn test_walk_header_big_d() {
        test_walk_header_extract_section("d", &create_header_section_d());
    }

    // helper: walking a single key must yield the corresponding section
    fn test_walk_header_extract_section(secname: &str, expected: &Value) {
        let tokens = EntryHeader::tokenize(secname, '.').unwrap();
        assert!(tokens.len() == 1, "1 token was expected, {} were parsed", tokens.len());
        assert!(tokens.iter().next().unwrap() == &Token::Key(String::from(secname)),
                "'{}' token was expected, {:?} was parsed", secname, tokens.iter().next());

        let mut header = create_header();
        let res = EntryHeader::walk_header(&mut header, tokens);
        assert_eq!(expected, res.unwrap());
    }

    #[test]
    fn test_walk_header_extract_numbers() {
        test_extract_number("a", 0, 0);
        test_extract_number("a", 1, 1);
        test_extract_number("a", 2, 2);
        test_extract_number("a", 3, 3);
        test_extract_number("a", 4, 4);
        test_extract_number("a", 5, 5);
        test_extract_number("a", 6, 6);
        test_extract_number("a", 7, 7);
        test_extract_number("a", 8, 8);
        test_extract_number("a", 9, 9);

        test_extract_number("c", 0, 1);
        test_extract_number("c", 2, 3);
    }

    // helper: "<sec>.array.<idx>" must resolve to Integer(exp)
    fn test_extract_number(sec: &str, idx: usize, exp: i64) {
        let tokens = EntryHeader::tokenize(&format!("{}.array.{}", sec, idx)[..], '.').unwrap();
        assert!(tokens.len() == 3, "3 token was expected, {} were parsed", tokens.len());
        {
            let mut iter = tokens.iter();

            let tok = iter.next().unwrap();
            let exp = Token::Key(String::from(sec));
            assert!(tok == &exp, "'{}' token was expected, {:?} was parsed", sec, tok);

            let tok = iter.next().unwrap();
            let exp = Token::Key(String::from("array"));
            assert!(tok == &exp, "'array' token was expected, {:?} was parsed", tok);

            let tok = iter.next().unwrap();
            let exp = Token::Index(idx);
            assert!(tok == &exp, "'{}' token was expected, {:?} was parsed", idx, tok);
        }

        let mut header = create_header();
        let res = EntryHeader::walk_header(&mut header, tokens);
        assert_eq!(&mut Value::Integer(exp), res.unwrap());
    }
#[test]
fn test_header_read() {
let v = create_header();
let h = match v {
Value::Table(t) => EntryHeader::from_table(t),
_ => panic!("create_header() doesn't return a table!"),
};
assert!(if let Ok(Some(Value::Table(_))) = h.read("a") { true } else { false });
assert!(if let Ok(Some(Value::Array(_))) = h.read("a.array") { true } else { false });
assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.array.1") { true } else { false });
assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.array.9") { true } else { false });
assert!(if let Ok(Some(Value::Table(_))) = h.read("c") { true } else { false });
assert!(if let Ok(Some(Value::Array(_))) = h.read("c.array") { true } else { false });
assert!(if let Ok(Some(Value::String(_))) = h.read("c.array.1") { true } else { false });
assert!(if let Ok(None) = h.read("c.array.9") { true } else { false });
assert!(if let Ok(Some(Value::Integer(_))) = h.read("d.array.0.d1") { true } else { false });
assert!(if let Ok(None) = h.read("d.array.0.d2") { true } else { false });
assert!(if let Ok(None) = h.read("d.array.0.d3") { true } else { false });
assert!(if let Ok(None) = h.read("d.array.1.d1") { true } else { false });
assert!(if let Ok(Some(Value::Integer(_))) = h.read("d.array.1.d2") { true } else { false });
assert!(if let Ok(None) = h.read("d.array.1.d3") { true } else { false });
assert!(if let Ok(None) = h.read("d.array.2.d1") { true } else { false });
assert!(if let Ok(None) = h.read("d.array.2.d2") { true } else { false });
assert!(if let Ok(Some(Value::Integer(_))) = h.read("d.array.2.d3") { true } else { false });
assert!(if let Ok(Some(Value::String(_))) = h.read("d.something") { true } else { false });
assert!(if let Ok(Some(Value::Table(_))) = h.read("d.and") { true } else { false });
assert!(if let Ok(Some(Value::Table(_))) = h.read("d.and.something") { true } else { false });
assert!(if let Ok(Some(Value::String(_))) = h.read("d.and.something.totally") { true } else { false });
}
#[test]
fn test_header_set_override() {
let _ = env_logger::init();
let v = create_header();
let mut h = match v {
Value::Table(t) => EntryHeader::from_table(t),
_ => panic!("create_header() doesn't return a table!"),
};
println!("Testing index 0");
assert_eq!(h.read("a.array.0").unwrap().unwrap(), Value::Integer(0));
println!("Altering index 0");
assert_eq!(h.set("a.array.0", Value::Integer(42)).unwrap().unwrap(), Value::Integer(0));
println!("Values now: {:?}", h);
println!("Testing all indexes");
assert_eq!(h.read("a.array.0").unwrap().unwrap(), Value::Integer(42));
assert_eq!(h.read("a.array.1").unwrap().unwrap(), Value::Integer(1));
assert_eq!(h.read("a.array.2").unwrap().unwrap(), Value::Integer(2));
assert_eq!(h.read("a.array.3").unwrap().unwrap(), Value::Integer(3));
assert_eq!(h.read("a.array.4").unwrap().unwrap(), Value::Integer(4));
assert_eq!(h.read("a.array.5").unwrap().unwrap(), Value::Integer(5));
assert_eq!(h.read("a.array.6").unwrap().unwrap(), Value::Integer(6));
assert_eq!(h.read("a.array.7").unwrap().unwrap(), Value::Integer(7));
assert_eq!(h.read("a.array.8").unwrap().unwrap(), Value::Integer(8));
assert_eq!(h.read("a.array.9").unwrap().unwrap(), Value::Integer(9));
}
#[test]
fn test_header_set_new() {
let _ = env_logger::init();
let v = create_header();
let mut h = match v {
Value::Table(t) => EntryHeader::from_table(t),
_ => panic!("create_header() doesn't return a table!"),
};
assert!(h.read("a.foo").is_ok());
assert!(h.read("a.foo").unwrap().is_none());
{
let v = h.set("a.foo", Value::Integer(42));
assert!(v.is_ok());
assert!(v.unwrap().is_none());
assert!(if let Ok(Some(Value::Table(_))) = h.read("a") { true } else { false });
assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.foo") { true } else { false });
}
{
let v = h.set("new", Value::Table(BTreeMap::new()));
assert!(v.is_ok());
assert!(v.unwrap().is_none());
let v = h.set("new.subset", Value::Table(BTreeMap::new()));
assert!(v.is_ok());
assert!(v.unwrap().is_none());
let v = h.set("new.subset.dest", Value::Integer(1337));
assert!(v.is_ok());
assert!(v.unwrap().is_none());
assert!(if let Ok(Some(Value::Table(_))) = h.read("new") { true } else { false });
assert!(if let Ok(Some(Value::Table(_))) = h.read("new.subset") { true } else { false });
assert!(if let Ok(Some(Value::Integer(_))) = h.read("new.subset.dest") { true } else { false });
}
}
#[test]
fn test_header_insert_override() {
let _ = env_logger::init();
let v = create_header();
let mut h = match v {
Value::Table(t) => EntryHeader::from_table(t),
_ => panic!("create_header() doesn't return a table!"),
};
println!("Testing index 0");
assert_eq!(h.read("a.array.0").unwrap().unwrap(), Value::Integer(0));
println!("Altering index 0");
assert_eq!(h.insert("a.array.0", Value::Integer(42)).unwrap(), false);
println!("...should have failed");
println!("Testing all indexes");
assert_eq!(h.read("a.array.0").unwrap().unwrap(), Value::Integer(0));
assert_eq!(h.read("a.array.1").unwrap().unwrap(), Value::Integer(1));
assert_eq!(h.read("a.array.2").unwrap().unwrap(), Value::Integer(2));
assert_eq!(h.read("a.array.3").unwrap().unwrap(), Value::Integer(3));
assert_eq!(h.read("a.array.4").unwrap().unwrap(), Value::Integer(4));
assert_eq!(h.read("a.array.5").unwrap().unwrap(), Value::Integer(5));
assert_eq!(h.read("a.array.6").unwrap().unwrap(), Value::Integer(6));
assert_eq!(h.read("a.array.7").unwrap().unwrap(), Value::Integer(7));
assert_eq!(h.read("a.array.8").unwrap().unwrap(), Value::Integer(8));
assert_eq!(h.read("a.array.9").unwrap().unwrap(), Value::Integer(9));
}
#[test]
fn test_header_insert_new() {
let _ = env_logger::init();
let v = create_header();
let mut h = match v {
Value::Table(t) => EntryHeader::from_table(t),
_ => panic!("create_header() doesn't return a table!"),
};
assert!(h.read("a.foo").is_ok());
assert!(h.read("a.foo").unwrap().is_none());
{
let v = h.insert("a.foo", Value::Integer(42));
assert!(v.is_ok());
assert_eq!(v.unwrap(), true);
assert!(if let Ok(Some(Value::Table(_))) = h.read("a") { true } else { false });
assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.foo") { true } else { false });
}
{
let v = h.insert("new", Value::Table(BTreeMap::new()));
assert!(v.is_ok());
assert_eq!(v.unwrap(), true);
let v = h.insert("new.subset", Value::Table(BTreeMap::new()));
assert!(v.is_ok());
assert_eq!(v.unwrap(), true);
let v = h.insert("new.subset.dest", Value::Integer(1337));
assert!(v.is_ok());
assert_eq!(v.unwrap(), true);
assert!(if let Ok(Some(Value::Table(_))) = h.read("new") { true } else { false });
assert!(if let Ok(Some(Value::Table(_))) = h.read("new.subset") { true } else { false });
assert!(if let Ok(Some(Value::Integer(_))) = h.read("new.subset.dest") { true } else { false });
}
}
#[test]
fn test_header_delete() {
let _ = env_logger::init();
let v = create_header();
let mut h = match v {
Value::Table(t) => EntryHeader::from_table(t),
_ => panic!("create_header() doesn't return a table!"),
};
assert!(if let Ok(Some(Value::Table(_))) = h.read("a") { true } else { false });
assert!(if let Ok(Some(Value::Array(_))) = h.read("a.array") { true } else { false });
assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.array.1") { true } else { false });
assert!(if let Ok(Some(Value::Integer(_))) = h.read("a.array.9") { true } else { false });
assert!(if let Ok(Some(Value::Integer(1))) = h.delete("a.array.1") { true } else { false });
assert!(if let Ok(Some(Value::Integer(9))) = h.delete("a.array.8") { true } else { false });
assert!(if let Ok(Some(Value::Array(_))) = h.delete("a.array") { true } else { false });
assert!(if let Ok(Some(Value::Table(_))) = h.delete("a") { true } else { false });
}
}
#[cfg(test)]
mod store_tests {
use std::path::PathBuf;
use super::Store;
    // Build an in-memory test store rooted at "/" with no runtime config.
    pub fn get_store() -> Store {
        Store::new(PathBuf::from("/"), None).unwrap()
    }
#[test]
fn test_store_instantiation() {
let store = get_store();
assert_eq!(store.location, PathBuf::from("/"));
assert!(store.entries.read().unwrap().is_empty());
assert!(store.store_unload_aspects.lock().unwrap().is_empty());
assert!(store.pre_create_aspects.lock().unwrap().is_empty());
assert!(store.post_create_aspects.lock().unwrap().is_empty());
assert!(store.pre_retrieve_aspects.lock().unwrap().is_empty());
assert!(store.post_retrieve_aspects.lock().unwrap().is_empty());
assert!(store.pre_update_aspects.lock().unwrap().is_empty());
assert!(store.post_update_aspects.lock().unwrap().is_empty());
assert!(store.pre_delete_aspects.lock().unwrap().is_empty());
assert!(store.post_delete_aspects.lock().unwrap().is_empty());
assert!(store.pre_move_aspects.lock().unwrap().is_empty());
assert!(store.post_move_aspects.lock().unwrap().is_empty());
}
#[test]
fn test_store_create() {
let store = get_store();
for n in 1..100 {
let s = format!("test-{}", n);
let entry = store.create(PathBuf::from(s.clone())).unwrap();
assert!(entry.verify().is_ok());
let loc = entry.get_location().clone().into_pathbuf().unwrap();
assert!(loc.starts_with("/"));
assert!(loc.ends_with(s));
}
}
#[test]
fn test_store_get_create_get_delete_get() {
let store = get_store();
for n in 1..100 {
let res = store.get(PathBuf::from(format!("test-{}", n)));
assert!(match res { Ok(None) => true, _ => false, })
}
for n in 1..100 {
let s = format!("test-{}", n);
let entry = store.create(PathBuf::from(s.clone())).unwrap();
assert!(entry.verify().is_ok());
let loc = entry.get_location().clone().into_pathbuf().unwrap();
assert!(loc.starts_with("/"));
assert!(loc.ends_with(s));
}
for n in 1..100 {
let res = store.get(PathBuf::from(format!("test-{}", n)));
assert!(match res { Ok(Some(_)) => true, _ => false, })
}
for n in 1..100 {
assert!(store.delete(PathBuf::from(format!("test-{}", n))).is_ok())
}
for n in 1..100 {
let res = store.get(PathBuf::from(format!("test-{}", n)));
assert!(match res { Ok(None) => true, _ => false, })
}
}
#[test]
fn test_store_create_twice() {
use error::StoreErrorKind as SEK;
let store = get_store();
for n in 1..100 {
let s = format!("test-{}", n % 50);
store.create(PathBuf::from(s.clone()))
.map_err(|e| assert!(is_match!(e.err_type(), SEK::CreateCallError) && n >= 50))
.ok()
.map(|entry| {
assert!(entry.verify().is_ok());
let loc = entry.get_location().clone().into_pathbuf().unwrap();
assert!(loc.starts_with("/"));
assert!(loc.ends_with(s));
});
}
}
#[test]
fn test_store_create_in_hm() {
use storeid::StoreId;
let store = get_store();
for n in 1..100 {
let pb = StoreId::new_baseless(PathBuf::from(format!("test-{}", n))).unwrap();
assert!(store.entries.read().unwrap().get(&pb).is_none());
assert!(store.create(pb.clone()).is_ok());
let pb = pb.with_base(store.path().clone());
assert!(store.entries.read().unwrap().get(&pb).is_some());
}
}
#[test]
fn test_store_retrieve_in_hm() {
use storeid::StoreId;
let store = get_store();
for n in 1..100 {
let pb = StoreId::new_baseless(PathBuf::from(format!("test-{}", n))).unwrap();
assert!(store.entries.read().unwrap().get(&pb).is_none());
assert!(store.retrieve(pb.clone()).is_ok());
let pb = pb.with_base(store.path().clone());
assert!(store.entries.read().unwrap().get(&pb).is_some());
}
}
/// Getting an id that was never created must yield `Ok(None)`, not an error.
#[test]
fn test_get_none() {
    let store = get_store();
    for n in 1..100 {
        // `assert!(true)` / `assert!(false)` arms replaced with asserting the
        // match result directly, matching the style used by the other tests.
        let res = store.get(PathBuf::from(format!("test-{}", n)));
        assert!(match res { Ok(None) => true, _ => false });
    }
}
/// Deleting an id that was never created must fail.
#[test]
fn test_delete_none() {
    let store = get_store();
    for n in 1..100 {
        // `match { Err(_) => true, _ => false }` is exactly `is_err()`.
        assert!(store.delete(PathBuf::from(format!("test-{}", n))).is_err());
    }
}
// Disabled because we cannot test this by now, as we rely on glob() in
// Store::retrieve_for_module(), which accesses the filesystem and tests run in-memory, so there
// are no files on the filesystem in this test after Store::create().
//
// #[test]
// fn test_retrieve_for_module() {
// let pathes = vec![
// "foo/1", "foo/2", "foo/3", "foo/4", "foo/5",
// "bar/1", "bar/2", "bar/3", "bar/4", "bar/5",
// "bla/1", "bla/2", "bla/3", "bla/4", "bla/5",
// "boo/1", "boo/2", "boo/3", "boo/4", "boo/5",
// "glu/1", "glu/2", "glu/3", "glu/4", "glu/5",
// ];
// fn test(store: &Store, modulename: &str) {
// use std::path::Component;
// use storeid::StoreId;
// let retrieved = store.retrieve_for_module(modulename);
// assert!(retrieved.is_ok());
// let v : Vec<StoreId> = retrieved.unwrap().collect();
// println!("v = {:?}", v);
// assert!(v.len() == 5);
// let retrieved = store.retrieve_for_module(modulename);
// assert!(retrieved.is_ok());
// assert!(retrieved.unwrap().all(|e| {
// let first = e.components().next();
// assert!(first.is_some());
// match first.unwrap() {
// Component::Normal(s) => s == modulename,
// _ => false,
// }
// }))
// }
// let store = get_store();
// for path in pathes {
// assert!(store.create(PathBuf::from(path)).is_ok());
// }
// test(&store, "foo");
// test(&store, "bar");
// test(&store, "bla");
// test(&store, "boo");
// test(&store, "glu");
// }
/// Moving an entry by id must re-key it in the internal entry map:
/// the old id disappears, the new id resolves.
#[test]
fn test_store_move_moves_in_hm() {
    use storeid::StoreId;
    let store = get_store();
    for n in 1..100 {
        if n % 2 == 0 { // every second
            // Move each even-numbered id onto the (never-created) odd id below it.
            let id = StoreId::new_baseless(PathBuf::from(format!("t-{}", n))).unwrap();
            let id_mv = StoreId::new_baseless(PathBuf::from(format!("t-{}", n - 1))).unwrap();
            // Source id must not exist yet.
            {
                assert!(store.entries.read().unwrap().get(&id).is_none());
            }
            {
                assert!(store.create(id.clone()).is_ok());
            }
            // After create, the entry is keyed by the store-based id.
            {
                let id_with_base = id.clone().with_base(store.path().clone());
                assert!(store.entries.read().unwrap().get(&id_with_base).is_some());
            }
            let r = store.move_by_id(id.clone(), id_mv.clone());
            assert!(r.map_err(|e| println!("ERROR: {:?}", e)).is_ok());
            // Destination id is now present in the map ...
            {
                let id_mv_with_base = id_mv.clone().with_base(store.path().clone());
                assert!(store.entries.read().unwrap().get(&id_mv_with_base).is_some());
            }
            // ... the old id resolves to nothing, the new one to an entry.
            assert!(match store.get(id.clone()) { Ok(None) => true, _ => false },
                    "Moved id ({:?}) is still there", id);
            assert!(match store.get(id_mv.clone()) { Ok(Some(_)) => true, _ => false },
                    "New id ({:?}) is not in store...", id_mv);
        }
    }
}
}
#[cfg(test)]
mod store_hook_tests {
mod test_hook {
use hook::Hook;
use hook::accessor::HookDataAccessor;
use hook::accessor::HookDataAccessorProvider;
use hook::position::HookPosition;
use self::accessor::TestHookAccessor as DHA;
use toml::Value;
/// Minimal hook used by the store hook tests: it performs no work and only
/// reports the configured success/failure through its accessor.
#[derive(Debug)]
pub struct TestHook {
    // Position this hook is registered for; selects the accessor flavor.
    position: HookPosition,
    // Accessor that produces the configured HookResult.
    accessor: DHA,
}
impl TestHook {
    /// Build a hook for `pos` that succeeds iff `succeed`; on failure the
    /// error aborts the store operation iff `error_aborting`.
    pub fn new(pos: HookPosition, succeed: bool, error_aborting: bool) -> TestHook {
        TestHook { position: pos.clone(), accessor: DHA::new(pos, succeed, error_aborting) }
    }
}
impl Hook for TestHook {
    fn name(&self) -> &'static str { "testhook_succeeding" }
    // This test hook ignores any configuration.
    fn set_config(&mut self, _: &Value) { }
}
impl HookDataAccessorProvider for TestHook {
    /// Select the accessor flavor matching the registered position:
    /// positions that only receive a StoreId get `StoreIdAccess`, positions
    /// that receive the entry itself get `MutableAccess`.
    fn accessor(&self) -> HookDataAccessor {
        use hook::position::HookPosition as HP;
        use hook::accessor::HookDataAccessor as HDA;
        match self.position {
            HP::StoreUnload |
            HP::PreCreate |
            HP::PreRetrieve |
            HP::PreDelete |
            HP::PostDelete => HDA::StoreIdAccess(&self.accessor),
            HP::PostCreate |
            HP::PostRetrieve |
            HP::PreUpdate |
            HP::PostUpdate => HDA::MutableAccess(&self.accessor),
        }
    }
}
pub mod accessor {
use hook::result::HookResult;
use hook::accessor::MutableHookDataAccessor;
use hook::accessor::NonMutableHookDataAccessor;
use hook::accessor::StoreIdAccessor;
use hook::position::HookPosition;
use store::FileLockEntry;
use storeid::StoreId;
use hook::error::HookErrorKind as HEK;
use hook::error::CustomData;
use libimagerror::into::IntoError;
/// Accessor backing TestHook: ignores its input and returns a canned result.
#[derive(Debug)]
pub struct TestHookAccessor {
    // Position the owning hook was registered for.
    // NOTE(review): `pos` is never read in this module — confirm it is only
    // kept for Debug output before removing.
    pos: HookPosition,
    // Whether access() calls return Ok(()).
    succeed: bool,
    // On failure, whether the error should abort the store operation.
    error_aborting: bool
}
impl TestHookAccessor {
    /// Build an accessor with the given canned behavior.
    pub fn new(position: HookPosition, succeed: bool, error_aborting: bool)
        -> TestHookAccessor
    {
        TestHookAccessor {
            pos: position,
            succeed: succeed,
            error_aborting: error_aborting,
        }
    }
}
/// Build the canned result every test accessor returns.
///
/// `succeed` yields `Ok(())`; otherwise a `HookExecutionError` is returned,
/// marked non-aborting via `CustomData` unless `abort` is set.
fn get_result(succeed: bool, abort: bool) -> HookResult<()> {
    // Fixed typo in the log message ("Generting" -> "Generating").
    println!("Generating result: succeed = {}, abort = {}", succeed, abort);
    if succeed {
        println!("Generating result: Ok(())");
        Ok(())
    } else if abort {
        // Collapsed the nested `else { if ... }` into `else if`.
        println!("Generating result: Err(_), aborting");
        Err(HEK::HookExecutionError.into_error())
    } else {
        println!("Generating result: Err(_), not aborting");
        let custom = CustomData::default().aborting(false);
        Err(HEK::HookExecutionError.into_error().with_custom_data(custom))
    }
}
impl StoreIdAccessor for TestHookAccessor {
    // The id is irrelevant for the test; `_id` silences the unused-variable warning.
    fn access(&self, _id: &StoreId) -> HookResult<()> {
        get_result(self.succeed, self.error_aborting)
    }
}
impl MutableHookDataAccessor for TestHookAccessor {
    // The entry is irrelevant for the test; return the preconfigured result.
    fn access_mut(&self, _fle: &mut FileLockEntry) -> HookResult<()> {
        get_result(self.succeed, self.error_aborting)
    }
}
impl NonMutableHookDataAccessor for TestHookAccessor {
    // The entry is irrelevant for the test; return the preconfigured result.
    fn access(&self, _fle: &FileLockEntry) -> HookResult<()> {
        get_result(self.succeed, self.error_aborting)
    }
}
}
}
use std::path::PathBuf;
use hook::position::HookPosition as HP;
use storeid::StoreId;
use store::Store;
use self::test_hook::TestHook;
/// Build an in-memory store rooted at "/" from the TOML in `mini_config()`,
/// so the "test" hook aspect is configured for every hook position.
fn get_store_with_config() -> Store {
    use toml::Parser;
    let cfg = Parser::new(mini_config()).parse().unwrap();
    println!("Config parsed: {:?}", cfg);
    Store::new(PathBuf::from("/"), Some(cfg.get("store").cloned().unwrap())).unwrap()
}
/// Minimal store configuration: a single non-parallel, mutable aspect "test"
/// wired to every hook position, and the test hook assigned to that aspect.
/// (The TOML below is runtime data and must not be reformatted.)
fn mini_config() -> &'static str {
    r#"
[store]
store-unload-hook-aspects = [ "test" ]
pre-create-hook-aspects = [ "test" ]
post-create-hook-aspects = [ "test" ]
pre-move-hook-aspects = [ "test" ]
post-move-hook-aspects = [ "test" ]
pre-retrieve-hook-aspects = [ "test" ]
post-retrieve-hook-aspects = [ "test" ]
pre-update-hook-aspects = [ "test" ]
post-update-hook-aspects = [ "test" ]
pre-delete-hook-aspects = [ "test" ]
post-delete-hook-aspects = [ "test" ]
[store.aspects.test]
parallel = false
mutable_hooks = true
[store.hooks.testhook_succeeding]
aspect = "test"
"#
}
/// Register a succeeding, non-aborting test hook at every position in
/// `hook_positions`, then drive one full entry lifecycle under
/// `storeid_name`: create -> get -> move -> get -> update -> delete.
fn test_hook_execution(hook_positions: &[HP], storeid_name: &str) {
    let mut store = get_store_with_config();
    // (Removed two dead locals: a `pos`/`hook` pair was built here but never
    // used — the loop below shadows both.)
    println!("Registering hooks...");
    for pos in hook_positions {
        let hook = TestHook::new(pos.clone(), true, false);
        println!("\tRegistering: {:?}", pos);
        assert!(store.register_hook(pos.clone(), "test", Box::new(hook))
            .map_err(|e| println!("{:?}", e))
            .is_ok()
        );
    }
    println!("... done.");
    let pb = StoreId::new_baseless(PathBuf::from(storeid_name)).unwrap();
    let pb_moved = StoreId::new_baseless(PathBuf::from(format!("{}-moved", storeid_name))).unwrap();
    println!("Creating {:?}", pb);
    assert!(store.create(pb.clone()).is_ok());
    // The created id resolves; the move target does not exist yet.
    {
        println!("Getting {:?} -> Some?", pb);
        assert!(match store.get(pb.clone()) {
            Ok(Some(_)) => true,
            _ => false,
        });
    }
    {
        println!("Getting {:?} -> None?", pb_moved);
        assert!(match store.get(pb_moved.clone()) {
            Ok(None) => true,
            _ => false,
        });
    }
    {
        println!("Moving {:?} -> {:?}", pb, pb_moved);
        assert!(store.move_by_id(pb.clone(), pb_moved.clone()).map_err(|e| println!("ERROR MOVING: {:?}", e)).is_ok());
    }
    // After the move the roles are swapped.
    {
        println!("Getting {:?} -> None", pb);
        assert!(match store.get(pb.clone()) {
            Ok(None) => true,
            _ => false,
        });
    }
    {
        println!("Getting {:?} -> Some", pb_moved);
        assert!(match store.get(pb_moved.clone()) {
            Ok(Some(_)) => true,
            _ => false,
        });
    }
    // Update the moved entry, then delete it.
    {
        println!("Getting {:?} -> Some -> updating", pb_moved);
        assert!(match store.get(pb_moved.clone()).map_err(|e| println!("ERROR GETTING: {:?}", e)) {
            Ok(Some(fle)) => store.update(fle).map_err(|e| println!("ERROR UPDATING: {:?}", e)).is_ok(),
            _ => false,
        });
    }
    println!("Deleting {:?}", pb_moved);
    assert!(store.delete(pb_moved).is_ok());
}
// One lifecycle test per hook position: each registers a succeeding,
// non-aborting hook at exactly that position and runs the full
// create/get/move/update/delete cycle.
#[test]
fn test_storeunload() {
    test_hook_execution(&[HP::StoreUnload], "test_storeunload");
}
#[test]
fn test_precreate() {
    test_hook_execution(&[HP::PreCreate], "test_precreate");
}
#[test]
fn test_postcreate() {
    test_hook_execution(&[HP::PostCreate], "test_postcreate");
}
#[test]
fn test_preretrieve() {
    test_hook_execution(&[HP::PreRetrieve], "test_preretrieve");
}
#[test]
fn test_postretrieve() {
    test_hook_execution(&[HP::PostRetrieve], "test_postretrieve");
}
#[test]
fn test_preupdate() {
    test_hook_execution(&[HP::PreUpdate], "test_preupdate");
}
#[test]
fn test_postupdate() {
    test_hook_execution(&[HP::PostUpdate], "test_postupdate");
}
#[test]
fn test_predelete() {
    test_hook_execution(&[HP::PreDelete], "test_predelete");
}
#[test]
fn test_postdelete() {
    test_hook_execution(&[HP::PostDelete], "test_postdelete");
}
/// Registering 2..10 copies of a hook at the same position must still let the
/// whole lifecycle run cleanly.
#[test]
fn test_multiple_same_position() {
    let positions = [ HP::StoreUnload, HP::PreCreate, HP::PostCreate, HP::PreRetrieve,
        HP::PostRetrieve, HP::PreUpdate, HP::PostUpdate, HP::PreDelete, HP::PostDelete ];
    for position in positions.iter() {
        for n in 2..10 {
            // `vec![..; n]` replaces the manual with_capacity/push loop whose
            // loop variable `x` was unused.
            let v = vec![position.clone(); n];
            test_hook_execution(&v, "test_multiple_same_position");
        }
    }
}
/// Build a store with one hook at `pos` that always fails AND aborts the
/// surrounding store operation.
fn get_store_with_aborting_hook_at_pos(pos: HP) -> Store {
    let mut store = get_store_with_config();
    let hook = TestHook::new(pos.clone(), false, true);
    assert!(store.register_hook(pos, "test", Box::new(hook)).map_err(|e| println!("{:?}", e)).is_ok());
    store
}
/// StoreId fixture.
// NOTE(review): not referenced by any test visible in this file — confirm
// before removing.
fn default_test_id() -> StoreId {
    StoreId::new_baseless(PathBuf::from("test")).unwrap()
}
// Aborting-hook tests: a failing, aborting hook at the given position must
// make the guarded store operation return an error. For post-* positions the
// operation's side effect has already happened when the hook runs.
#[test]
fn test_pre_create_error() {
    let storeid = StoreId::new_baseless(PathBuf::from("test_pre_create_error")).unwrap();
    let store = get_store_with_aborting_hook_at_pos(HP::PreCreate);
    assert!(store.create(storeid).is_err());
}
#[test]
fn test_pre_retrieve_error() {
    let storeid = StoreId::new_baseless(PathBuf::from("test_pre_retrieve_error")).unwrap();
    let store = get_store_with_aborting_hook_at_pos(HP::PreRetrieve);
    assert!(store.retrieve(storeid).is_err());
}
#[test]
fn test_pre_delete_error() {
    let storeid = StoreId::new_baseless(PathBuf::from("test_pre_delete_error")).unwrap();
    let store = get_store_with_aborting_hook_at_pos(HP::PreDelete);
    assert!(store.delete(storeid).is_err());
}
#[test]
fn test_pre_update_error() {
    let storeid = StoreId::new_baseless(PathBuf::from("test_pre_update_error")).unwrap();
    let store = get_store_with_aborting_hook_at_pos(HP::PreUpdate);
    let fle = store.create(storeid).unwrap();
    assert!(store.update(fle).is_err());
}
#[test]
fn test_post_create_error() {
    let store = get_store_with_aborting_hook_at_pos(HP::PostCreate);
    let pb = StoreId::new_baseless(PathBuf::from("test_post_create_error")).unwrap();
    assert!(store.create(pb.clone()).is_err());
    // But the entry exists, as the hook fails post-create
    assert!(store.entries.read().unwrap().get(&pb.with_base(store.path().clone())).is_some());
}
#[test]
fn test_post_retrieve_error() {
    let store = get_store_with_aborting_hook_at_pos(HP::PostRetrieve);
    let pb = StoreId::new_baseless(PathBuf::from("test_post_retrieve_error")).unwrap();
    assert!(store.retrieve(pb.clone()).is_err());
    // But the entry exists, as the hook fails post-retrieve
    assert!(store.entries.read().unwrap().get(&pb.with_base(store.path().clone())).is_some());
}
#[test]
fn test_post_delete_error() {
    let store = get_store_with_aborting_hook_at_pos(HP::PostDelete);
    let pb = StoreId::new_baseless(PathBuf::from("test_post_delete_error")).unwrap();
    assert!(store.create(pb.clone()).is_ok());
    let pb = pb.with_base(store.path().clone());
    assert!(store.entries.read().unwrap().get(&pb).is_some());
    assert!(store.delete(pb.clone()).is_err());
    // But the entry is removed, as we fail post-delete
    assert!(store.entries.read().unwrap().get(&pb).is_none());
}
#[test]
fn test_post_update_error() {
    let store = get_store_with_aborting_hook_at_pos(HP::PostUpdate);
    let pb = StoreId::new_baseless(PathBuf::from("test_post_update_error")).unwrap();
    let fle = store.create(pb.clone()).unwrap();
    let pb = pb.with_base(store.path().clone());
    assert!(store.entries.read().unwrap().get(&pb).is_some());
    assert!(store.update(fle).is_err());
}
/// Build a store with one hook at `pos` that fails, but whose error is marked
/// non-aborting, so guarded store operations must still succeed.
fn get_store_with_allowed_error_hook_at_pos(pos: HP) -> Store {
    let mut store = get_store_with_config();
    let hook = TestHook::new(pos.clone(), false, false);
    assert!(store.register_hook(pos, "test", Box::new(hook)).map_err(|e| println!("{:?}", e)).is_ok());
    store
}
// Allowed-error tests (pre-* positions): a failing but non-aborting hook must
// not make the guarded store operation fail.
#[test]
fn test_pre_create_allowed_error() {
    let storeid = StoreId::new_baseless(PathBuf::from("test_pre_create_allowed_error")).unwrap();
    let store = get_store_with_allowed_error_hook_at_pos(HP::PreCreate);
    assert!(store.create(storeid).is_ok());
}
#[test]
fn test_pre_retrieve_allowed_error() {
    let storeid = StoreId::new_baseless(PathBuf::from("test_pre_retrieve_allowed_error")).unwrap();
    let store = get_store_with_allowed_error_hook_at_pos(HP::PreRetrieve);
    assert!(store.retrieve(storeid).is_ok());
}
#[test]
fn test_pre_delete_allowed_error() {
    let storeid = StoreId::new_baseless(PathBuf::from("test_pre_delete_allowed_error")).unwrap();
    let store = get_store_with_allowed_error_hook_at_pos(HP::PreDelete);
    assert!(store.retrieve(storeid.clone()).is_ok());
    assert!(store.delete(storeid).map_err(|e| println!("{:?}", e)).is_ok());
}
#[test]
fn test_pre_update_allowed_error() {
    let storeid = StoreId::new_baseless(PathBuf::from("test_pre_update_allowed_error")).unwrap();
    let store = get_store_with_allowed_error_hook_at_pos(HP::PreUpdate);
    let fle = store.create(storeid).unwrap();
    assert!(store.update(fle).is_ok());
}
// Allowed-error tests (post-* positions): the operation succeeds despite the
// failing non-aborting hook, and its side effect is observable.
// Fixed copy-pasted store-id strings: these tests used the "test_pre_*" ids
// of the tests above; each now uses an id matching its own name.
#[test]
fn test_post_create_allowed_error() {
    let store = get_store_with_allowed_error_hook_at_pos(HP::PostCreate);
    let pb = StoreId::new_baseless(PathBuf::from("test_post_create_allowed_error")).unwrap();
    assert!(store.create(pb.clone()).is_ok());
    // The entry exists even though the hook failed post-create.
    assert!(store.entries.read().unwrap().get(&pb.with_base(store.path().clone())).is_some());
}
#[test]
fn test_post_retrieve_allowed_error() {
    let store = get_store_with_allowed_error_hook_at_pos(HP::PostRetrieve);
    let pb = StoreId::new_baseless(PathBuf::from("test_post_retrieve_allowed_error")).unwrap();
    assert!(store.retrieve(pb.clone()).is_ok());
    // The entry exists even though the hook failed post-retrieve.
    assert!(store.entries.read().unwrap().get(&pb.with_base(store.path().clone())).is_some());
}
#[test]
fn test_post_delete_allowed_error() {
    let store = get_store_with_allowed_error_hook_at_pos(HP::PostDelete);
    let pb = StoreId::new_baseless(PathBuf::from("test_post_delete_allowed_error")).unwrap();
    assert!(store.create(pb.clone()).is_ok());
    let pb = pb.with_base(store.path().clone());
    assert!(store.entries.read().unwrap().get(&pb).is_some());
    assert!(store.delete(pb.clone()).is_ok());
    // The entry is removed even though the hook failed post-delete.
    assert!(store.entries.read().unwrap().get(&pb).is_none());
}
#[test]
fn test_post_update_allowed_error() {
    let store = get_store_with_allowed_error_hook_at_pos(HP::PostUpdate);
    let pb = StoreId::new_baseless(PathBuf::from("test_post_update_allowed_error")).unwrap();
    let fle = store.create(pb.clone()).unwrap();
    let pb = pb.with_base(store.path().clone());
    assert!(store.entries.read().unwrap().get(&pb).is_some());
    assert!(store.update(fle).is_ok());
}
}
|
#![unstable(
feature = "ip",
reason = "extra functionality has not been \
scrutinized to the level that it should \
be to be stable",
issue = "27709"
)]
use crate::cmp::Ordering;
use crate::fmt::{self, Write as FmtWrite};
use crate::hash;
use crate::io::Write as IoWrite;
use crate::sys::net::netc as c;
use crate::sys_common::{AsInner, FromInner};
/// An IP address, either IPv4 or IPv6.
///
/// This enum can contain either an [`Ipv4Addr`] or an [`Ipv6Addr`], see their
/// respective documentation for more details.
///
/// The size of an `IpAddr` instance may vary depending on the target operating
/// system.
///
/// [`Ipv4Addr`]: ../../std/net/struct.Ipv4Addr.html
/// [`Ipv6Addr`]: ../../std/net/struct.Ipv6Addr.html
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
/// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
///
/// assert_eq!("127.0.0.1".parse(), Ok(localhost_v4));
/// assert_eq!("::1".parse(), Ok(localhost_v6));
///
/// assert_eq!(localhost_v4.is_ipv6(), false);
/// assert_eq!(localhost_v4.is_ipv4(), true);
/// ```
#[stable(feature = "ip_addr", since = "1.7.0")]
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash, PartialOrd, Ord)]
pub enum IpAddr {
/// An IPv4 address.
#[stable(feature = "ip_addr", since = "1.7.0")]
V4(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv4Addr),
/// An IPv6 address.
#[stable(feature = "ip_addr", since = "1.7.0")]
V6(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv6Addr),
}
/// An IPv4 address.
///
/// IPv4 addresses are defined as 32-bit integers in [IETF RFC 791].
/// They are usually represented as four octets.
///
/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses.
///
/// The size of an `Ipv4Addr` struct may vary depending on the target operating
/// system.
///
/// [IETF RFC 791]: https://tools.ietf.org/html/rfc791
/// [`IpAddr`]: ../../std/net/enum.IpAddr.html
///
/// # Textual representation
///
/// `Ipv4Addr` provides a [`FromStr`] implementation. The four octets are in decimal
/// notation, divided by `.` (this is called "dot-decimal notation").
///
/// [`FromStr`]: ../../std/str/trait.FromStr.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// let localhost = Ipv4Addr::new(127, 0, 0, 1);
/// assert_eq!("127.0.0.1".parse(), Ok(localhost));
/// assert_eq!(localhost.is_loopback(), true);
/// ```
#[derive(Copy)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Ipv4Addr {
    // Platform `in_addr`; its `s_addr` field holds the address in network
    // byte order (big-endian) — see `octets()`.
    inner: c::in_addr,
}
/// An IPv6 address.
///
/// IPv6 addresses are defined as 128-bit integers in [IETF RFC 4291].
/// They are usually represented as eight 16-bit segments.
///
/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses.
///
/// The size of an `Ipv6Addr` struct may vary depending on the target operating
/// system.
///
/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
/// [`IpAddr`]: ../../std/net/enum.IpAddr.html
///
/// # Textual representation
///
/// `Ipv6Addr` provides a [`FromStr`] implementation. There are many ways to represent
/// an IPv6 address in text, but in general, each segments is written in hexadecimal
/// notation, and segments are separated by `:`. For more information, see
/// [IETF RFC 5952].
///
/// [`FromStr`]: ../../std/str/trait.FromStr.html
/// [IETF RFC 5952]: https://tools.ietf.org/html/rfc5952
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// let localhost = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
/// assert_eq!("::1".parse(), Ok(localhost));
/// assert_eq!(localhost.is_loopback(), true);
/// ```
#[derive(Copy)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Ipv6Addr {
    // Platform `in6_addr` wrapping the 128-bit address.
    inner: c::in6_addr,
}
/// Scope of an IPv6 multicast address, as defined in [IETF RFC 7346 section 2].
///
/// [IETF RFC 7346 section 2]: https://tools.ietf.org/html/rfc7346#section-2
#[allow(missing_docs)]
#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)]
pub enum Ipv6MulticastScope {
    InterfaceLocal,
    LinkLocal,
    RealmLocal,
    AdminLocal,
    SiteLocal,
    OrganizationLocal,
    Global,
}
impl IpAddr {
/// Returns [`true`] for the special 'unspecified' address.
///
/// See the documentation for [`Ipv4Addr::is_unspecified`][IPv4] and
/// [`Ipv6Addr::is_unspecified`][IPv6] for more details.
///
/// [IPv4]: ../../std/net/struct.Ipv4Addr.html#method.is_unspecified
/// [IPv6]: ../../std/net/struct.Ipv6Addr.html#method.is_unspecified
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)).is_unspecified(), true);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)).is_unspecified(), true);
/// ```
#[stable(feature = "ip_shared", since = "1.12.0")]
pub fn is_unspecified(&self) -> bool {
    // Delegate to the version-specific check; IpAddr is Copy, so match by value.
    match *self {
        IpAddr::V4(v4) => v4.is_unspecified(),
        IpAddr::V6(v6) => v6.is_unspecified(),
    }
}
/// Returns [`true`] if this is a loopback address.
///
/// See the documentation for [`Ipv4Addr::is_loopback`][IPv4] and
/// [`Ipv6Addr::is_loopback`][IPv6] for more details.
///
/// [IPv4]: ../../std/net/struct.Ipv4Addr.html#method.is_loopback
/// [IPv6]: ../../std/net/struct.Ipv6Addr.html#method.is_loopback
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).is_loopback(), true);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1)).is_loopback(), true);
/// ```
#[stable(feature = "ip_shared", since = "1.12.0")]
pub fn is_loopback(&self) -> bool {
    // Delegate to the version-specific check; IpAddr is Copy, so match by value.
    match *self {
        IpAddr::V4(v4) => v4.is_loopback(),
        IpAddr::V6(v6) => v6.is_loopback(),
    }
}
/// Returns [`true`] if the address appears to be globally routable.
///
/// See the documentation for [`Ipv4Addr::is_global`][IPv4] and
/// [`Ipv6Addr::is_global`][IPv6] for more details.
///
/// [IPv4]: ../../std/net/struct.Ipv4Addr.html#method.is_global
/// [IPv6]: ../../std/net/struct.Ipv6Addr.html#method.is_global
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(80, 9, 12, 3)).is_global(), true);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1)).is_global(), true);
/// ```
pub fn is_global(&self) -> bool {
    // Delegate to the version-specific check; IpAddr is Copy, so match by value.
    match *self {
        IpAddr::V4(v4) => v4.is_global(),
        IpAddr::V6(v6) => v6.is_global(),
    }
}
/// Returns [`true`] if this is a multicast address.
///
/// See the documentation for [`Ipv4Addr::is_multicast`][IPv4] and
/// [`Ipv6Addr::is_multicast`][IPv6] for more details.
///
/// [IPv4]: ../../std/net/struct.Ipv4Addr.html#method.is_multicast
/// [IPv6]: ../../std/net/struct.Ipv6Addr.html#method.is_multicast
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(224, 254, 0, 0)).is_multicast(), true);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0)).is_multicast(), true);
/// ```
#[stable(feature = "ip_shared", since = "1.12.0")]
pub fn is_multicast(&self) -> bool {
    // Delegate to the version-specific check; IpAddr is Copy, so match by value.
    match *self {
        IpAddr::V4(v4) => v4.is_multicast(),
        IpAddr::V6(v6) => v6.is_multicast(),
    }
}
/// Returns [`true`] if this address is in a range designated for documentation.
///
/// See the documentation for [`Ipv4Addr::is_documentation`][IPv4] and
/// [`Ipv6Addr::is_documentation`][IPv6] for more details.
///
/// [IPv4]: ../../std/net/struct.Ipv4Addr.html#method.is_documentation
/// [IPv6]: ../../std/net/struct.Ipv6Addr.html#method.is_documentation
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_documentation(), true);
/// assert_eq!(
/// IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_documentation(),
/// true
/// );
/// ```
pub fn is_documentation(&self) -> bool {
    // Delegate to the version-specific check; IpAddr is Copy, so match by value.
    match *self {
        IpAddr::V4(v4) => v4.is_documentation(),
        IpAddr::V6(v6) => v6.is_documentation(),
    }
}
/// Returns [`true`] if this address is an [IPv4 address], and [`false`] otherwise.
///
/// [`true`]: ../../std/primitive.bool.html
/// [`false`]: ../../std/primitive.bool.html
/// [IPv4 address]: #variant.V4
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv4(), true);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv4(), false);
/// ```
#[stable(feature = "ipaddr_checker", since = "1.16.0")]
pub fn is_ipv4(&self) -> bool {
    // Pure variant test; the contained address is not inspected.
    matches!(self, IpAddr::V4(_))
}
/// Returns [`true`] if this address is an [IPv6 address], and [`false`] otherwise.
///
/// [`true`]: ../../std/primitive.bool.html
/// [`false`]: ../../std/primitive.bool.html
/// [IPv6 address]: #variant.V6
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv6(), false);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv6(), true);
/// ```
#[stable(feature = "ipaddr_checker", since = "1.16.0")]
pub fn is_ipv6(&self) -> bool {
    // Pure variant test; the contained address is not inspected.
    matches!(self, IpAddr::V6(_))
}
}
impl Ipv4Addr {
/// Creates a new IPv4 address from four eight-bit octets.
///
/// The result will represent the IP address `a`.`b`.`c`.`d`.
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// let addr = Ipv4Addr::new(127, 0, 0, 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_ipv4", since = "1.32.0")]
pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr {
    // FIXME: should just be u32::from_be_bytes([a, b, c, d]),
    // once that method is no longer rustc_const_unstable
    Ipv4Addr {
        inner: c::in_addr {
            // Pack the octets most-significant-first, then convert the
            // native-endian value to network byte order for `s_addr`.
            s_addr: u32::to_be(
                ((a as u32) << 24) | ((b as u32) << 16) | ((c as u32) << 8) | (d as u32),
            ),
        },
    }
}
/// An IPv4 address with the address pointing to localhost: 127.0.0.1.
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// let addr = Ipv4Addr::LOCALHOST;
/// assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1));
/// ```
#[stable(feature = "ip_constructors", since = "1.30.0")]
pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1);
/// An IPv4 address representing an unspecified address: 0.0.0.0
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// let addr = Ipv4Addr::UNSPECIFIED;
/// assert_eq!(addr, Ipv4Addr::new(0, 0, 0, 0));
/// ```
#[stable(feature = "ip_constructors", since = "1.30.0")]
pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0);
/// An IPv4 address representing the broadcast address: 255.255.255.255
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// let addr = Ipv4Addr::BROADCAST;
/// assert_eq!(addr, Ipv4Addr::new(255, 255, 255, 255));
/// ```
#[stable(feature = "ip_constructors", since = "1.30.0")]
pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255);
/// Returns the four eight-bit integers that make up this address.
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// let addr = Ipv4Addr::new(127, 0, 0, 1);
/// assert_eq!(addr.octets(), [127, 0, 0, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn octets(&self) -> [u8; 4] {
    // `s_addr` is stored in network byte order (big-endian), so its raw
    // native-endian byte view is already most-significant octet first.
    self.inner.s_addr.to_ne_bytes()
}
/// Returns [`true`] for the special 'unspecified' address (0.0.0.0).
///
/// This property is defined in _UNIX Network Programming, Second Edition_,
/// W. Richard Stevens, p. 891; see also [ip7].
///
/// [ip7]: http://man7.org/linux/man-pages/man7/ip.7.html
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_unspecified(), true);
/// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_unspecified(), false);
/// ```
#[stable(feature = "ip_shared", since = "1.12.0")]
#[rustc_const_stable(feature = "const_ipv4", since = "1.32.0")]
pub const fn is_unspecified(&self) -> bool {
    // All-zero is the same value in every byte order, so the big-endian
    // `s_addr` can be compared directly without conversion.
    self.inner.s_addr == 0
}
/// Returns [`true`] if this is a loopback address (127.0.0.0/8).
///
/// This property is defined by [IETF RFC 1122].
///
/// [IETF RFC 1122]: https://tools.ietf.org/html/rfc1122
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_loopback(), true);
/// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_loopback(), false);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_loopback(&self) -> bool {
    // 127.0.0.0/8: only the first octet matters.
    matches!(self.octets(), [127, ..])
}
/// Returns [`true`] if this is a private address.
///
/// The private address ranges are defined in [IETF RFC 1918] and include:
///
/// - 10.0.0.0/8
/// - 172.16.0.0/12
/// - 192.168.0.0/16
///
/// [IETF RFC 1918]: https://tools.ietf.org/html/rfc1918
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// assert_eq!(Ipv4Addr::new(10, 0, 0, 1).is_private(), true);
/// assert_eq!(Ipv4Addr::new(10, 10, 10, 10).is_private(), true);
/// assert_eq!(Ipv4Addr::new(172, 16, 10, 10).is_private(), true);
/// assert_eq!(Ipv4Addr::new(172, 29, 45, 14).is_private(), true);
/// assert_eq!(Ipv4Addr::new(172, 32, 0, 2).is_private(), false);
/// assert_eq!(Ipv4Addr::new(192, 168, 0, 2).is_private(), true);
/// assert_eq!(Ipv4Addr::new(192, 169, 0, 2).is_private(), false);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_private(&self) -> bool {
    // RFC 1918 ranges: 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16.
    let octets = self.octets();
    octets[0] == 10
        || (octets[0] == 172 && octets[1] >= 16 && octets[1] <= 31)
        || (octets[0] == 192 && octets[1] == 168)
}
/// Returns [`true`] if the address is link-local (169.254.0.0/16).
///
/// This property is defined by [IETF RFC 3927].
///
/// [IETF RFC 3927]: https://tools.ietf.org/html/rfc3927
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// assert_eq!(Ipv4Addr::new(169, 254, 0, 0).is_link_local(), true);
/// assert_eq!(Ipv4Addr::new(169, 254, 10, 65).is_link_local(), true);
/// assert_eq!(Ipv4Addr::new(16, 89, 10, 65).is_link_local(), false);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_link_local(&self) -> bool {
    // 169.254.0.0/16: the first two octets fully determine membership.
    // `match ... => true, _ => false` replaced with `matches!`, consistent
    // with `IpAddr::is_ipv4` in this file.
    matches!(self.octets(), [169, 254, ..])
}
/// Returns [`true`] if the address appears to be globally routable.
/// See [iana-ipv4-special-registry][ipv4-sr].
///
/// The following return false:
///
/// - private addresses (see [`is_private()`](#method.is_private))
/// - the loopback address (see [`is_loopback()`](#method.is_loopback))
/// - the link-local address (see [`is_link_local()`](#method.is_link_local))
/// - the broadcast address (see [`is_broadcast()`](#method.is_broadcast))
/// - addresses used for documentation (see [`is_documentation()`](#method.is_documentation))
/// - the unspecified address (see [`is_unspecified()`](#method.is_unspecified)), and the whole
/// 0.0.0.0/8 block
/// - addresses reserved for future protocols (see
/// [`is_ietf_protocol_assignment()`](#method.is_ietf_protocol_assignment), except
/// `192.0.0.9/32` and `192.0.0.10/32` which are globally routable
/// - addresses reserved for future use (see [`is_reserved()`](#method.is_reserved)
/// - addresses reserved for networking devices benchmarking (see
/// [`is_benchmarking`](#method.is_benchmarking))
///
/// [ipv4-sr]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv4Addr;
///
/// // private addresses are not global
/// assert_eq!(Ipv4Addr::new(10, 254, 0, 0).is_global(), false);
/// assert_eq!(Ipv4Addr::new(192, 168, 10, 65).is_global(), false);
/// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_global(), false);
///
/// // the 0.0.0.0/8 block is not global
/// assert_eq!(Ipv4Addr::new(0, 1, 2, 3).is_global(), false);
/// // in particular, the unspecified address is not global
/// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_global(), false);
///
/// // the loopback address is not global
/// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_global(), false);
///
/// // link local addresses are not global
/// assert_eq!(Ipv4Addr::new(169, 254, 45, 1).is_global(), false);
///
/// // the broadcast address is not global
/// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_global(), false);
///
/// // the address space designated for documentation is not global
/// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_global(), false);
/// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_global(), false);
/// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_global(), false);
///
/// // shared addresses are not global
/// assert_eq!(Ipv4Addr::new(100, 100, 0, 0).is_global(), false);
///
/// // addresses reserved for protocol assignment are not global
/// assert_eq!(Ipv4Addr::new(192, 0, 0, 0).is_global(), false);
/// assert_eq!(Ipv4Addr::new(192, 0, 0, 255).is_global(), false);
///
/// // addresses reserved for future use are not global
/// assert_eq!(Ipv4Addr::new(250, 10, 20, 30).is_global(), false);
///
/// // addresses reserved for network devices benchmarking are not global
/// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_global(), false);
///
/// // All the other addresses are global
/// assert_eq!(Ipv4Addr::new(1, 1, 1, 1).is_global(), true);
/// assert_eq!(Ipv4Addr::new(80, 9, 12, 3).is_global(), true);
/// ```
pub fn is_global(&self) -> bool {
    // `192.0.0.9` (Port Control Protocol Anycast) and `192.0.0.10`
    // (Traversal Using Relays around NAT Anycast) are the only two
    // globally routable addresses inside the otherwise-reserved
    // 192.0.0.0/24 block, so test them up front.
    let bits = u32::from(*self);
    if bits == 0xc0000009 || bits == 0xc000000a {
        return true;
    }
    // Everything else is global unless it falls into one of the
    // special-purpose ranges from the IANA IPv4 special registry.
    !self.is_private()
        && !self.is_loopback()
        && !self.is_link_local()
        && !self.is_broadcast()
        && !self.is_documentation()
        && !self.is_shared()
        && !self.is_ietf_protocol_assignment()
        && !self.is_reserved()
        && !self.is_benchmarking()
        // Make sure the address is not in 0.0.0.0/8
        && self.octets()[0] != 0
}
/// Returns [`true`] if this address is part of the Shared Address Space defined in
/// [IETF RFC 6598] (`100.64.0.0/10`).
///
/// [IETF RFC 6598]: https://tools.ietf.org/html/rfc6598
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
/// use std::net::Ipv4Addr;
///
/// assert_eq!(Ipv4Addr::new(100, 64, 0, 0).is_shared(), true);
/// assert_eq!(Ipv4Addr::new(100, 127, 255, 255).is_shared(), true);
/// assert_eq!(Ipv4Addr::new(100, 128, 0, 0).is_shared(), false);
/// ```
pub fn is_shared(&self) -> bool {
    // 100.64.0.0/10: first octet is 100 and the top two bits of the
    // second octet are `01` (i.e. the second octet lies in 64..=127).
    let octets = self.octets();
    octets[0] == 100 && (octets[1] & 0b1100_0000 == 0b0100_0000)
}
/// Returns [`true`] if this address is part of `192.0.0.0/24`, which is reserved to
/// IANA for IETF protocol assignments, as documented in [IETF RFC 6890].
///
/// Note that parts of this block are in use:
///
/// - `192.0.0.8/32` is the "IPv4 dummy address" (see [IETF RFC 7600])
/// - `192.0.0.9/32` is the "Port Control Protocol Anycast" (see [IETF RFC 7723])
/// - `192.0.0.10/32` is used for NAT traversal (see [IETF RFC 8155])
///
/// [IETF RFC 6890]: https://tools.ietf.org/html/rfc6890
/// [IETF RFC 7600]: https://tools.ietf.org/html/rfc7600
/// [IETF RFC 7723]: https://tools.ietf.org/html/rfc7723
/// [IETF RFC 8155]: https://tools.ietf.org/html/rfc8155
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
/// use std::net::Ipv4Addr;
///
/// assert_eq!(Ipv4Addr::new(192, 0, 0, 0).is_ietf_protocol_assignment(), true);
/// assert_eq!(Ipv4Addr::new(192, 0, 0, 8).is_ietf_protocol_assignment(), true);
/// assert_eq!(Ipv4Addr::new(192, 0, 0, 9).is_ietf_protocol_assignment(), true);
/// assert_eq!(Ipv4Addr::new(192, 0, 0, 255).is_ietf_protocol_assignment(), true);
/// assert_eq!(Ipv4Addr::new(192, 0, 1, 0).is_ietf_protocol_assignment(), false);
/// assert_eq!(Ipv4Addr::new(191, 255, 255, 255).is_ietf_protocol_assignment(), false);
/// ```
pub fn is_ietf_protocol_assignment(&self) -> bool {
    // 192.0.0.0/24: the first three octets are fixed, the last is free.
    match self.octets() {
        [192, 0, 0, _] => true,
        _ => false,
    }
}
/// Returns [`true`] if this address part of the `198.18.0.0/15` range, which is reserved for
/// network devices benchmarking. This range is defined in [IETF RFC 2544] as `198.18.0.0`
/// through `198.19.255.255` but [errata 423] corrects it to `198.18.0.0/15`.
///
/// [IETF RFC 2544]: https://tools.ietf.org/html/rfc2544
/// [errata 423]: https://www.rfc-editor.org/errata/eid423
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
/// use std::net::Ipv4Addr;
///
/// assert_eq!(Ipv4Addr::new(198, 17, 255, 255).is_benchmarking(), false);
/// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_benchmarking(), true);
/// assert_eq!(Ipv4Addr::new(198, 19, 255, 255).is_benchmarking(), true);
/// assert_eq!(Ipv4Addr::new(198, 20, 0, 0).is_benchmarking(), false);
/// ```
pub fn is_benchmarking(&self) -> bool {
// /15 mask on the second octet: `& 0xfe` clears the low bit, so both
// 18 and 19 compare equal to 18.
self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18
}
/// Returns [`true`] if this address is reserved by IANA for future use. [IETF RFC 1112]
/// defines the block of reserved addresses as `240.0.0.0/4`. This range normally includes the
/// broadcast address `255.255.255.255`, but this implementation explicitly excludes it, since
/// it is obviously not reserved for future use.
///
/// [IETF RFC 1112]: https://tools.ietf.org/html/rfc1112
/// [`true`]: ../../std/primitive.bool.html
///
/// # Warning
///
/// As IANA assigns new addresses, this method will be
/// updated. This may result in non-reserved addresses being
/// treated as reserved in code that relies on an outdated version
/// of this method.
///
/// # Examples
///
/// ```
/// #![feature(ip)]
/// use std::net::Ipv4Addr;
///
/// assert_eq!(Ipv4Addr::new(240, 0, 0, 0).is_reserved(), true);
/// assert_eq!(Ipv4Addr::new(255, 255, 255, 254).is_reserved(), true);
///
/// assert_eq!(Ipv4Addr::new(239, 255, 255, 255).is_reserved(), false);
/// // The broadcast address is not considered as reserved for future use by this implementation
/// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_reserved(), false);
/// ```
pub fn is_reserved(&self) -> bool {
    // 240.0.0.0/4: for a `u8`, having the top four bits all set is the
    // same as being >= 240. 255.255.255.255 also matches but is the
    // broadcast address, so it is excluded explicitly.
    self.octets()[0] >= 240 && !self.is_broadcast()
}
/// Returns [`true`] if this is a multicast address (224.0.0.0/4).
///
/// Multicast addresses have a most significant octet between 224 and 239,
/// and is defined by [IETF RFC 5771].
///
/// [IETF RFC 5771]: https://tools.ietf.org/html/rfc5771
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// assert_eq!(Ipv4Addr::new(224, 254, 0, 0).is_multicast(), true);
/// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_multicast(), true);
/// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_multicast(), false);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_multicast(&self) -> bool {
    // 224.0.0.0/4: only the first octet decides membership.
    let first = self.octets()[0];
    first >= 224 && first <= 239
}
/// Returns [`true`] if this is a broadcast address (255.255.255.255).
///
/// A broadcast address has all octets set to 255 as defined in [IETF RFC 919].
///
/// [IETF RFC 919]: https://tools.ietf.org/html/rfc919
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_broadcast(), true);
/// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_broadcast(), false);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_broadcast(&self) -> bool {
    // There is exactly one broadcast address: 255.255.255.255.
    *self == Self::BROADCAST
}
/// Returns [`true`] if this address is in a range designated for documentation.
///
/// This is defined in [IETF RFC 5737]:
///
/// - 192.0.2.0/24 (TEST-NET-1)
/// - 198.51.100.0/24 (TEST-NET-2)
/// - 203.0.113.0/24 (TEST-NET-3)
///
/// [IETF RFC 5737]: https://tools.ietf.org/html/rfc5737
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_documentation(), true);
/// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_documentation(), true);
/// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_documentation(), true);
/// assert_eq!(Ipv4Addr::new(193, 34, 17, 19).is_documentation(), false);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_documentation(&self) -> bool {
    // One arm per TEST-NET prefix; the last octet is a wildcard.
    match self.octets() {
        [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _] => true,
        _ => false,
    }
}
/// Converts this address to an IPv4-compatible [IPv6 address].
///
/// a.b.c.d becomes ::a.b.c.d
///
/// [IPv6 address]: ../../std/net/struct.Ipv6Addr.html
///
/// # Examples
///
/// ```
/// use std::net::{Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(
///     Ipv4Addr::new(192, 0, 2, 255).to_ipv6_compatible(),
///     Ipv6Addr::new(0, 0, 0, 0, 0, 0, 49152, 767)
/// );
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_ipv6_compatible(&self) -> Ipv6Addr {
    // The four IPv4 octets become the last four bytes; everything else
    // is zero.
    let [a, b, c, d] = self.octets();
    Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d])
}
/// Converts this address to an IPv4-mapped [IPv6 address].
///
/// a.b.c.d becomes ::ffff:a.b.c.d
///
/// [IPv6 address]: ../../std/net/struct.Ipv6Addr.html
///
/// # Examples
///
/// ```
/// use std::net::{Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).to_ipv6_mapped(),
///            Ipv6Addr::new(0, 0, 0, 0, 0, 65535, 49152, 767));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_ipv6_mapped(&self) -> Ipv6Addr {
    // Like `to_ipv6_compatible`, but with the `ffff` marker in bytes 10-11.
    let [a, b, c, d] = self.octets();
    Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d])
}
}
#[stable(feature = "ip_addr", since = "1.7.0")]
impl fmt::Display for IpAddr {
    /// Delegates to the `Display` impl of the wrapped address variant.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            IpAddr::V4(addr) => addr.fmt(fmt),
            IpAddr::V6(addr) => addr.fmt(fmt),
        }
    }
}
#[stable(feature = "ip_from_ip", since = "1.16.0")]
impl From<Ipv4Addr> for IpAddr {
    /// Copies this address to a new `IpAddr::V4`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv4Addr};
    ///
    /// let addr = Ipv4Addr::new(127, 0, 0, 1);
    ///
    /// assert_eq!(
    ///     IpAddr::V4(addr),
    ///     IpAddr::from(addr)
    /// )
    /// ```
    fn from(addr: Ipv4Addr) -> IpAddr {
        // Simple wrapping — no conversion of the address itself.
        IpAddr::V4(addr)
    }
}
#[stable(feature = "ip_from_ip", since = "1.16.0")]
impl From<Ipv6Addr> for IpAddr {
    /// Copies this address to a new `IpAddr::V6`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv6Addr};
    ///
    /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
    ///
    /// assert_eq!(
    ///     IpAddr::V6(addr),
    ///     IpAddr::from(addr)
    /// );
    /// ```
    fn from(addr: Ipv6Addr) -> IpAddr {
        // Simple wrapping — no conversion of the address itself.
        IpAddr::V6(addr)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for Ipv4Addr {
// Formats as dotted-decimal ("a.b.c.d"), honoring width/alignment flags.
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let octets = self.octets();
// Fast Path: if there's no alignment stuff, write directly to the buffer
if fmt.precision().is_none() && fmt.width().is_none() {
write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3])
} else {
// Slow path: format into a stack buffer first, then let `pad`
// apply width/alignment to the finished string.
const IPV4_BUF_LEN: usize = 15; // Long enough for the longest possible IPv4 address
let mut buf = [0u8; IPV4_BUF_LEN];
let mut buf_slice = &mut buf[..];
// Note: The call to write should never fail, hence the unwrap
write!(buf_slice, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap();
// `write!` advances `buf_slice`, so the remaining length tells us
// how many bytes were actually written.
let len = IPV4_BUF_LEN - buf_slice.len();
// This unsafe is OK because we know what is being written to the buffer
// (only ASCII digits and dots).
let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
fmt.pad(buf)
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Ipv4Addr {
// `Debug` output is intentionally identical to `Display` ("a.b.c.d").
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self, fmt)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Clone for Ipv4Addr {
// A plain bitwise copy of the wrapped `in_addr` value.
fn clone(&self) -> Ipv4Addr {
*self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for Ipv4Addr {
// Two addresses are equal iff their raw `s_addr` representations match;
// both sides use the same (network) byte order, so this is exact.
fn eq(&self, other: &Ipv4Addr) -> bool {
self.inner.s_addr == other.inner.s_addr
}
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialEq<Ipv4Addr> for IpAddr {
    /// An `IpAddr` equals an `Ipv4Addr` only when it is the `V4` variant
    /// wrapping an equal address.
    fn eq(&self, other: &Ipv4Addr) -> bool {
        if let IpAddr::V4(v4) = self { v4 == other } else { false }
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialEq<IpAddr> for Ipv4Addr {
    /// Mirror of `PartialEq<Ipv4Addr> for IpAddr`; equality is symmetric.
    fn eq(&self, other: &IpAddr) -> bool {
        // Defer to the symmetric impl on `IpAddr`.
        other == self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// `PartialEq` compares the raw `s_addr` integers, which is reflexive,
// so the full `Eq` contract holds.
impl Eq for Ipv4Addr {}
#[stable(feature = "rust1", since = "1.0.0")]
impl hash::Hash for Ipv4Addr {
fn hash<H: hash::Hasher>(&self, s: &mut H) {
// `inner` is #[repr(packed)], so we need to copy `s_addr`.
// The braces move the field into an aligned temporary before hashing.
// Hashing `s_addr` keeps `Hash` consistent with `PartialEq`, which
// compares the same field.
{ self.inner.s_addr }.hash(s)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialOrd for Ipv4Addr {
// Delegates to the total order defined by `Ord`, so this never
// returns `None`.
fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> {
Some(self.cmp(other))
}
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialOrd<Ipv4Addr> for IpAddr {
    /// Compares an `IpAddr` with an `Ipv4Addr`: `V4` values compare by
    /// address, and every `V6` value sorts after every IPv4 address.
    fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> {
        if let IpAddr::V4(v4) = self {
            v4.partial_cmp(other)
        } else {
            // IPv6 addresses sort after all IPv4 addresses.
            Some(Ordering::Greater)
        }
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialOrd<IpAddr> for Ipv4Addr {
    /// Mirror of `PartialOrd<Ipv4Addr> for IpAddr`.
    fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> {
        // Defer to the symmetric impl on `IpAddr` and flip the result.
        other.partial_cmp(self).map(Ordering::reverse)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for Ipv4Addr {
fn cmp(&self, other: &Ipv4Addr) -> Ordering {
// `s_addr` is stored in network (big-endian) byte order; convert to
// host order so addresses compare numerically (10.x.x.x < 192.x.x.x)
// regardless of the platform's endianness.
u32::from_be(self.inner.s_addr).cmp(&u32::from_be(other.inner.s_addr))
}
}
// Crate-internal: exposes the underlying libc `in_addr` so the socket
// layer can hand the raw value to OS APIs. Not part of the public API.
impl AsInner<c::in_addr> for Ipv4Addr {
fn as_inner(&self) -> &c::in_addr {
&self.inner
}
}
// Crate-internal: builds an `Ipv4Addr` directly from a libc `in_addr`
// (e.g. one returned by an OS socket call). Not part of the public API.
impl FromInner<c::in_addr> for Ipv4Addr {
fn from_inner(addr: c::in_addr) -> Ipv4Addr {
Ipv4Addr { inner: addr }
}
}
#[stable(feature = "ip_u32", since = "1.1.0")]
impl From<Ipv4Addr> for u32 {
    /// Converts an `Ipv4Addr` into a host byte order `u32`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// let addr = Ipv4Addr::new(13, 12, 11, 10);
    /// assert_eq!(0x0d0c0b0au32, u32::from(addr));
    /// ```
    fn from(ip: Ipv4Addr) -> u32 {
        // The octets are in network (big-endian) order, so reassembling
        // them big-endian yields the host-order numeric value.
        u32::from_be_bytes(ip.octets())
    }
}
#[stable(feature = "ip_u32", since = "1.1.0")]
impl From<u32> for Ipv4Addr {
    /// Converts a host byte order `u32` into an `Ipv4Addr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// let addr = Ipv4Addr::from(0x0d0c0b0au32);
    /// assert_eq!(Ipv4Addr::new(13, 12, 11, 10), addr);
    /// ```
    fn from(ip: u32) -> Ipv4Addr {
        // Split into big-endian bytes so the most significant byte
        // becomes the first octet.
        let [a, b, c, d] = ip.to_be_bytes();
        Ipv4Addr::new(a, b, c, d)
    }
}
#[stable(feature = "from_slice_v4", since = "1.9.0")]
impl From<[u8; 4]> for Ipv4Addr {
    /// Creates an `Ipv4Addr` from a four element byte array.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// let addr = Ipv4Addr::from([13u8, 12u8, 11u8, 10u8]);
    /// assert_eq!(Ipv4Addr::new(13, 12, 11, 10), addr);
    /// ```
    fn from(octets: [u8; 4]) -> Ipv4Addr {
        // Destructure once instead of indexing four times.
        let [a, b, c, d] = octets;
        Ipv4Addr::new(a, b, c, d)
    }
}
#[stable(feature = "ip_from_slice", since = "1.17.0")]
impl From<[u8; 4]> for IpAddr {
    /// Creates an `IpAddr::V4` from a four element byte array.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv4Addr};
    ///
    /// let addr = IpAddr::from([13u8, 12u8, 11u8, 10u8]);
    /// assert_eq!(IpAddr::V4(Ipv4Addr::new(13, 12, 11, 10)), addr);
    /// ```
    fn from(bytes: [u8; 4]) -> IpAddr {
        // Build the concrete address, then wrap it in the enum.
        IpAddr::V4(Ipv4Addr::from(bytes))
    }
}
impl Ipv6Addr {
/// Creates a new IPv6 address from eight 16-bit segments.
///
/// The result will represent the IP address `a:b:c:d:e:f:g:h`.
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_ipv6", since = "1.32.0")]
pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr {
Ipv6Addr {
inner: c::in6_addr {
// Each 16-bit segment is split into its high byte followed by its
// low byte, producing the network (big-endian) byte order that
// `in6_addr` stores. Written out manually because this is a
// `const fn` targeting an old const-eval baseline.
s6_addr: [
(a >> 8) as u8,
a as u8,
(b >> 8) as u8,
b as u8,
(c >> 8) as u8,
c as u8,
(d >> 8) as u8,
d as u8,
(e >> 8) as u8,
e as u8,
(f >> 8) as u8,
f as u8,
(g >> 8) as u8,
g as u8,
(h >> 8) as u8,
h as u8,
],
},
}
}
/// An IPv6 address representing localhost: `::1`.
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// let addr = Ipv6Addr::LOCALHOST;
/// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
/// ```
#[stable(feature = "ip_constructors", since = "1.30.0")]
// This is the address that `is_loopback()` tests against.
pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
/// An IPv6 address representing the unspecified address: `::`
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// let addr = Ipv6Addr::UNSPECIFIED;
/// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
/// ```
#[stable(feature = "ip_constructors", since = "1.30.0")]
// All-zero address; see `is_unspecified()`.
pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
/// Returns the eight 16-bit segments that make up this address.
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).segments(),
///            [0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn segments(&self) -> [u16; 8] {
    // Reassemble each adjacent pair of network-order bytes into one
    // 16-bit segment.
    let bytes = self.octets();
    [
        u16::from_be_bytes([bytes[0], bytes[1]]),
        u16::from_be_bytes([bytes[2], bytes[3]]),
        u16::from_be_bytes([bytes[4], bytes[5]]),
        u16::from_be_bytes([bytes[6], bytes[7]]),
        u16::from_be_bytes([bytes[8], bytes[9]]),
        u16::from_be_bytes([bytes[10], bytes[11]]),
        u16::from_be_bytes([bytes[12], bytes[13]]),
        u16::from_be_bytes([bytes[14], bytes[15]]),
    ]
}
/// Returns [`true`] for the special 'unspecified' address (::).
///
/// This property is defined in [IETF RFC 4291].
///
/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unspecified(), false);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).is_unspecified(), true);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_unspecified(&self) -> bool {
    // `::` — all eight segments are zero.
    self.segments() == [0; 8]
}
/// Returns [`true`] if this is a loopback address (::1).
///
/// This property is defined in [IETF RFC 4291].
///
/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_loopback(), false);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_loopback(), true);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_loopback(&self) -> bool {
self.segments() == [0, 0, 0, 0, 0, 0, 0, 1]
}
/// Returns [`true`] if the address appears to be globally routable.
///
/// The following return [`false`]:
///
/// - the loopback address
/// - link-local and unique local unicast addresses
/// - interface-, link-, realm-, admin- and site-local multicast addresses
///
/// [`true`]: ../../std/primitive.bool.html
/// [`false`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_global(), true);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_global(), false);
/// assert_eq!(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1).is_global(), true);
/// ```
pub fn is_global(&self) -> bool {
    // Multicast addresses are global only with global scope; everything
    // else (unicast) defers to `is_unicast_global`.
    match self.multicast_scope() {
        None => self.is_unicast_global(),
        Some(Ipv6MulticastScope::Global) => true,
        Some(_) => false,
    }
}
/// Returns [`true`] if this is a unique local address (`fc00::/7`).
///
/// This property is defined in [IETF RFC 4193].
///
/// [IETF RFC 4193]: https://tools.ietf.org/html/rfc4193
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unique_local(), false);
/// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 0).is_unique_local(), true);
/// ```
pub fn is_unique_local(&self) -> bool {
    // `fc00::/7`: only the top seven bits of the first segment matter.
    let first = self.segments()[0];
    (first & 0xfe00) == 0xfc00
}
/// Returns [`true`] if the address is a unicast link-local address (`fe80::/64`).
///
/// A common mis-conception is to think that "unicast link-local addresses start with
/// `fe80::`", but the [IETF RFC 4291] actually defines a stricter format for these addresses:
///
/// ```no_rust
/// |   10     |
/// |  bits    |         54 bits         |          64 bits           |
/// +----------+-------------------------+----------------------------+
/// |1111111010|           0             |       interface ID         |
/// +----------+-------------------------+----------------------------+
/// ```
///
/// This method validates the format defined in the RFC and won't recognize addresses
/// such as `fe80:0:0:1::` or `fe81::` as unicast link-local addresses.
/// If you need a less strict validation use [`is_unicast_link_local()`] instead.
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 0);
/// assert!(ip.is_unicast_link_local_strict());
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff);
/// assert!(ip.is_unicast_link_local_strict());
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 1, 0, 0, 0, 0);
/// assert!(!ip.is_unicast_link_local_strict());
/// assert!(ip.is_unicast_link_local());
///
/// let ip = Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 0);
/// assert!(!ip.is_unicast_link_local_strict());
/// assert!(ip.is_unicast_link_local());
/// ```
///
/// # See also
///
/// - [IETF RFC 4291 section 2.5.6]
/// - [RFC 4291 errata 4406] (which has been rejected but provides useful
///   insight)
/// - [`is_unicast_link_local()`]
///
/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
/// [IETF RFC 4291 section 2.5.6]: https://tools.ietf.org/html/rfc4291#section-2.5.6
/// [`true`]: ../../std/primitive.bool.html
/// [RFC 4291 errata 4406]: https://www.rfc-editor.org/errata/eid4406
/// [`is_unicast_link_local()`]: ../../std/net/struct.Ipv6Addr.html#method.is_unicast_link_local
pub fn is_unicast_link_local_strict(&self) -> bool {
    // `fe80::/64` with the 54 in-between bits all zero: the first four
    // 16-bit segments must be exactly [0xfe80, 0, 0, 0].
    // (The previous `& 0xffff` masks were no-ops on `u16` and called
    // `segments()` once per comparison; dropped for clarity.)
    let segments = self.segments();
    segments[0] == 0xfe80 && segments[1] == 0 && segments[2] == 0 && segments[3] == 0
}
/// Returns [`true`] if the address is a unicast link-local address (`fe80::/10`).
///
/// This method returns [`true`] for addresses in the range reserved by [RFC 4291 section 2.4],
/// i.e. addresses with the following format:
///
/// ```no_rust
/// |   10     |
/// |  bits    |         54 bits         |          64 bits           |
/// +----------+-------------------------+----------------------------+
/// |1111111010|    arbitrary value      |       interface ID         |
/// +----------+-------------------------+----------------------------+
/// ```
///
/// As a result, this method considers addresses such as `fe80:0:0:1::` or `fe81::` to be
/// unicast link-local addresses, whereas [`is_unicast_link_local_strict()`] does not. If you
/// need a strict validation fully compliant with the RFC, use
/// [`is_unicast_link_local_strict()`].
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 0);
/// assert!(ip.is_unicast_link_local());
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff);
/// assert!(ip.is_unicast_link_local());
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 1, 0, 0, 0, 0);
/// assert!(ip.is_unicast_link_local());
/// assert!(!ip.is_unicast_link_local_strict());
///
/// let ip = Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 0);
/// assert!(ip.is_unicast_link_local());
/// assert!(!ip.is_unicast_link_local_strict());
/// ```
///
/// # See also
///
/// - [IETF RFC 4291 section 2.4]
/// - [RFC 4291 errata 4406] (which has been rejected but provides useful
///   insight)
///
/// [IETF RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4
/// [`true`]: ../../std/primitive.bool.html
/// [RFC 4291 errata 4406]: https://www.rfc-editor.org/errata/eid4406
/// [`is_unicast_link_local_strict()`]: ../../std/net/struct.Ipv6Addr.html#method.is_unicast_link_local_strict
pub fn is_unicast_link_local(&self) -> bool {
// `fe80::/10`: only the top ten bits of the first segment are checked.
(self.segments()[0] & 0xffc0) == 0xfe80
}
/// Returns [`true`] if this is a deprecated unicast site-local address (fec0::/10). The
/// unicast site-local address format is defined in [RFC 4291 section 2.5.7] as:
///
/// ```no_rust
/// |   10     |
/// |  bits    |         54 bits         |         64 bits            |
/// +----------+-------------------------+----------------------------+
/// |1111111011|        subnet ID        |       interface ID         |
/// +----------+-------------------------+----------------------------+
/// ```
///
/// [`true`]: ../../std/primitive.bool.html
/// [RFC 4291 section 2.5.7]: https://tools.ietf.org/html/rfc4291#section-2.5.7
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// assert_eq!(
///     Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unicast_site_local(),
///     false
/// );
/// assert_eq!(Ipv6Addr::new(0xfec2, 0, 0, 0, 0, 0, 0, 0).is_unicast_site_local(), true);
/// ```
///
/// # Warning
///
/// As per [RFC 3879], the whole `FEC0::/10` prefix is
/// deprecated. New software must not support site-local
/// addresses.
///
/// [RFC 3879]: https://tools.ietf.org/html/rfc3879
pub fn is_unicast_site_local(&self) -> bool {
    // `fec0::/10`: top ten bits of the first segment select the prefix.
    let first = self.segments()[0];
    (first & 0xffc0) == 0xfec0
}
/// Returns [`true`] if this is an address reserved for documentation
/// (2001:db8::/32).
///
/// This property is defined in [IETF RFC 3849].
///
/// [IETF RFC 3849]: https://tools.ietf.org/html/rfc3849
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_documentation(), false);
/// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_documentation(), true);
/// ```
pub fn is_documentation(&self) -> bool {
    // `2001:db8::/32`: the first two segments are fixed.
    let segments = self.segments();
    segments[0] == 0x2001 && segments[1] == 0xdb8
}
/// Returns [`true`] if the address is a globally routable unicast address.
///
/// The following return false:
///
/// - the loopback address
/// - the link-local addresses
/// - unique local addresses
/// - the unspecified address
/// - the address range reserved for documentation
///
/// This method returns [`true`] for site-local addresses as per [RFC 4291 section 2.5.7]
///
/// ```no_rust
/// The special behavior of [the site-local unicast] prefix defined in [RFC3513] must no longer
/// be supported in new implementations (i.e., new implementations must treat this prefix as
/// Global Unicast).
/// ```
///
/// [`true`]: ../../std/primitive.bool.html
/// [RFC 4291 section 2.5.7]: https://tools.ietf.org/html/rfc4291#section-2.5.7
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_global(), false);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unicast_global(), true);
/// ```
pub fn is_unicast_global(&self) -> bool {
// Any unicast address not in one of the special-purpose ranges above is
// treated as globally routable. Site-local (`fec0::/10`) is deliberately
// NOT excluded, per RFC 4291 section 2.5.7 (see doc comment).
!self.is_multicast()
&& !self.is_loopback()
&& !self.is_unicast_link_local()
&& !self.is_unique_local()
&& !self.is_unspecified()
&& !self.is_documentation()
}
/// Returns the address's multicast scope if the address is multicast.
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::{Ipv6Addr, Ipv6MulticastScope};
///
/// assert_eq!(
///     Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0).multicast_scope(),
///     Some(Ipv6MulticastScope::Global)
/// );
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).multicast_scope(), None);
/// ```
pub fn multicast_scope(&self) -> Option<Ipv6MulticastScope> {
    // Non-multicast addresses carry no scope at all.
    if !self.is_multicast() {
        return None;
    }
    // The scope is encoded in the low four bits of the first segment
    // (`ffXs::`, where `s` is the scope nibble).
    match self.segments()[0] & 0x000f {
        1 => Some(Ipv6MulticastScope::InterfaceLocal),
        2 => Some(Ipv6MulticastScope::LinkLocal),
        3 => Some(Ipv6MulticastScope::RealmLocal),
        4 => Some(Ipv6MulticastScope::AdminLocal),
        5 => Some(Ipv6MulticastScope::SiteLocal),
        8 => Some(Ipv6MulticastScope::OrganizationLocal),
        14 => Some(Ipv6MulticastScope::Global),
        _ => None,
    }
}
/// Returns [`true`] if this is a multicast address (ff00::/8).
///
/// This property is defined by [IETF RFC 4291].
///
/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_multicast(), true);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_multicast(), false);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_multicast(&self) -> bool {
    // `ff00::/8`: equivalent to checking that the high byte of the first
    // segment is 0xff, which is exactly the first octet.
    self.octets()[0] == 0xff
}
/// Converts this address to an [IPv4 address]. Returns [`None`] if this address is
/// neither IPv4-compatible or IPv4-mapped.
///
/// ::a.b.c.d and ::ffff:a.b.c.d become a.b.c.d
///
/// [IPv4 address]: ../../std/net/struct.Ipv4Addr.html
/// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
/// ```
/// use std::net::{Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4(), None);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4(),
///            Some(Ipv4Addr::new(192, 10, 2, 255)));
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4(),
///            Some(Ipv4Addr::new(0, 0, 0, 1)));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_ipv4(&self) -> Option<Ipv4Addr> {
// The first five segments must be zero; the sixth selects the flavor:
// 0 => IPv4-compatible (`::a.b.c.d`), 0xffff => IPv4-mapped
// (`::ffff:a.b.c.d`). Note that `::` and `::1` also match the
// compatible form (as the doc example above shows for `::1`).
match self.segments() {
[0, 0, 0, 0, 0, f, g, h] if f == 0 || f == 0xffff => {
// The last two segments are split back into four octets.
Some(Ipv4Addr::new((g >> 8) as u8, g as u8, (h >> 8) as u8, h as u8))
}
_ => None,
}
}
/// Returns the sixteen eight-bit integers the IPv6 address consists of.
///
/// ```
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).octets(),
///            [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
/// ```
#[stable(feature = "ipv6_to_octets", since = "1.12.0")]
#[rustc_const_stable(feature = "const_ipv6", since = "1.32.0")]
pub const fn octets(&self) -> [u8; 16] {
// `s6_addr` already holds the bytes in network (big-endian) order, so
// this is just a copy of the inner array.
self.inner.s6_addr
}
}
/// Write an Ipv6Addr, conforming to the canonical style described by
/// [RFC 5952](https://tools.ietf.org/html/rfc5952).
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for Ipv6Addr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// If there are no alignment requirements, write out the IP address to
// f. Otherwise, write it to a local buffer, then use f.pad.
if f.precision().is_none() && f.width().is_none() {
let segments = self.segments();
// Special case for :: and ::1; otherwise they get written with the
// IPv4 formatter
if self.is_unspecified() {
f.write_str("::")
} else if self.is_loopback() {
f.write_str("::1")
} else if let Some(ipv4) = self.to_ipv4() {
// `to_ipv4` only matches when segment 5 is 0 or 0xffff, which is
// why the `unreachable!` arm below is sound.
match segments[5] {
// IPv4 Compatible address
0 => write!(f, "::{}", ipv4),
// IPv4 Mapped address
0xffff => write!(f, "::ffff:{}", ipv4),
_ => unreachable!(),
}
} else {
// `Span` marks a run of consecutive zero segments.
#[derive(Copy, Clone, Default)]
struct Span {
start: usize,
len: usize,
}
// Find the inner 0 span
// (single left-to-right scan tracking the current run and the
// longest run seen so far; ties keep the earlier run, per RFC 5952).
let zeroes = {
let mut longest = Span::default();
let mut current = Span::default();
for (i, &segment) in segments.iter().enumerate() {
if segment == 0 {
if current.len == 0 {
current.start = i;
}
current.len += 1;
if current.len > longest.len {
longest = current;
}
} else {
current = Span::default();
}
}
longest
};
/// Write a colon-separated part of the address
#[inline]
fn fmt_subslice(f: &mut fmt::Formatter<'_>, chunk: &[u16]) -> fmt::Result {
if let Some(first) = chunk.first() {
fmt::LowerHex::fmt(first, f)?;
for segment in &chunk[1..] {
f.write_char(':')?;
fmt::LowerHex::fmt(segment, f)?;
}
}
Ok(())
}
// RFC 5952: only compress a run of two or more zero segments.
if zeroes.len > 1 {
fmt_subslice(f, &segments[..zeroes.start])?;
f.write_str("::")?;
fmt_subslice(f, &segments[zeroes.start + zeroes.len..])
} else {
fmt_subslice(f, &segments)
}
}
} else {
// Slow path: write the address to a local buffer, the use f.pad.
// Defined recursively by using the fast path to write to the
// buffer.
// This is the largest possible size of an IPv6 address
const IPV6_BUF_LEN: usize = (4 * 8) + 7;
let mut buf = [0u8; IPV6_BUF_LEN];
let mut buf_slice = &mut buf[..];
// Note: This call to write should never fail, so unwrap is okay.
write!(buf_slice, "{}", self).unwrap();
// `write!` advances `buf_slice`; the remainder gives the byte count.
let len = IPV6_BUF_LEN - buf_slice.len();
// This is safe because we know exactly what can be in this buffer
// (only ASCII hex digits, colons, and dots).
let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
f.pad(buf)
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Ipv6Addr {
// `Debug` output is intentionally identical to `Display` (RFC 5952 form).
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self, fmt)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Clone for Ipv6Addr {
// A plain bitwise copy of the wrapped `in6_addr` value.
fn clone(&self) -> Ipv6Addr {
*self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for Ipv6Addr {
// Byte-wise comparison of the raw 16-byte address arrays.
fn eq(&self, other: &Ipv6Addr) -> bool {
self.inner.s6_addr == other.inner.s6_addr
}
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialEq<IpAddr> for Ipv6Addr {
    // An `Ipv6Addr` can only equal the `V6` variant; it never compares
    // equal to a `V4` address.
    fn eq(&self, other: &IpAddr) -> bool {
        if let IpAddr::V6(v6) = other { self == v6 } else { false }
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialEq<Ipv6Addr> for IpAddr {
    // Mirror of `PartialEq<IpAddr> for Ipv6Addr` above, keeping the
    // comparison symmetric: only the `V6` variant can match.
    fn eq(&self, other: &Ipv6Addr) -> bool {
        if let IpAddr::V6(v6) = self { v6 == other } else { false }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Byte-array equality is a total equivalence relation, so `Eq` holds.
impl Eq for Ipv6Addr {}
#[stable(feature = "rust1", since = "1.0.0")]
impl hash::Hash for Ipv6Addr {
    // Hashes exactly the bytes that `PartialEq` compares, preserving the
    // `k1 == k2 => hash(k1) == hash(k2)` contract required by `Hash`.
    fn hash<H: hash::Hasher>(&self, s: &mut H) {
        self.inner.s6_addr.hash(s)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialOrd for Ipv6Addr {
    // Delegates to the total order defined by the `Ord` impl below.
    fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialOrd<Ipv6Addr> for IpAddr {
    // Every IPv4 address sorts before every IPv6 address; two `V6` values
    // are ordered by the `Ipv6Addr` comparison.
    fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> {
        if let IpAddr::V6(v6) = self {
            v6.partial_cmp(other)
        } else {
            Some(Ordering::Less)
        }
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialOrd<IpAddr> for Ipv6Addr {
    // Mirror of the impl above: an `Ipv6Addr` is greater than any `V4`
    // address and compares segment-wise against a `V6` one.
    fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> {
        if let IpAddr::V6(v6) = other {
            self.partial_cmp(v6)
        } else {
            Some(Ordering::Greater)
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for Ipv6Addr {
    // Lexicographic comparison of the eight 16-bit segments, most
    // significant segment first.
    fn cmp(&self, other: &Ipv6Addr) -> Ordering {
        self.segments().cmp(&other.segments())
    }
}
// Internal: exposes the underlying platform `in6_addr` to the
// `sys_common` glue (e.g. for building socket addresses).
impl AsInner<c::in6_addr> for Ipv6Addr {
    fn as_inner(&self) -> &c::in6_addr {
        &self.inner
    }
}
// Internal: wraps a platform `in6_addr` produced by the system layer.
impl FromInner<c::in6_addr> for Ipv6Addr {
    fn from_inner(addr: c::in6_addr) -> Ipv6Addr {
        Ipv6Addr { inner: addr }
    }
}
#[stable(feature = "i128", since = "1.26.0")]
impl From<Ipv6Addr> for u128 {
    /// Convert an `Ipv6Addr` into a host byte order `u128`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv6Addr;
    ///
    /// let addr = Ipv6Addr::new(
    ///     0x1020, 0x3040, 0x5060, 0x7080,
    ///     0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
    /// );
    /// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, u128::from(addr));
    /// ```
    fn from(ip: Ipv6Addr) -> u128 {
        // The octets are in network (big-endian) order, so a single
        // big-endian conversion yields the host-order integer.
        u128::from_be_bytes(ip.octets())
    }
}
#[stable(feature = "i128", since = "1.26.0")]
impl From<u128> for Ipv6Addr {
    /// Convert a host byte order `u128` into an `Ipv6Addr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv6Addr;
    ///
    /// let addr = Ipv6Addr::from(0x102030405060708090A0B0C0D0E0F00D_u128);
    /// assert_eq!(
    ///     Ipv6Addr::new(
    ///         0x1020, 0x3040, 0x5060, 0x7080,
    ///         0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
    ///     ),
    ///     addr);
    /// ```
    fn from(ip: u128) -> Ipv6Addr {
        // The big-endian byte representation of the integer is exactly the
        // network-order octet layout expected by `From<[u8; 16]>`.
        Ipv6Addr::from(ip.to_be_bytes())
    }
}
#[stable(feature = "ipv6_from_octets", since = "1.9.0")]
impl From<[u8; 16]> for Ipv6Addr {
    /// Creates an `Ipv6Addr` from a sixteen element byte array.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv6Addr;
    ///
    /// let addr = Ipv6Addr::from([
    ///     25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8,
    ///     17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8,
    /// ]);
    /// assert_eq!(
    ///     Ipv6Addr::new(
    ///         0x1918, 0x1716,
    ///         0x1514, 0x1312,
    ///         0x1110, 0x0f0e,
    ///         0x0d0c, 0x0b0a
    ///     ),
    ///     addr
    /// );
    /// ```
    fn from(octets: [u8; 16]) -> Ipv6Addr {
        // The octets are stored verbatim as the platform `s6_addr` field.
        Ipv6Addr::from_inner(c::in6_addr { s6_addr: octets })
    }
}
#[stable(feature = "ipv6_from_segments", since = "1.16.0")]
impl From<[u16; 8]> for Ipv6Addr {
    /// Creates an `Ipv6Addr` from an eight element 16-bit array.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv6Addr;
    ///
    /// let addr = Ipv6Addr::from([
    ///     525u16, 524u16, 523u16, 522u16,
    ///     521u16, 520u16, 519u16, 518u16,
    /// ]);
    /// assert_eq!(
    ///     Ipv6Addr::new(
    ///         0x20d, 0x20c,
    ///         0x20b, 0x20a,
    ///         0x209, 0x208,
    ///         0x207, 0x206
    ///     ),
    ///     addr
    /// );
    /// ```
    fn from(segments: [u16; 8]) -> Ipv6Addr {
        // Destructure the array so each segment can be passed positionally
        // to the canonical constructor.
        let [a, b, c, d, e, f, g, h] = segments;
        Ipv6Addr::new(a, b, c, d, e, f, g, h)
    }
}
#[stable(feature = "ip_from_slice", since = "1.17.0")]
impl From<[u8; 16]> for IpAddr {
    /// Creates an `IpAddr::V6` from a sixteen element byte array.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv6Addr};
    ///
    /// let addr = IpAddr::from([
    ///     25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8,
    ///     17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8,
    /// ]);
    /// assert_eq!(
    ///     IpAddr::V6(Ipv6Addr::new(
    ///         0x1918, 0x1716,
    ///         0x1514, 0x1312,
    ///         0x1110, 0x0f0e,
    ///         0x0d0c, 0x0b0a
    ///     )),
    ///     addr
    /// );
    /// ```
    fn from(octets: [u8; 16]) -> IpAddr {
        // Reuse the `From<[u8; 16]> for Ipv6Addr` conversion, then wrap.
        IpAddr::V6(octets.into())
    }
}
#[stable(feature = "ip_from_slice", since = "1.17.0")]
impl From<[u16; 8]> for IpAddr {
    /// Creates an `IpAddr::V6` from an eight element 16-bit array.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv6Addr};
    ///
    /// let addr = IpAddr::from([
    ///     525u16, 524u16, 523u16, 522u16,
    ///     521u16, 520u16, 519u16, 518u16,
    /// ]);
    /// assert_eq!(
    ///     IpAddr::V6(Ipv6Addr::new(
    ///         0x20d, 0x20c,
    ///         0x20b, 0x20a,
    ///         0x209, 0x208,
    ///         0x207, 0x206
    ///     )),
    ///     addr
    /// );
    /// ```
    fn from(segments: [u16; 8]) -> IpAddr {
        // Reuse the `From<[u16; 8]> for Ipv6Addr` conversion, then wrap.
        IpAddr::V6(segments.into())
    }
}
// Tests for this module
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use crate::net::test::{sa4, sa6, tsa};
use crate::net::*;
use crate::str::FromStr;
#[test]
fn test_from_str_ipv4() {
assert_eq!(Ok(Ipv4Addr::new(127, 0, 0, 1)), "127.0.0.1".parse());
assert_eq!(Ok(Ipv4Addr::new(255, 255, 255, 255)), "255.255.255.255".parse());
assert_eq!(Ok(Ipv4Addr::new(0, 0, 0, 0)), "0.0.0.0".parse());
// out of range
let none: Option<Ipv4Addr> = "256.0.0.1".parse().ok();
assert_eq!(None, none);
// too short
let none: Option<Ipv4Addr> = "255.0.0".parse().ok();
assert_eq!(None, none);
// too long
let none: Option<Ipv4Addr> = "255.0.0.1.2".parse().ok();
assert_eq!(None, none);
// no number between dots
let none: Option<Ipv4Addr> = "255.0..1".parse().ok();
assert_eq!(None, none);
}
#[test]
fn test_from_str_ipv6() {
assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "0:0:0:0:0:0:0:0".parse());
assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "0:0:0:0:0:0:0:1".parse());
assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "::1".parse());
assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "::".parse());
assert_eq!(
Ok(Ipv6Addr::new(0x2a02, 0x6b8, 0, 0, 0, 0, 0x11, 0x11)),
"2a02:6b8::11:11".parse()
);
// too long group
let none: Option<Ipv6Addr> = "::00000".parse().ok();
assert_eq!(None, none);
// too short
let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7".parse().ok();
assert_eq!(None, none);
// too long
let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:8:9".parse().ok();
assert_eq!(None, none);
// triple colon
let none: Option<Ipv6Addr> = "1:2:::6:7:8".parse().ok();
assert_eq!(None, none);
// two double colons
let none: Option<Ipv6Addr> = "1:2::6::8".parse().ok();
assert_eq!(None, none);
// `::` indicating zero groups of zeros
let none: Option<Ipv6Addr> = "1:2:3:4::5:6:7:8".parse().ok();
assert_eq!(None, none);
}
#[test]
fn test_from_str_ipv4_in_ipv6() {
assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 49152, 545)), "::192.0.2.33".parse());
assert_eq!(
Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0xFFFF, 49152, 545)),
"::FFFF:192.0.2.33".parse()
);
assert_eq!(
Ok(Ipv6Addr::new(0x64, 0xff9b, 0, 0, 0, 0, 49152, 545)),
"64:ff9b::192.0.2.33".parse()
);
assert_eq!(
Ok(Ipv6Addr::new(0x2001, 0xdb8, 0x122, 0xc000, 0x2, 0x2100, 49152, 545)),
"2001:db8:122:c000:2:2100:192.0.2.33".parse()
);
// colon after v4
let none: Option<Ipv4Addr> = "::127.0.0.1:".parse().ok();
assert_eq!(None, none);
// not enough groups
let none: Option<Ipv6Addr> = "1.2.3.4.5:127.0.0.1".parse().ok();
assert_eq!(None, none);
// too many groups
let none: Option<Ipv6Addr> = "1.2.3.4.5:6:7:127.0.0.1".parse().ok();
assert_eq!(None, none);
}
#[test]
fn test_from_str_socket_addr() {
assert_eq!(Ok(sa4(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse());
assert_eq!(
Ok(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80)),
"77.88.21.11:80".parse()
);
assert_eq!(
Ok(sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53)),
"[2a02:6b8:0:1::1]:53".parse()
);
assert_eq!(
Ok(SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0)),
"[2a02:6b8:0:1::1]:53".parse()
);
assert_eq!(
Ok(sa6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22)),
"[::127.0.0.1]:22".parse()
);
assert_eq!(
Ok(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22, 0, 0)),
"[::127.0.0.1]:22".parse()
);
// without port
let none: Option<SocketAddr> = "127.0.0.1".parse().ok();
assert_eq!(None, none);
// without port
let none: Option<SocketAddr> = "127.0.0.1:".parse().ok();
assert_eq!(None, none);
// wrong brackets around v4
let none: Option<SocketAddr> = "[127.0.0.1]:22".parse().ok();
assert_eq!(None, none);
// port out of range
let none: Option<SocketAddr> = "127.0.0.1:123456".parse().ok();
assert_eq!(None, none);
}
#[test]
fn ipv4_addr_to_string() {
// Short address
assert_eq!(Ipv4Addr::new(1, 1, 1, 1).to_string(), "1.1.1.1");
// Long address
assert_eq!(Ipv4Addr::new(127, 127, 127, 127).to_string(), "127.127.127.127");
// Test padding
assert_eq!(&format!("{:16}", Ipv4Addr::new(1, 1, 1, 1)), "1.1.1.1 ");
assert_eq!(&format!("{:>16}", Ipv4Addr::new(1, 1, 1, 1)), " 1.1.1.1");
}
#[test]
fn ipv6_addr_to_string() {
// ipv4-mapped address
let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280);
assert_eq!(a1.to_string(), "::ffff:192.0.2.128");
// ipv4-compatible address
let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280);
assert_eq!(a1.to_string(), "::192.0.2.128");
// v6 address with no zero segments
assert_eq!(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15).to_string(), "8:9:a:b:c:d:e:f");
// longest possible IPv6 length
assert_eq!(
Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888)
.to_string(),
"1111:2222:3333:4444:5555:6666:7777:8888"
);
// padding
assert_eq!(
&format!("{:20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)),
"1:2:3:4:5:6:7:8 "
);
assert_eq!(
&format!("{:>20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)),
" 1:2:3:4:5:6:7:8"
);
// reduce a single run of zeros
assert_eq!(
"ae::ffff:102:304",
Ipv6Addr::new(0xae, 0, 0, 0, 0, 0xffff, 0x0102, 0x0304).to_string()
);
// don't reduce just a single zero segment
assert_eq!("1:2:3:4:5:6:0:8", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 0, 8).to_string());
// 'any' address
assert_eq!("::", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).to_string());
// loopback address
assert_eq!("::1", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_string());
// ends in zeros
assert_eq!("1::", Ipv6Addr::new(1, 0, 0, 0, 0, 0, 0, 0).to_string());
// two runs of zeros, second one is longer
assert_eq!("1:0:0:4::8", Ipv6Addr::new(1, 0, 0, 4, 0, 0, 0, 8).to_string());
// two runs of zeros, equal length
assert_eq!("1::4:5:0:0:8", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8).to_string());
}
#[test]
fn ipv4_to_ipv6() {
assert_eq!(
Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678),
Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_mapped()
);
assert_eq!(
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678),
Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_compatible()
);
}
#[test]
fn ipv6_to_ipv4() {
assert_eq!(
Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4(),
Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
);
assert_eq!(
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4(),
Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
);
assert_eq!(Ipv6Addr::new(0, 0, 1, 0, 0, 0, 0x1234, 0x5678).to_ipv4(), None);
}
#[test]
fn ip_properties() {
macro_rules! ip {
($s:expr) => {
IpAddr::from_str($s).unwrap()
};
}
macro_rules! check {
($s:expr) => {
check!($s, 0);
};
($s:expr, $mask:expr) => {{
let unspec: u8 = 1 << 0;
let loopback: u8 = 1 << 1;
let global: u8 = 1 << 2;
let multicast: u8 = 1 << 3;
let doc: u8 = 1 << 4;
if ($mask & unspec) == unspec {
assert!(ip!($s).is_unspecified());
} else {
assert!(!ip!($s).is_unspecified());
}
if ($mask & loopback) == loopback {
assert!(ip!($s).is_loopback());
} else {
assert!(!ip!($s).is_loopback());
}
if ($mask & global) == global {
assert!(ip!($s).is_global());
} else {
assert!(!ip!($s).is_global());
}
if ($mask & multicast) == multicast {
assert!(ip!($s).is_multicast());
} else {
assert!(!ip!($s).is_multicast());
}
if ($mask & doc) == doc {
assert!(ip!($s).is_documentation());
} else {
assert!(!ip!($s).is_documentation());
}
}};
}
let unspec: u8 = 1 << 0;
let loopback: u8 = 1 << 1;
let global: u8 = 1 << 2;
let multicast: u8 = 1 << 3;
let doc: u8 = 1 << 4;
check!("0.0.0.0", unspec);
check!("0.0.0.1");
check!("0.1.0.0");
check!("10.9.8.7");
check!("127.1.2.3", loopback);
check!("172.31.254.253");
check!("169.254.253.242");
check!("192.0.2.183", doc);
check!("192.1.2.183", global);
check!("192.168.254.253");
check!("198.51.100.0", doc);
check!("203.0.113.0", doc);
check!("203.2.113.0", global);
check!("224.0.0.0", global | multicast);
check!("239.255.255.255", global | multicast);
check!("255.255.255.255");
// make sure benchmarking addresses are not global
check!("198.18.0.0");
check!("198.18.54.2");
check!("198.19.255.255");
// make sure addresses reserved for protocol assignment are not global
check!("192.0.0.0");
check!("192.0.0.255");
check!("192.0.0.100");
// make sure reserved addresses are not global
check!("240.0.0.0");
check!("251.54.1.76");
check!("254.255.255.255");
// make sure shared addresses are not global
check!("100.64.0.0");
check!("100.127.255.255");
check!("100.100.100.0");
check!("::", unspec);
check!("::1", loopback);
check!("::0.0.0.2", global);
check!("1::", global);
check!("fc00::");
check!("fdff:ffff::");
check!("fe80:ffff::");
check!("febf:ffff::");
check!("fec0::", global);
check!("ff01::", multicast);
check!("ff02::", multicast);
check!("ff03::", multicast);
check!("ff04::", multicast);
check!("ff05::", multicast);
check!("ff08::", multicast);
check!("ff0e::", global | multicast);
check!("2001:db8:85a3::8a2e:370:7334", doc);
check!("102:304:506:708:90a:b0c:d0e:f10", global);
}
#[test]
fn ipv4_properties() {
macro_rules! ip {
($s:expr) => {
Ipv4Addr::from_str($s).unwrap()
};
}
macro_rules! check {
($s:expr) => {
check!($s, 0);
};
($s:expr, $mask:expr) => {{
let unspec: u16 = 1 << 0;
let loopback: u16 = 1 << 1;
let private: u16 = 1 << 2;
let link_local: u16 = 1 << 3;
let global: u16 = 1 << 4;
let multicast: u16 = 1 << 5;
let broadcast: u16 = 1 << 6;
let documentation: u16 = 1 << 7;
let benchmarking: u16 = 1 << 8;
let ietf_protocol_assignment: u16 = 1 << 9;
let reserved: u16 = 1 << 10;
let shared: u16 = 1 << 11;
if ($mask & unspec) == unspec {
assert!(ip!($s).is_unspecified());
} else {
assert!(!ip!($s).is_unspecified());
}
if ($mask & loopback) == loopback {
assert!(ip!($s).is_loopback());
} else {
assert!(!ip!($s).is_loopback());
}
if ($mask & private) == private {
assert!(ip!($s).is_private());
} else {
assert!(!ip!($s).is_private());
}
if ($mask & link_local) == link_local {
assert!(ip!($s).is_link_local());
} else {
assert!(!ip!($s).is_link_local());
}
if ($mask & global) == global {
assert!(ip!($s).is_global());
} else {
assert!(!ip!($s).is_global());
}
if ($mask & multicast) == multicast {
assert!(ip!($s).is_multicast());
} else {
assert!(!ip!($s).is_multicast());
}
if ($mask & broadcast) == broadcast {
assert!(ip!($s).is_broadcast());
} else {
assert!(!ip!($s).is_broadcast());
}
if ($mask & documentation) == documentation {
assert!(ip!($s).is_documentation());
} else {
assert!(!ip!($s).is_documentation());
}
if ($mask & benchmarking) == benchmarking {
assert!(ip!($s).is_benchmarking());
} else {
assert!(!ip!($s).is_benchmarking());
}
if ($mask & ietf_protocol_assignment) == ietf_protocol_assignment {
assert!(ip!($s).is_ietf_protocol_assignment());
} else {
assert!(!ip!($s).is_ietf_protocol_assignment());
}
if ($mask & reserved) == reserved {
assert!(ip!($s).is_reserved());
} else {
assert!(!ip!($s).is_reserved());
}
if ($mask & shared) == shared {
assert!(ip!($s).is_shared());
} else {
assert!(!ip!($s).is_shared());
}
}};
}
let unspec: u16 = 1 << 0;
let loopback: u16 = 1 << 1;
let private: u16 = 1 << 2;
let link_local: u16 = 1 << 3;
let global: u16 = 1 << 4;
let multicast: u16 = 1 << 5;
let broadcast: u16 = 1 << 6;
let documentation: u16 = 1 << 7;
let benchmarking: u16 = 1 << 8;
let ietf_protocol_assignment: u16 = 1 << 9;
let reserved: u16 = 1 << 10;
let shared: u16 = 1 << 11;
check!("0.0.0.0", unspec);
check!("0.0.0.1");
check!("0.1.0.0");
check!("10.9.8.7", private);
check!("127.1.2.3", loopback);
check!("172.31.254.253", private);
check!("169.254.253.242", link_local);
check!("192.0.2.183", documentation);
check!("192.1.2.183", global);
check!("192.168.254.253", private);
check!("198.51.100.0", documentation);
check!("203.0.113.0", documentation);
check!("203.2.113.0", global);
check!("224.0.0.0", global | multicast);
check!("239.255.255.255", global | multicast);
check!("255.255.255.255", broadcast);
check!("198.18.0.0", benchmarking);
check!("198.18.54.2", benchmarking);
check!("198.19.255.255", benchmarking);
check!("192.0.0.0", ietf_protocol_assignment);
check!("192.0.0.255", ietf_protocol_assignment);
check!("192.0.0.100", ietf_protocol_assignment);
check!("240.0.0.0", reserved);
check!("251.54.1.76", reserved);
check!("254.255.255.255", reserved);
check!("100.64.0.0", shared);
check!("100.127.255.255", shared);
check!("100.100.100.0", shared);
}
#[test]
fn ipv6_properties() {
macro_rules! ip {
($s:expr) => {
Ipv6Addr::from_str($s).unwrap()
};
}
macro_rules! check {
($s:expr, &[$($octet:expr),*], $mask:expr) => {
assert_eq!($s, ip!($s).to_string());
let octets = &[$($octet),*];
assert_eq!(&ip!($s).octets(), octets);
assert_eq!(Ipv6Addr::from(*octets), ip!($s));
let unspecified: u16 = 1 << 0;
let loopback: u16 = 1 << 1;
let unique_local: u16 = 1 << 2;
let global: u16 = 1 << 3;
let unicast_link_local: u16 = 1 << 4;
let unicast_link_local_strict: u16 = 1 << 5;
let unicast_site_local: u16 = 1 << 6;
let unicast_global: u16 = 1 << 7;
let documentation: u16 = 1 << 8;
let multicast_interface_local: u16 = 1 << 9;
let multicast_link_local: u16 = 1 << 10;
let multicast_realm_local: u16 = 1 << 11;
let multicast_admin_local: u16 = 1 << 12;
let multicast_site_local: u16 = 1 << 13;
let multicast_organization_local: u16 = 1 << 14;
let multicast_global: u16 = 1 << 15;
let multicast: u16 = multicast_interface_local
| multicast_admin_local
| multicast_global
| multicast_link_local
| multicast_realm_local
| multicast_site_local
| multicast_organization_local;
if ($mask & unspecified) == unspecified {
assert!(ip!($s).is_unspecified());
} else {
assert!(!ip!($s).is_unspecified());
}
if ($mask & loopback) == loopback {
assert!(ip!($s).is_loopback());
} else {
assert!(!ip!($s).is_loopback());
}
if ($mask & unique_local) == unique_local {
assert!(ip!($s).is_unique_local());
} else {
assert!(!ip!($s).is_unique_local());
}
if ($mask & global) == global {
assert!(ip!($s).is_global());
} else {
assert!(!ip!($s).is_global());
}
if ($mask & unicast_link_local) == unicast_link_local {
assert!(ip!($s).is_unicast_link_local());
} else {
assert!(!ip!($s).is_unicast_link_local());
}
if ($mask & unicast_link_local_strict) == unicast_link_local_strict {
assert!(ip!($s).is_unicast_link_local_strict());
} else {
assert!(!ip!($s).is_unicast_link_local_strict());
}
if ($mask & unicast_site_local) == unicast_site_local {
assert!(ip!($s).is_unicast_site_local());
} else {
assert!(!ip!($s).is_unicast_site_local());
}
if ($mask & unicast_global) == unicast_global {
assert!(ip!($s).is_unicast_global());
} else {
assert!(!ip!($s).is_unicast_global());
}
if ($mask & documentation) == documentation {
assert!(ip!($s).is_documentation());
} else {
assert!(!ip!($s).is_documentation());
}
if ($mask & multicast) != 0 {
assert!(ip!($s).multicast_scope().is_some());
assert!(ip!($s).is_multicast());
} else {
assert!(ip!($s).multicast_scope().is_none());
assert!(!ip!($s).is_multicast());
}
if ($mask & multicast_interface_local) == multicast_interface_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::InterfaceLocal);
}
if ($mask & multicast_link_local) == multicast_link_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::LinkLocal);
}
if ($mask & multicast_realm_local) == multicast_realm_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::RealmLocal);
}
if ($mask & multicast_admin_local) == multicast_admin_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::AdminLocal);
}
if ($mask & multicast_site_local) == multicast_site_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::SiteLocal);
}
if ($mask & multicast_organization_local) == multicast_organization_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::OrganizationLocal);
}
if ($mask & multicast_global) == multicast_global {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::Global);
}
}
}
let unspecified: u16 = 1 << 0;
let loopback: u16 = 1 << 1;
let unique_local: u16 = 1 << 2;
let global: u16 = 1 << 3;
let unicast_link_local: u16 = 1 << 4;
let unicast_link_local_strict: u16 = 1 << 5;
let unicast_site_local: u16 = 1 << 6;
let unicast_global: u16 = 1 << 7;
let documentation: u16 = 1 << 8;
let multicast_interface_local: u16 = 1 << 9;
let multicast_link_local: u16 = 1 << 10;
let multicast_realm_local: u16 = 1 << 11;
let multicast_admin_local: u16 = 1 << 12;
let multicast_site_local: u16 = 1 << 13;
let multicast_organization_local: u16 = 1 << 14;
let multicast_global: u16 = 1 << 15;
check!("::", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unspecified);
check!("::1", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], loopback);
check!(
"::0.0.0.2",
&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2],
global | unicast_global
);
check!("1::", &[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], global | unicast_global);
check!("fc00::", &[0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unique_local);
check!(
"fdff:ffff::",
&[0xfd, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unique_local
);
check!(
"fe80:ffff::",
&[0xfe, 0x80, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_link_local
);
check!(
"fe80::",
&[0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_link_local | unicast_link_local_strict
);
check!(
"febf:ffff::",
&[0xfe, 0xbf, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_link_local
);
check!(
"febf::",
&[0xfe, 0xbf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_link_local
);
check!(
"febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff",
&[
0xfe, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff
],
unicast_link_local
);
check!(
"fe80::ffff:ffff:ffff:ffff",
&[
0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff
],
unicast_link_local | unicast_link_local_strict
);
check!(
"fe80:0:0:1::",
&[0xfe, 0x80, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_link_local
);
check!(
"fec0::",
&[0xfe, 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_site_local | unicast_global | global
);
check!(
"ff01::",
&[0xff, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_interface_local
);
check!(
"ff02::",
&[0xff, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_link_local
);
check!(
"ff03::",
&[0xff, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_realm_local
);
check!(
"ff04::",
&[0xff, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_admin_local
);
check!(
"ff05::",
&[0xff, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_site_local
);
check!(
"ff08::",
&[0xff, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_organization_local
);
check!(
"ff0e::",
&[0xff, 0xe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_global | global
);
check!(
"2001:db8:85a3::8a2e:370:7334",
&[0x20, 1, 0xd, 0xb8, 0x85, 0xa3, 0, 0, 0, 0, 0x8a, 0x2e, 3, 0x70, 0x73, 0x34],
documentation
);
check!(
"102:304:506:708:90a:b0c:d0e:f10",
&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
global | unicast_global
);
}
#[test]
fn to_socket_addr_socketaddr() {
let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 12345);
assert_eq!(Ok(vec![a]), tsa(a));
}
#[test]
fn test_ipv4_to_int() {
let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44);
assert_eq!(u32::from(a), 0x11223344);
}
#[test]
fn test_int_to_ipv4() {
let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44);
assert_eq!(Ipv4Addr::from(0x11223344), a);
}
#[test]
fn test_ipv6_to_int() {
let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11);
assert_eq!(u128::from(a), 0x112233445566778899aabbccddeeff11u128);
}
#[test]
fn test_int_to_ipv6() {
let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11);
assert_eq!(Ipv6Addr::from(0x112233445566778899aabbccddeeff11u128), a);
}
#[test]
fn ipv4_from_constructors() {
assert_eq!(Ipv4Addr::LOCALHOST, Ipv4Addr::new(127, 0, 0, 1));
assert!(Ipv4Addr::LOCALHOST.is_loopback());
assert_eq!(Ipv4Addr::UNSPECIFIED, Ipv4Addr::new(0, 0, 0, 0));
assert!(Ipv4Addr::UNSPECIFIED.is_unspecified());
assert_eq!(Ipv4Addr::BROADCAST, Ipv4Addr::new(255, 255, 255, 255));
assert!(Ipv4Addr::BROADCAST.is_broadcast());
}
#[test]
fn ipv6_from_contructors() {
assert_eq!(Ipv6Addr::LOCALHOST, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
assert!(Ipv6Addr::LOCALHOST.is_loopback());
assert_eq!(Ipv6Addr::UNSPECIFIED, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
assert!(Ipv6Addr::UNSPECIFIED.is_unspecified());
}
#[test]
fn ipv4_from_octets() {
assert_eq!(Ipv4Addr::from([127, 0, 0, 1]), Ipv4Addr::new(127, 0, 0, 1))
}
#[test]
fn ipv6_from_segments() {
let from_u16s =
Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]);
let new = Ipv6Addr::new(0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff);
assert_eq!(new, from_u16s);
}
#[test]
fn ipv6_from_octets() {
let from_u16s =
Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]);
let from_u8s = Ipv6Addr::from([
0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd,
0xee, 0xff,
]);
assert_eq!(from_u16s, from_u8s);
}
#[test]
fn cmp() {
let v41 = Ipv4Addr::new(100, 64, 3, 3);
let v42 = Ipv4Addr::new(192, 0, 2, 2);
let v61 = "2001:db8:f00::1002".parse::<Ipv6Addr>().unwrap();
let v62 = "2001:db8:f00::2001".parse::<Ipv6Addr>().unwrap();
assert!(v41 < v42);
assert!(v61 < v62);
assert_eq!(v41, IpAddr::V4(v41));
assert_eq!(v61, IpAddr::V6(v61));
assert!(v41 != IpAddr::V4(v42));
assert!(v61 != IpAddr::V6(v62));
assert!(v41 < IpAddr::V4(v42));
assert!(v61 < IpAddr::V6(v62));
assert!(IpAddr::V4(v41) < v42);
assert!(IpAddr::V6(v61) < v62);
assert!(v41 < IpAddr::V6(v61));
assert!(IpAddr::V4(v41) < v61);
}
#[test]
fn is_v4() {
let ip = IpAddr::V4(Ipv4Addr::new(100, 64, 3, 3));
assert!(ip.is_ipv4());
assert!(!ip.is_ipv6());
}
#[test]
fn is_v6() {
let ip = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678));
assert!(!ip.is_ipv4());
assert!(ip.is_ipv6());
}
}
// Remove unused FromInner impl for Ipv4Addr
#![unstable(
feature = "ip",
reason = "extra functionality has not been \
scrutinized to the level that it should \
be to be stable",
issue = "27709"
)]
use crate::cmp::Ordering;
use crate::fmt::{self, Write as FmtWrite};
use crate::hash;
use crate::io::Write as IoWrite;
use crate::sys::net::netc as c;
use crate::sys_common::{AsInner, FromInner};
/// An IP address, either IPv4 or IPv6.
///
/// This enum can contain either an [`Ipv4Addr`] or an [`Ipv6Addr`], see their
/// respective documentation for more details.
///
/// The size of an `IpAddr` instance may vary depending on the target operating
/// system.
///
/// [`Ipv4Addr`]: ../../std/net/struct.Ipv4Addr.html
/// [`Ipv6Addr`]: ../../std/net/struct.Ipv6Addr.html
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
/// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
///
/// assert_eq!("127.0.0.1".parse(), Ok(localhost_v4));
/// assert_eq!("::1".parse(), Ok(localhost_v6));
///
/// assert_eq!(localhost_v4.is_ipv6(), false);
/// assert_eq!(localhost_v4.is_ipv4(), true);
/// ```
#[stable(feature = "ip_addr", since = "1.7.0")]
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash, PartialOrd, Ord)]
pub enum IpAddr {
/// An IPv4 address.
#[stable(feature = "ip_addr", since = "1.7.0")]
V4(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv4Addr),
/// An IPv6 address.
#[stable(feature = "ip_addr", since = "1.7.0")]
V6(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv6Addr),
}
/// An IPv4 address.
///
/// IPv4 addresses are defined as 32-bit integers in [IETF RFC 791].
/// They are usually represented as four octets.
///
/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses.
///
/// The size of an `Ipv4Addr` struct may vary depending on the target operating
/// system.
///
/// [IETF RFC 791]: https://tools.ietf.org/html/rfc791
/// [`IpAddr`]: ../../std/net/enum.IpAddr.html
///
/// # Textual representation
///
/// `Ipv4Addr` provides a [`FromStr`] implementation. The four octets are in decimal
/// notation, divided by `.` (this is called "dot-decimal notation").
///
/// [`FromStr`]: ../../std/str/trait.FromStr.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv4Addr;
///
/// let localhost = Ipv4Addr::new(127, 0, 0, 1);
/// assert_eq!("127.0.0.1".parse(), Ok(localhost));
/// assert_eq!(localhost.is_loopback(), true);
/// ```
#[derive(Copy)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Ipv4Addr {
inner: c::in_addr,
}
/// An IPv6 address.
///
/// IPv6 addresses are defined as 128-bit integers in [IETF RFC 4291].
/// They are usually represented as eight 16-bit segments.
///
/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses.
///
/// The size of an `Ipv6Addr` struct may vary depending on the target operating
/// system.
///
/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
/// [`IpAddr`]: ../../std/net/enum.IpAddr.html
///
/// # Textual representation
///
/// `Ipv6Addr` provides a [`FromStr`] implementation. There are many ways to represent
/// an IPv6 address in text, but in general, each segments is written in hexadecimal
/// notation, and segments are separated by `:`. For more information, see
/// [IETF RFC 5952].
///
/// [`FromStr`]: ../../std/str/trait.FromStr.html
/// [IETF RFC 5952]: https://tools.ietf.org/html/rfc5952
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// let localhost = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
/// assert_eq!("::1".parse(), Ok(localhost));
/// assert_eq!(localhost.is_loopback(), true);
/// ```
#[derive(Copy)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Ipv6Addr {
inner: c::in6_addr,
}
/// Scope of an IPv6 multicast address, as defined in
/// [IETF RFC 7346 section 2](https://tools.ietf.org/html/rfc7346#section-2).
///
/// Variants are ordered from narrowest to widest scope.
#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)]
pub enum Ipv6MulticastScope {
    /// Interface-Local scope.
    InterfaceLocal,
    /// Link-Local scope.
    LinkLocal,
    /// Realm-Local scope.
    RealmLocal,
    /// Admin-Local scope.
    AdminLocal,
    /// Site-Local scope.
    SiteLocal,
    /// Organization-Local scope.
    OrganizationLocal,
    /// Global scope.
    Global,
}
impl IpAddr {
    /// Returns [`true`] for the special 'unspecified' address.
    ///
    /// See the documentation for [`Ipv4Addr::is_unspecified`][IPv4] and
    /// [`Ipv6Addr::is_unspecified`][IPv6] for more details.
    ///
    /// [IPv4]: ../../std/net/struct.Ipv4Addr.html#method.is_unspecified
    /// [IPv6]: ../../std/net/struct.Ipv6Addr.html#method.is_unspecified
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
    ///
    /// assert_eq!(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)).is_unspecified(), true);
    /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)).is_unspecified(), true);
    /// ```
    #[stable(feature = "ip_shared", since = "1.12.0")]
    pub fn is_unspecified(&self) -> bool {
        // Delegate to the address-family-specific implementation.
        match self {
            IpAddr::V4(ip) => ip.is_unspecified(),
            IpAddr::V6(ip) => ip.is_unspecified(),
        }
    }
    /// Returns [`true`] if this is a loopback address.
    ///
    /// See the documentation for [`Ipv4Addr::is_loopback`][IPv4] and
    /// [`Ipv6Addr::is_loopback`][IPv6] for more details.
    ///
    /// [IPv4]: ../../std/net/struct.Ipv4Addr.html#method.is_loopback
    /// [IPv6]: ../../std/net/struct.Ipv6Addr.html#method.is_loopback
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
    ///
    /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).is_loopback(), true);
    /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1)).is_loopback(), true);
    /// ```
    #[stable(feature = "ip_shared", since = "1.12.0")]
    pub fn is_loopback(&self) -> bool {
        // Delegate to the address-family-specific implementation.
        match self {
            IpAddr::V4(ip) => ip.is_loopback(),
            IpAddr::V6(ip) => ip.is_loopback(),
        }
    }
/// Returns [`true`] if the address appears to be globally routable.
///
/// See the documentation for [`Ipv4Addr::is_global`][IPv4] and
/// [`Ipv6Addr::is_global`][IPv6] for more details.
///
/// [IPv4]: ../../std/net/struct.Ipv4Addr.html#method.is_global
/// [IPv6]: ../../std/net/struct.Ipv6Addr.html#method.is_global
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(80, 9, 12, 3)).is_global(), true);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1)).is_global(), true);
/// ```
pub fn is_global(&self) -> bool {
match self {
IpAddr::V4(ip) => ip.is_global(),
IpAddr::V6(ip) => ip.is_global(),
}
}
/// Returns [`true`] if this is a multicast address.
///
/// See the documentation for [`Ipv4Addr::is_multicast`][IPv4] and
/// [`Ipv6Addr::is_multicast`][IPv6] for more details.
///
/// [IPv4]: ../../std/net/struct.Ipv4Addr.html#method.is_multicast
/// [IPv6]: ../../std/net/struct.Ipv6Addr.html#method.is_multicast
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(224, 254, 0, 0)).is_multicast(), true);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0)).is_multicast(), true);
/// ```
#[stable(feature = "ip_shared", since = "1.12.0")]
pub fn is_multicast(&self) -> bool {
match self {
IpAddr::V4(ip) => ip.is_multicast(),
IpAddr::V6(ip) => ip.is_multicast(),
}
}
/// Returns [`true`] if this address is in a range designated for documentation.
///
/// See the documentation for [`Ipv4Addr::is_documentation`][IPv4] and
/// [`Ipv6Addr::is_documentation`][IPv6] for more details.
///
/// [IPv4]: ../../std/net/struct.Ipv4Addr.html#method.is_documentation
/// [IPv6]: ../../std/net/struct.Ipv6Addr.html#method.is_documentation
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_documentation(), true);
/// assert_eq!(
/// IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_documentation(),
/// true
/// );
/// ```
pub fn is_documentation(&self) -> bool {
match self {
IpAddr::V4(ip) => ip.is_documentation(),
IpAddr::V6(ip) => ip.is_documentation(),
}
}
/// Returns [`true`] if this address is an [IPv4 address], and [`false`] otherwise.
///
/// [`true`]: ../../std/primitive.bool.html
/// [`false`]: ../../std/primitive.bool.html
/// [IPv4 address]: #variant.V4
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv4(), true);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv4(), false);
/// ```
#[stable(feature = "ipaddr_checker", since = "1.16.0")]
pub fn is_ipv4(&self) -> bool {
matches!(self, IpAddr::V4(_))
}
/// Returns [`true`] if this address is an [IPv6 address], and [`false`] otherwise.
///
/// [`true`]: ../../std/primitive.bool.html
/// [`false`]: ../../std/primitive.bool.html
/// [IPv6 address]: #variant.V6
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv6(), false);
/// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv6(), true);
/// ```
#[stable(feature = "ipaddr_checker", since = "1.16.0")]
pub fn is_ipv6(&self) -> bool {
matches!(self, IpAddr::V6(_))
}
}
impl Ipv4Addr {
    /// Creates a new IPv4 address from four eight-bit octets.
    ///
    /// The result will represent the IP address `a`.`b`.`c`.`d`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// let addr = Ipv4Addr::new(127, 0, 0, 1);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_ipv4", since = "1.32.0")]
    pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr {
        // FIXME: should just be u32::from_be_bytes([a, b, c, d]),
        // once that method is no longer rustc_const_unstable
        Ipv4Addr {
            inner: c::in_addr {
                // Pack the octets into a host-order u32, then convert to the
                // big-endian (network order) representation `s_addr` stores.
                s_addr: u32::to_be(
                    ((a as u32) << 24) | ((b as u32) << 16) | ((c as u32) << 8) | (d as u32),
                ),
            },
        }
    }

    /// An IPv4 address with the address pointing to localhost: 127.0.0.1.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// let addr = Ipv4Addr::LOCALHOST;
    /// assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1));
    /// ```
    #[stable(feature = "ip_constructors", since = "1.30.0")]
    pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1);

    /// An IPv4 address representing an unspecified address: 0.0.0.0
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// let addr = Ipv4Addr::UNSPECIFIED;
    /// assert_eq!(addr, Ipv4Addr::new(0, 0, 0, 0));
    /// ```
    #[stable(feature = "ip_constructors", since = "1.30.0")]
    pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0);

    /// An IPv4 address representing the broadcast address: 255.255.255.255
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// let addr = Ipv4Addr::BROADCAST;
    /// assert_eq!(addr, Ipv4Addr::new(255, 255, 255, 255));
    /// ```
    #[stable(feature = "ip_constructors", since = "1.30.0")]
    pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255);

    /// Returns the four eight-bit integers that make up this address.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// let addr = Ipv4Addr::new(127, 0, 0, 1);
    /// assert_eq!(addr.octets(), [127, 0, 0, 1]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn octets(&self) -> [u8; 4] {
        // This returns the order we want because s_addr is stored in big-endian.
        self.inner.s_addr.to_ne_bytes()
    }

    /// Returns [`true`] for the special 'unspecified' address (0.0.0.0).
    ///
    /// This property is defined in _UNIX Network Programming, Second Edition_,
    /// W. Richard Stevens, p. 891; see also [ip7].
    ///
    /// [ip7]: http://man7.org/linux/man-pages/man7/ip.7.html
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_unspecified(), true);
    /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_unspecified(), false);
    /// ```
    #[stable(feature = "ip_shared", since = "1.12.0")]
    #[rustc_const_stable(feature = "const_ipv4", since = "1.32.0")]
    pub const fn is_unspecified(&self) -> bool {
        // All-zero is the same value in either byte order, so no conversion
        // is needed (and `octets()` is not callable in a const fn here).
        self.inner.s_addr == 0
    }

    /// Returns [`true`] if this is a loopback address (127.0.0.0/8).
    ///
    /// This property is defined by [IETF RFC 1122].
    ///
    /// [IETF RFC 1122]: https://tools.ietf.org/html/rfc1122
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_loopback(), true);
    /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_loopback(), false);
    /// ```
    #[stable(since = "1.7.0", feature = "ip_17")]
    pub fn is_loopback(&self) -> bool {
        self.octets()[0] == 127
    }

    /// Returns [`true`] if this is a private address.
    ///
    /// The private address ranges are defined in [IETF RFC 1918] and include:
    ///
    /// - 10.0.0.0/8
    /// - 172.16.0.0/12
    /// - 192.168.0.0/16
    ///
    /// [IETF RFC 1918]: https://tools.ietf.org/html/rfc1918
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// assert_eq!(Ipv4Addr::new(10, 0, 0, 1).is_private(), true);
    /// assert_eq!(Ipv4Addr::new(10, 10, 10, 10).is_private(), true);
    /// assert_eq!(Ipv4Addr::new(172, 16, 10, 10).is_private(), true);
    /// assert_eq!(Ipv4Addr::new(172, 29, 45, 14).is_private(), true);
    /// assert_eq!(Ipv4Addr::new(172, 32, 0, 2).is_private(), false);
    /// assert_eq!(Ipv4Addr::new(192, 168, 0, 2).is_private(), true);
    /// assert_eq!(Ipv4Addr::new(192, 169, 0, 2).is_private(), false);
    /// ```
    #[stable(since = "1.7.0", feature = "ip_17")]
    pub fn is_private(&self) -> bool {
        // The `16..=31` range pattern covers 172.16.0.0/12 without a guard.
        matches!(self.octets(), [10, ..] | [172, 16..=31, ..] | [192, 168, ..])
    }

    /// Returns [`true`] if the address is link-local (169.254.0.0/16).
    ///
    /// This property is defined by [IETF RFC 3927].
    ///
    /// [IETF RFC 3927]: https://tools.ietf.org/html/rfc3927
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// assert_eq!(Ipv4Addr::new(169, 254, 0, 0).is_link_local(), true);
    /// assert_eq!(Ipv4Addr::new(169, 254, 10, 65).is_link_local(), true);
    /// assert_eq!(Ipv4Addr::new(16, 89, 10, 65).is_link_local(), false);
    /// ```
    #[stable(since = "1.7.0", feature = "ip_17")]
    pub fn is_link_local(&self) -> bool {
        matches!(self.octets(), [169, 254, ..])
    }

    /// Returns [`true`] if the address appears to be globally routable.
    /// See [iana-ipv4-special-registry][ipv4-sr].
    ///
    /// The following return false:
    ///
    /// - private addresses (see [`is_private()`](#method.is_private))
    /// - the loopback address (see [`is_loopback()`](#method.is_loopback))
    /// - the link-local address (see [`is_link_local()`](#method.is_link_local))
    /// - the broadcast address (see [`is_broadcast()`](#method.is_broadcast))
    /// - addresses used for documentation (see [`is_documentation()`](#method.is_documentation))
    /// - the unspecified address (see [`is_unspecified()`](#method.is_unspecified)), and the whole
    ///   0.0.0.0/8 block
    /// - addresses reserved for future protocols (see
    ///   [`is_ietf_protocol_assignment()`](#method.is_ietf_protocol_assignment), except
    ///   `192.0.0.9/32` and `192.0.0.10/32` which are globally routable
    /// - addresses reserved for future use (see [`is_reserved()`](#method.is_reserved)
    /// - addresses reserved for networking devices benchmarking (see
    ///   [`is_benchmarking`](#method.is_benchmarking))
    ///
    /// [ipv4-sr]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ip)]
    ///
    /// use std::net::Ipv4Addr;
    ///
    /// // private addresses are not global
    /// assert_eq!(Ipv4Addr::new(10, 254, 0, 0).is_global(), false);
    /// assert_eq!(Ipv4Addr::new(192, 168, 10, 65).is_global(), false);
    /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_global(), false);
    ///
    /// // the 0.0.0.0/8 block is not global
    /// assert_eq!(Ipv4Addr::new(0, 1, 2, 3).is_global(), false);
    /// // in particular, the unspecified address is not global
    /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_global(), false);
    ///
    /// // the loopback address is not global
    /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_global(), false);
    ///
    /// // link local addresses are not global
    /// assert_eq!(Ipv4Addr::new(169, 254, 45, 1).is_global(), false);
    ///
    /// // the broadcast address is not global
    /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_global(), false);
    ///
    /// // the address space designated for documentation is not global
    /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_global(), false);
    /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_global(), false);
    /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_global(), false);
    ///
    /// // shared addresses are not global
    /// assert_eq!(Ipv4Addr::new(100, 100, 0, 0).is_global(), false);
    ///
    /// // addresses reserved for protocol assignment are not global
    /// assert_eq!(Ipv4Addr::new(192, 0, 0, 0).is_global(), false);
    /// assert_eq!(Ipv4Addr::new(192, 0, 0, 255).is_global(), false);
    ///
    /// // addresses reserved for future use are not global
    /// assert_eq!(Ipv4Addr::new(250, 10, 20, 30).is_global(), false);
    ///
    /// // addresses reserved for network devices benchmarking are not global
    /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_global(), false);
    ///
    /// // All the other addresses are global
    /// assert_eq!(Ipv4Addr::new(1, 1, 1, 1).is_global(), true);
    /// assert_eq!(Ipv4Addr::new(80, 9, 12, 3).is_global(), true);
    /// ```
    pub fn is_global(&self) -> bool {
        // Check if this address is 192.0.0.9 or 192.0.0.10. These addresses
        // are the only two globally routable addresses in the 192.0.0.0/24
        // range. Compute the u32 form once rather than twice.
        let ip = u32::from(*self);
        if ip == 0xc0000009 || ip == 0xc000000a {
            return true;
        }
        !self.is_private()
            && !self.is_loopback()
            && !self.is_link_local()
            && !self.is_broadcast()
            && !self.is_documentation()
            && !self.is_shared()
            && !self.is_ietf_protocol_assignment()
            && !self.is_reserved()
            && !self.is_benchmarking()
            // Make sure the address is not in 0.0.0.0/8
            && self.octets()[0] != 0
    }

    /// Returns [`true`] if this address is part of the Shared Address Space defined in
    /// [IETF RFC 6598] (`100.64.0.0/10`).
    ///
    /// [IETF RFC 6598]: https://tools.ietf.org/html/rfc6598
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ip)]
    /// use std::net::Ipv4Addr;
    ///
    /// assert_eq!(Ipv4Addr::new(100, 64, 0, 0).is_shared(), true);
    /// assert_eq!(Ipv4Addr::new(100, 127, 255, 255).is_shared(), true);
    /// assert_eq!(Ipv4Addr::new(100, 128, 0, 0).is_shared(), false);
    /// ```
    pub fn is_shared(&self) -> bool {
        // A /10: first octet 100, top two bits of the second octet == 01.
        let octets = self.octets();
        octets[0] == 100 && (octets[1] & 0b1100_0000 == 0b0100_0000)
    }

    /// Returns [`true`] if this address is part of `192.0.0.0/24`, which is reserved to
    /// IANA for IETF protocol assignments, as documented in [IETF RFC 6890].
    ///
    /// Note that parts of this block are in use:
    ///
    /// - `192.0.0.8/32` is the "IPv4 dummy address" (see [IETF RFC 7600])
    /// - `192.0.0.9/32` is the "Port Control Protocol Anycast" (see [IETF RFC 7723])
    /// - `192.0.0.10/32` is used for NAT traversal (see [IETF RFC 8155])
    ///
    /// [IETF RFC 6890]: https://tools.ietf.org/html/rfc6890
    /// [IETF RFC 7600]: https://tools.ietf.org/html/rfc7600
    /// [IETF RFC 7723]: https://tools.ietf.org/html/rfc7723
    /// [IETF RFC 8155]: https://tools.ietf.org/html/rfc8155
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ip)]
    /// use std::net::Ipv4Addr;
    ///
    /// assert_eq!(Ipv4Addr::new(192, 0, 0, 0).is_ietf_protocol_assignment(), true);
    /// assert_eq!(Ipv4Addr::new(192, 0, 0, 8).is_ietf_protocol_assignment(), true);
    /// assert_eq!(Ipv4Addr::new(192, 0, 0, 9).is_ietf_protocol_assignment(), true);
    /// assert_eq!(Ipv4Addr::new(192, 0, 0, 255).is_ietf_protocol_assignment(), true);
    /// assert_eq!(Ipv4Addr::new(192, 0, 1, 0).is_ietf_protocol_assignment(), false);
    /// assert_eq!(Ipv4Addr::new(191, 255, 255, 255).is_ietf_protocol_assignment(), false);
    /// ```
    pub fn is_ietf_protocol_assignment(&self) -> bool {
        matches!(self.octets(), [192, 0, 0, _])
    }

    /// Returns [`true`] if this address part of the `198.18.0.0/15` range, which is reserved for
    /// network devices benchmarking. This range is defined in [IETF RFC 2544] as `192.18.0.0`
    /// through `198.19.255.255` but [errata 423] corrects it to `198.18.0.0/15`.
    ///
    /// [IETF RFC 2544]: https://tools.ietf.org/html/rfc2544
    /// [errata 423]: https://www.rfc-editor.org/errata/eid423
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ip)]
    /// use std::net::Ipv4Addr;
    ///
    /// assert_eq!(Ipv4Addr::new(198, 17, 255, 255).is_benchmarking(), false);
    /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_benchmarking(), true);
    /// assert_eq!(Ipv4Addr::new(198, 19, 255, 255).is_benchmarking(), true);
    /// assert_eq!(Ipv4Addr::new(198, 20, 0, 0).is_benchmarking(), false);
    /// ```
    pub fn is_benchmarking(&self) -> bool {
        // A /15: masking the low bit of the second octet accepts 18 and 19.
        let octets = self.octets();
        octets[0] == 198 && (octets[1] & 0xfe) == 18
    }

    /// Returns [`true`] if this address is reserved by IANA for future use. [IETF RFC 1112]
    /// defines the block of reserved addresses as `240.0.0.0/4`. This range normally includes the
    /// broadcast address `255.255.255.255`, but this implementation explicitly excludes it, since
    /// it is obviously not reserved for future use.
    ///
    /// [IETF RFC 1112]: https://tools.ietf.org/html/rfc1112
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Warning
    ///
    /// As IANA assigns new addresses, this method will be
    /// updated. This may result in non-reserved addresses being
    /// treated as reserved in code that relies on an outdated version
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ip)]
    /// use std::net::Ipv4Addr;
    ///
    /// assert_eq!(Ipv4Addr::new(240, 0, 0, 0).is_reserved(), true);
    /// assert_eq!(Ipv4Addr::new(255, 255, 255, 254).is_reserved(), true);
    ///
    /// assert_eq!(Ipv4Addr::new(239, 255, 255, 255).is_reserved(), false);
    /// // The broadcast address is not considered as reserved for future use by this implementation
    /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_reserved(), false);
    /// ```
    pub fn is_reserved(&self) -> bool {
        // Top four bits set (240.0.0.0/4), except the broadcast address.
        self.octets()[0] & 240 == 240 && !self.is_broadcast()
    }

    /// Returns [`true`] if this is a multicast address (224.0.0.0/4).
    ///
    /// Multicast addresses have a most significant octet between 224 and 239,
    /// and is defined by [IETF RFC 5771].
    ///
    /// [IETF RFC 5771]: https://tools.ietf.org/html/rfc5771
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// assert_eq!(Ipv4Addr::new(224, 254, 0, 0).is_multicast(), true);
    /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_multicast(), true);
    /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_multicast(), false);
    /// ```
    #[stable(since = "1.7.0", feature = "ip_17")]
    pub fn is_multicast(&self) -> bool {
        matches!(self.octets(), [224..=239, ..])
    }

    /// Returns [`true`] if this is a broadcast address (255.255.255.255).
    ///
    /// A broadcast address has all octets set to 255 as defined in [IETF RFC 919].
    ///
    /// [IETF RFC 919]: https://tools.ietf.org/html/rfc919
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_broadcast(), true);
    /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_broadcast(), false);
    /// ```
    #[stable(since = "1.7.0", feature = "ip_17")]
    pub fn is_broadcast(&self) -> bool {
        self == &Self::BROADCAST
    }

    /// Returns [`true`] if this address is in a range designated for documentation.
    ///
    /// This is defined in [IETF RFC 5737]:
    ///
    /// - 192.0.2.0/24 (TEST-NET-1)
    /// - 198.51.100.0/24 (TEST-NET-2)
    /// - 203.0.113.0/24 (TEST-NET-3)
    ///
    /// [IETF RFC 5737]: https://tools.ietf.org/html/rfc5737
    /// [`true`]: ../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_documentation(), true);
    /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_documentation(), true);
    /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_documentation(), true);
    /// assert_eq!(Ipv4Addr::new(193, 34, 17, 19).is_documentation(), false);
    /// ```
    #[stable(since = "1.7.0", feature = "ip_17")]
    pub fn is_documentation(&self) -> bool {
        matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _])
    }

    /// Converts this address to an IPv4-compatible [IPv6 address].
    ///
    /// a.b.c.d becomes ::a.b.c.d
    ///
    /// [IPv6 address]: ../../std/net/struct.Ipv6Addr.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{Ipv4Addr, Ipv6Addr};
    ///
    /// assert_eq!(
    ///     Ipv4Addr::new(192, 0, 2, 255).to_ipv6_compatible(),
    ///     Ipv6Addr::new(0, 0, 0, 0, 0, 0, 49152, 767)
    /// );
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn to_ipv6_compatible(&self) -> Ipv6Addr {
        // The four octets become the last four bytes of the IPv6 address.
        let octets = self.octets();
        Ipv6Addr::from([
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, octets[0], octets[1], octets[2], octets[3],
        ])
    }

    /// Converts this address to an IPv4-mapped [IPv6 address].
    ///
    /// a.b.c.d becomes ::ffff:a.b.c.d
    ///
    /// [IPv6 address]: ../../std/net/struct.Ipv6Addr.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{Ipv4Addr, Ipv6Addr};
    ///
    /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).to_ipv6_mapped(),
    ///            Ipv6Addr::new(0, 0, 0, 0, 0, 65535, 49152, 767));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn to_ipv6_mapped(&self) -> Ipv6Addr {
        // Same as `to_ipv6_compatible`, but with the `ffff` marker segment.
        let octets = self.octets();
        Ipv6Addr::from([
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, octets[0], octets[1], octets[2], octets[3],
        ])
    }
}
#[stable(feature = "ip_addr", since = "1.7.0")]
impl fmt::Display for IpAddr {
    // Formatting is delegated entirely to the wrapped address type.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::V4(addr) => fmt::Display::fmt(addr, fmt),
            Self::V6(addr) => fmt::Display::fmt(addr, fmt),
        }
    }
}
#[stable(feature = "ip_from_ip", since = "1.16.0")]
impl From<Ipv4Addr> for IpAddr {
    /// Copies this address to a new `IpAddr::V4`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv4Addr};
    ///
    /// let addr = Ipv4Addr::new(127, 0, 0, 1);
    ///
    /// assert_eq!(
    ///     IpAddr::V4(addr),
    ///     IpAddr::from(addr)
    /// )
    /// ```
    fn from(addr: Ipv4Addr) -> IpAddr {
        // Simply wrap the address in the V4 variant.
        IpAddr::V4(addr)
    }
}
#[stable(feature = "ip_from_ip", since = "1.16.0")]
impl From<Ipv6Addr> for IpAddr {
    /// Copies this address to a new `IpAddr::V6`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv6Addr};
    ///
    /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
    ///
    /// assert_eq!(
    ///     IpAddr::V6(addr),
    ///     IpAddr::from(addr)
    /// );
    /// ```
    fn from(addr: Ipv6Addr) -> IpAddr {
        // Simply wrap the address in the V6 variant.
        IpAddr::V6(addr)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for Ipv4Addr {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let octets = self.octets();
        // Fast path: if no width/precision is requested, the formatter does
        // no padding, so we can write straight into it without first
        // measuring the rendered length.
        if fmt.precision().is_none() && fmt.width().is_none() {
            write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3])
        } else {
            // Slow path: render into a stack buffer first so `Formatter::pad`
            // can see the full string and apply width/precision/alignment.
            const IPV4_BUF_LEN: usize = 15; // Long enough for the longest possible IPv4 address
            let mut buf = [0u8; IPV4_BUF_LEN];
            let mut buf_slice = &mut buf[..];
            // Note: The call to write should never fail because the buffer is
            // sized for the worst case ("255.255.255.255"), hence the unwrap.
            write!(buf_slice, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap();
            // `write!` advances `buf_slice`, so the difference is the number
            // of bytes actually written.
            let len = IPV4_BUF_LEN - buf_slice.len();
            // SAFETY: the buffer contains only the ASCII digits and dots we
            // just wrote, so the first `len` bytes are valid UTF-8.
            let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
            fmt.pad(buf)
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Ipv4Addr {
    // Debug output is identical to Display output: forward to it.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// NOTE(review): written by hand rather than derived — presumably because the
// inner libc type cannot be relied on to derive `Clone`; confirm. `Ipv4Addr`
// is `Copy`, so dereferencing is sufficient.
impl Clone for Ipv4Addr {
    fn clone(&self) -> Ipv4Addr {
        *self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for Ipv4Addr {
    fn eq(&self, other: &Ipv4Addr) -> bool {
        // `octets()` is just `s_addr` re-expressed as bytes, so comparing the
        // octet arrays is equivalent to comparing the raw `s_addr` values.
        self.octets() == other.octets()
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialEq<Ipv4Addr> for IpAddr {
    fn eq(&self, other: &Ipv4Addr) -> bool {
        // A V6 address never equals a V4 address.
        matches!(self, IpAddr::V4(v4) if v4 == other)
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialEq<IpAddr> for Ipv4Addr {
    fn eq(&self, other: &IpAddr) -> bool {
        // Mirror of the impl above; V6 never compares equal.
        matches!(other, IpAddr::V4(v4) if self == v4)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// `eq` above compares plain integers, which is reflexive, symmetric and
// transitive, so `Eq` holds.
impl Eq for Ipv4Addr {}
#[stable(feature = "rust1", since = "1.0.0")]
impl hash::Hash for Ipv4Addr {
    fn hash<H: hash::Hasher>(&self, s: &mut H) {
        // `inner` is #[repr(packed)], so we need to copy `s_addr` out into an
        // aligned temporary — the braces force a by-value copy — because
        // `Hash::hash` takes `&self` and references into a packed struct may
        // be misaligned.
        { self.inner.s_addr }.hash(s)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialOrd for Ipv4Addr {
    // The ordering is total (see the `Ord` impl), so this always succeeds.
    fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialOrd<Ipv4Addr> for IpAddr {
    fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> {
        // Any IPv6 address sorts after every IPv4 address.
        match self {
            Self::V4(v4) => v4.partial_cmp(other),
            Self::V6(_) => Some(Ordering::Greater),
        }
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialOrd<IpAddr> for Ipv4Addr {
    fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> {
        // Mirror of the impl above: IPv4 sorts before IPv6.
        if let IpAddr::V4(v4) = other {
            self.partial_cmp(v4)
        } else {
            Some(Ordering::Less)
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for Ipv4Addr {
    fn cmp(&self, other: &Ipv4Addr) -> Ordering {
        // Convert both sides from storage (big-endian) to host order so the
        // numeric comparison matches lexicographic octet order.
        let lhs = u32::from_be(self.inner.s_addr);
        let rhs = u32::from_be(other.inner.s_addr);
        lhs.cmp(&rhs)
    }
}
// Crate-internal accessor: lets the platform (`sys`) layer view the address
// as the raw libc `in_addr` without exposing the field publicly.
impl AsInner<c::in_addr> for Ipv4Addr {
    fn as_inner(&self) -> &c::in_addr {
        &self.inner
    }
}
#[stable(feature = "ip_u32", since = "1.1.0")]
impl From<Ipv4Addr> for u32 {
    /// Converts an `Ipv4Addr` into a host byte order `u32`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// let addr = Ipv4Addr::new(13, 12, 11, 10);
    /// assert_eq!(0x0d0c0b0au32, u32::from(addr));
    /// ```
    fn from(ip: Ipv4Addr) -> u32 {
        // `octets()` yields network order, so interpret it as big-endian.
        u32::from_be_bytes(ip.octets())
    }
}
#[stable(feature = "ip_u32", since = "1.1.0")]
impl From<u32> for Ipv4Addr {
    /// Converts a host byte order `u32` into an `Ipv4Addr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// let addr = Ipv4Addr::from(0x0d0c0b0au32);
    /// assert_eq!(Ipv4Addr::new(13, 12, 11, 10), addr);
    /// ```
    fn from(ip: u32) -> Ipv4Addr {
        // Split into big-endian (network order) bytes and rebuild.
        let [a, b, c, d] = ip.to_be_bytes();
        Ipv4Addr::new(a, b, c, d)
    }
}
#[stable(feature = "from_slice_v4", since = "1.9.0")]
impl From<[u8; 4]> for Ipv4Addr {
    /// Creates an `Ipv4Addr` from a four element byte array.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv4Addr;
    ///
    /// let addr = Ipv4Addr::from([13u8, 12u8, 11u8, 10u8]);
    /// assert_eq!(Ipv4Addr::new(13, 12, 11, 10), addr);
    /// ```
    fn from(octets: [u8; 4]) -> Ipv4Addr {
        let [a, b, c, d] = octets;
        Ipv4Addr::new(a, b, c, d)
    }
}
#[stable(feature = "ip_from_slice", since = "1.17.0")]
impl From<[u8; 4]> for IpAddr {
    /// Creates an `IpAddr::V4` from a four element byte array.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv4Addr};
    ///
    /// let addr = IpAddr::from([13u8, 12u8, 11u8, 10u8]);
    /// assert_eq!(IpAddr::V4(Ipv4Addr::new(13, 12, 11, 10)), addr);
    /// ```
    fn from(octets: [u8; 4]) -> IpAddr {
        // Reuse the `From<[u8; 4]> for Ipv4Addr` conversion, then wrap.
        IpAddr::V4(octets.into())
    }
}
impl Ipv6Addr {
/// Creates a new IPv6 address from eight 16-bit segments.
///
/// The result will represent the IP address `a:b:c:d:e:f:g:h`.
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_ipv6", since = "1.32.0")]
pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr {
    Ipv6Addr {
        inner: c::in6_addr {
            // Each segment is split into its big-endian (network order)
            // bytes: high byte first, then low byte.
            s6_addr: [
                (a >> 8) as u8,
                a as u8,
                (b >> 8) as u8,
                b as u8,
                (c >> 8) as u8,
                c as u8,
                (d >> 8) as u8,
                d as u8,
                (e >> 8) as u8,
                e as u8,
                (f >> 8) as u8,
                f as u8,
                (g >> 8) as u8,
                g as u8,
                (h >> 8) as u8,
                h as u8,
            ],
        },
    }
}
/// An IPv6 address representing localhost: `::1`.
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// let addr = Ipv6Addr::LOCALHOST;
/// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
/// ```
#[stable(feature = "ip_constructors", since = "1.30.0")]
pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
/// An IPv6 address representing the unspecified address: `::`
///
/// All eight segments are zero.
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// let addr = Ipv6Addr::UNSPECIFIED;
/// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
/// ```
#[stable(feature = "ip_constructors", since = "1.30.0")]
pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
/// Returns the eight 16-bit segments that make up this address.
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).segments(),
///            [0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn segments(&self) -> [u16; 8] {
    // Reassemble each consecutive pair of network-order bytes into a u16.
    let bytes = &self.inner.s6_addr;
    let seg = |i: usize| u16::from_be_bytes([bytes[2 * i], bytes[2 * i + 1]]);
    [seg(0), seg(1), seg(2), seg(3), seg(4), seg(5), seg(6), seg(7)]
}
/// Returns [`true`] for the special 'unspecified' address (::).
///
/// This property is defined in [IETF RFC 4291].
///
/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unspecified(), false);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).is_unspecified(), true);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_unspecified(&self) -> bool {
    // The unspecified address is all zeroes.
    self.segments().iter().all(|&seg| seg == 0)
}
/// Returns [`true`] if this is a loopback address (::1).
///
/// This property is defined in [IETF RFC 4291].
///
/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_loopback(), false);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_loopback(), true);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_loopback(&self) -> bool {
    // `::1` — only the very last segment is set, to 1.
    matches!(self.segments(), [0, 0, 0, 0, 0, 0, 0, 1])
}
/// Returns [`true`] if the address appears to be globally routable.
///
/// The following return [`false`]:
///
/// - the loopback address
/// - link-local and unique local unicast addresses
/// - interface-, link-, realm-, admin- and site-local multicast addresses
///
/// [`true`]: ../../std/primitive.bool.html
/// [`false`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_global(), true);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_global(), false);
/// assert_eq!(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1).is_global(), true);
/// ```
pub fn is_global(&self) -> bool {
    match self.multicast_scope() {
        // Multicast addresses are global only when their scope says so.
        Some(scope) => matches!(scope, Ipv6MulticastScope::Global),
        // Non-multicast addresses defer to the unicast classification.
        None => self.is_unicast_global(),
    }
}
/// Returns [`true`] if this is a unique local address (`fc00::/7`).
///
/// This property is defined in [IETF RFC 4193].
///
/// [IETF RFC 4193]: https://tools.ietf.org/html/rfc4193
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unique_local(), false);
/// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 0).is_unique_local(), true);
/// ```
pub fn is_unique_local(&self) -> bool {
    // `fc00::/7`: the top seven bits of the first segment are 1111110.
    let first = self.segments()[0];
    (first & 0xfe00) == 0xfc00
}
/// Returns [`true`] if the address is a unicast link-local address (`fe80::/64`).
///
/// A common mis-conception is to think that "unicast link-local addresses start with
/// `fe80::`", but the [IETF RFC 4291] actually defines a stricter format for these addresses:
///
/// ```no_rust
/// |   10     |
/// |  bits    |         54 bits         |          64 bits           |
/// +----------+-------------------------+----------------------------+
/// |1111111010|           0             |       interface ID         |
/// +----------+-------------------------+----------------------------+
/// ```
///
/// This method validates the format defined in the RFC and won't recognize the following
/// addresses such as `fe80:0:0:1::` or `fe81::` as unicast link-local addresses for example.
/// If you need a less strict validation use [`is_unicast_link_local()`] instead.
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 0);
/// assert!(ip.is_unicast_link_local_strict());
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff);
/// assert!(ip.is_unicast_link_local_strict());
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 1, 0, 0, 0, 0);
/// assert!(!ip.is_unicast_link_local_strict());
/// assert!(ip.is_unicast_link_local());
///
/// let ip = Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 0);
/// assert!(!ip.is_unicast_link_local_strict());
/// assert!(ip.is_unicast_link_local());
/// ```
///
/// # See also
///
/// - [IETF RFC 4291 section 2.5.6]
/// - [RFC 4291 errata 4406] (which has been rejected but provides useful
///   insight)
/// - [`is_unicast_link_local()`]
///
/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
/// [IETF RFC 4291 section 2.5.6]: https://tools.ietf.org/html/rfc4291#section-2.5.6
/// [`true`]: ../../std/primitive.bool.html
/// [RFC 4291 errata 4406]: https://www.rfc-editor.org/errata/eid4406
/// [`is_unicast_link_local()`]: ../../std/net/struct.Ipv6Addr.html#method.is_unicast_link_local
pub fn is_unicast_link_local_strict(&self) -> bool {
    // The strict form requires the first segment to be exactly `fe80` and
    // the next three (the remaining 54 prefix bits) to be zero; the low 64
    // bits (interface ID) are unconstrained. Masking a `u16` segment with
    // `0xffff` is a no-op, so the segments are compared directly, and
    // `segments()` is computed once instead of once per comparison.
    let segments = self.segments();
    segments[0] == 0xfe80 && segments[1] == 0 && segments[2] == 0 && segments[3] == 0
}
/// Returns [`true`] if the address is a unicast link-local address (`fe80::/10`).
///
/// This method returns [`true`] for addresses in the range reserved by [RFC 4291 section 2.4],
/// i.e. addresses with the following format:
///
/// ```no_rust
/// | 10 |
/// | bits | 54 bits | 64 bits |
/// +----------+-------------------------+----------------------------+
/// |1111111010| arbitratry value | interface ID |
/// +----------+-------------------------+----------------------------+
/// ```
///
/// As a result, this method consider addresses such as `fe80:0:0:1::` or `fe81::` to be
/// unicast link-local addresses, whereas [`is_unicast_link_local_strict()`] does not. If you
/// need a strict validation fully compliant with the RFC, use
/// [`is_unicast_link_local_strict()`].
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 0);
/// assert!(ip.is_unicast_link_local());
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff);
/// assert!(ip.is_unicast_link_local());
///
/// let ip = Ipv6Addr::new(0xfe80, 0, 0, 1, 0, 0, 0, 0);
/// assert!(ip.is_unicast_link_local());
/// assert!(!ip.is_unicast_link_local_strict());
///
/// let ip = Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 0);
/// assert!(ip.is_unicast_link_local());
/// assert!(!ip.is_unicast_link_local_strict());
/// ```
///
/// # See also
///
/// - [IETF RFC 4291 section 2.4]
/// - [RFC 4291 errata 4406] (which has been rejected but provides useful
/// insight)
///
/// [IETF RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4
/// [`true`]: ../../std/primitive.bool.html
/// [RFC 4291 errata 4406]: https://www.rfc-editor.org/errata/eid4406
/// [`is_unicast_link_local_strict()`]: ../../std/net/struct.Ipv6Addr.html#method.is_unicast_link_local_strict
pub fn is_unicast_link_local(&self) -> bool {
    // `fe80::/10`: only the ten most significant bits are checked, so any
    // value in the "54 arbitrary bits" region is accepted.
    let first_segment = self.segments()[0];
    (first_segment & 0xffc0) == 0xfe80
}
/// Returns [`true`] if this is a deprecated unicast site-local address (fec0::/10). The
/// unicast site-local address format is defined in [RFC 4291 section 2.5.7] as:
///
/// ```no_rust
/// | 10 |
/// | bits | 54 bits | 64 bits |
/// +----------+-------------------------+----------------------------+
/// |1111111011| subnet ID | interface ID |
/// +----------+-------------------------+----------------------------+
/// ```
///
/// [`true`]: ../../std/primitive.bool.html
/// [RFC 4291 section 2.5.7]: https://tools.ietf.org/html/rfc4291#section-2.5.7
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// assert_eq!(
/// Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unicast_site_local(),
/// false
/// );
/// assert_eq!(Ipv6Addr::new(0xfec2, 0, 0, 0, 0, 0, 0, 0).is_unicast_site_local(), true);
/// ```
///
/// # Warning
///
/// As per [RFC 3879], the whole `FEC0::/10` prefix is
/// deprecated. New software must not support site-local
/// addresses.
///
/// [RFC 3879]: https://tools.ietf.org/html/rfc3879
pub fn is_unicast_site_local(&self) -> bool {
    // `fec0::/10`: compare the ten high bits of the leading segment
    // against the (deprecated) site-local prefix.
    let first_segment = self.segments()[0];
    (first_segment & 0xffc0) == 0xfec0
}
/// Returns [`true`] if this is an address reserved for documentation
/// (2001:db8::/32).
///
/// This property is defined in [IETF RFC 3849].
///
/// [IETF RFC 3849]: https://tools.ietf.org/html/rfc3849
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_documentation(), false);
/// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_documentation(), true);
/// ```
pub fn is_documentation(&self) -> bool {
    // `2001:db8::/32` — the documentation prefix occupies exactly the
    // first two 16-bit groups, so both are matched in full.
    let segments = self.segments();
    segments[0] == 0x2001 && segments[1] == 0xdb8
}
/// Returns [`true`] if the address is a globally routable unicast address.
///
/// The following return false:
///
/// - the loopback address
/// - the link-local addresses
/// - unique local addresses
/// - the unspecified address
/// - the address range reserved for documentation
///
/// This method returns [`true`] for site-local addresses as per [RFC 4291 section 2.5.7]
///
/// ```no_rust
/// The special behavior of [the site-local unicast] prefix defined in [RFC3513] must no longer
/// be supported in new implementations (i.e., new implementations must treat this prefix as
/// Global Unicast).
/// ```
///
/// [`true`]: ../../std/primitive.bool.html
/// [RFC 4291 section 2.5.7]: https://tools.ietf.org/html/rfc4291#section-2.5.7
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_global(), false);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unicast_global(), true);
/// ```
pub fn is_unicast_global(&self) -> bool {
    // An address is globally-routable unicast unless it belongs to one of
    // the special-purpose ranges below (De Morgan form of the original
    // all-negations conjunction; short-circuiting order is unchanged).
    let special = self.is_multicast()
        || self.is_loopback()
        || self.is_unicast_link_local()
        || self.is_unique_local()
        || self.is_unspecified()
        || self.is_documentation();
    !special
}
/// Returns the address's multicast scope if the address is multicast.
///
/// # Examples
///
/// ```
/// #![feature(ip)]
///
/// use std::net::{Ipv6Addr, Ipv6MulticastScope};
///
/// assert_eq!(
/// Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0).multicast_scope(),
/// Some(Ipv6MulticastScope::Global)
/// );
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).multicast_scope(), None);
/// ```
pub fn multicast_scope(&self) -> Option<Ipv6MulticastScope> {
    // Only multicast addresses carry a scope.
    if !self.is_multicast() {
        return None;
    }
    // The scope is encoded in the low nibble of the first segment; values
    // not listed here are unassigned/reserved and yield `None`.
    match self.segments()[0] & 0x000f {
        1 => Some(Ipv6MulticastScope::InterfaceLocal),
        2 => Some(Ipv6MulticastScope::LinkLocal),
        3 => Some(Ipv6MulticastScope::RealmLocal),
        4 => Some(Ipv6MulticastScope::AdminLocal),
        5 => Some(Ipv6MulticastScope::SiteLocal),
        8 => Some(Ipv6MulticastScope::OrganizationLocal),
        14 => Some(Ipv6MulticastScope::Global),
        _ => None,
    }
}
/// Returns [`true`] if this is a multicast address (ff00::/8).
///
/// This property is defined by [IETF RFC 4291].
///
/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
/// [`true`]: ../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_multicast(), true);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_multicast(), false);
/// ```
#[stable(since = "1.7.0", feature = "ip_17")]
pub fn is_multicast(&self) -> bool {
    // `ff00::/8`: multicast iff the leading byte of the address is 0xff,
    // i.e. the high byte of the first 16-bit group.
    let high_byte = self.segments()[0] >> 8;
    high_byte == 0xff
}
/// Converts this address to an [IPv4 address]. Returns [`None`] if this address is
/// neither IPv4-compatible or IPv4-mapped.
///
/// ::a.b.c.d and ::ffff:a.b.c.d become a.b.c.d
///
/// [IPv4 address]: ../../std/net/struct.Ipv4Addr.html
/// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
/// ```
/// use std::net::{Ipv4Addr, Ipv6Addr};
///
/// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4(), None);
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4(),
/// Some(Ipv4Addr::new(192, 10, 2, 255)));
/// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4(),
/// Some(Ipv4Addr::new(0, 0, 0, 1)));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_ipv4(&self) -> Option<Ipv4Addr> {
    // Destructure all eight groups up front instead of pattern-matching:
    // an address is IPv4-compatible (`::a.b.c.d`) or IPv4-mapped
    // (`::ffff:a.b.c.d`) when the first five groups are zero and the sixth
    // is either 0 or 0xffff.
    let [a, b, c, d, e, f, g, h] = self.segments();
    if a == 0 && b == 0 && c == 0 && d == 0 && e == 0 && (f == 0 || f == 0xffff) {
        // The last two groups hold the four IPv4 octets, high byte first.
        Some(Ipv4Addr::new((g >> 8) as u8, g as u8, (h >> 8) as u8, h as u8))
    } else {
        None
    }
}
/// Returns the sixteen eight-bit integers the IPv6 address consists of.
///
/// # Examples
///
/// ```
/// use std::net::Ipv6Addr;
///
/// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).octets(),
///            [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
/// ```
#[stable(feature = "ipv6_to_octets", since = "1.12.0")]
#[rustc_const_stable(feature = "const_ipv6", since = "1.32.0")]
pub const fn octets(&self) -> [u8; 16] {
    // The OS-level `in6_addr` already stores the address as 16 raw bytes
    // (big-endian, as the `From<Ipv6Addr> for u128` impl relies on), so
    // this is a plain field copy.
    self.inner.s6_addr
}
}
/// Write an Ipv6Addr, conforming to the canonical style described by
/// [RFC 5952](https://tools.ietf.org/html/rfc5952).
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for Ipv6Addr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Fast path: if there are no alignment requirements, write the IP
        // address directly to `f`. Otherwise, write it to a local buffer
        // first, then use `f.pad` to apply width/precision.
        if f.precision().is_none() && f.width().is_none() {
            let segments = self.segments();
            // Special case for :: and ::1; otherwise they get written with the
            // IPv4 formatter (both addresses are IPv4-compatible per `to_ipv4`)
            if self.is_unspecified() {
                f.write_str("::")
            } else if self.is_loopback() {
                f.write_str("::1")
            } else if let Some(ipv4) = self.to_ipv4() {
                match segments[5] {
                    // IPv4 Compatible address
                    0 => write!(f, "::{}", ipv4),
                    // IPv4 Mapped address
                    0xffff => write!(f, "::ffff:{}", ipv4),
                    // `to_ipv4` only returns Some for the two cases above
                    _ => unreachable!(),
                }
            } else {
                // A run of consecutive zero segments, as (start index, length).
                #[derive(Copy, Clone, Default)]
                struct Span {
                    start: usize,
                    len: usize,
                }
                // Find the longest run of zero segments. Strict `>` implements
                // the RFC 5952 tie-break: when two runs are equally long, the
                // first one is compressed.
                let zeroes = {
                    let mut longest = Span::default();
                    let mut current = Span::default();
                    for (i, &segment) in segments.iter().enumerate() {
                        if segment == 0 {
                            if current.len == 0 {
                                current.start = i;
                            }
                            current.len += 1;
                            if current.len > longest.len {
                                longest = current;
                            }
                        } else {
                            current = Span::default();
                        }
                    }
                    longest
                };
                /// Write a colon-separated part of the address
                #[inline]
                fn fmt_subslice(f: &mut fmt::Formatter<'_>, chunk: &[u16]) -> fmt::Result {
                    if let Some(first) = chunk.first() {
                        fmt::LowerHex::fmt(first, f)?;
                        for segment in &chunk[1..] {
                            f.write_char(':')?;
                            fmt::LowerHex::fmt(segment, f)?;
                        }
                    }
                    Ok(())
                }
                // RFC 5952: only runs of two or more zero segments are
                // shortened to "::"; a single zero segment is written as-is.
                if zeroes.len > 1 {
                    fmt_subslice(f, &segments[..zeroes.start])?;
                    f.write_str("::")?;
                    fmt_subslice(f, &segments[zeroes.start + zeroes.len..])
                } else {
                    fmt_subslice(f, &segments)
                }
            }
        } else {
            // Slow path: write the address to a local buffer, then use f.pad.
            // Defined recursively by using the fast path to write to the
            // buffer (the inner `write!` sees no width/precision).
            // This is the largest possible size of an IPv6 address: eight
            // groups of four hex digits plus seven separating colons.
            const IPV6_BUF_LEN: usize = (4 * 8) + 7;
            let mut buf = [0u8; IPV6_BUF_LEN];
            let mut buf_slice = &mut buf[..];
            // Note: This call to write should never fail, so unwrap is okay.
            write!(buf_slice, "{}", self).unwrap();
            let len = IPV6_BUF_LEN - buf_slice.len();
            // SAFETY: the fast path above only emits ASCII (hex digits,
            // colons, dots), so the written bytes are valid UTF-8.
            let buf = unsafe { crate::str::from_utf8_unchecked(&buf[..len]) };
            f.pad(buf)
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Ipv6Addr {
    // Debug output intentionally matches Display. Forwarding the formatter
    // itself (rather than going through `write!`) preserves any width or
    // precision flags the caller supplied.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, fmt)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Clone for Ipv6Addr {
    // Returning `*self` by value only compiles because `Ipv6Addr` is `Copy`;
    // this manual impl therefore behaves exactly like a derived one.
    fn clone(&self) -> Ipv6Addr {
        *self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for Ipv6Addr {
    /// Two addresses are equal exactly when all sixteen octets match.
    fn eq(&self, other: &Ipv6Addr) -> bool {
        // `octets()` is a plain copy of `inner.s6_addr`, so this compares
        // the same bytes as the raw field comparison would.
        self.octets() == other.octets()
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialEq<IpAddr> for Ipv6Addr {
    /// An `Ipv6Addr` equals an `IpAddr` only when the latter is the `V6`
    /// variant wrapping an equal address; a `V4` address never compares equal.
    fn eq(&self, other: &IpAddr) -> bool {
        if let IpAddr::V6(v6) = other { self == v6 } else { false }
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialEq<Ipv6Addr> for IpAddr {
    /// Mirror of `PartialEq<IpAddr> for Ipv6Addr`: true only when `self` is
    /// the `V6` variant holding an equal address.
    fn eq(&self, other: &Ipv6Addr) -> bool {
        if let IpAddr::V6(v6) = self { v6 == other } else { false }
    }
}
// Equality on `Ipv6Addr` is a byte-wise comparison (see `PartialEq`), which
// is reflexive, so the full `Eq` contract holds and the marker impl is sound.
#[stable(feature = "rust1", since = "1.0.0")]
impl Eq for Ipv6Addr {}
#[stable(feature = "rust1", since = "1.0.0")]
impl hash::Hash for Ipv6Addr {
    /// Hashes the sixteen raw octets, keeping the hash consistent with
    /// `PartialEq` (equal addresses hash identically).
    fn hash<H: hash::Hasher>(&self, s: &mut H) {
        // `octets()` copies `inner.s6_addr`, so the same `[u8; 16]` is fed
        // to the hasher as before.
        self.octets().hash(s)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialOrd for Ipv6Addr {
    // Delegates to the total order defined by `Ord`, which keeps the two
    // traits consistent as their contracts require.
    fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialOrd<Ipv6Addr> for IpAddr {
    /// Every `V4` address orders strictly before any `Ipv6Addr`; two v6
    /// addresses fall back to the numeric `Ipv6Addr` ordering.
    fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> {
        if let IpAddr::V6(v6) = self {
            v6.partial_cmp(other)
        } else {
            Some(Ordering::Less)
        }
    }
}
#[stable(feature = "ip_cmp", since = "1.16.0")]
impl PartialOrd<IpAddr> for Ipv6Addr {
    /// Mirror image of the `IpAddr`-vs-`Ipv6Addr` ordering: an `Ipv6Addr`
    /// sorts strictly after any `V4` address.
    fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> {
        if let IpAddr::V6(v6) = other {
            self.partial_cmp(v6)
        } else {
            Some(Ordering::Greater)
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for Ipv6Addr {
    // Total order via lexicographic comparison of the eight 16-bit groups
    // (i.e. addresses compare most-significant group first).
    fn cmp(&self, other: &Ipv6Addr) -> Ordering {
        self.segments().cmp(&other.segments())
    }
}
impl AsInner<c::in6_addr> for Ipv6Addr {
    // Borrows the underlying OS-level `in6_addr` for crate internals that
    // need the raw platform representation (e.g. when talking to libc).
    fn as_inner(&self) -> &c::in6_addr {
        &self.inner
    }
}
impl FromInner<c::in6_addr> for Ipv6Addr {
    // Wraps a raw OS-level `in6_addr` without any validation or conversion;
    // the byte layout is taken as-is.
    fn from_inner(addr: c::in6_addr) -> Ipv6Addr {
        Ipv6Addr { inner: addr }
    }
}
#[stable(feature = "i128", since = "1.26.0")]
impl From<Ipv6Addr> for u128 {
    /// Converts an `Ipv6Addr` into a host byte order `u128`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv6Addr;
    ///
    /// let addr = Ipv6Addr::new(
    ///     0x1020, 0x3040, 0x5060, 0x7080,
    ///     0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
    /// );
    /// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, u128::from(addr));
    /// ```
    fn from(ip: Ipv6Addr) -> u128 {
        // The octets are stored in network (big-endian) order, so
        // reinterpreting them as a big-endian integer yields the numeric
        // value of the address.
        u128::from_be_bytes(ip.octets())
    }
}
#[stable(feature = "i128", since = "1.26.0")]
impl From<u128> for Ipv6Addr {
    /// Converts a host byte order `u128` into an `Ipv6Addr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv6Addr;
    ///
    /// let addr = Ipv6Addr::from(0x102030405060708090A0B0C0D0E0F00D_u128);
    /// assert_eq!(
    ///     Ipv6Addr::new(
    ///         0x1020, 0x3040, 0x5060, 0x7080,
    ///         0x90A0, 0xB0C0, 0xD0E0, 0xF00D,
    ///     ),
    ///     addr);
    /// ```
    fn from(ip: u128) -> Ipv6Addr {
        // Serialize to network (big-endian) byte order, then defer to the
        // `From<[u8; 16]>` conversion.
        let octets = ip.to_be_bytes();
        octets.into()
    }
}
#[stable(feature = "ipv6_from_octets", since = "1.9.0")]
impl From<[u8; 16]> for Ipv6Addr {
    /// Creates an `Ipv6Addr` from a sixteen element byte array, taken to be
    /// in network (big-endian) order.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv6Addr;
    ///
    /// let addr = Ipv6Addr::from([
    ///     25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8,
    ///     17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8,
    /// ]);
    /// assert_eq!(
    ///     Ipv6Addr::new(
    ///         0x1918, 0x1716,
    ///         0x1514, 0x1312,
    ///         0x1110, 0x0f0e,
    ///         0x0d0c, 0x0b0a
    ///     ),
    ///     addr
    /// );
    /// ```
    fn from(octets: [u8; 16]) -> Ipv6Addr {
        // The byte array maps directly onto the OS-level representation.
        Ipv6Addr::from_inner(c::in6_addr { s6_addr: octets })
    }
}
#[stable(feature = "ipv6_from_segments", since = "1.16.0")]
impl From<[u16; 8]> for Ipv6Addr {
    /// Creates an `Ipv6Addr` from an eight element 16-bit array.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::Ipv6Addr;
    ///
    /// let addr = Ipv6Addr::from([
    ///     525u16, 524u16, 523u16, 522u16,
    ///     521u16, 520u16, 519u16, 518u16,
    /// ]);
    /// assert_eq!(
    ///     Ipv6Addr::new(
    ///         0x20d, 0x20c,
    ///         0x20b, 0x20a,
    ///         0x209, 0x208,
    ///         0x207, 0x206
    ///     ),
    ///     addr
    /// );
    /// ```
    fn from(segments: [u16; 8]) -> Ipv6Addr {
        // Forward every group positionally to the primary constructor.
        Ipv6Addr::new(
            segments[0], segments[1], segments[2], segments[3],
            segments[4], segments[5], segments[6], segments[7],
        )
    }
}
#[stable(feature = "ip_from_slice", since = "1.17.0")]
impl From<[u8; 16]> for IpAddr {
    /// Creates an `IpAddr::V6` from a sixteen element byte array.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv6Addr};
    ///
    /// let addr = IpAddr::from([
    ///     25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8,
    ///     17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8,
    /// ]);
    /// assert_eq!(
    ///     IpAddr::V6(Ipv6Addr::new(
    ///         0x1918, 0x1716,
    ///         0x1514, 0x1312,
    ///         0x1110, 0x0f0e,
    ///         0x0d0c, 0x0b0a
    ///     )),
    ///     addr
    /// );
    /// ```
    fn from(octets: [u8; 16]) -> IpAddr {
        // Delegate the byte conversion to `Ipv6Addr`, then wrap in `V6`.
        IpAddr::V6(octets.into())
    }
}
#[stable(feature = "ip_from_slice", since = "1.17.0")]
impl From<[u16; 8]> for IpAddr {
    /// Creates an `IpAddr::V6` from an eight element 16-bit array.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::net::{IpAddr, Ipv6Addr};
    ///
    /// let addr = IpAddr::from([
    ///     525u16, 524u16, 523u16, 522u16,
    ///     521u16, 520u16, 519u16, 518u16,
    /// ]);
    /// assert_eq!(
    ///     IpAddr::V6(Ipv6Addr::new(
    ///         0x20d, 0x20c,
    ///         0x20b, 0x20a,
    ///         0x209, 0x208,
    ///         0x207, 0x206
    ///     )),
    ///     addr
    /// );
    /// ```
    fn from(segments: [u16; 8]) -> IpAddr {
        // Delegate the group conversion to `Ipv6Addr`, then wrap in `V6`.
        IpAddr::V6(segments.into())
    }
}
// Tests for this module
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use crate::net::test::{sa4, sa6, tsa};
use crate::net::*;
use crate::str::FromStr;
#[test]
fn test_from_str_ipv4() {
    // Valid dotted-quad strings round-trip to the expected address; each
    // malformed class below must be rejected by the parser.
    assert_eq!(Ok(Ipv4Addr::new(127, 0, 0, 1)), "127.0.0.1".parse());
    assert_eq!(Ok(Ipv4Addr::new(255, 255, 255, 255)), "255.255.255.255".parse());
    assert_eq!(Ok(Ipv4Addr::new(0, 0, 0, 0)), "0.0.0.0".parse());
    // out of range (octets must fit in u8)
    let none: Option<Ipv4Addr> = "256.0.0.1".parse().ok();
    assert_eq!(None, none);
    // too short (only three octets)
    let none: Option<Ipv4Addr> = "255.0.0".parse().ok();
    assert_eq!(None, none);
    // too long (five octets)
    let none: Option<Ipv4Addr> = "255.0.0.1.2".parse().ok();
    assert_eq!(None, none);
    // no number between dots
    let none: Option<Ipv4Addr> = "255.0..1".parse().ok();
    assert_eq!(None, none);
}
#[test]
fn test_from_str_ipv6() {
    // Full-form and `::`-compressed IPv6 strings parse correctly; the
    // rejected cases cover the main structural errors.
    assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "0:0:0:0:0:0:0:0".parse());
    assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "0:0:0:0:0:0:0:1".parse());
    assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "::1".parse());
    assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "::".parse());
    assert_eq!(
        Ok(Ipv6Addr::new(0x2a02, 0x6b8, 0, 0, 0, 0, 0x11, 0x11)),
        "2a02:6b8::11:11".parse()
    );
    // too long group (a group is at most four hex digits)
    let none: Option<Ipv6Addr> = "::00000".parse().ok();
    assert_eq!(None, none);
    // too short (seven groups without `::`)
    let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7".parse().ok();
    assert_eq!(None, none);
    // too long (nine groups)
    let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:8:9".parse().ok();
    assert_eq!(None, none);
    // triple colon
    let none: Option<Ipv6Addr> = "1:2:::6:7:8".parse().ok();
    assert_eq!(None, none);
    // two double colons
    let none: Option<Ipv6Addr> = "1:2::6::8".parse().ok();
    assert_eq!(None, none);
    // `::` indicating zero groups of zeros
    let none: Option<Ipv6Addr> = "1:2:3:4::5:6:7:8".parse().ok();
    assert_eq!(None, none);
}
#[test]
fn test_from_str_ipv4_in_ipv6() {
    // IPv4-embedded notation: the last 32 bits of an IPv6 address may be
    // written as a dotted quad (e.g. `::ffff:192.0.2.33`).
    assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 49152, 545)), "::192.0.2.33".parse());
    assert_eq!(
        Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0xFFFF, 49152, 545)),
        "::FFFF:192.0.2.33".parse()
    );
    assert_eq!(
        Ok(Ipv6Addr::new(0x64, 0xff9b, 0, 0, 0, 0, 49152, 545)),
        "64:ff9b::192.0.2.33".parse()
    );
    assert_eq!(
        Ok(Ipv6Addr::new(0x2001, 0xdb8, 0x122, 0xc000, 0x2, 0x2100, 49152, 545)),
        "2001:db8:122:c000:2:2100:192.0.2.33".parse()
    );
    // colon after v4
    // Fixed: this case previously parsed into `Option<Ipv4Addr>`, which
    // trivially fails on any string containing a colon, so it never
    // exercised the IPv6 parser it was meant to test.
    let none: Option<Ipv6Addr> = "::127.0.0.1:".parse().ok();
    assert_eq!(None, none);
    // not enough groups before the embedded v4 part
    let none: Option<Ipv6Addr> = "1.2.3.4.5:127.0.0.1".parse().ok();
    assert_eq!(None, none);
    // too many groups before the embedded v4 part
    let none: Option<Ipv6Addr> = "1.2.3.4.5:6:7:127.0.0.1".parse().ok();
    assert_eq!(None, none);
}
#[test]
fn test_from_str_socket_addr() {
    // Socket-address parsing: `ip:port` for v4 and `[ip]:port` for v6, via
    // both the generic `SocketAddr` helpers (sa4/sa6) and the concrete types.
    assert_eq!(Ok(sa4(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse());
    assert_eq!(
        Ok(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80)),
        "77.88.21.11:80".parse()
    );
    assert_eq!(
        Ok(sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53)),
        "[2a02:6b8:0:1::1]:53".parse()
    );
    assert_eq!(
        Ok(SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0)),
        "[2a02:6b8:0:1::1]:53".parse()
    );
    assert_eq!(
        Ok(sa6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22)),
        "[::127.0.0.1]:22".parse()
    );
    assert_eq!(
        Ok(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22, 0, 0)),
        "[::127.0.0.1]:22".parse()
    );
    // without port
    let none: Option<SocketAddr> = "127.0.0.1".parse().ok();
    assert_eq!(None, none);
    // without port (trailing colon but no digits)
    let none: Option<SocketAddr> = "127.0.0.1:".parse().ok();
    assert_eq!(None, none);
    // wrong brackets around v4
    let none: Option<SocketAddr> = "[127.0.0.1]:22".parse().ok();
    assert_eq!(None, none);
    // port out of range (must fit in u16)
    let none: Option<SocketAddr> = "127.0.0.1:123456".parse().ok();
    assert_eq!(None, none);
}
#[test]
fn ipv4_addr_to_string() {
    // Display formatting of IPv4 addresses, including formatter width flags.
    // Short address
    assert_eq!(Ipv4Addr::new(1, 1, 1, 1).to_string(), "1.1.1.1");
    // Long address
    assert_eq!(Ipv4Addr::new(127, 127, 127, 127).to_string(), "127.127.127.127");
    // Test padding (default left-aligned, then right-aligned, to width 16)
    assert_eq!(&format!("{:16}", Ipv4Addr::new(1, 1, 1, 1)), "1.1.1.1         ");
    assert_eq!(&format!("{:>16}", Ipv4Addr::new(1, 1, 1, 1)), "         1.1.1.1");
}
#[test]
fn ipv6_addr_to_string() {
    // Display formatting of IPv6 addresses: embedded-IPv4 forms, RFC 5952
    // zero compression, and formatter padding.
    // ipv4-mapped address
    let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280);
    assert_eq!(a1.to_string(), "::ffff:192.0.2.128");
    // ipv4-compatible address
    let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280);
    assert_eq!(a1.to_string(), "::192.0.2.128");
    // v6 address with no zero segments
    assert_eq!(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15).to_string(), "8:9:a:b:c:d:e:f");
    // longest possible IPv6 length
    assert_eq!(
        Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888)
            .to_string(),
        "1111:2222:3333:4444:5555:6666:7777:8888"
    );
    // padding (left- then right-aligned, width 20)
    assert_eq!(
        &format!("{:20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)),
        "1:2:3:4:5:6:7:8     "
    );
    assert_eq!(
        &format!("{:>20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)),
        "     1:2:3:4:5:6:7:8"
    );
    // reduce a single run of zeros
    assert_eq!(
        "ae::ffff:102:304",
        Ipv6Addr::new(0xae, 0, 0, 0, 0, 0xffff, 0x0102, 0x0304).to_string()
    );
    // don't reduce just a single zero segment
    assert_eq!("1:2:3:4:5:6:0:8", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 0, 8).to_string());
    // 'any' address
    assert_eq!("::", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).to_string());
    // loopback address
    assert_eq!("::1", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_string());
    // ends in zeros
    assert_eq!("1::", Ipv6Addr::new(1, 0, 0, 0, 0, 0, 0, 0).to_string());
    // two runs of zeros, second one is longer
    assert_eq!("1:0:0:4::8", Ipv6Addr::new(1, 0, 0, 4, 0, 0, 0, 8).to_string());
    // two runs of zeros, equal length (RFC 5952 tie-break: compress the first)
    assert_eq!("1::4:5:0:0:8", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8).to_string());
}
#[test]
fn ipv4_to_ipv6() {
    // Embedding an IPv4 address: mapped uses the ::ffff:0:0/96 prefix,
    // compatible uses the plain ::/96 prefix.
    assert_eq!(
        Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678),
        Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_mapped()
    );
    assert_eq!(
        Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678),
        Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_compatible()
    );
}
#[test]
fn ipv6_to_ipv4() {
    // Extracting an embedded IPv4 address: both mapped and compatible forms
    // succeed; anything with a nonzero segment in the leading groups fails.
    assert_eq!(
        Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4(),
        Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
    );
    assert_eq!(
        Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4(),
        Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78))
    );
    assert_eq!(Ipv6Addr::new(0, 0, 1, 0, 0, 0, 0x1234, 0x5678).to_ipv4(), None);
}
#[test]
fn ip_properties() {
    // Exercises the classification predicates on `IpAddr` for a matrix of
    // addresses. Each `check!` call takes an address string plus a bitmask
    // of the properties that must hold; every unset bit is asserted false.
    macro_rules! ip {
        ($s:expr) => {
            IpAddr::from_str($s).unwrap()
        };
    }
    macro_rules! check {
        ($s:expr) => {
            check!($s, 0);
        };
        ($s:expr, $mask:expr) => {{
            // Flag constants are re-declared inside the macro body for its
            // own use; the identically-named `let`s in the test body below
            // serve the `check!` call sites (macro hygiene keeps them apart).
            let unspec: u8 = 1 << 0;
            let loopback: u8 = 1 << 1;
            let global: u8 = 1 << 2;
            let multicast: u8 = 1 << 3;
            let doc: u8 = 1 << 4;
            if ($mask & unspec) == unspec {
                assert!(ip!($s).is_unspecified());
            } else {
                assert!(!ip!($s).is_unspecified());
            }
            if ($mask & loopback) == loopback {
                assert!(ip!($s).is_loopback());
            } else {
                assert!(!ip!($s).is_loopback());
            }
            if ($mask & global) == global {
                assert!(ip!($s).is_global());
            } else {
                assert!(!ip!($s).is_global());
            }
            if ($mask & multicast) == multicast {
                assert!(ip!($s).is_multicast());
            } else {
                assert!(!ip!($s).is_multicast());
            }
            if ($mask & doc) == doc {
                assert!(ip!($s).is_documentation());
            } else {
                assert!(!ip!($s).is_documentation());
            }
        }};
    }
    let unspec: u8 = 1 << 0;
    let loopback: u8 = 1 << 1;
    let global: u8 = 1 << 2;
    let multicast: u8 = 1 << 3;
    let doc: u8 = 1 << 4;
    check!("0.0.0.0", unspec);
    check!("0.0.0.1");
    check!("0.1.0.0");
    check!("10.9.8.7");
    check!("127.1.2.3", loopback);
    check!("172.31.254.253");
    check!("169.254.253.242");
    check!("192.0.2.183", doc);
    check!("192.1.2.183", global);
    check!("192.168.254.253");
    check!("198.51.100.0", doc);
    check!("203.0.113.0", doc);
    check!("203.2.113.0", global);
    check!("224.0.0.0", global | multicast);
    check!("239.255.255.255", global | multicast);
    check!("255.255.255.255");
    // make sure benchmarking addresses are not global
    check!("198.18.0.0");
    check!("198.18.54.2");
    check!("198.19.255.255");
    // make sure addresses reserved for protocol assignment are not global
    check!("192.0.0.0");
    check!("192.0.0.255");
    check!("192.0.0.100");
    // make sure reserved addresses are not global
    check!("240.0.0.0");
    check!("251.54.1.76");
    check!("254.255.255.255");
    // make sure shared addresses are not global
    check!("100.64.0.0");
    check!("100.127.255.255");
    check!("100.100.100.0");
    check!("::", unspec);
    check!("::1", loopback);
    check!("::0.0.0.2", global);
    check!("1::", global);
    check!("fc00::");
    check!("fdff:ffff::");
    check!("fe80:ffff::");
    check!("febf:ffff::");
    check!("fec0::", global);
    check!("ff01::", multicast);
    check!("ff02::", multicast);
    check!("ff03::", multicast);
    check!("ff04::", multicast);
    check!("ff05::", multicast);
    check!("ff08::", multicast);
    check!("ff0e::", global | multicast);
    check!("2001:db8:85a3::8a2e:370:7334", doc);
    check!("102:304:506:708:90a:b0c:d0e:f10", global);
}
#[test]
fn ipv4_properties() {
    // Exercises every `Ipv4Addr` classification predicate against a matrix
    // of addresses. As in `ip_properties`, `check!` takes the address plus a
    // bitmask of the properties expected to hold; all other predicates must
    // return false.
    macro_rules! ip {
        ($s:expr) => {
            Ipv4Addr::from_str($s).unwrap()
        };
    }
    macro_rules! check {
        ($s:expr) => {
            check!($s, 0);
        };
        ($s:expr, $mask:expr) => {{
            // Flags re-declared inside the macro for its own use; the outer
            // `let`s below serve the call sites (macro hygiene separates them).
            let unspec: u16 = 1 << 0;
            let loopback: u16 = 1 << 1;
            let private: u16 = 1 << 2;
            let link_local: u16 = 1 << 3;
            let global: u16 = 1 << 4;
            let multicast: u16 = 1 << 5;
            let broadcast: u16 = 1 << 6;
            let documentation: u16 = 1 << 7;
            let benchmarking: u16 = 1 << 8;
            let ietf_protocol_assignment: u16 = 1 << 9;
            let reserved: u16 = 1 << 10;
            let shared: u16 = 1 << 11;
            if ($mask & unspec) == unspec {
                assert!(ip!($s).is_unspecified());
            } else {
                assert!(!ip!($s).is_unspecified());
            }
            if ($mask & loopback) == loopback {
                assert!(ip!($s).is_loopback());
            } else {
                assert!(!ip!($s).is_loopback());
            }
            if ($mask & private) == private {
                assert!(ip!($s).is_private());
            } else {
                assert!(!ip!($s).is_private());
            }
            if ($mask & link_local) == link_local {
                assert!(ip!($s).is_link_local());
            } else {
                assert!(!ip!($s).is_link_local());
            }
            if ($mask & global) == global {
                assert!(ip!($s).is_global());
            } else {
                assert!(!ip!($s).is_global());
            }
            if ($mask & multicast) == multicast {
                assert!(ip!($s).is_multicast());
            } else {
                assert!(!ip!($s).is_multicast());
            }
            if ($mask & broadcast) == broadcast {
                assert!(ip!($s).is_broadcast());
            } else {
                assert!(!ip!($s).is_broadcast());
            }
            if ($mask & documentation) == documentation {
                assert!(ip!($s).is_documentation());
            } else {
                assert!(!ip!($s).is_documentation());
            }
            if ($mask & benchmarking) == benchmarking {
                assert!(ip!($s).is_benchmarking());
            } else {
                assert!(!ip!($s).is_benchmarking());
            }
            if ($mask & ietf_protocol_assignment) == ietf_protocol_assignment {
                assert!(ip!($s).is_ietf_protocol_assignment());
            } else {
                assert!(!ip!($s).is_ietf_protocol_assignment());
            }
            if ($mask & reserved) == reserved {
                assert!(ip!($s).is_reserved());
            } else {
                assert!(!ip!($s).is_reserved());
            }
            if ($mask & shared) == shared {
                assert!(ip!($s).is_shared());
            } else {
                assert!(!ip!($s).is_shared());
            }
        }};
    }
    let unspec: u16 = 1 << 0;
    let loopback: u16 = 1 << 1;
    let private: u16 = 1 << 2;
    let link_local: u16 = 1 << 3;
    let global: u16 = 1 << 4;
    let multicast: u16 = 1 << 5;
    let broadcast: u16 = 1 << 6;
    let documentation: u16 = 1 << 7;
    let benchmarking: u16 = 1 << 8;
    let ietf_protocol_assignment: u16 = 1 << 9;
    let reserved: u16 = 1 << 10;
    let shared: u16 = 1 << 11;
    check!("0.0.0.0", unspec);
    check!("0.0.0.1");
    check!("0.1.0.0");
    check!("10.9.8.7", private);
    check!("127.1.2.3", loopback);
    check!("172.31.254.253", private);
    check!("169.254.253.242", link_local);
    check!("192.0.2.183", documentation);
    check!("192.1.2.183", global);
    check!("192.168.254.253", private);
    check!("198.51.100.0", documentation);
    check!("203.0.113.0", documentation);
    check!("203.2.113.0", global);
    check!("224.0.0.0", global | multicast);
    check!("239.255.255.255", global | multicast);
    check!("255.255.255.255", broadcast);
    check!("198.18.0.0", benchmarking);
    check!("198.18.54.2", benchmarking);
    check!("198.19.255.255", benchmarking);
    check!("192.0.0.0", ietf_protocol_assignment);
    check!("192.0.0.255", ietf_protocol_assignment);
    check!("192.0.0.100", ietf_protocol_assignment);
    check!("240.0.0.0", reserved);
    check!("251.54.1.76", reserved);
    check!("254.255.255.255", reserved);
    check!("100.64.0.0", shared);
    check!("100.127.255.255", shared);
    check!("100.100.100.0", shared);
}
#[test]
fn ipv6_properties() {
macro_rules! ip {
($s:expr) => {
Ipv6Addr::from_str($s).unwrap()
};
}
macro_rules! check {
($s:expr, &[$($octet:expr),*], $mask:expr) => {
assert_eq!($s, ip!($s).to_string());
let octets = &[$($octet),*];
assert_eq!(&ip!($s).octets(), octets);
assert_eq!(Ipv6Addr::from(*octets), ip!($s));
let unspecified: u16 = 1 << 0;
let loopback: u16 = 1 << 1;
let unique_local: u16 = 1 << 2;
let global: u16 = 1 << 3;
let unicast_link_local: u16 = 1 << 4;
let unicast_link_local_strict: u16 = 1 << 5;
let unicast_site_local: u16 = 1 << 6;
let unicast_global: u16 = 1 << 7;
let documentation: u16 = 1 << 8;
let multicast_interface_local: u16 = 1 << 9;
let multicast_link_local: u16 = 1 << 10;
let multicast_realm_local: u16 = 1 << 11;
let multicast_admin_local: u16 = 1 << 12;
let multicast_site_local: u16 = 1 << 13;
let multicast_organization_local: u16 = 1 << 14;
let multicast_global: u16 = 1 << 15;
let multicast: u16 = multicast_interface_local
| multicast_admin_local
| multicast_global
| multicast_link_local
| multicast_realm_local
| multicast_site_local
| multicast_organization_local;
if ($mask & unspecified) == unspecified {
assert!(ip!($s).is_unspecified());
} else {
assert!(!ip!($s).is_unspecified());
}
if ($mask & loopback) == loopback {
assert!(ip!($s).is_loopback());
} else {
assert!(!ip!($s).is_loopback());
}
if ($mask & unique_local) == unique_local {
assert!(ip!($s).is_unique_local());
} else {
assert!(!ip!($s).is_unique_local());
}
if ($mask & global) == global {
assert!(ip!($s).is_global());
} else {
assert!(!ip!($s).is_global());
}
if ($mask & unicast_link_local) == unicast_link_local {
assert!(ip!($s).is_unicast_link_local());
} else {
assert!(!ip!($s).is_unicast_link_local());
}
if ($mask & unicast_link_local_strict) == unicast_link_local_strict {
assert!(ip!($s).is_unicast_link_local_strict());
} else {
assert!(!ip!($s).is_unicast_link_local_strict());
}
if ($mask & unicast_site_local) == unicast_site_local {
assert!(ip!($s).is_unicast_site_local());
} else {
assert!(!ip!($s).is_unicast_site_local());
}
if ($mask & unicast_global) == unicast_global {
assert!(ip!($s).is_unicast_global());
} else {
assert!(!ip!($s).is_unicast_global());
}
if ($mask & documentation) == documentation {
assert!(ip!($s).is_documentation());
} else {
assert!(!ip!($s).is_documentation());
}
if ($mask & multicast) != 0 {
assert!(ip!($s).multicast_scope().is_some());
assert!(ip!($s).is_multicast());
} else {
assert!(ip!($s).multicast_scope().is_none());
assert!(!ip!($s).is_multicast());
}
if ($mask & multicast_interface_local) == multicast_interface_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::InterfaceLocal);
}
if ($mask & multicast_link_local) == multicast_link_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::LinkLocal);
}
if ($mask & multicast_realm_local) == multicast_realm_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::RealmLocal);
}
if ($mask & multicast_admin_local) == multicast_admin_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::AdminLocal);
}
if ($mask & multicast_site_local) == multicast_site_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::SiteLocal);
}
if ($mask & multicast_organization_local) == multicast_organization_local {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::OrganizationLocal);
}
if ($mask & multicast_global) == multicast_global {
assert_eq!(ip!($s).multicast_scope().unwrap(),
Ipv6MulticastScope::Global);
}
}
}
let unspecified: u16 = 1 << 0;
let loopback: u16 = 1 << 1;
let unique_local: u16 = 1 << 2;
let global: u16 = 1 << 3;
let unicast_link_local: u16 = 1 << 4;
let unicast_link_local_strict: u16 = 1 << 5;
let unicast_site_local: u16 = 1 << 6;
let unicast_global: u16 = 1 << 7;
let documentation: u16 = 1 << 8;
let multicast_interface_local: u16 = 1 << 9;
let multicast_link_local: u16 = 1 << 10;
let multicast_realm_local: u16 = 1 << 11;
let multicast_admin_local: u16 = 1 << 12;
let multicast_site_local: u16 = 1 << 13;
let multicast_organization_local: u16 = 1 << 14;
let multicast_global: u16 = 1 << 15;
check!("::", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unspecified);
check!("::1", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], loopback);
check!(
"::0.0.0.2",
&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2],
global | unicast_global
);
check!("1::", &[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], global | unicast_global);
check!("fc00::", &[0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unique_local);
check!(
"fdff:ffff::",
&[0xfd, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unique_local
);
check!(
"fe80:ffff::",
&[0xfe, 0x80, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_link_local
);
check!(
"fe80::",
&[0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_link_local | unicast_link_local_strict
);
check!(
"febf:ffff::",
&[0xfe, 0xbf, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_link_local
);
check!(
"febf::",
&[0xfe, 0xbf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_link_local
);
check!(
"febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff",
&[
0xfe, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff
],
unicast_link_local
);
check!(
"fe80::ffff:ffff:ffff:ffff",
&[
0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff
],
unicast_link_local | unicast_link_local_strict
);
check!(
"fe80:0:0:1::",
&[0xfe, 0x80, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_link_local
);
check!(
"fec0::",
&[0xfe, 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
unicast_site_local | unicast_global | global
);
check!(
"ff01::",
&[0xff, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_interface_local
);
check!(
"ff02::",
&[0xff, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_link_local
);
check!(
"ff03::",
&[0xff, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_realm_local
);
check!(
"ff04::",
&[0xff, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_admin_local
);
check!(
"ff05::",
&[0xff, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_site_local
);
check!(
"ff08::",
&[0xff, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_organization_local
);
check!(
"ff0e::",
&[0xff, 0xe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
multicast_global | global
);
check!(
"2001:db8:85a3::8a2e:370:7334",
&[0x20, 1, 0xd, 0xb8, 0x85, 0xa3, 0, 0, 0, 0, 0x8a, 0x2e, 3, 0x70, 0x73, 0x34],
documentation
);
check!(
"102:304:506:708:90a:b0c:d0e:f10",
&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
global | unicast_global
);
}
// `SocketAddr` already implements `ToSocketAddrs`; resolving it should yield itself.
#[test]
fn to_socket_addr_socketaddr() {
let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 12345);
assert_eq!(Ok(vec![a]), tsa(a));
}
// Big-endian (network order) conversion: first octet becomes the high byte.
#[test]
fn test_ipv4_to_int() {
let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44);
assert_eq!(u32::from(a), 0x11223344);
}
// Round-trip of the conversion above.
#[test]
fn test_int_to_ipv4() {
let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44);
assert_eq!(Ipv4Addr::from(0x11223344), a);
}
// IPv6 <-> u128 uses network byte order as well: first segment is most significant.
#[test]
fn test_ipv6_to_int() {
let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11);
assert_eq!(u128::from(a), 0x112233445566778899aabbccddeeff11u128);
}
// Round-trip of the IPv6 conversion above.
#[test]
fn test_int_to_ipv6() {
let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11);
assert_eq!(Ipv6Addr::from(0x112233445566778899aabbccddeeff11u128), a);
}
// The associated constants must match the literal addresses and satisfy
// their corresponding classification predicates.
#[test]
fn ipv4_from_constructors() {
assert_eq!(Ipv4Addr::LOCALHOST, Ipv4Addr::new(127, 0, 0, 1));
assert!(Ipv4Addr::LOCALHOST.is_loopback());
assert_eq!(Ipv4Addr::UNSPECIFIED, Ipv4Addr::new(0, 0, 0, 0));
assert!(Ipv4Addr::UNSPECIFIED.is_unspecified());
assert_eq!(Ipv4Addr::BROADCAST, Ipv4Addr::new(255, 255, 255, 255));
assert!(Ipv4Addr::BROADCAST.is_broadcast());
}
// NOTE(review): function name has a typo ("contructors"); harmless for a
// #[test] (nothing calls it by name) but worth renaming in a follow-up.
#[test]
fn ipv6_from_contructors() {
assert_eq!(Ipv6Addr::LOCALHOST, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
assert!(Ipv6Addr::LOCALHOST.is_loopback());
assert_eq!(Ipv6Addr::UNSPECIFIED, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
assert!(Ipv6Addr::UNSPECIFIED.is_unspecified());
}
// From<[u8; 4]> must agree with the four-argument constructor.
#[test]
fn ipv4_from_octets() {
assert_eq!(Ipv4Addr::from([127, 0, 0, 1]), Ipv4Addr::new(127, 0, 0, 1))
}
// From<[u16; 8]> (segments) must agree with the eight-argument constructor.
#[test]
fn ipv6_from_segments() {
let from_u16s =
Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]);
let new = Ipv6Addr::new(0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff);
assert_eq!(new, from_u16s);
}
// From<[u8; 16]> (octets) must agree with From<[u16; 8]>: each segment is
// split big-endian into two octets.
#[test]
fn ipv6_from_octets() {
let from_u16s =
Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]);
let from_u8s = Ipv6Addr::from([
0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd,
0xee, 0xff,
]);
assert_eq!(from_u16s, from_u8s);
}
// Ordering and equality across Ipv4Addr/Ipv6Addr and the IpAddr enum:
// same-family values compare numerically, and every V4 sorts before every V6.
#[test]
fn cmp() {
let v41 = Ipv4Addr::new(100, 64, 3, 3);
let v42 = Ipv4Addr::new(192, 0, 2, 2);
let v61 = "2001:db8:f00::1002".parse::<Ipv6Addr>().unwrap();
let v62 = "2001:db8:f00::2001".parse::<Ipv6Addr>().unwrap();
assert!(v41 < v42);
assert!(v61 < v62);
assert_eq!(v41, IpAddr::V4(v41));
assert_eq!(v61, IpAddr::V6(v61));
assert!(v41 != IpAddr::V4(v42));
assert!(v61 != IpAddr::V6(v62));
assert!(v41 < IpAddr::V4(v42));
assert!(v61 < IpAddr::V6(v62));
assert!(IpAddr::V4(v41) < v42);
assert!(IpAddr::V6(v61) < v62);
// Cross-family: any IPv4 address is ordered before any IPv6 address.
assert!(v41 < IpAddr::V6(v61));
assert!(IpAddr::V4(v41) < v61);
}
// is_ipv4/is_ipv6 are mutually exclusive on a V4 value...
#[test]
fn is_v4() {
let ip = IpAddr::V4(Ipv4Addr::new(100, 64, 3, 3));
assert!(ip.is_ipv4());
assert!(!ip.is_ipv6());
}
// ...and on a V6 value (even an IPv4-mapped one like ::ffff:x.y.z.w).
#[test]
fn is_v6() {
let ip = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678));
assert!(!ip.is_ipv4());
assert!(ip.is_ipv6());
}
}
|
//! Functions for parsing DWARF debugging information.
use byteorder;
use constants;
use leb128;
use std::cell::{Cell, RefCell};
use std::collections::hash_map;
use std::error;
use std::fmt::{self, Debug};
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::ops::{Deref, Index, Range, RangeFrom, RangeTo};
/// A trait describing the endianity of some buffer.
///
/// All interesting methods are from the `byteorder` crate's `ByteOrder`
/// trait. All methods are static. You shouldn't instantiate concrete objects
/// that implement this trait, it is just used as compile-time phantom data.
/// (Implementors below are uninhabited enums, so no value can exist.)
pub trait Endianity
: byteorder::ByteOrder + Debug + Clone + Copy + PartialEq + Eq {
}
/// Little endian byte order.
///
/// An uninhabited enum used purely as a type-level tag; every method simply
/// delegates to `byteorder::LittleEndian`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LittleEndian {}
impl byteorder::ByteOrder for LittleEndian {
fn read_u16(buf: &[u8]) -> u16 {
byteorder::LittleEndian::read_u16(buf)
}
fn read_u32(buf: &[u8]) -> u32 {
byteorder::LittleEndian::read_u32(buf)
}
fn read_u64(buf: &[u8]) -> u64 {
byteorder::LittleEndian::read_u64(buf)
}
fn read_uint(buf: &[u8], nbytes: usize) -> u64 {
byteorder::LittleEndian::read_uint(buf, nbytes)
}
fn write_u16(buf: &mut [u8], n: u16) {
byteorder::LittleEndian::write_u16(buf, n)
}
fn write_u32(buf: &mut [u8], n: u32) {
byteorder::LittleEndian::write_u32(buf, n)
}
fn write_u64(buf: &mut [u8], n: u64) {
byteorder::LittleEndian::write_u64(buf, n)
}
fn write_uint(buf: &mut [u8], n: u64, nbytes: usize) {
byteorder::LittleEndian::write_uint(buf, n, nbytes)
}
}
impl Endianity for LittleEndian {}
/// Big endian byte order.
///
/// An uninhabited enum used purely as a type-level tag; every method simply
/// delegates to `byteorder::BigEndian`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BigEndian {}
impl byteorder::ByteOrder for BigEndian {
fn read_u16(buf: &[u8]) -> u16 {
byteorder::BigEndian::read_u16(buf)
}
fn read_u32(buf: &[u8]) -> u32 {
byteorder::BigEndian::read_u32(buf)
}
fn read_u64(buf: &[u8]) -> u64 {
byteorder::BigEndian::read_u64(buf)
}
fn read_uint(buf: &[u8], nbytes: usize) -> u64 {
byteorder::BigEndian::read_uint(buf, nbytes)
}
fn write_u16(buf: &mut [u8], n: u16) {
byteorder::BigEndian::write_u16(buf, n)
}
fn write_u32(buf: &mut [u8], n: u32) {
byteorder::BigEndian::write_u32(buf, n)
}
fn write_u64(buf: &mut [u8], n: u64) {
byteorder::BigEndian::write_u64(buf, n)
}
fn write_uint(buf: &mut [u8], n: u64, nbytes: usize) {
byteorder::BigEndian::write_uint(buf, n, nbytes)
}
}
impl Endianity for BigEndian {}
/// An error that occurred when parsing DWARF data.
///
/// A small, `Copy`-able enum; each variant's human-readable description
/// lives in the `error::Error::description` impl below.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Error {
/// An error parsing an unsigned LEB128 value.
BadUnsignedLeb128,
/// An error parsing a signed LEB128 value.
BadSignedLeb128,
/// An abbreviation declared that its code is zero, but zero is reserved for
/// null records.
AbbreviationCodeZero,
/// Found an unknown `DW_TAG_*` type.
UnknownTag,
/// The abbreviation's has-children byte was not one of
/// `DW_CHILDREN_{yes,no}`.
BadHasChildren,
/// Found an unknown `DW_FORM_*` type.
UnknownForm,
/// Expected a zero, found something else.
ExpectedZero,
/// Found an abbreviation code that has already been used.
DuplicateAbbreviationCode,
/// Found an unknown reserved length value.
UnknownReservedLength,
/// Found an unknown DWARF version.
UnknownVersion,
/// The unit header's claimed length is too short to even hold the header
/// itself.
UnitHeaderLengthTooShort,
/// Found a record with an unknown abbreviation code.
UnknownAbbreviation,
/// Hit the end of input before it was expected.
UnexpectedEof,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Debug::fmt(self, f)
}
}
impl error::Error for Error {
// Static description strings mirroring each variant's doc comment.
// Note: multi-line arms embed the source's literal newline and indentation
// in the returned string.
fn description(&self) -> &str {
match *self {
Error::BadUnsignedLeb128 => "An error parsing an unsigned LEB128 value",
Error::BadSignedLeb128 => "An error parsing a signed LEB128 value",
Error::AbbreviationCodeZero => {
"An abbreviation declared that its code is zero,
but zero is reserved for null records"
}
Error::UnknownTag => "Found an unknown `DW_TAG_*` type",
Error::BadHasChildren => {
"The abbreviation's has-children byte was not one of
`DW_CHILDREN_{yes,no}`"
}
Error::UnknownForm => "Found an unknown `DW_FORM_*` type",
Error::ExpectedZero => "Expected a zero, found something else",
Error::DuplicateAbbreviationCode => {
"Found an abbreviation code that has already been used"
}
Error::UnknownReservedLength => "Found an unknown reserved length value",
Error::UnknownVersion => "Found an unknown DWARF version",
Error::UnitHeaderLengthTooShort => {
"The unit header's claimed length is too short to even hold
the header itself"
}
Error::UnknownAbbreviation => "Found a record with an unknown abbreviation code",
Error::UnexpectedEof => "Hit the end of input before it was expected",
}
}
}
/// The result of a parse.
pub type ParseResult<T> = Result<T, Error>;
/// A &[u8] slice with compile-time endianity metadata.
///
/// `PhantomData<Endian>` carries the byte order in the type; no per-value
/// storage or runtime cost is added over the bare slice.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct EndianBuf<'input, Endian>(&'input [u8], PhantomData<Endian>) where Endian: Endianity;
impl<'input, Endian> EndianBuf<'input, Endian>
where Endian: Endianity
{
fn new(buf: &'input [u8]) -> EndianBuf<'input, Endian> {
EndianBuf(buf, PhantomData)
}
// Unfortunately, std::ops::Index *must* return a reference, so we can't
// implement Index<Range<usize>> to return a new EndianBuf the way we would
// like to. Instead, we abandon fancy indexing operators and have these
// plain old methods.
#[allow(dead_code)]
fn range_from(&self, idx: RangeFrom<usize>) -> EndianBuf<'input, Endian> {
EndianBuf(&self.0[idx], self.1)
}
fn range_to(&self, idx: RangeTo<usize>) -> EndianBuf<'input, Endian> {
EndianBuf(&self.0[idx], self.1)
}
}
// Single-byte indexing is fine through `Index`; range indexing is handled by
// the `range_*` methods above (see the comment there).
impl<'input, Endian> Index<usize> for EndianBuf<'input, Endian>
where Endian: Endianity
{
type Output = u8;
fn index(&self, idx: usize) -> &Self::Output {
&self.0[idx]
}
}
// Deref to the underlying slice so `&[u8]` methods (`len`, `is_empty`, ...)
// work directly on an `EndianBuf`.
impl<'input, Endian> Deref for EndianBuf<'input, Endian>
where Endian: Endianity
{
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.0
}
}
// Cheap escape hatch back to the raw slice, preserving the input lifetime.
impl<'input, Endian> Into<&'input [u8]> for EndianBuf<'input, Endian>
where Endian: Endianity
{
fn into(self) -> &'input [u8] {
self.0
}
}
fn parse_u8(input: &[u8]) -> ParseResult<(&[u8], u8)> {
if input.len() == 0 {
Err(Error::UnexpectedEof)
} else {
Ok((&input[1..], input[0]))
}
}
fn parse_u16<'input, Endian>(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, u16)>
where Endian: Endianity
{
if input.len() < 2 {
Err(Error::UnexpectedEof)
} else {
Ok((input.range_from(2..), Endian::read_u16(&input)))
}
}
fn parse_u32<'input, Endian>(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, u32)>
where Endian: Endianity
{
if input.len() < 4 {
Err(Error::UnexpectedEof)
} else {
Ok((input.range_from(4..), Endian::read_u32(&input)))
}
}
fn parse_u64<'input, Endian>(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, u64)>
where Endian: Endianity
{
if input.len() < 8 {
Err(Error::UnexpectedEof)
} else {
Ok((input.range_from(8..), Endian::read_u64(&input)))
}
}
fn parse_u32_as_u64<'input, Endian>(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, u64)>
where Endian: Endianity
{
if input.len() < 4 {
Err(Error::UnexpectedEof)
} else {
Ok((input.range_from(4..), Endian::read_u32(&input) as u64))
}
}
// Newtype wrappers around `u64` so that offsets into different DWARF
// sections cannot be accidentally interchanged.
/// An offset into the `.debug_types` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugTypesOffset(pub u64);
/// An offset into the `.debug_str` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugStrOffset(pub u64);
/// An offset into the `.debug_abbrev` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugAbbrevOffset(pub u64);
/// An offset into the `.debug_info` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugInfoOffset(pub u64);
/// An offset into the `.debug_line` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugLineOffset(pub u64);
/// An offset into the `.debug_loc` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugLocOffset(pub u64);
/// An offset into the `.debug_macinfo` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugMacinfoOffset(pub u64);
/// An offset into the current compilation or type unit.
// Also ordered (unlike the section offsets above), since DIE offsets within
// a unit are compared.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
pub struct UnitOffset(pub u64);
/// The `DebugAbbrev` struct represents the abbreviations describing
/// `DebuggingInformationEntry`s' attribute names and forms found in the
/// `.debug_abbrev` section.
#[derive(Debug, Clone, Copy)]
pub struct DebugAbbrev<'input, Endian>
where Endian: Endianity
{
// The raw `.debug_abbrev` bytes, tagged with their byte order.
debug_abbrev_section: EndianBuf<'input, Endian>,
}
impl<'input, Endian> DebugAbbrev<'input, Endian>
where Endian: Endianity
{
/// Construct a new `DebugAbbrev` instance from the data in the `.debug_abbrev`
/// section.
///
/// It is the caller's responsibility to read the `.debug_abbrev` section and
/// present it as a `&[u8]` slice. That means using some ELF loader on
/// Linux, a Mach-O loader on OSX, etc.
///
/// ```
/// use gimli::{DebugAbbrev, LittleEndian};
///
/// # let buf = [0x00, 0x01, 0x02, 0x03];
/// # let read_debug_abbrev_section_somehow = || &buf;
/// let debug_abbrev = DebugAbbrev::<LittleEndian>::new(read_debug_abbrev_section_somehow());
/// ```
pub fn new(debug_abbrev_section: &'input [u8]) -> DebugAbbrev<'input, Endian> {
DebugAbbrev { debug_abbrev_section: EndianBuf(debug_abbrev_section, PhantomData) }
}
}
/// The `DebugInfo` struct represents the DWARF debugging information found in
/// the `.debug_info` section.
#[derive(Debug, Clone, Copy)]
pub struct DebugInfo<'input, Endian>
where Endian: Endianity
{
// The raw `.debug_info` bytes, tagged with their byte order.
debug_info_section: EndianBuf<'input, Endian>,
}
impl<'input, Endian> DebugInfo<'input, Endian>
where Endian: Endianity
{
/// Construct a new `DebugInfo` instance from the data in the `.debug_info`
/// section.
///
/// It is the caller's responsibility to read the `.debug_info` section and
/// present it as a `&[u8]` slice. That means using some ELF loader on
/// Linux, a Mach-O loader on OSX, etc.
///
/// ```
/// use gimli::{DebugInfo, LittleEndian};
///
/// # let buf = [0x00, 0x01, 0x02, 0x03];
/// # let read_debug_info_section_somehow = || &buf;
/// let debug_info = DebugInfo::<LittleEndian>::new(read_debug_info_section_somehow());
/// ```
pub fn new(debug_info_section: &'input [u8]) -> DebugInfo<'input, Endian> {
DebugInfo { debug_info_section: EndianBuf(debug_info_section, PhantomData) }
}
/// Iterate the compilation- and partial-units in this
/// `.debug_info` section.
///
/// ```
/// use gimli::{DebugInfo, LittleEndian};
///
/// # let buf = [];
/// # let read_debug_info_section_somehow = || &buf;
/// let debug_info = DebugInfo::<LittleEndian>::new(read_debug_info_section_somehow());
///
/// for parse_result in debug_info.units() {
/// let unit = parse_result.unwrap();
/// println!("unit's length is {}", unit.unit_length());
/// }
/// ```
pub fn units(&self) -> UnitHeadersIter<'input, Endian> {
UnitHeadersIter { input: self.debug_info_section }
}
}
/// An iterator over the compilation- and partial-units of a section.
///
/// See the [documentation on
/// `DebugInfo::units`](./struct.DebugInfo.html#method.units)
/// for more detail.
pub struct UnitHeadersIter<'input, Endian>
where Endian: Endianity
{
// Remaining unparsed input; truncated to empty to signal exhaustion.
input: EndianBuf<'input, Endian>,
}
impl<'input, Endian> Iterator for UnitHeadersIter<'input, Endian>
where Endian: Endianity
{
type Item = ParseResult<UnitHeader<'input, Endian>>;
// Yields each unit header in turn. On a parse error, or when a header
// claims more bytes than remain, the input is truncated to an empty slice
// so the next call returns `None` (fused-style termination).
fn next(&mut self) -> Option<Self::Item> {
if self.input.is_empty() {
None
} else {
match parse_unit_header(self.input) {
Ok((_, header)) => {
let unit_len = header.length_including_self() as usize;
if self.input.len() < unit_len {
// The claimed length overruns the section: still yield this
// header, but stop iterating afterwards.
self.input = self.input.range_to(..0);
} else {
// Skip the entire unit (header + DIEs) to reach the next one.
self.input = self.input.range_from(unit_len..);
}
Some(Ok(header))
}
Err(e) => {
// A parse error is fatal: yield it once, then terminate.
self.input = self.input.range_to(..0);
Some(Err(e))
}
}
}
}
}
// Exercises `DebugInfo::units()` over a buffer holding one 64-bit-format
// unit followed by one 32-bit-format unit, checking both parsed headers and
// that iteration terminates cleanly.
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_units() {
let buf = [
// First compilation unit.
// Enable 64-bit DWARF.
0xff, 0xff, 0xff, 0xff,
// Unit length = 43
0x2b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// Version 4
0x04, 0x00,
// debug_abbrev_offset
0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01,
// address size
0x08,
// Placeholder data for first compilation unit's DIEs.
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
// Second compilation unit
// 32-bit unit length = 39
0x27, 0x00, 0x00, 0x00,
// Version 4
0x04, 0x00,
// debug_abbrev_offset
0x05, 0x06, 0x07, 0x08,
// Address size
0x04,
// Placeholder data for second compilation unit's DIEs.
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
];
let debug_info = DebugInfo::<LittleEndian>::new(&buf);
let mut units = debug_info.units();
// First unit: 64-bit format, so the header is 23 bytes and the DIE
// placeholder is the following 32 bytes.
match units.next() {
Some(Ok(header)) => {
let expected = UnitHeader::<LittleEndian>::new(0x000000000000002b,
4,
DebugAbbrevOffset(0x0102030405060708),
8,
Format::Dwarf64,
&buf[23..23+32]);
assert_eq!(header, expected);
}
otherwise => panic!("Unexpected result: {:?}", otherwise),
}
// Second unit: 32-bit format; its DIE placeholder is the final 32 bytes.
match units.next() {
Some(Ok(header)) => {
let expected =
UnitHeader::new(0x00000027,
4,
DebugAbbrevOffset(0x08070605),
4,
Format::Dwarf32,
&buf[buf.len()-32..]);
assert_eq!(header, expected);
}
otherwise => panic!("Unexpected result: {:?}", otherwise),
}
assert!(units.next().is_none());
}
/// Parse an unsigned LEB128 encoded integer.
// `leb128::read` advances `input` in place; on success we hand back the
// advanced slice. An EOF-flavored I/O error is mapped to our own
// `UnexpectedEof`; any other failure becomes `BadUnsignedLeb128`.
fn parse_unsigned_leb(mut input: &[u8]) -> ParseResult<(&[u8], u64)> {
match leb128::read::unsigned(&mut input) {
Ok(val) => Ok((input, val)),
Err(leb128::read::Error::IoError(ref e)) if e.kind() == io::ErrorKind::UnexpectedEof => {
Err(Error::UnexpectedEof)
}
Err(_) => Err(Error::BadUnsignedLeb128),
}
}
/// Parse a signed LEB128 encoded integer.
// Same error-mapping strategy as `parse_unsigned_leb`.
fn parse_signed_leb(mut input: &[u8]) -> ParseResult<(&[u8], i64)> {
match leb128::read::signed(&mut input) {
Ok(val) => Ok((input, val)),
Err(leb128::read::Error::IoError(ref e)) if e.kind() == io::ErrorKind::UnexpectedEof => {
Err(Error::UnexpectedEof)
}
Err(_) => Err(Error::BadSignedLeb128),
}
}
/// Parse an abbreviation's code.
fn parse_abbreviation_code(input: &[u8]) -> ParseResult<(&[u8], u64)> {
let (rest, code) = try!(parse_unsigned_leb(input));
if code == 0 {
Err(Error::AbbreviationCodeZero)
} else {
Ok((rest, code))
}
}
/// Parse an abbreviation's tag.
// NOTE(review): a zero tag reuses `Error::AbbreviationCodeZero` even though
// it is the tag, not the code, that was zero -- presumably intentional
// reuse, but a dedicated variant would be clearer; confirm before changing
// since callers/tests may match on this variant.
fn parse_abbreviation_tag(input: &[u8]) -> ParseResult<(&[u8], constants::DwTag)> {
let (rest, val) = try!(parse_unsigned_leb(input));
if val == 0 {
Err(Error::AbbreviationCodeZero)
} else {
Ok((rest, constants::DwTag(val)))
}
}
/// Parse an abbreviation's "does the type have children?" byte.
fn parse_abbreviation_has_children(input: &[u8]) -> ParseResult<(&[u8], constants::DwChildren)> {
let (rest, val) = try!(parse_u8(input));
let val = constants::DwChildren(val);
if val == constants::DW_CHILDREN_no || val == constants::DW_CHILDREN_yes {
Ok((rest, val))
} else {
Err(Error::BadHasChildren)
}
}
/// Parse an attribute's name.
fn parse_attribute_name(input: &[u8]) -> ParseResult<(&[u8], constants::DwAt)> {
let (rest, val) = try!(parse_unsigned_leb(input));
Ok((rest, constants::DwAt(val)))
}
/// Parse an attribute's form.
fn parse_attribute_form(input: &[u8]) -> ParseResult<(&[u8], constants::DwForm)> {
let (rest, val) = try!(parse_unsigned_leb(input));
Ok((rest, constants::DwForm(val)))
}
/// The description of an attribute in an abbreviated type. It is a pair of name
/// and form.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct AttributeSpecification {
// The attribute's `DW_AT_*` name.
name: constants::DwAt,
// The attribute's `DW_FORM_*` encoding.
form: constants::DwForm,
}
impl AttributeSpecification {
/// Construct a new `AttributeSpecification` from the given name and form.
pub fn new(name: constants::DwAt, form: constants::DwForm) -> AttributeSpecification {
AttributeSpecification {
name: name,
form: form,
}
}
/// Get the attribute's name.
pub fn name(&self) -> constants::DwAt {
self.name
}
/// Get the attribute's form.
pub fn form(&self) -> constants::DwForm {
self.form
}
/// Return the size of the attribute, in bytes.
///
/// Note that because some attributes are variably sized, the size cannot
/// always be known without parsing, in which case we return `None`.
pub fn size<'me, 'input, 'unit, Endian>(&'me self,
header: &'unit UnitHeader<'input, Endian>)
-> Option<usize>
where Endian: Endianity
{
match self.form {
// Address-sized: depends on the target, recorded in the unit header.
constants::DW_FORM_addr => Some(header.address_size() as usize),
constants::DW_FORM_flag |
constants::DW_FORM_flag_present |
constants::DW_FORM_data1 |
constants::DW_FORM_ref1 => Some(1),
constants::DW_FORM_data2 |
constants::DW_FORM_ref2 => Some(2),
constants::DW_FORM_data4 |
constants::DW_FORM_ref4 => Some(4),
constants::DW_FORM_data8 |
constants::DW_FORM_ref8 => Some(8),
// Offset-sized: 4 bytes in 32-bit DWARF, 8 in 64-bit DWARF.
constants::DW_FORM_sec_offset |
constants::DW_FORM_ref_addr |
constants::DW_FORM_ref_sig8 |
constants::DW_FORM_strp => {
match header.format() {
Format::Dwarf32 => Some(4),
Format::Dwarf64 => Some(8),
}
}
// Variably sized (blocks, strings, LEB128s): size requires parsing.
constants::DW_FORM_block |
constants::DW_FORM_block1 |
constants::DW_FORM_block2 |
constants::DW_FORM_block4 |
constants::DW_FORM_exprloc |
constants::DW_FORM_ref_udata |
constants::DW_FORM_string |
constants::DW_FORM_sdata |
constants::DW_FORM_udata |
constants::DW_FORM_indirect => None,
// We don't know the size of unknown forms.
_ => None,
}
}
}
/// Parse a non-null attribute specification: a (name, form) pair of LEB128s.
fn parse_attribute_specification(input: &[u8]) -> ParseResult<(&[u8], AttributeSpecification)> {
    let (rest, name) = try!(parse_attribute_name(input));
    let (rest, form) = try!(parse_attribute_form(rest));
    Ok((rest, AttributeSpecification::new(name, form)))
}
/// Parse the null attribute specification.
// A null spec is a zero name followed by a zero form. Note the error order:
// a non-zero name fails with `ExpectedZero` *before* the form is read, so
// keep the two checks sequential.
fn parse_null_attribute_specification(input: &[u8]) -> ParseResult<(&[u8], ())> {
let (rest, name) = try!(parse_unsigned_leb(input));
if name != 0 {
return Err(Error::ExpectedZero);
}
let (rest, form) = try!(parse_unsigned_leb(rest));
if form != 0 {
return Err(Error::ExpectedZero);
}
Ok((rest, ()))
}
/// Parse a series of attribute specifications, terminated by a null attribute
/// specification.
fn parse_attribute_specifications(mut input: &[u8])
-> ParseResult<(&[u8], Vec<AttributeSpecification>)> {
let mut attrs = Vec::new();
loop {
// Try the null (terminator) form first; only if that fails, parse a
// real specification. The order matters: a (0, 0) pair must terminate
// the list, not become a spec.
let result = parse_null_attribute_specification(input).map(|(rest, _)| (rest, None));
let result = result.or_else(|_| parse_attribute_specification(input).map(|(rest, a)| (rest, Some(a))));
let (rest, attr) = try!(result);
input = rest;
match attr {
None => break,
Some(attr) => attrs.push(attr),
};
}
Ok((input, attrs))
}
/// An abbreviation describes the shape of a `DebuggingInformationEntry`'s type:
/// its code, tag type, whether it has children, and its set of attributes.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Abbreviation {
// Non-zero identifier; zero is reserved for null records.
code: u64,
tag: constants::DwTag,
has_children: constants::DwChildren,
attributes: Vec<AttributeSpecification>,
}
impl Abbreviation {
/// Construct a new `Abbreviation`.
///
/// ### Panics
///
/// Panics if `code` is `0`.
pub fn new(code: u64,
tag: constants::DwTag,
has_children: constants::DwChildren,
attributes: Vec<AttributeSpecification>)
-> Abbreviation {
// Zero codes are terminators; constructing one is a caller bug.
assert!(code != 0);
Abbreviation {
code: code,
tag: tag,
has_children: has_children,
attributes: attributes,
}
}
/// Get this abbreviation's code.
pub fn code(&self) -> u64 {
self.code
}
/// Get this abbreviation's tag.
pub fn tag(&self) -> constants::DwTag {
self.tag
}
/// Return true if this abbreviation's type has children, false otherwise.
pub fn has_children(&self) -> bool {
self.has_children == constants::DW_CHILDREN_yes
}
/// Get this abbreviation's attributes.
pub fn attributes(&self) -> &[AttributeSpecification] {
&self.attributes[..]
}
}
/// Parse a non-null abbreviation.
// Layout: code, tag, has-children byte, then attribute specs terminated by a
// null spec. `Abbreviation::new` cannot panic here because
// `parse_abbreviation_code` already rejected a zero code.
fn parse_abbreviation(input: &[u8]) -> ParseResult<(&[u8], Abbreviation)> {
let (rest, code) = try!(parse_abbreviation_code(input));
let (rest, tag) = try!(parse_abbreviation_tag(rest));
let (rest, has_children) = try!(parse_abbreviation_has_children(rest));
let (rest, attributes) = try!(parse_attribute_specifications(rest));
let abbrev = Abbreviation::new(code, tag, has_children, attributes);
Ok((rest, abbrev))
}
/// Parse a null abbreviation.
fn parse_null_abbreviation(input: &[u8]) -> ParseResult<(&[u8], ())> {
let (rest, name) = try!(parse_unsigned_leb(input));
if name == 0 {
Ok((rest, ()))
} else {
Err(Error::ExpectedZero)
}
}
/// A set of type abbreviations.
///
/// Construct an `Abbreviations` instance with the
/// [`abbreviations()`](struct.UnitHeader.html#method.abbreviations)
/// method.
#[derive(Debug, Default, Clone)]
pub struct Abbreviations {
// Keyed by each abbreviation's (non-zero, unique) code.
abbrevs: hash_map::HashMap<u64, Abbreviation>,
}
impl Abbreviations {
    /// Construct a new, empty set of abbreviations.
    fn empty() -> Abbreviations {
        Abbreviations { abbrevs: hash_map::HashMap::new() }
    }

    /// Insert `abbrev` into the set, keyed by its code.
    ///
    /// `Ok` if no abbreviation with this code was already present; `Err` if
    /// the code is a duplicate, in which case the set is left unchanged.
    fn insert(&mut self, abbrev: Abbreviation) -> Result<(), ()> {
        match self.abbrevs.entry(abbrev.code) {
            hash_map::Entry::Vacant(slot) => {
                slot.insert(abbrev);
                Ok(())
            }
            hash_map::Entry::Occupied(_) => Err(()),
        }
    }

    /// Look up the abbreviation associated with the given code.
    fn get(&self, code: u64) -> Option<&Abbreviation> {
        self.abbrevs.get(&code)
    }
}
/// Parse a series of abbreviations, terminated by a null abbreviation.
fn parse_abbreviations(mut input: &[u8]) -> ParseResult<(&[u8], Abbreviations)> {
    let mut abbrevs = Abbreviations::empty();
    loop {
        // Try the null (terminator) form first; only if that fails, parse a
        // real abbreviation.
        let result = parse_null_abbreviation(input).map(|(rest, _)| (rest, None));
        let result =
            result.or_else(|_| parse_abbreviation(input).map(|(rest, a)| (rest, Some(a))));
        let (rest, abbrev) = try!(result);
        input = rest;
        match abbrev {
            None => break,
            Some(abbrev) => {
                // `insert` only fails on a duplicate code; `.is_err()` is the
                // idiomatic replacement for the redundant `if let Err(_) = ...`
                // pattern match used previously.
                if abbrevs.insert(abbrev).is_err() {
                    return Err(Error::DuplicateAbbreviationCode);
                }
            }
        }
    }
    Ok((input, abbrevs))
}
/// Whether the format of a compilation unit is 32- or 64-bit.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Format {
/// 64-bit DWARF
Dwarf64,
/// 32-bit DWARF
Dwarf32,
}
// Initial-length values >= 0xfffffff0 are reserved; 0xffffffff specifically
// escapes into the 64-bit DWARF format (see `parse_unit_length`).
const MAX_DWARF_32_UNIT_LENGTH: u64 = 0xfffffff0;
const DWARF_64_INITIAL_UNIT_LENGTH: u64 = 0xffffffff;
/// Parse the compilation unit header's length.
///
/// An initial 4-byte value below `MAX_DWARF_32_UNIT_LENGTH` is a 32-bit
/// DWARF length; the sentinel `DWARF_64_INITIAL_UNIT_LENGTH` escapes into
/// 64-bit DWARF, where the real length follows as 8 bytes; anything else is
/// a reserved value and rejected.
fn parse_unit_length<'input, Endian>(input: EndianBuf<'input, Endian>)
                                     -> ParseResult<(EndianBuf<'input, Endian>, (u64, Format))>
    where Endian: Endianity
{
    let (rest, initial) = try!(parse_u32_as_u64(input));
    if initial == DWARF_64_INITIAL_UNIT_LENGTH {
        let (rest, length) = try!(parse_u64(rest));
        return Ok((rest, (length, Format::Dwarf64)));
    }
    if initial < MAX_DWARF_32_UNIT_LENGTH {
        return Ok((rest, (initial, Format::Dwarf32)));
    }
    Err(Error::UnknownReservedLength)
}
// A plain 4-byte little-endian value below the reserved range parses as a
// 32-bit DWARF length.
#[test]
fn test_parse_unit_length_32_ok() {
let buf = [0x12, 0x34, 0x56, 0x78];
match parse_unit_length(EndianBuf::<LittleEndian>::new(&buf)) {
Ok((rest, (length, format))) => {
assert_eq!(rest.len(), 0);
assert_eq!(format, Format::Dwarf32);
assert_eq!(0x78563412, length);
}
otherwise => panic!("Unexpected result: {:?}", otherwise),
}
}
// The 0xffffffff escape switches to 64-bit DWARF and the real 8-byte
// length follows.
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_parse_unit_length_64_ok() {
let buf = [
// Dwarf_64_INITIAL_UNIT_LENGTH
0xff, 0xff, 0xff, 0xff,
// Actual length
0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xff
];
match parse_unit_length(EndianBuf::<LittleEndian>::new(&buf)) {
Ok((rest, (length, format))) => {
assert_eq!(rest.len(), 0);
assert_eq!(format, Format::Dwarf64);
assert_eq!(0xffdebc9a78563412, length);
}
otherwise => panic!("Unexpected result: {:?}", otherwise),
}
}
// Values in 0xfffffff0..=0xfffffffe are reserved and must be rejected.
#[test]
fn test_parse_unit_length_unknown_reserved_value() {
let buf = [0xfe, 0xff, 0xff, 0xff];
match parse_unit_length(EndianBuf::<LittleEndian>::new(&buf)) {
Err(Error::UnknownReservedLength) => assert!(true),
otherwise => panic!("Unexpected result: {:?}", otherwise),
};
}
// Fewer than 4 bytes can't hold even the initial length.
#[test]
fn test_parse_unit_length_incomplete() {
let buf = [0xff, 0xff, 0xff]; // Need at least 4 bytes.
match parse_unit_length(EndianBuf::<LittleEndian>::new(&buf)) {
Err(Error::UnexpectedEof) => assert!(true),
otherwise => panic!("Unexpected result: {:?}", otherwise),
};
}
// After the 64-bit escape, the 8-byte length itself may be truncated.
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_parse_unit_length_64_incomplete() {
let buf = [
// DWARF_64_INITIAL_UNIT_LENGTH
0xff, 0xff, 0xff, 0xff,
// Actual length is not long enough.
0x12, 0x34, 0x56, 0x78
];
match parse_unit_length(EndianBuf::<LittleEndian>::new(&buf)) {
Err(Error::UnexpectedEof) => assert!(true),
otherwise => panic!("Unexpected result: {:?}", otherwise),
};
}
/// Parse the DWARF version from the compilation unit header.
fn parse_version<'input, Endian>(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, u16)>
where Endian: Endianity
{
let (rest, val) = try!(parse_u16(input));
// DWARF 1 was very different, and is obsolete, so isn't supported by this
// reader.
if 2 <= val && val <= 4 {
Ok((rest, val))
} else {
Err(Error::UnknownVersion)
}
}
// A supported version parses and leaves trailing bytes untouched.
#[test]
fn test_unit_version_ok() {
// Version 4 and two extra bytes
let buf = [0x04, 0x00, 0xff, 0xff];
match parse_version(EndianBuf::<LittleEndian>::new(&buf)) {
Ok((rest, val)) => {
assert_eq!(val, 4);
assert_eq!(rest, EndianBuf::new(&[0xff, 0xff]));
}
otherwise => panic!("Unexpected result: {:?}", otherwise),
};
}
// Both an arbitrary out-of-range version and the obsolete version 1 are
// rejected.
#[test]
fn test_unit_version_unknown_version() {
let buf = [0xab, 0xcd];
match parse_version(EndianBuf::<LittleEndian>::new(&buf)) {
Err(Error::UnknownVersion) => assert!(true),
otherwise => panic!("Unexpected result: {:?}", otherwise),
};
let buf = [0x1, 0x0];
match parse_version(EndianBuf::<LittleEndian>::new(&buf)) {
Err(Error::UnknownVersion) => assert!(true),
otherwise => panic!("Unexpected result: {:?}", otherwise),
};
}
// One byte is too short for the 2-byte version field.
#[test]
fn test_unit_version_incomplete() {
let buf = [0x04];
match parse_version(EndianBuf::<LittleEndian>::new(&buf)) {
Err(Error::UnexpectedEof) => assert!(true),
otherwise => panic!("Unexpected result: {:?}", otherwise),
};
}
/// Parse the `debug_abbrev_offset` in the compilation unit header.
fn parse_debug_abbrev_offset<'input, Endian>
(input: EndianBuf<'input, Endian>, format: Format)
-> ParseResult<(EndianBuf<'input, Endian>, DebugAbbrevOffset)>
where Endian: Endianity
{
let offset = match format {
Format::Dwarf32 => parse_u32_as_u64(input),
Format::Dwarf64 => parse_u64(input),
};
offset.map(|(rest, offset)| (rest, DebugAbbrevOffset(offset)))
}
#[test]
fn test_parse_debug_abbrev_offset_32() {
    // Little-endian 32-bit offset.
    let buf = [0x01, 0x02, 0x03, 0x04];

    match parse_debug_abbrev_offset(EndianBuf::<LittleEndian>::new(&buf),
                                    Format::Dwarf32) {
        Ok((_, val)) => assert_eq!(val, DebugAbbrevOffset(0x04030201)),
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}

#[test]
fn test_parse_debug_abbrev_offset_32_incomplete() {
    // Only two of the four required bytes.
    let buf = [0x01, 0x02];

    match parse_debug_abbrev_offset(EndianBuf::<LittleEndian>::new(&buf),
                                    Format::Dwarf32) {
        Err(Error::UnexpectedEof) => assert!(true),
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}

#[test]
fn test_parse_debug_abbrev_offset_64() {
    // Little-endian 64-bit offset.
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];

    match parse_debug_abbrev_offset(EndianBuf::<LittleEndian>::new(&buf),
                                    Format::Dwarf64) {
        Ok((_, val)) => assert_eq!(val, DebugAbbrevOffset(0x0807060504030201)),
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}

#[test]
fn test_parse_debug_abbrev_offset_64_incomplete() {
    // Only two of the eight required bytes.
    let buf = [0x01, 0x02];

    match parse_debug_abbrev_offset(EndianBuf::<LittleEndian>::new(&buf),
                                    Format::Dwarf64) {
        Err(Error::UnexpectedEof) => assert!(true),
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}
/// Parse the size of addresses (in bytes) on the target architecture.
///
/// This is a single `u8` in the compilation unit header, so the input is a
/// plain byte slice — no endianity is involved for a one-byte field.
fn parse_address_size(input: &[u8]) -> ParseResult<(&[u8], u8)> {
    parse_u8(input)
}
#[test]
fn test_parse_address_size_ok() {
    // A typical 32-bit target: 4-byte addresses.
    let buf = [0x04];

    match parse_address_size(&buf) {
        Ok((_, val)) => assert_eq!(val, 4),
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}
/// The header of a compilation unit's debugging information.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct UnitHeader<'input, Endian>
    where Endian: Endianity
{
    // Length of this unit's data, *excluding* the encoded length field itself.
    unit_length: u64,
    // The DWARF version of this unit's debugging information.
    version: u16,
    // Offset of this unit's abbreviations within `.debug_abbrev`.
    debug_abbrev_offset: DebugAbbrevOffset,
    // Size of a target address, in bytes.
    address_size: u8,
    // Whether the unit is encoded as 32- or 64-bit DWARF.
    format: Format,
    // The bytes of this unit's DIEs: everything after the header.
    entries_buf: EndianBuf<'input, Endian>,
}
/// Static methods.
impl<'input, Endian> UnitHeader<'input, Endian>
    where Endian: Endianity
{
    /// Construct a new `UnitHeader`.
    pub fn new(unit_length: u64,
               version: u16,
               debug_abbrev_offset: DebugAbbrevOffset,
               address_size: u8,
               format: Format,
               entries_buf: &'input [u8])
               -> UnitHeader<'input, Endian> {
        // Wrap the raw entries slice so it carries the unit's endianity.
        let entries_buf = EndianBuf(entries_buf, PhantomData);
        UnitHeader {
            unit_length: unit_length,
            version: version,
            debug_abbrev_offset: debug_abbrev_offset,
            address_size: address_size,
            format: format,
            entries_buf: entries_buf,
        }
    }

    /// Return the serialized size of the `unit_length` attribute for the given
    /// DWARF format.
    pub fn size_of_unit_length(format: Format) -> usize {
        // 32-bit DWARF: a single 4-byte length. 64-bit DWARF: the 4-byte
        // 0xffffffff escape value followed by an 8-byte length.
        if let Format::Dwarf64 = format { 12 } else { 4 }
    }

    /// Return the serialized size of the compilation unit header for the given
    /// DWARF format.
    pub fn size_of_header(format: Format) -> usize {
        let debug_abbrev_offset_size = if let Format::Dwarf64 = format { 8 } else { 4 };
        // unit_length + 2-byte version + abbrev offset + 1-byte address size.
        Self::size_of_unit_length(format) + 2 + debug_abbrev_offset_size + 1
    }
}
/// Instance methods.
impl<'input, Endian> UnitHeader<'input, Endian>
    where Endian: Endianity
{
    /// Get the length of the debugging info for this compilation unit, not
    /// including the byte length of the encoded length itself.
    pub fn unit_length(&self) -> u64 {
        self.unit_length
    }

    /// Get the length of the debugging info for this compilation unit,
    /// including the byte length of the encoded length itself.
    pub fn length_including_self(&self) -> u64 {
        match self.format {
            // Length of the 32-bit header plus the unit length.
            Format::Dwarf32 => 4 + self.unit_length,
            // Length of the 4 byte 0xffffffff value to enable 64-bit mode plus
            // the actual 64-bit length.
            Format::Dwarf64 => 4 + 8 + self.unit_length,
        }
    }

    /// Get the DWARF version of the debugging info for this compilation unit.
    pub fn version(&self) -> u16 {
        self.version
    }

    /// The offset into the `.debug_abbrev` section for this compilation unit's
    /// debugging information entries' abbreviations.
    pub fn debug_abbrev_offset(&self) -> DebugAbbrevOffset {
        self.debug_abbrev_offset
    }

    /// The size of addresses (in bytes) in this compilation unit.
    pub fn address_size(&self) -> u8 {
        self.address_size
    }

    /// Whether this compilation unit is encoded in 64- or 32-bit DWARF.
    pub fn format(&self) -> Format {
        self.format
    }

    /// Whether `offset` names a byte within this unit's DIEs.
    ///
    /// Unit offsets are relative to the start of the unit header, so a valid
    /// offset must point past the header and land inside `entries_buf`.
    fn is_valid_offset(&self, offset: UnitOffset) -> bool {
        let size_of_header = Self::size_of_header(self.format);
        // BUG FIX: this previously read `!offset.0 as usize >= size_of_header`.
        // Unary `!` binds tighter than `as` and `>=`, so that computed the
        // bitwise NOT of the offset and compared *that* against the header
        // size, rejecting essentially every valid offset. The intent was to
        // reject offsets that point into the header itself.
        if (offset.0 as usize) < size_of_header {
            return false;
        }
        let relative_to_entries_buf = offset.0 as usize - size_of_header;
        relative_to_entries_buf < self.entries_buf.len()
    }

    /// Get the underlying bytes for the supplied range.
    ///
    /// Panics if either end of the range is not a valid offset into this
    /// unit's entries, or if the range is reversed.
    pub fn range(&self, idx: Range<UnitOffset>) -> &'input [u8] {
        assert!(self.is_valid_offset(idx.start));
        assert!(self.is_valid_offset(idx.end));
        assert!(idx.start <= idx.end);
        let size_of_header = Self::size_of_header(self.format);
        // Translate unit-relative offsets into entries_buf-relative indices.
        let start = idx.start.0 as usize - size_of_header;
        let end = idx.end.0 as usize - size_of_header;
        &self.entries_buf.0[start..end]
    }

    /// Get the underlying bytes for the supplied range.
    pub fn range_from(&self, idx: RangeFrom<UnitOffset>) -> &'input [u8] {
        assert!(self.is_valid_offset(idx.start));
        let start = idx.start.0 as usize - Self::size_of_header(self.format);
        &self.entries_buf.0[start..]
    }

    /// Get the underlying bytes for the supplied range.
    pub fn range_to(&self, idx: RangeTo<UnitOffset>) -> &'input [u8] {
        assert!(self.is_valid_offset(idx.end));
        let end = idx.end.0 as usize - Self::size_of_header(self.format);
        &self.entries_buf.0[..end]
    }

    /// Navigate this compilation unit's `DebuggingInformationEntry`s.
    pub fn entries<'me, 'abbrev>(&'me self,
                                 abbreviations: &'abbrev Abbreviations)
                                 -> EntriesCursor<'input, 'abbrev, 'me, Endian> {
        EntriesCursor {
            unit: self,
            input: self.entries_buf.into(),
            abbreviations: abbreviations,
            cached_current: RefCell::new(None),
        }
    }

    /// Parse the abbreviations at the given `offset` within this
    /// `.debug_abbrev` section.
    ///
    /// The `offset` should generally be retrieved from a unit header.
    ///
    /// ```
    /// use gimli::DebugAbbrev;
    /// # use gimli::{DebugInfo, LittleEndian};
    /// # let info_buf = [
    /// #     // Compilation unit header
    /// #
    /// #     // 32-bit unit length = 25
    /// #     0x19, 0x00, 0x00, 0x00,
    /// #     // Version 4
    /// #     0x04, 0x00,
    /// #     // debug_abbrev_offset
    /// #     0x00, 0x00, 0x00, 0x00,
    /// #     // Address size
    /// #     0x04,
    /// #
    /// #     // DIEs
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// # ];
    /// # let debug_info = DebugInfo::<LittleEndian>::new(&info_buf);
    /// #
    /// # let abbrev_buf = [
    /// #     // Code
    /// #     0x01,
    /// #     // DW_TAG_subprogram
    /// #     0x2e,
    /// #     // DW_CHILDREN_yes
    /// #     0x01,
    /// #     // Begin attributes
    /// #     // Attribute name = DW_AT_name
    /// #     0x03,
    /// #     // Attribute form = DW_FORM_string
    /// #     0x08,
    /// #     // End attributes
    /// #     0x00,
    /// #     0x00,
    /// #     // Null terminator
    /// #     0x00
    /// # ];
    /// #
    /// # let get_some_unit = || debug_info.units().next().unwrap().unwrap();
    ///
    /// let unit = get_some_unit();
    ///
    /// # let read_debug_abbrev_section_somehow = || &abbrev_buf;
    /// let debug_abbrev = DebugAbbrev::<LittleEndian>::new(read_debug_abbrev_section_somehow());
    /// let abbrevs_for_unit = unit.abbreviations(debug_abbrev).unwrap();
    /// ```
    pub fn abbreviations<'abbrev>(&self,
                                  debug_abbrev: DebugAbbrev<'abbrev, Endian>)
                                  -> ParseResult<Abbreviations> {
        parse_abbreviations(&debug_abbrev.debug_abbrev_section.0[self.debug_abbrev_offset
                                                                     .0 as usize..])
            .map(|(_, abbrevs)| abbrevs)
    }
}
/// Parse a compilation unit header.
///
/// Parses, in order: unit length (which also determines the 32-/64-bit
/// format), version, `debug_abbrev_offset`, and address size, then validates
/// that `unit_length` is large enough to cover the rest of the header and
/// that the unit's DIE bytes are actually present in the input.
fn parse_unit_header<'input, Endian>
    (input: EndianBuf<'input, Endian>)
     -> ParseResult<(EndianBuf<'input, Endian>, UnitHeader<'input, Endian>)>
    where Endian: Endianity
{
    let (rest, (unit_length, format)) = try!(parse_unit_length(input));
    let (rest, version) = try!(parse_version(rest));
    let (rest, offset) = try!(parse_debug_abbrev_offset(rest, format));
    let (rest, address_size) = try!(parse_address_size(rest.into()));
    let size_of_unit_length = UnitHeader::<Endian>::size_of_unit_length(format);
    let size_of_header = UnitHeader::<Endian>::size_of_header(format);
    // `unit_length` counts everything after the length field, including the
    // remaining header fields, so it can never legitimately be smaller than
    // the header minus the length field.
    if unit_length as usize + size_of_unit_length < size_of_header {
        return Err(Error::UnitHeaderLengthTooShort);
    }
    // Number of bytes of DIE data belonging to this unit.
    let end = unit_length as usize + size_of_unit_length - size_of_header;
    if end > rest.len() {
        return Err(Error::UnexpectedEof);
    }
    let entries_buf = &rest[..end];
    // NOTE(review): the returned "rest" starts at the entries, i.e. it
    // overlaps `entries_buf` rather than beginning after it. Callers
    // presumably advance across whole units via `length_including_self` —
    // confirm before relying on the returned position.
    Ok((EndianBuf::new(rest),
        UnitHeader::new(unit_length,
                        version,
                        offset,
                        address_size,
                        format,
                        entries_buf)))
}
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_parse_unit_header_32_ok() {
    let buf = [
        // 32-bit unit length
        0x07, 0x00, 0x00, 0x00,
        // Version 4
        0x04, 0x00,
        // Debug_abbrev_offset
        0x05, 0x06, 0x07, 0x08,
        // Address size
        0x04
    ];

    // A unit length of 7 covers exactly the remaining header fields
    // (2 + 4 + 1 bytes), so the entries buffer is empty.
    match parse_unit_header(EndianBuf::<LittleEndian>::new(&buf)) {
        Ok((_, header)) => {
            assert_eq!(header,
                       UnitHeader::new(7,
                                       4,
                                       DebugAbbrevOffset(0x08070605),
                                       4,
                                       Format::Dwarf32,
                                       &[]))
        }
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}

#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_parse_unit_header_64_ok() {
    let buf = [
        // Enable 64-bit
        0xff, 0xff, 0xff, 0xff,
        // Unit length = 11
        0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Version 4
        0x04, 0x00,
        // debug_abbrev_offset
        0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01,
        // Address size
        0x08
    ];

    // Likewise, a unit length of 11 (2 + 8 + 1) leaves no entry bytes.
    match parse_unit_header(EndianBuf::<LittleEndian>::new(&buf)) {
        Ok((_, header)) => {
            let expected = UnitHeader::new(11,
                                           4,
                                           DebugAbbrevOffset(0x0102030405060708),
                                           8,
                                           Format::Dwarf64,
                                           &[]);
            assert_eq!(header, expected)
        }
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}
/// A Debugging Information Entry (DIE).
///
/// DIEs have a set of attributes and optionally have children DIEs as well.
#[derive(Clone, Debug)]
pub struct DebuggingInformationEntry<'input, 'abbrev, 'unit, Endian>
    where 'input: 'unit,
          Endian: Endianity + 'unit
{
    // The bytes of this entry's attributes, starting just after its
    // abbreviation code.
    attrs_slice: &'input [u8],
    // Lazily-filled cache of the input position just past this entry's
    // attributes (populated while iterating them).
    after_attrs: Cell<Option<&'input [u8]>>,
    // The entry's abbreviation code.
    code: u64,
    // The abbreviation describing this entry's tag and attribute forms.
    abbrev: &'abbrev Abbreviation,
    // The compilation unit this entry belongs to.
    unit: &'unit UnitHeader<'input, Endian>,
}
impl<'input, 'abbrev, 'unit, Endian> DebuggingInformationEntry<'input, 'abbrev, 'unit, Endian>
    where Endian: Endianity
{
    /// Get this entry's code.
    pub fn code(&self) -> u64 {
        self.code
    }

    /// Get this entry's `DW_TAG_whatever` tag.
    ///
    /// ```
    /// # use gimli::{DebugAbbrev, DebugInfo, LittleEndian};
    /// # let info_buf = [
    /// #     // Compilation unit header
    /// #
    /// #     // 32-bit unit length = 12
    /// #     0x0c, 0x00, 0x00, 0x00,
    /// #     // Version 4
    /// #     0x04, 0x00,
    /// #     // debug_abbrev_offset
    /// #     0x00, 0x00, 0x00, 0x00,
    /// #     // Address size
    /// #     0x04,
    /// #
    /// #     // DIEs
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// # ];
    /// # let debug_info = DebugInfo::<LittleEndian>::new(&info_buf);
    /// # let abbrev_buf = [
    /// #     // Code
    /// #     0x01,
    /// #     // DW_TAG_subprogram
    /// #     0x2e,
    /// #     // DW_CHILDREN_no
    /// #     0x00,
    /// #     // Begin attributes
    /// #     // Attribute name = DW_AT_name
    /// #     0x03,
    /// #     // Attribute form = DW_FORM_string
    /// #     0x08,
    /// #     // End attributes
    /// #     0x00,
    /// #     0x00,
    /// #     // Null terminator
    /// #     0x00
    /// # ];
    /// # let debug_abbrev = DebugAbbrev::<LittleEndian>::new(&abbrev_buf);
    /// # let unit = debug_info.units().next().unwrap().unwrap();
    /// # let abbrevs = unit.abbreviations(debug_abbrev).unwrap();
    /// # let mut cursor = unit.entries(&abbrevs);
    /// # let mut get_some_entry = || cursor.current().unwrap().unwrap();
    /// let entry = get_some_entry();
    ///
    /// match entry.tag() {
    ///     gimli::DW_TAG_subprogram =>
    ///         println!("this entry contains debug info about a function"),
    ///     gimli::DW_TAG_inlined_subroutine =>
    ///         println!("this entry contains debug info about a particular instance of inlining"),
    ///     gimli::DW_TAG_variable =>
    ///         println!("this entry contains debug info about a local variable"),
    ///     gimli::DW_TAG_formal_parameter =>
    ///         println!("this entry contains debug info about a function parameter"),
    ///     otherwise =>
    ///         println!("this entry is some other kind of data: {:?}", otherwise),
    /// };
    /// ```
    pub fn tag(&self) -> constants::DwTag {
        self.abbrev.tag()
    }

    /// Iterate over this entry's set of attributes.
    ///
    /// ```
    /// use gimli::{DebugAbbrev, DebugInfo, LittleEndian};
    ///
    /// // Read the `.debug_info` section.
    ///
    /// # let info_buf = [
    /// #     // Compilation unit header
    /// #
    /// #     // 32-bit unit length = 12
    /// #     0x0c, 0x00, 0x00, 0x00,
    /// #     // Version 4
    /// #     0x04, 0x00,
    /// #     // debug_abbrev_offset
    /// #     0x00, 0x00, 0x00, 0x00,
    /// #     // Address size
    /// #     0x04,
    /// #
    /// #     // DIEs
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// # ];
    /// # let read_debug_info_section_somehow = || &info_buf;
    /// let debug_info = DebugInfo::<LittleEndian>::new(read_debug_info_section_somehow());
    ///
    /// // Get the data about the first compilation unit out of the `.debug_info`.
    ///
    /// let unit = debug_info.units().next()
    ///     .expect("Should have at least one compilation unit")
    ///     .expect("and it should parse ok");
    ///
    /// // Read the `.debug_abbrev` section and parse the
    /// // abbreviations for our compilation unit.
    ///
    /// # let abbrev_buf = [
    /// #     // Code
    /// #     0x01,
    /// #     // DW_TAG_subprogram
    /// #     0x2e,
    /// #     // DW_CHILDREN_no
    /// #     0x00,
    /// #     // Begin attributes
    /// #     // Attribute name = DW_AT_name
    /// #     0x03,
    /// #     // Attribute form = DW_FORM_string
    /// #     0x08,
    /// #     // End attributes
    /// #     0x00,
    /// #     0x00,
    /// #     // Null terminator
    /// #     0x00
    /// # ];
    /// # let read_debug_abbrev_section_somehow = || &abbrev_buf;
    /// let debug_abbrev = DebugAbbrev::<LittleEndian>::new(read_debug_abbrev_section_somehow());
    /// let abbrevs = unit.abbreviations(debug_abbrev).unwrap();
    ///
    /// // Get the first entry from that compilation unit.
    ///
    /// let mut cursor = unit.entries(&abbrevs);
    /// let entry = cursor.current()
    ///     .expect("Should have at least one entry")
    ///     .expect("and it should parse ok");
    ///
    /// // Finally, print the first entry's attributes.
    ///
    /// for attr_result in entry.attrs() {
    ///     let attr = attr_result.unwrap();
    ///
    ///     println!("Attribute name = {:?}", attr.name());
    ///     println!("Attribute value = {:?}", attr.value());
    /// }
    /// ```
    pub fn attrs<'me>(&'me self) -> AttrsIter<'input, 'abbrev, 'me, 'unit, Endian> {
        AttrsIter {
            input: self.attrs_slice,
            attributes: &self.abbrev.attributes[..],
            entry: self,
        }
    }

    /// Find the first attribute in this entry which has the given name,
    /// and return its value. Returns `None` if no attribute is found.
    ///
    /// Note: iteration stops at the first attribute that fails to parse, so
    /// a parse error is silently reported as "not found" here.
    pub fn attr_value(&self, name: constants::DwAt) -> Option<AttributeValue<'input>> {
        self.attrs()
            .take_while(|res| res.is_ok())
            .find(|res| res.unwrap().name() == name)
            .map(|res| res.unwrap().value())
    }
}
/// The value of an attribute in a `DebuggingInformationEntry`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum AttributeValue<'input> {
    /// A slice that is `UnitHeader::address_size` bytes long.
    Addr(&'input [u8]),

    /// A slice of an arbitrary number of bytes.
    Block(&'input [u8]),

    /// A one, two, four, or eight byte constant data value. How to interpret
    /// the bytes depends on context.
    ///
    /// From section 7 of the standard: "Depending on context, it may be a
    /// signed integer, an unsigned integer, a floating-point constant, or
    /// anything else."
    Data(&'input [u8]),

    /// A signed integer constant.
    Sdata(i64),

    /// An unsigned integer constant.
    Udata(u64),

    /// "The information bytes contain a DWARF expression (see Section 2.5) or
    /// location description (see Section 2.6)."
    Exprloc(&'input [u8]),

    /// A boolean typically used to describe the presence or absence of another
    /// attribute.
    Flag(bool),

    /// An offset into another section. Which section this is an offset into
    /// depends on context.
    SecOffset(u64),

    /// An offset into the current compilation unit.
    UnitRef(UnitOffset),

    /// An offset into the current `.debug_info` section, but possibly a
    /// different compilation unit from the current one.
    DebugInfoRef(DebugInfoOffset),

    /// An offset into the `.debug_types` section.
    DebugTypesRef(DebugTypesOffset),

    /// An offset into the `.debug_str` section.
    DebugStrRef(DebugStrOffset),

    /// A null terminated C string, including the final null byte. Not
    /// guaranteed to be UTF-8 or anything like that.
    String(&'input [u8]),
}
/// An attribute in a `DebuggingInformationEntry`, consisting of a name and
/// associated value.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Attribute<'input> {
    /// The attribute's name, e.g. `DW_AT_name`.
    name: constants::DwAt,
    /// The attribute's parsed value.
    value: AttributeValue<'input>,
}

impl<'input> Attribute<'input> {
    /// Get this attribute's name.
    pub fn name(&self) -> constants::DwAt {
        // `Attribute` is `Copy`, so these accessors return cheap copies.
        self.name
    }

    /// Get this attribute's value.
    pub fn value(&self) -> AttributeValue<'input> {
        self.value
    }
}
/// Take a slice of size `bytes` from the input.
///
/// On success, returns `(rest, taken)`; fails with `UnexpectedEof` if the
/// input is too short.
fn take(bytes: usize, input: &[u8]) -> ParseResult<(&[u8], &[u8])> {
    if bytes <= input.len() {
        let (taken, rest) = input.split_at(bytes);
        Ok((rest, taken))
    } else {
        Err(Error::UnexpectedEof)
    }
}
/// Take a slice whose length is given by a leading `u8`.
fn length_u8_value(input: &[u8]) -> ParseResult<(&[u8], &[u8])> {
    let (rest, len) = try!(parse_u8(input));
    take(len as usize, rest)
}

/// Take a slice whose length is given by a leading endian-dependent `u16`.
fn length_u16_value<'input, Endian>(input: EndianBuf<'input, Endian>)
                                    -> ParseResult<(EndianBuf<'input, Endian>, &'input [u8])>
    where Endian: Endianity
{
    let (rest, len) = try!(parse_u16(input));
    take(len as usize, rest.into()).map(|(rest, result)| (EndianBuf::new(rest), result))
}

/// Take a slice whose length is given by a leading endian-dependent `u32`.
fn length_u32_value<'input, Endian>(input: EndianBuf<'input, Endian>)
                                    -> ParseResult<(EndianBuf<'input, Endian>, &'input [u8])>
    where Endian: Endianity
{
    let (rest, len) = try!(parse_u32(input));
    take(len as usize, rest.into()).map(|(rest, result)| (EndianBuf::new(rest), result))
}

/// Take a slice whose length is given by a leading unsigned LEB128 value.
fn length_leb_value(input: &[u8]) -> ParseResult<(&[u8], &[u8])> {
    let (rest, len) = try!(parse_unsigned_leb(input));
    take(len as usize, rest)
}
/// Parse a single attribute described by `spec` from `input`.
///
/// The attribute's encoding is chosen by `spec.form`; `DW_FORM_indirect`
/// forms loop back and re-read the actual form from the input itself.
/// `unit` supplies the address size and DWARF format, which some forms
/// depend on. Returns the remaining input and the parsed `Attribute`.
fn parse_attribute<'input, 'unit, Endian>
    (mut input: EndianBuf<'input, Endian>,
     unit: &'unit UnitHeader<'input, Endian>,
     spec: AttributeSpecification)
     -> ParseResult<(EndianBuf<'input, Endian>, Attribute<'input>)>
    where Endian: Endianity
{
    let mut form = spec.form;
    loop {
        match form {
            // The real form is encoded in the DIE data itself; read it and
            // loop to dispatch on it.
            constants::DW_FORM_indirect => {
                let (rest, dynamic_form) = try!(parse_attribute_form(input.into()));
                form = dynamic_form;
                input = EndianBuf::new(rest);
                continue;
            }
            constants::DW_FORM_addr => {
                return take(unit.address_size() as usize, input.into()).map(|(rest, addr)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Addr(addr),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_block1 => {
                return length_u8_value(input.into()).map(|(rest, block)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Block(block),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_block2 => {
                return length_u16_value(input.into()).map(|(rest, block)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Block(block),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_block4 => {
                return length_u32_value(input.into()).map(|(rest, block)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Block(block),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_block => {
                return length_leb_value(input.into()).map(|(rest, block)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Block(block),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_data1 => {
                return take(1, input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Data(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_data2 => {
                return take(2, input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Data(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_data4 => {
                return take(4, input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Data(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_data8 => {
                return take(8, input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Data(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_udata => {
                return parse_unsigned_leb(input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Udata(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_sdata => {
                return parse_signed_leb(input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Sdata(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_exprloc => {
                return length_leb_value(input.into()).map(|(rest, block)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Exprloc(block),
                    };
                    (EndianBuf::new(rest), attr)
                })
            }
            constants::DW_FORM_flag => {
                return parse_u8(input.into()).map(|(rest, present)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Flag(present != 0),
                    };
                    (EndianBuf::new(rest), attr)
                })
            }
            constants::DW_FORM_flag_present => {
                // FlagPresent is this weird compile time always true thing that
                // isn't actually present in the serialized DIEs: it consumes no
                // input and always yields `Flag(true)`.
                return Ok((input,
                           Attribute {
                               name: spec.name,
                               value: AttributeValue::Flag(true),
                           }));
            }
            // Section offsets are 4 bytes in 32-bit DWARF, 8 in 64-bit.
            constants::DW_FORM_sec_offset => {
                return match unit.format() {
                    Format::Dwarf32 => {
                        parse_u32(input.into()).map(|(rest, offset)| {
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::SecOffset(offset as u64),
                            };
                            (rest, attr)
                        })
                    }
                    Format::Dwarf64 => {
                        parse_u64(input.into()).map(|(rest, offset)| {
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::SecOffset(offset),
                            };
                            (rest, attr)
                        })
                    }
                };
            }
            constants::DW_FORM_ref1 => {
                return parse_u8(input.into()).map(|(rest, reference)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::UnitRef(UnitOffset(reference as u64)),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_ref2 => {
                return parse_u16(input.into()).map(|(rest, reference)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::UnitRef(UnitOffset(reference as u64)),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_ref4 => {
                return parse_u32(input.into()).map(|(rest, reference)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::UnitRef(UnitOffset(reference as u64)),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_ref8 => {
                return parse_u64(input.into()).map(|(rest, reference)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::UnitRef(UnitOffset(reference)),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_ref_udata => {
                return parse_unsigned_leb(input.into()).map(|(rest, reference)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::UnitRef(UnitOffset(reference)),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_ref_addr => {
                return match unit.format() {
                    Format::Dwarf32 => {
                        parse_u32(input.into()).map(|(rest, offset)| {
                            let offset = DebugInfoOffset(offset as u64);
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::DebugInfoRef(offset),
                            };
                            (rest, attr)
                        })
                    }
                    Format::Dwarf64 => {
                        parse_u64(input.into()).map(|(rest, offset)| {
                            let offset = DebugInfoOffset(offset);
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::DebugInfoRef(offset),
                            };
                            (rest, attr)
                        })
                    }
                };
            }
            constants::DW_FORM_ref_sig8 => {
                return parse_u64(input.into()).map(|(rest, offset)| {
                    let offset = DebugTypesOffset(offset);
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::DebugTypesRef(offset),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_string => {
                // The resulting value keeps the terminating null byte, per
                // the `AttributeValue::String` documentation.
                let null_idx = input.iter().position(|ch| *ch == 0);

                if let Some(idx) = null_idx {
                    let buf: &[u8] = input.into();
                    return Ok((input.range_from(idx + 1..),
                               Attribute {
                                   name: spec.name,
                                   value: AttributeValue::String(&buf[0..idx + 1]),
                               }));
                } else {
                    return Err(Error::UnexpectedEof);
                }
            }
            constants::DW_FORM_strp => {
                return match unit.format() {
                    Format::Dwarf32 => {
                        parse_u32(input.into()).map(|(rest, offset)| {
                            let offset = DebugStrOffset(offset as u64);
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::DebugStrRef(offset),
                            };
                            (rest, attr)
                        })
                    }
                    Format::Dwarf64 => {
                        parse_u64(input.into()).map(|(rest, offset)| {
                            let offset = DebugStrOffset(offset);
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::DebugStrRef(offset),
                            };
                            (rest, attr)
                        })
                    }
                };
            }
            _ => {
                return Err(Error::UnknownForm);
            }
        };
    }
}
// Build a dummy unit header for attribute-parsing tests; only `address_size`
// and `format` influence `parse_attribute`.
#[cfg(test)]
fn test_parse_attribute_unit<Endian>(address_size: u8,
                                     format: Format)
                                     -> UnitHeader<'static, Endian>
    where Endian: Endianity
{
    UnitHeader::<Endian>::new(7,
                              4,
                              DebugAbbrevOffset(0x08070605),
                              address_size,
                              format,
                              &[])
}

// Most tests only need 32-bit DWARF with 4-byte addresses.
#[cfg(test)]
fn test_parse_attribute_unit_default() -> UnitHeader<'static, LittleEndian> {
    test_parse_attribute_unit(4, Format::Dwarf32)
}

// Common driver: parse one attribute of the given `form` out of `buf`,
// expecting `value` and exactly `len` bytes consumed.
#[cfg(test)]
fn test_parse_attribute<Endian>(buf: &[u8],
                                len: usize,
                                unit: &UnitHeader<Endian>,
                                form: constants::DwForm,
                                value: AttributeValue)
    where Endian: Endianity
{
    let spec = AttributeSpecification {
        name: constants::DW_AT_low_pc,
        form: form,
    };

    let expect = Attribute {
        name: constants::DW_AT_low_pc,
        value: value,
    };

    match parse_attribute(EndianBuf::new(buf), unit, spec) {
        Ok((rest, attr)) => {
            assert_eq!(attr, expect);
            assert_eq!(rest, EndianBuf::new(&buf[len..]));
        }
        otherwise => {
            println!("Unexpected parse result = {:#?}", otherwise);
            assert!(false);
        }
    };
}
#[test]
fn test_parse_attribute_addr() {
let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];
let unit = test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf32);
let form = constants::DW_FORM_addr;
let value = AttributeValue::Addr(&buf[..4]);
test_parse_attribute(&buf, 4, &unit, form, value);
}
#[test]
fn test_parse_attribute_addr8() {
let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];
let unit = test_parse_attribute_unit::<LittleEndian>(8, Format::Dwarf32);
let form = constants::DW_FORM_addr;
let value = AttributeValue::Addr(&buf[..8]);
test_parse_attribute(&buf, 8, &unit, form, value);
}
#[test]
fn test_parse_attribute_block1() {
// Length of data (3), three bytes of data, two bytes of left over input.
let buf = [0x03, 0x09, 0x09, 0x09, 0x00, 0x00];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_block1;
let value = AttributeValue::Block(&buf[1..4]);
test_parse_attribute(&buf, 4, &unit, form, value);
}
#[test]
fn test_parse_attribute_block2() {
// Two byte length of data (2), two bytes of data, two bytes of left over input.
let buf = [0x02, 0x00, 0x09, 0x09, 0x00, 0x00];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_block2;
let value = AttributeValue::Block(&buf[2..4]);
test_parse_attribute(&buf, 4, &unit, form, value);
}
#[test]
fn test_parse_attribute_block4() {
// Four byte length of data (2), two bytes of data, no left over input.
let buf = [0x02, 0x00, 0x00, 0x00, 0x99, 0x99];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_block4;
let value = AttributeValue::Block(&buf[4..]);
test_parse_attribute(&buf, 6, &unit, form, value);
}
#[test]
fn test_parse_attribute_block() {
// LEB length of data (2, one byte), two bytes of data, no left over input.
let buf = [0x02, 0x99, 0x99];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_block;
let value = AttributeValue::Block(&buf[1..]);
test_parse_attribute(&buf, 3, &unit, form, value);
}
#[test]
fn test_parse_attribute_data1() {
let buf = [0x03];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_data1;
let value = AttributeValue::Data(&buf[..]);
test_parse_attribute(&buf, 1, &unit, form, value);
}
#[test]
fn test_parse_attribute_data2() {
let buf = [0x02, 0x01, 0x0];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_data2;
let value = AttributeValue::Data(&buf[..2]);
test_parse_attribute(&buf, 2, &unit, form, value);
}
#[test]
fn test_parse_attribute_data4() {
let buf = [0x01, 0x02, 0x03, 0x04, 0x99, 0x99];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_data4;
let value = AttributeValue::Data(&buf[..4]);
test_parse_attribute(&buf, 4, &unit, form, value);
}
#[test]
fn test_parse_attribute_data8() {
let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_data8;
let value = AttributeValue::Data(&buf[..8]);
test_parse_attribute(&buf, 8, &unit, form, value);
}
#[test]
fn test_parse_attribute_udata() {
let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let bytes_written = {
let mut writable = &mut buf[..];
leb128::write::unsigned(&mut writable, 4097).expect("should write ok")
};
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_udata;
let value = AttributeValue::Udata(4097);
test_parse_attribute(&buf, bytes_written, &unit, form, value);
}
#[test]
fn test_parse_attribute_sdata() {
let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let bytes_written = {
let mut writable = &mut buf[..];
leb128::write::signed(&mut writable, -4097).expect("should write ok")
};
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_sdata;
let value = AttributeValue::Sdata(-4097);
test_parse_attribute(&buf, bytes_written, &unit, form, value);
}
#[test]
fn test_parse_attribute_exprloc() {
// LEB length of data (2, one byte), two bytes of data, one byte left over input.
let buf = [0x02, 0x99, 0x99, 0x11];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_exprloc;
let value = AttributeValue::Exprloc(&buf[1..3]);
test_parse_attribute(&buf, 3, &unit, form, value);
}
#[test]
fn test_parse_attribute_flag_true() {
let buf = [0x42];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_flag;
let value = AttributeValue::Flag(true);
test_parse_attribute(&buf, 1, &unit, form, value);
}
#[test]
fn test_parse_attribute_flag_false() {
let buf = [0x00];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_flag;
let value = AttributeValue::Flag(false);
test_parse_attribute(&buf, 1, &unit, form, value);
}
#[test]
fn test_parse_attribute_flag_present() {
let buf = [0x01, 0x02, 0x03, 0x04];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_flag_present;
let value = AttributeValue::Flag(true);
// DW_FORM_flag_present does not consume any bytes of the input stream.
test_parse_attribute(&buf, 0, &unit, form, value);
}
#[test]
fn test_parse_attribute_sec_offset_32() {
let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10];
let unit = test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf32);
let form = constants::DW_FORM_sec_offset;
let value = AttributeValue::SecOffset(0x04030201);
test_parse_attribute(&buf, 4, &unit, form, value);
}
#[test]
fn test_parse_attribute_sec_offset_64() {
let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10];
let unit = test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf64);
let form = constants::DW_FORM_sec_offset;
let value = AttributeValue::SecOffset(0x0807060504030201);
test_parse_attribute(&buf, 8, &unit, form, value);
}
#[test]
fn test_parse_attribute_ref1() {
let buf = [0x03];
let unit = test_parse_attribute_unit_default();
let form = constants::DW_FORM_ref1;
let value = AttributeValue::UnitRef(UnitOffset(3));
test_parse_attribute(&buf, 1, &unit, form, value);
}
#[test]
fn test_parse_attribute_ref2() {
    // DW_FORM_ref2 is a two-byte unit-relative reference: 0x0102 == 258.
    let buf = [0x02, 0x01, 0x0];
    test_parse_attribute(&buf,
                         2,
                         &test_parse_attribute_unit_default(),
                         constants::DW_FORM_ref2,
                         AttributeValue::UnitRef(UnitOffset(258)));
}
#[test]
fn test_parse_attribute_ref4() {
    // DW_FORM_ref4 is a four-byte unit-relative reference: 0x04030201 == 67305985.
    let buf = [0x01, 0x02, 0x03, 0x04, 0x99, 0x99];
    test_parse_attribute(&buf,
                         4,
                         &test_parse_attribute_unit_default(),
                         constants::DW_FORM_ref4,
                         AttributeValue::UnitRef(UnitOffset(67305985)));
}
#[test]
fn test_parse_attribute_ref8() {
    // DW_FORM_ref8 is an eight-byte unit-relative reference.
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];
    test_parse_attribute(&buf,
                         8,
                         &test_parse_attribute_unit_default(),
                         constants::DW_FORM_ref8,
                         AttributeValue::UnitRef(UnitOffset(578437695752307201)));
}
#[test]
fn test_parse_attribute_refudata() {
    let mut buf = [0; 10];

    // Encode the reference as a ULEB128 value at the front of the buffer.
    let bytes_written = {
        let mut writable = &mut buf[..];
        leb128::write::unsigned(&mut writable, 4097).expect("should write ok")
    };

    test_parse_attribute(&buf,
                         bytes_written,
                         &test_parse_attribute_unit_default(),
                         constants::DW_FORM_ref_udata,
                         AttributeValue::UnitRef(UnitOffset(4097)));
}
#[test]
fn test_parse_attribute_refaddr_32() {
    // In DWARF32, DW_FORM_ref_addr is a 4-byte .debug_info-relative reference.
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];
    test_parse_attribute(&buf,
                         4,
                         &test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf32),
                         constants::DW_FORM_ref_addr,
                         AttributeValue::DebugInfoRef(DebugInfoOffset(67305985)));
}
#[test]
fn test_parse_attribute_refaddr_64() {
    // In DWARF64, DW_FORM_ref_addr is an 8-byte .debug_info-relative reference.
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];
    test_parse_attribute(&buf,
                         8,
                         &test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf64),
                         constants::DW_FORM_ref_addr,
                         AttributeValue::DebugInfoRef(DebugInfoOffset(578437695752307201)));
}
#[test]
fn test_parse_attribute_refsig8() {
    // DW_FORM_ref_sig8 is always an 8-byte type signature reference.
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];
    test_parse_attribute(&buf,
                         8,
                         &test_parse_attribute_unit_default(),
                         constants::DW_FORM_ref_sig8,
                         AttributeValue::DebugTypesRef(DebugTypesOffset(578437695752307201)));
}
#[test]
fn test_parse_attribute_string() {
    // DW_FORM_string consumes up to and including the NUL terminator (6 bytes).
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x0, 0x99, 0x99];
    test_parse_attribute(&buf,
                         6,
                         &test_parse_attribute_unit_default(),
                         constants::DW_FORM_string,
                         AttributeValue::String(&buf[..6]));
}
#[test]
fn test_parse_attribute_strp_32() {
    // In DWARF32, DW_FORM_strp is a 4-byte .debug_str offset.
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];
    test_parse_attribute(&buf,
                         4,
                         &test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf32),
                         constants::DW_FORM_strp,
                         AttributeValue::DebugStrRef(DebugStrOffset(67305985)));
}
#[test]
fn test_parse_attribute_strp_64() {
    // In DWARF64, DW_FORM_strp is an 8-byte .debug_str offset.
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];
    test_parse_attribute(&buf,
                         8,
                         &test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf64),
                         constants::DW_FORM_strp,
                         AttributeValue::DebugStrRef(DebugStrOffset(578437695752307201)));
}
#[test]
fn test_parse_attribute_indirect() {
    let mut buf = [0; 100];

    // DW_FORM_indirect encodes the real form as a leading ULEB128, followed
    // by a value in that form: here DW_FORM_udata then the udata value.
    let bytes_written = {
        let mut writable = &mut buf[..];
        leb128::write::unsigned(&mut writable, constants::DW_FORM_udata.0)
            .expect("should write udata") +
        leb128::write::unsigned(&mut writable, 9999999).expect("should write value")
    };

    test_parse_attribute(&buf,
                         bytes_written,
                         &test_parse_attribute_unit_default(),
                         constants::DW_FORM_indirect,
                         AttributeValue::Udata(9999999));
}
/// An iterator over a particular entry's attributes.
///
/// See [the documentation for
/// `DebuggingInformationEntry::attrs()`](./struct.DebuggingInformationEntry.html#method.attrs)
/// for details.
#[derive(Clone, Copy, Debug)]
pub struct AttrsIter<'input, 'abbrev, 'entry, 'unit, Endian>
    where 'input: 'entry + 'unit,
          'abbrev: 'entry,
          'unit: 'entry,
          Endian: Endianity + 'entry + 'unit
{
    // The not-yet-parsed attribute bytes for this entry.
    input: &'input [u8],
    // The attribute specifications still to be yielded, taken from the
    // entry's abbreviation.
    attributes: &'abbrev [AttributeSpecification],
    // The entry whose attributes are being iterated; used to record in
    // `after_attrs` where the attributes end once the iterator is exhausted.
    entry: &'entry DebuggingInformationEntry<'input, 'abbrev, 'unit, Endian>,
}
impl<'input, 'abbrev, 'entry, 'unit, Endian> Iterator for AttrsIter<'input,
                                                                    'abbrev,
                                                                    'entry,
                                                                    'unit,
                                                                    Endian>
    where Endian: Endianity
{
    type Item = ParseResult<Attribute<'input>>;

    // Yields each parsed attribute in turn; as a side effect, caches the
    // position just past the last attribute into `entry.after_attrs` when
    // the iterator is exhausted.
    fn next(&mut self) -> Option<Self::Item> {
        if self.attributes.len() == 0 {
            // Now that we have parsed all of the attributes, we know where
            // either (1) this entry's children start, if the abbreviation says
            // this entry has children; or (2) where this entry's siblings
            // begin.
            if let Some(end) = self.entry.after_attrs.get() {
                debug_assert!(end == self.input);
            } else {
                self.entry.after_attrs.set(Some(self.input));
            }

            return None;
        }

        // Pop the next attribute specification and parse its value.
        let attr = self.attributes[0];
        self.attributes = &self.attributes[1..];
        match parse_attribute(EndianBuf::new(self.input), self.entry.unit, attr) {
            Ok((rest, attr)) => {
                self.input = rest.into();
                Some(Ok(attr))
            }
            Err(e) => {
                // A parse error fuses the iterator: no further attributes
                // will be yielded.
                self.attributes = &[];
                Some(Err(e))
            }
        }
    }
}
#[test]
fn test_attrs_iter() {
    let unit = UnitHeader::<LittleEndian>::new(7,
                                               4,
                                               DebugAbbrevOffset(0x08070605),
                                               4,
                                               Format::Dwarf32,
                                               &[]);

    let abbrev = Abbreviation {
        code: 42,
        tag: constants::DW_TAG_subprogram,
        has_children: constants::DW_CHILDREN_yes,
        attributes: vec![AttributeSpecification {
                             name: constants::DW_AT_name,
                             form: constants::DW_FORM_string,
                         },
                         AttributeSpecification {
                             name: constants::DW_AT_low_pc,
                             form: constants::DW_FORM_addr,
                         },
                         AttributeSpecification {
                             name: constants::DW_AT_high_pc,
                             form: constants::DW_FORM_addr,
                         }],
    };

    // "foo", 42, 1337, 4 dangling bytes of 0xaa where children would be
    let buf = [0x66, 0x6f, 0x6f, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x39, 0x05, 0x00, 0x00, 0xaa, 0xaa,
               0xaa, 0xaa];

    let entry = DebuggingInformationEntry {
        attrs_slice: &buf,
        after_attrs: Cell::new(None),
        code: 1,
        abbrev: &abbrev,
        unit: &unit,
    };

    let mut attrs = AttrsIter {
        input: &buf[..],
        attributes: &abbrev.attributes[..],
        entry: &entry,
    };

    // First attribute: the subprogram's name.
    let attr = attrs.next()
        .expect("should have a first attribute")
        .expect("should parse the first attribute ok");
    assert_eq!(attr,
               Attribute {
                   name: constants::DW_AT_name,
                   value: AttributeValue::String(b"foo\0"),
               });
    assert!(entry.after_attrs.get().is_none());

    // Second attribute: the low PC.
    let attr = attrs.next()
        .expect("should have a second attribute")
        .expect("should parse the second attribute ok");
    assert_eq!(attr,
               Attribute {
                   name: constants::DW_AT_low_pc,
                   value: AttributeValue::Addr(&[0x2a, 0x00, 0x00, 0x00]),
               });
    assert!(entry.after_attrs.get().is_none());

    // Third attribute: the high PC.
    let attr = attrs.next()
        .expect("should have a third attribute")
        .expect("should parse the third attribute ok");
    assert_eq!(attr,
               Attribute {
                   name: constants::DW_AT_high_pc,
                   value: AttributeValue::Addr(&[0x39, 0x05, 0x00, 0x00]),
               });
    assert!(entry.after_attrs.get().is_none());

    // Exhausting the iterator records where the attributes ended: just
    // before the four dangling 0xaa bytes.
    assert!(attrs.next().is_none());
    assert!(entry.after_attrs.get().is_some());
    assert_eq!(entry.after_attrs.get().expect("should have entry.after_attrs"),
               &buf[buf.len() - 4..])
}
/// A cursor into the Debugging Information Entries tree for a compilation unit.
///
/// The `EntriesCursor` can traverse the DIE tree in either DFS order, or skip
/// to the next sibling of the entry the cursor is currently pointing to.
#[derive(Clone, Debug)]
pub struct EntriesCursor<'input, 'abbrev, 'unit, Endian>
    where 'input: 'unit,
          Endian: Endianity + 'unit
{
    // The remaining un-consumed DIE bytes; the cursor's current entry
    // starts at the front of this slice.
    input: &'input [u8],
    // The unit whose entries are being traversed.
    unit: &'unit UnitHeader<'input, Endian>,
    // The abbreviations used to decode each entry's abbreviation code.
    abbreviations: &'abbrev Abbreviations,
    // Lazily-computed, memoized result of `current()`; cleared whenever
    // the cursor moves.
    cached_current: RefCell<Option<ParseResult<DebuggingInformationEntry<'input,
                                                                         'abbrev,
                                                                         'unit,
                                                                         Endian>>>>,
}
impl<'input, 'abbrev, 'unit, Endian> EntriesCursor<'input, 'abbrev, 'unit, Endian>
    where Endian: Endianity
{
    /// Get the entry that the cursor is currently pointing to.
    pub fn current<'me>
        (&'me mut self)
        -> Option<ParseResult<DebuggingInformationEntry<'input, 'abbrev, 'unit, Endian>>> {
        // First, check for a cached result.
        {
            let cached = self.cached_current.borrow();
            if let Some(ref cached) = *cached {
                debug_assert!(cached.is_ok());
                return Some(cached.clone());
            }
        }

        if self.input.len() == 0 {
            return None;
        }

        match parse_unsigned_leb(self.input) {
            Err(e) => Some(Err(e)),
            // Null abbreviation is the lack of an entry.
            Ok((_, 0)) => None,
            Ok((rest, code)) => {
                if let Some(abbrev) = self.abbreviations.get(code) {
                    let result = Some(Ok(DebuggingInformationEntry {
                        attrs_slice: rest,
                        after_attrs: Cell::new(None),
                        code: code,
                        abbrev: abbrev,
                        unit: self.unit,
                    }));

                    // Memoize the successfully-parsed entry so repeated
                    // `current()` calls don't re-parse it.
                    let mut cached = self.cached_current.borrow_mut();
                    debug_assert!(cached.is_none());
                    mem::replace(&mut *cached, result.clone());
                    result
                } else {
                    Some(Err(Error::UnknownAbbreviation))
                }
            }
        }
    }

    /// Move the cursor to the next DIE in the tree in DFS order.
    ///
    /// Upon successful movement of the cursor, return the delta traversal
    /// depth:
    ///
    /// * If we moved down into the previous current entry's children, we get
    ///   `Some(1)`.
    ///
    /// * If we moved to the previous current entry's sibling, we get
    ///   `Some(0)`.
    ///
    /// * If the previous entry does not have any siblings and we move up to
    ///   its parent's next sibling, then we get `Some(-1)`. Note that if the
    ///   parent doesn't have a next sibling, then it could go up to the
    ///   parent's parent's next sibling and return `Some(-2)`, etc.
    ///
    /// If there is no next entry, then `None` is returned.
    ///
    /// Here is an example that finds the first entry in a compilation unit that
    /// does not have any children.
    ///
    /// ```
    /// # use gimli::{UnitHeader, DebugAbbrev, DebugInfo, LittleEndian};
    /// # let info_buf = [
    /// #     // Compilation unit header
    /// #
    /// #     // 32-bit unit length = 25
    /// #     0x19, 0x00, 0x00, 0x00,
    /// #     // Version 4
    /// #     0x04, 0x00,
    /// #     // debug_abbrev_offset
    /// #     0x00, 0x00, 0x00, 0x00,
    /// #     // Address size
    /// #     0x04,
    /// #
    /// #     // DIEs
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// # ];
    /// # let debug_info = DebugInfo::<LittleEndian>::new(&info_buf);
    /// #
    /// # let abbrev_buf = [
    /// #     // Code
    /// #     0x01,
    /// #     // DW_TAG_subprogram
    /// #     0x2e,
    /// #     // DW_CHILDREN_yes
    /// #     0x01,
    /// #     // Begin attributes
    /// #     // Attribute name = DW_AT_name
    /// #     0x03,
    /// #     // Attribute form = DW_FORM_string
    /// #     0x08,
    /// #     // End attributes
    /// #     0x00,
    /// #     0x00,
    /// #     // Null terminator
    /// #     0x00
    /// # ];
    /// # let debug_abbrev = DebugAbbrev::<LittleEndian>::new(&abbrev_buf);
    /// #
    /// # let get_some_unit = || debug_info.units().next().unwrap().unwrap();
    ///
    /// let unit = get_some_unit();
    /// # let get_abbrevs_for_unit = |_| unit.abbreviations(debug_abbrev).unwrap();
    /// let abbrevs = get_abbrevs_for_unit(&unit);
    ///
    /// let mut first_entry_with_no_children = None;
    /// let mut cursor = unit.entries(&abbrevs);
    ///
    /// // Keep looping while the cursor is moving deeper into the DIE tree.
    /// while let Some(delta_depth) = cursor.next_dfs() {
    ///     // 0 means we moved to a sibling, a negative number means we went back
    ///     // up to a parent's sibling. In either case, bail out of the loop because
    ///     // we aren't going deeper into the tree anymore.
    ///     if delta_depth <= 0 {
    ///         break;
    ///     }
    ///
    ///     let current = cursor.current()
    ///         .expect("Should be at an entry")
    ///         .expect("And we should parse the entry ok");
    ///     first_entry_with_no_children = Some(current);
    /// }
    ///
    /// println!("The first entry with no children is {:?}",
    ///          first_entry_with_no_children.unwrap());
    /// ```
    pub fn next_dfs(&mut self) -> Option<isize> {
        match self.current() {
            Some(Ok(current)) => {
                // Skip past the current entry's attributes. If they haven't
                // been iterated yet, exhaust the attribute iterator for its
                // side effect of recording `after_attrs`.
                self.input = if let Some(after_attrs) = current.after_attrs.get() {
                    after_attrs
                } else {
                    for _ in current.attrs() {
                    }
                    current.after_attrs
                        .get()
                        .expect("should have after_attrs after iterating attrs")
                };

                // Moving past an entry with children descends one level.
                let mut delta_depth = if current.abbrev.has_children() {
                    1
                } else {
                    0
                };

                // Keep eating null entries that mark the end of an entry's
                // children.
                while self.input.len() > 0 && self.input[0] == 0 {
                    delta_depth -= 1;
                    self.input = &self.input[1..];
                }

                // The cursor moved, so the memoized current entry is stale.
                let mut cached_current = self.cached_current.borrow_mut();
                mem::replace(&mut *cached_current, None);

                if self.input.len() > 0 {
                    Some(delta_depth)
                } else {
                    None
                }
            }
            _ => None,
        }
    }

    /// Move the cursor to the next sibling DIE of the current one.
    ///
    /// Returns `Some` when the cursor has been moved to the next
    /// sibling, `None` when there is no next sibling.
    ///
    /// After returning `None`, the cursor is exhausted.
    ///
    /// Here is an example that iterates over all of the direct children of the
    /// root entry:
    ///
    /// ```
    /// # use gimli::{DebugAbbrev, DebugInfo, LittleEndian};
    /// # let info_buf = [
    /// #     // Compilation unit header
    /// #
    /// #     // 32-bit unit length = 25
    /// #     0x19, 0x00, 0x00, 0x00,
    /// #     // Version 4
    /// #     0x04, 0x00,
    /// #     // debug_abbrev_offset
    /// #     0x00, 0x00, 0x00, 0x00,
    /// #     // Address size
    /// #     0x04,
    /// #
    /// #     // DIEs
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// # ];
    /// # let debug_info = DebugInfo::<LittleEndian>::new(&info_buf);
    /// #
    /// # let get_some_unit = || debug_info.units().next().unwrap().unwrap();
    ///
    /// # let abbrev_buf = [
    /// #     // Code
    /// #     0x01,
    /// #     // DW_TAG_subprogram
    /// #     0x2e,
    /// #     // DW_CHILDREN_yes
    /// #     0x01,
    /// #     // Begin attributes
    /// #     // Attribute name = DW_AT_name
    /// #     0x03,
    /// #     // Attribute form = DW_FORM_string
    /// #     0x08,
    /// #     // End attributes
    /// #     0x00,
    /// #     0x00,
    /// #     // Null terminator
    /// #     0x00
    /// # ];
    /// # let debug_abbrev = DebugAbbrev::<LittleEndian>::new(&abbrev_buf);
    /// #
    /// let unit = get_some_unit();
    /// # let get_abbrevs_for_unit = |_| unit.abbreviations(debug_abbrev).unwrap();
    /// let abbrevs = get_abbrevs_for_unit(&unit);
    ///
    /// let mut cursor = unit.entries(&abbrevs);
    ///
    /// // Move the cursor to the root's first child.
    /// assert_eq!(cursor.next_dfs().unwrap(), 1);
    ///
    /// // Iterate the root's children.
    /// loop {
    ///     let current = cursor.current()
    ///         .expect("Should be at an entry")
    ///         .expect("And we should parse the entry ok");
    ///
    ///     println!("{:?} is a child of the root", current);
    ///
    ///     if cursor.next_sibling().is_none() {
    ///         break;
    ///     }
    /// }
    /// ```
    pub fn next_sibling(&mut self) -> Option<()> {
        match self.current() {
            Some(Ok(current)) => {
                let sibling_ptr = current.attr_value(constants::DW_AT_sibling);
                if let Some(AttributeValue::UnitRef(offset)) = sibling_ptr {
                    if self.unit.is_valid_offset(offset) {
                        // Fast path: this entry has a DW_AT_sibling
                        // attribute pointing to its sibling.
                        self.input = &self.unit.range_from(offset..);
                        if self.input.len() > 0 && self.input[0] != 0 {
                            return Some(());
                        } else {
                            self.input = &[];
                            return None;
                        }
                    }
                }

                // Slow path: either the entry doesn't have a sibling pointer,
                // or the pointer is bogus. Do a DFS until we get to the next
                // sibling.
                let mut depth = 0;
                while let Some(delta_depth) = self.next_dfs() {
                    depth += delta_depth;

                    if depth == 0 && self.input[0] != 0 {
                        // We found the next sibling.
                        return Some(());
                    }

                    if depth < 0 {
                        // We moved up to the original entry's parent's (or
                        // parent's parent's, etc ...) siblings.
                        self.input = &[];
                        return None;
                    }
                }

                // No sibling found.
                self.input = &[];
                None
            }
            _ => {
                self.input = &[];
                None
            }
        }
    }
}
/// Parse a type unit header's unique type signature. Callers should handle
/// unique-ness checking.
///
/// The signature is always a 64-bit value, regardless of DWARF format.
fn parse_type_signature<'input, Endian>(input: EndianBuf<'input, Endian>)
                                        -> ParseResult<(EndianBuf<'input, Endian>, u64)>
    where Endian: Endianity
{
    parse_u64(input)
}
#[test]
fn test_parse_type_signature_ok() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];
    let (_, signature) = parse_type_signature(EndianBuf::<LittleEndian>::new(&buf))
        .expect("should parse the signature ok");
    assert_eq!(signature, 0x0807060504030201);
}
#[test]
fn test_parse_type_signature_incomplete() {
    // Signatures are 8 bytes; 7 bytes is one too few.
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07];
    assert_eq!(parse_type_signature(EndianBuf::<LittleEndian>::new(&buf)),
               Err(Error::UnexpectedEof));
}
/// Parse a type unit header's type offset.
///
/// The offset is 4 bytes in DWARF32 and 8 bytes in DWARF64; either way it is
/// wrapped in the `DebugTypesOffset` newtype.
fn parse_type_offset<'input, Endian>
    (input: EndianBuf<'input, Endian>, format: Format)
     -> ParseResult<(EndianBuf<'input, Endian>, DebugTypesOffset)>
    where Endian: Endianity
{
    match format {
        Format::Dwarf32 => {
            parse_u32_as_u64(input).map(|(rest, offset)| (rest, DebugTypesOffset(offset)))
        }
        Format::Dwarf64 => {
            parse_u64(input).map(|(rest, offset)| (rest, DebugTypesOffset(offset)))
        }
    }
}
#[test]
fn test_parse_type_offset_32_ok() {
    let buf = [0x12, 0x34, 0x56, 0x78, 0x00];
    let (rest, offset) = parse_type_offset(EndianBuf::<LittleEndian>::new(&buf), Format::Dwarf32)
        .expect("should parse the 32-bit offset ok");
    // One trailing byte should be left over.
    assert_eq!(rest.len(), 1);
    assert_eq!(offset, DebugTypesOffset(0x78563412));
}
#[test]
fn test_parse_type_offset_64_ok() {
    let buf = [0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xff, 0x00];
    let (rest, offset) = parse_type_offset(EndianBuf::<LittleEndian>::new(&buf), Format::Dwarf64)
        .expect("should parse the 64-bit offset ok");
    // One trailing byte should be left over.
    assert_eq!(rest.len(), 1);
    assert_eq!(offset, DebugTypesOffset(0xffdebc9a78563412));
}
#[test]
fn test_parse_type_offset_incomplete() {
    // Need at least 4 bytes.
    let buf = [0xff, 0xff, 0xff];
    assert_eq!(parse_type_offset(EndianBuf::<LittleEndian>::new(&buf), Format::Dwarf32),
               Err(Error::UnexpectedEof));
}
/// The `DebugTypes` struct represents the DWARF type information
/// found in the `.debug_types` section.
#[derive(Debug, Clone, Copy)]
pub struct DebugTypes<'input, Endian>
    where Endian: Endianity
{
    // The complete `.debug_types` section contents.
    debug_types_section: EndianBuf<'input, Endian>,
}
impl<'input, Endian> DebugTypes<'input, Endian>
    where Endian: Endianity
{
    /// Construct a new `DebugTypes` instance from the data in the `.debug_types`
    /// section.
    ///
    /// It is the caller's responsibility to read the `.debug_types` section and
    /// present it as a `&[u8]` slice. That means using some ELF loader on
    /// Linux, a Mach-O loader on OSX, etc.
    ///
    /// ```
    /// use gimli::{DebugTypes, LittleEndian};
    ///
    /// # let buf = [0x00, 0x01, 0x02, 0x03];
    /// # let read_debug_types_section_somehow = || &buf;
    /// let debug_types = DebugTypes::<LittleEndian>::new(read_debug_types_section_somehow());
    /// ```
    pub fn new(debug_types_section: &'input [u8]) -> DebugTypes<'input, Endian> {
        DebugTypes { debug_types_section: EndianBuf(debug_types_section, PhantomData) }
    }

    /// Iterate the type-units in this `.debug_types` section.
    ///
    /// ```
    /// use gimli::{DebugTypes, LittleEndian};
    ///
    /// # let buf = [];
    /// # let read_debug_types_section_somehow = || &buf;
    /// let debug_types = DebugTypes::<LittleEndian>::new(read_debug_types_section_somehow());
    ///
    /// for parse_result in debug_types.units() {
    ///     let unit = parse_result.unwrap();
    ///     println!("unit's length is {}", unit.unit_length());
    /// }
    /// ```
    pub fn units(&self) -> TypeUnitHeadersIter<'input, Endian> {
        // The iterator starts at the beginning of the section.
        TypeUnitHeadersIter { input: self.debug_types_section }
    }
}
/// An iterator over the type-units of this `.debug_types` section.
///
/// See the [documentation on
/// `DebugTypes::units`](./struct.DebugTypes.html#method.units) for
/// more detail.
pub struct TypeUnitHeadersIter<'input, Endian>
    where Endian: Endianity
{
    // The remaining un-consumed `.debug_types` bytes; emptied on parse
    // errors to fuse the iterator.
    input: EndianBuf<'input, Endian>,
}
impl<'input, Endian> Iterator for TypeUnitHeadersIter<'input, Endian>
    where Endian: Endianity
{
    type Item = ParseResult<TypeUnitHeader<'input, Endian>>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.input.is_empty() {
            None
        } else {
            match parse_type_unit_header(self.input) {
                Ok((_, header)) => {
                    let unit_len = header.length_including_self() as usize;
                    if self.input.len() < unit_len {
                        // The unit's claimed length runs off the end of the
                        // section; yield this header but end iteration after.
                        self.input = self.input.range_to(..0);
                    } else {
                        self.input = self.input.range_from(unit_len..);
                    }
                    Some(Ok(header))
                }
                Err(e) => {
                    // After a parse error we can't find the next unit, so
                    // empty the input to fuse the iterator.
                    self.input = self.input.range_to(..0);
                    Some(Err(e))
                }
            }
        }
    }
}
/// The header of a type unit's debugging information.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct TypeUnitHeader<'input, Endian>
    where Endian: Endianity
{
    // The common unit header fields shared with compilation units.
    header: UnitHeader<'input, Endian>,
    // The unique 64-bit signature of the type described by this unit.
    type_signature: u64,
    // The offset within this unit where the type's DIE is found.
    type_offset: DebugTypesOffset,
}
impl<'input, Endian> TypeUnitHeader<'input, Endian>
    where Endian: Endianity
{
    /// Construct a new `TypeUnitHeader`.
    fn new(mut header: UnitHeader<'input, Endian>,
           type_signature: u64,
           type_offset: DebugTypesOffset)
           -> TypeUnitHeader<'input, Endian> {
        // First, fix up the header's entries_buf. Currently it points
        // right after end of the header, but since this is a type
        // unit header, there are two more fields before entries
        // begin to account for.
        let additional = Self::additional_header_size(header.format);
        header.entries_buf = header.entries_buf.range_from(additional..);
        TypeUnitHeader {
            header: header,
            type_signature: type_signature,
            type_offset: type_offset,
        }
    }

    /// Get the length of the debugging info for this type-unit.
    pub fn unit_length(&self) -> u64 {
        self.header.unit_length
    }

    /// The number of bytes of header a type-unit has beyond the common
    /// compilation-/partial-unit header fields.
    fn additional_header_size(format: Format) -> usize {
        // There are two additional fields in a type-unit compared to
        // compilation- and partial-units. The type_signature is
        // always 64 bits regardless of format, the type_offset is 32
        // or 64 bits depending on the format.
        let type_signature_size = 8;
        let type_offset_size = match format {
            Format::Dwarf32 => 4,
            Format::Dwarf64 => 8,
        };
        type_signature_size + type_offset_size
    }

    /// Get the length of the debugging info for this type-unit,
    /// including the byte length of the encoded length itself.
    pub fn length_including_self(&self) -> u64 {
        // The unit_length field already covers everything that follows it —
        // including the type_signature and type_offset fields (as witnessed
        // by `new` above, which carves those fields out of entries_buf). So
        // the additional header size must NOT be added here; doing so would
        // double-count those bytes and make `TypeUnitHeadersIter` skip past
        // the start of the following unit.
        self.header.length_including_self()
    }

    /// Get the DWARF version of the debugging info for this type-unit.
    pub fn version(&self) -> u16 {
        self.header.version
    }

    /// The offset into the `.debug_abbrev` section for this type-unit's
    /// debugging information entries.
    pub fn debug_abbrev_offset(&self) -> DebugAbbrevOffset {
        self.header.debug_abbrev_offset
    }

    /// The size of addresses (in bytes) in this type-unit.
    pub fn address_size(&self) -> u8 {
        self.header.address_size
    }

    /// Get the unique type signature for this type unit.
    pub fn type_signature(&self) -> u64 {
        self.type_signature
    }

    /// Get the offset within this type unit where the type is defined.
    pub fn type_offset(&self) -> DebugTypesOffset {
        self.type_offset
    }

    /// Navigate this type unit's `DebuggingInformationEntry`s.
    pub fn entries<'me, 'abbrev>(&'me self,
                                 abbreviations: &'abbrev Abbreviations)
                                 -> EntriesCursor<'input, 'abbrev, 'me, Endian> {
        EntriesCursor {
            unit: &self.header,
            input: self.header.entries_buf.into(),
            abbreviations: abbreviations,
            cached_current: RefCell::new(None),
        }
    }

    /// Parse this type unit's abbreviations.
    ///
    /// ```
    /// use gimli::DebugAbbrev;
    /// # use gimli::{DebugTypes, LittleEndian};
    /// # let types_buf = [
    /// #     // Type unit header
    /// #
    /// #     // 32-bit unit length = 37
    /// #     0x25, 0x00, 0x00, 0x00,
    /// #     // Version 4
    /// #     0x04, 0x00,
    /// #     // debug_abbrev_offset
    /// #     0x00, 0x00, 0x00, 0x00,
    /// #     // Address size
    /// #     0x04,
    /// #     // Type signature
    /// #     0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    /// #     // Type offset
    /// #     0x01, 0x02, 0x03, 0x04,
    /// #
    /// #     // DIEs
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// # ];
    /// # let debug_types = DebugTypes::<LittleEndian>::new(&types_buf);
    /// #
    /// # let abbrev_buf = [
    /// #     // Code
    /// #     0x01,
    /// #     // DW_TAG_subprogram
    /// #     0x2e,
    /// #     // DW_CHILDREN_yes
    /// #     0x01,
    /// #     // Begin attributes
    /// #     // Attribute name = DW_AT_name
    /// #     0x03,
    /// #     // Attribute form = DW_FORM_string
    /// #     0x08,
    /// #     // End attributes
    /// #     0x00,
    /// #     0x00,
    /// #     // Null terminator
    /// #     0x00
    /// # ];
    /// #
    /// # let get_some_type_unit = || debug_types.units().next().unwrap().unwrap();
    ///
    /// let unit = get_some_type_unit();
    ///
    /// # let read_debug_abbrev_section_somehow = || &abbrev_buf;
    /// let debug_abbrev = DebugAbbrev::<LittleEndian>::new(read_debug_abbrev_section_somehow());
    /// let abbrevs_for_unit = unit.abbreviations(debug_abbrev).unwrap();
    /// ```
    pub fn abbreviations<'abbrev>(&self,
                                  debug_abbrev: DebugAbbrev<'abbrev, Endian>)
                                  -> ParseResult<Abbreviations> {
        let offset = self.debug_abbrev_offset().0 as usize;
        parse_abbreviations(&debug_abbrev.debug_abbrev_section.0[offset..])
            .map(|(_, abbrevs)| abbrevs)
    }
}
/// Parse a type unit header.
fn parse_type_unit_header<'input, Endian>
(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, TypeUnitHeader<'input, Endian>)>
where Endian: Endianity
{
let (rest, header) = try!(parse_unit_header(input));
let (rest, signature) = try!(parse_type_signature(rest));
let (rest, offset) = try!(parse_type_offset(rest, header.format()));
Ok((rest, TypeUnitHeader::new(header, signature, offset)))
}
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_parse_type_unit_header_64_ok() {
    let buf = [
        // Enable 64-bit unit length mode.
        0xff, 0xff, 0xff, 0xff,
        // The actual unit length (27).
        0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Version 4
        0x04, 0x00,
        // debug_abbrev_offset
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
        // Address size
        0x08,
        // Type signature
        0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde,
        // type offset
        0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78
    ];

    let (_, header) = parse_type_unit_header(EndianBuf::<LittleEndian>::new(&buf))
        .expect("should parse the type unit header ok");
    let expected =
        TypeUnitHeader::new(UnitHeader::new(27,
                                            4,
                                            DebugAbbrevOffset(0x0807060504030201),
                                            8,
                                            Format::Dwarf64,
                                            &buf[buf.len() - 16..]),
                            0xdeadbeefdeadbeef,
                            DebugTypesOffset(0x7856341278563412));
    assert_eq!(header, expected);
}
Reformat with rustfmt
//! Functions for parsing DWARF debugging information.
use byteorder;
use constants;
use leb128;
use std::cell::{Cell, RefCell};
use std::collections::hash_map;
use std::error;
use std::fmt::{self, Debug};
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::ops::{Deref, Index, Range, RangeFrom, RangeTo};
/// A trait describing the endianity of some buffer.
///
/// All interesting methods are from the `byteorder` crate's `ByteOrder`
/// trait. All methods are static. You shouldn't instantiate concrete objects
/// that implement this trait, it is just used as compile-time phantom data.
///
/// Implemented by the uninhabited `LittleEndian` and `BigEndian` marker
/// types below.
pub trait Endianity
    : byteorder::ByteOrder + Debug + Clone + Copy + PartialEq + Eq {
}
/// Little endian byte order.
///
/// An uninhabited enum: it can never be constructed, and serves purely as a
/// compile-time marker selecting the byte order.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LittleEndian {}

// Forward every `ByteOrder` method to the `byteorder` crate's own
// little-endian implementation.
impl byteorder::ByteOrder for LittleEndian {
    fn read_u16(buf: &[u8]) -> u16 {
        byteorder::LittleEndian::read_u16(buf)
    }
    fn read_u32(buf: &[u8]) -> u32 {
        byteorder::LittleEndian::read_u32(buf)
    }
    fn read_u64(buf: &[u8]) -> u64 {
        byteorder::LittleEndian::read_u64(buf)
    }
    fn read_uint(buf: &[u8], nbytes: usize) -> u64 {
        byteorder::LittleEndian::read_uint(buf, nbytes)
    }
    fn write_u16(buf: &mut [u8], n: u16) {
        byteorder::LittleEndian::write_u16(buf, n)
    }
    fn write_u32(buf: &mut [u8], n: u32) {
        byteorder::LittleEndian::write_u32(buf, n)
    }
    fn write_u64(buf: &mut [u8], n: u64) {
        byteorder::LittleEndian::write_u64(buf, n)
    }
    fn write_uint(buf: &mut [u8], n: u64, nbytes: usize) {
        byteorder::LittleEndian::write_uint(buf, n, nbytes)
    }
}

impl Endianity for LittleEndian {}
/// Big endian byte order.
///
/// An uninhabited enum: it can never be constructed, and serves purely as a
/// compile-time marker selecting the byte order.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BigEndian {}

// Forward every `ByteOrder` method to the `byteorder` crate's own
// big-endian implementation.
impl byteorder::ByteOrder for BigEndian {
    fn read_u16(buf: &[u8]) -> u16 {
        byteorder::BigEndian::read_u16(buf)
    }
    fn read_u32(buf: &[u8]) -> u32 {
        byteorder::BigEndian::read_u32(buf)
    }
    fn read_u64(buf: &[u8]) -> u64 {
        byteorder::BigEndian::read_u64(buf)
    }
    fn read_uint(buf: &[u8], nbytes: usize) -> u64 {
        byteorder::BigEndian::read_uint(buf, nbytes)
    }
    fn write_u16(buf: &mut [u8], n: u16) {
        byteorder::BigEndian::write_u16(buf, n)
    }
    fn write_u32(buf: &mut [u8], n: u32) {
        byteorder::BigEndian::write_u32(buf, n)
    }
    fn write_u64(buf: &mut [u8], n: u64) {
        byteorder::BigEndian::write_u64(buf, n)
    }
    fn write_uint(buf: &mut [u8], n: u64, nbytes: usize) {
        byteorder::BigEndian::write_uint(buf, n, nbytes)
    }
}

impl Endianity for BigEndian {}
/// An error that occurred when parsing.
///
/// Each variant identifies a distinct failure mode; see
/// `error::Error::description` for the human-readable messages.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Error {
    /// An error parsing an unsigned LEB128 value.
    BadUnsignedLeb128,
    /// An error parsing a signed LEB128 value.
    BadSignedLeb128,
    /// An abbreviation declared that its code is zero, but zero is reserved for
    /// null records.
    AbbreviationCodeZero,
    /// Found an unknown `DW_TAG_*` type.
    UnknownTag,
    /// The abbreviation's has-children byte was not one of
    /// `DW_CHILDREN_{yes,no}`.
    BadHasChildren,
    /// Found an unknown `DW_FORM_*` type.
    UnknownForm,
    /// Expected a zero, found something else.
    ExpectedZero,
    /// Found an abbreviation code that has already been used.
    DuplicateAbbreviationCode,
    /// Found an unknown reserved length value.
    UnknownReservedLength,
    /// Found an unknown DWARF version.
    UnknownVersion,
    /// The unit header's claimed length is too short to even hold the header
    /// itself.
    UnitHeaderLengthTooShort,
    /// Found a record with an unknown abbreviation code.
    UnknownAbbreviation,
    /// Hit the end of input before it was expected.
    UnexpectedEof,
}
impl fmt::Display for Error {
    /// Display the error using its derived `Debug` representation.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(f, "{:?}", self)
    }
}
impl error::Error for Error {
    /// Get a short, human-readable description of the error.
    fn description(&self) -> &str {
        // Every message is a single line. The longer variants use `\`
        // string continuations so that no raw newline or indentation
        // whitespace ends up embedded in the user-facing message.
        match *self {
            Error::BadUnsignedLeb128 => "An error parsing an unsigned LEB128 value",
            Error::BadSignedLeb128 => "An error parsing a signed LEB128 value",
            Error::AbbreviationCodeZero => {
                "An abbreviation declared that its code is zero, \
                 but zero is reserved for null records"
            }
            Error::UnknownTag => "Found an unknown `DW_TAG_*` type",
            Error::BadHasChildren => {
                "The abbreviation's has-children byte was not one of \
                 `DW_CHILDREN_{yes,no}`"
            }
            Error::UnknownForm => "Found an unknown `DW_FORM_*` type",
            Error::ExpectedZero => "Expected a zero, found something else",
            Error::DuplicateAbbreviationCode => {
                "Found an abbreviation code that has already been used"
            }
            Error::UnknownReservedLength => "Found an unknown reserved length value",
            Error::UnknownVersion => "Found an unknown DWARF version",
            Error::UnitHeaderLengthTooShort => {
                "The unit header's claimed length is too short to even hold \
                 the header itself"
            }
            Error::UnknownAbbreviation => "Found a record with an unknown abbreviation code",
            Error::UnexpectedEof => "Hit the end of input before it was expected",
        }
    }
}
/// The result of a parse.
///
/// Either the successfully parsed value, or an `Error` describing why
/// parsing failed.
pub type ParseResult<T> = Result<T, Error>;
/// A &[u8] slice with compile-time endianity metadata.
///
/// The `PhantomData` member carries the `Endian` type parameter without
/// taking up any space at runtime.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct EndianBuf<'input, Endian>(&'input [u8], PhantomData<Endian>) where Endian: Endianity;
impl<'input, Endian> EndianBuf<'input, Endian>
where Endian: Endianity
{
fn new(buf: &'input [u8]) -> EndianBuf<'input, Endian> {
EndianBuf(buf, PhantomData)
}
// Unfortunately, std::ops::Index *must* return a reference, so we can't
// implement Index<Range<usize>> to return a new EndianBuf the way we would
// like to. Instead, we abandon fancy indexing operators and have these
// plain old methods.
#[allow(dead_code)]
fn range_from(&self, idx: RangeFrom<usize>) -> EndianBuf<'input, Endian> {
EndianBuf(&self.0[idx], self.1)
}
fn range_to(&self, idx: RangeTo<usize>) -> EndianBuf<'input, Endian> {
EndianBuf(&self.0[idx], self.1)
}
}
impl<'input, Endian> Index<usize> for EndianBuf<'input, Endian>
    where Endian: Endianity
{
    type Output = u8;

    /// Byte-wise indexing. Panics on out-of-bounds indices, exactly like
    /// plain slice indexing.
    fn index(&self, idx: usize) -> &Self::Output {
        &self.0[idx]
    }
}
impl<'input, Endian> Deref for EndianBuf<'input, Endian>
    where Endian: Endianity
{
    type Target = [u8];

    /// Deref to the underlying bytes, so slice methods such as `len` and
    /// `is_empty` can be called directly on an `EndianBuf`.
    fn deref(&self) -> &Self::Target {
        self.0
    }
}
/// Extract the underlying byte slice.
///
/// Implemented as `From` rather than `Into`: the standard library's blanket
/// `impl<T, U: From<T>> Into<U> for T` then provides `Into<&'input [u8]>`
/// automatically, so existing `.into()` call sites keep working.
impl<'input, Endian> From<EndianBuf<'input, Endian>> for &'input [u8]
    where Endian: Endianity
{
    fn from(buf: EndianBuf<'input, Endian>) -> &'input [u8] {
        buf.0
    }
}
fn parse_u8(input: &[u8]) -> ParseResult<(&[u8], u8)> {
if input.len() == 0 {
Err(Error::UnexpectedEof)
} else {
Ok((&input[1..], input[0]))
}
}
fn parse_u16<'input, Endian>(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, u16)>
where Endian: Endianity
{
if input.len() < 2 {
Err(Error::UnexpectedEof)
} else {
Ok((input.range_from(2..), Endian::read_u16(&input)))
}
}
fn parse_u32<'input, Endian>(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, u32)>
where Endian: Endianity
{
if input.len() < 4 {
Err(Error::UnexpectedEof)
} else {
Ok((input.range_from(4..), Endian::read_u32(&input)))
}
}
fn parse_u64<'input, Endian>(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, u64)>
where Endian: Endianity
{
if input.len() < 8 {
Err(Error::UnexpectedEof)
} else {
Ok((input.range_from(8..), Endian::read_u64(&input)))
}
}
fn parse_u32_as_u64<'input, Endian>(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, u64)>
where Endian: Endianity
{
if input.len() < 4 {
Err(Error::UnexpectedEof)
} else {
Ok((input.range_from(4..), Endian::read_u32(&input) as u64))
}
}
// Typed wrappers around raw section offsets. Distinct newtypes keep offsets
// into different sections from being accidentally interchanged.

/// An offset into the `.debug_types` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugTypesOffset(pub u64);

/// An offset into the `.debug_str` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugStrOffset(pub u64);

/// An offset into the `.debug_abbrev` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugAbbrevOffset(pub u64);

/// An offset into the `.debug_info` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugInfoOffset(pub u64);

/// An offset into the `.debug_line` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugLineOffset(pub u64);

/// An offset into the `.debug_loc` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugLocOffset(pub u64);

/// An offset into the `.debug_macinfo` section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DebugMacinfoOffset(pub u64);

/// An offset into the current compilation or type unit.
///
/// Also derives `Ord`/`PartialOrd` so offsets can be compared as ranges.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
pub struct UnitOffset(pub u64);
/// The `DebugAbbrev` struct represents the abbreviations describing
/// `DebuggingInformationEntry`s' attribute names and forms found in the
/// `.debug_abbrev` section.
#[derive(Debug, Clone, Copy)]
pub struct DebugAbbrev<'input, Endian>
    where Endian: Endianity
{
    // The complete contents of the `.debug_abbrev` section.
    debug_abbrev_section: EndianBuf<'input, Endian>,
}
impl<'input, Endian> DebugAbbrev<'input, Endian>
where Endian: Endianity
{
/// Construct a new `DebugAbbrev` instance from the data in the `.debug_abbrev`
/// section.
///
/// It is the caller's responsibility to read the `.debug_abbrev` section and
/// present it as a `&[u8]` slice. That means using some ELF loader on
/// Linux, a Mach-O loader on OSX, etc.
///
/// ```
/// use gimli::{DebugAbbrev, LittleEndian};
///
/// # let buf = [0x00, 0x01, 0x02, 0x03];
/// # let read_debug_abbrev_section_somehow = || &buf;
/// let debug_abbrev = DebugAbbrev::<LittleEndian>::new(read_debug_abbrev_section_somehow());
/// ```
pub fn new(debug_abbrev_section: &'input [u8]) -> DebugAbbrev<'input, Endian> {
DebugAbbrev { debug_abbrev_section: EndianBuf(debug_abbrev_section, PhantomData) }
}
}
/// The `DebugInfo` struct represents the DWARF debugging information found in
/// the `.debug_info` section.
#[derive(Debug, Clone, Copy)]
pub struct DebugInfo<'input, Endian>
    where Endian: Endianity
{
    // The complete contents of the `.debug_info` section.
    debug_info_section: EndianBuf<'input, Endian>,
}
impl<'input, Endian> DebugInfo<'input, Endian>
where Endian: Endianity
{
/// Construct a new `DebugInfo` instance from the data in the `.debug_info`
/// section.
///
/// It is the caller's responsibility to read the `.debug_info` section and
/// present it as a `&[u8]` slice. That means using some ELF loader on
/// Linux, a Mach-O loader on OSX, etc.
///
/// ```
/// use gimli::{DebugInfo, LittleEndian};
///
/// # let buf = [0x00, 0x01, 0x02, 0x03];
/// # let read_debug_info_section_somehow = || &buf;
/// let debug_info = DebugInfo::<LittleEndian>::new(read_debug_info_section_somehow());
/// ```
pub fn new(debug_info_section: &'input [u8]) -> DebugInfo<'input, Endian> {
DebugInfo { debug_info_section: EndianBuf(debug_info_section, PhantomData) }
}
/// Iterate the compilation- and partial-units in this
/// `.debug_info` section.
///
/// ```
/// use gimli::{DebugInfo, LittleEndian};
///
/// # let buf = [];
/// # let read_debug_info_section_somehow = || &buf;
/// let debug_info = DebugInfo::<LittleEndian>::new(read_debug_info_section_somehow());
///
/// for parse_result in debug_info.units() {
/// let unit = parse_result.unwrap();
/// println!("unit's length is {}", unit.unit_length());
/// }
/// ```
pub fn units(&self) -> UnitHeadersIter<'input, Endian> {
UnitHeadersIter { input: self.debug_info_section }
}
}
/// An iterator over the compilation- and partial-units of a section.
///
/// See the [documentation on
/// `DebugInfo::units`](./struct.DebugInfo.html#method.units)
/// for more detail.
pub struct UnitHeadersIter<'input, Endian>
    where Endian: Endianity
{
    // The not-yet-iterated suffix of the `.debug_info` section; emptied to
    // terminate iteration.
    input: EndianBuf<'input, Endian>,
}
impl<'input, Endian> Iterator for UnitHeadersIter<'input, Endian>
    where Endian: Endianity
{
    type Item = ParseResult<UnitHeader<'input, Endian>>;

    /// Parse the next unit header, advancing past the whole unit on success.
    fn next(&mut self) -> Option<Self::Item> {
        if self.input.is_empty() {
            None
        } else {
            match parse_unit_header(self.input) {
                Ok((_, header)) => {
                    let unit_len = header.length_including_self() as usize;
                    if self.input.len() < unit_len {
                        // The header claims more bytes than the section has
                        // left; still yield it, but truncate the remaining
                        // input so iteration ends afterwards.
                        self.input = self.input.range_to(..0);
                    } else {
                        // Skip over this unit (header + DIE bytes) to the
                        // start of the next one.
                        self.input = self.input.range_from(unit_len..);
                    }
                    Some(Ok(header))
                }
                Err(e) => {
                    // Yield the error once; emptying the input keeps the
                    // iterator from producing anything further.
                    self.input = self.input.range_to(..0);
                    Some(Err(e))
                }
            }
        }
    }
}
// Exercises `DebugInfo::units` over a 64-bit unit followed by a 32-bit unit,
// checking both parsed headers and that iteration then terminates.
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_units() {
    let buf = [
        // First compilation unit.

        // Enable 64-bit DWARF.
        0xff, 0xff, 0xff, 0xff,
        // Unit length = 43
        0x2b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Version 4
        0x04, 0x00,
        // debug_abbrev_offset
        0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01,
        // address size
        0x08,

        // Placeholder data for first compilation unit's DIEs.
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,

        // Second compilation unit

        // 32-bit unit length = 39
        0x27, 0x00, 0x00, 0x00,
        // Version 4
        0x04, 0x00,
        // debug_abbrev_offset
        0x05, 0x06, 0x07, 0x08,
        // Address size
        0x04,

        // Placeholder data for second compilation unit's DIEs.
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
    ];

    let debug_info = DebugInfo::<LittleEndian>::new(&buf);
    let mut units = debug_info.units();

    match units.next() {
        Some(Ok(header)) => {
            // The first unit's DIE bytes start at offset 23 (4-byte escape +
            // 8-byte length + 2-byte version + 8-byte offset + 1-byte size).
            let expected = UnitHeader::<LittleEndian>::new(0x000000000000002b,
                                                           4,
                                                           DebugAbbrevOffset(0x0102030405060708),
                                                           8,
                                                           Format::Dwarf64,
                                                           &buf[23..23+32]);
            assert_eq!(header, expected);
        }
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }

    match units.next() {
        Some(Ok(header)) => {
            let expected =
                UnitHeader::new(0x00000027,
                                4,
                                DebugAbbrevOffset(0x08070605),
                                4,
                                Format::Dwarf32,
                                &buf[buf.len()-32..]);
            assert_eq!(header, expected);
        }
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }

    assert!(units.next().is_none());
}
/// Parse an unsigned LEB128 encoded integer.
///
/// Translates `leb128` crate errors into this module's `Error` type: an I/O
/// EOF becomes `UnexpectedEof`, any other failure `BadUnsignedLeb128`.
fn parse_unsigned_leb(mut input: &[u8]) -> ParseResult<(&[u8], u64)> {
    match leb128::read::unsigned(&mut input) {
        // `leb128::read` advances `input` past the consumed bytes.
        Ok(val) => Ok((input, val)),
        Err(leb128::read::Error::IoError(ref e)) if e.kind() == io::ErrorKind::UnexpectedEof => {
            Err(Error::UnexpectedEof)
        }
        Err(_) => Err(Error::BadUnsignedLeb128),
    }
}
/// Parse a signed LEB128 encoded integer.
///
/// Translates `leb128` crate errors into this module's `Error` type: an I/O
/// EOF becomes `UnexpectedEof`, any other failure `BadSignedLeb128`.
fn parse_signed_leb(mut input: &[u8]) -> ParseResult<(&[u8], i64)> {
    match leb128::read::signed(&mut input) {
        // `leb128::read` advances `input` past the consumed bytes.
        Ok(val) => Ok((input, val)),
        Err(leb128::read::Error::IoError(ref e)) if e.kind() == io::ErrorKind::UnexpectedEof => {
            Err(Error::UnexpectedEof)
        }
        Err(_) => Err(Error::BadSignedLeb128),
    }
}
/// Parse an abbreviation's code.
fn parse_abbreviation_code(input: &[u8]) -> ParseResult<(&[u8], u64)> {
let (rest, code) = try!(parse_unsigned_leb(input));
if code == 0 {
Err(Error::AbbreviationCodeZero)
} else {
Ok((rest, code))
}
}
/// Parse an abbreviation's tag.
fn parse_abbreviation_tag(input: &[u8]) -> ParseResult<(&[u8], constants::DwTag)> {
    let (rest, val) = try!(parse_unsigned_leb(input));
    if val == 0 {
        // NOTE(review): a zero *tag* is reported with the
        // `AbbreviationCodeZero` variant, which reads oddly here — confirm
        // whether a dedicated variant was intended.
        Err(Error::AbbreviationCodeZero)
    } else {
        Ok((rest, constants::DwTag(val)))
    }
}
/// Parse an abbreviation's "does the type have children?" byte.
fn parse_abbreviation_has_children(input: &[u8]) -> ParseResult<(&[u8], constants::DwChildren)> {
let (rest, val) = try!(parse_u8(input));
let val = constants::DwChildren(val);
if val == constants::DW_CHILDREN_no || val == constants::DW_CHILDREN_yes {
Ok((rest, val))
} else {
Err(Error::BadHasChildren)
}
}
/// Parse an attribute's name.
fn parse_attribute_name(input: &[u8]) -> ParseResult<(&[u8], constants::DwAt)> {
let (rest, val) = try!(parse_unsigned_leb(input));
Ok((rest, constants::DwAt(val)))
}
/// Parse an attribute's form.
fn parse_attribute_form(input: &[u8]) -> ParseResult<(&[u8], constants::DwForm)> {
let (rest, val) = try!(parse_unsigned_leb(input));
Ok((rest, constants::DwForm(val)))
}
/// The description of an attribute in an abbreviated type. It is a pair of name
/// and form.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct AttributeSpecification {
    // The attribute's `DW_AT_*` name.
    name: constants::DwAt,
    // The attribute's `DW_FORM_*` encoding.
    form: constants::DwForm,
}
impl AttributeSpecification {
    /// Construct a new `AttributeSpecification` from the given name and form.
    pub fn new(name: constants::DwAt, form: constants::DwForm) -> AttributeSpecification {
        AttributeSpecification {
            name: name,
            form: form,
        }
    }

    /// Get the attribute's name.
    pub fn name(&self) -> constants::DwAt {
        self.name
    }

    /// Get the attribute's form.
    pub fn form(&self) -> constants::DwForm {
        self.form
    }

    /// Return the size of the attribute, in bytes.
    ///
    /// Note that because some attributes are variably sized, the size cannot
    /// always be known without parsing, in which case we return `None`.
    pub fn size<'me, 'input, 'unit, Endian>(&'me self,
                                            header: &'unit UnitHeader<'input, Endian>)
                                            -> Option<usize>
        where Endian: Endianity
    {
        match self.form {
            // Address-sized value; width comes from the unit header.
            constants::DW_FORM_addr => Some(header.address_size() as usize),

            // Fixed one-, two-, four-, and eight-byte forms.
            constants::DW_FORM_flag |
            constants::DW_FORM_flag_present |
            constants::DW_FORM_data1 |
            constants::DW_FORM_ref1 => Some(1),

            constants::DW_FORM_data2 |
            constants::DW_FORM_ref2 => Some(2),

            constants::DW_FORM_data4 |
            constants::DW_FORM_ref4 => Some(4),

            constants::DW_FORM_data8 |
            constants::DW_FORM_ref8 => Some(8),

            // Offset-sized forms: 4 bytes in 32-bit DWARF, 8 in 64-bit.
            constants::DW_FORM_sec_offset |
            constants::DW_FORM_ref_addr |
            constants::DW_FORM_ref_sig8 |
            constants::DW_FORM_strp => {
                match header.format() {
                    Format::Dwarf32 => Some(4),
                    Format::Dwarf64 => Some(8),
                }
            }

            // Variably-sized forms (blocks, LEB128s, inline strings,
            // indirection): size is only known after parsing.
            constants::DW_FORM_block |
            constants::DW_FORM_block1 |
            constants::DW_FORM_block2 |
            constants::DW_FORM_block4 |
            constants::DW_FORM_exprloc |
            constants::DW_FORM_ref_udata |
            constants::DW_FORM_string |
            constants::DW_FORM_sdata |
            constants::DW_FORM_udata |
            constants::DW_FORM_indirect => None,

            // We don't know the size of unknown forms.
            _ => None,
        }
    }
}
/// Parse a non-null attribute specification: a name followed by a form.
fn parse_attribute_specification(input: &[u8]) -> ParseResult<(&[u8], AttributeSpecification)> {
    let (rest, name) = try!(parse_attribute_name(input));
    let (rest, form) = try!(parse_attribute_form(rest));
    Ok((rest, AttributeSpecification::new(name, form)))
}
/// Parse the null attribute specification.
fn parse_null_attribute_specification(input: &[u8]) -> ParseResult<(&[u8], ())> {
let (rest, name) = try!(parse_unsigned_leb(input));
if name != 0 {
return Err(Error::ExpectedZero);
}
let (rest, form) = try!(parse_unsigned_leb(rest));
if form != 0 {
return Err(Error::ExpectedZero);
}
Ok((rest, ()))
}
/// Parse a series of attribute specifications, terminated by a null attribute
/// specification.
fn parse_attribute_specifications(mut input: &[u8])
                                  -> ParseResult<(&[u8], Vec<AttributeSpecification>)> {
    let mut attrs = Vec::new();

    loop {
        // Try the null (terminator) form first; `None` encodes the
        // terminator, `Some(spec)` a real attribute specification.
        let result = parse_null_attribute_specification(input).map(|(rest, _)| (rest, None));
        let result = result.or_else(|_| parse_attribute_specification(input).map(|(rest, a)| (rest, Some(a))));

        let (rest, attr) = try!(result);
        input = rest;

        match attr {
            None => break,
            Some(attr) => attrs.push(attr),
        };
    }

    Ok((input, attrs))
}
/// An abbreviation describes the shape of a `DebuggingInformationEntry`'s type:
/// its code, tag type, whether it has children, and its set of attributes.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Abbreviation {
    // The abbreviation's code; always non-zero (zero is the null record).
    code: u64,
    // The entry's `DW_TAG_*` type.
    tag: constants::DwTag,
    // One of `DW_CHILDREN_{yes,no}`.
    has_children: constants::DwChildren,
    // The entry's attribute specifications, in declaration order.
    attributes: Vec<AttributeSpecification>,
}
impl Abbreviation {
    /// Construct a new `Abbreviation`.
    ///
    /// ### Panics
    ///
    /// Panics if `code` is `0`.
    pub fn new(code: u64,
               tag: constants::DwTag,
               has_children: constants::DwChildren,
               attributes: Vec<AttributeSpecification>)
               -> Abbreviation {
        // Zero is reserved for null records, so it can never name an
        // abbreviation.
        assert!(code != 0);
        Abbreviation {
            code: code,
            tag: tag,
            has_children: has_children,
            attributes: attributes,
        }
    }

    /// Get this abbreviation's code.
    pub fn code(&self) -> u64 {
        self.code
    }

    /// Get this abbreviation's tag.
    pub fn tag(&self) -> constants::DwTag {
        self.tag
    }

    /// Return true if this abbreviation's type has children, false otherwise.
    pub fn has_children(&self) -> bool {
        constants::DW_CHILDREN_yes == self.has_children
    }

    /// Get this abbreviation's attributes.
    pub fn attributes(&self) -> &[AttributeSpecification] {
        &self.attributes
    }
}
/// Parse a non-null abbreviation: code, tag, has-children byte, and
/// attribute specifications, in that order.
fn parse_abbreviation(input: &[u8]) -> ParseResult<(&[u8], Abbreviation)> {
    let (rest, code) = try!(parse_abbreviation_code(input));
    let (rest, tag) = try!(parse_abbreviation_tag(rest));
    let (rest, has_children) = try!(parse_abbreviation_has_children(rest));
    let (rest, attributes) = try!(parse_attribute_specifications(rest));
    Ok((rest, Abbreviation::new(code, tag, has_children, attributes)))
}
/// Parse a null abbreviation.
fn parse_null_abbreviation(input: &[u8]) -> ParseResult<(&[u8], ())> {
let (rest, name) = try!(parse_unsigned_leb(input));
if name == 0 {
Ok((rest, ()))
} else {
Err(Error::ExpectedZero)
}
}
/// A set of type abbreviations.
///
/// Construct an `Abbreviations` instance with the
/// [`abbreviations()`](struct.UnitHeader.html#method.abbreviations)
/// method.
#[derive(Debug, Default, Clone)]
pub struct Abbreviations {
    // Abbreviations keyed by their (non-zero) code.
    abbrevs: hash_map::HashMap<u64, Abbreviation>,
}
impl Abbreviations {
    /// Construct a new, empty set of abbreviations.
    fn empty() -> Abbreviations {
        Abbreviations { abbrevs: hash_map::HashMap::new() }
    }

    /// Insert an abbreviation into the set.
    ///
    /// Returns `Ok` if it is the first abbreviation in the set with its code,
    /// `Err` if the code is a duplicate and there already exists an
    /// abbreviation in the set with the given abbreviation's code.
    fn insert(&mut self, abbrev: Abbreviation) -> Result<(), ()> {
        // The entry API does the duplicate check and the insertion with a
        // single hash lookup.
        match self.abbrevs.entry(abbrev.code) {
            hash_map::Entry::Vacant(slot) => {
                slot.insert(abbrev);
                Ok(())
            }
            hash_map::Entry::Occupied(_) => Err(()),
        }
    }

    /// Get the abbreviation associated with the given code.
    fn get(&self, code: u64) -> Option<&Abbreviation> {
        self.abbrevs.get(&code)
    }
}
/// Parse a series of abbreviations, terminated by a null abbreviation.
///
/// Returns `DuplicateAbbreviationCode` if two abbreviations share a code.
fn parse_abbreviations(mut input: &[u8]) -> ParseResult<(&[u8], Abbreviations)> {
    let mut abbrevs = Abbreviations::empty();

    loop {
        // Try the null (terminator) form first; `None` encodes the
        // terminator, `Some(abbrev)` a real abbreviation.
        let result = parse_null_abbreviation(input).map(|(rest, _)| (rest, None));
        let result = result.or_else(|_| parse_abbreviation(input).map(|(rest, a)| (rest, Some(a))));

        let (rest, abbrev) = try!(result);
        input = rest;

        match abbrev {
            None => break,
            Some(abbrev) => {
                if let Err(_) = abbrevs.insert(abbrev) {
                    return Err(Error::DuplicateAbbreviationCode);
                }
            }
        }
    }

    Ok((input, abbrevs))
}
/// Whether the format of a compilation unit is 32- or 64-bit.
///
/// Determined by the encoding of the unit's initial length; see
/// `parse_unit_length`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Format {
    /// 64-bit DWARF
    Dwarf64,
    /// 32-bit DWARF
    Dwarf32,
}
// A 32-bit initial length must be strictly below this value; see
// `parse_unit_length`, which treats `MAX_DWARF_32_UNIT_LENGTH..=0xfffffffe`
// as reserved.
const MAX_DWARF_32_UNIT_LENGTH: u64 = 0xfffffff0;
// An initial length of 0xffffffff escapes to the 64-bit DWARF format, with
// the real length following as a 64-bit value.
const DWARF_64_INITIAL_UNIT_LENGTH: u64 = 0xffffffff;
/// Parse the compilation unit header's length.
///
/// Returns the length together with the DWARF format (32- or 64-bit) implied
/// by its encoding.
fn parse_unit_length<'input, Endian>(input: EndianBuf<'input, Endian>)
                                     -> ParseResult<(EndianBuf<'input, Endian>, (u64, Format))>
    where Endian: Endianity
{
    let (rest, val) = try!(parse_u32_as_u64(input));
    if val < MAX_DWARF_32_UNIT_LENGTH {
        // An ordinary 32-bit length.
        Ok((rest, (val, Format::Dwarf32)))
    } else if val == DWARF_64_INITIAL_UNIT_LENGTH {
        // The 64-bit escape value: the real length follows as a u64.
        let (rest, val) = try!(parse_u64(rest));
        Ok((rest, (val, Format::Dwarf64)))
    } else {
        // Everything in between is reserved.
        Err(Error::UnknownReservedLength)
    }
}
#[test]
fn test_parse_unit_length_32_ok() {
    let buf = [0x12, 0x34, 0x56, 0x78];

    match parse_unit_length(EndianBuf::<LittleEndian>::new(&buf)) {
        Ok((rest, (length, format))) => {
            assert_eq!(rest.len(), 0);
            assert_eq!(format, Format::Dwarf32);
            assert_eq!(0x78563412, length);
        }
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}

#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_parse_unit_length_64_ok() {
    let buf = [
        // DWARF_64_INITIAL_UNIT_LENGTH
        0xff, 0xff, 0xff, 0xff,
        // Actual length
        0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xff
    ];

    match parse_unit_length(EndianBuf::<LittleEndian>::new(&buf)) {
        Ok((rest, (length, format))) => {
            assert_eq!(rest.len(), 0);
            assert_eq!(format, Format::Dwarf64);
            assert_eq!(0xffdebc9a78563412, length);
        }
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}

#[test]
fn test_parse_unit_length_unknown_reserved_value() {
    let buf = [0xfe, 0xff, 0xff, 0xff];

    match parse_unit_length(EndianBuf::<LittleEndian>::new(&buf)) {
        // Matching the arm *is* the assertion; the old `assert!(true)` here
        // was a no-op.
        Err(Error::UnknownReservedLength) => {}
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}

#[test]
fn test_parse_unit_length_incomplete() {
    let buf = [0xff, 0xff, 0xff]; // Need at least 4 bytes.

    match parse_unit_length(EndianBuf::<LittleEndian>::new(&buf)) {
        Err(Error::UnexpectedEof) => {}
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}

#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_parse_unit_length_64_incomplete() {
    let buf = [
        // DWARF_64_INITIAL_UNIT_LENGTH
        0xff, 0xff, 0xff, 0xff,
        // Actual length is not long enough.
        0x12, 0x34, 0x56, 0x78
    ];

    match parse_unit_length(EndianBuf::<LittleEndian>::new(&buf)) {
        Err(Error::UnexpectedEof) => {}
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}
/// Parse the DWARF version from the compilation unit header.
fn parse_version<'input, Endian>(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, u16)>
where Endian: Endianity
{
let (rest, val) = try!(parse_u16(input));
// DWARF 1 was very different, and is obsolete, so isn't supported by this
// reader.
if 2 <= val && val <= 4 {
Ok((rest, val))
} else {
Err(Error::UnknownVersion)
}
}
#[test]
fn test_unit_version_ok() {
    // Version 4 and two extra bytes
    let buf = [0x04, 0x00, 0xff, 0xff];

    match parse_version(EndianBuf::<LittleEndian>::new(&buf)) {
        Ok((rest, val)) => {
            assert_eq!(val, 4);
            assert_eq!(rest, EndianBuf::new(&[0xff, 0xff]));
        }
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}

#[test]
fn test_unit_version_unknown_version() {
    // An arbitrary unknown version number.
    let buf = [0xab, 0xcd];

    match parse_version(EndianBuf::<LittleEndian>::new(&buf)) {
        // Matching the arm *is* the assertion; the old `assert!(true)` here
        // was a no-op.
        Err(Error::UnknownVersion) => {}
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };

    // Version 1 is explicitly unsupported.
    let buf = [0x1, 0x0];

    match parse_version(EndianBuf::<LittleEndian>::new(&buf)) {
        Err(Error::UnknownVersion) => {}
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}

#[test]
fn test_unit_version_incomplete() {
    let buf = [0x04];

    match parse_version(EndianBuf::<LittleEndian>::new(&buf)) {
        Err(Error::UnexpectedEof) => {}
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}
/// Parse the `debug_abbrev_offset` in the compilation unit header.
fn parse_debug_abbrev_offset<'input, Endian>
(input: EndianBuf<'input, Endian>,
format: Format)
-> ParseResult<(EndianBuf<'input, Endian>, DebugAbbrevOffset)>
where Endian: Endianity
{
let offset = match format {
Format::Dwarf32 => parse_u32_as_u64(input),
Format::Dwarf64 => parse_u64(input),
};
offset.map(|(rest, offset)| (rest, DebugAbbrevOffset(offset)))
}
#[test]
fn test_parse_debug_abbrev_offset_32() {
    let buf = [0x01, 0x02, 0x03, 0x04];

    match parse_debug_abbrev_offset(EndianBuf::<LittleEndian>::new(&buf), Format::Dwarf32) {
        Ok((_, val)) => assert_eq!(val, DebugAbbrevOffset(0x04030201)),
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}

#[test]
fn test_parse_debug_abbrev_offset_32_incomplete() {
    let buf = [0x01, 0x02];

    match parse_debug_abbrev_offset(EndianBuf::<LittleEndian>::new(&buf), Format::Dwarf32) {
        // Matching the arm *is* the assertion; the old `assert!(true)` here
        // was a no-op.
        Err(Error::UnexpectedEof) => {}
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}

#[test]
fn test_parse_debug_abbrev_offset_64() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];

    match parse_debug_abbrev_offset(EndianBuf::<LittleEndian>::new(&buf), Format::Dwarf64) {
        Ok((_, val)) => assert_eq!(val, DebugAbbrevOffset(0x0807060504030201)),
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}

#[test]
fn test_parse_debug_abbrev_offset_64_incomplete() {
    let buf = [0x01, 0x02];

    match parse_debug_abbrev_offset(EndianBuf::<LittleEndian>::new(&buf), Format::Dwarf64) {
        Err(Error::UnexpectedEof) => {}
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}
/// Parse the size of addresses (in bytes) on the target architecture.
///
/// The address size field is a single byte, so this is a thin wrapper around
/// `parse_u8`.
fn parse_address_size(input: &[u8]) -> ParseResult<(&[u8], u8)> {
    parse_u8(input)
}
// A single byte parses directly as the address size.
#[test]
fn test_parse_address_size_ok() {
    let buf = [0x04];

    match parse_address_size(&buf) {
        Ok((_, val)) => assert_eq!(val, 4),
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    };
}
/// The header of a compilation unit's debugging information.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct UnitHeader<'input, Endian>
    where Endian: Endianity
{
    // Length of the unit, *excluding* the encoded length field itself.
    unit_length: u64,
    // DWARF version (2 through 4; see `parse_version`).
    version: u16,
    // Where this unit's abbreviations live in `.debug_abbrev`.
    debug_abbrev_offset: DebugAbbrevOffset,
    // Size of a target address, in bytes.
    address_size: u8,
    // 32- or 64-bit DWARF, per the initial length encoding.
    format: Format,
    // The unit's DIE bytes: everything after the header, up to the unit's
    // claimed end.
    entries_buf: EndianBuf<'input, Endian>,
}
/// Static methods.
impl<'input, Endian> UnitHeader<'input, Endian>
where Endian: Endianity
{
/// Construct a new `UnitHeader`.
pub fn new(unit_length: u64,
version: u16,
debug_abbrev_offset: DebugAbbrevOffset,
address_size: u8,
format: Format,
entries_buf: &'input [u8])
-> UnitHeader<'input, Endian> {
UnitHeader {
unit_length: unit_length,
version: version,
debug_abbrev_offset: debug_abbrev_offset,
address_size: address_size,
format: format,
entries_buf: EndianBuf(entries_buf, PhantomData),
}
}
/// Return the serialized size of the `unit_length` attribute for the given
/// DWARF format.
pub fn size_of_unit_length(format: Format) -> usize {
match format {
Format::Dwarf32 => 4,
Format::Dwarf64 => 12,
}
}
/// Return the serialized size of the compilation unit header for the given
/// DWARF format.
pub fn size_of_header(format: Format) -> usize {
let unit_length_size = Self::size_of_unit_length(format);
let version_size = 2;
let debug_abbrev_offset_size = match format {
Format::Dwarf32 => 4,
Format::Dwarf64 => 8,
};
let address_size_size = 1;
unit_length_size + version_size + debug_abbrev_offset_size + address_size_size
}
}
/// Instance methods.
impl<'input, Endian> UnitHeader<'input, Endian>
    where Endian: Endianity
{
    /// Get the length of the debugging info for this compilation unit, not
    /// including the byte length of the encoded length itself.
    pub fn unit_length(&self) -> u64 {
        self.unit_length
    }

    /// Get the length of the debugging info for this compilation unit,
    /// including the byte length of the encoded length itself.
    pub fn length_including_self(&self) -> u64 {
        match self.format {
            // Length of the 32-bit header plus the unit length.
            Format::Dwarf32 => 4 + self.unit_length,
            // Length of the 4 byte 0xffffffff value to enable 64-bit mode plus
            // the actual 64-bit length.
            Format::Dwarf64 => 4 + 8 + self.unit_length,
        }
    }

    /// Get the DWARF version of the debugging info for this compilation unit.
    pub fn version(&self) -> u16 {
        self.version
    }

    /// The offset into the `.debug_abbrev` section for this compilation unit's
    /// debugging information entries' abbreviations.
    pub fn debug_abbrev_offset(&self) -> DebugAbbrevOffset {
        self.debug_abbrev_offset
    }

    /// The size of addresses (in bytes) in this compilation unit.
    pub fn address_size(&self) -> u8 {
        self.address_size
    }

    /// Whether this compilation unit is encoded in 64- or 32-bit DWARF.
    pub fn format(&self) -> Format {
        self.format
    }

    /// Return true if `offset` names a byte within this unit's entries
    /// buffer. Unit offsets are relative to the start of the unit, so any
    /// offset landing inside the header itself is invalid.
    fn is_valid_offset(&self, offset: UnitOffset) -> bool {
        let size_of_header = Self::size_of_header(self.format);
        // Bug fix: this was previously written as
        // `!offset.0 as usize >= size_of_header`, which parses as
        // `((!offset.0) as usize) >= size_of_header` -- a bitwise NOT rather
        // than a logical negation -- and so rejected nearly every valid
        // offset. The parenthesized comparison below is the intended check.
        if (offset.0 as usize) < size_of_header {
            return false;
        }

        let relative_to_entries_buf = offset.0 as usize - size_of_header;
        relative_to_entries_buf < self.entries_buf.len()
    }

    /// Get the underlying bytes for the supplied range.
    ///
    /// ### Panics
    ///
    /// Panics if either end of the range is not a valid offset into this
    /// unit's entries, or if the range is inverted.
    pub fn range(&self, idx: Range<UnitOffset>) -> &'input [u8] {
        assert!(self.is_valid_offset(idx.start));
        assert!(self.is_valid_offset(idx.end));
        assert!(idx.start <= idx.end);
        let size_of_header = Self::size_of_header(self.format);
        let start = idx.start.0 as usize - size_of_header;
        let end = idx.end.0 as usize - size_of_header;
        &self.entries_buf.0[start..end]
    }

    /// Get the underlying bytes for the supplied range.
    ///
    /// ### Panics
    ///
    /// Panics if `idx.start` is not a valid offset into this unit's entries.
    pub fn range_from(&self, idx: RangeFrom<UnitOffset>) -> &'input [u8] {
        assert!(self.is_valid_offset(idx.start));
        let start = idx.start.0 as usize - Self::size_of_header(self.format);
        &self.entries_buf.0[start..]
    }

    /// Get the underlying bytes for the supplied range.
    ///
    /// ### Panics
    ///
    /// Panics if `idx.end` is not a valid offset into this unit's entries.
    pub fn range_to(&self, idx: RangeTo<UnitOffset>) -> &'input [u8] {
        assert!(self.is_valid_offset(idx.end));
        let end = idx.end.0 as usize - Self::size_of_header(self.format);
        &self.entries_buf.0[..end]
    }

    /// Navigate this compilation unit's `DebuggingInformationEntry`s.
    pub fn entries<'me, 'abbrev>(&'me self,
                                 abbreviations: &'abbrev Abbreviations)
                                 -> EntriesCursor<'input, 'abbrev, 'me, Endian> {
        EntriesCursor {
            unit: self,
            input: self.entries_buf.into(),
            abbreviations: abbreviations,
            cached_current: RefCell::new(None),
        }
    }

    /// Parse the abbreviations at the given `offset` within this
    /// `.debug_abbrev` section.
    ///
    /// The `offset` should generally be retrieved from a unit header.
    ///
    /// ```
    /// use gimli::DebugAbbrev;
    /// # use gimli::{DebugInfo, LittleEndian};
    /// # let info_buf = [
    /// #     // Compilation unit header
    /// #
    /// #     // 32-bit unit length = 25
    /// #     0x19, 0x00, 0x00, 0x00,
    /// #     // Version 4
    /// #     0x04, 0x00,
    /// #     // debug_abbrev_offset
    /// #     0x00, 0x00, 0x00, 0x00,
    /// #     // Address size
    /// #     0x04,
    /// #
    /// #     // DIEs
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// # ];
    /// # let debug_info = DebugInfo::<LittleEndian>::new(&info_buf);
    /// #
    /// # let abbrev_buf = [
    /// #     // Code
    /// #     0x01,
    /// #     // DW_TAG_subprogram
    /// #     0x2e,
    /// #     // DW_CHILDREN_yes
    /// #     0x01,
    /// #     // Begin attributes
    /// #       // Attribute name = DW_AT_name
    /// #       0x03,
    /// #       // Attribute form = DW_FORM_string
    /// #       0x08,
    /// #     // End attributes
    /// #     0x00,
    /// #     0x00,
    /// #     // Null terminator
    /// #     0x00
    /// # ];
    /// #
    /// # let get_some_unit = || debug_info.units().next().unwrap().unwrap();
    ///
    /// let unit = get_some_unit();
    ///
    /// # let read_debug_abbrev_section_somehow = || &abbrev_buf;
    /// let debug_abbrev = DebugAbbrev::<LittleEndian>::new(read_debug_abbrev_section_somehow());
    /// let abbrevs_for_unit = unit.abbreviations(debug_abbrev).unwrap();
    /// ```
    pub fn abbreviations<'abbrev>(&self,
                                  debug_abbrev: DebugAbbrev<'abbrev, Endian>)
                                  -> ParseResult<Abbreviations> {
        parse_abbreviations(&debug_abbrev.debug_abbrev_section.0[self.debug_abbrev_offset
                                .0 as usize..])
            .map(|(_, abbrevs)| abbrevs)
    }
}
/// Parse a compilation unit header.
///
/// On success, the returned remainder still begins at the unit's DIE bytes;
/// callers such as `UnitHeadersIter` advance past the whole unit themselves
/// using `length_including_self`.
fn parse_unit_header<'input, Endian>
    (input: EndianBuf<'input, Endian>)
     -> ParseResult<(EndianBuf<'input, Endian>, UnitHeader<'input, Endian>)>
    where Endian: Endianity
{
    let (rest, (unit_length, format)) = try!(parse_unit_length(input));
    let (rest, version) = try!(parse_version(rest));
    let (rest, offset) = try!(parse_debug_abbrev_offset(rest, format));
    let (rest, address_size) = try!(parse_address_size(rest.into()));

    let size_of_unit_length = UnitHeader::<Endian>::size_of_unit_length(format);
    let size_of_header = UnitHeader::<Endian>::size_of_header(format);

    // `unit_length` counts everything after the length field itself, so it
    // must at least cover the rest of the header.
    // NOTE(review): `unit_length as usize + size_of_unit_length` could wrap
    // on 32-bit targets for hostile inputs -- confirm whether a checked add
    // is wanted here.
    if unit_length as usize + size_of_unit_length < size_of_header {
        return Err(Error::UnitHeaderLengthTooShort);
    }
    // Number of DIE bytes belonging to this unit.
    let end = unit_length as usize + size_of_unit_length - size_of_header;
    if end > rest.len() {
        return Err(Error::UnexpectedEof);
    }

    let entries_buf = &rest[..end];
    Ok((EndianBuf::new(rest),
        UnitHeader::new(unit_length,
                        version,
                        offset,
                        address_size,
                        format,
                        entries_buf)))
}
// A minimal 32-bit unit header with no DIE bytes (unit_length 7 exactly
// covers version + offset + address size).
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_parse_unit_header_32_ok() {
    let buf = [
        // 32-bit unit length
        0x07, 0x00, 0x00, 0x00,
        // Version 4
        0x04, 0x00,
        // Debug_abbrev_offset
        0x05, 0x06, 0x07, 0x08,
        // Address size
        0x04
    ];

    match parse_unit_header(EndianBuf::<LittleEndian>::new(&buf)) {
        Ok((_, header)) => {
            assert_eq!(header,
                       UnitHeader::new(7,
                                       4,
                                       DebugAbbrevOffset(0x08070605),
                                       4,
                                       Format::Dwarf32,
                                       &[]))
        }
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}
// A minimal 64-bit unit header with no DIE bytes (unit_length 11 exactly
// covers version + 8-byte offset + address size).
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_parse_unit_header_64_ok() {
    let buf = [
        // Enable 64-bit
        0xff, 0xff, 0xff, 0xff,
        // Unit length = 11
        0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Version 4
        0x04, 0x00,
        // debug_abbrev_offset
        0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01,
        // Address size
        0x08
    ];

    match parse_unit_header(EndianBuf::<LittleEndian>::new(&buf)) {
        Ok((_, header)) => {
            let expected = UnitHeader::new(11,
                                           4,
                                           DebugAbbrevOffset(0x0102030405060708),
                                           8,
                                           Format::Dwarf64,
                                           &[]);
            assert_eq!(header, expected)
        }
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}
/// A Debugging Information Entry (DIE).
///
/// DIEs have a set of attributes and optionally have children DIEs as well.
#[derive(Clone, Debug)]
pub struct DebuggingInformationEntry<'input, 'abbrev, 'unit, Endian>
    where 'input: 'unit,
          Endian: Endianity + 'unit
{
    // The bytes of this entry's attributes.
    attrs_slice: &'input [u8],
    // Lazily-filled cache of the input remaining after this entry's
    // attributes; presumably populated while the attributes are parsed --
    // the writer is outside this view, so confirm.
    after_attrs: Cell<Option<&'input [u8]>>,
    // This entry's abbreviation code.
    code: u64,
    // The abbreviation describing this entry's shape.
    abbrev: &'abbrev Abbreviation,
    // The header of the unit this entry belongs to.
    unit: &'unit UnitHeader<'input, Endian>,
}
impl<'input, 'abbrev, 'unit, Endian> DebuggingInformationEntry<'input, 'abbrev, 'unit, Endian>
    where Endian: Endianity
{
    /// Get this entry's code.
    pub fn code(&self) -> u64 {
        self.code
    }

    /// Get this entry's `DW_TAG_whatever` tag.
    ///
    /// ```
    /// # use gimli::{DebugAbbrev, DebugInfo, LittleEndian};
    /// # let info_buf = [
    /// #     // Compilation unit header
    /// #
    /// #     // 32-bit unit length = 12
    /// #     0x0c, 0x00, 0x00, 0x00,
    /// #     // Version 4
    /// #     0x04, 0x00,
    /// #     // debug_abbrev_offset
    /// #     0x00, 0x00, 0x00, 0x00,
    /// #     // Address size
    /// #     0x04,
    /// #
    /// #     // DIEs
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// # ];
    /// # let debug_info = DebugInfo::<LittleEndian>::new(&info_buf);
    /// # let abbrev_buf = [
    /// #     // Code
    /// #     0x01,
    /// #     // DW_TAG_subprogram
    /// #     0x2e,
    /// #     // DW_CHILDREN_no
    /// #     0x00,
    /// #     // Begin attributes
    /// #       // Attribute name = DW_AT_name
    /// #       0x03,
    /// #       // Attribute form = DW_FORM_string
    /// #       0x08,
    /// #     // End attributes
    /// #     0x00,
    /// #     0x00,
    /// #     // Null terminator
    /// #     0x00
    /// # ];
    /// # let debug_abbrev = DebugAbbrev::<LittleEndian>::new(&abbrev_buf);
    /// # let unit = debug_info.units().next().unwrap().unwrap();
    /// # let abbrevs = unit.abbreviations(debug_abbrev).unwrap();
    /// # let mut cursor = unit.entries(&abbrevs);
    /// # let mut get_some_entry = || cursor.current().unwrap().unwrap();
    /// let entry = get_some_entry();
    ///
    /// match entry.tag() {
    ///     gimli::DW_TAG_subprogram =>
    ///         println!("this entry contains debug info about a function"),
    ///     gimli::DW_TAG_inlined_subroutine =>
    ///         println!("this entry contains debug info about a particular instance of inlining"),
    ///     gimli::DW_TAG_variable =>
    ///         println!("this entry contains debug info about a local variable"),
    ///     gimli::DW_TAG_formal_parameter =>
    ///         println!("this entry contains debug info about a function parameter"),
    ///     otherwise =>
    ///         println!("this entry is some other kind of data: {:?}", otherwise),
    /// };
    /// ```
    pub fn tag(&self) -> constants::DwTag {
        self.abbrev.tag()
    }

    /// Iterate over this entry's set of attributes.
    ///
    /// ```
    /// use gimli::{DebugAbbrev, DebugInfo, LittleEndian};
    ///
    /// // Read the `.debug_info` section.
    ///
    /// # let info_buf = [
    /// #     // Compilation unit header
    /// #
    /// #     // 32-bit unit length = 12
    /// #     0x0c, 0x00, 0x00, 0x00,
    /// #     // Version 4
    /// #     0x04, 0x00,
    /// #     // debug_abbrev_offset
    /// #     0x00, 0x00, 0x00, 0x00,
    /// #     // Address size
    /// #     0x04,
    /// #
    /// #     // DIEs
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// # ];
    /// # let read_debug_info_section_somehow = || &info_buf;
    /// let debug_info = DebugInfo::<LittleEndian>::new(read_debug_info_section_somehow());
    ///
    /// // Get the data about the first compilation unit out of the `.debug_info`.
    ///
    /// let unit = debug_info.units().next()
    ///     .expect("Should have at least one compilation unit")
    ///     .expect("and it should parse ok");
    ///
    /// // Read the `.debug_abbrev` section and parse the
    /// // abbreviations for our compilation unit.
    ///
    /// # let abbrev_buf = [
    /// #     // Code
    /// #     0x01,
    /// #     // DW_TAG_subprogram
    /// #     0x2e,
    /// #     // DW_CHILDREN_no
    /// #     0x00,
    /// #     // Begin attributes
    /// #       // Attribute name = DW_AT_name
    /// #       0x03,
    /// #       // Attribute form = DW_FORM_string
    /// #       0x08,
    /// #     // End attributes
    /// #     0x00,
    /// #     0x00,
    /// #     // Null terminator
    /// #     0x00
    /// # ];
    /// # let read_debug_abbrev_section_somehow = || &abbrev_buf;
    /// let debug_abbrev = DebugAbbrev::<LittleEndian>::new(read_debug_abbrev_section_somehow());
    /// let abbrevs = unit.abbreviations(debug_abbrev).unwrap();
    ///
    /// // Get the first entry from that compilation unit.
    ///
    /// let mut cursor = unit.entries(&abbrevs);
    /// let entry = cursor.current()
    ///     .expect("Should have at least one entry")
    ///     .expect("and it should parse ok");
    ///
    /// // Finally, print the first entry's attributes.
    ///
    /// for attr_result in entry.attrs() {
    ///     let attr = attr_result.unwrap();
    ///
    ///     println!("Attribute name = {:?}", attr.name());
    ///     println!("Attribute value = {:?}", attr.value());
    /// }
    /// ```
    pub fn attrs<'me>(&'me self) -> AttrsIter<'input, 'abbrev, 'me, 'unit, Endian> {
        AttrsIter {
            input: self.attrs_slice,
            attributes: &self.abbrev.attributes[..],
            entry: self,
        }
    }

    /// Find the first attribute in this entry which has the given name,
    /// and return its value. Returns `None` if no attribute with that name is
    /// found, or if any attribute before it fails to parse (the iteration
    /// stops at the first parse error).
    pub fn attr_value(&self, name: constants::DwAt) -> Option<AttributeValue<'input>> {
        self.attrs()
            .take_while(|res| res.is_ok())
            .find(|res| res.unwrap().name() == name)
            .map(|res| res.unwrap().value())
    }
}
/// The value of an attribute in a `DebuggingInformationEntry`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum AttributeValue<'input> {
    /// A slice that is `UnitHeader::address_size` bytes long.
    Addr(&'input [u8]),

    /// A slice of an arbitrary number of bytes.
    Block(&'input [u8]),

    /// A one, two, four, or eight byte constant data value. How to interpret
    /// the bytes depends on context.
    ///
    /// From section 7 of the standard: "Depending on context, it may be a
    /// signed integer, an unsigned integer, a floating-point constant, or
    /// anything else."
    Data(&'input [u8]),

    /// A signed integer constant.
    Sdata(i64),

    /// An unsigned integer constant.
    Udata(u64),

    /// "The information bytes contain a DWARF expression (see Section 2.5) or
    /// location description (see Section 2.6)."
    Exprloc(&'input [u8]),

    /// A boolean typically used to describe the presence or absence of another
    /// attribute.
    Flag(bool),

    /// An offset into another section. Which section this is an offset into
    /// depends on context.
    SecOffset(u64),

    /// An offset into the current compilation unit.
    UnitRef(UnitOffset),

    /// An offset into the current `.debug_info` section, but possibly a
    /// different compilation unit from the current one.
    DebugInfoRef(DebugInfoOffset),

    /// An offset into the `.debug_types` section.
    DebugTypesRef(DebugTypesOffset),

    /// An offset into the `.debug_str` section.
    DebugStrRef(DebugStrOffset),

    /// A null terminated C string, including the final null byte. Not
    /// guaranteed to be UTF-8 or anything like that.
    String(&'input [u8]),
}
/// An attribute in a `DebuggingInformationEntry`, consisting of a name and
/// associated value.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Attribute<'input> {
    // The attribute's name, a `DW_AT_*` constant.
    name: constants::DwAt,
    // The attribute's parsed value.
    value: AttributeValue<'input>,
}
impl<'input> Attribute<'input> {
    /// Get this attribute's name (a `DW_AT_*` constant).
    pub fn name(&self) -> constants::DwAt {
        self.name
    }

    /// Get this attribute's value.
    pub fn value(&self) -> AttributeValue<'input> {
        self.value
    }
}
/// Take a slice of size `bytes` from the input.
///
/// On success, returns `(rest, taken)` where `taken` is the first `bytes`
/// bytes and `rest` is everything after them. Fails with `UnexpectedEof`
/// when the input is too short.
fn take(bytes: usize, input: &[u8]) -> ParseResult<(&[u8], &[u8])> {
    if bytes <= input.len() {
        let (taken, rest) = input.split_at(bytes);
        Ok((rest, taken))
    } else {
        Err(Error::UnexpectedEof)
    }
}
fn length_u8_value(input: &[u8]) -> ParseResult<(&[u8], &[u8])> {
let (rest, len) = try!(parse_u8(input));
take(len as usize, rest)
}
/// Parse a `u16` length prefix, then take that many bytes from the input.
fn length_u16_value<'input, Endian>(input: EndianBuf<'input, Endian>)
                                    -> ParseResult<(EndianBuf<'input, Endian>, &'input [u8])>
    where Endian: Endianity
{
    let (rest, len) = try!(parse_u16(input));
    let (rest, value) = try!(take(len as usize, rest.into()));
    Ok((EndianBuf::new(rest), value))
}
/// Parse a `u32` length prefix, then take that many bytes from the input.
fn length_u32_value<'input, Endian>(input: EndianBuf<'input, Endian>)
                                    -> ParseResult<(EndianBuf<'input, Endian>, &'input [u8])>
    where Endian: Endianity
{
    let (rest, len) = try!(parse_u32(input));
    let (rest, value) = try!(take(len as usize, rest.into()));
    Ok((EndianBuf::new(rest), value))
}
fn length_leb_value(input: &[u8]) -> ParseResult<(&[u8], &[u8])> {
let (rest, len) = try!(parse_unsigned_leb(input));
take(len as usize, rest)
}
/// Parse a single attribute described by `spec` from `input`, interpreting
/// the raw bytes according to the spec's `DW_FORM_*` and the unit's address
/// size / DWARF format.
///
/// Returns the remaining input and the parsed `Attribute`. The loop exists
/// only to resolve `DW_FORM_indirect`, whose real form is encoded in the
/// data stream itself; every other arm returns immediately.
fn parse_attribute<'input, 'unit, Endian>
    (mut input: EndianBuf<'input, Endian>,
     unit: &'unit UnitHeader<'input, Endian>,
     spec: AttributeSpecification)
     -> ParseResult<(EndianBuf<'input, Endian>, Attribute<'input>)>
    where Endian: Endianity
{
    let mut form = spec.form;
    loop {
        match form {
            // The form is stored in the data; read it and dispatch again.
            constants::DW_FORM_indirect => {
                let (rest, dynamic_form) = try!(parse_attribute_form(input.into()));
                form = dynamic_form;
                input = EndianBuf::new(rest);
                continue;
            }
            constants::DW_FORM_addr => {
                return take(unit.address_size() as usize, input.into()).map(|(rest, addr)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Addr(addr),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_block1 => {
                return length_u8_value(input.into()).map(|(rest, block)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Block(block),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_block2 => {
                return length_u16_value(input.into()).map(|(rest, block)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Block(block),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_block4 => {
                return length_u32_value(input.into()).map(|(rest, block)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Block(block),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_block => {
                return length_leb_value(input.into()).map(|(rest, block)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Block(block),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_data1 => {
                return take(1, input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Data(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_data2 => {
                return take(2, input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Data(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_data4 => {
                return take(4, input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Data(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_data8 => {
                return take(8, input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Data(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_udata => {
                return parse_unsigned_leb(input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Udata(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_sdata => {
                return parse_signed_leb(input.into()).map(|(rest, data)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Sdata(data),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_exprloc => {
                return length_leb_value(input.into()).map(|(rest, block)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Exprloc(block),
                    };
                    (EndianBuf::new(rest), attr)
                })
            }
            constants::DW_FORM_flag => {
                return parse_u8(input.into()).map(|(rest, present)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::Flag(present != 0),
                    };
                    (EndianBuf::new(rest), attr)
                })
            }
            constants::DW_FORM_flag_present => {
                // DW_FORM_flag_present is implicitly true: the flag's
                // presence is recorded in the abbreviation alone and it
                // occupies no space in the serialized DIE, so no input is
                // consumed here.
                return Ok((input,
                           Attribute {
                               name: spec.name,
                               value: AttributeValue::Flag(true),
                           }));
            }
            // Offset width depends on the unit's DWARF format (32/64-bit).
            constants::DW_FORM_sec_offset => {
                return match unit.format() {
                    Format::Dwarf32 => {
                        parse_u32(input.into()).map(|(rest, offset)| {
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::SecOffset(offset as u64),
                            };
                            (rest, attr)
                        })
                    }
                    Format::Dwarf64 => {
                        parse_u64(input.into()).map(|(rest, offset)| {
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::SecOffset(offset),
                            };
                            (rest, attr)
                        })
                    }
                };
            }
            constants::DW_FORM_ref1 => {
                return parse_u8(input.into()).map(|(rest, reference)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::UnitRef(UnitOffset(reference as u64)),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            constants::DW_FORM_ref2 => {
                return parse_u16(input.into()).map(|(rest, reference)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::UnitRef(UnitOffset(reference as u64)),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_ref4 => {
                return parse_u32(input.into()).map(|(rest, reference)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::UnitRef(UnitOffset(reference as u64)),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_ref8 => {
                return parse_u64(input.into()).map(|(rest, reference)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::UnitRef(UnitOffset(reference)),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_ref_udata => {
                return parse_unsigned_leb(input.into()).map(|(rest, reference)| {
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::UnitRef(UnitOffset(reference)),
                    };
                    (EndianBuf::new(rest), attr)
                });
            }
            // Offset width depends on the unit's DWARF format (32/64-bit).
            constants::DW_FORM_ref_addr => {
                return match unit.format() {
                    Format::Dwarf32 => {
                        parse_u32(input.into()).map(|(rest, offset)| {
                            let offset = DebugInfoOffset(offset as u64);
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::DebugInfoRef(offset),
                            };
                            (rest, attr)
                        })
                    }
                    Format::Dwarf64 => {
                        parse_u64(input.into()).map(|(rest, offset)| {
                            let offset = DebugInfoOffset(offset);
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::DebugInfoRef(offset),
                            };
                            (rest, attr)
                        })
                    }
                };
            }
            constants::DW_FORM_ref_sig8 => {
                return parse_u64(input.into()).map(|(rest, offset)| {
                    let offset = DebugTypesOffset(offset);
                    let attr = Attribute {
                        name: spec.name,
                        value: AttributeValue::DebugTypesRef(offset),
                    };
                    (rest, attr)
                });
            }
            constants::DW_FORM_string => {
                // Inline, null-terminated string; the returned slice keeps
                // the terminating null byte.
                let null_idx = input.iter().position(|ch| *ch == 0);

                if let Some(idx) = null_idx {
                    let buf: &[u8] = input.into();
                    return Ok((input.range_from(idx + 1..),
                               Attribute {
                                   name: spec.name,
                                   value: AttributeValue::String(&buf[0..idx + 1]),
                               }));
                } else {
                    return Err(Error::UnexpectedEof);
                }
            }
            // Offset width depends on the unit's DWARF format (32/64-bit).
            constants::DW_FORM_strp => {
                return match unit.format() {
                    Format::Dwarf32 => {
                        parse_u32(input.into()).map(|(rest, offset)| {
                            let offset = DebugStrOffset(offset as u64);
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::DebugStrRef(offset),
                            };
                            (rest, attr)
                        })
                    }
                    Format::Dwarf64 => {
                        parse_u64(input.into()).map(|(rest, offset)| {
                            let offset = DebugStrOffset(offset);
                            let attr = Attribute {
                                name: spec.name,
                                value: AttributeValue::DebugStrRef(offset),
                            };
                            (rest, attr)
                        })
                    }
                };
            }
            _ => {
                return Err(Error::UnknownForm);
            }
        };
    }
}
// Build a dummy unit header with the given address size and DWARF format for
// the attribute-parsing tests below.
#[cfg(test)]
fn test_parse_attribute_unit<Endian>(address_size: u8,
                                     format: Format)
                                     -> UnitHeader<'static, Endian>
    where Endian: Endianity
{
    UnitHeader::<Endian>::new(7,
                              4,
                              DebugAbbrevOffset(0x08070605),
                              address_size,
                              format,
                              &[])
}
// Default test unit header: little-endian, 4-byte addresses, 32-bit DWARF.
#[cfg(test)]
fn test_parse_attribute_unit_default() -> UnitHeader<'static, LittleEndian> {
    test_parse_attribute_unit(4, Format::Dwarf32)
}
// Shared driver for the attribute-parsing tests: parses `buf` with the given
// form and asserts that the result is `value` and that exactly `len` bytes
// were consumed.
#[cfg(test)]
fn test_parse_attribute<Endian>(buf: &[u8],
                                len: usize,
                                unit: &UnitHeader<Endian>,
                                form: constants::DwForm,
                                value: AttributeValue)
    where Endian: Endianity
{
    let spec = AttributeSpecification {
        name: constants::DW_AT_low_pc,
        form: form,
    };

    let expect = Attribute {
        name: constants::DW_AT_low_pc,
        value: value,
    };

    match parse_attribute(EndianBuf::new(buf), unit, spec) {
        Ok((rest, attr)) => {
            assert_eq!(attr, expect);
            assert_eq!(rest, EndianBuf::new(&buf[len..]));
        }
        // `panic!` with the unexpected result beats the old
        // `println!` + `assert!(false)`: the message is part of the failure.
        otherwise => panic!("Unexpected parse result = {:#?}", otherwise),
    };
}
// DW_FORM_addr consumes `address_size` (here 4) bytes.
#[test]
fn test_parse_attribute_addr() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];

    let unit = test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf32);

    let form = constants::DW_FORM_addr;
    let value = AttributeValue::Addr(&buf[..4]);
    test_parse_attribute(&buf, 4, &unit, form, value);
}
// DW_FORM_addr with an 8-byte address size consumes 8 bytes.
#[test]
fn test_parse_attribute_addr8() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];

    let unit = test_parse_attribute_unit::<LittleEndian>(8, Format::Dwarf32);

    let form = constants::DW_FORM_addr;
    let value = AttributeValue::Addr(&buf[..8]);
    test_parse_attribute(&buf, 8, &unit, form, value);
}
// DW_FORM_block1: one-byte length prefix followed by that many data bytes.
#[test]
fn test_parse_attribute_block1() {
    // Length of data (3), three bytes of data, two bytes of left over input.
    let buf = [0x03, 0x09, 0x09, 0x09, 0x00, 0x00];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_block1;
    let value = AttributeValue::Block(&buf[1..4]);
    test_parse_attribute(&buf, 4, &unit, form, value);
}
// DW_FORM_block2: two-byte length prefix followed by that many data bytes.
#[test]
fn test_parse_attribute_block2() {
    // Two byte length of data (2), two bytes of data, two bytes of left over input.
    let buf = [0x02, 0x00, 0x09, 0x09, 0x00, 0x00];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_block2;
    let value = AttributeValue::Block(&buf[2..4]);
    test_parse_attribute(&buf, 4, &unit, form, value);
}
// DW_FORM_block4: four-byte length prefix followed by that many data bytes.
#[test]
fn test_parse_attribute_block4() {
    // Four byte length of data (2), two bytes of data, no left over input.
    let buf = [0x02, 0x00, 0x00, 0x00, 0x99, 0x99];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_block4;
    let value = AttributeValue::Block(&buf[4..]);
    test_parse_attribute(&buf, 6, &unit, form, value);
}
// DW_FORM_block: unsigned-LEB128 length prefix followed by that many bytes.
#[test]
fn test_parse_attribute_block() {
    // LEB length of data (2, one byte), two bytes of data, no left over input.
    let buf = [0x02, 0x99, 0x99];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_block;
    let value = AttributeValue::Block(&buf[1..]);
    test_parse_attribute(&buf, 3, &unit, form, value);
}
// DW_FORM_data1: a single constant byte.
#[test]
fn test_parse_attribute_data1() {
    let buf = [0x03];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_data1;
    let value = AttributeValue::Data(&buf[..]);
    test_parse_attribute(&buf, 1, &unit, form, value);
}
// DW_FORM_data2: a two-byte constant.
#[test]
fn test_parse_attribute_data2() {
    let buf = [0x02, 0x01, 0x0];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_data2;
    let value = AttributeValue::Data(&buf[..2]);
    test_parse_attribute(&buf, 2, &unit, form, value);
}
// DW_FORM_data4: a four-byte constant.
#[test]
fn test_parse_attribute_data4() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x99, 0x99];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_data4;
    let value = AttributeValue::Data(&buf[..4]);
    test_parse_attribute(&buf, 4, &unit, form, value);
}
// DW_FORM_data8: an eight-byte constant.
#[test]
fn test_parse_attribute_data8() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_data8;
    let value = AttributeValue::Data(&buf[..8]);
    test_parse_attribute(&buf, 8, &unit, form, value);
}
// DW_FORM_udata: an unsigned LEB128 constant.
#[test]
fn test_parse_attribute_udata() {
    let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];

    // Encode the value with leb128 so the test doesn't hand-roll the bytes.
    let bytes_written = {
        let mut writable = &mut buf[..];
        leb128::write::unsigned(&mut writable, 4097).expect("should write ok")
    };

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_udata;
    let value = AttributeValue::Udata(4097);
    test_parse_attribute(&buf, bytes_written, &unit, form, value);
}
// DW_FORM_sdata: a signed LEB128 constant.
#[test]
fn test_parse_attribute_sdata() {
    let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];

    // Encode the value with leb128 so the test doesn't hand-roll the bytes.
    let bytes_written = {
        let mut writable = &mut buf[..];
        leb128::write::signed(&mut writable, -4097).expect("should write ok")
    };

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_sdata;
    let value = AttributeValue::Sdata(-4097);
    test_parse_attribute(&buf, bytes_written, &unit, form, value);
}
// DW_FORM_exprloc: LEB128 length prefix followed by DWARF-expression bytes.
#[test]
fn test_parse_attribute_exprloc() {
    // LEB length of data (2, one byte), two bytes of data, one byte left over input.
    let buf = [0x02, 0x99, 0x99, 0x11];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_exprloc;
    let value = AttributeValue::Exprloc(&buf[1..3]);
    test_parse_attribute(&buf, 3, &unit, form, value);
}
// DW_FORM_flag: any non-zero byte parses as `true`.
#[test]
fn test_parse_attribute_flag_true() {
    let buf = [0x42];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_flag;
    let value = AttributeValue::Flag(true);
    test_parse_attribute(&buf, 1, &unit, form, value);
}
// DW_FORM_flag: a zero byte parses as `false`.
#[test]
fn test_parse_attribute_flag_false() {
    let buf = [0x00];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_flag;
    let value = AttributeValue::Flag(false);
    test_parse_attribute(&buf, 1, &unit, form, value);
}
// DW_FORM_flag_present: always `true`, encoded only in the abbreviation.
#[test]
fn test_parse_attribute_flag_present() {
    let buf = [0x01, 0x02, 0x03, 0x04];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_flag_present;
    let value = AttributeValue::Flag(true);
    // DW_FORM_flag_present does not consume any bytes of the input stream.
    test_parse_attribute(&buf, 0, &unit, form, value);
}
// DW_FORM_sec_offset in 32-bit DWARF consumes a 4-byte offset.
#[test]
fn test_parse_attribute_sec_offset_32() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10];

    let unit = test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf32);

    let form = constants::DW_FORM_sec_offset;
    let value = AttributeValue::SecOffset(0x04030201);
    test_parse_attribute(&buf, 4, &unit, form, value);
}
// DW_FORM_sec_offset in 64-bit DWARF consumes an 8-byte offset.
#[test]
fn test_parse_attribute_sec_offset_64() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10];

    let unit = test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf64);

    let form = constants::DW_FORM_sec_offset;
    let value = AttributeValue::SecOffset(0x0807060504030201);
    test_parse_attribute(&buf, 8, &unit, form, value);
}
// DW_FORM_ref1: one-byte unit-relative reference.
#[test]
fn test_parse_attribute_ref1() {
    let buf = [0x03];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_ref1;
    let value = AttributeValue::UnitRef(UnitOffset(3));
    test_parse_attribute(&buf, 1, &unit, form, value);
}
// DW_FORM_ref2: two-byte unit-relative reference.
#[test]
fn test_parse_attribute_ref2() {
    let buf = [0x02, 0x01, 0x0];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_ref2;
    let value = AttributeValue::UnitRef(UnitOffset(258));
    test_parse_attribute(&buf, 2, &unit, form, value);
}
// DW_FORM_ref4: four-byte unit-relative reference.
#[test]
fn test_parse_attribute_ref4() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x99, 0x99];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_ref4;
    let value = AttributeValue::UnitRef(UnitOffset(67305985));
    test_parse_attribute(&buf, 4, &unit, form, value);
}
// DW_FORM_ref8: eight-byte unit-relative reference.
#[test]
fn test_parse_attribute_ref8() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_ref8;
    let value = AttributeValue::UnitRef(UnitOffset(578437695752307201));
    test_parse_attribute(&buf, 8, &unit, form, value);
}
// DW_FORM_ref_udata: unsigned-LEB128 unit-relative reference.
#[test]
fn test_parse_attribute_refudata() {
    let mut buf = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];

    // Encode the value with leb128 so the test doesn't hand-roll the bytes.
    let bytes_written = {
        let mut writable = &mut buf[..];
        leb128::write::unsigned(&mut writable, 4097).expect("should write ok")
    };

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_ref_udata;
    let value = AttributeValue::UnitRef(UnitOffset(4097));
    test_parse_attribute(&buf, bytes_written, &unit, form, value);
}
// DW_FORM_ref_addr in 32-bit DWARF consumes a 4-byte `.debug_info` offset.
#[test]
fn test_parse_attribute_refaddr_32() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];

    let unit = test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf32);

    let form = constants::DW_FORM_ref_addr;
    let value = AttributeValue::DebugInfoRef(DebugInfoOffset(67305985));
    test_parse_attribute(&buf, 4, &unit, form, value);
}
// DW_FORM_ref_addr in 64-bit DWARF consumes an 8-byte `.debug_info` offset.
#[test]
fn test_parse_attribute_refaddr_64() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];

    let unit = test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf64);

    let form = constants::DW_FORM_ref_addr;
    let value = AttributeValue::DebugInfoRef(DebugInfoOffset(578437695752307201));
    test_parse_attribute(&buf, 8, &unit, form, value);
}
// DW_FORM_ref_sig8: eight-byte type signature into `.debug_types`.
#[test]
fn test_parse_attribute_refsig8() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_ref_sig8;
    let value = AttributeValue::DebugTypesRef(DebugTypesOffset(578437695752307201));
    test_parse_attribute(&buf, 8, &unit, form, value);
}
// DW_FORM_string: inline null-terminated string; the parsed slice keeps the
// terminating null byte.
#[test]
fn test_parse_attribute_string() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x0, 0x99, 0x99];

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_string;
    let value = AttributeValue::String(&buf[..6]);
    test_parse_attribute(&buf, 6, &unit, form, value);
}
// DW_FORM_strp in 32-bit DWARF consumes a 4-byte `.debug_str` offset.
#[test]
fn test_parse_attribute_strp_32() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];

    let unit = test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf32);

    let form = constants::DW_FORM_strp;
    let value = AttributeValue::DebugStrRef(DebugStrOffset(67305985));
    test_parse_attribute(&buf, 4, &unit, form, value);
}
// DW_FORM_strp in 64-bit DWARF consumes an 8-byte `.debug_str` offset.
#[test]
fn test_parse_attribute_strp_64() {
    let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x99, 0x99];

    let unit = test_parse_attribute_unit::<LittleEndian>(4, Format::Dwarf64);

    let form = constants::DW_FORM_strp;
    let value = AttributeValue::DebugStrRef(DebugStrOffset(578437695752307201));
    test_parse_attribute(&buf, 8, &unit, form, value);
}
// DW_FORM_indirect: the real form (here DW_FORM_udata) is read from the data
// stream before the value itself.
#[test]
fn test_parse_attribute_indirect() {
    let mut buf = [0; 100];

    // Write the dynamic form first, then its value; total bytes consumed is
    // the sum of both writes.
    let bytes_written = {
        let mut writable = &mut buf[..];
        leb128::write::unsigned(&mut writable, constants::DW_FORM_udata.0)
            .expect("should write udata") +
        leb128::write::unsigned(&mut writable, 9999999).expect("should write value")
    };

    let unit = test_parse_attribute_unit_default();

    let form = constants::DW_FORM_indirect;
    let value = AttributeValue::Udata(9999999);
    test_parse_attribute(&buf, bytes_written, &unit, form, value);
}
/// An iterator over a particular entry's attributes.
///
/// See [the documentation for
/// `DebuggingInformationEntry::attrs()`](./struct.DebuggingInformationEntry.html#method.attrs)
/// for details.
#[derive(Clone, Copy, Debug)]
pub struct AttrsIter<'input, 'abbrev, 'entry, 'unit, Endian>
    where 'input: 'entry + 'unit,
          'abbrev: 'entry,
          'unit: 'entry,
          Endian: Endianity + 'entry + 'unit
{
    // The raw bytes of the not-yet-parsed attributes.
    input: &'input [u8],
    // The attribute specifications remaining to be parsed.
    attributes: &'abbrev [AttributeSpecification],
    // The entry whose attributes are being iterated; used to record
    // `after_attrs` once iteration finishes.
    entry: &'entry DebuggingInformationEntry<'input, 'abbrev, 'unit, Endian>,
}
impl<'input, 'abbrev, 'entry, 'unit, Endian> Iterator for AttrsIter<'input,
                                                                    'abbrev,
                                                                    'entry,
                                                                    'unit,
                                                                    Endian>
    where Endian: Endianity
{
    type Item = ParseResult<Attribute<'input>>;

    /// Parse and yield the next attribute, or `None` once every attribute
    /// specification has been consumed. The first parse error ends the
    /// iteration.
    fn next(&mut self) -> Option<Self::Item> {
        // Idiomatic emptiness check (`is_empty`) instead of `len() == 0`.
        if self.attributes.is_empty() {
            // Now that we have parsed all of the attributes, we know where
            // either (1) this entry's children start, if the abbreviation says
            // this entry has children; or (2) where this entry's siblings
            // begin.
            if let Some(end) = self.entry.after_attrs.get() {
                debug_assert!(end == self.input);
            } else {
                self.entry.after_attrs.set(Some(self.input));
            }

            return None;
        }

        let attr = self.attributes[0];
        self.attributes = &self.attributes[1..];
        match parse_attribute(EndianBuf::new(self.input), self.entry.unit, attr) {
            Ok((rest, attr)) => {
                self.input = rest.into();
                Some(Ok(attr))
            }
            Err(e) => {
                // Once a parse fails we no longer know where the next
                // attribute starts, so drop the remaining specifications to
                // terminate the iteration after yielding the error.
                self.attributes = &[];
                Some(Err(e))
            }
        }
    }
}
// Walks a three-attribute entry with `AttrsIter` and checks each yielded
// attribute, plus the lazy `after_attrs` bookkeeping: it must stay `None`
// until the iterator is exhausted, then point just past the attributes.
#[test]
fn test_attrs_iter() {
    let unit = UnitHeader::<LittleEndian>::new(7,
                                               4,
                                               DebugAbbrevOffset(0x08070605),
                                               4,
                                               Format::Dwarf32,
                                               &[]);

    let abbrev = Abbreviation {
        code: 42,
        tag: constants::DW_TAG_subprogram,
        has_children: constants::DW_CHILDREN_yes,
        attributes: vec![
            AttributeSpecification {
                name: constants::DW_AT_name,
                form: constants::DW_FORM_string,
            },
            AttributeSpecification {
                name: constants::DW_AT_low_pc,
                form: constants::DW_FORM_addr,
            },
            AttributeSpecification {
                name: constants::DW_AT_high_pc,
                form: constants::DW_FORM_addr,
            },
        ],
    };

    // "foo", 42, 1337, 4 dangling bytes of 0xaa where children would be
    let buf = [0x66, 0x6f, 0x6f, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x39, 0x05, 0x00, 0x00, 0xaa, 0xaa,
               0xaa, 0xaa];

    let entry = DebuggingInformationEntry {
        attrs_slice: &buf,
        after_attrs: Cell::new(None),
        code: 1,
        abbrev: &abbrev,
        unit: &unit,
    };

    let mut attrs = AttrsIter {
        input: &buf[..],
        attributes: &abbrev.attributes[..],
        entry: &entry,
    };

    match attrs.next() {
        Some(Ok(attr)) => {
            assert_eq!(attr,
                       Attribute {
                           name: constants::DW_AT_name,
                           value: AttributeValue::String(b"foo\0"),
                       });
        }
        otherwise => {
            println!("Unexpected parse result = {:#?}", otherwise);
            assert!(false);
        }
    }

    // Not yet exhausted, so `after_attrs` must still be unset.
    assert!(entry.after_attrs.get().is_none());

    match attrs.next() {
        Some(Ok(attr)) => {
            assert_eq!(attr,
                       Attribute {
                           name: constants::DW_AT_low_pc,
                           value: AttributeValue::Addr(&[0x2a, 0x00, 0x00, 0x00]),
                       });
        }
        otherwise => {
            println!("Unexpected parse result = {:#?}", otherwise);
            assert!(false);
        }
    }

    assert!(entry.after_attrs.get().is_none());

    match attrs.next() {
        Some(Ok(attr)) => {
            assert_eq!(attr,
                       Attribute {
                           name: constants::DW_AT_high_pc,
                           value: AttributeValue::Addr(&[0x39, 0x05, 0x00, 0x00]),
                       });
        }
        otherwise => {
            println!("Unexpected parse result = {:#?}", otherwise);
            assert!(false);
        }
    }

    assert!(entry.after_attrs.get().is_none());

    // Exhausting the iterator populates `after_attrs` with the input
    // remaining after the last attribute (the dangling 0xaa bytes).
    assert!(attrs.next().is_none());
    assert!(entry.after_attrs.get().is_some());
    assert_eq!(entry.after_attrs.get().expect("should have entry.after_attrs"),
               &buf[buf.len() - 4..])
}
/// A cursor into the Debugging Information Entries tree for a compilation unit.
///
/// The `EntriesCursor` can traverse the DIE tree in either DFS order, or skip
/// to the next sibling of the entry the cursor is currently pointing to.
#[derive(Clone, Debug)]
pub struct EntriesCursor<'input, 'abbrev, 'unit, Endian>
    where 'input: 'unit,
          Endian: Endianity + 'unit
{
    // The raw bytes of the not-yet-traversed entries.
    input: &'input [u8],
    // The compilation unit header the cursor is traversing.
    unit: &'unit UnitHeader<'input, Endian>,
    // The abbreviations used to decode each entry.
    abbreviations: &'abbrev Abbreviations,
    // Cache of the entry at the current position, filled in by `current()`
    // and cleared whenever the cursor moves.
    cached_current: RefCell<Option<ParseResult<DebuggingInformationEntry<'input,
                                                                         'abbrev,
                                                                         'unit,
                                                                         Endian>>>>,
}
impl<'input, 'abbrev, 'unit, Endian> EntriesCursor<'input, 'abbrev, 'unit, Endian>
where Endian: Endianity
{
/// Get the entry that the cursor is currently pointing to.
pub fn current<'me>
(&'me mut self)
-> Option<ParseResult<DebuggingInformationEntry<'input, 'abbrev, 'unit, Endian>>> {
// First, check for a cached result.
{
let cached = self.cached_current.borrow();
if let Some(ref cached) = *cached {
debug_assert!(cached.is_ok());
return Some(cached.clone());
}
}
if self.input.len() == 0 {
return None;
}
match parse_unsigned_leb(self.input) {
Err(e) => Some(Err(e)),
// Null abbreviation is the lack of an entry.
Ok((_, 0)) => None,
Ok((rest, code)) => {
if let Some(abbrev) = self.abbreviations.get(code) {
let result = Some(Ok(DebuggingInformationEntry {
attrs_slice: rest,
after_attrs: Cell::new(None),
code: code,
abbrev: abbrev,
unit: self.unit,
}));
let mut cached = self.cached_current.borrow_mut();
debug_assert!(cached.is_none());
mem::replace(&mut *cached, result.clone());
result
} else {
Some(Err(Error::UnknownAbbreviation))
}
}
}
}
/// Move the cursor to the next DIE in the tree in DFS order.
///
/// Upon successful movement of the cursor, return the delta traversal
/// depth:
///
/// * If we moved down into the previous current entry's children, we get
/// `Some(1)`.
///
/// * If we moved to the previous current entry's sibling, we get
/// `Some(0)`.
///
/// * If the previous entry does not have any siblings and we move up to
/// its parent's next sibling, then we get `Some(-1)`. Note that if the
/// parent doesn't have a next sibling, then it could go up to the
/// parent's parent's next sibling and return `Some(-2)`, etc.
///
/// If there is no next entry, then `None` is returned.
///
/// Here is an example that finds the first entry in a compilation unit that
/// does not have any children.
///
/// ```
/// # use gimli::{UnitHeader, DebugAbbrev, DebugInfo, LittleEndian};
/// # let info_buf = [
/// # // Comilation unit header
/// #
/// # // 32-bit unit length = 25
/// # 0x19, 0x00, 0x00, 0x00,
/// # // Version 4
/// # 0x04, 0x00,
/// # // debug_abbrev_offset
/// # 0x00, 0x00, 0x00, 0x00,
/// # // Address size
/// # 0x04,
/// #
/// # // DIEs
/// #
/// # // Abbreviation code
/// # 0x01,
/// # // Attribute of form DW_FORM_string = "foo\0"
/// # 0x66, 0x6f, 0x6f, 0x00,
/// #
/// # // Children
/// #
/// # // Abbreviation code
/// # 0x01,
/// # // Attribute of form DW_FORM_string = "foo\0"
/// # 0x66, 0x6f, 0x6f, 0x00,
/// #
/// # // Children
/// #
/// # // Abbreviation code
/// # 0x01,
/// # // Attribute of form DW_FORM_string = "foo\0"
/// # 0x66, 0x6f, 0x6f, 0x00,
/// #
/// # // Children
/// #
/// # // End of children
/// # 0x00,
/// #
/// # // End of children
/// # 0x00,
/// #
/// # // End of children
/// # 0x00,
/// # ];
/// # let debug_info = DebugInfo::<LittleEndian>::new(&info_buf);
/// #
/// # let abbrev_buf = [
/// # // Code
/// # 0x01,
/// # // DW_TAG_subprogram
/// # 0x2e,
/// # // DW_CHILDREN_yes
/// # 0x01,
/// # // Begin attributes
/// # // Attribute name = DW_AT_name
/// # 0x03,
/// # // Attribute form = DW_FORM_string
/// # 0x08,
/// # // End attributes
/// # 0x00,
/// # 0x00,
/// # // Null terminator
/// # 0x00
/// # ];
/// # let debug_abbrev = DebugAbbrev::<LittleEndian>::new(&abbrev_buf);
/// #
/// # let get_some_unit = || debug_info.units().next().unwrap().unwrap();
///
/// let unit = get_some_unit();
/// # let get_abbrevs_for_unit = |_| unit.abbreviations(debug_abbrev).unwrap();
/// let abbrevs = get_abbrevs_for_unit(&unit);
///
/// let mut first_entry_with_no_children = None;
/// let mut cursor = unit.entries(&abbrevs);
///
/// // Keep looping while the cursor is moving deeper into the DIE tree.
/// while let Some(delta_depth) = cursor.next_dfs() {
/// // 0 means we moved to a sibling, a negative number means we went back
/// // up to a parent's sibling. In either case, bail out of the loop because
/// // we aren't going deeper into the tree anymore.
/// if delta_depth <= 0 {
/// break;
/// }
///
/// let current = cursor.current()
/// .expect("Should be at an entry")
/// .expect("And we should parse the entry ok");
/// first_entry_with_no_children = Some(current);
/// }
///
/// println!("The first entry with no children is {:?}",
/// first_entry_with_no_children.unwrap());
/// ```
pub fn next_dfs(&mut self) -> Option<isize> {
match self.current() {
Some(Ok(current)) => {
self.input = if let Some(after_attrs) = current.after_attrs.get() {
after_attrs
} else {
for _ in current.attrs() {
}
current.after_attrs
.get()
.expect("should have after_attrs after iterating attrs")
};
let mut delta_depth = if current.abbrev.has_children() {
1
} else {
0
};
// Keep eating null entries that mark the end of an entry's
// children.
while self.input.len() > 0 && self.input[0] == 0 {
delta_depth -= 1;
self.input = &self.input[1..];
}
let mut cached_current = self.cached_current.borrow_mut();
mem::replace(&mut *cached_current, None);
if self.input.len() > 0 {
Some(delta_depth)
} else {
None
}
}
_ => None,
}
}
/// Move the cursor to the next sibling DIE of the current one.
///
/// Returns `Some` when the cursor has been moved to the next
/// sibling, `None` when there is no next sibling.
///
/// After returning `None`, the cursor is exhausted.
///
/// Here is an example that iterates over all of the direct children of the
/// root entry:
///
/// ```
/// # use gimli::{DebugAbbrev, DebugInfo, LittleEndian};
/// # let info_buf = [
/// # // Compilation unit header
/// #
/// # // 32-bit unit length = 25
/// # 0x19, 0x00, 0x00, 0x00,
/// # // Version 4
/// # 0x04, 0x00,
/// # // debug_abbrev_offset
/// # 0x00, 0x00, 0x00, 0x00,
/// # // Address size
/// # 0x04,
/// #
/// # // DIEs
/// #
/// # // Abbreviation code
/// # 0x01,
/// # // Attribute of form DW_FORM_string = "foo\0"
/// # 0x66, 0x6f, 0x6f, 0x00,
/// #
/// # // Children
/// #
/// # // Abbreviation code
/// # 0x01,
/// # // Attribute of form DW_FORM_string = "foo\0"
/// # 0x66, 0x6f, 0x6f, 0x00,
/// #
/// # // Children
/// #
/// # // Abbreviation code
/// # 0x01,
/// # // Attribute of form DW_FORM_string = "foo\0"
/// # 0x66, 0x6f, 0x6f, 0x00,
/// #
/// # // Children
/// #
/// # // End of children
/// # 0x00,
/// #
/// # // End of children
/// # 0x00,
/// #
/// # // End of children
/// # 0x00,
/// # ];
/// # let debug_info = DebugInfo::<LittleEndian>::new(&info_buf);
/// #
/// # let get_some_unit = || debug_info.units().next().unwrap().unwrap();
///
/// # let abbrev_buf = [
/// # // Code
/// # 0x01,
/// # // DW_TAG_subprogram
/// # 0x2e,
/// # // DW_CHILDREN_yes
/// # 0x01,
/// # // Begin attributes
/// # // Attribute name = DW_AT_name
/// # 0x03,
/// # // Attribute form = DW_FORM_string
/// # 0x08,
/// # // End attributes
/// # 0x00,
/// # 0x00,
/// # // Null terminator
/// # 0x00
/// # ];
/// # let debug_abbrev = DebugAbbrev::<LittleEndian>::new(&abbrev_buf);
/// #
/// let unit = get_some_unit();
/// # let get_abbrevs_for_unit = |_| unit.abbreviations(debug_abbrev).unwrap();
/// let abbrevs = get_abbrevs_for_unit(&unit);
///
/// let mut cursor = unit.entries(&abbrevs);
///
/// // Move the cursor to the root's first child.
/// assert_eq!(cursor.next_dfs().unwrap(), 1);
///
/// // Iterate the root's children.
/// loop {
/// let current = cursor.current()
/// .expect("Should be at an entry")
/// .expect("And we should parse the entry ok");
///
/// println!("{:?} is a child of the root", current);
///
/// if cursor.next_sibling().is_none() {
/// break;
/// }
/// }
/// ```
pub fn next_sibling(&mut self) -> Option<()> {
    match self.current() {
        Some(Ok(current)) => {
            // Producers may emit a DW_AT_sibling attribute so consumers can
            // skip an entry's children without parsing them.
            let sibling_ptr = current.attr_value(constants::DW_AT_sibling);
            if let Some(AttributeValue::UnitRef(offset)) = sibling_ptr {
                if self.unit.is_valid_offset(offset) {
                    // Fast path: this entry has a DW_AT_sibling
                    // attribute pointing to its sibling.
                    self.input = &self.unit.range_from(offset..);
                    if self.input.len() > 0 && self.input[0] != 0 {
                        return Some(());
                    } else {
                        // The pointer led to the end of the children list
                        // (or past the data): no sibling; exhaust the cursor.
                        self.input = &[];
                        return None;
                    }
                }
            }

            // Slow path: either the entry doesn't have a sibling pointer,
            // or the pointer is bogus. Do a DFS until we get to the next
            // sibling.

            let mut depth = 0;
            while let Some(delta_depth) = self.next_dfs() {
                depth += delta_depth;

                // `next_dfs` only returns `Some` when `input` is non-empty,
                // so indexing `input[0]` here cannot panic.
                if depth == 0 && self.input[0] != 0 {
                    // We found the next sibling.
                    return Some(());
                }

                if depth < 0 {
                    // We moved up to the original entry's parent's (or
                    // parent's parent's, etc ...) siblings.
                    self.input = &[];
                    return None;
                }
            }

            // No sibling found.
            self.input = &[];
            None
        }
        _ => {
            // No current entry: the cursor stays exhausted.
            self.input = &[];
            None
        }
    }
}
}
/// Parse a type unit header's unique type signature. Callers should handle
/// unique-ness checking.
///
/// The signature is always a 64-bit value, regardless of the unit's
/// 32-/64-bit DWARF format.
fn parse_type_signature<'input, Endian>(input: EndianBuf<'input, Endian>)
                                        -> ParseResult<(EndianBuf<'input, Endian>, u64)>
    where Endian: Endianity
{
    parse_u64(input)
}
#[test]
fn test_parse_type_signature_ok() {
    // Eight little-endian bytes decode into one u64 signature.
    let input = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];
    let result = parse_type_signature(EndianBuf::<LittleEndian>::new(&input));
    match result {
        Ok((_, val)) => assert_eq!(val, 0x0807060504030201),
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}
#[test]
fn test_parse_type_signature_incomplete() {
    // Only seven bytes: one short of a full signature.
    let input = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07];
    let result = parse_type_signature(EndianBuf::<LittleEndian>::new(&input));
    match result {
        Err(Error::UnexpectedEof) => assert!(true),
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}
/// Parse a type unit header's type offset.
///
/// The offset is 4 bytes in 32-bit DWARF and 8 bytes in 64-bit DWARF.
fn parse_type_offset<'input, Endian>
    (input: EndianBuf<'input, Endian>,
     format: Format)
     -> ParseResult<(EndianBuf<'input, Endian>, DebugTypesOffset)>
    where Endian: Endianity
{
    // Wrap the raw integer in its newtype; shared by both format arms.
    let wrap = |(rest, offset)| (rest, DebugTypesOffset(offset));
    match format {
        Format::Dwarf32 => parse_u32_as_u64(input).map(wrap),
        Format::Dwarf64 => parse_u64(input).map(wrap),
    }
}
#[test]
fn test_parse_type_offset_32_ok() {
    // 4-byte offset; the trailing byte must be left over.
    let input = [0x12, 0x34, 0x56, 0x78, 0x00];
    let result = parse_type_offset(EndianBuf::<LittleEndian>::new(&input), Format::Dwarf32);
    match result {
        Ok((rest, offset)) => {
            assert_eq!(rest.len(), 1);
            assert_eq!(DebugTypesOffset(0x78563412), offset);
        }
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}

#[test]
fn test_parse_type_offset_64_ok() {
    // 8-byte offset; the trailing byte must be left over.
    let input = [0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xff, 0x00];
    let result = parse_type_offset(EndianBuf::<LittleEndian>::new(&input), Format::Dwarf64);
    match result {
        Ok((rest, offset)) => {
            assert_eq!(rest.len(), 1);
            assert_eq!(DebugTypesOffset(0xffdebc9a78563412), offset);
        }
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}

#[test]
fn test_parse_type_offset_incomplete() {
    // Need at least 4 bytes.
    let input = [0xff, 0xff, 0xff];
    let result = parse_type_offset(EndianBuf::<LittleEndian>::new(&input), Format::Dwarf32);
    match result {
        Err(Error::UnexpectedEof) => assert!(true),
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}
/// The `DebugTypes` struct represents the DWARF type information
/// found in the `.debug_types` section.
#[derive(Debug, Clone, Copy)]
pub struct DebugTypes<'input, Endian>
    where Endian: Endianity
{
    // Raw contents of the `.debug_types` section.
    debug_types_section: EndianBuf<'input, Endian>,
}

impl<'input, Endian> DebugTypes<'input, Endian>
    where Endian: Endianity
{
    /// Construct a new `DebugTypes` instance from the data in the `.debug_types`
    /// section.
    ///
    /// It is the caller's responsibility to read the `.debug_types` section and
    /// present it as a `&[u8]` slice. That means using some ELF loader on
    /// Linux, a Mach-O loader on OSX, etc.
    ///
    /// ```
    /// use gimli::{DebugTypes, LittleEndian};
    ///
    /// # let buf = [0x00, 0x01, 0x02, 0x03];
    /// # let read_debug_types_section_somehow = || &buf;
    /// let debug_types = DebugTypes::<LittleEndian>::new(read_debug_types_section_somehow());
    /// ```
    pub fn new(debug_types_section: &'input [u8]) -> DebugTypes<'input, Endian> {
        DebugTypes { debug_types_section: EndianBuf(debug_types_section, PhantomData) }
    }

    /// Iterate the type-units in this `.debug_types` section.
    ///
    /// ```
    /// use gimli::{DebugTypes, LittleEndian};
    ///
    /// # let buf = [];
    /// # let read_debug_types_section_somehow = || &buf;
    /// let debug_types = DebugTypes::<LittleEndian>::new(read_debug_types_section_somehow());
    ///
    /// for parse_result in debug_types.units() {
    ///     let unit = parse_result.unwrap();
    ///     println!("unit's length is {}", unit.unit_length());
    /// }
    /// ```
    pub fn units(&self) -> TypeUnitHeadersIter<'input, Endian> {
        TypeUnitHeadersIter { input: self.debug_types_section }
    }
}
/// An iterator over the type-units of this `.debug_types` section.
///
/// See the [documentation on
/// `DebugTypes::units`](./struct.DebugTypes.html#method.units) for
/// more detail.
pub struct TypeUnitHeadersIter<'input, Endian>
    where Endian: Endianity
{
    // The not-yet-iterated remainder of the `.debug_types` section.
    input: EndianBuf<'input, Endian>,
}
impl<'input, Endian> Iterator for TypeUnitHeadersIter<'input, Endian>
    where Endian: Endianity
{
    type Item = ParseResult<TypeUnitHeader<'input, Endian>>;

    /// Parse the next type-unit header and skip `input` ahead to the
    /// following unit; any parse error exhausts the iterator.
    fn next(&mut self) -> Option<Self::Item> {
        if self.input.is_empty() {
            return None;
        }
        match parse_type_unit_header(self.input) {
            Ok((_, header)) => {
                let unit_len = header.length_including_self() as usize;
                // A unit claiming to be longer than the remaining section
                // exhausts the iterator; otherwise jump over this unit.
                self.input = if self.input.len() < unit_len {
                    self.input.range_to(..0)
                } else {
                    self.input.range_from(unit_len..)
                };
                Some(Ok(header))
            }
            Err(e) => {
                self.input = self.input.range_to(..0);
                Some(Err(e))
            }
        }
    }
}
/// The header of a type unit's debugging information.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct TypeUnitHeader<'input, Endian>
    where Endian: Endianity
{
    // The common unit header (length, version, abbrev offset, address size).
    header: UnitHeader<'input, Endian>,
    // Unique 64-bit signature of the type described by this unit.
    type_signature: u64,
    // Offset within this type unit where the type is defined.
    type_offset: DebugTypesOffset,
}
impl<'input, Endian> TypeUnitHeader<'input, Endian>
    where Endian: Endianity
{
    /// Construct a new `TypeUnitHeader`.
    fn new(mut header: UnitHeader<'input, Endian>,
           type_signature: u64,
           type_offset: DebugTypesOffset)
           -> TypeUnitHeader<'input, Endian> {
        // First, fix up the header's entries_buf. Currently it points
        // right after end of the header, but since this is a type
        // unit header, there are two more fields before entries
        // begin to account for.
        let additional = Self::additional_header_size(header.format);
        header.entries_buf = header.entries_buf.range_from(additional..);
        TypeUnitHeader {
            header: header,
            type_signature: type_signature,
            type_offset: type_offset,
        }
    }

    /// Get the length of the debugging info for this type-unit.
    pub fn unit_length(&self) -> u64 {
        self.header.unit_length
    }

    /// Number of header bytes a type-unit has beyond a compilation-unit.
    fn additional_header_size(format: Format) -> usize {
        // There are two additional fields in a type-unit compared to
        // compilation- and partial-units. The type_signature is
        // always 64 bits regardless of format, the type_offset is 32
        // or 64 bits depending on the format.
        let type_signature_size = 8;
        let type_offset_size = match format {
            Format::Dwarf32 => 4,
            Format::Dwarf64 => 8,
        };
        type_signature_size + type_offset_size
    }

    /// Get the length of the debugging info for this type-unit,
    /// including the byte length of the encoded length itself.
    pub fn length_including_self(&self) -> u64 {
        self.header.length_including_self() +
        Self::additional_header_size(self.header.format) as u64
    }

    /// Get the DWARF version of the debugging info for this type-unit.
    pub fn version(&self) -> u16 {
        self.header.version
    }

    /// The offset into the `.debug_abbrev` section for this type-unit's
    /// debugging information entries.
    pub fn debug_abbrev_offset(&self) -> DebugAbbrevOffset {
        self.header.debug_abbrev_offset
    }

    /// The size of addresses (in bytes) in this type-unit.
    pub fn address_size(&self) -> u8 {
        self.header.address_size
    }

    /// Get the unique type signature for this type unit.
    pub fn type_signature(&self) -> u64 {
        self.type_signature
    }

    /// Get the offset within this type unit where the type is defined.
    pub fn type_offset(&self) -> DebugTypesOffset {
        self.type_offset
    }

    /// Navigate this type unit's `DebuggingInformationEntry`s.
    pub fn entries<'me, 'abbrev>(&'me self,
                                 abbreviations: &'abbrev Abbreviations)
                                 -> EntriesCursor<'input, 'abbrev, 'me, Endian> {
        EntriesCursor {
            unit: &self.header,
            input: self.header.entries_buf.into(),
            abbreviations: abbreviations,
            cached_current: RefCell::new(None),
        }
    }

    /// Parse this type unit's abbreviations.
    ///
    /// ```
    /// use gimli::DebugAbbrev;
    /// # use gimli::{DebugTypes, LittleEndian};
    /// # let types_buf = [
    /// #     // Type unit header
    /// #
    /// #     // 32-bit unit length = 37
    /// #     0x25, 0x00, 0x00, 0x00,
    /// #     // Version 4
    /// #     0x04, 0x00,
    /// #     // debug_abbrev_offset
    /// #     0x00, 0x00, 0x00, 0x00,
    /// #     // Address size
    /// #     0x04,
    /// #     // Type signature
    /// #     0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    /// #     // Type offset
    /// #     0x01, 0x02, 0x03, 0x04,
    /// #
    /// #     // DIEs
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // Abbreviation code
    /// #     0x01,
    /// #     // Attribute of form DW_FORM_string = "foo\0"
    /// #     0x66, 0x6f, 0x6f, 0x00,
    /// #
    /// #     // Children
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// #
    /// #     // End of children
    /// #     0x00,
    /// # ];
    /// # let debug_types = DebugTypes::<LittleEndian>::new(&types_buf);
    /// #
    /// # let abbrev_buf = [
    /// #     // Code
    /// #     0x01,
    /// #     // DW_TAG_subprogram
    /// #     0x2e,
    /// #     // DW_CHILDREN_yes
    /// #     0x01,
    /// #     // Begin attributes
    /// #       // Attribute name = DW_AT_name
    /// #       0x03,
    /// #       // Attribute form = DW_FORM_string
    /// #       0x08,
    /// #     // End attributes
    /// #     0x00,
    /// #     0x00,
    /// #     // Null terminator
    /// #     0x00
    /// # ];
    /// #
    /// # let get_some_type_unit = || debug_types.units().next().unwrap().unwrap();
    ///
    /// let unit = get_some_type_unit();
    ///
    /// # let read_debug_abbrev_section_somehow = || &abbrev_buf;
    /// let debug_abbrev = DebugAbbrev::<LittleEndian>::new(read_debug_abbrev_section_somehow());
    /// let abbrevs_for_unit = unit.abbreviations(debug_abbrev).unwrap();
    /// ```
    pub fn abbreviations<'abbrev>(&self,
                                  debug_abbrev: DebugAbbrev<'abbrev, Endian>)
                                  -> ParseResult<Abbreviations> {
        // Abbreviations for this unit start at its recorded offset into
        // the `.debug_abbrev` section.
        let offset = self.debug_abbrev_offset().0 as usize;
        parse_abbreviations(&debug_abbrev.debug_abbrev_section.0[offset..])
            .map(|(_, abbrevs)| abbrevs)
    }
}
/// Parse a type unit header.
fn parse_type_unit_header<'input, Endian>
(input: EndianBuf<'input, Endian>)
-> ParseResult<(EndianBuf<'input, Endian>, TypeUnitHeader<'input, Endian>)>
where Endian: Endianity
{
let (rest, header) = try!(parse_unit_header(input));
let (rest, signature) = try!(parse_type_signature(rest));
let (rest, offset) = try!(parse_type_offset(rest, header.format()));
Ok((rest, TypeUnitHeader::new(header, signature, offset)))
}
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_parse_type_unit_header_64_ok() {
    let buf = [
        // Enable 64-bit unit length mode.
        0xff, 0xff, 0xff, 0xff,
        // The actual unit length (27).
        0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Version 4
        0x04, 0x00,
        // debug_abbrev_offset
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
        // Address size
        0x08,
        // Type signature
        0xef, 0xbe, 0xad, 0xde, 0xef, 0xbe, 0xad, 0xde,
        // type offset
        0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78
    ];

    let result = parse_type_unit_header(EndianBuf::<LittleEndian>::new(&buf));

    match result {
        Ok((_, header)) => {
            // The expected entries buffer is the last 16 bytes: the
            // signature + 64-bit type offset that `TypeUnitHeader::new`
            // skips past.
            assert_eq!(header,
                       TypeUnitHeader::new(UnitHeader::new(27,
                                                           4,
                                                           DebugAbbrevOffset(0x0807060504030201),
                                                           8,
                                                           Format::Dwarf64,
                                                           &buf[buf.len() - 16..]),
                                           0xdeadbeefdeadbeef,
                                           DebugTypesOffset(0x7856341278563412)))
        },
        otherwise => panic!("Unexpected result: {:?}", otherwise),
    }
}
|
// rustfmt doesn't do a very good job on nom parser invocations.
//
// NOTE: the original used a bare `#![rustfmt_skip]` (unstable) plus
// `#![config_attr(...)]`, which is a typo for `cfg_attr`. Guard the skip
// behind `cfg_attr` so the file compiles on stable Rust.
#![cfg_attr(rustfmt, rustfmt_skip)]
use nom::{self, IResult};
use std::str;
use types::*;
// Byte-class predicates from RFC 3501's formal grammar.

/// Line-ending bytes (CR or LF).
fn crlf(c: u8) -> bool {
    match c {
        b'\r' | b'\n' => true,
        _ => false,
    }
}

/// Wildcards that are only meaningful in LIST patterns.
fn list_wildcards(c: u8) -> bool {
    match c {
        b'%' | b'*' => true,
        _ => false,
    }
}

/// Bytes that must be escaped inside a quoted string.
fn quoted_specials(c: u8) -> bool {
    match c {
        b'"' | b'\\' => true,
        _ => false,
    }
}

/// Bytes reserved for response codes.
fn resp_specials(c: u8) -> bool {
    c == b']'
}

/// Bytes that can never appear in an atom.
fn atom_specials(c: u8) -> bool {
    match c {
        b'(' | b')' | b'{' | b' ' => true,
        _ => c < 32 || list_wildcards(c) || quoted_specials(c) || resp_specials(c),
    }
}

/// Any byte allowed in an atom.
fn atom_char(c: u8) -> bool {
    !atom_specials(c)
}

/// Atom bytes plus the resp-specials set.
fn astring_char(c: u8) -> bool {
    atom_char(c) || resp_specials(c)
}

/// Bytes allowed in a command tag ('+' is excluded).
fn tag_char(c: u8) -> bool {
    astring_char(c) && c != b'+'
}
// Ideally this should use nom's `escaped` macro, but it suffers from broken
// type inference unless compiled with the verbose-errors feature enabled.
//
// Scan until the first unescaped double quote, tracking backslash escapes
// with a single boolean flag.
fn quoted_data(i: &[u8]) -> IResult<&[u8], &[u8]> {
    let mut escaped = false;
    let mut end = 0;
    for &byte in i {
        // An unescaped quote terminates the data.
        if byte == b'"' && !escaped {
            break;
        }
        end += 1;
        // A backslash starts an escape unless it is itself escaped.
        escaped = byte == b'\\' && !escaped;
    }
    IResult::Done(&i[end..], &i[..end])
}
// ---- String-level building blocks (quoted/literal strings, numbers, atoms) ----

// A quoted string: '"' ... '"' with escapes handled by `quoted_data`.
named!(quoted<&[u8]>, do_parse!(
    tag_s!("\"") >>
    data: quoted_data >>
    tag_s!("\"") >>
    (data)
));

// A literal: "{<len>}\r\n" followed by exactly <len> bytes.
named!(literal<&[u8]>, do_parse!(
    tag_s!("{") >>
    len: number >>
    tag_s!("}") >>
    tag_s!("\r\n") >>
    data: take!(len) >>
    (data)
));

// An IMAP string is either quoted or literal.
named!(string<&[u8]>, alt!(quoted | literal));

// Status keywords (case-insensitive).
named!(status_ok<Status>, map!(tag_no_case!("OK"),
    |s| Status::Ok
));
named!(status_no<Status>, map!(tag_no_case!("NO"),
    |s| Status::No
));
named!(status_bad<Status>, map!(tag_no_case!("BAD"),
    |s| Status::Bad
));
named!(status_preauth<Status>, map!(tag_no_case!("PREAUTH"),
    |s| Status::PreAuth
));
named!(status_bye<Status>, map!(tag_no_case!("BYE"),
    |s| Status::Bye
));
named!(status<Status>, alt!(
    status_ok |
    status_no |
    status_bad |
    status_preauth |
    status_bye
));

// Decimal numbers; `map_res!` rejects values that overflow the target type.
named!(number<u32>, map_res!(
    map_res!(nom::digit, str::from_utf8),
    str::parse
));
named!(number_64<u64>, map_res!(
    map_res!(nom::digit, str::from_utf8),
    str::parse
));

// Free text up to (but not including) CR/LF.
named!(text<&str>, map!(take_till_s!(crlf),
    |s| str::from_utf8(s).unwrap()
));

// One or more atom characters.
named!(atom<&str>, map!(take_while1_s!(atom_char),
    |s| str::from_utf8(s).unwrap()
));

// astring = 1*ASTRING-CHAR / string
named!(astring<&[u8]>, alt!(
    take_while1_s!(astring_char) |
    string
));

// Mailbox names; the special INBOX name is matched first.
named!(mailbox<&str>, alt!(
    map!(tag_s!("INBOX"), |s| "INBOX") |
    map!(astring, |s| str::from_utf8(s).unwrap())
));

// System flags like "\Seen": a backslash followed by atom characters.
named!(flag_extension<&str>, map_res!(
    recognize!(pair!(tag!("\\"), take_while!(atom_char))),
    str::from_utf8
));
named!(flag<&str>, alt!(flag_extension | atom));
// ---- Flag lists and body-section paths ----

// "(" [flag *(SP flag)] ")" — an empty list yields an empty Vec.
named!(flag_list<Vec<&str>>, do_parse!(
    tag_s!("(") >>
    elements: opt!(do_parse!(
        flag0: flag >>
        flags: many0!(do_parse!(
            tag_s!(" ") >>
            flag: flag >>
            (flag)
        )) >> ({
            let mut res = vec![flag0];
            res.extend(flags);
            res
        })
    )) >>
    tag_s!(")") >> ({
        if elements.is_some() {
            elements.unwrap()
        } else {
            Vec::new()
        }
    })
));

// A PERMANENTFLAGS entry may also be the special "\*" marker.
named!(flag_perm<&str>, alt!(
    map!(tag_s!("\\*"), |s| str::from_utf8(s).unwrap()) |
    flag
));

// Dotted part numbers, e.g. "1.2.3" parses to vec![1, 2, 3].
named!(section_part<Vec<u32>>, do_parse!(
    part: number >>
    rest: many0!(do_parse!(
        tag_s!(".") >>
        part: number >>
        (part)
    )) >> ({
        let mut res = vec![part];
        res.extend(rest);
        res
    })
));

named!(section_msgtext<MessageSection>, map!(
    alt!(tag_s!("HEADER") | tag_s!("TEXT")),
    |s| match s {
        b"HEADER" => MessageSection::Header,
        b"TEXT" => MessageSection::Text,
        _ => panic!("cannot happen"),
    }
));

// section-text adds MIME to the msgtext alternatives.
named!(section_text<MessageSection>, alt!(
    section_msgtext |
    do_parse!(tag_s!("MIME") >> (MessageSection::Mime))
));

named!(section_spec<SectionPath>, alt!(
    map!(section_msgtext, |val| SectionPath::Full(val)) |
    do_parse!(
        part: section_part >>
        text: opt!(do_parse!(
            tag_s!(".") >>
            text: section_text >>
            (text)
        )) >>
        (SectionPath::Part(part, text))
    )
));

// "[" [section-spec] "]"
named!(section<Option<SectionPath>>, do_parse!(
    tag_s!("[") >>
    spec: opt!(section_spec) >>
    tag_s!("]") >>
    (spec)
));
// ---- Response codes (the "[...]" part of a status response) ----

named!(resp_text_code_permanent_flags<ResponseCode>, do_parse!(
    tag_s!("PERMANENTFLAGS (") >>
    elements: opt!(do_parse!(
        flag0: flag_perm >>
        flags: many0!(do_parse!(
            tag_s!(" ") >>
            flag: flag_perm >>
            (flag)
        )) >> ({
            let mut res = vec![flag0];
            res.extend(flags);
            res
        })
    )) >>
    tag_s!(")") >> ({
        ResponseCode::PermanentFlags(if elements.is_some() {
            elements.unwrap()
        } else {
            Vec::new()
        })
    })
));

// RFC 4551 (CONDSTORE) mod-sequence value; 64-bit.
named!(resp_text_code_highest_mod_seq<ResponseCode>, do_parse!(
    tag_s!("HIGHESTMODSEQ ") >>
    num: number_64 >>
    (ResponseCode::HighestModSeq(num))
));
named!(resp_text_code_read_only<ResponseCode>, do_parse!(
    tag_s!("READ-ONLY") >>
    (ResponseCode::ReadOnly)
));
named!(resp_text_code_read_write<ResponseCode>, do_parse!(
    tag_s!("READ-WRITE") >>
    (ResponseCode::ReadWrite)
));
named!(resp_text_code_try_create<ResponseCode>, do_parse!(
    tag_s!("TRYCREATE") >>
    (ResponseCode::TryCreate)
));
named!(resp_text_code_uid_validity<ResponseCode>, do_parse!(
    tag_s!("UIDVALIDITY ") >>
    num: number >>
    (ResponseCode::UidValidity(num))
));
named!(resp_text_code_uid_next<ResponseCode>, do_parse!(
    tag_s!("UIDNEXT ") >>
    num: number >>
    (ResponseCode::UidNext(num))
));
named!(resp_text_code_unseen<ResponseCode>, do_parse!(
    tag_s!("UNSEEN ") >>
    num: number >>
    (ResponseCode::Unseen(num))
));
named!(resp_text_code<ResponseCode>, do_parse!(
    tag_s!("[") >>
    coded: alt!(
        resp_text_code_permanent_flags |
        resp_text_code_uid_validity |
        resp_text_code_uid_next |
        resp_text_code_unseen |
        resp_text_code_read_only |
        resp_text_code_read_write |
        resp_text_code_try_create |
        resp_text_code_highest_mod_seq
    ) >>
    // Per the spec, the closing tag should be "] ".
    // See `resp_text` for more on why this is done differently.
    tag_s!("]") >>
    (coded)
));

// One capability token, preceded by a single space.
named!(capability<&str>, do_parse!(
    tag_s!(" ") >>
    atom: take_till1_s!(atom_specials) >>
    (str::from_utf8(atom).unwrap())
));
named!(capability_data<Response>, do_parse!(
    tag_s!("CAPABILITY") >>
    capabilities: many1!(capability) >>
    (Response::Capabilities(capabilities))
));
// ---- Untagged mailbox data and address structures ----

named!(mailbox_data_flags<Response>, do_parse!(
    tag_s!("FLAGS ") >>
    flags: flag_list >>
    (Response::MailboxData(MailboxDatum::Flags(flags)))
));
named!(mailbox_data_exists<Response>, do_parse!(
    num: number >>
    tag_s!(" EXISTS") >>
    (Response::MailboxData(MailboxDatum::Exists(num)))
));
named!(mailbox_data_list<Response>, do_parse!(
    tag_s!("LIST ") >>
    flags: flag_list >>
    tag_s!(" ") >>
    path: quoted >>
    tag_s!(" ") >>
    name: mailbox >>
    (Response::MailboxData(MailboxDatum::List {
        flags,
        delimiter: str::from_utf8(path).unwrap(),
        name
    }))
));
named!(mailbox_data_lsub<Response>, do_parse!(
    tag_s!("LSUB ") >>
    flags: flag_list >>
    tag_s!(" ") >>
    path: quoted >>
    tag_s!(" ") >>
    name: mailbox >>
    (Response::MailboxData(MailboxDatum::SubList {
        flags,
        delimiter: str::from_utf8(path).unwrap(),
        name
    }))
));
named!(mailbox_data_recent<Response>, do_parse!(
    num: number >>
    tag_s!(" RECENT") >>
    (Response::MailboxData(MailboxDatum::Recent(num)))
));
named!(mailbox_data<Response>, alt!(
    mailbox_data_flags |
    mailbox_data_exists |
    mailbox_data_list |
    mailbox_data_lsub |
    mailbox_data_recent
));

// nstring = string / "NIL"; NIL maps to None.
named!(nstring<Option<&[u8]>>, map!(
    alt!(tag_s!("NIL") | string),
    |s| if s == b"NIL" { None } else { Some(s) }
));

// A four-component envelope address: (name adl mailbox host).
named!(address<Address>, do_parse!(
    tag_s!("(") >>
    name: nstring >>
    tag_s!(" ") >>
    adl: nstring >>
    tag_s!(" ") >>
    mailbox: nstring >>
    tag_s!(" ") >>
    host: nstring >>
    tag_s!(")") >>
    (Address {
        name: name.map(|s| str::from_utf8(s).unwrap()),
        adl: adl.map(|s| str::from_utf8(s).unwrap()),
        mailbox: mailbox.map(|s| str::from_utf8(s).unwrap()),
        host: host.map(|s| str::from_utf8(s).unwrap()),
    })
));
named!(opt_addresses<Option<Vec<Address>>>, alt!(
    map!(tag_s!("NIL"), |s| None) |
    do_parse!(
        tag_s!("(") >>
        addrs: many1!(address) >>
        tag_s!(")") >>
        (Some(addrs))
    )
));
// ---- FETCH message attributes ----

named!(msg_att_body_section<AttributeValue>, do_parse!(
    tag_s!("BODY") >>
    section: section >>
    index: opt!(do_parse!(
        tag_s!("<") >>
        num: number >>
        tag_s!(">") >>
        (num)
    )) >>
    tag_s!(" ") >>
    data: nstring >>
    (AttributeValue::BodySection { section, index, data })
));
named!(msg_att_envelope<AttributeValue>, do_parse!(
    tag_s!("ENVELOPE (") >>
    date: nstring >>
    tag_s!(" ") >>
    subject: nstring >>
    tag_s!(" ") >>
    from: opt_addresses >>
    tag_s!(" ") >>
    sender: opt_addresses >>
    tag_s!(" ") >>
    reply_to: opt_addresses >>
    tag_s!(" ") >>
    to: opt_addresses >>
    tag_s!(" ") >>
    cc: opt_addresses >>
    tag_s!(" ") >>
    bcc: opt_addresses >>
    tag_s!(" ") >>
    in_reply_to: nstring >>
    tag_s!(" ") >>
    message_id: nstring >>
    tag_s!(")") >> ({
        AttributeValue::Envelope(Envelope {
            date: date.map(|s| str::from_utf8(s).unwrap()),
            subject: subject.map(|s| str::from_utf8(s).unwrap()),
            from,
            sender,
            reply_to,
            to,
            cc,
            bcc,
            in_reply_to: in_reply_to.map(|s| str::from_utf8(s).unwrap()),
            message_id: message_id.map(|s| str::from_utf8(s).unwrap()),
        })
    })
));
// NOTE(review): `date.unwrap()` panics if a server ever sends
// "INTERNALDATE NIL" — confirm that cannot happen before relying on it.
named!(msg_att_internal_date<AttributeValue>, do_parse!(
    tag_s!("INTERNALDATE ") >>
    date: nstring >>
    (AttributeValue::InternalDate(str::from_utf8(date.unwrap()).unwrap()))
));
named!(msg_att_flags<AttributeValue>, do_parse!(
    tag_s!("FLAGS ") >>
    flags: flag_list >>
    (AttributeValue::Flags(flags))
));
named!(msg_att_rfc822<AttributeValue>, do_parse!(
    tag_s!("RFC822 ") >>
    raw: nstring >>
    (AttributeValue::Rfc822(raw))
));
named!(msg_att_rfc822_size<AttributeValue>, do_parse!(
    tag_s!("RFC822.SIZE ") >>
    num: number >>
    (AttributeValue::Rfc822Size(num))
));
named!(msg_att_mod_seq<AttributeValue>, do_parse!(
    tag_s!("MODSEQ (") >>
    num: number_64 >>
    tag_s!(")") >>
    (AttributeValue::ModSeq(num))
));
named!(msg_att_uid<AttributeValue>, do_parse!(
    tag_s!("UID ") >>
    num: number >>
    (AttributeValue::Uid(num))
));
// `msg_att_rfc822` can safely precede `msg_att_rfc822_size`: its tag
// "RFC822 " ends in a space, so "RFC822.SIZE" does not match it.
named!(msg_att<AttributeValue>, alt!(
    msg_att_body_section |
    msg_att_envelope |
    msg_att_internal_date |
    msg_att_flags |
    msg_att_mod_seq |
    msg_att_rfc822 |
    msg_att_rfc822_size |
    msg_att_uid
));
// A parenthesized, space-separated list of at least one attribute.
named!(msg_att_list<Vec<AttributeValue>>, do_parse!(
    tag_s!("(") >>
    elements: do_parse!(
        attr0: msg_att >>
        attrs: many0!(do_parse!(
            tag_s!(" ") >>
            attr: msg_att >>
            (attr)
        )) >> ({
            let mut res = vec![attr0];
            res.extend(attrs);
            res
        })
    ) >>
    tag_s!(")") >>
    (elements)
));
named!(message_data_fetch<Response>, do_parse!(
    num: number >>
    tag_s!(" FETCH ") >>
    attrs: msg_att_list >>
    (Response::Fetch(num, attrs))
));
named!(message_data_expunge<Response>, do_parse!(
    num: number >>
    tag_s!(" EXPUNGE") >>
    (Response::Expunge(num))
));
// A client-chosen request tag (any tag characters, excluding '+').
named!(tag<RequestId>, map!(take_while1_s!(tag_char),
    |s| RequestId(str::from_utf8(s).unwrap().to_string())
));

// This is not quite according to spec, which mandates the following:
//     ["[" resp-text-code "]" SP] text
// However, examples in RFC 4551 (Conditional STORE) counteract this by giving
// examples of `resp-text` that do not include the trailing space and text.
//
// When a code was parsed, the first byte of `text` is the separating space,
// so it is sliced off before returning the human-readable text.
named!(resp_text<(Option<ResponseCode>, Option<&str>)>, do_parse!(
    code: opt!(resp_text_code) >>
    text: text >>
    ({
        let res = if text.len() < 1 {
            None
        } else if code.is_some() {
            Some(&text[1..])
        } else {
            Some(text)
        };
        (code, res)
    })
));
// ---- Top-level responses ----

// A tagged completion response: "<tag> SP <status> SP <resp-text> CRLF".
named!(response_tagged<Response>, do_parse!(
    tag: tag >>
    tag_s!(" ") >>
    status: status >>
    tag_s!(" ") >>
    text: resp_text >>
    tag_s!("\r\n") >>
    (Response::Done {
        tag,
        status,
        code: text.0,
        information: text.1,
    })
));
// An untagged status condition (OK/NO/BAD/PREAUTH/BYE).
named!(resp_cond<Response>, do_parse!(
    status: status >>
    tag_s!(" ") >>
    text: resp_text >>
    (Response::Data {
        status,
        code: text.0,
        information: text.1,
    })
));
// Any "* ..." untagged response.
named!(response_data<Response>, do_parse!(
    tag_s!("* ") >>
    contents: alt!(
        resp_cond |
        mailbox_data |
        message_data_expunge |
        message_data_fetch |
        capability_data
    ) >>
    tag_s!("\r\n") >>
    (contents)
));
named!(response<Response>, alt!(
    response_data |
    response_tagged
));

/// Result of parsing a single server response from a byte buffer.
pub type ParseResult<'a> = IResult<&'a [u8], Response<'a>>;

/// Parse one IMAP server response from `msg`.
pub fn parse_response(msg: &[u8]) -> ParseResult {
    response(msg)
}
#[cfg(test)]
mod tests {
    use types::*;
    use super::{parse_response, IResult};

    // Digit runs that overflow u32 must be rejected by `number`'s map_res.
    #[test]
    fn test_number_overflow() {
        match parse_response(b"* 2222222222222222222222222222222222222222222C\r\n") {
            IResult::Error(_) => {},
            _ => panic!("error required for integer overflow"),
        }
    }

    #[test]
    fn test_unseen() {
        match parse_response(b"* OK [UNSEEN 3] Message 3 is first unseen\r\n").unwrap() {
            (_, Response::Data {
                status: Status::Ok,
                code: Some(ResponseCode::Unseen(3)),
                information: Some("Message 3 is first unseen"),
            }) => {},
            rsp @ _ => panic!("unexpected response {:?}", rsp),
        }
    }

    // Literal syntax ({3}\r\nfoo) inside a FETCH body section.
    #[test]
    fn test_body_text() {
        match parse_response(b"* 2 FETCH (BODY[TEXT] {3}\r\nfoo)\r\n") {
            IResult::Done(_, Response::Fetch(_, attrs)) => {
                let body = &attrs[0];
                assert_eq!(body, &AttributeValue::BodySection {
                    section: Some(SectionPath::Full(MessageSection::Text)),
                    index: None,
                    data: Some(b"foo"),
                }, "body = {:?}", body);
            },
            rsp @ _ => panic!("unexpected response {:?}", rsp),
        }
    }
}
Disabling rustfmt locally doesn't work in stable Rust
// rustfmt doesn't do a very good job on nom parser invocations.
//#![rustfmt_skip]
//#![config_attr(rustfmt, rustfmt_skip)]
use nom::{self, IResult};
use std::str;
use types::*;
// Byte-class predicates from RFC 3501's formal grammar.

/// Line-ending bytes (CR or LF).
fn crlf(c: u8) -> bool {
    match c {
        b'\r' | b'\n' => true,
        _ => false,
    }
}

/// Wildcards that are only meaningful in LIST patterns.
fn list_wildcards(c: u8) -> bool {
    match c {
        b'%' | b'*' => true,
        _ => false,
    }
}

/// Bytes that must be escaped inside a quoted string.
fn quoted_specials(c: u8) -> bool {
    match c {
        b'"' | b'\\' => true,
        _ => false,
    }
}

/// Bytes reserved for response codes.
fn resp_specials(c: u8) -> bool {
    c == b']'
}

/// Bytes that can never appear in an atom.
fn atom_specials(c: u8) -> bool {
    match c {
        b'(' | b')' | b'{' | b' ' => true,
        _ => c < 32 || list_wildcards(c) || quoted_specials(c) || resp_specials(c),
    }
}

/// Any byte allowed in an atom.
fn atom_char(c: u8) -> bool {
    !atom_specials(c)
}

/// Atom bytes plus the resp-specials set.
fn astring_char(c: u8) -> bool {
    atom_char(c) || resp_specials(c)
}

/// Bytes allowed in a command tag ('+' is excluded).
fn tag_char(c: u8) -> bool {
    astring_char(c) && c != b'+'
}
// Ideally this should use nom's `escaped` macro, but it suffers from broken
// type inference unless compiled with the verbose-errors feature enabled.
//
// Scan until the first unescaped double quote, tracking backslash escapes
// with a single boolean flag.
fn quoted_data(i: &[u8]) -> IResult<&[u8], &[u8]> {
    let mut escaped = false;
    let mut end = 0;
    for &byte in i {
        // An unescaped quote terminates the data.
        if byte == b'"' && !escaped {
            break;
        }
        end += 1;
        // A backslash starts an escape unless it is itself escaped.
        escaped = byte == b'\\' && !escaped;
    }
    IResult::Done(&i[end..], &i[..end])
}
// ---- String-level building blocks (quoted/literal strings, numbers, atoms) ----

// A quoted string: '"' ... '"' with escapes handled by `quoted_data`.
named!(quoted<&[u8]>, do_parse!(
    tag_s!("\"") >>
    data: quoted_data >>
    tag_s!("\"") >>
    (data)
));

// A literal: "{<len>}\r\n" followed by exactly <len> bytes.
named!(literal<&[u8]>, do_parse!(
    tag_s!("{") >>
    len: number >>
    tag_s!("}") >>
    tag_s!("\r\n") >>
    data: take!(len) >>
    (data)
));

// An IMAP string is either quoted or literal.
named!(string<&[u8]>, alt!(quoted | literal));

// Status keywords (case-insensitive).
named!(status_ok<Status>, map!(tag_no_case!("OK"),
    |s| Status::Ok
));
named!(status_no<Status>, map!(tag_no_case!("NO"),
    |s| Status::No
));
named!(status_bad<Status>, map!(tag_no_case!("BAD"),
    |s| Status::Bad
));
named!(status_preauth<Status>, map!(tag_no_case!("PREAUTH"),
    |s| Status::PreAuth
));
named!(status_bye<Status>, map!(tag_no_case!("BYE"),
    |s| Status::Bye
));
named!(status<Status>, alt!(
    status_ok |
    status_no |
    status_bad |
    status_preauth |
    status_bye
));

// Decimal numbers; `map_res!` rejects values that overflow the target type.
named!(number<u32>, map_res!(
    map_res!(nom::digit, str::from_utf8),
    str::parse
));
named!(number_64<u64>, map_res!(
    map_res!(nom::digit, str::from_utf8),
    str::parse
));

// Free text up to (but not including) CR/LF.
named!(text<&str>, map!(take_till_s!(crlf),
    |s| str::from_utf8(s).unwrap()
));

// One or more atom characters.
named!(atom<&str>, map!(take_while1_s!(atom_char),
    |s| str::from_utf8(s).unwrap()
));

// astring = 1*ASTRING-CHAR / string
named!(astring<&[u8]>, alt!(
    take_while1_s!(astring_char) |
    string
));

// Mailbox names; the special INBOX name is matched first.
named!(mailbox<&str>, alt!(
    map!(tag_s!("INBOX"), |s| "INBOX") |
    map!(astring, |s| str::from_utf8(s).unwrap())
));

// System flags like "\Seen": a backslash followed by atom characters.
named!(flag_extension<&str>, map_res!(
    recognize!(pair!(tag!("\\"), take_while!(atom_char))),
    str::from_utf8
));
named!(flag<&str>, alt!(flag_extension | atom));
named!(flag_list<Vec<&str>>, do_parse!(
tag_s!("(") >>
elements: opt!(do_parse!(
flag0: flag >>
flags: many0!(do_parse!(
tag_s!(" ") >>
flag: flag >>
(flag)
)) >> ({
let mut res = vec![flag0];
res.extend(flags);
res
})
)) >>
tag_s!(")") >> ({
if elements.is_some() {
elements.unwrap()
} else {
Vec::new()
}
})
));
// Flag inside PERMANENTFLAGS, which may also be the "\*" wildcard
// ("client may create new keywords", RFC 3501 §7.1).
named!(flag_perm<&str>, alt!(
    map!(tag_s!("\\*"), |s| str::from_utf8(s).unwrap()) |
    flag
));
// Dotted MIME part path, e.g. "1.2.3" -> vec![1, 2, 3].
named!(section_part<Vec<u32>>, do_parse!(
    part: number >>
    rest: many0!(do_parse!(
        tag_s!(".") >>
        part: number >>
        (part)
    )) >> ({
        let mut res = vec![part];
        res.extend(rest);
        res
    })
));
// HEADER / TEXT section specifiers.
named!(section_msgtext<MessageSection>, map!(
    alt!(tag_s!("HEADER") | tag_s!("TEXT")),
    |s| match s {
        b"HEADER" => MessageSection::Header,
        b"TEXT" => MessageSection::Text,
        // alt! above only yields the two tags, so this arm is unreachable.
        _ => panic!("cannot happen"),
    }
));
// Section text: HEADER/TEXT, or MIME (the latter is only valid after a
// part number — enforced by `section_spec`, not here).
named!(section_text<MessageSection>, alt!(
    section_msgtext |
    do_parse!(tag_s!("MIME") >> (MessageSection::Mime))
));
// Full section spec: bare HEADER/TEXT, or a part path with an optional
// ".HEADER"/".TEXT"/".MIME" suffix.
named!(section_spec<SectionPath>, alt!(
    map!(section_msgtext, |val| SectionPath::Full(val)) |
    do_parse!(
        part: section_part >>
        text: opt!(do_parse!(
            tag_s!(".") >>
            text: section_text >>
            (text)
        )) >>
        (SectionPath::Part(part, text))
    )
));
// Bracketed section selector; an empty "[]" yields None.
named!(section<Option<SectionPath>>, do_parse!(
    tag_s!("[") >>
    spec: opt!(section_spec) >>
    tag_s!("]") >>
    (spec)
));
// PERMANENTFLAGS response code; "()" yields an empty flag Vec.
named!(resp_text_code_permanent_flags<ResponseCode>, do_parse!(
    tag_s!("PERMANENTFLAGS (") >>
    elements: opt!(do_parse!(
        flag0: flag_perm >>
        flags: many0!(do_parse!(
            tag_s!(" ") >>
            flag: flag_perm >>
            (flag)
        )) >> ({
            let mut res = vec![flag0];
            res.extend(flags);
            res
        })
    )) >>
    tag_s!(")") >>
    // An empty flag list parses as `None`; normalize to an empty Vec.
    (ResponseCode::PermanentFlags(elements.unwrap_or_default()))
));
// Remaining response-code bodies: RFC 3501 §7.1 plus HIGHESTMODSEQ (RFC 4551).
named!(resp_text_code_highest_mod_seq<ResponseCode>, do_parse!(
    tag_s!("HIGHESTMODSEQ ") >>
    num: number_64 >>
    (ResponseCode::HighestModSeq(num))
));
named!(resp_text_code_read_only<ResponseCode>, do_parse!(
    tag_s!("READ-ONLY") >>
    (ResponseCode::ReadOnly)
));
named!(resp_text_code_read_write<ResponseCode>, do_parse!(
    tag_s!("READ-WRITE") >>
    (ResponseCode::ReadWrite)
));
named!(resp_text_code_try_create<ResponseCode>, do_parse!(
    tag_s!("TRYCREATE") >>
    (ResponseCode::TryCreate)
));
named!(resp_text_code_uid_validity<ResponseCode>, do_parse!(
    tag_s!("UIDVALIDITY ") >>
    num: number >>
    (ResponseCode::UidValidity(num))
));
named!(resp_text_code_uid_next<ResponseCode>, do_parse!(
    tag_s!("UIDNEXT ") >>
    num: number >>
    (ResponseCode::UidNext(num))
));
named!(resp_text_code_unseen<ResponseCode>, do_parse!(
    tag_s!("UNSEEN ") >>
    num: number >>
    (ResponseCode::Unseen(num))
));
// Bracketed response code, "[...]".
named!(resp_text_code<ResponseCode>, do_parse!(
    tag_s!("[") >>
    coded: alt!(
        resp_text_code_permanent_flags |
        resp_text_code_uid_validity |
        resp_text_code_uid_next |
        resp_text_code_unseen |
        resp_text_code_read_only |
        resp_text_code_read_write |
        resp_text_code_try_create |
        resp_text_code_highest_mod_seq
    ) >>
    // Per the spec, the closing tag should be "] ".
    // See `resp_text` for more on why this is done differently.
    tag_s!("]") >>
    (coded)
));
// One capability token, including its leading separator space.
named!(capability<&str>, do_parse!(
    tag_s!(" ") >>
    atom: take_till1_s!(atom_specials) >>
    (str::from_utf8(atom).unwrap())
));
// Body of an untagged "CAPABILITY ..." response (one or more tokens).
named!(capability_data<Response>, do_parse!(
    tag_s!("CAPABILITY") >>
    capabilities: many1!(capability) >>
    (Response::Capabilities(capabilities))
));
// Untagged "FLAGS (...)" response.
named!(mailbox_data_flags<Response>, do_parse!(
    tag_s!("FLAGS ") >>
    flags: flag_list >>
    (Response::MailboxData(MailboxDatum::Flags(flags)))
));
// Untagged "<n> EXISTS" response.
named!(mailbox_data_exists<Response>, do_parse!(
    num: number >>
    tag_s!(" EXISTS") >>
    (Response::MailboxData(MailboxDatum::Exists(num)))
));
// Untagged LIST response: flags, quoted hierarchy delimiter, mailbox name.
named!(mailbox_data_list<Response>, do_parse!(
    tag_s!("LIST ") >>
    flags: flag_list >>
    tag_s!(" ") >>
    path: quoted >>
    tag_s!(" ") >>
    name: mailbox >>
    (Response::MailboxData(MailboxDatum::List {
        flags,
        delimiter: str::from_utf8(path).unwrap(),
        name
    }))
));
// Untagged LSUB response; same shape as LIST.
named!(mailbox_data_lsub<Response>, do_parse!(
    tag_s!("LSUB ") >>
    flags: flag_list >>
    tag_s!(" ") >>
    path: quoted >>
    tag_s!(" ") >>
    name: mailbox >>
    (Response::MailboxData(MailboxDatum::SubList {
        flags,
        delimiter: str::from_utf8(path).unwrap(),
        name
    }))
));
// Untagged "<n> RECENT" response.
named!(mailbox_data_recent<Response>, do_parse!(
    num: number >>
    tag_s!(" RECENT") >>
    (Response::MailboxData(MailboxDatum::Recent(num)))
));
// Any untagged mailbox-data response.
named!(mailbox_data<Response>, alt!(
    mailbox_data_flags |
    mailbox_data_exists |
    mailbox_data_list |
    mailbox_data_lsub |
    mailbox_data_recent
));
// nstring: NIL or an IMAP string; NIL maps to None.
named!(nstring<Option<&[u8]>>, map!(
    alt!(tag_s!("NIL") | string),
    |s| if s == b"NIL" { None } else { Some(s) }
));
// Envelope address: a parenthesized 4-tuple of nstrings (RFC 3501 §7.4.2).
named!(address<Address>, do_parse!(
    tag_s!("(") >>
    name: nstring >>
    tag_s!(" ") >>
    adl: nstring >>
    tag_s!(" ") >>
    mailbox: nstring >>
    tag_s!(" ") >>
    host: nstring >>
    tag_s!(")") >>
    (Address {
        name: name.map(|s| str::from_utf8(s).unwrap()),
        adl: adl.map(|s| str::from_utf8(s).unwrap()),
        mailbox: mailbox.map(|s| str::from_utf8(s).unwrap()),
        host: host.map(|s| str::from_utf8(s).unwrap()),
    })
));
// NIL, or a parenthesized non-empty address list. The matched "NIL" bytes
// are unused, so `|_|` replaces the previously unused `|s|` binding.
named!(opt_addresses<Option<Vec<Address>>>, alt!(
    map!(tag_s!("NIL"), |_| None) |
    do_parse!(
        tag_s!("(") >>
        addrs: many1!(address) >>
        tag_s!(")") >>
        (Some(addrs))
    )
));
// BODY[<section>]<<origin>> <data> fetch attribute; the angle-bracketed
// partial-fetch origin index is optional.
named!(msg_att_body_section<AttributeValue>, do_parse!(
    tag_s!("BODY") >>
    section: section >>
    index: opt!(do_parse!(
        tag_s!("<") >>
        num: number >>
        tag_s!(">") >>
        (num)
    )) >>
    tag_s!(" ") >>
    data: nstring >>
    (AttributeValue::BodySection { section, index, data })
));
// ENVELOPE attribute: ten fields in fixed order (RFC 3501 §7.4.2).
// Any field may be NIL, hence the nstring/opt_addresses types.
named!(msg_att_envelope<AttributeValue>, do_parse!(
    tag_s!("ENVELOPE (") >>
    date: nstring >>
    tag_s!(" ") >>
    subject: nstring >>
    tag_s!(" ") >>
    from: opt_addresses >>
    tag_s!(" ") >>
    sender: opt_addresses >>
    tag_s!(" ") >>
    reply_to: opt_addresses >>
    tag_s!(" ") >>
    to: opt_addresses >>
    tag_s!(" ") >>
    cc: opt_addresses >>
    tag_s!(" ") >>
    bcc: opt_addresses >>
    tag_s!(" ") >>
    in_reply_to: nstring >>
    tag_s!(" ") >>
    message_id: nstring >>
    tag_s!(")") >> ({
        AttributeValue::Envelope(Envelope {
            date: date.map(|s| str::from_utf8(s).unwrap()),
            subject: subject.map(|s| str::from_utf8(s).unwrap()),
            from,
            sender,
            reply_to,
            to,
            cc,
            bcc,
            in_reply_to: in_reply_to.map(|s| str::from_utf8(s).unwrap()),
            message_id: message_id.map(|s| str::from_utf8(s).unwrap()),
        })
    })
));
// INTERNALDATE attribute.
// NOTE(review): `date.unwrap()` panics if the server sends NIL here. The
// grammar mandates a quoted date-time, but a malformed response aborts the
// process — consider failing the parse instead; confirm against callers.
named!(msg_att_internal_date<AttributeValue>, do_parse!(
    tag_s!("INTERNALDATE ") >>
    date: nstring >>
    (AttributeValue::InternalDate(str::from_utf8(date.unwrap()).unwrap()))
));
// FLAGS attribute.
named!(msg_att_flags<AttributeValue>, do_parse!(
    tag_s!("FLAGS ") >>
    flags: flag_list >>
    (AttributeValue::Flags(flags))
));
// Full raw RFC822 message body (may be NIL).
named!(msg_att_rfc822<AttributeValue>, do_parse!(
    tag_s!("RFC822 ") >>
    raw: nstring >>
    (AttributeValue::Rfc822(raw))
));
// RFC822.SIZE attribute.
named!(msg_att_rfc822_size<AttributeValue>, do_parse!(
    tag_s!("RFC822.SIZE ") >>
    num: number >>
    (AttributeValue::Rfc822Size(num))
));
// MODSEQ attribute (RFC 4551); value is parenthesized on the wire.
named!(msg_att_mod_seq<AttributeValue>, do_parse!(
    tag_s!("MODSEQ (") >>
    num: number_64 >>
    tag_s!(")") >>
    (AttributeValue::ModSeq(num))
));
// UID attribute.
named!(msg_att_uid<AttributeValue>, do_parse!(
    tag_s!("UID ") >>
    num: number >>
    (AttributeValue::Uid(num))
));
// Any single fetch attribute.
named!(msg_att<AttributeValue>, alt!(
    msg_att_body_section |
    msg_att_envelope |
    msg_att_internal_date |
    msg_att_flags |
    msg_att_mod_seq |
    msg_att_rfc822 |
    msg_att_rfc822_size |
    msg_att_uid
));
// Parenthesized, space-separated list of one or more fetch attributes.
named!(msg_att_list<Vec<AttributeValue>>, do_parse!(
    tag_s!("(") >>
    elements: do_parse!(
        attr0: msg_att >>
        attrs: many0!(do_parse!(
            tag_s!(" ") >>
            attr: msg_att >>
            (attr)
        )) >> ({
            let mut res = vec![attr0];
            res.extend(attrs);
            res
        })
    ) >>
    tag_s!(")") >>
    (elements)
));
// Untagged "<n> FETCH (...)" response.
named!(message_data_fetch<Response>, do_parse!(
    num: number >>
    tag_s!(" FETCH ") >>
    attrs: msg_att_list >>
    (Response::Fetch(num, attrs))
));
// Untagged "<n> EXPUNGE" response.
named!(message_data_expunge<Response>, do_parse!(
    num: number >>
    tag_s!(" EXPUNGE") >>
    (Response::Expunge(num))
));
// Client-chosen request tag that prefixes a tagged response.
named!(tag<RequestId>, map!(take_while1_s!(tag_char),
    |s| RequestId(str::from_utf8(s).unwrap().to_string())
));
// This is not quite according to spec, which mandates the following:
//     ["[" resp-text-code "]" SP] text
// However, examples in RFC 4551 (Conditional STORE) counteract this by giving
// examples of `resp-text` that do not include the trailing space and text.
named!(resp_text<(Option<ResponseCode>, Option<&str>)>, do_parse!(
    code: opt!(resp_text_code) >>
    text: text >>
    ({
        // When a code was present, `text` still begins with the space that
        // followed "]" (resp_text_code stops at "]"), so skip it.
        // Empty remaining text becomes None.
        let res = if text.len() < 1 {
            None
        } else if code.is_some() {
            Some(&text[1..])
        } else {
            Some(text)
        };
        (code, res)
    })
));
// Tagged completion: "<tag> SP <status> SP <resp-text> CRLF".
named!(response_tagged<Response>, do_parse!(
    tag: tag >>
    tag_s!(" ") >>
    status: status >>
    tag_s!(" ") >>
    text: resp_text >>
    tag_s!("\r\n") >>
    (Response::Done {
        tag,
        status,
        code: text.0,
        information: text.1,
    })
));
// Untagged condition: "<status> SP <resp-text>" (no CRLF; consumed by caller).
named!(resp_cond<Response>, do_parse!(
    status: status >>
    tag_s!(" ") >>
    text: resp_text >>
    (Response::Data {
        status,
        code: text.0,
        information: text.1,
    })
));
// Untagged "* ..." response of any supported kind, terminated by CRLF.
named!(response_data<Response>, do_parse!(
    tag_s!("* ") >>
    contents: alt!(
        resp_cond |
        mailbox_data |
        message_data_expunge |
        message_data_fetch |
        capability_data
    ) >>
    tag_s!("\r\n") >>
    (contents)
));
// Any complete server response line (untagged first, then tagged).
named!(response<Response>, alt!(
    response_data |
    response_tagged
));
/// Result of parsing one response: remaining input plus the parsed `Response`,
/// both borrowing from the caller's buffer.
pub type ParseResult<'a> = IResult<&'a [u8], Response<'a>>;
/// Public entry point: parse a single server response line (tagged or untagged).
pub fn parse_response(msg: &[u8]) -> ParseResult {
    response(msg)
}
#[cfg(test)]
mod tests {
    use types::*;
    use super::{parse_response, IResult};

    // `number` must reject values that overflow u32 rather than wrap.
    #[test]
    fn test_number_overflow() {
        match parse_response(b"* 2222222222222222222222222222222222222222222C\r\n") {
            IResult::Error(_) => {},
            _ => panic!("error required for integer overflow"),
        }
    }

    // UNSEEN response code followed by human-readable text.
    #[test]
    fn test_unseen() {
        match parse_response(b"* OK [UNSEEN 3] Message 3 is first unseen\r\n").unwrap() {
            (_, Response::Data {
                status: Status::Ok,
                code: Some(ResponseCode::Unseen(3)),
                information: Some("Message 3 is first unseen"),
            }) => {},
            rsp @ _ => panic!("unexpected response {:?}", rsp),
        }
    }

    // BODY[TEXT] carrying a literal payload ("{3}\r\nfoo").
    #[test]
    fn test_body_text() {
        match parse_response(b"* 2 FETCH (BODY[TEXT] {3}\r\nfoo)\r\n") {
            IResult::Done(_, Response::Fetch(_, attrs)) => {
                let body = &attrs[0];
                assert_eq!(body, &AttributeValue::BodySection {
                    section: Some(SectionPath::Full(MessageSection::Text)),
                    index: None,
                    data: Some(b"foo"),
                }, "body = {:?}", body);
            },
            rsp @ _ => panic!("unexpected response {:?}", rsp),
        }
    }
}
|
use std::i32;
use byteorder::{BigEndian, ByteOrder};
use {Header, Packet, Error, Question, Name, QueryType, QueryClass};
use {Type, Class, ResourceRecord, RRData};
impl<'a> Packet<'a> {
    /// Parses a raw DNS message into a `Packet` borrowing from `data`.
    ///
    /// Reads the fixed header, then the question, answer, authority
    /// (nameserver) and additional sections in wire order, advancing a
    /// running offset. Returns `Error::UnexpectedEOF` when the buffer is
    /// truncated mid-record.
    pub fn parse(data: &[u8]) -> Result<Packet, Error> {
        let header = try!(Header::parse(data));
        let mut offset = Header::size();
        let mut questions = Vec::with_capacity(header.questions as usize);
        for _ in 0..header.questions {
            // Question names may contain compression pointers into `data`.
            let name = try!(Name::scan(&data[offset..], data));
            offset += name.byte_len();
            // qtype(2) + qclass(2) must still fit in the buffer.
            if offset + 4 > data.len() {
                return Err(Error::UnexpectedEOF);
            }
            let qtype = try!(QueryType::parse(
                BigEndian::read_u16(&data[offset..offset+2])));
            offset += 2;
            let qclass = try!(QueryClass::parse(
                BigEndian::read_u16(&data[offset..offset+2])));
            offset += 2;
            questions.push(Question {
                qname: name,
                qtype: qtype,
                qclass: qclass,
            });
        }
        let mut answers = Vec::with_capacity(header.answers as usize);
        for _ in 0..header.answers {
            answers.push(try!(parse_record(data, &mut offset)));
        }
        let mut nameservers = Vec::with_capacity(header.nameservers as usize);
        for _ in 0..header.nameservers {
            nameservers.push(try!(parse_record(data, &mut offset)));
        }
        // Additional records share the resource-record wire format, so the
        // same helper parses them; previously this section was skipped and
        // `additional` was always returned empty (the old TODO).
        let mut additional = Vec::with_capacity(header.additional as usize);
        for _ in 0..header.additional {
            additional.push(try!(parse_record(data, &mut offset)));
        }
        Ok(Packet {
            header: header,
            questions: questions,
            answers: answers,
            nameservers: nameservers,
            additional: additional,
        })
    }
}
// Generic function to parse answer, nameservers, and additional records.
// Reads one resource record starting at `*offset` and advances the offset
// past it. The full `data` buffer is also passed through so compressed
// names (pointer labels) can be resolved.
fn parse_record<'a>(data: &'a [u8], offset: &mut usize) -> Result<ResourceRecord<'a>, Error> {
    let name = try!(Name::scan(&data[*offset..], data));
    *offset += name.byte_len();
    // Fixed post-name header: type(2) + class(2) + ttl(4) + rdlength(2) = 10.
    if *offset + 10 > data.len() {
        return Err(Error::UnexpectedEOF);
    }
    let typ = try!(Type::parse(
        BigEndian::read_u16(&data[*offset..*offset+2])));
    *offset += 2;
    let cls = try!(Class::parse(
        BigEndian::read_u16(&data[*offset..*offset+2])));
    *offset += 2;
    // TTLs with the most significant bit set are treated as zero
    // (RFC 2181 §8 semantics for out-of-range TTL values).
    let mut ttl = BigEndian::read_u32(&data[*offset..*offset+4]);
    if ttl > i32::MAX as u32 {
        ttl = 0;
    }
    *offset += 4;
    let rdlen = BigEndian::read_u16(&data[*offset..*offset+2]) as usize;
    *offset += 2;
    if *offset + rdlen > data.len() {
        return Err(Error::UnexpectedEOF);
    }
    // `data` is deliberately shadowed: RRData::parse takes the rdata slice
    // plus the whole packet for resolving compression pointers.
    let data = try!(RRData::parse(typ,
        &data[*offset..*offset+rdlen], data));
    *offset += rdlen;
    Ok(ResourceRecord {
        name: name,
        cls: cls,
        ttl: ttl,
        data: data,
    })
}
#[cfg(test)]
mod test {
use std::net::{Ipv4Addr, Ipv6Addr};
use {Packet, Header};
use Opcode::*;
use ResponseCode::NoError;
use QueryType as QT;
use QueryClass as QC;
use Class as C;
use RRData;
#[test]
fn parse_example_query() {
let query = b"\x06%\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\
\x07example\x03com\x00\x00\x01\x00\x01";
let packet = Packet::parse(query).unwrap();
assert_eq!(packet.header, Header {
id: 1573,
query: true,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: false,
response_code: NoError,
questions: 1,
answers: 0,
nameservers: 0,
additional: 0,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::A);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..], "example.com");
assert_eq!(packet.answers.len(), 0);
}
#[test]
fn parse_example_response() {
let response = b"\x06%\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\
\x07example\x03com\x00\x00\x01\x00\x01\
\xc0\x0c\x00\x01\x00\x01\x00\x00\x04\xf8\
\x00\x04]\xb8\xd8\"";
let packet = Packet::parse(response).unwrap();
assert_eq!(packet.header, Header {
id: 1573,
query: false,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: true,
response_code: NoError,
questions: 1,
answers: 1,
nameservers: 0,
additional: 0,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::A);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..], "example.com");
assert_eq!(packet.answers.len(), 1);
assert_eq!(&packet.answers[0].name.to_string()[..], "example.com");
assert_eq!(packet.answers[0].cls, C::IN);
assert_eq!(packet.answers[0].ttl, 1272);
match packet.answers[0].data {
RRData::A(addr) => {
assert_eq!(addr, Ipv4Addr::new(93, 184, 216, 34));
}
ref x => panic!("Wrong rdata {:?}", x),
}
}
#[test]
fn parse_ns_response() {
let response = b"\x4a\xf0\x81\x80\x00\x01\x00\x01\x00\x01\x00\x00\
\x03www\x05skype\x03com\x00\x00\x01\x00\x01\
\xc0\x0c\x00\x05\x00\x01\x00\x00\x0e\x10\
\x00\x1c\x07\x6c\x69\x76\x65\x63\x6d\x73\x0e\x74\
\x72\x61\x66\x66\x69\x63\x6d\x61\x6e\x61\x67\x65\
\x72\x03\x6e\x65\x74\x00\
\xc0\x42\x00\x02\x00\x01\x00\x01\xd5\xd3\x00\x11\
\x01\x67\x0c\x67\x74\x6c\x64\x2d\x73\x65\x72\x76\x65\x72\x73\
\xc0\x42";
let packet = Packet::parse(response).unwrap();
assert_eq!(packet.header, Header {
id: 19184,
query: false,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: true,
response_code: NoError,
questions: 1,
answers: 1,
nameservers: 1,
additional: 0,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::A);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..], "www.skype.com");
assert_eq!(packet.answers.len(), 1);
assert_eq!(&packet.answers[0].name.to_string()[..], "www.skype.com");
assert_eq!(packet.answers[0].cls, C::IN);
assert_eq!(packet.answers[0].ttl, 3600);
match packet.answers[0].data {
RRData::CNAME(cname) => {
assert_eq!(&cname.to_string()[..], "livecms.trafficmanager.net");
}
ref x => panic!("Wrong rdata {:?}", x),
}
assert_eq!(packet.nameservers.len(), 1);
assert_eq!(&packet.nameservers[0].name.to_string()[..], "net");
assert_eq!(packet.nameservers[0].cls, C::IN);
assert_eq!(packet.nameservers[0].ttl, 120275);
match packet.nameservers[0].data {
RRData::NS(ns) => {
assert_eq!(&ns.to_string()[..], "g.gtld-servers.net");
}
ref x => panic!("Wrong rdata {:?}", x),
}
}
#[test]
fn parse_multiple_answers() {
let response = b"\x9d\xe9\x81\x80\x00\x01\x00\x06\x00\x00\x00\x00\
\x06google\x03com\x00\x00\x01\x00\x01\xc0\x0c\
\x00\x01\x00\x01\x00\x00\x00\xef\x00\x04@\xe9\
\xa4d\xc0\x0c\x00\x01\x00\x01\x00\x00\x00\xef\
\x00\x04@\xe9\xa4\x8b\xc0\x0c\x00\x01\x00\x01\
\x00\x00\x00\xef\x00\x04@\xe9\xa4q\xc0\x0c\x00\
\x01\x00\x01\x00\x00\x00\xef\x00\x04@\xe9\xa4f\
\xc0\x0c\x00\x01\x00\x01\x00\x00\x00\xef\x00\x04@\
\xe9\xa4e\xc0\x0c\x00\x01\x00\x01\x00\x00\x00\xef\
\x00\x04@\xe9\xa4\x8a";
let packet = Packet::parse(response).unwrap();
assert_eq!(packet.header, Header {
id: 40425,
query: false,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: true,
response_code: NoError,
questions: 1,
answers: 6,
nameservers: 0,
additional: 0,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::A);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..], "google.com");
assert_eq!(packet.answers.len(), 6);
let ips = vec![
Ipv4Addr::new(64, 233, 164, 100),
Ipv4Addr::new(64, 233, 164, 139),
Ipv4Addr::new(64, 233, 164, 113),
Ipv4Addr::new(64, 233, 164, 102),
Ipv4Addr::new(64, 233, 164, 101),
Ipv4Addr::new(64, 233, 164, 138),
];
for i in 0..6 {
assert_eq!(&packet.answers[i].name.to_string()[..], "google.com");
assert_eq!(packet.answers[i].cls, C::IN);
assert_eq!(packet.answers[i].ttl, 239);
match packet.answers[i].data {
RRData::A(addr) => {
assert_eq!(addr, ips[i]);
}
ref x => panic!("Wrong rdata {:?}", x),
}
}
}
#[test]
fn parse_srv_query() {
let query = b"[\xd9\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\
\x0c_xmpp-server\x04_tcp\x05gmail\x03com\x00\x00!\x00\x01";
let packet = Packet::parse(query).unwrap();
assert_eq!(packet.header, Header {
id: 23513,
query: true,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: false,
response_code: NoError,
questions: 1,
answers: 0,
nameservers: 0,
additional: 0,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::SRV);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..],
"_xmpp-server._tcp.gmail.com");
assert_eq!(packet.answers.len(), 0);
}
#[test]
fn parse_srv_response() {
let response = b"[\xd9\x81\x80\x00\x01\x00\x05\x00\x00\x00\x00\
\x0c_xmpp-server\x04_tcp\x05gmail\x03com\x00\x00!\x00\x01\
\xc0\x0c\x00!\x00\x01\x00\x00\x03\x84\x00 \x00\x05\x00\x00\
\x14\x95\x0bxmpp-server\x01l\x06google\x03com\x00\xc0\x0c\x00!\
\x00\x01\x00\x00\x03\x84\x00%\x00\x14\x00\x00\x14\x95\
\x04alt3\x0bxmpp-server\x01l\x06google\x03com\x00\
\xc0\x0c\x00!\x00\x01\x00\x00\x03\x84\x00%\x00\x14\x00\x00\
\x14\x95\x04alt1\x0bxmpp-server\x01l\x06google\x03com\x00\
\xc0\x0c\x00!\x00\x01\x00\x00\x03\x84\x00%\x00\x14\x00\x00\
\x14\x95\x04alt2\x0bxmpp-server\x01l\x06google\x03com\x00\
\xc0\x0c\x00!\x00\x01\x00\x00\x03\x84\x00%\x00\x14\x00\x00\
\x14\x95\x04alt4\x0bxmpp-server\x01l\x06google\x03com\x00";
let packet = Packet::parse(response).unwrap();
assert_eq!(packet.header, Header {
id: 23513,
query: false,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: true,
response_code: NoError,
questions: 1,
answers: 5,
nameservers: 0,
additional: 0,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::SRV);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..],
"_xmpp-server._tcp.gmail.com");
assert_eq!(packet.answers.len(), 5);
let items = vec![
(5, 0, 5269, "xmpp-server.l.google.com"),
(20, 0, 5269, "alt3.xmpp-server.l.google.com"),
(20, 0, 5269, "alt1.xmpp-server.l.google.com"),
(20, 0, 5269, "alt2.xmpp-server.l.google.com"),
(20, 0, 5269, "alt4.xmpp-server.l.google.com"),
];
for i in 0..5 {
assert_eq!(&packet.answers[i].name.to_string()[..],
"_xmpp-server._tcp.gmail.com");
assert_eq!(packet.answers[i].cls, C::IN);
assert_eq!(packet.answers[i].ttl, 900);
match *&packet.answers[i].data {
RRData::SRV { priority, weight, port, target } => {
assert_eq!(priority, items[i].0);
assert_eq!(weight, items[i].1);
assert_eq!(port, items[i].2);
assert_eq!(target.to_string(), (items[i].3).to_string());
}
ref x => panic!("Wrong rdata {:?}", x),
}
}
}
#[test]
fn parse_mx_response() {
let response = b"\xe3\xe8\x81\x80\x00\x01\x00\x05\x00\x00\x00\x00\
\x05gmail\x03com\x00\x00\x0f\x00\x01\xc0\x0c\x00\x0f\x00\x01\
\x00\x00\x04|\x00\x1b\x00\x05\rgmail-smtp-in\x01l\x06google\xc0\
\x12\xc0\x0c\x00\x0f\x00\x01\x00\x00\x04|\x00\t\x00\
\n\x04alt1\xc0)\xc0\x0c\x00\x0f\x00\x01\x00\x00\x04|\
\x00\t\x00(\x04alt4\xc0)\xc0\x0c\x00\x0f\x00\x01\x00\
\x00\x04|\x00\t\x00\x14\x04alt2\xc0)\xc0\x0c\x00\x0f\
\x00\x01\x00\x00\x04|\x00\t\x00\x1e\x04alt3\xc0)";
let packet = Packet::parse(response).unwrap();
assert_eq!(packet.header, Header {
id: 58344,
query: false,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: true,
response_code: NoError,
questions: 1,
answers: 5,
nameservers: 0,
additional: 0,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::MX);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..],
"gmail.com");
assert_eq!(packet.answers.len(), 5);
let items = vec![
( 5, "gmail-smtp-in.l.google.com"),
(10, "alt1.gmail-smtp-in.l.google.com"),
(40, "alt4.gmail-smtp-in.l.google.com"),
(20, "alt2.gmail-smtp-in.l.google.com"),
(30, "alt3.gmail-smtp-in.l.google.com"),
];
for i in 0..5 {
assert_eq!(&packet.answers[i].name.to_string()[..],
"gmail.com");
assert_eq!(packet.answers[i].cls, C::IN);
assert_eq!(packet.answers[i].ttl, 1148);
match *&packet.answers[i].data {
RRData::MX { preference, exchange } => {
assert_eq!(preference, items[i].0);
assert_eq!(exchange.to_string(), (items[i].1).to_string());
}
ref x => panic!("Wrong rdata {:?}", x),
}
}
}
#[test]
fn parse_aaaa_response() {
let response = b"\xa9\xd9\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\x06\
google\x03com\x00\x00\x1c\x00\x01\xc0\x0c\x00\x1c\x00\x01\x00\x00\
\x00\x8b\x00\x10*\x00\x14P@\t\x08\x12\x00\x00\x00\x00\x00\x00 \x0e";
let packet = Packet::parse(response).unwrap();
assert_eq!(packet.header, Header {
id: 43481,
query: false,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: true,
response_code: NoError,
questions: 1,
answers: 1,
nameservers: 0,
additional: 0,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::AAAA);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..], "google.com");
assert_eq!(packet.answers.len(), 1);
assert_eq!(&packet.answers[0].name.to_string()[..], "google.com");
assert_eq!(packet.answers[0].cls, C::IN);
assert_eq!(packet.answers[0].ttl, 139);
match packet.answers[0].data {
RRData::AAAA(addr) => {
assert_eq!(addr, Ipv6Addr::new(
0x2A00, 0x1450, 0x4009, 0x812, 0, 0, 0, 0x200e)
);
}
ref x => panic!("Wrong rdata {:?}", x),
}
}
#[test]
fn parse_cname_response() {
let response = b"\xfc\x9d\x81\x80\x00\x01\x00\x06\x00\x02\x00\x02\x03\
cdn\x07sstatic\x03net\x00\x00\x01\x00\x01\xc0\x0c\x00\x05\x00\x01\
\x00\x00\x00f\x00\x02\xc0\x10\xc0\x10\x00\x01\x00\x01\x00\x00\x00\
f\x00\x04h\x10g\xcc\xc0\x10\x00\x01\x00\x01\x00\x00\x00f\x00\x04h\
\x10k\xcc\xc0\x10\x00\x01\x00\x01\x00\x00\x00f\x00\x04h\x10h\xcc\
\xc0\x10\x00\x01\x00\x01\x00\x00\x00f\x00\x04h\x10j\xcc\xc0\x10\
\x00\x01\x00\x01\x00\x00\x00f\x00\x04h\x10i\xcc\xc0\x10\x00\x02\
\x00\x01\x00\x00\x99L\x00\x0b\x08cf-dns02\xc0\x10\xc0\x10\x00\x02\
\x00\x01\x00\x00\x99L\x00\x0b\x08cf-dns01\xc0\x10\xc0\xa2\x00\x01\
\x00\x01\x00\x00\x99L\x00\x04\xad\xf5:5\xc0\x8b\x00\x01\x00\x01\x00\
\x00\x99L\x00\x04\xad\xf5;\x04";
let packet = Packet::parse(response).unwrap();
assert_eq!(packet.header, Header {
id: 64669,
query: false,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: true,
response_code: NoError,
questions: 1,
answers: 6,
nameservers: 2,
additional: 2,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::A);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..], "cdn.sstatic.net");
assert_eq!(packet.answers.len(), 6);
assert_eq!(&packet.answers[0].name.to_string()[..], "cdn.sstatic.net");
assert_eq!(packet.answers[0].cls, C::IN);
assert_eq!(packet.answers[0].ttl, 102);
match packet.answers[0].data {
RRData::CNAME(cname) => {
assert_eq!(&cname.to_string(), "sstatic.net");
}
ref x => panic!("Wrong rdata {:?}", x),
}
let ips = vec![
Ipv4Addr::new(104, 16, 103, 204),
Ipv4Addr::new(104, 16, 107, 204),
Ipv4Addr::new(104, 16, 104, 204),
Ipv4Addr::new(104, 16, 106, 204),
Ipv4Addr::new(104, 16, 105, 204),
];
for i in 1..6 {
assert_eq!(&packet.answers[i].name.to_string()[..], "sstatic.net");
assert_eq!(packet.answers[i].cls, C::IN);
assert_eq!(packet.answers[i].ttl, 102);
match packet.answers[i].data {
RRData::A(addr) => {
assert_eq!(addr, ips[i-1]);
}
ref x => panic!("Wrong rdata {:?}", x),
}
}
}
}
Implement additional record parsing
use std::i32;
use byteorder::{BigEndian, ByteOrder};
use {Header, Packet, Error, Question, Name, QueryType, QueryClass};
use {Type, Class, ResourceRecord, RRData};
impl<'a> Packet<'a> {
    /// Parses a raw DNS message into a `Packet` borrowing from `data`.
    ///
    /// Reads the fixed header, then the question, answer, authority
    /// (nameserver) and additional sections in wire order, advancing a
    /// running offset. Returns `Error::UnexpectedEOF` when the buffer is
    /// truncated mid-record.
    pub fn parse(data: &[u8]) -> Result<Packet, Error> {
        let header = try!(Header::parse(data));
        let mut offset = Header::size();
        let mut questions = Vec::with_capacity(header.questions as usize);
        for _ in 0..header.questions {
            // Question names may contain compression pointers into `data`.
            let name = try!(Name::scan(&data[offset..], data));
            offset += name.byte_len();
            // qtype(2) + qclass(2) must still fit in the buffer.
            if offset + 4 > data.len() {
                return Err(Error::UnexpectedEOF);
            }
            let qtype = try!(QueryType::parse(
                BigEndian::read_u16(&data[offset..offset+2])));
            offset += 2;
            let qclass = try!(QueryClass::parse(
                BigEndian::read_u16(&data[offset..offset+2])));
            offset += 2;
            questions.push(Question {
                qname: name,
                qtype: qtype,
                qclass: qclass,
            });
        }
        // The three record sections share one wire format; `parse_record`
        // handles each in turn.
        let mut answers = Vec::with_capacity(header.answers as usize);
        for _ in 0..header.answers {
            answers.push(try!(parse_record(data, &mut offset)));
        }
        let mut nameservers = Vec::with_capacity(header.nameservers as usize);
        for _ in 0..header.nameservers {
            nameservers.push(try!(parse_record(data, &mut offset)));
        }
        let mut additional = Vec::with_capacity(header.additional as usize);
        for _ in 0..header.additional {
            additional.push(try!(parse_record(data, &mut offset)));
        }
        Ok(Packet {
            header: header,
            questions: questions,
            answers: answers,
            nameservers: nameservers,
            additional: additional,
        })
    }
}
// Generic function to parse answer, nameservers, and additional records.
// Reads one resource record starting at `*offset` and advances the offset
// past it. The full `data` buffer is also passed through so compressed
// names (pointer labels) can be resolved.
fn parse_record<'a>(data: &'a [u8], offset: &mut usize) -> Result<ResourceRecord<'a>, Error> {
    let name = try!(Name::scan(&data[*offset..], data));
    *offset += name.byte_len();
    // Fixed post-name header: type(2) + class(2) + ttl(4) + rdlength(2) = 10.
    if *offset + 10 > data.len() {
        return Err(Error::UnexpectedEOF);
    }
    let typ = try!(Type::parse(
        BigEndian::read_u16(&data[*offset..*offset+2])));
    *offset += 2;
    let cls = try!(Class::parse(
        BigEndian::read_u16(&data[*offset..*offset+2])));
    *offset += 2;
    // TTLs with the most significant bit set are treated as zero
    // (RFC 2181 §8 semantics for out-of-range TTL values).
    let mut ttl = BigEndian::read_u32(&data[*offset..*offset+4]);
    if ttl > i32::MAX as u32 {
        ttl = 0;
    }
    *offset += 4;
    let rdlen = BigEndian::read_u16(&data[*offset..*offset+2]) as usize;
    *offset += 2;
    if *offset + rdlen > data.len() {
        return Err(Error::UnexpectedEOF);
    }
    // `data` is deliberately shadowed: RRData::parse takes the rdata slice
    // plus the whole packet for resolving compression pointers.
    let data = try!(RRData::parse(typ,
        &data[*offset..*offset+rdlen], data));
    *offset += rdlen;
    Ok(ResourceRecord {
        name: name,
        cls: cls,
        ttl: ttl,
        data: data,
    })
}
#[cfg(test)]
mod test {
use std::net::{Ipv4Addr, Ipv6Addr};
use {Packet, Header};
use Opcode::*;
use ResponseCode::NoError;
use QueryType as QT;
use QueryClass as QC;
use Class as C;
use RRData;
#[test]
fn parse_example_query() {
let query = b"\x06%\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\
\x07example\x03com\x00\x00\x01\x00\x01";
let packet = Packet::parse(query).unwrap();
assert_eq!(packet.header, Header {
id: 1573,
query: true,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: false,
response_code: NoError,
questions: 1,
answers: 0,
nameservers: 0,
additional: 0,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::A);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..], "example.com");
assert_eq!(packet.answers.len(), 0);
}
#[test]
fn parse_example_response() {
let response = b"\x06%\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\
\x07example\x03com\x00\x00\x01\x00\x01\
\xc0\x0c\x00\x01\x00\x01\x00\x00\x04\xf8\
\x00\x04]\xb8\xd8\"";
let packet = Packet::parse(response).unwrap();
assert_eq!(packet.header, Header {
id: 1573,
query: false,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: true,
response_code: NoError,
questions: 1,
answers: 1,
nameservers: 0,
additional: 0,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::A);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..], "example.com");
assert_eq!(packet.answers.len(), 1);
assert_eq!(&packet.answers[0].name.to_string()[..], "example.com");
assert_eq!(packet.answers[0].cls, C::IN);
assert_eq!(packet.answers[0].ttl, 1272);
match packet.answers[0].data {
RRData::A(addr) => {
assert_eq!(addr, Ipv4Addr::new(93, 184, 216, 34));
}
ref x => panic!("Wrong rdata {:?}", x),
}
}
#[test]
fn parse_ns_response() {
let response = b"\x4a\xf0\x81\x80\x00\x01\x00\x01\x00\x01\x00\x00\
\x03www\x05skype\x03com\x00\x00\x01\x00\x01\
\xc0\x0c\x00\x05\x00\x01\x00\x00\x0e\x10\
\x00\x1c\x07\x6c\x69\x76\x65\x63\x6d\x73\x0e\x74\
\x72\x61\x66\x66\x69\x63\x6d\x61\x6e\x61\x67\x65\
\x72\x03\x6e\x65\x74\x00\
\xc0\x42\x00\x02\x00\x01\x00\x01\xd5\xd3\x00\x11\
\x01\x67\x0c\x67\x74\x6c\x64\x2d\x73\x65\x72\x76\x65\x72\x73\
\xc0\x42";
let packet = Packet::parse(response).unwrap();
assert_eq!(packet.header, Header {
id: 19184,
query: false,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: true,
response_code: NoError,
questions: 1,
answers: 1,
nameservers: 1,
additional: 0,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::A);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..], "www.skype.com");
assert_eq!(packet.answers.len(), 1);
assert_eq!(&packet.answers[0].name.to_string()[..], "www.skype.com");
assert_eq!(packet.answers[0].cls, C::IN);
assert_eq!(packet.answers[0].ttl, 3600);
match packet.answers[0].data {
RRData::CNAME(cname) => {
assert_eq!(&cname.to_string()[..], "livecms.trafficmanager.net");
}
ref x => panic!("Wrong rdata {:?}", x),
}
assert_eq!(packet.nameservers.len(), 1);
assert_eq!(&packet.nameservers[0].name.to_string()[..], "net");
assert_eq!(packet.nameservers[0].cls, C::IN);
assert_eq!(packet.nameservers[0].ttl, 120275);
match packet.nameservers[0].data {
RRData::NS(ns) => {
assert_eq!(&ns.to_string()[..], "g.gtld-servers.net");
}
ref x => panic!("Wrong rdata {:?}", x),
}
}
#[test]
fn parse_additional_record_response() {
let response = b"\x4a\xf0\x81\x80\x00\x01\x00\x01\x00\x01\x00\x01\
\x03www\x05skype\x03com\x00\x00\x01\x00\x01\
\xc0\x0c\x00\x05\x00\x01\x00\x00\x0e\x10\
\x00\x1c\x07\x6c\x69\x76\x65\x63\x6d\x73\x0e\x74\
\x72\x61\x66\x66\x69\x63\x6d\x61\x6e\x61\x67\x65\
\x72\x03\x6e\x65\x74\x00\
\xc0\x42\x00\x02\x00\x01\x00\x01\xd5\xd3\x00\x11\
\x01\x67\x0c\x67\x74\x6c\x64\x2d\x73\x65\x72\x76\x65\x72\x73\
\xc0\x42\
\x01\x61\xc0\x55\x00\x01\x00\x01\x00\x00\xa3\x1c\
\x00\x04\xc0\x05\x06\x1e";
let packet = Packet::parse(response).unwrap();
assert_eq!(packet.header, Header {
id: 19184,
query: false,
opcode: StandardQuery,
authoritative: false,
truncated: false,
recursion_desired: true,
recursion_available: true,
response_code: NoError,
questions: 1,
answers: 1,
nameservers: 1,
additional: 1,
});
assert_eq!(packet.questions.len(), 1);
assert_eq!(packet.questions[0].qtype, QT::A);
assert_eq!(packet.questions[0].qclass, QC::IN);
assert_eq!(&packet.questions[0].qname.to_string()[..], "www.skype.com");
assert_eq!(packet.answers.len(), 1);
assert_eq!(&packet.answers[0].name.to_string()[..], "www.skype.com");
assert_eq!(packet.answers[0].cls, C::IN);
assert_eq!(packet.answers[0].ttl, 3600);
match packet.answers[0].data {
RRData::CNAME(cname) => {
assert_eq!(&cname.to_string()[..], "livecms.trafficmanager.net");
}
ref x => panic!("Wrong rdata {:?}", x),
}
assert_eq!(packet.nameservers.len(), 1);
assert_eq!(&packet.nameservers[0].name.to_string()[..], "net");
assert_eq!(packet.nameservers[0].cls, C::IN);
assert_eq!(packet.nameservers[0].ttl, 120275);
match packet.nameservers[0].data {
RRData::NS(ns) => {
assert_eq!(&ns.to_string()[..], "g.gtld-servers.net");
}
ref x => panic!("Wrong rdata {:?}", x),
}
assert_eq!(packet.additional.len(), 1);
assert_eq!(&packet.additional[0].name.to_string()[..], "a.gtld-servers.net");
assert_eq!(packet.additional[0].cls, C::IN);
assert_eq!(packet.additional[0].ttl, 41756);
match packet.additional[0].data {
RRData::A(addr) => {
assert_eq!(addr, Ipv4Addr::new(192, 5, 6, 30));
}
ref x => panic!("Wrong rdata {:?}", x),
}
}
// Response carrying one question and six A answers for the same owner
// name; verifies header counts and each answer's address in wire order.
#[test]
fn parse_multiple_answers() {
    let response = b"\x9d\xe9\x81\x80\x00\x01\x00\x06\x00\x00\x00\x00\
        \x06google\x03com\x00\x00\x01\x00\x01\xc0\x0c\
        \x00\x01\x00\x01\x00\x00\x00\xef\x00\x04@\xe9\
        \xa4d\xc0\x0c\x00\x01\x00\x01\x00\x00\x00\xef\
        \x00\x04@\xe9\xa4\x8b\xc0\x0c\x00\x01\x00\x01\
        \x00\x00\x00\xef\x00\x04@\xe9\xa4q\xc0\x0c\x00\
        \x01\x00\x01\x00\x00\x00\xef\x00\x04@\xe9\xa4f\
        \xc0\x0c\x00\x01\x00\x01\x00\x00\x00\xef\x00\x04@\
        \xe9\xa4e\xc0\x0c\x00\x01\x00\x01\x00\x00\x00\xef\
        \x00\x04@\xe9\xa4\x8a";
    let packet = Packet::parse(response).unwrap();
    assert_eq!(packet.header, Header {
        id: 40425,
        query: false,
        opcode: StandardQuery,
        authoritative: false,
        truncated: false,
        recursion_desired: true,
        recursion_available: true,
        response_code: NoError,
        questions: 1,
        answers: 6,
        nameservers: 0,
        additional: 0,
    });
    assert_eq!(packet.questions.len(), 1);
    assert_eq!(packet.questions[0].qtype, QT::A);
    assert_eq!(packet.questions[0].qclass, QC::IN);
    assert_eq!(&packet.questions[0].qname.to_string()[..], "google.com");
    assert_eq!(packet.answers.len(), 6);
    // expected A-record rdata, in wire order
    let ips = vec![
        Ipv4Addr::new(64, 233, 164, 100),
        Ipv4Addr::new(64, 233, 164, 139),
        Ipv4Addr::new(64, 233, 164, 113),
        Ipv4Addr::new(64, 233, 164, 102),
        Ipv4Addr::new(64, 233, 164, 101),
        Ipv4Addr::new(64, 233, 164, 138),
    ];
    for i in 0..6 {
        assert_eq!(&packet.answers[i].name.to_string()[..], "google.com");
        assert_eq!(packet.answers[i].cls, C::IN);
        assert_eq!(packet.answers[i].ttl, 239);
        match packet.answers[i].data {
            RRData::A(addr) => {
                assert_eq!(addr, ips[i]);
            }
            ref x => panic!("Wrong rdata {:?}", x),
        }
    }
}
// SRV query (no answer sections); checks the header flags and that the
// underscore-labelled question name round-trips through to_string().
#[test]
fn parse_srv_query() {
    let query = b"[\xd9\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\
        \x0c_xmpp-server\x04_tcp\x05gmail\x03com\x00\x00!\x00\x01";
    let packet = Packet::parse(query).unwrap();
    assert_eq!(packet.header, Header {
        id: 23513,
        query: true,
        opcode: StandardQuery,
        authoritative: false,
        truncated: false,
        recursion_desired: true,
        recursion_available: false,
        response_code: NoError,
        questions: 1,
        answers: 0,
        nameservers: 0,
        additional: 0,
    });
    assert_eq!(packet.questions.len(), 1);
    assert_eq!(packet.questions[0].qtype, QT::SRV);
    assert_eq!(packet.questions[0].qclass, QC::IN);
    assert_eq!(&packet.questions[0].qname.to_string()[..],
        "_xmpp-server._tcp.gmail.com");
    assert_eq!(packet.answers.len(), 0);
}
// SRV response with five answers; checks priority/weight/port/target of
// each record in wire order.
#[test]
fn parse_srv_response() {
    let response = b"[\xd9\x81\x80\x00\x01\x00\x05\x00\x00\x00\x00\
        \x0c_xmpp-server\x04_tcp\x05gmail\x03com\x00\x00!\x00\x01\
        \xc0\x0c\x00!\x00\x01\x00\x00\x03\x84\x00 \x00\x05\x00\x00\
        \x14\x95\x0bxmpp-server\x01l\x06google\x03com\x00\xc0\x0c\x00!\
        \x00\x01\x00\x00\x03\x84\x00%\x00\x14\x00\x00\x14\x95\
        \x04alt3\x0bxmpp-server\x01l\x06google\x03com\x00\
        \xc0\x0c\x00!\x00\x01\x00\x00\x03\x84\x00%\x00\x14\x00\x00\
        \x14\x95\x04alt1\x0bxmpp-server\x01l\x06google\x03com\x00\
        \xc0\x0c\x00!\x00\x01\x00\x00\x03\x84\x00%\x00\x14\x00\x00\
        \x14\x95\x04alt2\x0bxmpp-server\x01l\x06google\x03com\x00\
        \xc0\x0c\x00!\x00\x01\x00\x00\x03\x84\x00%\x00\x14\x00\x00\
        \x14\x95\x04alt4\x0bxmpp-server\x01l\x06google\x03com\x00";
    let packet = Packet::parse(response).unwrap();
    assert_eq!(packet.header, Header {
        id: 23513,
        query: false,
        opcode: StandardQuery,
        authoritative: false,
        truncated: false,
        recursion_desired: true,
        recursion_available: true,
        response_code: NoError,
        questions: 1,
        answers: 5,
        nameservers: 0,
        additional: 0,
    });
    assert_eq!(packet.questions.len(), 1);
    assert_eq!(packet.questions[0].qtype, QT::SRV);
    assert_eq!(packet.questions[0].qclass, QC::IN);
    assert_eq!(&packet.questions[0].qname.to_string()[..],
        "_xmpp-server._tcp.gmail.com");
    assert_eq!(packet.answers.len(), 5);
    // (priority, weight, port, target) for each answer, in wire order
    let items = vec![
        (5, 0, 5269, "xmpp-server.l.google.com"),
        (20, 0, 5269, "alt3.xmpp-server.l.google.com"),
        (20, 0, 5269, "alt1.xmpp-server.l.google.com"),
        (20, 0, 5269, "alt2.xmpp-server.l.google.com"),
        (20, 0, 5269, "alt4.xmpp-server.l.google.com"),
    ];
    for i in 0..5 {
        assert_eq!(&packet.answers[i].name.to_string()[..],
            "_xmpp-server._tcp.gmail.com");
        assert_eq!(packet.answers[i].cls, C::IN);
        assert_eq!(packet.answers[i].ttl, 900);
        // `*&` was a no-op reborrow; matching on the place directly is
        // equivalent and matches the style of the sibling tests.
        match packet.answers[i].data {
            RRData::SRV { priority, weight, port, target } => {
                assert_eq!(priority, items[i].0);
                assert_eq!(weight, items[i].1);
                assert_eq!(port, items[i].2);
                assert_eq!(target.to_string(), (items[i].3).to_string());
            }
            ref x => panic!("Wrong rdata {:?}", x),
        }
    }
}
// MX response with five answers (compressed names); checks each
// preference/exchange pair in wire order.
#[test]
fn parse_mx_response() {
    let response = b"\xe3\xe8\x81\x80\x00\x01\x00\x05\x00\x00\x00\x00\
        \x05gmail\x03com\x00\x00\x0f\x00\x01\xc0\x0c\x00\x0f\x00\x01\
        \x00\x00\x04|\x00\x1b\x00\x05\rgmail-smtp-in\x01l\x06google\xc0\
        \x12\xc0\x0c\x00\x0f\x00\x01\x00\x00\x04|\x00\t\x00\
        \n\x04alt1\xc0)\xc0\x0c\x00\x0f\x00\x01\x00\x00\x04|\
        \x00\t\x00(\x04alt4\xc0)\xc0\x0c\x00\x0f\x00\x01\x00\
        \x00\x04|\x00\t\x00\x14\x04alt2\xc0)\xc0\x0c\x00\x0f\
        \x00\x01\x00\x00\x04|\x00\t\x00\x1e\x04alt3\xc0)";
    let packet = Packet::parse(response).unwrap();
    assert_eq!(packet.header, Header {
        id: 58344,
        query: false,
        opcode: StandardQuery,
        authoritative: false,
        truncated: false,
        recursion_desired: true,
        recursion_available: true,
        response_code: NoError,
        questions: 1,
        answers: 5,
        nameservers: 0,
        additional: 0,
    });
    assert_eq!(packet.questions.len(), 1);
    assert_eq!(packet.questions[0].qtype, QT::MX);
    assert_eq!(packet.questions[0].qclass, QC::IN);
    assert_eq!(&packet.questions[0].qname.to_string()[..],
        "gmail.com");
    assert_eq!(packet.answers.len(), 5);
    // (preference, exchange) for each answer, in wire order
    let items = vec![
        ( 5, "gmail-smtp-in.l.google.com"),
        (10, "alt1.gmail-smtp-in.l.google.com"),
        (40, "alt4.gmail-smtp-in.l.google.com"),
        (20, "alt2.gmail-smtp-in.l.google.com"),
        (30, "alt3.gmail-smtp-in.l.google.com"),
    ];
    for i in 0..5 {
        assert_eq!(&packet.answers[i].name.to_string()[..],
            "gmail.com");
        assert_eq!(packet.answers[i].cls, C::IN);
        assert_eq!(packet.answers[i].ttl, 1148);
        // `*&` was a no-op reborrow; matching on the place directly is
        // equivalent and matches the style of the sibling tests.
        match packet.answers[i].data {
            RRData::MX { preference, exchange } => {
                assert_eq!(preference, items[i].0);
                assert_eq!(exchange.to_string(), (items[i].1).to_string());
            }
            ref x => panic!("Wrong rdata {:?}", x),
        }
    }
}
// AAAA response with a single answer; checks the parsed IPv6 address.
#[test]
fn parse_aaaa_response() {
    let response = b"\xa9\xd9\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\x06\
        google\x03com\x00\x00\x1c\x00\x01\xc0\x0c\x00\x1c\x00\x01\x00\x00\
        \x00\x8b\x00\x10*\x00\x14P@\t\x08\x12\x00\x00\x00\x00\x00\x00 \x0e";
    let packet = Packet::parse(response).unwrap();
    assert_eq!(packet.header, Header {
        id: 43481,
        query: false,
        opcode: StandardQuery,
        authoritative: false,
        truncated: false,
        recursion_desired: true,
        recursion_available: true,
        response_code: NoError,
        questions: 1,
        answers: 1,
        nameservers: 0,
        additional: 0,
    });
    assert_eq!(packet.questions.len(), 1);
    assert_eq!(packet.questions[0].qtype, QT::AAAA);
    assert_eq!(packet.questions[0].qclass, QC::IN);
    assert_eq!(&packet.questions[0].qname.to_string()[..], "google.com");
    assert_eq!(packet.answers.len(), 1);
    assert_eq!(&packet.answers[0].name.to_string()[..], "google.com");
    assert_eq!(packet.answers[0].cls, C::IN);
    assert_eq!(packet.answers[0].ttl, 139);
    match packet.answers[0].data {
        RRData::AAAA(addr) => {
            assert_eq!(addr, Ipv6Addr::new(
                0x2A00, 0x1450, 0x4009, 0x812, 0, 0, 0, 0x200e)
            );
        }
        ref x => panic!("Wrong rdata {:?}", x),
    }
}
// CNAME chain response: first answer is the CNAME, the remaining five
// are A records for the canonical name; nameserver/additional counts
// are asserted via the header but their records are not inspected here.
#[test]
fn parse_cname_response() {
    let response = b"\xfc\x9d\x81\x80\x00\x01\x00\x06\x00\x02\x00\x02\x03\
        cdn\x07sstatic\x03net\x00\x00\x01\x00\x01\xc0\x0c\x00\x05\x00\x01\
        \x00\x00\x00f\x00\x02\xc0\x10\xc0\x10\x00\x01\x00\x01\x00\x00\x00\
        f\x00\x04h\x10g\xcc\xc0\x10\x00\x01\x00\x01\x00\x00\x00f\x00\x04h\
        \x10k\xcc\xc0\x10\x00\x01\x00\x01\x00\x00\x00f\x00\x04h\x10h\xcc\
        \xc0\x10\x00\x01\x00\x01\x00\x00\x00f\x00\x04h\x10j\xcc\xc0\x10\
        \x00\x01\x00\x01\x00\x00\x00f\x00\x04h\x10i\xcc\xc0\x10\x00\x02\
        \x00\x01\x00\x00\x99L\x00\x0b\x08cf-dns02\xc0\x10\xc0\x10\x00\x02\
        \x00\x01\x00\x00\x99L\x00\x0b\x08cf-dns01\xc0\x10\xc0\xa2\x00\x01\
        \x00\x01\x00\x00\x99L\x00\x04\xad\xf5:5\xc0\x8b\x00\x01\x00\x01\x00\
        \x00\x99L\x00\x04\xad\xf5;\x04";
    let packet = Packet::parse(response).unwrap();
    assert_eq!(packet.header, Header {
        id: 64669,
        query: false,
        opcode: StandardQuery,
        authoritative: false,
        truncated: false,
        recursion_desired: true,
        recursion_available: true,
        response_code: NoError,
        questions: 1,
        answers: 6,
        nameservers: 2,
        additional: 2,
    });
    assert_eq!(packet.questions.len(), 1);
    assert_eq!(packet.questions[0].qtype, QT::A);
    assert_eq!(packet.questions[0].qclass, QC::IN);
    assert_eq!(&packet.questions[0].qname.to_string()[..], "cdn.sstatic.net");
    assert_eq!(packet.answers.len(), 6);
    assert_eq!(&packet.answers[0].name.to_string()[..], "cdn.sstatic.net");
    assert_eq!(packet.answers[0].cls, C::IN);
    assert_eq!(packet.answers[0].ttl, 102);
    match packet.answers[0].data {
        RRData::CNAME(cname) => {
            assert_eq!(&cname.to_string(), "sstatic.net");
        }
        ref x => panic!("Wrong rdata {:?}", x),
    }
    // A records for the canonical name, in wire order
    let ips = vec![
        Ipv4Addr::new(104, 16, 103, 204),
        Ipv4Addr::new(104, 16, 107, 204),
        Ipv4Addr::new(104, 16, 104, 204),
        Ipv4Addr::new(104, 16, 106, 204),
        Ipv4Addr::new(104, 16, 105, 204),
    ];
    for i in 1..6 {
        assert_eq!(&packet.answers[i].name.to_string()[..], "sstatic.net");
        assert_eq!(packet.answers[i].cls, C::IN);
        assert_eq!(packet.answers[i].ttl, 102);
        match packet.answers[i].data {
            RRData::A(addr) => {
                assert_eq!(addr, ips[i-1]);
            }
            ref x => panic!("Wrong rdata {:?}", x),
        }
    }
}
}
|
use ast::*;
use scanner::{Token, TokenWithContext};
use std::iter::Peekable;
/// Entry point: turn a scanned token stream into an expression tree.
/// Returns `Ok(None)` for an empty token stream.
pub fn parse(tokens: Vec<TokenWithContext>) -> Result<Option<Expr>, String> {
    // TODO: add recovery
    let mut peekable_tokens = tokens.iter().peekable();
    parse_expression(&mut peekable_tokens)
}
/// Lowest-precedence rule; currently just delegates to equality.
fn parse_expression<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    parse_equality(tokens)
}
/// Generic driver for left-associative binary rules: parses
/// `sub ( op sub )*`, where `map_operator` decides which tokens are
/// operators at this precedence level and `parse_subexpression` parses
/// the next-tighter level. Returns `Ok(None)` when no subexpression
/// could be parsed at all.
fn parse_binary<'a, I>(tokens: &mut Peekable<I>,
                       map_operator: &Fn(&Token) -> Option<Operator>,
                       parse_subexpression: &Fn(&mut Peekable<I>) -> Result<Option<Expr>, String>)
                       -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    let mut expr = match try!(parse_subexpression(tokens)) {
        Some(e) => e,
        None => return Ok(None),
    };
    while let Some(Some(operator)) = tokens.peek().map(|pt| map_operator(&pt.token)) {
        // Consume the operator token; the peek already told us everything.
        let _ = tokens.next();
        let right = match try!(parse_subexpression(tokens)) {
            Some(e) => e,
            // TODO add context
            None => return Err("Expected subexpression".into()),
        };
        expr = Expr::Binary(Box::new(BinaryExpr {
            left: expr,
            operator: operator,
            right: right,
        }));
    }
    Ok(Some(expr))
}
/// Parses `comparison ( ("!=" | "==") comparison )*`.
fn parse_equality<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    fn map_operator(token: &Token) -> Option<Operator> {
        match *token {
            Token::BangEqual => Some(Operator::NotEqual),
            Token::EqualEqual => Some(Operator::Equal),
            _ => None,
        }
    }
    parse_binary(tokens, &map_operator, &parse_comparison)
}
/// Parses `term ( (">" | ">=" | "<" | "<=") term )*`.
fn parse_comparison<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    fn map_operator(token: &Token) -> Option<Operator> {
        match *token {
            Token::Greater => Some(Operator::Greater),
            Token::GreaterEqual => Some(Operator::GreaterEqual),
            Token::Less => Some(Operator::Less),
            Token::LessEqual => Some(Operator::LessEqual),
            _ => None,
        }
    }
    parse_binary(tokens, &map_operator, &parse_term)
}
/// Parses `factor ( ("-" | "+") factor )*`.
fn parse_term<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    fn map_operator(token: &Token) -> Option<Operator> {
        match *token {
            Token::Minus => Some(Operator::Minus),
            Token::Plus => Some(Operator::Plus),
            _ => None,
        }
    }
    parse_binary(tokens, &map_operator, &parse_factor)
}
/// Parses `unary ( ("/" | "*") unary )*`.
fn parse_factor<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    fn map_operator(token: &Token) -> Option<Operator> {
        match *token {
            Token::Slash => Some(Operator::Slash),
            Token::Star => Some(Operator::Star),
            _ => None,
        }
    }
    parse_binary(tokens, &map_operator, &parse_unary)
}
/// Parses `( "-" | "!" ) unary | primary` (prefix operators are
/// right-associative via recursion).
fn parse_unary<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    fn map_operator(token: &Token) -> Option<Operator> {
        match *token {
            Token::Minus => Some(Operator::Minus),
            Token::Bang => Some(Operator::Bang),
            _ => None,
        }
    }
    match tokens.peek().cloned().map(|pt| map_operator(&pt.token)) {
        Some(Some(operator)) => {
            // Consume the prefix operator; the peek told us what it was.
            let _ = tokens.next();
            let right = match try!(parse_unary(tokens)) {
                Some(e) => e,
                // TODO: add context
                None => return Err("Expected right side of unary".into()),
            };
            Ok(Some(Expr::Unary(Box::new(UnaryExpr {
                operator: operator,
                right: right,
            }))))
        }
        _ => parse_primary(tokens),
    }
}
/// Parses a primary expression: literals and parenthesised groupings.
/// Returns `Ok(None)` when the token stream is exhausted.
fn parse_primary<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    if let Some(primary_token) = tokens.next() {
        let parsed_expression = match primary_token.token {
            Token::False => Expr::Literal(Literal::BoolLiteral(false)),
            Token::True => Expr::Literal(Literal::BoolLiteral(true)),
            Token::Nil => Expr::Literal(Literal::NilLiteral),
            Token::NumberLiteral(n) => Expr::Literal(Literal::NumberLiteral(n)),
            Token::StringLiteral(ref s) => Expr::Literal(Literal::StringLiteral(s.clone())),
            Token::LeftParen => {
                let expr;
                {
                    if let Some(e) = try!(parse_expression(tokens)) {
                        expr = e;
                    } else {
                        // TODO add context
                        return Err("Unfinished grouping expression".into());
                    }
                };
                if let Some(token) = tokens.next() {
                    // BUG FIX: a grouping must end with a *closing* paren.
                    // The original compared against Token::LeftParen, so
                    // every well-formed `(expr)` reported "Missing )".
                    if token.token == Token::RightParen {
                        let grouping_expression = Grouping { expr: expr };
                        return Ok(Some(Expr::Grouping(Box::new(grouping_expression))));
                    }
                }
                // TODO: fill with context
                return Err("Missing )".into());
            }
            _ => {
                // TODO: fill with context
                return Err("Unexpected token".into());
            }
        };
        Ok(Some(parsed_expression))
    } else {
        Ok(None)
    }
}
#[cfg(test)]
mod tests {
    use scanner::*;
    use parser::*;
    use pretty_printer::PrettyPrint;
    // Each test scans a source string, parses it, and checks the
    // Lisp-style prefix rendering produced by the pretty printer.
    #[test]
    fn literal() {
        let tokens = scan(&"123".into()).unwrap();
        let expr = parse(tokens).unwrap().unwrap();
        assert_eq!("123", &expr.pretty_print());
    }
    #[test]
    fn binary() {
        let tokens = scan(&"123+456".into()).unwrap();
        let expr = parse(tokens).unwrap().unwrap();
        assert_eq!("(+ 123 456)", &expr.pretty_print());
    }
    #[test]
    fn precedence_add_mul() {
        let tokens = scan(&"123+456*789".into()).unwrap();
        let expr = parse(tokens).unwrap().unwrap();
        assert_eq!("(+ 123 (* 456 789))", &expr.pretty_print());
    }
    #[test]
    fn precedence_mul_add() {
        let tokens = scan(&"123*456+789".into()).unwrap();
        let expr = parse(tokens).unwrap().unwrap();
        assert_eq!("(+ (* 123 456) 789)", &expr.pretty_print());
    }
}
Test for unary operation
use ast::*;
use scanner::{Token, TokenWithContext};
use std::iter::Peekable;
/// Entry point: turn a scanned token stream into an expression tree.
/// Returns `Ok(None)` for an empty token stream.
pub fn parse(tokens: Vec<TokenWithContext>) -> Result<Option<Expr>, String> {
    // TODO: add recovery
    let mut peekable_tokens = tokens.iter().peekable();
    parse_expression(&mut peekable_tokens)
}
/// Lowest-precedence rule; currently just delegates to equality.
fn parse_expression<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    parse_equality(tokens)
}
/// Generic driver for left-associative binary rules: parses
/// `sub ( op sub )*`, where `map_operator` decides which tokens are
/// operators at this precedence level and `parse_subexpression` parses
/// the next-tighter level. Returns `Ok(None)` when no subexpression
/// could be parsed at all.
fn parse_binary<'a, I>(tokens: &mut Peekable<I>,
                       map_operator: &Fn(&Token) -> Option<Operator>,
                       parse_subexpression: &Fn(&mut Peekable<I>) -> Result<Option<Expr>, String>)
                       -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    let mut expr = match try!(parse_subexpression(tokens)) {
        Some(e) => e,
        None => return Ok(None),
    };
    while let Some(Some(operator)) = tokens.peek().map(|pt| map_operator(&pt.token)) {
        // Consume the operator token; the peek already told us everything.
        let _ = tokens.next();
        let right = match try!(parse_subexpression(tokens)) {
            Some(e) => e,
            // TODO add context
            None => return Err("Expected subexpression".into()),
        };
        expr = Expr::Binary(Box::new(BinaryExpr {
            left: expr,
            operator: operator,
            right: right,
        }));
    }
    Ok(Some(expr))
}
/// Parses `comparison ( ("!=" | "==") comparison )*`.
fn parse_equality<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    fn map_operator(token: &Token) -> Option<Operator> {
        match *token {
            Token::BangEqual => Some(Operator::NotEqual),
            Token::EqualEqual => Some(Operator::Equal),
            _ => None,
        }
    }
    parse_binary(tokens, &map_operator, &parse_comparison)
}
/// Parses `term ( (">" | ">=" | "<" | "<=") term )*`.
fn parse_comparison<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    fn map_operator(token: &Token) -> Option<Operator> {
        match *token {
            Token::Greater => Some(Operator::Greater),
            Token::GreaterEqual => Some(Operator::GreaterEqual),
            Token::Less => Some(Operator::Less),
            Token::LessEqual => Some(Operator::LessEqual),
            _ => None,
        }
    }
    parse_binary(tokens, &map_operator, &parse_term)
}
/// Parses `factor ( ("-" | "+") factor )*`.
fn parse_term<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    fn map_operator(token: &Token) -> Option<Operator> {
        match *token {
            Token::Minus => Some(Operator::Minus),
            Token::Plus => Some(Operator::Plus),
            _ => None,
        }
    }
    parse_binary(tokens, &map_operator, &parse_factor)
}
/// Parses `unary ( ("/" | "*") unary )*`.
fn parse_factor<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    fn map_operator(token: &Token) -> Option<Operator> {
        match *token {
            Token::Slash => Some(Operator::Slash),
            Token::Star => Some(Operator::Star),
            _ => None,
        }
    }
    parse_binary(tokens, &map_operator, &parse_unary)
}
/// Parses `( "-" | "!" ) unary | primary` (prefix operators are
/// right-associative via recursion).
fn parse_unary<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    fn map_operator(token: &Token) -> Option<Operator> {
        match *token {
            Token::Minus => Some(Operator::Minus),
            Token::Bang => Some(Operator::Bang),
            _ => None,
        }
    }
    match tokens.peek().cloned().map(|pt| map_operator(&pt.token)) {
        Some(Some(operator)) => {
            // Consume the prefix operator; the peek told us what it was.
            let _ = tokens.next();
            let right = match try!(parse_unary(tokens)) {
                Some(e) => e,
                // TODO: add context
                None => return Err("Expected right side of unary".into()),
            };
            Ok(Some(Expr::Unary(Box::new(UnaryExpr {
                operator: operator,
                right: right,
            }))))
        }
        _ => parse_primary(tokens),
    }
}
/// Parses a primary expression: literals and parenthesised groupings.
/// Returns `Ok(None)` when the token stream is exhausted.
fn parse_primary<'a, I>(tokens: &mut Peekable<I>) -> Result<Option<Expr>, String>
    where I: Iterator<Item = &'a TokenWithContext>
{
    if let Some(primary_token) = tokens.next() {
        let parsed_expression = match primary_token.token {
            Token::False => Expr::Literal(Literal::BoolLiteral(false)),
            Token::True => Expr::Literal(Literal::BoolLiteral(true)),
            Token::Nil => Expr::Literal(Literal::NilLiteral),
            Token::NumberLiteral(n) => Expr::Literal(Literal::NumberLiteral(n)),
            Token::StringLiteral(ref s) => Expr::Literal(Literal::StringLiteral(s.clone())),
            Token::LeftParen => {
                let expr;
                {
                    if let Some(e) = try!(parse_expression(tokens)) {
                        expr = e;
                    } else {
                        // TODO add context
                        return Err("Unfinished grouping expression".into());
                    }
                };
                if let Some(token) = tokens.next() {
                    // BUG FIX: a grouping must end with a *closing* paren.
                    // The original compared against Token::LeftParen, so
                    // every well-formed `(expr)` reported "Missing )".
                    if token.token == Token::RightParen {
                        let grouping_expression = Grouping { expr: expr };
                        return Ok(Some(Expr::Grouping(Box::new(grouping_expression))));
                    }
                }
                // TODO: fill with context
                return Err("Missing )".into());
            }
            _ => {
                // TODO: fill with context
                return Err("Unexpected token".into());
            }
        };
        Ok(Some(parsed_expression))
    } else {
        Ok(None)
    }
}
#[cfg(test)]
mod tests {
    use scanner::*;
    use parser::*;
    use pretty_printer::PrettyPrint;
    // Each test scans a source string, parses it, and checks the
    // Lisp-style prefix rendering produced by the pretty printer.
    #[test]
    fn literal() {
        let tokens = scan(&"123".into()).unwrap();
        let expr = parse(tokens).unwrap().unwrap();
        assert_eq!("123", &expr.pretty_print());
    }
    #[test]
    fn binary() {
        let tokens = scan(&"123+456".into()).unwrap();
        let expr = parse(tokens).unwrap().unwrap();
        assert_eq!("(+ 123 456)", &expr.pretty_print());
    }
    #[test]
    fn precedence_add_mul() {
        let tokens = scan(&"123+456*789".into()).unwrap();
        let expr = parse(tokens).unwrap().unwrap();
        assert_eq!("(+ 123 (* 456 789))", &expr.pretty_print());
    }
    #[test]
    fn precedence_mul_add() {
        let tokens = scan(&"123*456+789".into()).unwrap();
        let expr = parse(tokens).unwrap().unwrap();
        assert_eq!("(+ (* 123 456) 789)", &expr.pretty_print());
    }
    #[test]
    fn precedence_mul_add_unary() {
        let tokens = scan(&"-123*456+789".into()).unwrap();
        let expr = parse(tokens).unwrap().unwrap();
        assert_eq!("(+ (* (- 123) 456) 789)", &expr.pretty_print());
    }
}
|
use std::str;
use std::fmt::Write;
use std::iter::Iterator;
use std::collections::HashMap;
use std::string::String;
use regex::Regex;
use types::*;
lazy_static!{
pub static ref FMT_PAT: Regex = Regex::new(
// 1-ident 2-fill 3-align 4-sign 5-width 6-precision 7-type
r"^([\w\d-_]+)(?::(.)?([<>^])?([+-])?([([\d]+)?(?:\.([\d]+))?([sbcdoxXneEfFgG%])?)?\z")
.unwrap();
// 1-ident 2-fill 3-align 4-width 5-precision
// r"^([\w\d-_]+)(?::(.)?([<>^])?([\d]+)?(?:\.([\d]+))?)?\z").unwrap();
// if align doesn't exist, width == fill + width
}
/// Append `n` copies of `c` to `s`.
fn write_char(s: &mut String, c: char, n: usize) {
    let mut remaining = n;
    while remaining > 0 {
        s.push(c);
        remaining -= 1;
    }
}
// Checks that write_char appends exactly `n` copies of the char.
#[test]
fn test_write_char() {
    let mut s = String::new();
    s.write_str("h ").unwrap();
    write_char(&mut s, 'f', 3);
    assert!(s == "h fff");
}
/// Push chars from `f` onto `s` until `f` is exhausted or `n` chars have
/// been written; returns the number of chars actually written.
fn write_from<'a, I>(s: &mut String, f: I, n: usize) -> usize
    where I: Iterator<Item = char>
{
    let mut written: usize = 0;
    for c in f.take(n) {
        s.push(c);
        written += 1;
    }
    written
}
// Checks that write_from stops at either the iterator's end or `n`.
#[test]
fn test_write_from() {
    let mut s = String::new();
    s.write_str("h ").unwrap();
    write_from(&mut s, "fff".chars(), 5);
    assert!(s == "h fff");
    write_from(&mut s, "xxxx".chars(), 2);
    assert!(s == "h fffxx");
    write_from(&mut s, "333".chars(), 3);
    assert!(s == "h fffxx333");
}
/// True for the alignment specifiers of the format mini-language.
fn is_alignment_token(c: char) -> bool {
    "=<^>".contains(c)
}
/// True for the sign specifiers of the format mini-language.
fn is_sign_element(c: char) -> bool {
    " -+".contains(c)
}
/// True for the presentation-type specifiers accepted in a format spec.
fn is_type_element(c: char) -> bool {
    "bcdoxXneEfFgG%s?".contains(c)
}
// Read a run of ASCII digits starting at `pos`, returning the number of
// bytes consumed and the parsed value. `(0, None)` means no digits;
// `(n, None)` with n > 0 means the digits overflowed i64.
fn get_integer(s: &[u8], pos: usize) -> (usize, Option<i64>) {
    let rest = &s[pos..];
    let mut consumed: usize = 0;
    while consumed < rest.len() && (rest[consumed] as char).is_digit(10) {
        consumed += 1;
    }
    if consumed == 0 {
        return (0, None);
    }
    let digits = &rest[..consumed];
    // SAFETY: `digits` holds only ASCII digit bytes, which are valid UTF-8.
    let parsed = unsafe { str::from_utf8_unchecked(digits) }.parse::<i64>().ok();
    (consumed, parsed)
}
#[derive(Debug)]
/// The format struct as it is defined in the python source
/// (CPython's internal format-spec record). Sentinels: width/precision
/// of -1 mean "not given"; sign '\0' and ty ' ' mean "not given".
struct FmtPy {
    pub fill: char,      // padding character, defaults to ' '
    pub align: char,     // one of '<', '^', '>', '='
    pub alternate: bool, // '#' flag
    pub sign: char,      // '+', '-', ' ' or '\0' when unset
    pub width: i64,      // minimum field width, -1 when unset
    pub thousands: bool, // ',' flag
    pub precision: i64,  // maximum rendered length, -1 when unset
    pub ty: char,        // presentation type, ' ' when unset
}
fn parse_like_python(rest: &str) -> Result<FmtPy> {
/* The rest of this was pretty much strait up copied from python's format parser
All credit goes to python source file: formatter_unicode.c
*/
let mut format = FmtPy {
fill: ' ',
align: '>',
alternate: false,
sign: '\0',
width: -1,
thousands: false,
precision: -1,
ty: ' ',
};
let mut chars = rest.chars();
let fake_fill = match chars.next() {
Some(c) => c,
None => return Ok(format),
};
let (_, rest) = rest.split_at(fake_fill.len_utf8());
// from now on all format characters MUST be valid
// ASCII characters (fill and identifier were the
// only ones that weren't.
// Therefore we can use bytes for the rest
let mut align_specified = false;
let mut fill_specified = false;
let rest = rest.as_bytes();
let end: usize = rest.len();
let mut pos: usize = 0;
/* If the second char is an alignment token,
then parse the fill char */
if end-pos >= 1 && is_alignment_token(rest[0] as char) {
format.align = rest[0] as char;
format.fill = fake_fill;
fill_specified = true;
align_specified = true;
pos += 2;
} else if end-pos == 0 && is_alignment_token(fake_fill) {
format.align = fake_fill;
pos+=1;
}
/* Parse the various sign options */
if end-pos >= 1 && is_sign_element(rest[pos] as char) {
format.sign = rest[pos] as char;
pos+=1;
}
/* If the next character is #, we're in alternate mode. This only
applies to integers. */
if end-pos >= 1 && rest[pos] as char == '#' {
format.alternate = true;
pos+=1;
}
/* The special case for 0-padding (backwards compat) */
if !fill_specified && end-pos >= 1 && rest[pos] == '0' as u8 {
format.fill = '0';
if (!align_specified) {
format.align = '=';
}
pos+=1;
}
// check to make sure that val is good
let (consumed, val) = get_integer(rest, pos);
pos += consumed;
if consumed != 0 {
match val {
None => return Err(FmtError::Invalid("overflow error when parsing width".to_string())),
Some(v) => {
format.width = v;
}
}
}
/* Comma signifies add thousands separators */
if end-pos > 0 && rest[pos] as char == ',' {
format.thousands = true;
pos+=1;
}
/* Parse field precision */
if end-pos > 0 && rest[pos] as char == '.' {
pos+=1;
let (consumed, val) = get_integer(rest, pos);
if consumed != 0 {
match val {
None => return Err(FmtError::Invalid("overflow error when parsing precision"
.to_string())),
Some(v) => {
format.precision = v;
}
}
} else {
/* Not having a precision after a dot is an error. */
if (consumed == 0) {
return Err(FmtError::Invalid("Format specifier missing precision".to_string()));
}
}
}
/* Finally, parse the type field. */
if end-pos > 1 {
/* More than one char remain, invalid format specifier. */
return Err(FmtError::Invalid("Invalid format specifier".to_string()));
}
if end-pos == 1 {
format.ty = rest[pos] as char;
if !is_type_element(format.ty) {
let mut msg = String::new();
write!(msg, "Invalid type specifier: {:?}", format.ty).unwrap();
return Err(FmtError::Invalid(msg));
}
pos+=1;
}
/* Do as much validating as we can, just by looking at the format
specifier. Do not take into account what type of formatting
we're doing (int, float, string). */
if format.thousands {
match format.ty {
'd' | 'e' | 'f' | 'g' | 'E' | 'G' |
'%' | 'F' | '\0' => {}, /* These are allowed. See PEP 378.*/
_ => {
let mut msg = String::new();
write!(msg, "Invalid comma type: {}", format.ty);
return Err(FmtError::Invalid(msg));
}
}
}
Ok(format)
}
impl<'a> FmtChunk<'a> {
    /// Create a `FmtChunk` from the text between `{` and `}` of a format
    /// string, e.g. `"name"` or `"name:>10.2f"`.
    pub fn from_str(s: &'a str) -> Result<FmtChunk> {
        // `no_spec` records whether the chunk ended without a ':'
        let mut no_spec = false;
        let mut chars = s.chars();
        let mut c = match chars.next() {
            Some(':') | None => return Err(
                FmtError::Invalid("must specify identifier".to_string())),
            Some(c) => c,
        };
        let mut consumed = 0;
        // find the identifier: everything up to (and including) the ':'
        // or the end of the chunk
        loop {
            consumed += c.len_utf8();
            if c == ':' {
                break;
            }
            c = match chars.next() {
                Some(c) => c,
                None => {
                    no_spec = true;
                    break;
                }
            };
        }
        let (identifier, rest) = s.split_at(consumed);
        // BUG FIX: only strip the trailing ':' when one was actually
        // found; previously a bare identifier ("{name}") lost its last
        // character because this flag was computed but never consulted.
        let identifier = if no_spec {
            identifier
        } else {
            identifier.split_at(identifier.len() - 1).0 // drop the ':'
        };
        let format = try!(parse_like_python(rest));
        Ok(FmtChunk{
            identifier: identifier,
            fill: format.fill,
            align: match format.align {
                '<' => Align::Left,
                '^' => Align::Center,
                '>' => Align::Right,
                '=' => Align::Equal,
                _ => unreachable!(),
            },
            alternate: format.alternate,
            width: match format.width {
                -1 => None,
                _ => Some(format.width as usize),
            },
            thousands: format.thousands,
            precision: match format.precision {
                -1 => None,
                _ => Some(format.precision as usize),
            },
            ty: match format.ty {
                ' ' => None,
                _ => Some(format.ty),
            },
        })
    }
    /// Look up `self.identifier` in `vars` and write its value onto `s`,
    /// applying precision truncation and fill/alignment padding.
    /// Returns `FmtError::KeyError` when the identifier is not in `vars`.
    pub fn write(&self, s: &mut String, vars: &'a HashMap<String, String>) -> Result<()> {
        let value = match vars.get(self.identifier) {
            Some(v) => v,
            None => {
                return Err(FmtError::KeyError(self.identifier.to_string()));
            }
        };
        // effective length after precision truncation
        // NOTE(review): this is the byte length, while padding below is
        // counted in chars — padding width may be off for non-ASCII
        // values; confirm intended behaviour.
        let len = match self.precision {
            None => value.len(),
            Some(p) => {
                if p < value.len() {
                    p
                } else {
                    value.len()
                }
            }
        };
        let mut value = value.chars();
        // fill chars still owed after the value has been written
        let mut pad: usize = 0;
        match self.width {
            Some(mut width) => {
                if width > len {
                    match self.align {
                        Align::Left => pad = width - len,
                        Align::Center => {
                            width = width - len;
                            pad = width / 2;
                            write_char(s, self.fill, pad);
                            pad += width % 2;
                        }
                        Align::Right => {
                            write_char(s, self.fill, width - len);
                        }
                        Align::Equal => panic!("not yet supported"), // TODO
                    }
                }
            }
            None => {}
        }
        if self.precision.is_none() {
            s.extend(value);
        } else {
            write_from(s, &mut value, self.precision.unwrap());
        }
        write_char(s, self.fill, pad);
        Ok(())
    }
}
/// UNSTABLE: rust-style format a string given a HashMap of the variables and additional options
/// variables:
/// ignore_missing: if true, ignore missing variables
pub fn strfmt_options(fmtstr: &str, vars: &HashMap<String, String>, ignore_missing: bool) -> Result<String> {
    let mut out = String::with_capacity(fmtstr.len() * 2);
    // bytes consumed so far; reset to 0 after each completed chunk so
    // that offsets stay relative to `remaining`
    let mut bytes_read: usize = 0;
    // byte offset of the most recent '{' (relative to `remaining`)
    let mut opening_brace: usize = 0;
    // true while the previous char was an unmatched '}'
    let mut closing_brace: bool = false;
    // true while scanning between '{' and '}'
    let mut reading_fmt = false;
    // unprocessed tail of `fmtstr`; re-sliced after every chunk
    let mut remaining = fmtstr;
    for c in fmtstr.chars() {
        bytes_read += c.len_utf8();
        if c == '{' {
            // NOTE(review): the "{{" test compares offsets that are both
            // reset after a chunk is emitted — appears to assume escapes
            // and chunks do not interleave pathologically; confirm.
            if reading_fmt && opening_brace == bytes_read - 2 {
                // found {{
                out.push(c);
                reading_fmt = false;
            } else if !reading_fmt {
                // found a first {
                reading_fmt = true;
                opening_brace = bytes_read - 1;
            } else {
                // found a { after finding an opening brace, error!
                out.clear();
                out.write_str("extra { found").unwrap();
                return Err(FmtError::Invalid(out));
            }
        } else if c == '}' {
            if !reading_fmt && !closing_brace {
                // found a '}' that isn't after a '{'
                closing_brace = true;
            } else if closing_brace {
                // found "}}"
                out.push(c);
                closing_brace = false;
            } else {
                // found a format string
                // discard before opening brace
                let (_, r) = remaining.split_at(opening_brace);
                // get the fmt pattern and remaining
                let (fmt_pattern, r) = r.split_at(bytes_read - opening_brace);
                remaining = r;
                // discard the braces
                let (_, fmt_pattern) = fmt_pattern.split_at(1);
                let (fmt_pattern, _) = fmt_pattern.split_at(fmt_pattern.len() - 1);
                // use the FmtChunk object to write the formatted string
                let fmt = try!(FmtChunk::from_str(fmt_pattern));
                match fmt.write(&mut out, vars) {
                    Ok(_) => {},
                    // on a missing key, optionally emit the chunk verbatim
                    Err(err) => match ignore_missing {
                        true => write!(out, "{{{}}}", fmt_pattern).unwrap(),
                        false => return Err(err),
                    }
                }
                reading_fmt = false;
                bytes_read = 0;
            }
        } else if closing_brace {
            return Err(FmtError::Invalid("Single '}' encountered in format string".to_string()));
        } else if !reading_fmt {
            out.push(c)
        } // else we are currently reading a format string, so don't push
    }
    if closing_brace {
        return Err(FmtError::Invalid("Single '}' encountered in format string".to_string()));
    } else if reading_fmt {
        return Err(FmtError::Invalid("Expected '}' before end of string".to_string()));
    }
    out.shrink_to_fit();
    Ok(out)
}
fixed identifier
use std::str;
use std::fmt::Write;
use std::iter::Iterator;
use std::collections::HashMap;
use std::string::String;
use regex::Regex;
use types::*;
lazy_static!{
pub static ref FMT_PAT: Regex = Regex::new(
// 1-ident 2-fill 3-align 4-sign 5-width 6-precision 7-type
r"^([\w\d-_]+)(?::(.)?([<>^])?([+-])?([([\d]+)?(?:\.([\d]+))?([sbcdoxXneEfFgG%])?)?\z")
.unwrap();
// 1-ident 2-fill 3-align 4-width 5-precision
// r"^([\w\d-_]+)(?::(.)?([<>^])?([\d]+)?(?:\.([\d]+))?)?\z").unwrap();
// if align doesn't exist, width == fill + width
}
/// Append `n` copies of `c` to `s`.
fn write_char(s: &mut String, c: char, n: usize) {
    let mut remaining = n;
    while remaining > 0 {
        s.push(c);
        remaining -= 1;
    }
}
// Checks that write_char appends exactly `n` copies of the char.
#[test]
fn test_write_char() {
    let mut s = String::new();
    s.write_str("h ").unwrap();
    write_char(&mut s, 'f', 3);
    assert!(s == "h fff");
}
/// Push chars from `f` onto `s` until `f` is exhausted or `n` chars have
/// been written; returns the number of chars actually written.
fn write_from<'a, I>(s: &mut String, f: I, n: usize) -> usize
    where I: Iterator<Item = char>
{
    let mut written: usize = 0;
    for c in f.take(n) {
        s.push(c);
        written += 1;
    }
    written
}
// Checks that write_from stops at either the iterator's end or `n`.
#[test]
fn test_write_from() {
    let mut s = String::new();
    s.write_str("h ").unwrap();
    write_from(&mut s, "fff".chars(), 5);
    assert!(s == "h fff");
    write_from(&mut s, "xxxx".chars(), 2);
    assert!(s == "h fffxx");
    write_from(&mut s, "333".chars(), 3);
    assert!(s == "h fffxx333");
}
/// True for the format-spec alignment characters `=`, `<`, `^`, `>`.
fn is_alignment_token(c: char) -> bool {
    c == '=' || c == '<' || c == '^' || c == '>'
}
/// True for the format-spec sign options: space, `-`, `+`.
fn is_sign_element(c: char) -> bool {
    c == ' ' || c == '-' || c == '+'
}
/// True for the recognised presentation-type characters
/// (string, int, float and debug variants).
fn is_type_element(c: char) -> bool {
    "bcdoxXneEfFgG%s?".contains(c)
}
// get an integer from pos, returning the number of bytes
// consumed and the integer
//
// Returns `(consumed, value)`: `value` is `None` both when no digits were
// found (`consumed == 0`) and when the digit run overflows `i64`; callers
// distinguish the two cases via `consumed`.
fn get_integer(s: &[u8], pos: usize) -> (usize, Option<i64>) {
    let (_, rest) = s.split_at(pos);
    // Count the run of leading ASCII digit bytes.
    let consumed = rest.iter()
        .take_while(|b| (**b as char).is_digit(10))
        .count();
    if consumed == 0 {
        (0, None)
    } else {
        let (intstr, _) = rest.split_at(consumed);
        // BUGFIX(cleanup): the bytes are all ASCII digits, so UTF-8
        // validation cannot fail — use the safe `from_utf8` instead of the
        // old `unsafe { from_utf8_unchecked }`. `parse` can still fail on
        // i64 overflow, which maps to `None`.
        let val = str::from_utf8(intstr)
            .ok()
            .and_then(|digits| digits.parse::<i64>().ok());
        (consumed, val)
    }
}
#[derive(Debug)]
/// The format struct as it is defined in the python source
/// (CPython's Python/formatter_unicode.c). Sentinel values mark unset
/// fields: `-1` for the integers, `'\0'` for sign, `' '` for type.
struct FmtPy {
    // Padding character (default: space).
    pub fill: char,
    // One of '<', '>', '^', '=' (default: right-align).
    pub align: char,
    // '#' alternate-form flag (integers only).
    pub alternate: bool,
    // '+', '-' or ' '; '\0' when unspecified.
    pub sign: char,
    // Minimum field width; -1 when unspecified.
    pub width: i64,
    // ',' thousands-separator flag.
    pub thousands: bool,
    // Digits after the '.'; -1 when unspecified.
    pub precision: i64,
    // Presentation type (e.g. 'd', 'f', 's'); ' ' when unspecified.
    pub ty: char,
}
fn parse_like_python(rest: &str) -> Result<FmtPy> {
/* The rest of this was pretty much strait up copied from python's format parser
All credit goes to python source file: formatter_unicode.c
*/
let mut format = FmtPy {
fill: ' ',
align: '>',
alternate: false,
sign: '\0',
width: -1,
thousands: false,
precision: -1,
ty: ' ',
};
let mut chars = rest.chars();
let fake_fill = match chars.next() {
Some(c) => c,
None => return Ok(format),
};
let (_, rest) = rest.split_at(fake_fill.len_utf8());
// from now on all format characters MUST be valid
// ASCII characters (fill and identifier were the
// only ones that weren't.
// Therefore we can use bytes for the rest
let mut align_specified = false;
let mut fill_specified = false;
let rest = rest.as_bytes();
let end: usize = rest.len();
let mut pos: usize = 0;
/* If the second char is an alignment token,
then parse the fill char */
if end-pos >= 1 && is_alignment_token(rest[0] as char) {
format.align = rest[0] as char;
format.fill = fake_fill;
fill_specified = true;
align_specified = true;
pos += 2;
} else if end-pos == 0 && is_alignment_token(fake_fill) {
format.align = fake_fill;
pos+=1;
}
/* Parse the various sign options */
if end-pos >= 1 && is_sign_element(rest[pos] as char) {
format.sign = rest[pos] as char;
pos+=1;
}
/* If the next character is #, we're in alternate mode. This only
applies to integers. */
if end-pos >= 1 && rest[pos] as char == '#' {
format.alternate = true;
pos+=1;
}
/* The special case for 0-padding (backwards compat) */
if !fill_specified && end-pos >= 1 && rest[pos] == '0' as u8 {
format.fill = '0';
if (!align_specified) {
format.align = '=';
}
pos+=1;
}
// check to make sure that val is good
let (consumed, val) = get_integer(rest, pos);
pos += consumed;
if consumed != 0 {
match val {
None => return Err(FmtError::Invalid("overflow error when parsing width".to_string())),
Some(v) => {
format.width = v;
}
}
}
/* Comma signifies add thousands separators */
if end-pos > 0 && rest[pos] as char == ',' {
format.thousands = true;
pos+=1;
}
/* Parse field precision */
if end-pos > 0 && rest[pos] as char == '.' {
pos+=1;
let (consumed, val) = get_integer(rest, pos);
if consumed != 0 {
match val {
None => return Err(FmtError::Invalid("overflow error when parsing precision"
.to_string())),
Some(v) => {
format.precision = v;
}
}
} else {
/* Not having a precision after a dot is an error. */
if (consumed == 0) {
return Err(FmtError::Invalid("Format specifier missing precision".to_string()));
}
}
}
/* Finally, parse the type field. */
if end-pos > 1 {
/* More than one char remain, invalid format specifier. */
return Err(FmtError::Invalid("Invalid format specifier".to_string()));
}
if end-pos == 1 {
format.ty = rest[pos] as char;
if !is_type_element(format.ty) {
let mut msg = String::new();
write!(msg, "Invalid type specifier: {:?}", format.ty).unwrap();
return Err(FmtError::Invalid(msg));
}
pos+=1;
}
/* Do as much validating as we can, just by looking at the format
specifier. Do not take into account what type of formatting
we're doing (int, float, string). */
if format.thousands {
match format.ty {
'd' | 'e' | 'f' | 'g' | 'E' | 'G' |
'%' | 'F' | '\0' => {}, /* These are allowed. See PEP 378.*/
_ => {
let mut msg = String::new();
write!(msg, "Invalid comma type: {}", format.ty);
return Err(FmtError::Invalid(msg));
}
}
}
Ok(format)
}
impl<'a> FmtChunk<'a> {
    /// Create a `FmtChunk` from a single format pattern — the text between
    /// `{` and `}`, e.g. `"name:>10.2f"`.
    ///
    /// The identifier is everything up to the first `:`; the remainder is
    /// parsed python-style by `parse_like_python`. Errors on an empty
    /// identifier or a malformed spec.
    pub fn from_str(s: &'a str) -> Result<FmtChunk> {
        let mut found_colon = false;
        let mut chars = s.chars();
        let mut c = match chars.next() {
            Some(':') | None => return Err(
                FmtError::Invalid("must specify identifier".to_string())),
            Some(c) => c,
        };
        // Byte length of the identifier, including the ':' when present.
        let mut consumed = 0;
        // find the identifier
        loop {
            consumed += c.len_utf8();
            if c == ':' {
                found_colon = true;
                break;
            }
            c = match chars.next() {
                Some(c) => c,
                None => {
                    break;
                }
            };
        }
        let (identifier, rest) = s.split_at(consumed);
        // BUGFIX: removed a stray debug `println!` that leaked parser state
        // to stdout on every pattern parsed.
        let identifier = if found_colon {
            let (i, _) = identifier.split_at(identifier.len() - 1); // get rid of ':'
            i
        } else {
            identifier
        };
        let format = try!(parse_like_python(rest));
        Ok(FmtChunk{
            identifier: identifier,
            fill: format.fill,
            align: match format.align {
                '<' => Align::Left,
                '^' => Align::Center,
                '>' => Align::Right,
                '=' => Align::Equal,
                _ => unreachable!(),
            },
            alternate: format.alternate,
            // -1 / ' ' are parse_like_python's "unset" sentinels.
            width: match format.width {
                -1 => None,
                _ => Some(format.width as usize),
            },
            thousands: format.thousands,
            precision: match format.precision {
                -1 => None,
                _ => Some(format.precision as usize),
            },
            ty: match format.ty {
                ' ' => None,
                _ => Some(format.ty),
            },
        })
    }

    /// Look up this chunk's identifier in `vars` and append the padded,
    /// precision-truncated value to `s`.
    ///
    /// Errors with `FmtError::KeyError` when the identifier is not present.
    /// (DOCFIX: the old comment claimed a bool return; this returns
    /// `Result<()>`.)
    pub fn write(&self, s: &mut String, vars: &'a HashMap<String, String>) -> Result<()> {
        let ref value = match vars.get(self.identifier) {
            Some(v) => v,
            None => {
                return Err(FmtError::KeyError(self.identifier.to_string()));
            }
        };
        // Displayed length, capped by the precision when one was given.
        // NOTE(review): lengths are byte counts (`String::len`) while the
        // value is written per-char below — padding math is off for
        // multi-byte UTF-8 values; confirm whether ASCII values are assumed.
        let len = match self.precision {
            None => value.len(),
            Some(p) => {
                if p < value.len() {
                    p
                } else {
                    value.len()
                }
            }
        };
        let mut value = value.chars();
        // `pad` holds whatever fill must be appended *after* the value.
        let mut pad: usize = 0;
        match self.width {
            Some(mut width) => {
                if width > len {
                    match self.align {
                        Align::Left => pad = width - len,
                        Align::Center => {
                            width = width - len;
                            pad = width / 2;
                            write_char(s, self.fill, pad);
                            // the odd leftover column goes on the right
                            pad += width % 2;
                        }
                        Align::Right => {
                            write_char(s, self.fill, width - len);
                        }
                        Align::Equal => panic!("not yet supported"), // TODO
                    }
                }
            }
            None => {}
        }
        if self.precision.is_none() {
            s.extend(value);
        } else {
            write_from(s, &mut value, self.precision.unwrap());
        }
        write_char(s, self.fill, pad);
        Ok(())
    }
}
/// UNSTABLE: rust-style format a string given a HashMap of the variables and additional options
///
/// Scans `fmtstr` once, copying literal text to the output and replacing each
/// `{identifier[:spec]}` pattern with its formatted value from `vars`.
/// `{{` and `}}` are escapes for literal braces. Returns `FmtError::Invalid`
/// on unbalanced braces; when `ignore_missing` is true, a pattern that fails
/// to format is copied through verbatim as `{pattern}` instead of aborting.
/// NOTE(review): `ignore_missing` swallows *all* write errors, not only
/// missing keys — confirm that is intended.
pub fn strfmt_options(fmtstr: &str, vars: &HashMap<String, String>, ignore_missing: bool) -> Result<String> {
    let mut out = String::with_capacity(fmtstr.len() * 2);
    // The offsets below index into `remaining`, which is re-split (and
    // `bytes_read` reset to 0) each time a complete pattern is consumed.
    let mut bytes_read: usize = 0;      // bytes seen since last pattern end
    let mut opening_brace: usize = 0;   // offset of the current '{'
    let mut closing_brace: bool = false; // previous char was a lone '}'
    let mut reading_fmt = false;         // currently inside `{...}`
    let mut remaining = fmtstr;
    for c in fmtstr.chars() {
        bytes_read += c.len_utf8();
        if c == '{' {
            if reading_fmt && opening_brace == bytes_read - 2 {
                // found {{  (two '{' in a row): emit one literal '{'
                out.push(c);
                reading_fmt = false;
            } else if !reading_fmt {
                // found a first {
                reading_fmt = true;
                opening_brace = bytes_read - 1;
            } else {
                // found a { after finding an opening brace, error!
                out.clear();
                out.write_str("extra { found").unwrap();
                return Err(FmtError::Invalid(out));
            }
        } else if c == '}' {
            if !reading_fmt && !closing_brace {
                // found a '}' that isn't after a '{'
                // (may still become a "}}" escape on the next char)
                closing_brace = true;
            } else if closing_brace {
                // found "}}"
                out.push(c);
                closing_brace = false;
            } else {
                // found a format string
                // discard before opening brace
                let (_, r) = remaining.split_at(opening_brace);
                // get the fmt pattern and remaining
                let (fmt_pattern, r) = r.split_at(bytes_read - opening_brace);
                remaining = r;
                // discard the braces
                let (_, fmt_pattern) = fmt_pattern.split_at(1);
                let (fmt_pattern, _) = fmt_pattern.split_at(fmt_pattern.len() - 1);
                // use the FmtChunk object to write the formatted string
                let fmt = try!(FmtChunk::from_str(fmt_pattern));
                match fmt.write(&mut out, vars) {
                    Ok(_) => {},
                    Err(err) => match ignore_missing {
                        // re-emit the original pattern, braces included
                        true => write!(out, "{{{}}}", fmt_pattern).unwrap(),
                        false => return Err(err),
                    }
                }
                reading_fmt = false;
                // offsets now restart relative to the new `remaining`
                bytes_read = 0;
            }
        } else if closing_brace {
            return Err(FmtError::Invalid("Single '}' encountered in format string".to_string()));
        } else if !reading_fmt {
            out.push(c)
        } // else we are currently reading a format string, so don't push
    }
    // End-of-input validation: a pending lone '}' or an unclosed '{'.
    if closing_brace {
        return Err(FmtError::Invalid("Single '}' encountered in format string".to_string()));
    } else if reading_fmt {
        return Err(FmtError::Invalid("Expected '}' before end of string".to_string()));
    }
    out.shrink_to_fit();
    Ok(out)
}
|
use std::str::from_utf8;
use nom::IResult;
#[derive(Debug,PartialEq,Eq)]
/// The fixed-width POSIX tar header fields, each borrowed from the input.
///
/// NOTE(review): the numeric fields (mode/uid/gid/size/mtime/chksum) are
/// kept as raw field text — presumably NUL/space-padded octal per the tar
/// spec; no decoding is done here.
pub struct PosixHeader<'a> {
    pub name: & 'a str,
    pub mode: & 'a str,
    pub uid: & 'a str,
    pub gid: & 'a str,
    pub size: & 'a str,
    pub mtime: & 'a str,
    pub chksum: & 'a str,
    // Single-byte entry type indicator.
    pub typeflag: char,
    pub linkname: & 'a str,
    // Present when the archive carries the UStar extension block
    // (always None in this version — see parse_header's TODO).
    pub ustar: Option<UStarHeader<'a>>
}
#[derive(Debug,PartialEq,Eq)]
/// The UStar extension fields that follow the basic POSIX header fields.
pub struct UStarHeader<'a> {
    pub magic: & 'a str,    // magic marker field
    pub version: & 'a str,
    pub uname: & 'a str,    // symbolic owner name
    pub gname: & 'a str,    // symbolic group name
    pub devmajor: & 'a str,
    pub devminor: & 'a str,
    pub prefix: & 'a str,   // path prefix for long file names
}
#[derive(Debug,PartialEq,Eq)]
/// One archive member: its header plus (not yet implemented) contents.
pub struct TarEntry<'a> {
    pub header: PosixHeader<'a>,
    // Always "" for now — see the TODO in parse_entry.
    pub contents: & 'a str
}
/// Parse the fixed-width POSIX tar header fields from `i`.
///
/// Field widths: name[100] mode[8] uid[8] gid[8] size[12] mtime[12]
/// chksum[8] typeflag[1] linkname[100] (257 bytes total). `map_res!` +
/// `from_utf8` fails the parse when a field is not valid UTF-8.
fn parse_header(i: &[u8]) -> IResult<&[u8], PosixHeader> {
    chain!(i,
        name: map_res!(take!(100), from_utf8) ~
        mode: map_res!(take!(8), from_utf8) ~
        uid: map_res!(take!(8), from_utf8) ~
        gid: map_res!(take!(8), from_utf8) ~
        size: map_res!(take!(12), from_utf8) ~
        mtime: map_res!(take!(12), from_utf8) ~
        chksum: map_res!(take!(8), from_utf8) ~
        typeflag: take!(1) ~
        linkname: map_res!(take!(100), from_utf8),
        /* TODO: ustar */
        ||{
            PosixHeader {
                name: name,
                mode: mode,
                uid: uid,
                gid: gid,
                size: size,
                mtime: mtime,
                chksum: chksum,
                typeflag: typeflag[0] as char,
                linkname: linkname,
                ustar: None
            }
        }
    )
}
/// Parse a single archive entry. Currently only the header is consumed;
/// file contents are not read yet (see the TODO below).
fn parse_entry(i: &[u8]) -> IResult<&[u8], TarEntry> {
    chain!(i,
        header: parse_header,
        /* TODO: contents */
        ||{
            TarEntry {
                header: header,
                contents: ""
            }
        }
    )
}
/// Parse consecutive tar entries until the input stops matching.
/// NOTE(review): does not skip padding to 512-byte blocks or the trailing
/// zero blocks — confirm this is intended at this stage.
pub fn parse_tar(i: &[u8]) -> IResult<&[u8], Vec<TarEntry>> {
    many0!(i, parse_entry)
}
parse ustar header
Signed-off-by: Marc-Antoine Perennou <07f76cf0511c79b361712839686f3cee8c75791c@Perennou.com>
use std::str::from_utf8;
use nom::IResult;
#[derive(Debug,PartialEq,Eq)]
/// The fixed-width POSIX tar header fields, each borrowed from the input.
///
/// NOTE(review): the numeric fields (mode/uid/gid/size/mtime/chksum) are
/// kept as raw field text — presumably NUL/space-padded octal per the tar
/// spec; no decoding is done here.
pub struct PosixHeader<'a> {
    pub name: & 'a str,
    pub mode: & 'a str,
    pub uid: & 'a str,
    pub gid: & 'a str,
    pub size: & 'a str,
    pub mtime: & 'a str,
    pub chksum: & 'a str,
    // Single-byte entry type indicator.
    pub typeflag: char,
    pub linkname: & 'a str,
    // Present when the archive carries the UStar extension block.
    pub ustar: Option<UStarHeader<'a>>
}
#[derive(Debug,PartialEq,Eq)]
/// The UStar extension fields that follow the basic POSIX header fields.
pub struct UStarHeader<'a> {
    pub magic: & 'a str,    // magic marker field
    pub version: & 'a str,
    pub uname: & 'a str,    // symbolic owner name
    pub gname: & 'a str,    // symbolic group name
    pub devmajor: & 'a str,
    pub devminor: & 'a str,
    pub prefix: & 'a str,   // path prefix for long file names
}
#[derive(Debug,PartialEq,Eq)]
/// One archive member: its header plus (not yet implemented) contents.
pub struct TarEntry<'a> {
    pub header: PosixHeader<'a>,
    // Always "" for now — see the TODO in parse_entry.
    pub contents: & 'a str
}
/// Parse the UStar extension fields (magic, version, owner names, device
/// numbers and path prefix) that directly follow the POSIX header fields.
///
/// Yields `Some(UStarHeader)` only when the magic field matches; older
/// pre-POSIX archives yield `None` (the bytes are consumed either way).
fn parse_ustar(i: &[u8]) -> IResult<&[u8], Option<UStarHeader>> {
    chain!(i,
        magic: map_res!(take!(6), from_utf8) ~
        version: map_res!(take!(2), from_utf8) ~
        uname: map_res!(take!(32), from_utf8) ~
        gname: map_res!(take!(32), from_utf8) ~
        devmajor: map_res!(take!(8), from_utf8) ~
        devminor: map_res!(take!(8), from_utf8) ~
        prefix: map_res!(take!(155), from_utf8),
        ||{
            match magic {
                // BUGFIX: `take!(6)` yields the full 6-byte field, which per
                // POSIX is the NUL-terminated "ustar\0" — the old comparison
                // against the 5-char "ustar" could never match, so `ustar`
                // was always `None`.
                "ustar\0" => Some(UStarHeader{
                    magic: magic,
                    version: version,
                    uname: uname,
                    gname: gname,
                    devmajor: devmajor,
                    devminor: devminor,
                    prefix: prefix
                }),
                _ => None,
            }
        }
    )
}
/// Parse a full tar header: the POSIX fields followed by the UStar
/// extension block (which may legitimately be absent → `ustar: None`).
///
/// Field widths: name[100] mode[8] uid[8] gid[8] size[12] mtime[12]
/// chksum[8] typeflag[1] linkname[100], then the ustar fields.
fn parse_header(i: &[u8]) -> IResult<&[u8], PosixHeader> {
    chain!(i,
        name: map_res!(take!(100), from_utf8) ~
        mode: map_res!(take!(8), from_utf8) ~
        uid: map_res!(take!(8), from_utf8) ~
        gid: map_res!(take!(8), from_utf8) ~
        size: map_res!(take!(12), from_utf8) ~
        mtime: map_res!(take!(12), from_utf8) ~
        chksum: map_res!(take!(8), from_utf8) ~
        typeflag: take!(1) ~
        linkname: map_res!(take!(100), from_utf8) ~
        ustar: parse_ustar,
        ||{
            PosixHeader {
                name: name,
                mode: mode,
                uid: uid,
                gid: gid,
                size: size,
                mtime: mtime,
                chksum: chksum,
                typeflag: typeflag[0] as char,
                linkname: linkname,
                ustar: ustar
            }
        }
    )
}
/// Parse a single archive entry. Currently only the header is consumed;
/// file contents are not read yet (see the TODO below).
fn parse_entry(i: &[u8]) -> IResult<&[u8], TarEntry> {
    chain!(i,
        header: parse_header,
        /* TODO: contents */
        ||{
            TarEntry {
                header: header,
                contents: ""
            }
        }
    )
}
/// Parse consecutive tar entries until the input stops matching.
/// NOTE(review): does not skip padding to 512-byte blocks or the trailing
/// zero blocks — confirm this is intended at this stage.
pub fn parse_tar(i: &[u8]) -> IResult<&[u8], Vec<TarEntry>> {
    many0!(i, parse_entry)
}
|
use std::char;
use std::collections::{TreeMap, HashSet};
use std::num::FromStrRadix;
use std::str;
use {Array, Table, Value, String, Float, Integer, Boolean, Datetime};
/// Parser for converting a string to a TOML `Value` instance.
///
/// This parser contains the string slice that is being parsed, and exports the
/// list of errors which have occurred during parsing.
pub struct Parser<'a> {
    // The complete TOML document being parsed.
    input: &'a str,
    // Cursor of (byte offset, char) pairs; clones of this iterator are used
    // throughout for lookahead without consuming input.
    cur: str::CharOffsets<'a>,
    // Fully-qualified names of [table] sections already defined, used to
    // reject duplicate table definitions.
    tables_defined: HashSet<String>,
    /// A list of all errors which have occurred during parsing.
    ///
    /// Not all parse errors are fatal, so this list is added to as much as
    /// possible without aborting parsing. If `None` is returned by `parse`, it
    /// is guaranteed that this list is not empty.
    pub errors: Vec<Error>,
}
/// A structure representing a parse error.
///
/// The data in this structure can be used to trace back to the original cause
/// of the error in order to provide diagnostics about parse errors.
#[deriving(Show)]
pub struct Error {
/// The low byte at which this error is pointing at.
pub lo: uint,
/// One byte beyond the last character at which this error is pointing at.
pub hi: uint,
/// A human-readable description explaining what the error is.
pub desc: String,
}
impl<'a> Parser<'a> {
/// Creates a new parser for a string.
///
/// The parser can be executed by invoking the `parse` method.
///
/// # Example
///
/// ```
/// let toml = r#"
/// [test]
/// foo = "bar"
/// "#;
///
/// let mut parser = toml::Parser::new(toml);
/// match parser.parse() {
/// Some(value) => println!("found toml: {}", value),
/// None => {
/// println!("parse errors: {}", parser.errors);
/// }
/// }
/// ```
pub fn new(s: &'a str) -> Parser<'a> {
Parser {
input: s,
cur: s.char_indices(),
errors: Vec::new(),
tables_defined: HashSet::new(),
}
}
/// Converts a byte offset from an error message to a (line, column) pair
///
/// All indexes are 0-based.
pub fn to_linecol(&self, offset: uint) -> (uint, uint) {
let mut cur = 0;
for (i, line) in self.input.lines().enumerate() {
if cur + line.len() > offset {
return (i, offset - cur)
}
cur += line.len() + 1;
}
return (self.input.lines().count(), 0)
}
    // Byte offset of the next unconsumed character, or the input length when
    // the cursor is exhausted. Pure lookahead — does not advance the cursor.
    fn next_pos(&self) -> uint {
        self.cur.clone().next().map(|p| p.val0()).unwrap_or(self.input.len())
    }
    // Consume the next character iff it equals `ch`, returning whether it was
    // consumed. Lookahead happens on a clone, so a non-match consumes nothing.
    fn eat(&mut self, ch: char) -> bool {
        match self.cur.clone().next() {
            Some((_, c)) if c == ch => { self.cur.next(); true }
            Some(_) | None => false,
        }
    }
    // Like `eat`, but records an "expected `ch`" error pointing at the next
    // character (or EOF) when `ch` is not present. Returns false on mismatch.
    fn expect(&mut self, ch: char) -> bool {
        if self.eat(ch) { return true }
        let mut it = self.cur.clone();
        // lo..hi spans the unexpected character; both collapse to input.len()
        // at end of input.
        let lo = it.next().map(|p| p.val0()).unwrap_or(self.input.len());
        let hi = it.next().map(|p| p.val0()).unwrap_or(self.input.len());
        self.errors.push(Error {
            lo: lo,
            hi: hi,
            desc: match self.cur.clone().next() {
                Some((_, c)) => format!("expected `{}`, but found `{}`", ch, c),
                None => format!("expected `{}`, but found eof", ch)
            }
        });
        false
    }
    // Skip spaces and tabs. Newlines are significant in TOML and are handled
    // by the callers, so they are deliberately not consumed here.
    fn ws(&mut self) {
        loop {
            match self.cur.clone().next() {
                Some((_, '\t')) |
                Some((_, ' ')) => { self.cur.next(); }
                _ => break,
            }
        }
    }
    // If positioned at a `#`, consume through the end of the line (the
    // terminating `\n` is consumed as well); otherwise a no-op.
    fn comment(&mut self) {
        match self.cur.clone().next() {
            Some((_, '#')) => {}
            _ => return,
        }
        for (_, ch) in self.cur {
            if ch == '\n' { break }
        }
    }
/// Executes the parser, parsing the string contained within.
///
/// This function will return the `Table` instance if parsing is successful,
/// or it will return `None` if any parse error or invalid TOML error
/// occurs.
///
/// If an error occurs, the `errors` field of this parser can be consulted
/// to determine the cause of the parse failure.
pub fn parse(&mut self) -> Option<Table> {
let mut ret = TreeMap::new();
loop {
self.ws();
match self.cur.clone().next() {
Some((_, '#')) => { self.comment(); }
Some((_, '\n')) |
Some((_, '\r')) => { self.cur.next(); }
Some((start, '[')) => {
self.cur.next();
let array = self.eat('[');
let mut section = String::new();
for (pos, ch) in self.cur {
if ch == ']' { break }
if ch == '[' {
self.errors.push(Error {
lo: pos,
hi: pos + 1,
desc: format!("section names cannot contain \
a `[` character"),
});
continue
}
section.push_char(ch);
}
if section.len() == 0 {
self.errors.push(Error {
lo: start,
hi: start + if array {3} else {1},
desc: format!("section name must not be empty"),
});
continue
} else if array && !self.expect(']') {
return None
}
let mut table = TreeMap::new();
if !self.values(&mut table) { return None }
if array {
self.insert_array(&mut ret, section, Table(table), start)
} else {
self.insert_table(&mut ret, section, table, start)
}
}
Some(_) => {
if !self.values(&mut ret) { return None }
}
None if self.errors.len() == 0 => return Some(ret),
None => return None,
}
}
}
fn values(&mut self, into: &mut Table) -> bool {
loop {
self.ws();
match self.cur.clone().next() {
Some((_, '#')) => self.comment(),
Some((_, '\n')) |
Some((_, '\r')) => { self.cur.next(); }
Some((_, '[')) => break,
Some((start, _)) => {
let mut key = String::new();
let mut found_eq = false;
for (pos, ch) in self.cur {
match ch {
' ' | '\t' => break,
'=' => { found_eq = true; break }
'\n' => {
self.errors.push(Error {
lo: start,
hi: pos + 1,
desc: format!("keys cannot be defined \
across lines"),
})
}
c => key.push_char(c),
}
}
if !found_eq {
self.ws();
if !self.expect('=') { return false }
}
let value = match self.value() {
Some(value) => value,
None => return false,
};
self.insert(into, key, value, start);
self.ws();
self.comment();
self.eat('\r');
self.eat('\n');
}
None => break,
}
}
return true
}
fn value(&mut self) -> Option<Value> {
self.ws();
match self.cur.clone().next() {
Some((pos, '"')) => self.string(pos),
Some((pos, 't')) |
Some((pos, 'f')) => self.boolean(pos),
Some((pos, '[')) => self.array(pos),
Some((pos, '-')) => self.number_or_datetime(pos),
Some((pos, ch)) if ch.is_digit() => self.number_or_datetime(pos),
_ => {
let mut it = self.cur.clone();
let lo = it.next().map(|p| p.val0()).unwrap_or(self.input.len());
let hi = it.next().map(|p| p.val0()).unwrap_or(self.input.len());
self.errors.push(Error {
lo: lo,
hi: hi,
desc: format!("expected a value"),
});
return None
}
}
}
fn string(&mut self, start: uint) -> Option<Value> {
if !self.expect('"') { return None }
let mut ret = String::new();
loop {
match self.cur.next() {
Some((_, '"')) => break,
Some((pos, '\\')) => {
match escape(self, pos) {
Some(c) => ret.push_char(c),
None => {}
}
}
Some((pos, ch)) if ch < '\u001f' => {
let mut escaped = String::new();
ch.escape_default(|c| escaped.push_char(c));
self.errors.push(Error {
lo: pos,
hi: pos + 1,
desc: format!("control character `{}` must be escaped",
escaped)
});
}
Some((_, ch)) => ret.push_char(ch),
None => {
self.errors.push(Error {
lo: start,
hi: self.input.len(),
desc: format!("unterminated string literal"),
});
return None
}
}
}
return Some(String(ret));
fn escape(me: &mut Parser, pos: uint) -> Option<char> {
match me.cur.next() {
Some((_, 'b')) => Some('\u0008'),
Some((_, 't')) => Some('\u0009'),
Some((_, 'n')) => Some('\u000a'),
Some((_, 'f')) => Some('\u000c'),
Some((_, 'r')) => Some('\u000d'),
Some((_, '"')) => Some('\u0022'),
Some((_, '/')) => Some('\u002f'),
Some((_, '\\')) => Some('\u005c'),
Some((pos, c @ 'u')) |
Some((pos, c @ 'U')) => {
let len = if c == 'u' {4} else {8};
let num = if me.input.is_char_boundary(pos + 1 + len) {
me.input.slice(pos + 1, pos + 1 + len)
} else {
"invalid"
};
match FromStrRadix::from_str_radix(num, 16) {
Some(n) => {
match char::from_u32(n) {
Some(c) => {
for _ in range(0, len) {
me.cur.next();
}
return Some(c)
}
None => {
me.errors.push(Error {
lo: pos + 1,
hi: pos + 5,
desc: format!("codepoint `{:x}` is \
not a valid unicode \
codepoint", n),
})
}
}
}
None => {
me.errors.push(Error {
lo: pos,
hi: pos + 1,
desc: format!("expected {} hex digits \
after a `u` escape", len),
})
}
}
None
}
Some((pos, ch)) => {
let mut escaped = String::new();
ch.escape_default(|c| escaped.push_char(c));
let next_pos = me.next_pos();
me.errors.push(Error {
lo: pos,
hi: next_pos,
desc: format!("unknown string escape: `{}`",
escaped),
});
None
}
None => {
me.errors.push(Error {
lo: pos,
hi: pos + 1,
desc: format!("unterminated escape sequence"),
});
None
}
}
}
}
fn number_or_datetime(&mut self, start: uint) -> Option<Value> {
let negative = self.eat('-');
let mut is_float = false;
loop {
match self.cur.clone().next() {
Some((_, ch)) if ch.is_digit() => { self.cur.next(); }
Some((_, '.')) if !is_float => {
is_float = true;
self.cur.next();
}
Some(_) | None => break,
}
}
let end = self.next_pos();
let ret = if is_float {
if self.input.char_at_reverse(end) == '.' {
None
} else {
from_str::<f64>(self.input.slice(start, end)).map(Float)
}
} else if !negative && self.eat('-') {
self.datetime(start, end + 1)
} else {
from_str::<i64>(self.input.slice(start, end)).map(Integer)
};
if ret.is_none() {
self.errors.push(Error {
lo: start,
hi: end,
desc: format!("invalid numeric literal"),
});
}
return ret;
}
    // Parse a `true`/`false` literal whose first character sits at byte
    // offset `start` (the current cursor position). On success the keyword's
    // characters are consumed from the cursor; on failure an error is
    // recorded and nothing sensible is consumed.
    fn boolean(&mut self, start: uint) -> Option<Value> {
        let rest = self.input.slice_from(start);
        if rest.starts_with("true") {
            // advance the cursor past the 4 chars of "true"
            for _ in range(0u, 4u) {
                self.cur.next();
            }
            Some(Boolean(true))
        } else if rest.starts_with("false") {
            // advance the cursor past the 5 chars of "false"
            for _ in range(0u, 5u) {
                self.cur.next();
            }
            Some(Boolean(false))
        } else {
            let next = self.next_pos();
            self.errors.push(Error {
                lo: start,
                hi: next,
                desc: format!("unexpected character: `{}`",
                              rest.char_at(0)),
            });
            None
        }
    }
fn datetime(&mut self, start: uint, end_so_far: uint) -> Option<Value> {
let mut date = self.input.slice(start, end_so_far).to_string();
for _ in range(0u, 15u) {
match self.cur.next() {
Some((_, ch)) => date.push_char(ch),
None => {
self.errors.push(Error {
lo: start,
hi: end_so_far,
desc: format!("malformed date literal"),
});
return None
}
}
}
let mut it = date.as_slice().chars();
let mut valid = true;
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c == '-').unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c == '-').unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c == 'T').unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c == ':').unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c == ':').unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
valid = valid && it.next().map(|c| c == 'Z').unwrap_or(false);
if valid {
Some(Datetime(date.clone()))
} else {
self.errors.push(Error {
lo: start,
hi: start + date.len(),
desc: format!("malformed date literal"),
});
None
}
}
fn array(&mut self, _start: uint) -> Option<Value> {
if !self.expect('[') { return None }
let mut ret = Vec::new();
fn consume(me: &mut Parser) {
loop {
me.ws();
match me.cur.clone().next() {
Some((_, '#')) => { me.comment(); }
Some((_, '\n')) |
Some((_, '\r')) => { me.cur.next(); }
_ => break,
}
}
}
let mut type_str = None;
loop {
// Break out early if we see the closing bracket
consume(self);
if self.eat(']') { return Some(Array(ret)) }
// Attempt to parse a value, triggering an error if it's the wrong
// type.
let start = self.next_pos();
let value = match self.value() {
Some(v) => v,
None => return None,
};
let end = self.next_pos();
let expected = type_str.unwrap_or(value.type_str());
if value.type_str() != expected {
self.errors.push(Error {
lo: start,
hi: end,
desc: format!("expected type `{}`, found type `{}`",
expected, value.type_str()),
});
} else {
type_str = Some(expected);
ret.push(value);
}
// Look for a comma. If we don't find one we're done
consume(self);
if !self.eat(',') { break }
}
consume(self);
if !self.expect(']') { return None }
return Some(Array(ret))
}
    // Insert `key => value` into `into`, recording a duplicate-key error
    // (and leaving the existing value untouched) when the key is taken.
    fn insert(&mut self, into: &mut Table, key: String, value: Value,
              key_lo: uint) {
        if into.contains_key(&key) {
            self.errors.push(Error {
                lo: key_lo,
                hi: key_lo + key.len(),
                desc: format!("duplicate key: `{}`", key),
            })
        } else {
            into.insert(key, value);
        }
    }
fn recurse<'a>(&mut self, mut cur: &'a mut Table, orig_key: &'a str,
key_lo: uint) -> Option<(&'a mut Table, &'a str)> {
if orig_key.starts_with(".") || orig_key.ends_with(".") ||
orig_key.contains("..") {
self.errors.push(Error {
lo: key_lo,
hi: key_lo + orig_key.len(),
desc: format!("tables cannot have empty names"),
});
return None
}
let key = match orig_key.rfind('.') {
Some(n) => orig_key.slice_to(n),
None => return Some((cur, orig_key)),
};
for part in key.as_slice().split('.') {
let part = part.to_string();
let tmp = cur;
if tmp.contains_key(&part) {
match *tmp.find_mut(&part).unwrap() {
Table(ref mut table) => {
cur = table;
continue
}
Array(ref mut array) => {
match array.as_mut_slice().mut_last() {
Some(&Table(ref mut table)) => cur = table,
_ => {
self.errors.push(Error {
lo: key_lo,
hi: key_lo + key.len(),
desc: format!("array `{}` does not contain \
tables", part)
});
return None
}
}
continue
}
_ => {
self.errors.push(Error {
lo: key_lo,
hi: key_lo + key.len(),
desc: format!("key `{}` was not previously a table",
part)
});
return None
}
}
}
// Initialize an empty table as part of this sub-key
tmp.insert(part.clone(), Table(TreeMap::new()));
match *tmp.find_mut(&part).unwrap() {
Table(ref mut inner) => cur = inner,
_ => unreachable!(),
}
}
return Some((cur, orig_key.slice_from(key.len() + 1)))
}
fn insert_table(&mut self, into: &mut Table, key: String, value: Table,
key_lo: uint) {
if !self.tables_defined.insert(key.clone()) {
self.errors.push(Error {
lo: key_lo,
hi: key_lo + key.len(),
desc: format!("redefinition of table `{}`", key),
});
return
}
let (into, key) = match self.recurse(into, key.as_slice(), key_lo) {
Some(pair) => pair,
None => return,
};
let key = key.to_string();
if !into.contains_key(&key) {
into.insert(key.clone(), Table(TreeMap::new()));
}
match into.find_mut(&key) {
Some(&Table(ref mut table)) => {
for (k, v) in value.move_iter() {
if !table.insert(k.clone(), v) {
self.errors.push(Error {
lo: key_lo,
hi: key_lo + key.len(),
desc: format!("duplicate key `{}` in table", k),
});
}
}
}
Some(_) => {
self.errors.push(Error {
lo: key_lo,
hi: key_lo + key.len(),
desc: format!("duplicate key `{}` in table", key),
});
}
None => {}
}
}
fn insert_array(&mut self, into: &mut Table, key: String, value: Value,
key_lo: uint) {
let (into, key) = match self.recurse(into, key.as_slice(), key_lo) {
Some(pair) => pair,
None => return,
};
let key = key.to_string();
if !into.contains_key(&key) {
into.insert(key.clone(), Array(Vec::new()));
}
match *into.find_mut(&key).unwrap() {
Array(ref mut vec) => {
match vec.as_slice().head() {
Some(ref v) if !v.same_type(&value) => {
self.errors.push(Error {
lo: key_lo,
hi: key_lo + key.len(),
desc: format!("expected type `{}`, found type `{}`",
v.type_str(), value.type_str()),
})
}
Some(..) | None => {}
}
vec.push(value);
}
_ => {
self.errors.push(Error {
lo: key_lo,
hi: key_lo + key.len(),
desc: format!("key `{}` was previously not an array", key),
});
}
}
}
}
#[cfg(test)]
mod tests {
use {Table, Parser};
#[test]
fn crlf() {
let mut p = Parser::new("\
[project]\r\n\
\r\n\
name = \"splay\"\r\n\
version = \"0.1.0\"\r\n\
authors = [\"alex@crichton.co\"]\r\n\
\r\n\
[[lib]]\r\n\
\r\n\
path = \"lib.rs\"\r\n\
name = \"splay\"\r\n\
");
assert!(p.parse().is_some());
}
#[test]
fn linecol() {
let p = Parser::new("ab\ncde\nf");
assert_eq!(p.to_linecol(0), (0, 0));
assert_eq!(p.to_linecol(1), (0, 1));
assert_eq!(p.to_linecol(3), (1, 0));
assert_eq!(p.to_linecol(4), (1, 1));
assert_eq!(p.to_linecol(7), (2, 0));
}
#[test]
fn fun_with_strings() {
let mut p = Parser::new(r#"
[foo]
bar = "\U00000000"
"#);
let table = Table(p.parse().unwrap());
assert_eq!(table.lookup("foo.bar").and_then(|k| k.as_str()), Some("\0"));
}
}
Implement multiline string literals
While I'm at it, implement literal strings as well.
Closes #5
use std::char;
use std::collections::{TreeMap, HashSet};
use std::num::FromStrRadix;
use std::str;
use {Array, Table, Value, String, Float, Integer, Boolean, Datetime};
/// Parser for converting a string to a TOML `Value` instance.
///
/// This parser contains the string slice that is being parsed, and exports the
/// list of errors which have occurred during parsing.
pub struct Parser<'a> {
input: &'a str,
cur: str::CharOffsets<'a>,
tables_defined: HashSet<String>,
/// A list of all errors which have occurred during parsing.
///
/// Not all parse errors are fatal, so this list is added to as much as
/// possible without aborting parsing. If `None` is returned by `parse`, it
/// is guaranteed that this list is not empty.
pub errors: Vec<Error>,
}
/// A structure representing a parse error.
///
/// The data in this structure can be used to trace back to the original cause
/// of the error in order to provide diagnostics about parse errors.
#[deriving(Show)]
pub struct Error {
/// The low byte at which this error is pointing at.
pub lo: uint,
/// One byte beyond the last character at which this error is pointing at.
pub hi: uint,
/// A human-readable description explaining what the error is.
pub desc: String,
}
impl<'a> Parser<'a> {
    /// Creates a new parser for a string.
    ///
    /// The parser can be executed by invoking the `parse` method.
    ///
    /// # Example
    ///
    /// ```
    /// let toml = r#"
    ///     [test]
    ///     foo = "bar"
    /// "#;
    ///
    /// let mut parser = toml::Parser::new(toml);
    /// match parser.parse() {
    ///     Some(value) => println!("found toml: {}", value),
    ///     None => {
    ///         println!("parse errors: {}", parser.errors);
    ///     }
    /// }
    /// ```
    pub fn new(s: &'a str) -> Parser<'a> {
        Parser {
            input: s,
            cur: s.char_indices(),
            errors: Vec::new(),
            tables_defined: HashSet::new(),
        }
    }

    /// Converts a byte offset from an error message to a (line, column) pair
    ///
    /// All indexes are 0-based.
    pub fn to_linecol(&self, offset: uint) -> (uint, uint) {
        let mut cur = 0;
        // Walk line by line until the cumulative byte count passes `offset`.
        for (i, line) in self.input.lines().enumerate() {
            if cur + line.len() > offset {
                return (i, offset - cur)
            }
            // +1 accounts for the '\n' stripped by `lines()`.
            cur += line.len() + 1;
        }
        return (self.input.lines().count(), 0)
    }

    /// Byte offset of the next character to be consumed, or the input length
    /// when the iterator is exhausted.
    fn next_pos(&self) -> uint {
        self.cur.clone().next().map(|p| p.val0()).unwrap_or(self.input.len())
    }

    /// Consumes `ch` if it is the next character; returns whether it was eaten.
    /// Peeks via a cloned iterator so nothing is consumed on a mismatch.
    fn eat(&mut self, ch: char) -> bool {
        match self.cur.clone().next() {
            Some((_, c)) if c == ch => { self.cur.next(); true }
            Some(_) | None => false,
        }
    }

    /// Like `eat`, but records an error describing the expectation on failure.
    fn expect(&mut self, ch: char) -> bool {
        if self.eat(ch) { return true }
        let mut it = self.cur.clone();
        let lo = it.next().map(|p| p.val0()).unwrap_or(self.input.len());
        let hi = it.next().map(|p| p.val0()).unwrap_or(self.input.len());
        self.errors.push(Error {
            lo: lo,
            hi: hi,
            desc: match self.cur.clone().next() {
                Some((_, c)) => format!("expected `{}`, but found `{}`", ch, c),
                None => format!("expected `{}`, but found eof", ch)
            }
        });
        false
    }

    /// Skips over spaces and tabs (but not newlines).
    fn ws(&mut self) {
        loop {
            match self.cur.clone().next() {
                Some((_, '\t')) |
                Some((_, ' ')) => { self.cur.next(); }
                _ => break,
            }
        }
    }

    /// If positioned at a `#`, skips the comment through the end of the line.
    fn comment(&mut self) {
        match self.cur.clone().next() {
            Some((_, '#')) => {}
            _ => return,
        }
        for (_, ch) in self.cur {
            if ch == '\n' { break }
        }
    }

    /// Executes the parser, parsing the string contained within.
    ///
    /// This function will return the `Table` instance if parsing is successful,
    /// or it will return `None` if any parse error or invalid TOML error
    /// occurs.
    ///
    /// If an error occurs, the `errors` field of this parser can be consulted
    /// to determine the cause of the parse failure.
    pub fn parse(&mut self) -> Option<Table> {
        let mut ret = TreeMap::new();
        loop {
            self.ws();
            match self.cur.clone().next() {
                Some((_, '#')) => { self.comment(); }
                Some((_, '\n')) |
                Some((_, '\r')) => { self.cur.next(); }
                // `[section]` or `[[array-of-tables]]` header.
                Some((start, '[')) => {
                    self.cur.next();
                    // A second `[` marks an array-of-tables entry.
                    let array = self.eat('[');
                    let mut section = String::new();
                    for (pos, ch) in self.cur {
                        if ch == ']' { break }
                        if ch == '[' {
                            self.errors.push(Error {
                                lo: pos,
                                hi: pos + 1,
                                desc: format!("section names cannot contain \
                                               a `[` character"),
                            });
                            continue
                        }
                        section.push_char(ch);
                    }

                    if section.len() == 0 {
                        self.errors.push(Error {
                            lo: start,
                            hi: start + if array {3} else {1},
                            desc: format!("section name must not be empty"),
                        });
                        continue
                    } else if array && !self.expect(']') {
                        return None
                    }

                    // Parse the key/value pairs belonging to this section.
                    let mut table = TreeMap::new();
                    if !self.values(&mut table) { return None }
                    if array {
                        self.insert_array(&mut ret, section, Table(table), start)
                    } else {
                        self.insert_table(&mut ret, section, table, start)
                    }
                }
                Some(_) => {
                    // Bare key/value pairs before any section go in the root.
                    if !self.values(&mut ret) { return None }
                }
                None if self.errors.len() == 0 => return Some(ret),
                None => return None,
            }
        }
    }

    /// Parses a run of `key = value` lines into `into` until a new section
    /// header (`[`) or end of input. Returns `false` on a fatal parse error.
    fn values(&mut self, into: &mut Table) -> bool {
        loop {
            self.ws();
            match self.cur.clone().next() {
                Some((_, '#')) => self.comment(),
                Some((_, '\n')) |
                Some((_, '\r')) => { self.cur.next(); }
                Some((_, '[')) => break,
                Some((start, _)) => {
                    // Accumulate the key up to whitespace or `=`.
                    let mut key = String::new();
                    let mut found_eq = false;
                    for (pos, ch) in self.cur {
                        match ch {
                            ' ' | '\t' => break,
                            '=' => { found_eq = true; break }
                            '\n' => {
                                self.errors.push(Error {
                                    lo: start,
                                    hi: pos + 1,
                                    desc: format!("keys cannot be defined \
                                                   across lines"),
                                })
                            }
                            c => key.push_char(c),
                        }
                    }
                    if !found_eq {
                        self.ws();
                        if !self.expect('=') { return false }
                    }

                    let value = match self.value() {
                        Some(value) => value,
                        None => return false,
                    };
                    self.insert(into, key, value, start);
                    self.ws();
                    self.comment();
                    self.eat('\r');
                    self.eat('\n');
                }
                None => break,
            }
        }
        return true
    }

    /// Dispatches on the first character to the appropriate value parser
    /// (string, boolean, array, number, or datetime).
    fn value(&mut self) -> Option<Value> {
        self.ws();
        match self.cur.clone().next() {
            Some((pos, '"')) => self.string(pos),
            Some((pos, '\'')) => self.literal_string(pos),
            Some((pos, 't')) |
            Some((pos, 'f')) => self.boolean(pos),
            Some((pos, '[')) => self.array(pos),
            Some((pos, '-')) => self.number_or_datetime(pos),
            Some((pos, ch)) if ch.is_digit() => self.number_or_datetime(pos),
            _ => {
                let mut it = self.cur.clone();
                let lo = it.next().map(|p| p.val0()).unwrap_or(self.input.len());
                let hi = it.next().map(|p| p.val0()).unwrap_or(self.input.len());
                self.errors.push(Error {
                    lo: lo,
                    hi: hi,
                    desc: format!("expected a value"),
                });
                return None
            }
        }
    }

    /// Parses a basic (double-quoted) string, including the `"""` multiline
    /// form, processing backslash escapes via the inner `escape` helper.
    fn string(&mut self, start: uint) -> Option<Value> {
        if !self.expect('"') { return None }
        let mut multiline = false;
        let mut ret = String::new();

        // detect multiline literals
        if self.eat('"') {
            multiline = true;
            if !self.expect('"') { return None }
            // A newline immediately after `"""` is trimmed.
            self.eat('\n');
        }

        loop {
            match self.cur.next() {
                Some((_, '"')) => {
                    if multiline {
                        // Only three consecutive quotes terminate; fewer are
                        // literal quote characters.
                        if !self.eat('"') { ret.push_str("\""); continue }
                        if !self.eat('"') { ret.push_str("\"\""); continue }
                    }
                    break
                }
                Some((pos, '\\')) => {
                    match escape(self, pos, multiline) {
                        Some(c) => ret.push_char(c),
                        None => {}
                    }
                }
                Some((_, '\n')) if multiline => ret.push_char('\n'),
                Some((pos, ch)) if ch < '\u001f' => {
                    let mut escaped = String::new();
                    ch.escape_default(|c| escaped.push_char(c));
                    self.errors.push(Error {
                        lo: pos,
                        hi: pos + 1,
                        desc: format!("control character `{}` must be escaped",
                                      escaped)
                    });
                }
                Some((_, ch)) => ret.push_char(ch),
                None => {
                    self.errors.push(Error {
                        lo: start,
                        hi: self.input.len(),
                        desc: format!("unterminated string literal"),
                    });
                    return None
                }
            }
        }

        return Some(String(ret));

        // Decodes one escape sequence after a `\`; returns the decoded char,
        // or None for either an error or the zero-width line-continuation
        // escape (backslash-newline in multiline strings).
        fn escape(me: &mut Parser, pos: uint, multiline: bool) -> Option<char> {
            match me.cur.next() {
                Some((_, 'b')) => Some('\u0008'),
                Some((_, 't')) => Some('\u0009'),
                Some((_, 'n')) => Some('\u000a'),
                Some((_, 'f')) => Some('\u000c'),
                Some((_, 'r')) => Some('\u000d'),
                Some((_, '"')) => Some('\u0022'),
                Some((_, '/')) => Some('\u002f'),
                Some((_, '\\')) => Some('\u005c'),
                // \uXXXX (4 hex digits) or \UXXXXXXXX (8 hex digits).
                Some((pos, c @ 'u')) |
                Some((pos, c @ 'U')) => {
                    let len = if c == 'u' {4} else {8};
                    let num = if me.input.is_char_boundary(pos + 1 + len) {
                        me.input.slice(pos + 1, pos + 1 + len)
                    } else {
                        "invalid"
                    };
                    match FromStrRadix::from_str_radix(num, 16) {
                        Some(n) => {
                            match char::from_u32(n) {
                                Some(c) => {
                                    // Skip over the hex digits just decoded.
                                    for _ in range(0, len) {
                                        me.cur.next();
                                    }
                                    return Some(c)
                                }
                                None => {
                                    me.errors.push(Error {
                                        lo: pos + 1,
                                        hi: pos + 5,
                                        desc: format!("codepoint `{:x}` is \
                                                       not a valid unicode \
                                                       codepoint", n),
                                    })
                                }
                            }
                        }
                        None => {
                            me.errors.push(Error {
                                lo: pos,
                                hi: pos + 1,
                                desc: format!("expected {} hex digits \
                                               after a `u` escape", len),
                            })
                        }
                    }
                    None
                }
                // Backslash-newline in a multiline string eats all following
                // whitespace (line continuation).
                Some((_, '\n')) if multiline => {
                    loop {
                        match me.cur.clone().next() {
                            Some((_, '\t')) |
                            Some((_, ' ')) |
                            Some((_, '\n')) => { me.cur.next(); }
                            _ => break
                        }
                    }
                    None
                }
                Some((pos, ch)) => {
                    let mut escaped = String::new();
                    ch.escape_default(|c| escaped.push_char(c));
                    let next_pos = me.next_pos();
                    me.errors.push(Error {
                        lo: pos,
                        hi: next_pos,
                        desc: format!("unknown string escape: `{}`",
                                      escaped),
                    });
                    None
                }
                None => {
                    me.errors.push(Error {
                        lo: pos,
                        hi: pos + 1,
                        desc: format!("unterminated escape sequence"),
                    });
                    None
                }
            }
        }
    }

    /// Parses a literal (single-quoted) string, including the `'''` multiline
    /// form. No escape processing is performed.
    fn literal_string(&mut self, start: uint) -> Option<Value> {
        if !self.expect('\'') { return None }
        let mut multiline = false;
        let mut ret = String::new();

        // detect multiline literals
        if self.eat('\'') {
            multiline = true;
            if !self.expect('\'') { return None }
            self.eat('\n');
        }

        loop {
            match self.cur.next() {
                Some((_, '\'')) => {
                    if multiline {
                        if !self.eat('\'') { ret.push_str("'"); continue }
                        if !self.eat('\'') { ret.push_str("''"); continue }
                    }
                    break
                }
                Some((_, ch)) => ret.push_char(ch),
                None => {
                    self.errors.push(Error {
                        lo: start,
                        hi: self.input.len(),
                        desc: format!("unterminated string literal"),
                    });
                    return None
                }
            }
        }

        return Some(String(ret));
    }

    /// Parses an integer, float, or datetime starting at `start`. A `-` after
    /// the digits (when not already negative) switches to datetime parsing.
    fn number_or_datetime(&mut self, start: uint) -> Option<Value> {
        let negative = self.eat('-');
        let mut is_float = false;
        loop {
            match self.cur.clone().next() {
                Some((_, ch)) if ch.is_digit() => { self.cur.next(); }
                Some((_, '.')) if !is_float => {
                    is_float = true;
                    self.cur.next();
                }
                Some(_) | None => break,
            }
        }
        let end = self.next_pos();
        let ret = if is_float {
            // A trailing `.` with no fractional digits is invalid.
            if self.input.char_at_reverse(end) == '.' {
                None
            } else {
                from_str::<f64>(self.input.slice(start, end)).map(Float)
            }
        } else if !negative && self.eat('-') {
            self.datetime(start, end + 1)
        } else {
            from_str::<i64>(self.input.slice(start, end)).map(Integer)
        };
        if ret.is_none() {
            self.errors.push(Error {
                lo: start,
                hi: end,
                desc: format!("invalid numeric literal"),
            });
        }
        return ret;
    }

    /// Parses a `true`/`false` literal beginning at byte offset `start`.
    fn boolean(&mut self, start: uint) -> Option<Value> {
        let rest = self.input.slice_from(start);
        if rest.starts_with("true") {
            for _ in range(0u, 4u) {
                self.cur.next();
            }
            Some(Boolean(true))
        } else if rest.starts_with("false") {
            for _ in range(0u, 5u) {
                self.cur.next();
            }
            Some(Boolean(false))
        } else {
            let next = self.next_pos();
            self.errors.push(Error {
                lo: start,
                hi: next,
                desc: format!("unexpected character: `{}`",
                              rest.char_at(0)),
            });
            None
        }
    }

    /// Parses the remainder of an RFC3339-ish `YYYY-MM-DDTHH:MM:SSZ` datetime;
    /// `end_so_far` marks how much (year + first dash) was already consumed.
    fn datetime(&mut self, start: uint, end_so_far: uint) -> Option<Value> {
        let mut date = self.input.slice(start, end_so_far).to_string();
        // The fixed-width tail after `YYYY-` is exactly 15 characters.
        for _ in range(0u, 15u) {
            match self.cur.next() {
                Some((_, ch)) => date.push_char(ch),
                None => {
                    self.errors.push(Error {
                        lo: start,
                        hi: end_so_far,
                        desc: format!("malformed date literal"),
                    });
                    return None
                }
            }
        }
        // Validate the shape character by character: 4 digits, '-', 2 digits,
        // '-', 2 digits, 'T', 2:2:2 time, then a literal 'Z'.
        let mut it = date.as_slice().chars();
        let mut valid = true;
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c == '-').unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c == '-').unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c == 'T').unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c == ':').unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c == ':').unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c.is_digit()).unwrap_or(false);
        valid = valid && it.next().map(|c| c == 'Z').unwrap_or(false);
        if valid {
            Some(Datetime(date.clone()))
        } else {
            self.errors.push(Error {
                lo: start,
                hi: start + date.len(),
                desc: format!("malformed date literal"),
            });
            None
        }
    }

    /// Parses a `[...]` array value. All elements must share one type; a
    /// mismatched element is reported and dropped rather than aborting.
    fn array(&mut self, _start: uint) -> Option<Value> {
        if !self.expect('[') { return None }
        let mut ret = Vec::new();
        // Skips whitespace, comments, and newlines between array elements.
        fn consume(me: &mut Parser) {
            loop {
                me.ws();
                match me.cur.clone().next() {
                    Some((_, '#')) => { me.comment(); }
                    Some((_, '\n')) |
                    Some((_, '\r')) => { me.cur.next(); }
                    _ => break,
                }
            }
        }
        let mut type_str = None;
        loop {
            // Break out early if we see the closing bracket
            consume(self);
            if self.eat(']') { return Some(Array(ret)) }

            // Attempt to parse a value, triggering an error if it's the wrong
            // type.
            let start = self.next_pos();
            let value = match self.value() {
                Some(v) => v,
                None => return None,
            };
            let end = self.next_pos();
            let expected = type_str.unwrap_or(value.type_str());
            if value.type_str() != expected {
                self.errors.push(Error {
                    lo: start,
                    hi: end,
                    desc: format!("expected type `{}`, found type `{}`",
                                  expected, value.type_str()),
                });
            } else {
                type_str = Some(expected);
                ret.push(value);
            }

            // Look for a comma. If we don't find one we're done
            consume(self);
            if !self.eat(',') { break }
        }
        consume(self);
        if !self.expect(']') { return None }
        return Some(Array(ret))
    }

    /// Inserts `key => value` into `into`, reporting a duplicate-key error
    /// instead of overwriting an existing entry.
    fn insert(&mut self, into: &mut Table, key: String, value: Value,
              key_lo: uint) {
        if into.contains_key(&key) {
            self.errors.push(Error {
                lo: key_lo,
                hi: key_lo + key.len(),
                desc: format!("duplicate key: `{}`", key),
            })
        } else {
            into.insert(key, value);
        }
    }

    /// Walks a dotted key like `a.b.c` down through nested tables (creating
    /// intermediate tables as needed), returning the innermost table and the
    /// final key component. Returns None on an invalid path.
    fn recurse<'a>(&mut self, mut cur: &'a mut Table, orig_key: &'a str,
                   key_lo: uint) -> Option<(&'a mut Table, &'a str)> {
        // Empty components (leading/trailing/double dots) are invalid.
        if orig_key.starts_with(".") || orig_key.ends_with(".") ||
           orig_key.contains("..") {
            self.errors.push(Error {
                lo: key_lo,
                hi: key_lo + orig_key.len(),
                desc: format!("tables cannot have empty names"),
            });
            return None
        }
        let key = match orig_key.rfind('.') {
            Some(n) => orig_key.slice_to(n),
            None => return Some((cur, orig_key)),
        };
        for part in key.as_slice().split('.') {
            let part = part.to_string();
            let tmp = cur;
            if tmp.contains_key(&part) {
                match *tmp.find_mut(&part).unwrap() {
                    Table(ref mut table) => {
                        cur = table;
                        continue
                    }
                    // For arrays of tables, descend into the last table.
                    Array(ref mut array) => {
                        match array.as_mut_slice().mut_last() {
                            Some(&Table(ref mut table)) => cur = table,
                            _ => {
                                self.errors.push(Error {
                                    lo: key_lo,
                                    hi: key_lo + key.len(),
                                    desc: format!("array `{}` does not contain \
                                                   tables", part)
                                });
                                return None
                            }
                        }
                        continue
                    }
                    _ => {
                        self.errors.push(Error {
                            lo: key_lo,
                            hi: key_lo + key.len(),
                            desc: format!("key `{}` was not previously a table",
                                          part)
                        });
                        return None
                    }
                }
            }

            // Initialize an empty table as part of this sub-key
            tmp.insert(part.clone(), Table(TreeMap::new()));
            match *tmp.find_mut(&part).unwrap() {
                Table(ref mut inner) => cur = inner,
                _ => unreachable!(),
            }
        }
        return Some((cur, orig_key.slice_from(key.len() + 1)))
    }

    /// Inserts a `[section]` table at the (possibly dotted) key, merging into
    /// an existing table but rejecting redefinitions and key collisions.
    fn insert_table(&mut self, into: &mut Table, key: String, value: Table,
                    key_lo: uint) {
        // `tables_defined` tracks headers already seen to catch redefinition.
        if !self.tables_defined.insert(key.clone()) {
            self.errors.push(Error {
                lo: key_lo,
                hi: key_lo + key.len(),
                desc: format!("redefinition of table `{}`", key),
            });
            return
        }

        let (into, key) = match self.recurse(into, key.as_slice(), key_lo) {
            Some(pair) => pair,
            None => return,
        };
        let key = key.to_string();
        if !into.contains_key(&key) {
            into.insert(key.clone(), Table(TreeMap::new()));
        }
        match into.find_mut(&key) {
            Some(&Table(ref mut table)) => {
                for (k, v) in value.move_iter() {
                    if !table.insert(k.clone(), v) {
                        self.errors.push(Error {
                            lo: key_lo,
                            hi: key_lo + key.len(),
                            desc: format!("duplicate key `{}` in table", k),
                        });
                    }
                }
            }
            Some(_) => {
                self.errors.push(Error {
                    lo: key_lo,
                    hi: key_lo + key.len(),
                    desc: format!("duplicate key `{}` in table", key),
                });
            }
            None => {}
        }
    }

    /// Appends `value` to the array-of-tables at the (possibly dotted) key,
    /// creating the array on first use and enforcing element-type consistency.
    fn insert_array(&mut self, into: &mut Table, key: String, value: Value,
                    key_lo: uint) {
        let (into, key) = match self.recurse(into, key.as_slice(), key_lo) {
            Some(pair) => pair,
            None => return,
        };
        let key = key.to_string();
        if !into.contains_key(&key) {
            into.insert(key.clone(), Array(Vec::new()));
        }
        match *into.find_mut(&key).unwrap() {
            Array(ref mut vec) => {
                // The first element fixes the type of the whole array.
                match vec.as_slice().head() {
                    Some(ref v) if !v.same_type(&value) => {
                        self.errors.push(Error {
                            lo: key_lo,
                            hi: key_lo + key.len(),
                            desc: format!("expected type `{}`, found type `{}`",
                                          v.type_str(), value.type_str()),
                        })
                    }
                    Some(..) | None => {}
                }
                vec.push(value);
            }
            _ => {
                self.errors.push(Error {
                    lo: key_lo,
                    hi: key_lo + key.len(),
                    desc: format!("key `{}` was previously not an array", key),
                });
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use {Table, Parser};

    // Documents parse despite CRLF line endings.
    #[test]
    fn crlf() {
        let mut p = Parser::new("\
[project]\r\n\
\r\n\
name = \"splay\"\r\n\
version = \"0.1.0\"\r\n\
authors = [\"alex@crichton.co\"]\r\n\
\r\n\
[[lib]]\r\n\
\r\n\
path = \"lib.rs\"\r\n\
name = \"splay\"\r\n\
");
        assert!(p.parse().is_some());
    }

    // Byte offsets map to the expected 0-based (line, column) pairs.
    #[test]
    fn linecol() {
        let p = Parser::new("ab\ncde\nf");
        assert_eq!(p.to_linecol(0), (0, 0));
        assert_eq!(p.to_linecol(1), (0, 1));
        assert_eq!(p.to_linecol(3), (1, 0));
        assert_eq!(p.to_linecol(4), (1, 1));
        assert_eq!(p.to_linecol(7), (2, 0));
    }

    // Exercises escapes, multiline basic strings with line continuations,
    // and literal (single-quoted) strings which take no escapes.
    #[test]
    fn fun_with_strings() {
        let mut p = Parser::new(r#"
bar = "\U00000000"
key1 = "One\nTwo"
key2 = """One\nTwo"""
key3 = """
One
Two"""

key4 = "The quick brown fox jumps over the lazy dog."
key5 = """
The quick brown \


  fox jumps over \
    the lazy dog."""
key6 = """\
       The quick brown \
       fox jumps over \
       the lazy dog.\
       """
# What you see is what you get.
winpath = 'C:\Users\nodejs\templates'
winpath2 = '\\ServerX\admin$\system32\'
quoted = 'Tom "Dubs" Preston-Werner'
regex = '<\i\c*\s*>'

regex2 = '''I [dw]on't need \d{2} apples'''
lines  = '''
The first newline is
trimmed in raw strings.
   All other whitespace
   is preserved.
'''
"#);
        let table = Table(p.parse().unwrap());
        assert_eq!(table.lookup("bar").and_then(|k| k.as_str()), Some("\0"));
        assert_eq!(table.lookup("key1").and_then(|k| k.as_str()),
                   Some("One\nTwo"));
        assert_eq!(table.lookup("key2").and_then(|k| k.as_str()),
                   Some("One\nTwo"));
        assert_eq!(table.lookup("key3").and_then(|k| k.as_str()),
                   Some("One\nTwo"));

        let msg = "The quick brown fox jumps over the lazy dog.";
        assert_eq!(table.lookup("key4").and_then(|k| k.as_str()), Some(msg));
        assert_eq!(table.lookup("key5").and_then(|k| k.as_str()), Some(msg));
        assert_eq!(table.lookup("key6").and_then(|k| k.as_str()), Some(msg));

        assert_eq!(table.lookup("winpath").and_then(|k| k.as_str()),
                   Some(r"C:\Users\nodejs\templates"));
        assert_eq!(table.lookup("winpath2").and_then(|k| k.as_str()),
                   Some(r"\\ServerX\admin$\system32\"));
        assert_eq!(table.lookup("quoted").and_then(|k| k.as_str()),
                   Some(r#"Tom "Dubs" Preston-Werner"#));
        assert_eq!(table.lookup("regex").and_then(|k| k.as_str()),
                   Some(r"<\i\c*\s*>"));
        assert_eq!(table.lookup("regex2").and_then(|k| k.as_str()),
                   Some(r"I [dw]on't need \d{2} apples"));
        assert_eq!(table.lookup("lines").and_then(|k| k.as_str()),
                   Some("The first newline is\n\
                         trimmed in raw strings.\n \
                         All other whitespace\n \
                         is preserved.\n"));
    }
}
|
#![allow(dead_code)]
// Bitmask of the arrow keys currently held down; one bit per direction.
bitflags! {
    flags KeyState: u8 {
        const LEFT  = 0b1,
        const RIGHT = 0b10,
        const UP    = 0b100,
        const DOWN  = 0b1000,
    }
}
/// The player's condition.
pub enum State {
    /// Recently bitten; the payload is the seconds remaining in this state.
    Bitten(f64),
    /// Default, unharmed state.
    Normal,
}
/// The player-controlled swimmer.
pub struct Player {
    /// World position, `[x, y]`.
    pub pos: [f64, ..2],
    /// Velocity, `[x, y]`.
    pub vel: [f64, ..2],
    /// Which movement keys are currently pressed.
    pub key_state: KeyState,
    /// Seconds accumulated since the sprite frame last advanced.
    pub time_since_last_frame_update: f64,
    /// Index of the current animation frame.
    pub frame: uint,
    /// Current gameplay state (normal or bitten).
    pub state: State,
}
impl Player {
    /// Creates a player at `pos` with zero velocity, no keys pressed,
    /// frame 0, and `State::Normal`.
    pub fn new(pos: [f64, ..2]) -> Player {
        Player {
            pos: pos,
            vel: [0.0, 0.0],
            key_state: KeyState::empty(),
            time_since_last_frame_update: 0.0,
            frame: 0,
            state: State::Normal,
        }
    }
}
/// Advances the player simulation by `dt` seconds: animation frame,
/// bitten-state countdown, key-driven acceleration, water drag, stream
/// advection, and rock collision (movement is cancelled on a hit).
pub fn update_player(dt: f64) {
    use current_stream;
    use current_player;
    use current_rocks;
    use piston::vecmath::vec2_add as add;
    use piston::vecmath::vec2_scale as scale;
    use piston::vecmath::vec2_sub as sub;
    use piston::vecmath::vec2_len as len;
    use piston::vecmath::vec2_square_len as square_len;
    use settings::WATER_FRICTION;
    use settings::player::{ ACC, FRAME_INTERVAL, FRAMES, SPEEDUP };
    use std::num::Float;

    let dt = dt * SPEEDUP;
    let stream = &mut *current_stream();
    let player = &mut *current_player();
    let rocks = &mut *current_rocks();
    let friction = WATER_FRICTION;

    // Advance the sprite animation; swimming up plays the frames backwards.
    player.time_since_last_frame_update += dt;
    if player.time_since_last_frame_update > FRAME_INTERVAL {
        if player.key_state.contains(UP) {
            player.frame = (player.frame + FRAMES.len() - 1) % FRAMES.len();
        } else {
            player.frame = (player.frame + 1) % FRAMES.len();
        }
        player.time_since_last_frame_update -= FRAME_INTERVAL;
    }

    // Count down the bitten timer, reverting to Normal when it expires.
    player.state = match player.state {
        State::Normal => State::Normal,
        State::Bitten(sec) => {
            let new_sec = sec - dt;
            if new_sec < 0.0 {
                State::Normal
            } else {
                State::Bitten(new_sec)
            }
        }
    };

    // Accumulate acceleration from the currently-held direction keys.
    let mut acc: [f64, ..2] = [0.0, 0.0];
    if player.key_state.contains(LEFT) {
        acc[0] -= ACC;
    }
    if player.key_state.contains(RIGHT) {
        acc[0] += ACC;
    }
    if player.key_state.contains(DOWN) {
        acc[1] += ACC;
    }
    if player.key_state.contains(UP) {
        acc[1] -= ACC;
    }

    // Integrate velocity, apply exponential water drag, then move along the
    // average of old and new velocity plus the stream's flow at our position.
    let next_vel = add(player.vel, scale(acc, dt));
    let next_vel_square_len = square_len(next_vel);
    let drag = 1.0 / (next_vel_square_len * friction).exp();
    let next_vel = scale(next_vel, drag);
    let avg_vel = scale(add(player.vel, next_vel), 0.5);
    let dir = stream.at(player.pos);
    let next_pos = add(player.pos, add(scale(dir, dt), scale(avg_vel, dt)));

    // Cancel the move entirely if it would land inside any rock.
    let mut hits_rock = false;
    let rock_radius = ::settings::rocks::RADIUS;
    for rock in rocks.rocks.iter() {
        let diff = sub(rock.pos, next_pos);
        if len(diff) < rock_radius {
            hits_rock = true;
            break;
        }
    }

    if !hits_rock {
        player.pos = next_pos;
    }
}
Refactor: extracted the key-state acceleration logic into a new `KeyState::acceleration` method.
#![allow(dead_code)]
// Bitmask of the arrow keys currently held down; one bit per direction.
bitflags! {
    flags KeyState: u8 {
        const LEFT  = 0b1,
        const RIGHT = 0b10,
        const UP    = 0b100,
        const DOWN  = 0b1000,
    }
}
impl KeyState {
    /// Returns the acceleration vector implied by the held direction keys,
    /// with magnitude `d` per axis. Opposite keys cancel; left/up are
    /// negative, right/down positive (screen coordinates).
    pub fn acceleration(&self, d: f64) -> [f64, ..2] {
        let mut acc = [0.0, ..2];
        if self.contains(LEFT) {
            acc[0] -= d;
        }
        if self.contains(RIGHT) {
            acc[0] += d;
        }
        if self.contains(DOWN) {
            acc[1] += d;
        }
        if self.contains(UP) {
            acc[1] -= d;
        }
        acc
    }
}
/// The player's condition.
pub enum State {
    /// Recently bitten; the payload is the seconds remaining in this state.
    Bitten(f64),
    /// Default, unharmed state.
    Normal,
}
/// The player-controlled swimmer.
pub struct Player {
    /// World position, `[x, y]`.
    pub pos: [f64, ..2],
    /// Velocity, `[x, y]`.
    pub vel: [f64, ..2],
    /// Which movement keys are currently pressed.
    pub key_state: KeyState,
    /// Seconds accumulated since the sprite frame last advanced.
    pub time_since_last_frame_update: f64,
    /// Index of the current animation frame.
    pub frame: uint,
    /// Current gameplay state (normal or bitten).
    pub state: State,
}
impl Player {
    /// Creates a player at `pos` with zero velocity, no keys pressed,
    /// frame 0, and `State::Normal`.
    pub fn new(pos: [f64, ..2]) -> Player {
        Player {
            pos: pos,
            vel: [0.0, 0.0],
            key_state: KeyState::empty(),
            time_since_last_frame_update: 0.0,
            frame: 0,
            state: State::Normal,
        }
    }
}
/// Advances the player simulation by `dt` seconds: animation frame,
/// bitten-state countdown, key-driven acceleration (via
/// `KeyState::acceleration`), water drag, stream advection, and rock
/// collision (movement is cancelled on a hit).
pub fn update_player(dt: f64) {
    use current_stream;
    use current_player;
    use current_rocks;
    use piston::vecmath::vec2_add as add;
    use piston::vecmath::vec2_scale as scale;
    use piston::vecmath::vec2_sub as sub;
    use piston::vecmath::vec2_len as len;
    use piston::vecmath::vec2_square_len as square_len;
    use settings::WATER_FRICTION;
    use settings::player::{ ACC, FRAME_INTERVAL, FRAMES, SPEEDUP };
    use std::num::Float;

    let dt = dt * SPEEDUP;
    let stream = &mut *current_stream();
    let player = &mut *current_player();
    let rocks = &mut *current_rocks();
    let friction = WATER_FRICTION;

    // Advance the sprite animation; swimming up plays the frames backwards.
    player.time_since_last_frame_update += dt;
    if player.time_since_last_frame_update > FRAME_INTERVAL {
        if player.key_state.contains(UP) {
            player.frame = (player.frame + FRAMES.len() - 1) % FRAMES.len();
        } else {
            player.frame = (player.frame + 1) % FRAMES.len();
        }
        player.time_since_last_frame_update -= FRAME_INTERVAL;
    }

    // Count down the bitten timer, reverting to Normal when it expires.
    player.state = match player.state {
        State::Normal => State::Normal,
        State::Bitten(sec) => {
            let new_sec = sec - dt;
            if new_sec < 0.0 {
                State::Normal
            } else {
                State::Bitten(new_sec)
            }
        }
    };

    // Integrate velocity, apply exponential water drag, then move along the
    // average of old and new velocity plus the stream's flow at our position.
    let acc = player.key_state.acceleration(ACC);
    let next_vel = add(player.vel, scale(acc, dt));
    let next_vel_square_len = square_len(next_vel);
    let drag = 1.0 / (next_vel_square_len * friction).exp();
    let next_vel = scale(next_vel, drag);
    let avg_vel = scale(add(player.vel, next_vel), 0.5);
    let dir = stream.at(player.pos);
    let next_pos = add(player.pos, add(scale(dir, dt), scale(avg_vel, dt)));

    // Cancel the move entirely if it would land inside any rock.
    let mut hits_rock = false;
    let rock_radius = ::settings::rocks::RADIUS;
    for rock in rocks.rocks.iter() {
        let diff = sub(rock.pos, next_pos);
        if len(diff) < rock_radius {
            hits_rock = true;
            break;
        }
    }

    if !hits_rock {
        player.pos = next_pos;
    }
}
|
//! An Asteroids-ish example game to show off ggez.
//! The idea is that this game is simple but still
//! non-trivial enough to be interesting.
extern crate ggez;
extern crate nalgebra;
extern crate rand;
use ggez::audio;
use ggez::conf;
use ggez::event::{self, EventHandler, KeyCode, KeyMods};
use ggez::graphics;
use ggez::nalgebra as na;
use ggez::timer;
use ggez::{Context, ContextBuilder, GameResult};
use std::env;
use std::path;
type Point2 = nalgebra::Point2<f32>;
type Vector2 = nalgebra::Vector2<f32>;
/// *********************************************************************
/// Basic stuff, make some helpers for vector functions.
/// ggez includes the nalgebra math library to provide lots of
/// math stuff We just add some helpers.
/// **********************************************************************
/// Create a unit vector representing the
/// given angle (in radians)
fn vec_from_angle(angle: f32) -> Vector2 {
    // sin_cos computes both components in one call; note the game uses
    // (sin, cos) ordering so angle 0 points along +y.
    let (sin, cos) = angle.sin_cos();
    Vector2::new(sin, cos)
}
/// Just makes a random `Vector2` with the given max magnitude.
fn random_vec(max_magnitude: f32) -> Vector2 {
    // Pick a uniformly random direction and an independent magnitude
    // in [0, max_magnitude).
    let direction = vec_from_angle(rand::random::<f32>() * 2.0 * std::f32::consts::PI);
    let magnitude = rand::random::<f32>() * max_magnitude;
    direction * magnitude
}
/// *********************************************************************
/// Now we define our Actor's.
/// An Actor is anything in the game world.
/// We're not *quite* making a real entity-component system but it's
/// pretty close. For a more complicated game you would want a
/// real ECS, but for this it's enough to say that all our game objects
/// contain pretty much the same data.
/// **********************************************************************
/// Discriminates the three kinds of game objects an `Actor` can be.
#[derive(Debug)]
enum ActorType {
    /// The player's ship.
    Player,
    /// An asteroid.
    Rock,
    /// A projectile fired by the player.
    Shot,
}
/// A single game object: player, rock, or shot.
#[derive(Debug)]
struct Actor {
    /// Which kind of object this is.
    tag: ActorType,
    /// World-space position (origin at screen center, +y up).
    pos: Point2,
    /// Heading angle in radians.
    facing: f32,
    /// Current velocity in world units (pixels) per second.
    velocity: Vector2,
    /// Angular velocity added to `facing` each physics step.
    ang_vel: f32,
    /// Radius used for circle-overlap collision tests.
    bbox_size: f32,

    // I am going to lazily overload "life" with a
    // double meaning:
    // for shots, it is the time left to live,
    // for players and rocks, it is the actual hit points.
    life: f32,
}
// Initial "life" values: hit points for player/rocks, seconds-to-live for shots.
const PLAYER_LIFE: f32 = 1.0;
const SHOT_LIFE: f32 = 2.0;
const ROCK_LIFE: f32 = 1.0;

// Collision radii in pixels for each actor type.
const PLAYER_BBOX: f32 = 12.0;
const ROCK_BBOX: f32 = 12.0;
const SHOT_BBOX: f32 = 6.0;

// Maximum magnitude of a freshly spawned rock's random velocity.
const MAX_ROCK_VEL: f32 = 50.0;
/// *********************************************************************
/// Now we have some constructor functions for different game objects.
/// **********************************************************************
/// Builds the player actor at the world origin, facing angle 0, at rest.
fn create_player() -> Actor {
    Actor {
        tag: ActorType::Player,
        pos: Point2::origin(),
        facing: 0.,
        velocity: na::zero(),
        ang_vel: 0.,
        bbox_size: PLAYER_BBOX,
        life: PLAYER_LIFE,
    }
}
/// Builds a rock actor at the origin; `create_rocks` randomizes its
/// position and velocity afterwards.
fn create_rock() -> Actor {
    Actor {
        tag: ActorType::Rock,
        pos: Point2::origin(),
        facing: 0.,
        velocity: na::zero(),
        ang_vel: 0.,
        bbox_size: ROCK_BBOX,
        life: ROCK_LIFE,
    }
}
/// Builds a shot actor; the caller positions it and sets its velocity.
/// Shots spin at `SHOT_ANG_VEL` and live for `SHOT_LIFE` seconds.
fn create_shot() -> Actor {
    Actor {
        tag: ActorType::Shot,
        pos: Point2::origin(),
        facing: 0.,
        velocity: na::zero(),
        ang_vel: SHOT_ANG_VEL,
        bbox_size: SHOT_BBOX,
        life: SHOT_LIFE,
    }
}
/// Create the given number of rocks.
/// Makes sure that none of them are within the
/// given exclusion zone (nominally the player)
/// Note that this *could* create rocks outside the
/// bounds of the playing field, so it should be
/// called before `wrap_actor_position()` happens.
fn create_rocks(num: i32, exclusion: Point2, min_radius: f32, max_radius: f32) -> Vec<Actor> {
    assert!(max_radius > min_radius);
    let mut rocks = Vec::new();
    for _ in 0..num {
        let mut rock = create_rock();
        // Random polar offset from the exclusion point, at least
        // `min_radius` away, guaranteeing clearance from the player.
        let angle = rand::random::<f32>() * 2.0 * std::f32::consts::PI;
        let distance = rand::random::<f32>() * (max_radius - min_radius) + min_radius;
        rock.pos = exclusion + vec_from_angle(angle) * distance;
        rock.velocity = random_vec(MAX_ROCK_VEL);
        rocks.push(rock);
    }
    rocks
}
/// *********************************************************************
/// Now we make functions to handle physics. We do simple Newtonian
/// physics (so we do have inertia), and cap the max speed so that we
/// don't have to worry too much about small objects clipping through
/// each other.
///
/// Our unit of world space is simply pixels, though we do transform
/// the coordinate system so that +y is up and -y is down.
/// **********************************************************************
// Shot muzzle speed in pixels per second, and its spin per physics step.
const SHOT_SPEED: f32 = 200.0;
const SHOT_ANG_VEL: f32 = 0.1;

// Acceleration in pixels per second.
const PLAYER_THRUST: f32 = 100.0;
// Rotation in radians per second.
const PLAYER_TURN_RATE: f32 = 3.0;
// Seconds between shots
const PLAYER_SHOT_TIME: f32 = 0.5;
/// Applies the current input state to the player for this time step:
/// the x-axis turns the ship, a positive y-axis fires the thruster.
fn player_handle_input(actor: &mut Actor, input: &InputState, dt: f32) {
    actor.facing += dt * PLAYER_TURN_RATE * input.xaxis;
    let thrusting = input.yaxis > 0.0;
    if thrusting {
        player_thrust(actor, dt);
    }
}
/// Accelerates the actor along its facing direction by `PLAYER_THRUST`
/// for `dt` seconds.
fn player_thrust(actor: &mut Actor, dt: f32) {
    let heading = vec_from_angle(actor.facing);
    let thrust = heading * (PLAYER_THRUST);
    actor.velocity += thrust * (dt);
}
const MAX_PHYSICS_VEL: f32 = 250.0;

/// Integrates one physics step: clamps speed to `MAX_PHYSICS_VEL`,
/// advances position by velocity, and applies angular velocity to facing.
fn update_actor_position(actor: &mut Actor, dt: f32) {
    // Compare squared magnitudes so the sqrt only happens when clamping.
    let speed_sq = actor.velocity.norm_squared();
    if speed_sq > MAX_PHYSICS_VEL.powi(2) {
        actor.velocity = actor.velocity / speed_sq.sqrt() * MAX_PHYSICS_VEL;
    }
    actor.pos += actor.velocity * (dt);
    actor.facing += actor.ang_vel;
}
/// Takes an actor and wraps its position to the bounds of the
/// screen, so if it goes off the left side of the screen it
/// will re-enter on the right side and so on.
fn wrap_actor_position(actor: &mut Actor, sx: f32, sy: f32) {
    // Wrap one coordinate into [-span/2, span/2] by shifting a full span.
    fn wrap(coord: &mut f32, span: f32) {
        let bound = span / 2.0;
        if *coord > bound {
            *coord -= span;
        } else if *coord < -bound {
            *coord += span;
        }
    }
    wrap(&mut actor.pos.x, sx);
    wrap(&mut actor.pos.y, sy);
}
/// Ticks down a timed actor's remaining lifetime (used for shots, whose
/// `life` field holds seconds-to-live).
fn handle_timed_life(actor: &mut Actor, dt: f32) {
    actor.life -= dt;
}
/// Translates the world coordinate system, which
/// has Y pointing up and the origin at the center,
/// to the screen coordinate system, which has Y
/// pointing downward and the origin at the top-left,
fn world_to_screen_coords(screen_width: f32, screen_height: f32, point: Point2) -> Point2 {
    let half_w = screen_width / 2.0;
    let half_h = screen_height / 2.0;
    // Shift the origin to the corner and flip the y axis.
    Point2::new(point.x + half_w, screen_height - (point.y + half_h))
}
/// **********************************************************************
/// So that was the real meat of our game. Now we just need a structure
/// to contain the images, sounds, etc. that we need to hang on to; this
/// is our "asset management system". All the file names and such are
/// just hard-coded.
/// **********************************************************************
/// All loaded game resources (images, font, sounds), held for the
/// lifetime of the game. File paths are hard-coded in `Assets::new`.
struct Assets {
    player_image: graphics::Image,
    shot_image: graphics::Image,
    rock_image: graphics::Image,
    font: graphics::Font,
    shot_sound: audio::Source,
    hit_sound: audio::Source,
}
impl Assets {
    /// Loads every asset from the context's resource path.
    ///
    /// # Errors
    /// Returns the first `GameError` from any failed load.
    fn new(ctx: &mut Context) -> GameResult<Assets> {
        let player_image = graphics::Image::new(ctx, "/player.png")?;
        let shot_image = graphics::Image::new(ctx, "/shot.png")?;
        let rock_image = graphics::Image::new(ctx, "/rock.png")?;
        let font = graphics::Font::new(ctx, "/DejaVuSerif.ttf")?;
        let shot_sound = audio::Source::new(ctx, "/pew.ogg")?;
        let hit_sound = audio::Source::new(ctx, "/boom.ogg")?;

        Ok(Assets {
            player_image,
            shot_image,
            rock_image,
            font,
            shot_sound,
            hit_sound,
        })
    }

    /// Maps an actor to the image used to draw it, by its `tag`.
    fn actor_image(&mut self, actor: &Actor) -> &mut graphics::Image {
        match actor.tag {
            ActorType::Player => &mut self.player_image,
            ActorType::Rock => &mut self.rock_image,
            ActorType::Shot => &mut self.shot_image,
        }
    }
}
/// **********************************************************************
/// The `InputState` is exactly what it sounds like, it just keeps track of
/// the user's input state so that we turn keyboard events into something
/// state-based and device-independent.
/// **********************************************************************
/// Device-independent snapshot of the player's input.
#[derive(Debug)]
struct InputState {
    /// Turn axis: negative = left, positive = right.
    xaxis: f32,
    /// Thrust axis: positive means thrust.
    yaxis: f32,
    /// Whether the fire button is held.
    fire: bool,
}
impl Default for InputState {
fn default() -> Self {
InputState {
xaxis: 0.0,
yaxis: 0.0,
fire: false,
}
}
}
/// **********************************************************************
/// Now we're getting into the actual game loop. The `MainState` is our
/// game's "global" state, it keeps track of everything we need for
/// actually running the game.
///
/// Our game objects are simply a vector for each actor type, and we
/// probably mingle gameplay-state (like score) and hardware-state
/// (like `input`) a little more than we should, but for something
/// this small it hardly matters.
/// **********************************************************************
/// The game's global state: all live actors, progression counters,
/// loaded assets, cached screen size, and input bookkeeping.
struct MainState {
    player: Actor,
    shots: Vec<Actor>,
    rocks: Vec<Actor>,
    /// Current wave number; increments each time the rocks are cleared.
    level: i32,
    score: i32,
    assets: Assets,
    screen_width: f32,
    screen_height: f32,
    input: InputState,
    /// Seconds until the player may fire again.
    player_shot_timeout: f32,
}
impl MainState {
    /// Loads assets, spawns the player and the first wave of rocks, and
    /// captures the configured window dimensions.
    fn new(ctx: &mut Context) -> GameResult<MainState> {
        println!("Game resource path: {:?}", ctx.filesystem);

        print_instructions();

        let assets = Assets::new(ctx)?;
        // let score_disp = graphics::Text::new(ctx, "score", &assets.font)?;
        // let level_disp = graphics::Text::new(ctx, "level", &assets.font)?;

        let player = create_player();
        let rocks = create_rocks(5, player.pos, 100.0, 250.0);

        let s = MainState {
            player,
            shots: Vec::new(),
            rocks,
            level: 0,
            score: 0,
            assets,
            screen_width: ctx.conf.window_mode.width,
            screen_height: ctx.conf.window_mode.height,
            input: InputState::default(),
            player_shot_timeout: 0.0,
        };

        Ok(s)
    }

    /// Spawns a shot at the player's position and heading, resets the
    /// fire cooldown, and plays the shot sound (ignoring audio errors).
    fn fire_player_shot(&mut self) {
        self.player_shot_timeout = PLAYER_SHOT_TIME;

        let player = &self.player;
        let mut shot = create_shot();
        shot.pos = player.pos;
        shot.facing = player.facing;
        let direction = vec_from_angle(shot.facing);
        shot.velocity.x = SHOT_SPEED * direction.x;
        shot.velocity.y = SHOT_SPEED * direction.y;

        self.shots.push(shot);
        let _ = self.assets.shot_sound.play();
    }

    /// Drops shots and rocks whose `life` has run out.
    fn clear_dead_stuff(&mut self) {
        self.shots.retain(|s| s.life > 0.0);
        self.rocks.retain(|r| r.life > 0.0);
    }

    /// Circle-overlap collision pass: a rock touching the player kills the
    /// player; a shot touching a rock destroys both and scores a point.
    fn handle_collisions(&mut self) {
        for rock in &mut self.rocks {
            let pdistance = rock.pos - self.player.pos;
            if pdistance.norm() < (self.player.bbox_size + rock.bbox_size) {
                self.player.life = 0.0;
            }
            for shot in &mut self.shots {
                let distance = shot.pos - rock.pos;
                if distance.norm() < (shot.bbox_size + rock.bbox_size) {
                    shot.life = 0.0;
                    rock.life = 0.0;
                    self.score += 1;

                    let _ = self.assets.hit_sound.play();
                }
            }
        }
    }

    /// When every rock is gone, advance the level and spawn a larger wave
    /// around (but not on top of) the player.
    fn check_for_level_respawn(&mut self) {
        if self.rocks.is_empty() {
            self.level += 1;
            let r = create_rocks(self.level + 5, self.player.pos, 100.0, 250.0);
            self.rocks.extend(r);
        }
    }

    // fn update_ui(&mut self, ctx: &mut Context) {
    //     let score_str = format!("Score: {}", self.score);
    //     let level_str = format!("Level: {}", self.level);
    //     let score_text = graphics::Text::new(ctx, &score_str, &self.assets.font).unwrap();
    //     let level_text = graphics::Text::new(ctx, &level_str, &self.assets.font).unwrap();

    //     self.score_display = score_text;
    //     self.level_display = level_text;
    // }
}
/// **********************************************************************
/// A couple of utility functions.
/// **********************************************************************
/// Print a short how-to-play blurb to stdout.
fn print_instructions() {
    let blurb = [
        "",
        "Welcome to ASTROBLASTO!",
        "",
        "How to play:",
        "L/R arrow keys rotate your ship, up thrusts, space bar fires",
        "",
    ];
    for line in &blurb {
        println!("{}", line);
    }
}
extern crate mint;
/// Draw a single actor: convert its world position to screen coordinates
/// and rotate the sprite to match its facing.
fn draw_actor(
    assets: &mut Assets,
    ctx: &mut Context,
    actor: &Actor,
    world_coords: (f32, f32),
) -> GameResult {
    let (screen_w, screen_h) = world_coords;
    let pos = world_to_screen_coords(screen_w, screen_h, actor.pos);
    let image = assets.actor_image(actor);
    let drawparams = graphics::DrawParam::new()
        .dest(pos)
        .rotation(actor.facing as f32)
        // Draw/rotate around the sprite's center rather than its top-left.
        .offset(Point2::new(0.5, 0.5));
    graphics::draw(ctx, image, drawparams)
}
/// **********************************************************************
/// Now we implement the `EventHandler` trait from `ggez::event`, which provides
/// ggez with callbacks for updating and drawing our game, as well as
/// handling input events.
/// **********************************************************************
impl EventHandler for MainState {
    /// Fixed-timestep update: runs the game logic at 60 updates per second
    /// regardless of how often ggez calls `update`.
    fn update(&mut self, ctx: &mut Context) -> GameResult {
        const DESIRED_FPS: u32 = 60;

        while timer::check_update_time(ctx, DESIRED_FPS) {
            let seconds = 1.0 / (DESIRED_FPS as f32);

            // Update the player state based on the user input.
            player_handle_input(&mut self.player, &self.input, seconds);
            self.player_shot_timeout -= seconds;
            if self.input.fire && self.player_shot_timeout < 0.0 {
                self.fire_player_shot();
            }

            // Update the physics for all actors.
            // First the player...
            update_actor_position(&mut self.player, seconds);
            wrap_actor_position(
                &mut self.player,
                self.screen_width as f32,
                self.screen_height as f32,
            );

            // Then the shots...
            for act in &mut self.shots {
                update_actor_position(act, seconds);
                wrap_actor_position(act, self.screen_width as f32, self.screen_height as f32);
                handle_timed_life(act, seconds);
            }

            // And finally the rocks.
            for act in &mut self.rocks {
                update_actor_position(act, seconds);
                wrap_actor_position(act, self.screen_width as f32, self.screen_height as f32);
            }

            // Handle the results of things moving:
            // collision detection, object death, and if
            // we have killed all the rocks in the level,
            // spawn more of them.
            self.handle_collisions();
            self.clear_dead_stuff();
            self.check_for_level_respawn();

            // Finally we check for our end state.
            // I want to have a nice death screen eventually,
            // but for now we just quit.
            if self.player.life <= 0.0 {
                println!("Game over!");
                let _ = ctx.quit();
            }
        }

        Ok(())
    }

    /// Render one frame: every actor, then the level/score HUD text.
    fn draw(&mut self, ctx: &mut Context) -> GameResult {
        // Our drawing is quite simple.
        // Just clear the screen...
        graphics::clear(ctx, graphics::BLACK);

        // Loop over all objects drawing them...
        // (Scoped so the mutable borrow of `assets` ends before we
        // borrow `self.assets.font` for the HUD below.)
        {
            let assets = &mut self.assets;
            let coords = (self.screen_width, self.screen_height);

            let p = &self.player;
            draw_actor(assets, ctx, p, coords)?;
            for s in &self.shots {
                draw_actor(assets, ctx, s, coords)?;
            }
            for r in &self.rocks {
                draw_actor(assets, ctx, r, coords)?;
            }
        }

        // And draw the GUI elements in the right places.
        let level_dest = Point2::new(10.0, 10.0);
        let score_dest = Point2::new(200.0, 10.0);
        let level_str = format!("Level: {}", self.level);
        let score_str = format!("Score: {}", self.score);
        let level_display = graphics::Text::new((level_str, self.assets.font, 32.0));
        let score_display = graphics::Text::new((score_str, self.assets.font, 32.0));
        graphics::draw(ctx, &level_display, (level_dest, 0.0, graphics::WHITE))?;
        graphics::draw(ctx, &score_display, (score_dest, 0.0, graphics::WHITE))?;

        // Then we flip the screen...
        graphics::present(ctx)?;

        // And yield the timeslice
        // This tells the OS that we're done using the CPU but it should
        // get back to this program as soon as it can.
        // This ideally prevents the game from using 100% CPU all the time
        // even if vsync is off.
        // The actual behavior can be a little platform-specific.
        timer::yield_now();
        Ok(())
    }

    // Handle key events. These just map keyboard events
    // and alter our input state appropriately.
    fn key_down_event(
        &mut self,
        ctx: &mut Context,
        keycode: KeyCode,
        _keymod: KeyMods,
        _repeat: bool,
    ) {
        match keycode {
            KeyCode::Up => {
                self.input.yaxis = 1.0;
            }
            KeyCode::Left => {
                self.input.xaxis = -1.0;
            }
            KeyCode::Right => {
                self.input.xaxis = 1.0;
            }
            KeyCode::Space => {
                self.input.fire = true;
            }
            // P captures a screenshot to the resource directory.
            KeyCode::P => {
                let img = graphics::screenshot(ctx).expect("Could not take screenshot");
                img.encode(ctx, graphics::ImageFormat::Png, "/screenshot.png")
                    .expect("Could not save screenshot");
            }
            KeyCode::Escape => ctx.quit(),
            _ => (), // Do nothing
        }
    }

    /// Key releases zero out the corresponding input axis / fire flag.
    fn key_up_event(&mut self, _ctx: &mut Context, keycode: KeyCode, _keymod: KeyMods) {
        match keycode {
            KeyCode::Up => {
                self.input.yaxis = 0.0;
            }
            KeyCode::Left | KeyCode::Right => {
                self.input.xaxis = 0.0;
            }
            KeyCode::Space => {
                self.input.fire = false;
            }
            _ => (), // Do nothing
        }
    }
}
/// **********************************************************************
/// Finally our main function! Which merely sets up a config and calls
/// `ggez::event::run()` with our `EventHandler` type.
/// **********************************************************************
/// Set up the ggez context (window config + resource path) and hand
/// control to the event loop with a fresh `MainState`.
pub fn main() -> GameResult {
    let mut cb = ContextBuilder::new("astroblasto", "ggez")
        .window_setup(conf::WindowSetup::default().title("Astroblasto!"))
        .window_mode(conf::WindowMode::default().dimensions(640.0, 480.0));

    // We add the CARGO_MANIFEST_DIR/resources to the filesystems paths so
    // we look in the cargo project for files.
    if let Ok(manifest_dir) = env::var("CARGO_MANIFEST_DIR") {
        let mut path = path::PathBuf::from(manifest_dir);
        path.push("resources");
        println!("Adding path {:?}", path);
        // We need this re-assignment alas, see
        // https://aturon.github.io/ownership/builders.html
        // under "Consuming builders"
        cb = cb.add_resource_path(path);
    } else {
        println!("Not building from cargo? Ok.");
    }

    let (ctx, events_loop) = &mut cb.build()?;

    let game = &mut MainState::new(ctx)?;
    event::run(ctx, events_loop, game)
}
Made Astroblasto use spatial sounds
//! An Asteroids-ish example game to show off ggez.
//! The idea is that this game is simple but still
//! non-trivial enough to be interesting.
extern crate ggez;
extern crate nalgebra;
extern crate rand;
use ggez::audio;
use ggez::conf;
use ggez::event::{self, EventHandler, KeyCode, KeyMods};
use ggez::graphics;
use ggez::nalgebra as na;
use ggez::timer;
use ggez::{Context, ContextBuilder, GameResult};
use std::env;
use std::path;
type Point2 = nalgebra::Point2<f32>;
type Vector2 = nalgebra::Vector2<f32>;
/// *********************************************************************
/// Basic stuff, make some helpers for vector functions.
/// ggez includes the nalgebra math library to provide lots of
/// math stuff We just add some helpers.
/// **********************************************************************
/// Create a unit vector for the given angle (in radians).
/// Components are (sin, cos), so angle 0 points along +y.
fn vec_from_angle(angle: f32) -> Vector2 {
    Vector2::new(angle.sin(), angle.cos())
}
/// Make a random `Vector2`: uniformly random direction, uniformly random
/// magnitude in `[0, max_magnitude)`.
fn random_vec(max_magnitude: f32) -> Vector2 {
    let direction = vec_from_angle(rand::random::<f32>() * 2.0 * std::f32::consts::PI);
    direction * (rand::random::<f32>() * max_magnitude)
}
/// *********************************************************************
/// Now we define our Actor's.
/// An Actor is anything in the game world.
/// We're not *quite* making a real entity-component system but it's
/// pretty close. For a more complicated game you would want a
/// real ECS, but for this it's enough to say that all our game objects
/// contain pretty much the same data.
/// **********************************************************************
/// Discriminates the three kinds of game objects; used to pick the
/// sprite drawn for a given `Actor`.
#[derive(Debug)]
enum ActorType {
    Player,
    Rock,
    Shot,
}
/// A single game object. All object kinds share this one layout;
/// `tag` says which kind it is.
#[derive(Debug)]
struct Actor {
    tag: ActorType,
    pos: Point2,       // world position: origin at screen center, +y up
    facing: f32,       // orientation in radians
    velocity: Vector2, // pixels per second
    ang_vel: f32,      // spin applied once per physics update
    bbox_size: f32,    // collision radius in pixels
    // I am going to lazily overload "life" with a
    // double meaning:
    // for shots, it is the time left to live,
    // for players and rocks, it is the actual hit points.
    life: f32,
}
// Starting hit points (player/rock) or lifetime in seconds (shot).
const PLAYER_LIFE: f32 = 1.0;
const SHOT_LIFE: f32 = 2.0;
const ROCK_LIFE: f32 = 1.0;

// Collision radii, in pixels.
const PLAYER_BBOX: f32 = 12.0;
const ROCK_BBOX: f32 = 12.0;
const SHOT_BBOX: f32 = 6.0;

// Maximum speed (pixels/second) a freshly spawned rock can have.
const MAX_ROCK_VEL: f32 = 50.0;
/// *********************************************************************
/// Now we have some constructor functions for different game objects.
/// **********************************************************************
/// Construct the player actor at the world origin, facing +y (angle 0),
/// at rest, with full life.
fn create_player() -> Actor {
    Actor {
        tag: ActorType::Player,
        pos: Point2::origin(),
        facing: 0.,
        velocity: na::zero(),
        ang_vel: 0.,
        bbox_size: PLAYER_BBOX,
        life: PLAYER_LIFE,
    }
}
/// Construct a rock at the origin with no velocity; callers position it
/// and give it motion (see `create_rocks`).
fn create_rock() -> Actor {
    Actor {
        tag: ActorType::Rock,
        pos: Point2::origin(),
        facing: 0.,
        velocity: na::zero(),
        ang_vel: 0.,
        bbox_size: ROCK_BBOX,
        life: ROCK_LIFE,
    }
}
/// Construct a shot at the origin. Shots spin (SHOT_ANG_VEL) and use
/// `life` as the remaining time-to-live in seconds.
fn create_shot() -> Actor {
    Actor {
        tag: ActorType::Shot,
        pos: Point2::origin(),
        facing: 0.,
        velocity: na::zero(),
        ang_vel: SHOT_ANG_VEL,
        bbox_size: SHOT_BBOX,
        life: SHOT_LIFE,
    }
}
/// Create the given number of rocks, each placed at a random angle between
/// `min_radius` and `max_radius` away from `exclusion` (nominally the
/// player) with a random velocity. Note that this *could* create rocks
/// outside the bounds of the playing field, so it should be called before
/// `wrap_actor_position()` happens.
fn create_rocks(num: i32, exclusion: Point2, min_radius: f32, max_radius: f32) -> Vec<Actor> {
    assert!(max_radius > min_radius);
    let mut rocks = Vec::new();
    for _ in 0..num {
        let angle = rand::random::<f32>() * 2.0 * std::f32::consts::PI;
        let distance = rand::random::<f32>() * (max_radius - min_radius) + min_radius;
        let mut rock = create_rock();
        rock.pos = exclusion + vec_from_angle(angle) * distance;
        rock.velocity = random_vec(MAX_ROCK_VEL);
        rocks.push(rock);
    }
    rocks
}
/// *********************************************************************
/// Now we make functions to handle physics. We do simple Newtonian
/// physics (so we do have inertia), and cap the max speed so that we
/// don't have to worry too much about small objects clipping through
/// each other.
///
/// Our unit of world space is simply pixels, though we do transform
/// the coordinate system so that +y is up and -y is down.
/// **********************************************************************
// Shot speed, in pixels per second.
const SHOT_SPEED: f32 = 200.0;
// Shot spin, applied once per physics update (not scaled by dt).
const SHOT_ANG_VEL: f32 = 0.1;

// Acceleration in pixels per second.
const PLAYER_THRUST: f32 = 100.0;
// Rotation in radians per second.
const PLAYER_TURN_RATE: f32 = 3.0;
// Seconds between shots
const PLAYER_SHOT_TIME: f32 = 0.5;
/// Apply one timestep of user input to the player: turn according to the
/// x-axis, and thrust while the y-axis is pushed forward.
fn player_handle_input(actor: &mut Actor, input: &InputState, dt: f32) {
    actor.facing += dt * PLAYER_TURN_RATE * input.xaxis;

    if input.yaxis <= 0.0 {
        return;
    }
    player_thrust(actor, dt);
}
/// Accelerate the actor along its current facing for `dt` seconds.
fn player_thrust(actor: &mut Actor, dt: f32) {
    let thrust = vec_from_angle(actor.facing) * (PLAYER_THRUST);
    actor.velocity += thrust * (dt);
}
// Global speed cap for every actor, in pixels per second.
const MAX_PHYSICS_VEL: f32 = 250.0;

/// Integrate one physics step: clamp speed, advance position, apply spin.
fn update_actor_position(actor: &mut Actor, dt: f32) {
    // Clamp the velocity to the max efficiently
    // (compare squared norms so the sqrt only runs when clamping).
    let norm_sq = actor.velocity.norm_squared();
    if norm_sq > MAX_PHYSICS_VEL.powi(2) {
        actor.velocity = actor.velocity / norm_sq.sqrt() * MAX_PHYSICS_VEL;
    }
    let dv = actor.velocity * (dt);
    actor.pos += dv;
    // NOTE(review): ang_vel is added per *update*, not scaled by dt, so spin
    // speed is tied to the fixed 60 Hz timestep — confirm this is intended.
    actor.facing += actor.ang_vel;
}
/// Toroidal wrap: an actor leaving one edge of the playfield re-enters at
/// the opposite edge. The world origin is the screen center, so the bounds
/// are plus/minus half the screen size on each axis.
fn wrap_actor_position(actor: &mut Actor, sx: f32, sy: f32) {
    let half_w = sx / 2.0;
    let half_h = sy / 2.0;

    if actor.pos.x > half_w {
        actor.pos.x -= sx;
    } else if actor.pos.x < -half_w {
        actor.pos.x += sx;
    }

    if actor.pos.y > half_h {
        actor.pos.y -= sy;
    } else if actor.pos.y < -half_h {
        actor.pos.y += sy;
    }
}
/// Tick down a timed actor's remaining lifetime (used for shots, whose
/// `life` field holds seconds left to live).
fn handle_timed_life(actor: &mut Actor, dt: f32) {
    actor.life -= dt;
}
/// Translate a point from world space (Y up, origin at the center) to
/// screen space (Y down, origin at the top-left).
fn world_to_screen_coords(screen_width: f32, screen_height: f32, point: Point2) -> Point2 {
    Point2::new(
        point.x + screen_width / 2.0,
        screen_height - (point.y + screen_height / 2.0),
    )
}
/// Translate a world position into the normalized 3D coordinates used by
/// the spatial audio system: each axis scaled into roughly [-1, 1]
/// (listener at the origin), with z fixed at 0.
fn world_to_audio_coords(screen_width: f32, screen_height: f32, point: Point2) -> [f32; 3] {
    [
        point.x * 2.0 / screen_width,
        point.y * 2.0 / screen_height,
        0.0,
    ]
}
/// **********************************************************************
/// So that was the real meat of our game. Now we just need a structure
/// to contain the images, sounds, etc. that we need to hang on to; this
/// is our "asset management system". All the file names and such are
/// just hard-coded.
/// **********************************************************************
/// All loaded resources (sprites, font, sounds), loaded once and shared.
struct Assets {
    player_image: graphics::Image,
    shot_image: graphics::Image,
    rock_image: graphics::Image,
    font: graphics::Font,
    // Todo: add a music track to show non-spatial audio?
    shot_sound: audio::SpatialSource,
    hit_sound: audio::SpatialSource,
}
impl Assets {
    /// Load every image, font and sound from the resource directory.
    /// Fails if any file is missing or unreadable.
    fn new(ctx: &mut Context) -> GameResult<Assets> {
        let player_image = graphics::Image::new(ctx, "/player.png")?;
        let shot_image = graphics::Image::new(ctx, "/shot.png")?;
        let rock_image = graphics::Image::new(ctx, "/rock.png")?;
        let font = graphics::Font::new(ctx, "/DejaVuSerif.ttf")?;

        let mut shot_sound = audio::SpatialSource::new(ctx, "/pew.ogg")?;
        let mut hit_sound = audio::SpatialSource::new(ctx, "/boom.ogg")?;
        // Put the listener's ears one unit left/right of the origin; sound
        // positions are later given in this same normalized space
        // (see `world_to_audio_coords`).
        shot_sound.set_ears([-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]);
        hit_sound.set_ears([-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]);

        Ok(Assets {
            player_image,
            shot_image,
            rock_image,
            font,
            shot_sound,
            hit_sound,
        })
    }

    /// Pick the sprite matching an actor's type tag.
    fn actor_image(&mut self, actor: &Actor) -> &mut graphics::Image {
        match actor.tag {
            ActorType::Player => &mut self.player_image,
            ActorType::Rock => &mut self.rock_image,
            ActorType::Shot => &mut self.shot_image,
        }
    }
}
/// **********************************************************************
/// The `InputState` is exactly what it sounds like, it just keeps track of
/// the user's input state so that we turn keyboard events into something
/// state-based and device-independent.
/// **********************************************************************
#[derive(Debug)]
struct InputState {
    xaxis: f32, // -1.0 (left) .. 1.0 (right)
    yaxis: f32, // 1.0 while thrusting, 0.0 otherwise
    fire: bool, // space bar held
}
impl Default for InputState {
    /// Neutral input: no turning, no thrust, not firing.
    fn default() -> Self {
        InputState {
            xaxis: 0.0,
            yaxis: 0.0,
            fire: false,
        }
    }
}
/// **********************************************************************
/// Now we're getting into the actual game loop. The `MainState` is our
/// game's "global" state, it keeps track of everything we need for
/// actually running the game.
///
/// Our game objects are simply a vector for each actor type, and we
/// probably mingle gameplay-state (like score) and hardware-state
/// (like `input`) a little more than we should, but for something
/// this small it hardly matters.
/// **********************************************************************
struct MainState {
    player: Actor,
    shots: Vec<Actor>,
    rocks: Vec<Actor>,
    level: i32,
    score: i32,
    assets: Assets,
    screen_width: f32,
    screen_height: f32,
    input: InputState,
    // Seconds until the player may fire again; counts down each update.
    player_shot_timeout: f32,
}
}
impl MainState {
    /// Build the initial game state: print instructions, load all assets,
    /// and create the player plus the first wave of rocks.
    fn new(ctx: &mut Context) -> GameResult<MainState> {
        println!("Game resource path: {:?}", ctx.filesystem);
        print_instructions();

        let assets = Assets::new(ctx)?;
        let player = create_player();
        // Spawn 5 rocks between 100 and 250 px away from the player.
        let rocks = create_rocks(5, player.pos, 100.0, 250.0);

        let s = MainState {
            player,
            shots: Vec::new(),
            rocks,
            level: 0,
            score: 0,
            assets,
            screen_width: ctx.conf.window_mode.width,
            screen_height: ctx.conf.window_mode.height,
            input: InputState::default(),
            player_shot_timeout: 0.0,
        };

        Ok(s)
    }

    /// Spawn a shot at the player's position travelling in the direction the
    /// player faces, position the firing sound there, and play it. Resets
    /// the shot cooldown.
    fn fire_player_shot(&mut self) {
        self.player_shot_timeout = PLAYER_SHOT_TIME;

        let player = &self.player;
        let mut shot = create_shot();
        shot.pos = player.pos;
        shot.facing = player.facing;
        let direction = vec_from_angle(shot.facing);
        shot.velocity.x = SHOT_SPEED * direction.x;
        shot.velocity.y = SHOT_SPEED * direction.y;
        self.shots.push(shot);

        // Make the "pew" come from where the player is.
        let pos = world_to_audio_coords(self.screen_width, self.screen_height, player.pos);
        self.assets.shot_sound.set_position(pos);
        // Ignore playback errors; a missed sound effect is not fatal.
        let _ = self.assets.shot_sound.play();
    }

    /// Remove all shots and rocks whose `life` has run out.
    fn clear_dead_stuff(&mut self) {
        self.shots.retain(|s| s.life > 0.0);
        self.rocks.retain(|r| r.life > 0.0);
    }

    /// Naive O(rocks * shots) circle-vs-circle collision detection.
    /// A rock touching the player kills the player; a shot touching a rock
    /// destroys both, scores a point, and plays the hit sound located at
    /// the destroyed rock.
    fn handle_collisions(&mut self) {
        for rock in &mut self.rocks {
            let pdistance = rock.pos - self.player.pos;
            if pdistance.norm() < (self.player.bbox_size + rock.bbox_size) {
                self.player.life = 0.0;
            }
            for shot in &mut self.shots {
                let distance = shot.pos - rock.pos;
                if distance.norm() < (shot.bbox_size + rock.bbox_size) {
                    shot.life = 0.0;
                    rock.life = 0.0;
                    self.score += 1;

                    let pos =
                        world_to_audio_coords(self.screen_width, self.screen_height, rock.pos);
                    // Bug fix: position the *hit* sound at the rock. This
                    // previously called `shot_sound.set_position(pos)`, so
                    // the explosion always played from the hit sound's
                    // stale/default location.
                    self.assets.hit_sound.set_position(pos);
                    let _ = self.assets.hit_sound.play();
                }
            }
        }
    }

    /// Once every rock is destroyed, advance the level and spawn a larger
    /// wave (level + 5 rocks) around the player.
    fn check_for_level_respawn(&mut self) {
        if self.rocks.is_empty() {
            self.level += 1;
            let r = create_rocks(self.level + 5, self.player.pos, 100.0, 250.0);
            self.rocks.extend(r);
        }
    }
}
/// **********************************************************************
/// A couple of utility functions.
/// **********************************************************************
/// Print a short how-to-play blurb to stdout.
fn print_instructions() {
    let blurb = [
        "",
        "Welcome to ASTROBLASTO!",
        "",
        "How to play:",
        "L/R arrow keys rotate your ship, up thrusts, space bar fires",
        "",
    ];
    for line in &blurb {
        println!("{}", line);
    }
}
extern crate mint;
/// Draw a single actor: convert its world position to screen coordinates
/// and rotate the sprite to match its facing.
fn draw_actor(
    assets: &mut Assets,
    ctx: &mut Context,
    actor: &Actor,
    world_coords: (f32, f32),
) -> GameResult {
    let (screen_w, screen_h) = world_coords;
    let pos = world_to_screen_coords(screen_w, screen_h, actor.pos);
    let image = assets.actor_image(actor);
    let drawparams = graphics::DrawParam::new()
        .dest(pos)
        .rotation(actor.facing as f32)
        // Draw/rotate around the sprite's center rather than its top-left.
        .offset(Point2::new(0.5, 0.5));
    graphics::draw(ctx, image, drawparams)
}
/// **********************************************************************
/// Now we implement the `EventHandler` trait from `ggez::event`, which provides
/// ggez with callbacks for updating and drawing our game, as well as
/// handling input events.
/// **********************************************************************
impl EventHandler for MainState {
    /// Fixed-timestep update: runs the game logic at 60 updates per second
    /// regardless of how often ggez calls `update`.
    fn update(&mut self, ctx: &mut Context) -> GameResult {
        const DESIRED_FPS: u32 = 60;

        while timer::check_update_time(ctx, DESIRED_FPS) {
            let seconds = 1.0 / (DESIRED_FPS as f32);

            // Update the player state based on the user input.
            player_handle_input(&mut self.player, &self.input, seconds);
            self.player_shot_timeout -= seconds;
            if self.input.fire && self.player_shot_timeout < 0.0 {
                self.fire_player_shot();
            }

            // Update the physics for all actors.
            // First the player...
            update_actor_position(&mut self.player, seconds);
            wrap_actor_position(
                &mut self.player,
                self.screen_width as f32,
                self.screen_height as f32,
            );

            // Then the shots...
            for act in &mut self.shots {
                update_actor_position(act, seconds);
                wrap_actor_position(act, self.screen_width as f32, self.screen_height as f32);
                handle_timed_life(act, seconds);
            }

            // And finally the rocks.
            for act in &mut self.rocks {
                update_actor_position(act, seconds);
                wrap_actor_position(act, self.screen_width as f32, self.screen_height as f32);
            }

            // Handle the results of things moving:
            // collision detection, object death, and if
            // we have killed all the rocks in the level,
            // spawn more of them.
            self.handle_collisions();
            self.clear_dead_stuff();
            self.check_for_level_respawn();

            // Finally we check for our end state.
            // I want to have a nice death screen eventually,
            // but for now we just quit.
            if self.player.life <= 0.0 {
                println!("Game over!");
                let _ = ctx.quit();
            }
        }

        Ok(())
    }

    /// Render one frame: every actor, then the level/score HUD text.
    fn draw(&mut self, ctx: &mut Context) -> GameResult {
        // Our drawing is quite simple.
        // Just clear the screen...
        graphics::clear(ctx, graphics::BLACK);

        // Loop over all objects drawing them...
        // (Scoped so the mutable borrow of `assets` ends before we
        // borrow `self.assets.font` for the HUD below.)
        {
            let assets = &mut self.assets;
            let coords = (self.screen_width, self.screen_height);

            let p = &self.player;
            draw_actor(assets, ctx, p, coords)?;
            for s in &self.shots {
                draw_actor(assets, ctx, s, coords)?;
            }
            for r in &self.rocks {
                draw_actor(assets, ctx, r, coords)?;
            }
        }

        // And draw the GUI elements in the right places.
        let level_dest = Point2::new(10.0, 10.0);
        let score_dest = Point2::new(200.0, 10.0);
        let level_str = format!("Level: {}", self.level);
        let score_str = format!("Score: {}", self.score);
        let level_display = graphics::Text::new((level_str, self.assets.font, 32.0));
        let score_display = graphics::Text::new((score_str, self.assets.font, 32.0));
        graphics::draw(ctx, &level_display, (level_dest, 0.0, graphics::WHITE))?;
        graphics::draw(ctx, &score_display, (score_dest, 0.0, graphics::WHITE))?;

        // Then we flip the screen...
        graphics::present(ctx)?;

        // And yield the timeslice
        // This tells the OS that we're done using the CPU but it should
        // get back to this program as soon as it can.
        // This ideally prevents the game from using 100% CPU all the time
        // even if vsync is off.
        // The actual behavior can be a little platform-specific.
        timer::yield_now();
        Ok(())
    }

    // Handle key events. These just map keyboard events
    // and alter our input state appropriately.
    fn key_down_event(
        &mut self,
        ctx: &mut Context,
        keycode: KeyCode,
        _keymod: KeyMods,
        _repeat: bool,
    ) {
        match keycode {
            KeyCode::Up => {
                self.input.yaxis = 1.0;
            }
            KeyCode::Left => {
                self.input.xaxis = -1.0;
            }
            KeyCode::Right => {
                self.input.xaxis = 1.0;
            }
            KeyCode::Space => {
                self.input.fire = true;
            }
            // P captures a screenshot to the resource directory.
            KeyCode::P => {
                let img = graphics::screenshot(ctx).expect("Could not take screenshot");
                img.encode(ctx, graphics::ImageFormat::Png, "/screenshot.png")
                    .expect("Could not save screenshot");
            }
            KeyCode::Escape => ctx.quit(),
            _ => (), // Do nothing
        }
    }

    /// Key releases zero out the corresponding input axis / fire flag.
    fn key_up_event(&mut self, _ctx: &mut Context, keycode: KeyCode, _keymod: KeyMods) {
        match keycode {
            KeyCode::Up => {
                self.input.yaxis = 0.0;
            }
            KeyCode::Left | KeyCode::Right => {
                self.input.xaxis = 0.0;
            }
            KeyCode::Space => {
                self.input.fire = false;
            }
            _ => (), // Do nothing
        }
    }
}
/// **********************************************************************
/// Finally our main function! Which merely sets up a config and calls
/// `ggez::event::run()` with our `EventHandler` type.
/// **********************************************************************
/// Set up the ggez context (window config + resource path) and hand
/// control to the event loop with a fresh `MainState`.
pub fn main() -> GameResult {
    let mut cb = ContextBuilder::new("astroblasto", "ggez")
        .window_setup(conf::WindowSetup::default().title("Astroblasto!"))
        .window_mode(conf::WindowMode::default().dimensions(640.0, 480.0));

    // We add the CARGO_MANIFEST_DIR/resources to the filesystems paths so
    // we look in the cargo project for files.
    if let Ok(manifest_dir) = env::var("CARGO_MANIFEST_DIR") {
        let mut path = path::PathBuf::from(manifest_dir);
        path.push("resources");
        println!("Adding path {:?}", path);
        // We need this re-assignment alas, see
        // https://aturon.github.io/ownership/builders.html
        // under "Consuming builders"
        cb = cb.add_resource_path(path);
    } else {
        println!("Not building from cargo? Ok.");
    }

    let (ctx, events_loop) = &mut cb.build()?;

    let game = &mut MainState::new(ctx)?;
    event::run(ctx, events_loop, game)
}
|
mod component;
mod ui;
use crate::config::Config;
use crate::err::{self, Result};
use crate::file::TomlFile;
use crate::series::database::Database as SeriesDatabase;
use crate::series::{self, LastWatched, Series};
use crate::{try_opt_r, try_ret};
use anime::remote::RemoteService;
use chrono::{DateTime, Duration, Utc};
use clap::ArgMatches;
use component::command_prompt::{Command, CommandPrompt};
use component::log::{LogItem, StatusLog};
use snafu::ResultExt;
use std::mem;
use std::process;
use termion::event::Key;
use ui::{Event, Events, UI};
/// Entry point for the TUI: initialize the UI, config, remote service and
/// series database, then drive the event loop until the user quits.
pub fn run(args: &ArgMatches) -> Result<()> {
    let mut ui = UI::init()?;

    let mut cstate = {
        let config = Config::load_or_create()?;
        // init_remote falls back to offline mode on failure, so it is infallible.
        let remote = init_remote(args, &mut ui.status_log);
        let db = SeriesDatabase::open()?;

        CommonState { config, remote, db }
    };

    let mut ui_state = init_ui_state(&cstate, args)?;
    // Tick once per second so the UI can refresh without input.
    let events = Events::new(Duration::seconds(1));

    loop {
        ui.draw(&ui_state, cstate.remote.as_ref())?;
        ui.adjust_cursor(&ui_state)?;

        match events.next()? {
            Event::Input(key) => match key {
                // Exit
                // ('q' only quits when the user is not typing in a dialog.)
                Key::Char('q') if !ui_state.status_bar_state.in_input_dialog() => {
                    // Best-effort cleanup; errors are ignored since we're exiting.
                    cstate.db.close().ok();
                    ui.clear().ok();
                    break Ok(());
                }
                key => ui_state.process_key(&mut cstate, &mut ui.status_log, key),
            },
            Event::Tick => ui_state.process_tick(&cstate, &mut ui.status_log),
        }
    }
}
/// Create the remote service from CLI args. On failure (e.g. a missing or
/// rejected AniList token) this logs instructions for the user and falls
/// back to offline mode rather than returning an error.
fn init_remote(args: &ArgMatches, log: &mut StatusLog) -> Box<dyn RemoteService> {
    use anime::remote::anilist;
    use anime::remote::offline::Offline;

    match crate::init_remote(args, true) {
        Ok(remote) => remote,
        Err(err) => {
            match err {
                err::Error::NeedAniListToken => {
                    log.push(format!(
                        "No access token found. Go to {} \
                         and set your token with the 'token' command",
                        anilist::auth_url(crate::ANILIST_CLIENT_ID)
                    ));
                }
                _ => {
                    log.push(LogItem::failed("Logging in", err));
                    log.push(format!(
                        "If you need a new token, go to {} \
                         and set it with the 'token' command",
                        anilist::auth_url(crate::ANILIST_CLIENT_ID)
                    ));
                }
            }

            log.push("Continuing in offline mode");
            Box::new(Offline::new())
        }
    }
}
/// Items that are not tied to the UI and are commonly used together.
struct CommonState {
    config: Config,
    remote: Box<dyn RemoteService>, // AniList client, or the Offline fallback
    db: SeriesDatabase,
}
/// Build the initial UI state: load the series list, restore the last
/// watched series, and select either the series named on the command line
/// or the last watched one (falling back to index 0).
fn init_ui_state(cstate: &CommonState, args: &ArgMatches) -> Result<UIState> {
    let series = init_series_list(&cstate, args)?;
    let last_watched = LastWatched::load()?;

    let selected_series = {
        // Prefer the series given on the command line, then the last watched.
        let desired_series = args
            .value_of("series")
            .map(|s| s.into())
            .or_else(|| last_watched.get().clone());

        match desired_series {
            Some(desired) => series
                .iter()
                .position(|series| series.nickname() == desired)
                .unwrap_or(0),
            None => 0,
        }
    };

    let mut ui_state = UIState {
        series,
        selected_series,
        last_watched,
        watch_state: WatchState::Idle,
        status_bar_state: StatusBarState::default(),
        last_used_command: None,
    };

    // Load the selected series from the database up front so the UI has
    // something to display immediately.
    ui_state.ensure_cur_series_initialized(&cstate.db);
    Ok(ui_state)
}
/// Load every saved series name as an `Unloaded` placeholder. If the user
/// asked for a series that is not saved, fetch it from the remote, save
/// it, and append it to the list.
fn init_series_list(cstate: &CommonState, args: &ArgMatches) -> Result<Vec<SeriesStatus>> {
    let series_names = series::database::get_series_names(&cstate.db)?;

    // Did the user specify a series that we don't have?
    let new_desired_series = args.value_of("series").and_then(|desired| {
        if series_names.contains(&desired.to_string()) {
            None
        } else {
            Some(desired)
        }
    });

    let mut series = series_names
        .into_iter()
        .map(SeriesStatus::Unloaded)
        .collect();

    // If we have the series, there's nothing left to do
    let desired_series = match new_desired_series {
        Some(desired_series) => desired_series,
        None => return Ok(series),
    };

    let params = crate::series_params_from_args(args);

    // Otherwise, we'll need to fetch & save it
    let new_series = Series::from_remote(
        desired_series,
        params,
        &cstate.config,
        cstate.remote.as_ref(),
    )
    .and_then(|series| {
        series.save(&cstate.db)?;
        Ok(series)
    });

    // from_series takes the Result, so a failed fetch is kept in the list
    // (presumably as an Invalid entry) and the error stays visible.
    series.push(SeriesStatus::from_series(new_series, desired_series));
    Ok(series)
}
/// Current state of the UI.
pub struct UIState {
    series: Vec<SeriesStatus>,
    selected_series: usize, // index into `series`
    last_watched: LastWatched,
    watch_state: WatchState,
    status_bar_state: StatusBarState,
    // Remembered so the "run last command" key can replay it.
    last_used_command: Option<Command>,
}
impl UIState {
/// The currently selected series slot, if any.
fn cur_series_status(&self) -> Option<&SeriesStatus> {
    self.series.get(self.selected_series)
}
/// The currently selected series, only if it loaded successfully.
fn cur_valid_series(&self) -> Option<&Series> {
    self.cur_series_status()
        .and_then(|status| status.get_valid())
}
/// Mutable access to the currently selected series slot, if any.
fn cur_series_status_mut(&mut self) -> Option<&mut SeriesStatus> {
    self.series.get_mut(self.selected_series)
}
/// Mutable access to the currently selected series, only if it loaded
/// successfully.
fn cur_valid_series_mut(&mut self) -> Option<&mut Series> {
    self.cur_series_status_mut()
        .and_then(|status| status.get_valid_mut())
}
/// Make sure the currently selected series has been loaded from the
/// database. `Unloaded` entries are replaced with the load result
/// (`Valid` or `Invalid`); already-loaded entries are left untouched.
fn ensure_cur_series_initialized(&mut self, db: &SeriesDatabase) {
    let status = match self.cur_series_status() {
        Some(status) => status,
        None => return,
    };

    match status {
        SeriesStatus::Valid(_) | SeriesStatus::Invalid(_, _) => (),
        SeriesStatus::Unloaded(ref nickname) => {
            let new_status = {
                let series = Series::load(db, nickname);
                SeriesStatus::from_series(series, nickname)
            };

            // Unwrapping here is safe as we return early if the status is None earlier
            let status = self.cur_series_status_mut().unwrap();
            *status = new_status;
        }
    }
}
/// Handle a key press. Does nothing unless idle (presumably while an
/// episode is playing — see `is_idle`); while an input dialog is open,
/// the key is routed to `process_input_dialog_key` instead.
fn process_key(&mut self, state: &mut CommonState, log: &mut StatusLog, key: Key) {
    if !self.is_idle() {
        return;
    }

    if self.status_bar_state.in_input_dialog() {
        self.process_input_dialog_key(state, log, key);
        return;
    }

    match key {
        // Play next episode
        Key::Char(ch) if ch == state.config.tui.keys.play_next_episode => {
            let series = try_ret!(self.cur_valid_series());
            let nickname = series.config.nickname.clone();

            // Persist the last-watched marker only when it actually changed.
            let is_diff_series = self.last_watched.set(nickname);
            if is_diff_series {
                log.capture_status("Setting series as last watched", || {
                    self.last_watched.save()
                });
            }

            log.capture_status("Playing next episode", || {
                self.start_next_series_episode(&state)
            });
        }
        // Command prompt
        Key::Char(':') => {
            self.status_bar_state.set_to_command_prompt();
        }
        // Run last used command
        Key::Char(ch) if ch == state.config.tui.keys.run_last_command => {
            let cmd = match &self.last_used_command {
                Some(cmd) => cmd.clone(),
                None => return,
            };
            self.process_command(cmd, state, log);
        }
        // Select series
        // (Up/Down move the selection, clamped to the list bounds.)
        Key::Up | Key::Down => {
            self.selected_series = match key {
                Key::Up => self.selected_series.saturating_sub(1),
                Key::Down if self.selected_series < self.series.len().saturating_sub(1) => {
                    self.selected_series + 1
                }
                _ => self.selected_series,
            };

            // Lazily load the newly selected series from the database.
            self.ensure_cur_series_initialized(&state.db);
        }
        _ => (),
    }
}
/// Handle a key press while the status bar hosts an input dialog
/// (currently only the command prompt). A completed command is remembered
/// as the last used command and then executed.
fn process_input_dialog_key(&mut self, state: &mut CommonState, log: &mut StatusLog, key: Key) {
    match &mut self.status_bar_state {
        StatusBarState::Log => (),
        StatusBarState::CommandPrompt(prompt) => {
            use component::command_prompt::PromptResult;

            match prompt.process_key(key) {
                Ok(PromptResult::Command(command)) => {
                    self.status_bar_state.reset();
                    self.last_used_command = Some(command.clone());
                    self.process_command(command, state, log);
                }
                Ok(PromptResult::Done) => {
                    self.status_bar_state.reset();
                }
                Ok(PromptResult::NotDone) => (),
                // We need to set the status bar state back before propagating errors,
                // otherwise we'll be stuck in the prompt
                Err(err) => {
                    self.status_bar_state.reset();
                    log.push(LogItem::failed("Processing command", err));
                }
            }
        }
    }
}
/// Executes a parsed `command` against the UI and common state, reporting
/// progress and failures through `log`.
fn process_command(&mut self, command: Command, cstate: &mut CommonState, log: &mut StatusLog) {
    match command {
        // Fetch a series from the remote service, save it, and select it.
        Command::Add(nickname, params) => {
            if cstate.remote.is_offline() {
                log.push("This command cannot be ran in offline mode");
                return;
            }
            log.capture_status("Adding series", || {
                let series = Series::from_remote(
                    &nickname,
                    params,
                    &cstate.config,
                    cstate.remote.as_ref(),
                );
                // Only persist the series when the fetch succeeded.
                if let Ok(series) = &series {
                    series.save(&cstate.db)?;
                }
                let status = SeriesStatus::from_series(series, &nickname);
                let existing_position = self
                    .series
                    .iter()
                    .position(|series| series.nickname() == nickname);
                // Replace an existing entry with the same nickname; otherwise
                // insert and keep the list sorted by nickname.
                if let Some(pos) = existing_position {
                    self.series[pos] = status;
                } else {
                    self.series.push(status);
                    self.series
                        .sort_unstable_by(|x, y| x.nickname().cmp(y.nickname()));
                }
                // Re-find the entry after any re-sort and select it.
                self.selected_series = self
                    .series
                    .iter()
                    .position(|series| series.nickname() == nickname)
                    .unwrap_or(0);
                Ok(())
            });
        }
        // Remove the selected series from the list and the database.
        Command::Delete => {
            if self.selected_series >= self.series.len() {
                return;
            }
            let series = self.series.remove(self.selected_series);
            let nickname = series.nickname();
            // Keep the selection in bounds when the last entry was removed.
            if self.selected_series == self.series.len() {
                self.selected_series = self.selected_series.saturating_sub(1);
            }
            log.capture_status("Deleting series", || Series::delete(&cstate.db, nickname));
            self.ensure_cur_series_initialized(&cstate.db);
        }
        // Save an access token and switch to an authenticated AniList remote.
        Command::LoginToken(token) => {
            use anime::remote::anilist::AniList;
            use anime::remote::AccessToken;
            log.capture_status("Setting user access token", || {
                let token = AccessToken::encode(token);
                token.save()?;
                cstate.remote = Box::new(AniList::authenticated(token)?);
                Ok(())
            });
        }
        // Change the episode filename matcher and rescan episodes with it.
        Command::Matcher(pattern) => {
            use anime::local::{EpisodeMap, EpisodeMatcher};
            let series = try_ret!(self.cur_valid_series_mut());
            log.capture_status("Setting series episode matcher", || {
                let matcher = match pattern {
                    Some(pattern) => series::episode_matcher_with_pattern(pattern)?,
                    None => EpisodeMatcher::new(),
                };
                series.episodes = EpisodeMap::parse(&series.config.path, &matcher)?;
                series.config.episode_matcher = matcher;
                series.save(&cstate.db)
            });
        }
        // Point the series at a new directory and rescan episodes there.
        Command::Path(path) => {
            use anime::local::EpisodeMap;
            let series = try_ret!(self.cur_valid_series_mut());
            log.capture_status("Setting series path", || {
                series.episodes = EpisodeMap::parse(&path, &series.config.episode_matcher)?;
                series.config.path = path;
                series.save(&cstate.db)
            });
        }
        // Store custom video player arguments for the series.
        Command::PlayerArgs(args) => {
            let series = try_ret!(self.cur_valid_series_mut());
            log.capture_status("Saving player args for series", || {
                series.config.player_args = args;
                series.save(&cstate.db)
            });
        }
        // Manually bump the watch progress forwards or backwards.
        Command::Progress(direction) => {
            use component::command_prompt::ProgressDirection;
            let series = try_ret!(self.cur_valid_series_mut());
            let remote = cstate.remote.as_ref();
            match direction {
                ProgressDirection::Forwards => {
                    log.capture_status("Forcing forward watch progress", || {
                        series.episode_completed(remote, &cstate.config, &cstate.db)
                    });
                }
                ProgressDirection::Backwards => {
                    log.capture_status("Forcing backwards watch progress", || {
                        series.episode_regressed(remote, &cstate.config, &cstate.db)
                    });
                }
            }
        }
        // Overwrite the local entry with the remote's state.
        Command::SyncFromRemote => {
            let series = try_ret!(self.cur_valid_series_mut());
            let remote = cstate.remote.as_ref();
            log.capture_status("Syncing entry from remote", || {
                series.entry.force_sync_from_remote(remote)?;
                series.save(&cstate.db)
            });
        }
        // Push the local entry's state to the remote.
        Command::SyncToRemote => {
            let series = try_ret!(self.cur_valid_series_mut());
            let remote = cstate.remote.as_ref();
            log.capture_status("Syncing entry to remote", || {
                series.entry.force_sync_to_remote(remote)?;
                series.save(&cstate.db)
            });
        }
        // Set (or clear, via 0) the user's score for the series.
        Command::Score(raw_score) => {
            let series = try_ret!(self.cur_valid_series_mut());
            let score = match cstate.remote.parse_score(&raw_score) {
                // A score of 0 clears the score.
                Some(score) if score == 0 => None,
                Some(score) => Some(score),
                None => {
                    log.push(LogItem::failed("Parsing score", None));
                    return;
                }
            };
            let remote = cstate.remote.as_ref();
            log.capture_status("Setting score", || {
                series.entry.set_score(score);
                series.entry.sync_to_remote(remote)?;
                series.save(&cstate.db)
            });
        }
        // Change the watch status of the series and sync it to the remote.
        Command::Status(status) => {
            let series = try_ret!(self.cur_valid_series_mut());
            let remote = cstate.remote.as_ref();
            log.capture_status(format!("Setting series status to \"{}\"", status), || {
                series.entry.set_status(status, &cstate.config);
                series.entry.sync_to_remote(remote)?;
                series.save(&cstate.db)
            });
        }
    }
}
/// Polls the spawned player process once per tick and, when it has exited,
/// decides whether the current episode should be marked as completed.
fn process_tick(&mut self, state: &CommonState, log: &mut StatusLog) {
    match &mut self.watch_state {
        WatchState::Idle => (),
        WatchState::Watching(_, child) => {
            // Non-blocking poll; bail out while the player is still running.
            let status = match child.try_wait().context(err::IO) {
                Ok(Some(status)) => status,
                Ok(None) => return,
                Err(err) => {
                    log.push(LogItem::failed("Waiting for player", err));
                    return;
                }
            };
            // The watch state should be set to idle immediately to avoid a potential infinite loop.
            let progress_time = match mem::replace(&mut self.watch_state, WatchState::Idle) {
                WatchState::Watching(progress_time, _) => progress_time,
                WatchState::Idle => unreachable!(),
            };
            let series = try_ret!(self.cur_valid_series_mut());
            if !status.success() {
                log.push("Player did not exit properly");
                return;
            }
            // The episode only counts once the minimum watch time has passed.
            if Utc::now() >= progress_time {
                log.capture_status("Marking episode as completed", || {
                    series.episode_completed(state.remote.as_ref(), &state.config, &state.db)
                });
            } else {
                log.push("Not marking episode as completed");
            }
        }
    }
}
/// Spawns the video player on the next unwatched episode of the selected
/// series and records the time after which the episode counts as watched.
fn start_next_series_episode(&mut self, state: &CommonState) -> Result<()> {
    let series = try_opt_r!(self.cur_valid_series_mut());
    series.begin_watching(state.remote.as_ref(), &state.config, &state.db)?;
    let next_ep = series.entry.watched_eps() + 1;
    let child = series
        .play_episode_cmd(next_ep, &state.config)?
        .spawn()
        .context(err::FailedToPlayEpisode { episode: next_ep })?;
    // The episode counts as watched only after `pcnt_must_watch` of its length
    // has elapsed; `episode_length` appears to be in minutes (hence * 60.0).
    let progress_time = {
        let secs_must_watch =
            (series.info.episode_length as f32 * state.config.episode.pcnt_must_watch) * 60.0;
        let time_must_watch = Duration::seconds(secs_must_watch as i64);
        Utc::now() + time_must_watch
    };
    self.watch_state = WatchState::Watching(progress_time, child);
    Ok(())
}
/// Returns true when no episode is currently being played.
fn is_idle(&self) -> bool {
    matches!(self.watch_state, WatchState::Idle)
}
}
/// Nickname of a series, used as its key in the list and database.
type Nickname = String;
/// Human-readable reason a series failed to load.
type Reason = String;

/// Load state of one entry in the series list.
enum SeriesStatus {
    /// Fully loaded and usable.
    Valid(Box<Series>),
    /// Loading or fetching failed; keeps the nickname and failure reason.
    Invalid(Nickname, Reason),
    /// Known by name only; loaded lazily when selected.
    Unloaded(Nickname),
}
impl SeriesStatus {
    /// Builds a status from a load/fetch result, keeping a concise reason on failure.
    fn from_series<S>(series: Result<Series>, nickname: S) -> Self
    where
        S: Into<String>,
    {
        match series {
            Ok(series) => Self::Valid(Box::new(series)),
            // Strip the outer `Anime` error wrapper so the stored reason stays
            // concise, as it only provides little extra context.
            Err(err::Error::Anime { source, .. }) => {
                Self::Invalid(nickname.into(), source.to_string())
            }
            Err(err) => Self::Invalid(nickname.into(), err.to_string()),
        }
    }

    /// Shared reference to the series, if it loaded successfully.
    fn get_valid(&self) -> Option<&Series> {
        if let Self::Valid(series) = self {
            Some(series)
        } else {
            None
        }
    }

    /// Mutable reference to the series, if it loaded successfully.
    fn get_valid_mut(&mut self) -> Option<&mut Series> {
        if let Self::Valid(series) = self {
            Some(series)
        } else {
            None
        }
    }

    /// The nickname associated with this entry, regardless of load state.
    fn nickname(&self) -> &str {
        match self {
            Self::Valid(series) => series.config.nickname.as_ref(),
            Self::Invalid(nickname, _) | Self::Unloaded(nickname) => nickname.as_ref(),
        }
    }
}
/// What the status bar is currently showing.
enum StatusBarState {
    /// The scrolling status log (the default).
    Log,
    /// An interactive command prompt.
    CommandPrompt(CommandPrompt),
}
impl StatusBarState {
    /// Opens a fresh command prompt in the status bar.
    fn set_to_command_prompt(&mut self) {
        *self = Self::CommandPrompt(CommandPrompt::new());
    }

    /// Returns the status bar to its default state.
    fn reset(&mut self) {
        *self = Self::default();
    }

    /// Whether keystrokes should currently be routed to an input dialog.
    fn in_input_dialog(&self) -> bool {
        matches!(self, Self::CommandPrompt(_))
    }
}
/// The status bar shows the log unless another state is explicitly entered.
///
/// The lifetime parameter `<'a>` the impl previously declared was never used
/// by the trait, the self type, or any predicate, so it has been removed.
impl Default for StatusBarState {
    fn default() -> Self {
        Self::Log
    }
}
/// Point in time after which the playing episode counts as watched.
type ProgressTime = DateTime<Utc>;

/// Whether an episode is currently playing in an external player.
#[derive(Debug)]
enum WatchState {
    Idle,
    /// Carries the completion threshold time and the spawned player process.
    Watching(ProgressTime, process::Child),
}
impl PartialEq for WatchState {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(Self::Idle, Self::Idle) => true,
(Self::Watching(_, _), Self::Watching(_, _)) => true,
_ => false,
}
}
}
anup: simplify PartialEq impl for WatchState
mod component;
mod ui;
use crate::config::Config;
use crate::err::{self, Result};
use crate::file::TomlFile;
use crate::series::database::Database as SeriesDatabase;
use crate::series::{self, LastWatched, Series};
use crate::{try_opt_r, try_ret};
use anime::remote::RemoteService;
use chrono::{DateTime, Duration, Utc};
use clap::ArgMatches;
use component::command_prompt::{Command, CommandPrompt};
use component::log::{LogItem, StatusLog};
use snafu::ResultExt;
use std::mem;
use std::process;
use termion::event::Key;
use ui::{Event, Events, UI};
/// TUI entry point: builds the interface, shared state, and UI state, then
/// runs the event loop until the user quits with 'q'.
pub fn run(args: &ArgMatches) -> Result<()> {
    let mut ui = UI::init()?;
    let mut cstate = {
        let config = Config::load_or_create()?;
        let remote = init_remote(args, &mut ui.status_log);
        let db = SeriesDatabase::open()?;
        CommonState { config, remote, db }
    };
    let mut ui_state = init_ui_state(&cstate, args)?;
    // One-second tick interval; ticks drive `process_tick`.
    let events = Events::new(Duration::seconds(1));
    loop {
        ui.draw(&ui_state, cstate.remote.as_ref())?;
        ui.adjust_cursor(&ui_state)?;
        match events.next()? {
            Event::Input(key) => match key {
                // Exit ('q' only quits when not typing into an input dialog)
                Key::Char('q') if !ui_state.status_bar_state.in_input_dialog() => {
                    // Best-effort cleanup; shutdown errors are ignored.
                    cstate.db.close().ok();
                    ui.clear().ok();
                    break Ok(());
                }
                key => ui_state.process_key(&mut cstate, &mut ui.status_log, key),
            },
            Event::Tick => ui_state.process_tick(&cstate, &mut ui.status_log),
        }
    }
}
/// Creates the remote service from CLI args, falling back to offline mode
/// (with instructions for obtaining a token logged) when login fails.
fn init_remote(args: &ArgMatches, log: &mut StatusLog) -> Box<dyn RemoteService> {
    use anime::remote::anilist;
    use anime::remote::offline::Offline;
    match crate::init_remote(args, true) {
        Ok(remote) => remote,
        Err(err) => {
            match err {
                // No token saved yet: tell the user where to get one.
                err::Error::NeedAniListToken => {
                    log.push(format!(
                        "No access token found. Go to {} \
                         and set your token with the 'token' command",
                        anilist::auth_url(crate::ANILIST_CLIENT_ID)
                    ));
                }
                // Any other login failure: log it and suggest a token refresh.
                _ => {
                    log.push(LogItem::failed("Logging in", err));
                    log.push(format!(
                        "If you need a new token, go to {} \
                         and set it with the 'token' command",
                        anilist::auth_url(crate::ANILIST_CLIENT_ID)
                    ));
                }
            }
            log.push("Continuing in offline mode");
            Box::new(Offline::new())
        }
    }
}
/// Items that are not tied to the UI and are commonly used together.
struct CommonState {
    /// Application configuration, loaded from (or created on) disk.
    config: Config,
    /// The remote tracking service; an offline stub when login failed.
    remote: Box<dyn RemoteService>,
    /// Handle to the local series database.
    db: SeriesDatabase,
}
/// Builds the initial UI state: loads the series list and selects either the
/// series named on the command line or the last watched one.
fn init_ui_state(cstate: &CommonState, args: &ArgMatches) -> Result<UIState> {
    let series = init_series_list(&cstate, args)?;
    let last_watched = LastWatched::load()?;
    let selected_series = {
        // The CLI argument takes precedence over the last watched series.
        let desired_series = args
            .value_of("series")
            .map(|s| s.into())
            .or_else(|| last_watched.get().clone());
        match desired_series {
            Some(desired) => series
                .iter()
                .position(|series| series.nickname() == desired)
                .unwrap_or(0),
            None => 0,
        }
    };
    let mut ui_state = UIState {
        series,
        selected_series,
        last_watched,
        watch_state: WatchState::Idle,
        status_bar_state: StatusBarState::default(),
        last_used_command: None,
    };
    // Make sure the initially selected series is actually loaded.
    ui_state.ensure_cur_series_initialized(&cstate.db);
    Ok(ui_state)
}
/// Loads all known series names from the database and, if the series
/// requested on the command line is not among them, fetches and saves it.
fn init_series_list(cstate: &CommonState, args: &ArgMatches) -> Result<Vec<SeriesStatus>> {
    let series_names = series::database::get_series_names(&cstate.db)?;
    // Did the user specify a series that we don't have?
    let new_desired_series = args.value_of("series").and_then(|desired| {
        if series_names.contains(&desired.to_string()) {
            None
        } else {
            Some(desired)
        }
    });
    // Everything from the database starts out unloaded; entries are loaded
    // lazily when selected.
    let mut series = series_names
        .into_iter()
        .map(SeriesStatus::Unloaded)
        .collect();
    // If we have the series, there's nothing left to do
    let desired_series = match new_desired_series {
        Some(desired_series) => desired_series,
        None => return Ok(series),
    };
    let params = crate::series_params_from_args(args);
    // Otherwise, we'll need to fetch & save it
    let new_series = Series::from_remote(
        desired_series,
        params,
        &cstate.config,
        cstate.remote.as_ref(),
    )
    .and_then(|series| {
        series.save(&cstate.db)?;
        Ok(series)
    });
    // A failed fetch is still recorded (as an `Invalid` entry).
    series.push(SeriesStatus::from_series(new_series, desired_series));
    Ok(series)
}
/// Current state of the UI.
pub struct UIState {
    /// All known series entries.
    series: Vec<SeriesStatus>,
    /// Index into `series` of the currently selected entry.
    selected_series: usize,
    /// The series the user last played an episode of.
    last_watched: LastWatched,
    /// Whether an episode is currently playing.
    watch_state: WatchState,
    /// What the status bar is showing (log or command prompt).
    status_bar_state: StatusBarState,
    /// Most recent command, re-runnable via a configured key.
    last_used_command: Option<Command>,
}
impl UIState {
    /// The status entry for the currently selected series, if any.
    fn cur_series_status(&self) -> Option<&SeriesStatus> {
        self.series.get(self.selected_series)
    }

    /// The currently selected series, but only if it loaded successfully.
    fn cur_valid_series(&self) -> Option<&Series> {
        self.cur_series_status()
            .and_then(|status| status.get_valid())
    }

    /// Mutable status entry for the currently selected series, if any.
    fn cur_series_status_mut(&mut self) -> Option<&mut SeriesStatus> {
        self.series.get_mut(self.selected_series)
    }

    /// Mutable access to the selected series, if it loaded successfully.
    fn cur_valid_series_mut(&mut self) -> Option<&mut Series> {
        self.cur_series_status_mut()
            .and_then(|status| status.get_valid_mut())
    }

    /// Lazily loads the selected series from the database if it is still
    /// `Unloaded`; already loaded (or failed) entries are left untouched.
    fn ensure_cur_series_initialized(&mut self, db: &SeriesDatabase) {
        let status = match self.cur_series_status() {
            Some(status) => status,
            None => return,
        };
        match status {
            SeriesStatus::Valid(_) | SeriesStatus::Invalid(_, _) => (),
            SeriesStatus::Unloaded(ref nickname) => {
                let new_status = {
                    let series = Series::load(db, nickname);
                    SeriesStatus::from_series(series, nickname)
                };
                // Unwrapping here is safe as we return early if the status is None earlier
                let status = self.cur_series_status_mut().unwrap();
                *status = new_status;
            }
        }
    }

    /// Top-level key handler. Keys are ignored while an episode is playing;
    /// while an input dialog is open, keys are routed to it instead.
    fn process_key(&mut self, state: &mut CommonState, log: &mut StatusLog, key: Key) {
        if !self.is_idle() {
            return;
        }
        if self.status_bar_state.in_input_dialog() {
            self.process_input_dialog_key(state, log, key);
            return;
        }
        match key {
            // Play next episode
            Key::Char(ch) if ch == state.config.tui.keys.play_next_episode => {
                let series = try_ret!(self.cur_valid_series());
                let nickname = series.config.nickname.clone();
                let is_diff_series = self.last_watched.set(nickname);
                // Only persist the last-watched marker when it changed.
                if is_diff_series {
                    log.capture_status("Setting series as last watched", || {
                        self.last_watched.save()
                    });
                }
                log.capture_status("Playing next episode", || {
                    self.start_next_series_episode(&state)
                });
            }
            // Command prompt
            Key::Char(':') => {
                self.status_bar_state.set_to_command_prompt();
            }
            // Run last used command
            Key::Char(ch) if ch == state.config.tui.keys.run_last_command => {
                let cmd = match &self.last_used_command {
                    Some(cmd) => cmd.clone(),
                    None => return,
                };
                self.process_command(cmd, state, log);
            }
            // Select series (clamped to the list bounds)
            Key::Up | Key::Down => {
                self.selected_series = match key {
                    Key::Up => self.selected_series.saturating_sub(1),
                    Key::Down if self.selected_series < self.series.len().saturating_sub(1) => {
                        self.selected_series + 1
                    }
                    _ => self.selected_series,
                };
                self.ensure_cur_series_initialized(&state.db);
            }
            _ => (),
        }
    }

    /// Routes `key` to the active input dialog (the command prompt).
    fn process_input_dialog_key(&mut self, state: &mut CommonState, log: &mut StatusLog, key: Key) {
        match &mut self.status_bar_state {
            StatusBarState::Log => (),
            StatusBarState::CommandPrompt(prompt) => {
                use component::command_prompt::PromptResult;
                match prompt.process_key(key) {
                    Ok(PromptResult::Command(command)) => {
                        self.status_bar_state.reset();
                        self.last_used_command = Some(command.clone());
                        self.process_command(command, state, log);
                    }
                    Ok(PromptResult::Done) => {
                        self.status_bar_state.reset();
                    }
                    Ok(PromptResult::NotDone) => (),
                    // We need to set the status bar state back before propagating errors,
                    // otherwise we'll be stuck in the prompt
                    Err(err) => {
                        self.status_bar_state.reset();
                        log.push(LogItem::failed("Processing command", err));
                    }
                }
            }
        }
    }

    /// Executes a parsed `command`, reporting progress/failures via `log`.
    fn process_command(&mut self, command: Command, cstate: &mut CommonState, log: &mut StatusLog) {
        match command {
            // Fetch a series from the remote, save it, and select it.
            Command::Add(nickname, params) => {
                if cstate.remote.is_offline() {
                    log.push("This command cannot be ran in offline mode");
                    return;
                }
                log.capture_status("Adding series", || {
                    let series = Series::from_remote(
                        &nickname,
                        params,
                        &cstate.config,
                        cstate.remote.as_ref(),
                    );
                    // Only persist when the fetch succeeded.
                    if let Ok(series) = &series {
                        series.save(&cstate.db)?;
                    }
                    let status = SeriesStatus::from_series(series, &nickname);
                    let existing_position = self
                        .series
                        .iter()
                        .position(|series| series.nickname() == nickname);
                    // Replace an existing entry, or insert and re-sort by nickname.
                    if let Some(pos) = existing_position {
                        self.series[pos] = status;
                    } else {
                        self.series.push(status);
                        self.series
                            .sort_unstable_by(|x, y| x.nickname().cmp(y.nickname()));
                    }
                    // Re-find the entry after any re-sort and select it.
                    self.selected_series = self
                        .series
                        .iter()
                        .position(|series| series.nickname() == nickname)
                        .unwrap_or(0);
                    Ok(())
                });
            }
            // Remove the selected series from the list and the database.
            Command::Delete => {
                if self.selected_series >= self.series.len() {
                    return;
                }
                let series = self.series.remove(self.selected_series);
                let nickname = series.nickname();
                // Keep the selection in bounds when the last entry was removed.
                if self.selected_series == self.series.len() {
                    self.selected_series = self.selected_series.saturating_sub(1);
                }
                log.capture_status("Deleting series", || Series::delete(&cstate.db, nickname));
                self.ensure_cur_series_initialized(&cstate.db);
            }
            // Save an access token and switch to an authenticated remote.
            Command::LoginToken(token) => {
                use anime::remote::anilist::AniList;
                use anime::remote::AccessToken;
                log.capture_status("Setting user access token", || {
                    let token = AccessToken::encode(token);
                    token.save()?;
                    cstate.remote = Box::new(AniList::authenticated(token)?);
                    Ok(())
                });
            }
            // Change the episode filename matcher and rescan with it.
            Command::Matcher(pattern) => {
                use anime::local::{EpisodeMap, EpisodeMatcher};
                let series = try_ret!(self.cur_valid_series_mut());
                log.capture_status("Setting series episode matcher", || {
                    let matcher = match pattern {
                        Some(pattern) => series::episode_matcher_with_pattern(pattern)?,
                        None => EpisodeMatcher::new(),
                    };
                    series.episodes = EpisodeMap::parse(&series.config.path, &matcher)?;
                    series.config.episode_matcher = matcher;
                    series.save(&cstate.db)
                });
            }
            // Point the series at a new directory and rescan episodes there.
            Command::Path(path) => {
                use anime::local::EpisodeMap;
                let series = try_ret!(self.cur_valid_series_mut());
                log.capture_status("Setting series path", || {
                    series.episodes = EpisodeMap::parse(&path, &series.config.episode_matcher)?;
                    series.config.path = path;
                    series.save(&cstate.db)
                });
            }
            // Store custom video player arguments for the series.
            Command::PlayerArgs(args) => {
                let series = try_ret!(self.cur_valid_series_mut());
                log.capture_status("Saving player args for series", || {
                    series.config.player_args = args;
                    series.save(&cstate.db)
                });
            }
            // Manually bump the watch progress forwards or backwards.
            Command::Progress(direction) => {
                use component::command_prompt::ProgressDirection;
                let series = try_ret!(self.cur_valid_series_mut());
                let remote = cstate.remote.as_ref();
                match direction {
                    ProgressDirection::Forwards => {
                        log.capture_status("Forcing forward watch progress", || {
                            series.episode_completed(remote, &cstate.config, &cstate.db)
                        });
                    }
                    ProgressDirection::Backwards => {
                        log.capture_status("Forcing backwards watch progress", || {
                            series.episode_regressed(remote, &cstate.config, &cstate.db)
                        });
                    }
                }
            }
            // Overwrite the local entry with the remote's state.
            Command::SyncFromRemote => {
                let series = try_ret!(self.cur_valid_series_mut());
                let remote = cstate.remote.as_ref();
                log.capture_status("Syncing entry from remote", || {
                    series.entry.force_sync_from_remote(remote)?;
                    series.save(&cstate.db)
                });
            }
            // Push the local entry's state to the remote.
            Command::SyncToRemote => {
                let series = try_ret!(self.cur_valid_series_mut());
                let remote = cstate.remote.as_ref();
                log.capture_status("Syncing entry to remote", || {
                    series.entry.force_sync_to_remote(remote)?;
                    series.save(&cstate.db)
                });
            }
            // Set (or clear, via 0) the user's score for the series.
            Command::Score(raw_score) => {
                let series = try_ret!(self.cur_valid_series_mut());
                let score = match cstate.remote.parse_score(&raw_score) {
                    // A score of 0 clears the score.
                    Some(score) if score == 0 => None,
                    Some(score) => Some(score),
                    None => {
                        log.push(LogItem::failed("Parsing score", None));
                        return;
                    }
                };
                let remote = cstate.remote.as_ref();
                log.capture_status("Setting score", || {
                    series.entry.set_score(score);
                    series.entry.sync_to_remote(remote)?;
                    series.save(&cstate.db)
                });
            }
            // Change the series watch status and sync it to the remote.
            Command::Status(status) => {
                let series = try_ret!(self.cur_valid_series_mut());
                let remote = cstate.remote.as_ref();
                log.capture_status(format!("Setting series status to \"{}\"", status), || {
                    series.entry.set_status(status, &cstate.config);
                    series.entry.sync_to_remote(remote)?;
                    series.save(&cstate.db)
                });
            }
        }
    }

    /// Polls the player process once per tick and, when it has exited,
    /// decides whether to mark the current episode as completed.
    fn process_tick(&mut self, state: &CommonState, log: &mut StatusLog) {
        match &mut self.watch_state {
            WatchState::Idle => (),
            WatchState::Watching(_, child) => {
                // Non-blocking poll; bail out while the player is still running.
                let status = match child.try_wait().context(err::IO) {
                    Ok(Some(status)) => status,
                    Ok(None) => return,
                    Err(err) => {
                        log.push(LogItem::failed("Waiting for player", err));
                        return;
                    }
                };
                // The watch state should be set to idle immediately to avoid a potential infinite loop.
                let progress_time = match mem::replace(&mut self.watch_state, WatchState::Idle) {
                    WatchState::Watching(progress_time, _) => progress_time,
                    WatchState::Idle => unreachable!(),
                };
                let series = try_ret!(self.cur_valid_series_mut());
                if !status.success() {
                    log.push("Player did not exit properly");
                    return;
                }
                // The episode counts only once the minimum watch time passed.
                if Utc::now() >= progress_time {
                    log.capture_status("Marking episode as completed", || {
                        series.episode_completed(state.remote.as_ref(), &state.config, &state.db)
                    });
                } else {
                    log.push("Not marking episode as completed");
                }
            }
        }
    }

    /// Spawns the player on the next unwatched episode and records the time
    /// after which the episode counts as watched.
    fn start_next_series_episode(&mut self, state: &CommonState) -> Result<()> {
        let series = try_opt_r!(self.cur_valid_series_mut());
        series.begin_watching(state.remote.as_ref(), &state.config, &state.db)?;
        let next_ep = series.entry.watched_eps() + 1;
        let child = series
            .play_episode_cmd(next_ep, &state.config)?
            .spawn()
            .context(err::FailedToPlayEpisode { episode: next_ep })?;
        // Threshold = pcnt_must_watch of the episode length; `episode_length`
        // appears to be in minutes (hence * 60.0).
        let progress_time = {
            let secs_must_watch =
                (series.info.episode_length as f32 * state.config.episode.pcnt_must_watch) * 60.0;
            let time_must_watch = Duration::seconds(secs_must_watch as i64);
            Utc::now() + time_must_watch
        };
        self.watch_state = WatchState::Watching(progress_time, child);
        Ok(())
    }

    /// Returns true when no episode is currently being played.
    fn is_idle(&self) -> bool {
        self.watch_state == WatchState::Idle
    }
}
/// Nickname of a series, used as its key in the list and database.
type Nickname = String;
/// Human-readable reason a series failed to load.
type Reason = String;

/// Load state of one entry in the series list.
enum SeriesStatus {
    /// Fully loaded and usable.
    Valid(Box<Series>),
    /// Loading or fetching failed; keeps the nickname and failure reason.
    Invalid(Nickname, Reason),
    /// Known by name only; loaded lazily when selected.
    Unloaded(Nickname),
}
impl SeriesStatus {
    /// Builds a status from a load/fetch result, keeping a concise reason on failure.
    fn from_series<S>(series: Result<Series>, nickname: S) -> Self
    where
        S: Into<String>,
    {
        match series {
            Ok(series) => Self::Valid(Box::new(series)),
            // We want to use a somewhat concise error message here, so
            // we should strip error wrappers that don't provide much context
            Err(err::Error::Anime { source, .. }) => {
                Self::Invalid(nickname.into(), format!("{}", source))
            }
            Err(err) => Self::Invalid(nickname.into(), format!("{}", err)),
        }
    }

    /// Shared reference to the series, if it loaded successfully.
    fn get_valid(&self) -> Option<&Series> {
        match self {
            Self::Valid(series) => Some(&series),
            Self::Invalid(_, _) => None,
            Self::Unloaded(_) => None,
        }
    }

    /// Mutable reference to the series, if it loaded successfully.
    fn get_valid_mut(&mut self) -> Option<&mut Series> {
        match self {
            Self::Valid(series) => Some(series),
            Self::Invalid(_, _) => None,
            Self::Unloaded(_) => None,
        }
    }

    /// The nickname associated with this entry, regardless of load state.
    fn nickname(&self) -> &str {
        match self {
            Self::Valid(series) => series.config.nickname.as_ref(),
            Self::Invalid(nickname, _) => nickname.as_ref(),
            Self::Unloaded(nickname) => nickname.as_ref(),
        }
    }
}
/// What the status bar is currently showing.
enum StatusBarState {
    /// The scrolling status log (the default).
    Log,
    /// An interactive command prompt.
    CommandPrompt(CommandPrompt),
}
impl StatusBarState {
    /// Opens a fresh command prompt in the status bar.
    fn set_to_command_prompt(&mut self) {
        *self = Self::CommandPrompt(CommandPrompt::new());
    }

    /// Returns the status bar to its default state.
    fn reset(&mut self) {
        *self = Self::default();
    }

    /// Whether keystrokes should currently be routed to an input dialog.
    fn in_input_dialog(&self) -> bool {
        match self {
            Self::Log => false,
            Self::CommandPrompt(_) => true,
        }
    }
}
/// The status bar shows the log unless another state is explicitly entered.
///
/// The lifetime parameter `<'a>` the impl previously declared was never used
/// by the trait, the self type, or any predicate, so it has been removed.
impl Default for StatusBarState {
    fn default() -> Self {
        Self::Log
    }
}
/// Point in time after which the playing episode counts as watched.
type ProgressTime = DateTime<Utc>;

/// Whether an episode is currently playing in an external player.
#[derive(Debug)]
enum WatchState {
    Idle,
    /// Carries the completion threshold time and the spawned player process.
    Watching(ProgressTime, process::Child),
}
/// Equality compares only the enum variant; the `Watching` payload (time and
/// child process) is deliberately ignored since `process::Child` cannot be
/// compared.
impl PartialEq for WatchState {
    fn eq(&self, other: &Self) -> bool {
        mem::discriminant(self) == mem::discriminant(other)
    }
}
|
use syntax::ast::consts::*;
use syntax::ast::op::*;
use syntax::core::tokens::Token;
/// Implemented by AST nodes that can emit their own output code.
pub trait CodeGen {
    // Not sure if this should return anything
    fn gen_code(&self);
}
/// An expression together with its source span (start/end line and column).
#[derive(Debug, PartialEq)]
pub struct ExprWrapper {
    expr: Box<Expr>,
    // Source span of the wrapped expression.
    start_line: usize,
    start_column: usize,
    end_line: usize,
    end_column: usize
}
impl CodeGen for ExprWrapper {
    /// Delegates code generation to the wrapped expression.
    fn gen_code(&self) {
        self.expr.gen_code();
    }
}
impl ExprWrapper {
    /// Create an associated expression with start and end positions.
    ///
    /// Uses field-init shorthand for `expr` (the previous `expr: expr` was
    /// redundant; clippy `redundant_field_names`).
    pub fn new(expr: Box<Expr>, startl: usize, startc: usize, endl: usize, endc: usize) -> ExprWrapper {
        ExprWrapper {
            expr,
            start_line: startl,
            start_column: startc,
            end_line: endl,
            end_column: endc,
        }
    }

    /// Wraps an expression with a zeroed (unknown) source span.
    pub fn default(expr: Box<Expr>) -> ExprWrapper {
        ExprWrapper {
            expr,
            start_line: 0,
            start_column: 0,
            end_line: 0,
            end_column: 0,
        }
    }

    /// Borrow the wrapped expression.
    pub fn get_expr(&self) -> &Expr {
        &self.expr
    }
}
/// Kinds of expression nodes in the AST.
#[derive(Debug, PartialEq)]
pub enum Expr {
    // Operations between two expressions
    NumOp(InfixOp, ExprWrapper, ExprWrapper),
    // Operation on a single expression
    UnaryOp(UnaryOp, ExprWrapper),
    // Constants such as numbers and strings
    Const(Const),
    // Run expression while conditional is true
    WhileLoop(ExprWrapper, ExprWrapper),
    // Run expression if condition true, optional elif, else
    If(ExprWrapper, ExprWrapper, Option<ExprWrapper>),
    // Assign a value to an expression
    Assign(ExprWrapper, ExprWrapper),
    // Fn call with args.
    // ToDo: Vec<Option<ExprWrapper>> for optional args?
    FnCall(ExprWrapper, Vec<ExprWrapper>),
    // Declare a function with a name, args(name, type | ident), return (type | ident), and expr
    // ToDo: Optional args
    FnDecl(String, Vec<(String, Token)>, Token, ExprWrapper),
    // Run consecutive expressions
    Block(Vec<ExprWrapper>),
    // Variable name and expression.
    // ToDo: Does this need a variable type here?
    // ToDo: A bool for whether it is const(def) or not(var)?
    VarDecl(Vec<(String, ExprWrapper)>),
    // Reference to a value in an identifier
    Ident(String),
    // Return an expression from a function
    Return(Option<ExprWrapper>),
    // A lot more to come
    NoOp,
}
impl CodeGen for Expr {
    /// Generates code for this expression. Only `Block` currently emits
    /// anything: it generates each contained expression in order; every other
    /// variant is a no-op for now.
    fn gen_code(&self) {
        if let Expr::Block(ref vec) = *self {
            for expr in vec {
                expr.gen_code();
            }
        }
        // Codegen for the remaining variants is not implemented yet.
    }
}
Changed tabs to spaces in expr.rs
use syntax::ast::consts::*;
use syntax::ast::op::*;
use syntax::core::tokens::Token;
/// Implemented by AST nodes that can emit their own output code.
pub trait CodeGen {
    // Not sure if this should return anything
    fn gen_code(&self);
}
/// An expression together with its source span (start/end line and column).
#[derive(Debug, PartialEq)]
pub struct ExprWrapper {
    expr: Box<Expr>,
    // Source span of the wrapped expression.
    start_line: usize,
    start_column: usize,
    end_line: usize,
    end_column: usize
}
impl CodeGen for ExprWrapper {
    /// Delegates code generation to the wrapped expression.
    fn gen_code(&self) {
        self.expr.gen_code();
    }
}
impl ExprWrapper {
    /// Create an associated expression with start and end positions.
    ///
    /// Uses field-init shorthand for `expr` (the previous `expr: expr` was
    /// redundant; clippy `redundant_field_names`).
    pub fn new(expr: Box<Expr>, startl: usize, startc: usize, endl: usize, endc: usize) -> ExprWrapper {
        ExprWrapper {
            expr,
            start_line: startl,
            start_column: startc,
            end_line: endl,
            end_column: endc,
        }
    }

    /// Wraps an expression with a zeroed (unknown) source span.
    pub fn default(expr: Box<Expr>) -> ExprWrapper {
        ExprWrapper {
            expr,
            start_line: 0,
            start_column: 0,
            end_line: 0,
            end_column: 0,
        }
    }

    /// Borrow the wrapped expression.
    pub fn get_expr(&self) -> &Expr {
        &self.expr
    }
}
/// Kinds of expression nodes in the AST.
#[derive(Debug, PartialEq)]
pub enum Expr {
    // Operations between two expressions
    NumOp(InfixOp, ExprWrapper, ExprWrapper),
    // Operation on a single expression
    UnaryOp(UnaryOp, ExprWrapper),
    // Constants such as numbers and strings
    Const(Const),
    // Run expression while conditional is true
    WhileLoop(ExprWrapper, ExprWrapper),
    // Run expression if condition true, optional elif, else
    If(ExprWrapper, ExprWrapper, Option<ExprWrapper>),
    // Assign a value to an expression
    Assign(ExprWrapper, ExprWrapper),
    // Fn call with args.
    // ToDo: Vec<Option<ExprWrapper>> for optional args?
    FnCall(ExprWrapper, Vec<ExprWrapper>),
    // Declare a function with a name, args(name, type | ident), return (type | ident), and expr
    // ToDo: Optional args
    FnDecl(String, Vec<(String, Token)>, Token, ExprWrapper),
    // Run consecutive expressions
    Block(Vec<ExprWrapper>),
    // Variable name and expression.
    // ToDo: Does this need a variable type here?
    // ToDo: A bool for whether it is const(def) or not(var)?
    VarDecl(Vec<(String, ExprWrapper)>),
    // Reference to a value in an identifier
    Ident(String),
    // Return an expression from a function
    Return(Option<ExprWrapper>),
    // A lot more to come
    NoOp,
}
impl CodeGen for Expr {
    /// Generates code for this expression. Only `Block` currently emits
    /// anything: it generates each contained expression in order.
    fn gen_code(&self) {
        match *self {
            Expr::Block(ref vec) => {
                for expr in vec {
                    expr.gen_code();
                }
            },
            // Codegen for others:
            _ => ()
        }
    }
}
|
use super::{consts, sa_family_t};
use {Errno, Error, Result, NixPath};
use libc;
use std::{fmt, hash, mem, net, ptr};
use std::ffi::OsStr;
use std::path::Path;
use std::os::unix::ffi::OsStrExt;
#[cfg(any(target_os = "linux", target_os = "android"))]
use ::sys::socket::addr::netlink::NetlinkAddr;
// TODO: uncomment out IpAddr functions: rust-lang/rfcs#988
/*
*
* ===== AddressFamily =====
*
*/
/// Raw netlink socket address, mirroring the C `sockaddr_nl` layout
/// (Linux/Android only).
#[cfg(any(target_os = "linux", target_os = "android"))]
#[derive(Debug,Copy,Clone)]
#[repr(C)]
pub struct sockaddr_nl {
    pub nl_family: sa_family_t,
    // Padding field from the C struct; kept private.
    nl_pad: libc::c_ushort,
    pub nl_pid: u32,
    pub nl_groups: u32
}
/// Socket address families, with values matching the platform `AF_*` constants.
#[repr(i32)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub enum AddressFamily {
    Unix = consts::AF_UNIX,
    Inet = consts::AF_INET,
    Inet6 = consts::AF_INET6,
    #[cfg(any(target_os = "linux", target_os = "android"))]
    Netlink = consts::AF_NETLINK,
}
/// An IPv4 or IPv6 socket address, stored as the raw libc sockaddr struct.
#[derive(Copy)]
pub enum InetAddr {
    V4(libc::sockaddr_in),
    V6(libc::sockaddr_in6),
}
impl InetAddr {
    /// Converts a standard library socket address into an `InetAddr`.
    pub fn from_std(std: &net::SocketAddr) -> InetAddr {
        let ip = match *std {
            net::SocketAddr::V4(ref addr) => IpAddr::V4(Ipv4Addr::from_std(&addr.ip())),
            net::SocketAddr::V6(ref addr) => IpAddr::V6(Ipv6Addr::from_std(&addr.ip())),
        };
        InetAddr::new(ip, std.port())
    }

    /// Builds a socket address from an IP address and a port given in host
    /// byte order.
    pub fn new(ip: IpAddr, port: u16) -> InetAddr {
        match ip {
            IpAddr::V4(ref ip) => {
                InetAddr::V4(libc::sockaddr_in {
                    sin_family: AddressFamily::Inet as sa_family_t,
                    // The port is stored in network byte order.
                    sin_port: port.to_be(),
                    sin_addr: ip.0,
                    // Remaining fields of the C struct are zero-initialized.
                    .. unsafe { mem::zeroed() }
                })
            }
            IpAddr::V6(ref ip) => {
                InetAddr::V6(libc::sockaddr_in6 {
                    sin6_family: AddressFamily::Inet6 as sa_family_t,
                    sin6_port: port.to_be(),
                    sin6_addr: ip.0,
                    .. unsafe { mem::zeroed() }
                })
            }
        }
    }
    /// Gets the IP address associated with this socket address.
    pub fn ip(&self) -> IpAddr {
        match *self {
            InetAddr::V4(ref sa) => IpAddr::V4(Ipv4Addr(sa.sin_addr)),
            InetAddr::V6(ref sa) => IpAddr::V6(Ipv6Addr(sa.sin6_addr)),
        }
    }
    /// Gets the port number associated with this socket address
    /// (converted back to host byte order).
    pub fn port(&self) -> u16 {
        match *self {
            InetAddr::V6(ref sa) => u16::from_be(sa.sin6_port),
            InetAddr::V4(ref sa) => u16::from_be(sa.sin_port),
        }
    }

    /// Converts back to the standard library's socket address type.
    pub fn to_std(&self) -> net::SocketAddr {
        match *self {
            InetAddr::V4(ref sa) => net::SocketAddr::V4(
                net::SocketAddrV4::new(
                    Ipv4Addr(sa.sin_addr).to_std(),
                    self.port())),
            InetAddr::V6(ref sa) => net::SocketAddr::V6(
                net::SocketAddrV6::new(
                    Ipv6Addr(sa.sin6_addr).to_std(),
                    self.port(),
                    sa.sin6_flowinfo,
                    sa.sin6_scope_id)),
        }
    }

    /// Formats the address as a string (delegates to `Display`).
    pub fn to_str(&self) -> String {
        format!("{}", self)
    }
}
/// Equality compares the fields that identify the endpoint; padding in the
/// underlying C structs is ignored.
impl PartialEq for InetAddr {
    fn eq(&self, other: &InetAddr) -> bool {
        match (*self, *other) {
            (InetAddr::V4(ref a), InetAddr::V4(ref b)) => {
                a.sin_port == b.sin_port &&
                a.sin_addr.s_addr == b.sin_addr.s_addr
            }
            (InetAddr::V6(ref a), InetAddr::V6(ref b)) => {
                a.sin6_port == b.sin6_port &&
                a.sin6_addr.s6_addr == b.sin6_addr.s6_addr &&
                a.sin6_flowinfo == b.sin6_flowinfo &&
                a.sin6_scope_id == b.sin6_scope_id
            }
            _ => false,
        }
    }
}

impl Eq for InetAddr {
}

impl hash::Hash for InetAddr {
    fn hash<H: hash::Hasher>(&self, s: &mut H) {
        // NOTE(review): hashing includes the family field, which `eq` does not
        // compare — confirm the family is always consistent with the variant.
        match *self {
            InetAddr::V4(ref a) => {
                ( a.sin_family,
                  a.sin_port,
                  a.sin_addr.s_addr ).hash(s)
            }
            InetAddr::V6(ref a) => {
                ( a.sin6_family,
                  a.sin6_port,
                  &a.sin6_addr.s6_addr,
                  a.sin6_flowinfo,
                  a.sin6_scope_id ).hash(s)
            }
        }
    }
}

// Manual `Clone` simply copies the value (the type is `Copy`).
impl Clone for InetAddr {
    fn clone(&self) -> InetAddr {
        *self
    }
}
impl fmt::Display for InetAddr {
    /// Formats as `ip:port`; IPv6 addresses are bracketed (`[ip]:port`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            InetAddr::V4(_) => write!(f, "{}:{}", self.ip(), self.port()),
            InetAddr::V6(_) => write!(f, "[{}]:{}", self.ip(), self.port()),
        }
    }
}
/*
*
* ===== IpAddr =====
*
*/
/// An IPv4 or IPv6 address (without a port).
pub enum IpAddr {
    V4(Ipv4Addr),
    V6(Ipv6Addr),
}
impl IpAddr {
    /// Create a new IpAddr that contains an IPv4 address.
    ///
    /// The result will represent the IP address a.b.c.d
    pub fn new_v4(a: u8, b: u8, c: u8, d: u8) -> IpAddr {
        IpAddr::V4(Ipv4Addr::new(a, b, c, d))
    }
    /// Create a new IpAddr that contains an IPv6 address.
    ///
    /// The result will represent the IP address a:b:c:d:e:f
    pub fn new_v6(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> IpAddr {
        IpAddr::V6(Ipv6Addr::new(a, b, c, d, e, f, g, h))
    }
    // The std conversions below are disabled until `std::net::IpAddr`
    // stabilizes (see the TODO near the top of this file).
    /*
    pub fn from_std(std: &net::IpAddr) -> IpAddr {
        match *std {
            net::IpAddr::V4(ref std) => IpAddr::V4(Ipv4Addr::from_std(std)),
            net::IpAddr::V6(ref std) => IpAddr::V6(Ipv6Addr::from_std(std)),
        }
    }
    pub fn to_std(&self) -> net::IpAddr {
        match *self {
            IpAddr::V4(ref ip) => net::IpAddr::V4(ip.to_std()),
            IpAddr::V6(ref ip) => net::IpAddr::V6(ip.to_std()),
        }
    }
    */
}
impl fmt::Display for IpAddr {
    /// Delegates to the formatting of the contained address.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            IpAddr::V4(ref v4) => v4.fmt(f),
            IpAddr::V6(ref v6) => v6.fmt(f)
        }
    }
}
/*
*
* ===== Ipv4Addr =====
*
*/
/// Wrapper around `libc::in_addr` (an IPv4 address in network byte order).
#[derive(Copy)]
pub struct Ipv4Addr(pub libc::in_addr);

impl Ipv4Addr {
    /// Builds the address `a.b.c.d`.
    pub fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr {
        // Pack the octets most-significant first, then convert the whole word
        // to network byte order. (The former `(d as u32) << 0` was a no-op
        // shift and has been dropped.)
        let ip = (((a as u32) << 24) |
                  ((b as u32) << 16) |
                  ((c as u32) << 8) |
                  (d as u32)).to_be();
        Ipv4Addr(libc::in_addr { s_addr: ip })
    }

    /// Converts from the standard library's IPv4 address type.
    pub fn from_std(std: &net::Ipv4Addr) -> Ipv4Addr {
        let bits = std.octets();
        Ipv4Addr::new(bits[0], bits[1], bits[2], bits[3])
    }

    /// The wildcard address (`INADDR_ANY`).
    pub fn any() -> Ipv4Addr {
        Ipv4Addr(libc::in_addr { s_addr: consts::INADDR_ANY })
    }

    /// The four octets of the address, most significant first.
    pub fn octets(&self) -> [u8; 4] {
        let bits = u32::from_be(self.0.s_addr);
        [(bits >> 24) as u8, (bits >> 16) as u8, (bits >> 8) as u8, bits as u8]
    }

    /// Converts to the standard library's IPv4 address type.
    pub fn to_std(&self) -> net::Ipv4Addr {
        let bits = self.octets();
        net::Ipv4Addr::new(bits[0], bits[1], bits[2], bits[3])
    }
}
/// Equality, hashing, and cloning all operate on the single raw address word.
impl PartialEq for Ipv4Addr {
    fn eq(&self, other: &Ipv4Addr) -> bool {
        self.0.s_addr == other.0.s_addr
    }
}

impl Eq for Ipv4Addr {
}

impl hash::Hash for Ipv4Addr {
    fn hash<H: hash::Hasher>(&self, s: &mut H) {
        self.0.s_addr.hash(s)
    }
}

impl Clone for Ipv4Addr {
    fn clone(&self) -> Ipv4Addr {
        *self
    }
}

impl fmt::Display for Ipv4Addr {
    /// Formats in the usual dotted-quad notation.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let octets = self.octets();
        write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3])
    }
}
/*
*
* ===== Ipv6Addr =====
*
*/
#[derive(Clone, Copy)]
pub struct Ipv6Addr(pub libc::in6_addr);
macro_rules! to_u8_array {
($($num:ident),*) => {
if cfg!(target_endian = "big") {
[ $(($num>>8) as u8, ($num&0xff) as u8,)* ]
} else {
[ $(($num&0xff) as u8, ($num>>8) as u8,)* ]
}
}
}
macro_rules! to_u16_array {
($slf:ident, $($first:expr, $second:expr),*) => {
if cfg!(target_endian = "big") {
[$( (($slf.0.s6_addr[$first] as u16) << 8) + $slf.0.s6_addr[$second] as u16,)*]
} else {
[$( (($slf.0.s6_addr[$second] as u16) << 8) + $slf.0.s6_addr[$first] as u16,)*]
}
}
}
impl Ipv6Addr {
pub fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr {
let mut in6_addr_var: libc::in6_addr = unsafe{mem::uninitialized()};
in6_addr_var.s6_addr = to_u8_array!(a,b,c,d,e,f,g,h);
Ipv6Addr(in6_addr_var)
}
pub fn from_std(std: &net::Ipv6Addr) -> Ipv6Addr {
let s = std.segments();
Ipv6Addr::new(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7])
}
/// Return the eight 16-bit segments that make up this address
pub fn segments(&self) -> [u16; 8] {
to_u16_array!(self, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)
}
pub fn to_std(&self) -> net::Ipv6Addr {
let s = self.segments();
net::Ipv6Addr::new(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7])
}
}
impl fmt::Display for Ipv6Addr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.to_std().fmt(fmt)
}
}
/*
*
* ===== UnixAddr =====
*
*/
/// A wrapper around sockaddr_un. We track the length of sun_path,
/// because it may not be null-terminated (unconnected and abstract
/// sockets). Note that the actual sockaddr length is greater by
/// size_of::<sa_family_t>().
#[derive(Copy)]
pub struct UnixAddr(pub libc::sockaddr_un, pub usize);
impl UnixAddr {
/// Create a new sockaddr_un representing a filesystem path.
pub fn new<P: ?Sized + NixPath>(path: &P) -> Result<UnixAddr> {
try!(path.with_nix_path(|cstr| {
unsafe {
let mut ret = libc::sockaddr_un {
sun_family: AddressFamily::Unix as sa_family_t,
.. mem::zeroed()
};
let bytes = cstr.to_bytes_with_nul();
if bytes.len() > ret.sun_path.len() {
return Err(Error::Sys(Errno::ENAMETOOLONG));
}
ptr::copy_nonoverlapping(bytes.as_ptr(),
ret.sun_path.as_mut_ptr() as *mut u8,
bytes.len());
Ok(UnixAddr(ret, bytes.len()))
}
}))
}
/// Create a new sockaddr_un representing an address in the
/// "abstract namespace". This is a Linux-specific extension,
/// primarily used to allow chrooted processes to communicate with
/// specific daemons.
pub fn new_abstract(path: &[u8]) -> Result<UnixAddr> {
unsafe {
let mut ret = libc::sockaddr_un {
sun_family: AddressFamily::Unix as sa_family_t,
.. mem::zeroed()
};
if path.len() > ret.sun_path.len() {
return Err(Error::Sys(Errno::ENAMETOOLONG));
}
// Abstract addresses are represented by sun_path[0] ==
// b'\0', so copy starting one byte in.
ptr::copy_nonoverlapping(path.as_ptr(),
ret.sun_path.as_mut_ptr().offset(1) as *mut u8,
path.len());
Ok(UnixAddr(ret, path.len()))
}
}
fn sun_path(&self) -> &[u8] {
unsafe { mem::transmute(&self.0.sun_path[..self.1]) }
}
/// If this address represents a filesystem path, return that path.
pub fn path(&self) -> Option<&Path> {
if self.1 == 0 || self.0.sun_path[0] == 0 {
// unbound or abstract
None
} else {
let p = self.sun_path();
Some(Path::new(<OsStr as OsStrExt>::from_bytes(&p[..p.len()-1])))
}
}
}
impl PartialEq for UnixAddr {
fn eq(&self, other: &UnixAddr) -> bool {
self.sun_path() == other.sun_path()
}
}
impl Eq for UnixAddr {
}
impl hash::Hash for UnixAddr {
fn hash<H: hash::Hasher>(&self, s: &mut H) {
( self.0.sun_family, self.sun_path() ).hash(s)
}
}
impl Clone for UnixAddr {
fn clone(&self) -> UnixAddr {
*self
}
}
impl fmt::Display for UnixAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.1 == 0 {
f.write_str("<unbound UNIX socket>")
} else if let Some(path) = self.path() {
path.display().fmt(f)
} else {
let display = String::from_utf8_lossy(&self.sun_path()[1..]);
write!(f, "@{}", display)
}
}
}
/*
*
* ===== Sock addr =====
*
*/
/// Represents a socket address
#[derive(Copy)]
pub enum SockAddr {
Inet(InetAddr),
Unix(UnixAddr),
#[cfg(any(target_os = "linux", target_os = "android"))]
Netlink(NetlinkAddr)
}
impl SockAddr {
pub fn new_inet(addr: InetAddr) -> SockAddr {
SockAddr::Inet(addr)
}
pub fn new_unix<P: ?Sized + NixPath>(path: &P) -> Result<SockAddr> {
Ok(SockAddr::Unix(try!(UnixAddr::new(path))))
}
#[cfg(any(target_os = "linux", target_os = "android"))]
pub fn new_netlink(pid: u32, groups: u32) -> SockAddr {
SockAddr::Netlink(NetlinkAddr::new(pid, groups))
}
pub fn family(&self) -> AddressFamily {
match *self {
SockAddr::Inet(InetAddr::V4(..)) => AddressFamily::Inet,
SockAddr::Inet(InetAddr::V6(..)) => AddressFamily::Inet6,
SockAddr::Unix(..) => AddressFamily::Unix,
#[cfg(any(target_os = "linux", target_os = "android"))]
SockAddr::Netlink(..) => AddressFamily::Netlink,
}
}
pub fn to_str(&self) -> String {
format!("{}", self)
}
pub unsafe fn as_ffi_pair(&self) -> (&libc::sockaddr, libc::socklen_t) {
match *self {
SockAddr::Inet(InetAddr::V4(ref addr)) => (mem::transmute(addr), mem::size_of::<libc::sockaddr_in>() as libc::socklen_t),
SockAddr::Inet(InetAddr::V6(ref addr)) => (mem::transmute(addr), mem::size_of::<libc::sockaddr_in6>() as libc::socklen_t),
SockAddr::Unix(UnixAddr(ref addr, len)) => (mem::transmute(addr), (len + mem::size_of::<libc::sa_family_t>()) as libc::socklen_t),
#[cfg(any(target_os = "linux", target_os = "android"))]
SockAddr::Netlink(NetlinkAddr(ref sa)) => (mem::transmute(sa), mem::size_of::<sockaddr_nl>() as libc::socklen_t),
}
}
}
impl PartialEq for SockAddr {
fn eq(&self, other: &SockAddr) -> bool {
match (*self, *other) {
(SockAddr::Inet(ref a), SockAddr::Inet(ref b)) => {
a == b
}
(SockAddr::Unix(ref a), SockAddr::Unix(ref b)) => {
a == b
}
#[cfg(any(target_os = "linux", target_os = "android"))]
(SockAddr::Netlink(ref a), SockAddr::Netlink(ref b)) => {
a == b
}
_ => false,
}
}
}
impl Eq for SockAddr {
}
impl hash::Hash for SockAddr {
fn hash<H: hash::Hasher>(&self, s: &mut H) {
match *self {
SockAddr::Inet(ref a) => a.hash(s),
SockAddr::Unix(ref a) => a.hash(s),
#[cfg(any(target_os = "linux", target_os = "android"))]
SockAddr::Netlink(ref a) => a.hash(s),
}
}
}
impl Clone for SockAddr {
fn clone(&self) -> SockAddr {
*self
}
}
impl fmt::Display for SockAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
SockAddr::Inet(ref inet) => inet.fmt(f),
SockAddr::Unix(ref unix) => unix.fmt(f),
#[cfg(any(target_os = "linux", target_os = "android"))]
SockAddr::Netlink(ref nl) => nl.fmt(f),
}
}
}
#[cfg(any(target_os = "linux", target_os = "android"))]
pub mod netlink {
use ::sys::socket::addr::{AddressFamily,sockaddr_nl};
use libc::sa_family_t;
use std::{fmt, hash};
#[derive(Copy)]
pub struct NetlinkAddr(pub sockaddr_nl);
impl NetlinkAddr {
pub fn new(pid: u32, groups: u32) -> NetlinkAddr {
NetlinkAddr(sockaddr_nl {
nl_family: AddressFamily::Netlink as sa_family_t,
nl_pad: 0,
nl_pid: pid,
nl_groups: groups,
})
}
pub fn pid(&self) -> u32 {
self.0.nl_pid
}
pub fn groups(&self) -> u32 {
self.0.nl_groups
}
}
impl PartialEq for NetlinkAddr {
    /// Two netlink addresses are equal when both the port id and the
    /// multicast group mask match.
    fn eq(&self, other: &NetlinkAddr) -> bool {
        // Bug fix: the group comparison previously compared `self` with
        // itself (`self.0.nl_groups == self.0.nl_groups`), which is always
        // true, so any two addresses with equal pids compared equal.
        self.0.nl_pid == other.0.nl_pid && self.0.nl_groups == other.0.nl_groups
    }
}
impl Eq for NetlinkAddr {
}
impl hash::Hash for NetlinkAddr {
fn hash<H: hash::Hasher>(&self, s: &mut H) {
( self.0.nl_family, self.0.nl_pid, self.0.nl_groups).hash(s)
}
}
impl Clone for NetlinkAddr {
fn clone(&self) -> NetlinkAddr {
*self
}
}
impl fmt::Display for NetlinkAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "pid: {} groups: {}", self.pid(), self.groups())
}
}
}
Fix style suggestions regarding `#[cfg]` attributes,
as pointed out by @kamalmarhubi.
use super::{consts, sa_family_t};
use {Errno, Error, Result, NixPath};
use libc;
use std::{fmt, hash, mem, net, ptr};
use std::ffi::OsStr;
use std::path::Path;
use std::os::unix::ffi::OsStrExt;
#[cfg(any(target_os = "linux", target_os = "android"))]
use ::sys::socket::addr::netlink::NetlinkAddr;
// TODO: uncomment out IpAddr functions: rust-lang/rfcs#988
/*
*
* ===== AddressFamily =====
*
*/
#[cfg(any(target_os = "linux", target_os = "android"))]
#[derive(Debug,Copy,Clone)]
#[repr(C)]
pub struct sockaddr_nl {
pub nl_family: sa_family_t,
nl_pad: libc::c_ushort,
pub nl_pid: u32,
pub nl_groups: u32
}
#[repr(i32)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub enum AddressFamily {
Unix = consts::AF_UNIX,
Inet = consts::AF_INET,
Inet6 = consts::AF_INET6,
#[cfg(any(target_os = "linux", target_os = "android"))]
Netlink = consts::AF_NETLINK,
}
#[derive(Copy)]
pub enum InetAddr {
V4(libc::sockaddr_in),
V6(libc::sockaddr_in6),
}
// Conversion and accessor methods for `InetAddr`, a thin wrapper around
// the libc `sockaddr_in` / `sockaddr_in6` structures.
impl InetAddr {
/// Build an `InetAddr` from the standard library's socket address type.
pub fn from_std(std: &net::SocketAddr) -> InetAddr {
let ip = match *std {
net::SocketAddr::V4(ref addr) => IpAddr::V4(Ipv4Addr::from_std(&addr.ip())),
net::SocketAddr::V6(ref addr) => IpAddr::V6(Ipv6Addr::from_std(&addr.ip())),
};
InetAddr::new(ip, std.port())
}
/// Construct an address from an IP and a port. The port is given in
/// host byte order and converted to network byte order here.
pub fn new(ip: IpAddr, port: u16) -> InetAddr {
match ip {
IpAddr::V4(ref ip) => {
InetAddr::V4(libc::sockaddr_in {
sin_family: AddressFamily::Inet as sa_family_t,
sin_port: port.to_be(),
sin_addr: ip.0,
// Zero the remaining platform-specific fields (sin_zero,
// and sin_len where it exists).
.. unsafe { mem::zeroed() }
})
}
IpAddr::V6(ref ip) => {
InetAddr::V6(libc::sockaddr_in6 {
sin6_family: AddressFamily::Inet6 as sa_family_t,
sin6_port: port.to_be(),
sin6_addr: ip.0,
// sin6_flowinfo and sin6_scope_id default to zero.
.. unsafe { mem::zeroed() }
})
}
}
}
/// Gets the IP address associated with this socket address.
pub fn ip(&self) -> IpAddr {
match *self {
InetAddr::V4(ref sa) => IpAddr::V4(Ipv4Addr(sa.sin_addr)),
InetAddr::V6(ref sa) => IpAddr::V6(Ipv6Addr(sa.sin6_addr)),
}
}
/// Gets the port number associated with this socket address
/// (converted back from network to host byte order).
pub fn port(&self) -> u16 {
match *self {
InetAddr::V6(ref sa) => u16::from_be(sa.sin6_port),
InetAddr::V4(ref sa) => u16::from_be(sa.sin_port),
}
}
/// Convert to the standard library's socket address type.
pub fn to_std(&self) -> net::SocketAddr {
match *self {
InetAddr::V4(ref sa) => net::SocketAddr::V4(
net::SocketAddrV4::new(
Ipv4Addr(sa.sin_addr).to_std(),
self.port())),
InetAddr::V6(ref sa) => net::SocketAddr::V6(
net::SocketAddrV6::new(
Ipv6Addr(sa.sin6_addr).to_std(),
self.port(),
// NOTE(review): flowinfo/scope_id are passed through as
// stored, with no byte-order conversion — confirm this
// matches how they were written into the struct.
sa.sin6_flowinfo,
sa.sin6_scope_id)),
}
}
/// Render as a `String`; same output as the `Display` impl.
pub fn to_str(&self) -> String {
format!("{}", self)
}
}
impl PartialEq for InetAddr {
fn eq(&self, other: &InetAddr) -> bool {
match (*self, *other) {
(InetAddr::V4(ref a), InetAddr::V4(ref b)) => {
a.sin_port == b.sin_port &&
a.sin_addr.s_addr == b.sin_addr.s_addr
}
(InetAddr::V6(ref a), InetAddr::V6(ref b)) => {
a.sin6_port == b.sin6_port &&
a.sin6_addr.s6_addr == b.sin6_addr.s6_addr &&
a.sin6_flowinfo == b.sin6_flowinfo &&
a.sin6_scope_id == b.sin6_scope_id
}
_ => false,
}
}
}
impl Eq for InetAddr {
}
impl hash::Hash for InetAddr {
fn hash<H: hash::Hasher>(&self, s: &mut H) {
match *self {
InetAddr::V4(ref a) => {
( a.sin_family,
a.sin_port,
a.sin_addr.s_addr ).hash(s)
}
InetAddr::V6(ref a) => {
( a.sin6_family,
a.sin6_port,
&a.sin6_addr.s6_addr,
a.sin6_flowinfo,
a.sin6_scope_id ).hash(s)
}
}
}
}
impl Clone for InetAddr {
fn clone(&self) -> InetAddr {
*self
}
}
impl fmt::Display for InetAddr {
    /// Formats as `ip:port` for IPv4 and `[ip]:port` for IPv6.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let ip = self.ip();
        let port = self.port();
        if let InetAddr::V6(_) = *self {
            write!(f, "[{}]:{}", ip, port)
        } else {
            write!(f, "{}:{}", ip, port)
        }
    }
}
/*
*
* ===== IpAddr =====
*
*/
pub enum IpAddr {
V4(Ipv4Addr),
V6(Ipv6Addr),
}
impl IpAddr {
/// Create a new IpAddr that contains an IPv4 address.
///
/// The result will represent the IP address a.b.c.d
pub fn new_v4(a: u8, b: u8, c: u8, d: u8) -> IpAddr {
IpAddr::V4(Ipv4Addr::new(a, b, c, d))
}
/// Create a new IpAddr that contains an IPv6 address.
///
/// The result will represent the IP address a:b:c:d:e:f
pub fn new_v6(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> IpAddr {
IpAddr::V6(Ipv6Addr::new(a, b, c, d, e, f, g, h))
}
/*
pub fn from_std(std: &net::IpAddr) -> IpAddr {
match *std {
net::IpAddr::V4(ref std) => IpAddr::V4(Ipv4Addr::from_std(std)),
net::IpAddr::V6(ref std) => IpAddr::V6(Ipv6Addr::from_std(std)),
}
}
pub fn to_std(&self) -> net::IpAddr {
match *self {
IpAddr::V4(ref ip) => net::IpAddr::V4(ip.to_std()),
IpAddr::V6(ref ip) => net::IpAddr::V6(ip.to_std()),
}
}
*/
}
impl fmt::Display for IpAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
IpAddr::V4(ref v4) => v4.fmt(f),
IpAddr::V6(ref v6) => v6.fmt(f)
}
}
}
/*
*
* ===== Ipv4Addr =====
*
*/
#[derive(Copy)]
pub struct Ipv4Addr(pub libc::in_addr);
impl Ipv4Addr {
pub fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr {
let ip = (((a as u32) << 24) |
((b as u32) << 16) |
((c as u32) << 8) |
((d as u32) << 0)).to_be();
Ipv4Addr(libc::in_addr { s_addr: ip })
}
pub fn from_std(std: &net::Ipv4Addr) -> Ipv4Addr {
let bits = std.octets();
Ipv4Addr::new(bits[0], bits[1], bits[2], bits[3])
}
pub fn any() -> Ipv4Addr {
Ipv4Addr(libc::in_addr { s_addr: consts::INADDR_ANY })
}
pub fn octets(&self) -> [u8; 4] {
let bits = u32::from_be(self.0.s_addr);
[(bits >> 24) as u8, (bits >> 16) as u8, (bits >> 8) as u8, bits as u8]
}
pub fn to_std(&self) -> net::Ipv4Addr {
let bits = self.octets();
net::Ipv4Addr::new(bits[0], bits[1], bits[2], bits[3])
}
}
impl PartialEq for Ipv4Addr {
fn eq(&self, other: &Ipv4Addr) -> bool {
self.0.s_addr == other.0.s_addr
}
}
impl Eq for Ipv4Addr {
}
impl hash::Hash for Ipv4Addr {
fn hash<H: hash::Hasher>(&self, s: &mut H) {
self.0.s_addr.hash(s)
}
}
impl Clone for Ipv4Addr {
fn clone(&self) -> Ipv4Addr {
*self
}
}
impl fmt::Display for Ipv4Addr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let octets = self.octets();
write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3])
}
}
/*
*
* ===== Ipv6Addr =====
*
*/
#[derive(Clone, Copy)]
pub struct Ipv6Addr(pub libc::in6_addr);
macro_rules! to_u8_array {
($($num:ident),*) => {
if cfg!(target_endian = "big") {
[ $(($num>>8) as u8, ($num&0xff) as u8,)* ]
} else {
[ $(($num&0xff) as u8, ($num>>8) as u8,)* ]
}
}
}
macro_rules! to_u16_array {
($slf:ident, $($first:expr, $second:expr),*) => {
if cfg!(target_endian = "big") {
[$( (($slf.0.s6_addr[$first] as u16) << 8) + $slf.0.s6_addr[$second] as u16,)*]
} else {
[$( (($slf.0.s6_addr[$second] as u16) << 8) + $slf.0.s6_addr[$first] as u16,)*]
}
}
}
impl Ipv6Addr {
pub fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr {
let mut in6_addr_var: libc::in6_addr = unsafe{mem::uninitialized()};
in6_addr_var.s6_addr = to_u8_array!(a,b,c,d,e,f,g,h);
Ipv6Addr(in6_addr_var)
}
pub fn from_std(std: &net::Ipv6Addr) -> Ipv6Addr {
let s = std.segments();
Ipv6Addr::new(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7])
}
/// Return the eight 16-bit segments that make up this address
pub fn segments(&self) -> [u16; 8] {
to_u16_array!(self, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)
}
pub fn to_std(&self) -> net::Ipv6Addr {
let s = self.segments();
net::Ipv6Addr::new(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7])
}
}
impl fmt::Display for Ipv6Addr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.to_std().fmt(fmt)
}
}
/*
*
* ===== UnixAddr =====
*
*/
/// A wrapper around sockaddr_un. We track the length of sun_path,
/// because it may not be null-terminated (unconnected and abstract
/// sockets). Note that the actual sockaddr length is greater by
/// size_of::<sa_family_t>().
#[derive(Copy)]
pub struct UnixAddr(pub libc::sockaddr_un, pub usize);
impl UnixAddr {
    /// Create a new sockaddr_un representing a filesystem path.
    ///
    /// The path, including its trailing NUL, is copied into `sun_path`;
    /// paths that do not fit yield `ENAMETOOLONG`.
    pub fn new<P: ?Sized + NixPath>(path: &P) -> Result<UnixAddr> {
        try!(path.with_nix_path(|cstr| {
            unsafe {
                let mut ret = libc::sockaddr_un {
                    sun_family: AddressFamily::Unix as sa_family_t,
                    .. mem::zeroed()
                };
                let bytes = cstr.to_bytes_with_nul();
                if bytes.len() > ret.sun_path.len() {
                    return Err(Error::Sys(Errno::ENAMETOOLONG));
                }
                // sun_path is a c_char array; cast for the byte copy.
                ptr::copy_nonoverlapping(bytes.as_ptr(),
                                         ret.sun_path.as_mut_ptr() as *mut u8,
                                         bytes.len());
                // The stored length (second tuple field) includes the NUL.
                Ok(UnixAddr(ret, bytes.len()))
            }
        }))
    }
    /// Create a new sockaddr_un representing an address in the
    /// "abstract namespace". This is a Linux-specific extension,
    /// primarily used to allow chrooted processes to communicate with
    /// specific daemons.
    pub fn new_abstract(path: &[u8]) -> Result<UnixAddr> {
        unsafe {
            let mut ret = libc::sockaddr_un {
                sun_family: AddressFamily::Unix as sa_family_t,
                .. mem::zeroed()
            };
            // Bug fix: the copy below starts at offset 1 (after the leading
            // NUL that marks an abstract address), so only
            // `sun_path.len() - 1` bytes are available. The previous `>`
            // check accepted a path of exactly `sun_path.len()` bytes,
            // which would write one byte past the end of `sun_path`.
            if path.len() >= ret.sun_path.len() {
                return Err(Error::Sys(Errno::ENAMETOOLONG));
            }
            // Abstract addresses are represented by sun_path[0] ==
            // b'\0', so copy starting one byte in.
            ptr::copy_nonoverlapping(path.as_ptr(),
                                     ret.sun_path.as_mut_ptr().offset(1) as *mut u8,
                                     path.len());
            Ok(UnixAddr(ret, path.len()))
        }
    }
    /// The used portion of `sun_path` as bytes (`self.1` is its length).
    fn sun_path(&self) -> &[u8] {
        // Reinterprets &[c_char] as &[u8]; identical size and layout.
        unsafe { mem::transmute(&self.0.sun_path[..self.1]) }
    }
    /// If this address represents a filesystem path, return that path.
    pub fn path(&self) -> Option<&Path> {
        if self.1 == 0 || self.0.sun_path[0] == 0 {
            // unbound or abstract
            None
        } else {
            // Drop the trailing NUL stored by new().
            let p = self.sun_path();
            Some(Path::new(<OsStr as OsStrExt>::from_bytes(&p[..p.len()-1])))
        }
    }
}
impl PartialEq for UnixAddr {
fn eq(&self, other: &UnixAddr) -> bool {
self.sun_path() == other.sun_path()
}
}
impl Eq for UnixAddr {
}
impl hash::Hash for UnixAddr {
fn hash<H: hash::Hasher>(&self, s: &mut H) {
( self.0.sun_family, self.sun_path() ).hash(s)
}
}
impl Clone for UnixAddr {
fn clone(&self) -> UnixAddr {
*self
}
}
impl fmt::Display for UnixAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.1 == 0 {
f.write_str("<unbound UNIX socket>")
} else if let Some(path) = self.path() {
path.display().fmt(f)
} else {
let display = String::from_utf8_lossy(&self.sun_path()[1..]);
write!(f, "@{}", display)
}
}
}
/*
*
* ===== Sock addr =====
*
*/
/// Represents a socket address
#[derive(Copy)]
pub enum SockAddr {
Inet(InetAddr),
Unix(UnixAddr),
#[cfg(any(target_os = "linux", target_os = "android"))]
Netlink(NetlinkAddr)
}
impl SockAddr {
/// Wrap an already-constructed internet address.
pub fn new_inet(addr: InetAddr) -> SockAddr {
SockAddr::Inet(addr)
}
/// Build a UNIX-domain address from a filesystem path.
pub fn new_unix<P: ?Sized + NixPath>(path: &P) -> Result<SockAddr> {
Ok(SockAddr::Unix(try!(UnixAddr::new(path))))
}
/// Build a netlink address from a port id and multicast group mask.
#[cfg(any(target_os = "linux", target_os = "android"))]
pub fn new_netlink(pid: u32, groups: u32) -> SockAddr {
SockAddr::Netlink(NetlinkAddr::new(pid, groups))
}
/// The address family corresponding to the stored variant.
pub fn family(&self) -> AddressFamily {
match *self {
SockAddr::Inet(InetAddr::V4(..)) => AddressFamily::Inet,
SockAddr::Inet(InetAddr::V6(..)) => AddressFamily::Inet6,
SockAddr::Unix(..) => AddressFamily::Unix,
#[cfg(any(target_os = "linux", target_os = "android"))]
SockAddr::Netlink(..) => AddressFamily::Netlink,
}
}
/// Render as a `String`; same output as the `Display` impl.
pub fn to_str(&self) -> String {
format!("{}", self)
}
/// Return a (pointer, length) pair suitable for passing to the C socket
/// API (`bind`, `connect`, `sendto`, ...).
///
/// # Safety
/// The returned reference reinterprets the variant's storage as a
/// generic `libc::sockaddr`; it is only valid while `self` is alive and
/// must only be read up to the returned length.
pub unsafe fn as_ffi_pair(&self) -> (&libc::sockaddr, libc::socklen_t) {
match *self {
SockAddr::Inet(InetAddr::V4(ref addr)) => (mem::transmute(addr), mem::size_of::<libc::sockaddr_in>() as libc::socklen_t),
SockAddr::Inet(InetAddr::V6(ref addr)) => (mem::transmute(addr), mem::size_of::<libc::sockaddr_in6>() as libc::socklen_t),
// For UNIX sockets the effective length is the tracked number of
// used sun_path bytes plus the size of the family field.
SockAddr::Unix(UnixAddr(ref addr, len)) => (mem::transmute(addr), (len + mem::size_of::<libc::sa_family_t>()) as libc::socklen_t),
#[cfg(any(target_os = "linux", target_os = "android"))]
SockAddr::Netlink(NetlinkAddr(ref sa)) => (mem::transmute(sa), mem::size_of::<sockaddr_nl>() as libc::socklen_t),
}
}
}
impl PartialEq for SockAddr {
fn eq(&self, other: &SockAddr) -> bool {
match (*self, *other) {
(SockAddr::Inet(ref a), SockAddr::Inet(ref b)) => {
a == b
}
(SockAddr::Unix(ref a), SockAddr::Unix(ref b)) => {
a == b
}
#[cfg(any(target_os = "linux", target_os = "android"))]
(SockAddr::Netlink(ref a), SockAddr::Netlink(ref b)) => {
a == b
}
_ => false,
}
}
}
impl Eq for SockAddr {
}
impl hash::Hash for SockAddr {
fn hash<H: hash::Hasher>(&self, s: &mut H) {
match *self {
SockAddr::Inet(ref a) => a.hash(s),
SockAddr::Unix(ref a) => a.hash(s),
#[cfg(any(target_os = "linux", target_os = "android"))]
SockAddr::Netlink(ref a) => a.hash(s),
}
}
}
impl Clone for SockAddr {
fn clone(&self) -> SockAddr {
*self
}
}
impl fmt::Display for SockAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
SockAddr::Inet(ref inet) => inet.fmt(f),
SockAddr::Unix(ref unix) => unix.fmt(f),
#[cfg(any(target_os = "linux", target_os = "android"))]
SockAddr::Netlink(ref nl) => nl.fmt(f),
}
}
}
#[cfg(any(target_os = "linux", target_os = "android"))]
pub mod netlink {
use ::sys::socket::addr::{AddressFamily,sockaddr_nl};
use libc::sa_family_t;
use std::{fmt, hash};
#[derive(Copy)]
pub struct NetlinkAddr(pub sockaddr_nl);
impl NetlinkAddr {
pub fn new(pid: u32, groups: u32) -> NetlinkAddr {
NetlinkAddr(sockaddr_nl {
nl_family: AddressFamily::Netlink as sa_family_t,
nl_pad: 0,
nl_pid: pid,
nl_groups: groups,
})
}
pub fn pid(&self) -> u32 {
self.0.nl_pid
}
pub fn groups(&self) -> u32 {
self.0.nl_groups
}
}
impl PartialEq for NetlinkAddr {
    /// Two netlink addresses are equal when both the port id and the
    /// multicast group mask match.
    fn eq(&self, other: &NetlinkAddr) -> bool {
        // Bug fix: the group comparison previously compared `self` with
        // itself (`self.0.nl_groups == self.0.nl_groups`), which is always
        // true, so any two addresses with equal pids compared equal.
        self.0.nl_pid == other.0.nl_pid && self.0.nl_groups == other.0.nl_groups
    }
}
impl Eq for NetlinkAddr {
}
impl hash::Hash for NetlinkAddr {
fn hash<H: hash::Hasher>(&self, s: &mut H) {
( self.0.nl_family, self.0.nl_pid, self.0.nl_groups).hash(s)
}
}
impl Clone for NetlinkAddr {
fn clone(&self) -> NetlinkAddr {
*self
}
}
impl fmt::Display for NetlinkAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "pid: {} groups: {}", self.pid(), self.groups())
}
}
}
|
use super::*;
/// Fill every element of `slice` with a clone of `val`, analogous to
/// C's `memset` (the final `val` itself is dropped on return).
pub fn memset<T: Clone>(slice: &mut [T], val: T) {
    slice.iter_mut().for_each(|elem| *elem = val.clone());
}
pub struct CompressorOxide {
pub lz: LZOxide,
pub params: ParamsOxide,
pub callback: Option<CallbackFunc>,
pub local_buf: [u8; TDEFL_OUT_BUF_SIZE],
pub huff: HuffmanOxide,
pub dict: DictOxide,
}
#[derive(Copy, Clone)]
pub struct CallbackFunc {
pub put_buf_func: PutBufFuncPtrNotNull,
pub put_buf_user: *mut c_void,
}
pub struct CallbackBuf<'a> {
pub out_buf: &'a mut [u8],
}
pub enum CallbackOut<'a> {
Func(CallbackFunc),
Buf(CallbackBuf<'a>),
}
impl<'a> CallbackOut<'a> {
/// Choose the buffer the compressor writes into: the caller's output
/// buffer when it has room for a full `TDEFL_OUT_BUF_SIZE` chunk at
/// `out_buf_ofs`, otherwise the compressor's own `local_buf`.
pub fn new_output_buffer<'b>(
&'b mut self,
local_buf: &'b mut [u8],
out_buf_ofs: usize
) -> OutputBufferOxide<'b> {
let is_local;
// 16 bytes of slack are kept at the end of the chosen buffer —
// presumably to leave room for the unconditional 8-byte unaligned
// writes in BitBuffer::flush; confirm.
let buf_len = TDEFL_OUT_BUF_SIZE - 16;
let chosen_buffer = match *self {
// NOTE(review): `cb.out_buf.len() - out_buf_ofs` underflows if
// out_buf_ofs > out_buf.len() — presumably callers guarantee
// the offset stays in bounds; confirm.
CallbackOut::Buf(ref mut cb) if cb.out_buf.len() - out_buf_ofs >= TDEFL_OUT_BUF_SIZE => {
is_local = false;
&mut cb.out_buf[out_buf_ofs..out_buf_ofs + buf_len]
},
_ => {
is_local = true;
&mut local_buf[..buf_len]
},
};
// The cursor tracks the byte write position within the buffer.
let cursor = Cursor::new(chosen_buffer);
OutputBufferOxide {
inner: cursor,
local: is_local,
bit_buffer: 0,
bits_in: 0,
}
}
}
pub struct CallbackOxide<'a> {
pub in_buf: Option<&'a [u8]>,
pub out: CallbackOut<'a>,
}
impl<'a> CallbackOxide<'a> {
/// Validate and wrap raw C-style (pointer, length) arguments into safe
/// slice/enum form.
///
/// # Safety
/// `in_buf` and `out_buf` must either be null or valid for reads/writes
/// of `in_size` / `out_size` bytes for the lifetime `'a`.
///
/// # Errors
/// Returns `TDEFLStatus::BadParam` when no callback is given and
/// `out_buf` is null, when a callback is combined with an output
/// buffer, or when `in_size > 0` with a null input pointer.
pub unsafe fn new(
callback_func: Option<CallbackFunc>,
in_buf: *const c_void,
in_size: usize,
out_buf: *mut c_void,
out_size: usize,
) -> Result<Self, TDEFLStatus> {
let out = match callback_func {
// No callback: the caller must supply a non-null output buffer.
None => CallbackOut::Buf(CallbackBuf {
out_buf: slice::from_raw_parts_mut(
(out_buf as *mut u8).as_mut().ok_or(TDEFLStatus::BadParam)?,
out_size
),
}),
Some(func) => {
// With a callback, an output buffer must NOT also be given.
if out_size > 0 || out_buf.as_mut().is_some() {
return Err(TDEFLStatus::BadParam);
}
CallbackOut::Func(func)
},
};
if in_size > 0 && in_buf.is_null() {
return Err(TDEFLStatus::BadParam);
}
Ok(CallbackOxide {
// A null input pointer maps to None; otherwise borrow the input
// as a byte slice of `in_size` bytes.
in_buf: (in_buf as *const u8).as_ref().map(|in_buf|
slice::from_raw_parts(in_buf, in_size)
),
out: out,
})
}
}
impl CompressorOxide {
pub unsafe fn new(callback: Option<CallbackFunc>, flags: c_uint) -> Self {
CompressorOxide {
callback: callback,
lz: LZOxide::new(),
params: ParamsOxide::new(flags),
local_buf: [0; TDEFL_OUT_BUF_SIZE],
huff: HuffmanOxide::new(),
dict: DictOxide::new(flags),
}
}
}
pub struct HuffmanOxide {
pub count: [[u16; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
pub codes: [[u16; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
pub code_sizes: [[u8; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
}
impl HuffmanOxide {
pub fn new() -> Self {
HuffmanOxide {
count: [[0; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
codes: [[0; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
code_sizes: [[0; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
}
}
}
pub struct OutputBufferOxide<'a> {
pub inner: Cursor<&'a mut [u8]>,
pub local: bool,
pub bit_buffer: u32,
pub bits_in: u32,
}
pub struct BitBuffer {
pub bit_buffer: u64,
pub bits_in: u32,
}
impl BitBuffer {
/// Append `len` bits (LSB-first) to the 64-bit accumulator. The caller
/// must keep `bits_in + len <= 64` between flushes.
pub fn put_fast(&mut self, bits: u64, len: u32) {
self.bit_buffer |= bits << self.bits_in;
self.bits_in += len;
}
/// Write out all whole bytes currently buffered, keeping the remaining
/// 0-7 bits for the next call.
///
/// NOTE(review): this unconditionally writes 8 bytes (a full u64) at
/// the cursor position via `write_unaligned`, so the underlying buffer
/// must always have at least 8 bytes of slack past `position()` —
/// presumably guaranteed by the 16-byte reserve in
/// `new_output_buffer`; confirm.
pub fn flush(&mut self, output: &mut OutputBufferOxide) -> io::Result<()> {
let pos = output.inner.position() as usize;
// Raw pointer into the output buffer at the current position.
let inner = &mut((*output.inner.get_mut())[pos]) as *mut u8 as *mut u64;
unsafe {
ptr::write_unaligned(inner, self.bit_buffer);
}
// Advance by the number of complete bytes just flushed...
output.inner.seek(SeekFrom::Current((self.bits_in >> 3) as i64))?;
// ...and keep only the leftover (bits_in % 8) bits.
self.bit_buffer >>= self.bits_in & !7;
self.bits_in &= 7;
Ok(())
}
}
pub struct DictOxide {
pub max_probes: [c_uint; 2],
pub dict: [u8; TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1],
pub next: [u16; TDEFL_LZ_DICT_SIZE],
pub hash: [u16; TDEFL_LZ_DICT_SIZE],
pub code_buf_dict_pos: c_uint,
pub lookahead_size: c_uint,
pub lookahead_pos: c_uint,
pub size: c_uint,
}
impl DictOxide {
pub fn new(flags: c_uint) -> Self {
DictOxide {
max_probes: [
1 + ((flags & 0xFFF) + 2) / 3,
1 + (((flags & 0xFFF) >> 2) + 2) / 3
],
dict: [0; TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1],
next: [0; TDEFL_LZ_DICT_SIZE],
hash: [0; TDEFL_LZ_DICT_SIZE],
code_buf_dict_pos: 0,
lookahead_size: 0,
lookahead_pos: 0,
size: 0,
}
}
}
pub struct LZOxide {
pub codes: [u8; TDEFL_LZ_CODE_BUF_SIZE],
pub code_position: usize,
pub flag_position: usize,
pub total_bytes: c_uint,
pub num_flags_left: c_uint,
}
pub struct ParamsOxide {
pub flags: c_uint,
pub greedy_parsing: bool,
pub block_index: c_uint,
pub saved_match_dist: c_uint,
pub saved_match_len: libc::c_uint,
pub saved_lit: u8,
pub flush: TDEFLFlush,
pub flush_ofs: c_uint,
pub flush_remaining: c_uint,
pub finished: bool,
pub adler32: c_uint,
pub src_pos: usize,
pub src_buf_left: usize,
pub out_buf_ofs: usize,
pub prev_return_status: TDEFLStatus,
pub saved_bit_buffer: u32,
pub saved_bits_in: u32,
}
impl ParamsOxide {
pub fn new(flags: c_uint) -> Self {
ParamsOxide {
flags: flags,
greedy_parsing: flags & TDEFL_GREEDY_PARSING_FLAG != 0,
block_index: 0,
saved_match_dist: 0,
saved_match_len: 0,
saved_lit: 0,
flush: TDEFLFlush::None,
flush_ofs: 0,
flush_remaining: 0,
finished: false,
adler32: ::MZ_ADLER32_INIT as c_uint,
src_pos: 0,
src_buf_left: 0,
out_buf_ofs: 0,
prev_return_status: TDEFLStatus::Okay,
saved_bit_buffer: 0,
saved_bits_in: 0,
}
}
}
impl LZOxide {
/// Fresh LZ code buffer. `code_position` starts at 1, leaving index 0
/// free for the first flag byte; a fresh flag byte grants 8 flag bits.
pub fn new() -> Self {
LZOxide {
codes: [0; TDEFL_LZ_CODE_BUF_SIZE],
code_position: 1,
flag_position: 0,
total_bytes: 0,
num_flags_left: 8,
}
}
/// Append one code byte at the current position.
pub fn write_code(&mut self, val: u8) {
// TODO: uncheck slice get
self.codes[self.code_position] = val;
self.code_position += 1;
}
/// Finalize the current flag byte: if all 8 flag bits are unused the
/// reserved byte is zeroed and reclaimed; otherwise the used bits are
/// shifted down past the unused ones.
pub fn init_flag(&mut self) {
if self.num_flags_left == 8 {
*self.get_flag() = 0;
self.code_position -= 1;
} else {
*self.get_flag() >>= self.num_flags_left;
}
}
/// Mutable access to the byte holding the current flag bits.
pub fn get_flag(&mut self) -> &mut u8 {
&mut self.codes[self.flag_position]
}
/// Reserve the next byte in the code buffer as the new flag byte.
pub fn plant_flag(&mut self) {
self.flag_position = self.code_position;
self.code_position += 1;
}
/// Use up one flag bit, starting a new flag byte when all 8 are spent.
pub fn consume_flag(&mut self) {
self.num_flags_left -= 1;
if self.num_flags_left == 0 {
self.num_flags_left = 8;
self.plant_flag();
}
}
}
pub struct SavedOutputBufferOxide {
pub pos: u64,
pub bit_buffer: u32,
pub bits_in: u32,
pub local: bool,
}
impl<'a> OutputBufferOxide<'a> {
/// Append `len` bits (LSB-first) to the stream, emitting whole bytes
/// as they fill up.
fn put_bits(&mut self, bits: u32, len: u32) -> io::Result<()> {
// Precondition: `bits` must fit in `len` bits.
assert!(bits <= ((1u32 << len) - 1u32));
self.bit_buffer |= bits << self.bits_in;
self.bits_in += len;
while self.bits_in >= 8 {
self.inner.write(&[self.bit_buffer as u8][..])?;
self.bit_buffer >>= 8;
self.bits_in -= 8;
}
Ok(())
}
/// Snapshot the cursor position and bit state so a speculative block
/// encoding can later be rolled back with `load`.
fn save(&self) -> SavedOutputBufferOxide {
SavedOutputBufferOxide {
pos: self.inner.position(),
bit_buffer: self.bit_buffer,
bits_in: self.bits_in,
local: self.local,
}
}
/// Restore a full snapshot taken by `save`.
fn load(&mut self, saved: SavedOutputBufferOxide) {
self.inner.set_position(saved.pos);
self.bit_buffer = saved.bit_buffer;
self.bits_in = saved.bits_in;
self.local = saved.local;
}
/// Restore only the bit-level state, keeping the current position.
fn load_bits(&mut self, saved: &SavedOutputBufferOxide) {
self.bit_buffer = saved.bit_buffer;
self.bits_in = saved.bits_in;
}
/// Pad with zero bits up to the next byte boundary.
fn pad_to_bytes(&mut self) -> io::Result<()> {
if self.bits_in != 0 {
let len = 8 - self.bits_in;
self.put_bits(0, len)?;
}
Ok(())
}
}
/// Sort symbol frequencies ascending by `m_key` using a two-pass
/// byte-wise LSB radix sort, ping-ponging between the two scratch
/// buffers. Returns whichever buffer ends up holding the sorted data.
pub fn tdefl_radix_sort_syms_oxide<'a>(
symbols0: &'a mut [tdefl_sym_freq],
symbols1: &'a mut [tdefl_sym_freq]
) -> &'a mut [tdefl_sym_freq] {
// Histograms of the low byte (pass 0) and high byte (pass 1) of keys.
let mut hist = [[0; 256]; 2];
for freq in symbols0.iter() {
hist[0][(freq.m_key & 0xFF) as usize] += 1;
hist[1][((freq.m_key >> 8) & 0xFF) as usize] += 1;
}
// If every key's high byte is zero, a single pass suffices.
let mut n_passes = 2;
if symbols0.len() == hist[1][0] {
n_passes -= 1;
}
let mut current_symbols = symbols0;
let mut new_symbols = symbols1;
for pass in 0..n_passes {
// Prefix-sum the histogram into starting offsets per bucket.
let mut offsets = [0; 256];
let mut offset = 0;
for i in 0..256 {
offsets[i] = offset;
offset += hist[pass][i];
}
// Stable scatter into the other buffer by this pass's byte.
for sym in current_symbols.iter() {
let j = ((sym.m_key >> (pass * 8)) & 0xFF) as usize;
new_symbols[offsets[j]] = *sym;
offsets[j] += 1;
}
mem::swap(&mut current_symbols, &mut new_symbols);
}
current_symbols
}
// TODO change to iterators
/// Compute optimal Huffman code lengths in place: `symbols` enters
/// sorted ascending by frequency (`m_key`) and leaves with each `m_key`
/// holding the symbol's code length in bits. Appears to follow the
/// classic in-place minimum-redundancy algorithm (Moffat/Katajainen
/// style): phase 1 builds the tree using m_key as parent links, phase 2
/// converts parent links to depths, phase 3 assigns final depths.
pub fn tdefl_calculate_minimum_redundancy_oxide(symbols: &mut [tdefl_sym_freq]) {
match symbols.len() {
0 => (),
// A single symbol still needs a 1-bit code.
1 => symbols[0].m_key = 1,
n => {
// Phase 1: build the tree in place. Internal-node m_keys become
// indices of their parents; `root` scans internal nodes,
// `leaf` scans remaining leaves.
symbols[0].m_key += symbols[1].m_key;
let mut root = 0;
let mut leaf = 2;
for next in 1..n - 1 {
// Pick the smaller of the next internal node and next leaf
// as the first child.
if (leaf >= n) || (symbols[root].m_key < symbols[leaf].m_key) {
symbols[next].m_key = symbols[root].m_key;
symbols[root].m_key = next as u16;
root += 1;
} else {
symbols[next].m_key = symbols[leaf].m_key;
leaf += 1;
}
// Same choice for the second child.
if (leaf >= n) || (root < next && symbols[root].m_key < symbols[leaf].m_key) {
symbols[next].m_key = symbols[next].m_key + symbols[root].m_key; // TODO why cast to u16 in C?
symbols[root].m_key = next as u16;
root += 1;
} else {
symbols[next].m_key = symbols[next].m_key + symbols[leaf].m_key;
leaf += 1;
}
}
// Phase 2: convert parent links to node depths, root first.
symbols[n - 2].m_key = 0;
for next in (0..n - 2).rev() {
symbols[next].m_key = symbols[symbols[next].m_key as usize].m_key + 1;
}
// Phase 3: assign final code lengths to the leaves, walking
// depths from shallow to deep. `avbl` tracks available slots at
// the current depth, `used` the internal nodes consumed there.
let mut avbl = 1;
let mut used = 0;
let mut dpth = 0;
let mut root = (n - 2) as i32;
let mut next = (n - 1) as i32;
while avbl > 0 {
while (root >= 0) && (symbols[root as usize].m_key == dpth) {
used += 1;
root -= 1;
}
while avbl > used {
symbols[next as usize].m_key = dpth;
next -= 1;
avbl -= 1;
}
avbl = 2 * used;
dpth += 1;
used = 0;
}
}
}
}
/// Adjust the code-length histogram `num_codes` (indexed by length) so
/// that no code exceeds `max_code_size` bits while the tree stays
/// complete — the standard length-limiting step before emitting a
/// DEFLATE Huffman table.
pub fn tdefl_huffman_enforce_max_code_size_oxide(
num_codes: &mut [c_int],
code_list_len: usize,
max_code_size: usize
) {
// With zero or one code there is nothing to limit.
if code_list_len <= 1 { return; }
// Fold all over-long codes into the max_code_size bucket.
num_codes[max_code_size] += num_codes[max_code_size + 1..].iter().sum();
// Kraft-style total scaled so a complete tree sums to 1 << max_code_size.
let total = num_codes[1..max_code_size + 1].iter().rev().enumerate().fold(0u32, |total, (i, &x)| {
total + ((x as u32) << i)
});
// While oversubscribed: remove one code at max length and compensate
// by moving one shorter code down a level (one becomes two).
for _ in (1 << max_code_size)..total {
num_codes[max_code_size] -= 1;
for i in (1..max_code_size).rev() {
if num_codes[i] != 0 {
num_codes[i] -= 1;
num_codes[i + 1] += 2;
break;
}
}
}
}
/// Build the canonical Huffman codes for table `table_num` of `h`.
///
/// When `static_table` is true the code sizes already stored in
/// `h.code_sizes` are used as-is; otherwise optimal, length-limited
/// sizes are computed from the symbol counts in `h.count`. In both
/// cases the canonical bit codes are then derived and stored
/// (bit-reversed, as DEFLATE emits codes LSB-first).
pub fn tdefl_optimize_huffman_table_oxide(
h: &mut HuffmanOxide,
table_num: usize,
table_len: usize,
code_size_limit: usize,
static_table: bool
) {
// num_codes[l] = number of codes of length l; next_code[l] = next
// canonical code value to hand out at length l.
let mut num_codes = [0 as c_int; TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
let mut next_code = [0 as c_uint; TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
if static_table {
// Static table: just histogram the pre-set code sizes.
for &code_size in &h.code_sizes[table_num][..table_len] {
num_codes[code_size as usize] += 1;
}
} else {
// Dynamic table: gather used symbols and their frequencies...
let mut symbols0 = [tdefl_sym_freq { m_key: 0, m_sym_index: 0 }; TDEFL_MAX_HUFF_SYMBOLS];
let mut symbols1 = [tdefl_sym_freq { m_key: 0, m_sym_index: 0 }; TDEFL_MAX_HUFF_SYMBOLS];
let mut num_used_symbols = 0;
for i in 0..table_len {
if h.count[table_num][i] != 0 {
symbols0[num_used_symbols] = tdefl_sym_freq {
m_key: h.count[table_num][i],
m_sym_index: i as u16
};
num_used_symbols += 1;
}
}
// ...sort by frequency, compute optimal code lengths, and clamp
// them to code_size_limit.
let mut symbols = tdefl_radix_sort_syms_oxide(&mut symbols0[..num_used_symbols],
&mut symbols1[..num_used_symbols]);
tdefl_calculate_minimum_redundancy_oxide(symbols);
for symbol in symbols.iter() {
num_codes[symbol.m_key as usize] += 1;
}
tdefl_huffman_enforce_max_code_size_oxide(&mut num_codes, num_used_symbols, code_size_limit);
// Reset the table, then write the (possibly clamped) length for
// each used symbol: symbols are frequency-sorted ascending, so the
// rarest symbols (at the front) get the longest codes.
memset(&mut h.code_sizes[table_num][..], 0);
memset(&mut h.codes[table_num][..], 0);
let mut last = num_used_symbols;
for i in 1..code_size_limit + 1 {
let first = last - num_codes[i] as usize;
for symbol in &symbols[first..last] {
h.code_sizes[table_num][symbol.m_sym_index as usize] = i as u8;
}
last = first;
}
}
// Derive the canonical starting code for each length.
let mut j = 0;
next_code[1] = 0;
for i in 2..code_size_limit + 1 {
j = (j + num_codes[i - 1]) << 1;
next_code[i] = j as c_uint;
}
// Assign each symbol its code, bit-reversed for LSB-first emission.
for (&code_size, huff_code) in h.code_sizes[table_num].iter().take(table_len)
.zip(h.codes[table_num].iter_mut().take(table_len))
{
if code_size == 0 { continue }
let mut code = next_code[code_size as usize];
next_code[code_size as usize] += 1;
let mut rev_code = 0;
for _ in 0..code_size { // TODO reverse u32 faster?
rev_code = (rev_code << 1) | (code & 1);
code >>= 1;
}
*huff_code = rev_code as u16;
}
}
/// Transmission order of the code-length-code lengths in a dynamic block
/// header, as fixed by RFC 1951 (DEFLATE), section 3.2.7.
const TDEFL_PACKED_CODE_SIZE_SYMS_SWIZZLE: [u8; 19] =
    [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];
/// Writes the header of a dynamic-Huffman block (BTYPE = 10): optimizes the
/// literal/length and distance tables, run-length-encodes their code lengths
/// with symbols 16/17/18, and emits the RFC 1951 §3.2.7 header structure.
/// The BFINAL bit is written by the caller before this runs.
pub fn tdefl_start_dynamic_block_oxide(
    h: &mut HuffmanOxide,
    output: &mut OutputBufferOxide
) -> io::Result<()> {
    // The end-of-block symbol (256) always occurs exactly once.
    h.count[0][256] = 1;
    tdefl_optimize_huffman_table_oxide(h, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, false);
    tdefl_optimize_huffman_table_oxide(h, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, false);
    // Trim trailing unused symbols; DEFLATE mandates at least 257 lit/len
    // codes and 1 distance code, hence the fixed lower slices.
    let num_lit_codes = 286 - &h.code_sizes[0][257..286]
        .iter().rev().take_while(|&x| *x == 0).count();
    let num_dist_codes = 30 - &h.code_sizes[1][1..30]
        .iter().rev().take_while(|&x| *x == 0).count();
    let mut code_sizes_to_pack = [0u8; TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1];
    let mut packed_code_sizes = [0u8; TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1];
    let total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
    // Concatenate lit/len and distance code sizes into one array for RLE.
    &code_sizes_to_pack[..num_lit_codes]
        .copy_from_slice(&h.code_sizes[0][..num_lit_codes]);
    &code_sizes_to_pack[num_lit_codes..total_code_sizes_to_pack]
        .copy_from_slice(&h.code_sizes[1][..num_dist_codes]);
    // Running state for the RLE of code lengths.
    struct RLE {
        pub rle_z_count: u32,
        pub rle_repeat_count: u32,
        pub prev_code_size: u8
    }
    let mut rle = RLE {
        rle_z_count: 0,
        rle_repeat_count: 0,
        prev_code_size: 0xFF
    };
    // Flushes a pending run of repeated non-zero code sizes: emitted
    // literally when shorter than 3, otherwise as symbol 16 ("repeat
    // previous") with the repeat count biased by 3. Frequencies of the
    // emitted symbols are accumulated in h.count[2].
    let tdefl_rle_prev_code_size = |rle: &mut RLE,
        packed_code_sizes: &mut Cursor<&mut [u8]>,
        h: &mut HuffmanOxide| -> io::Result<()>
    {
        if rle.rle_repeat_count != 0 {
            if rle.rle_repeat_count < 3 {
                h.count[2][rle.prev_code_size as usize] = (h.count[2][rle.prev_code_size as usize] as i32 + rle.rle_repeat_count as i32) as u16; // TODO
                while rle.rle_repeat_count != 0 {
                    rle.rle_repeat_count -= 1;
                    packed_code_sizes.write(&[rle.prev_code_size][..])?;
                }
            } else {
                h.count[2][16] = (h.count[2][16] as i32 + 1) as u16;
                packed_code_sizes.write(&[16, (rle.rle_repeat_count as i32 - 3) as u8][..])?;
            }
            rle.rle_repeat_count = 0;
        }
        Ok(())
    };
    // Flushes a pending run of zero code sizes: literally (< 3 zeros), as
    // symbol 17 (3..=10 zeros, count biased by 3), or as symbol 18
    // (11..=138 zeros, count biased by 11).
    let tdefl_rle_zero_code_size = |rle: &mut RLE,
        packed_code_sizes: &mut Cursor<&mut [u8]>,
        h: &mut HuffmanOxide| -> io::Result<()>
    {
        if rle.rle_z_count != 0 {
            if rle.rle_z_count < 3 {
                h.count[2][0] = (h.count[2][0] as i32 + rle.rle_z_count as i32) as u16;
                while rle.rle_z_count != 0 {
                    rle.rle_z_count -= 1;
                    packed_code_sizes.write(&[0][..])?;
                }
            } else if rle.rle_z_count <= 10 {
                h.count[2][17] = (h.count[2][17] as i32 + 1) as u16;
                packed_code_sizes.write(&[17, (rle.rle_z_count as i32 - 3) as u8][..])?;
            } else {
                h.count[2][18] = (h.count[2][18] as i32 + 1) as u16;
                packed_code_sizes.write(&[18, (rle.rle_z_count as i32 - 11) as u8][..])?;
            }
            rle.rle_z_count = 0;
        }
        Ok(())
    };
    memset(&mut h.count[2][..TDEFL_MAX_HUFF_SYMBOLS_2], 0);
    // RLE-encode the concatenated code sizes, gathering frequencies for the
    // code-length ("pre") table in h.count[2] as we go.
    let mut packed_code_sizes_cursor = Cursor::new(&mut packed_code_sizes[..]);
    for &code_size in &code_sizes_to_pack[..total_code_sizes_to_pack] {
        if code_size == 0 {
            tdefl_rle_prev_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
            rle.rle_z_count += 1;
            // 138 is the longest zero run symbol 18 can express.
            if rle.rle_z_count == 138 {
                tdefl_rle_zero_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
            }
        } else {
            tdefl_rle_zero_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
            if code_size != rle.prev_code_size {
                tdefl_rle_prev_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
                h.count[2][code_size as usize] = (h.count[2][code_size as usize] as i32 + 1) as u16; // TODO why as u16?
                packed_code_sizes_cursor.write(&[code_size][..])?;
            } else {
                rle.rle_repeat_count += 1;
                // 6 is the longest repeat symbol 16 can express (3 + 2 bits).
                if rle.rle_repeat_count == 6 {
                    tdefl_rle_prev_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
                }
            }
        }
        rle.prev_code_size = code_size;
    }
    // Flush whichever run is still pending.
    if rle.rle_repeat_count != 0 {
        tdefl_rle_prev_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
    } else {
        tdefl_rle_zero_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
    }
    // Build the Huffman table for the code-length alphabet (max 7 bits).
    tdefl_optimize_huffman_table_oxide(h, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, false);
    // BTYPE = 10 (dynamic), then HLIT and HDIST.
    output.put_bits(2, 2)?;
    output.put_bits((num_lit_codes - 257) as u32, 5)?;
    output.put_bits((num_dist_codes - 1) as u32, 5)?;
    // HCLEN: number of code-length-code lengths transmitted (minimum 4),
    // counted in the fixed swizzle order.
    let mut num_bit_lengths = 18 - TDEFL_PACKED_CODE_SIZE_SYMS_SWIZZLE
        .iter().rev().take_while(|&swizzle| h.code_sizes[2][*swizzle as usize] == 0).count();
    num_bit_lengths = cmp::max(4, num_bit_lengths + 1);
    output.put_bits(num_bit_lengths as u32 - 4, 4)?;
    for &swizzle in &TDEFL_PACKED_CODE_SIZE_SYMS_SWIZZLE[..num_bit_lengths] {
        output.put_bits(h.code_sizes[2][swizzle as usize] as u32, 3)?;
    }
    // Emit the RLE'd code sizes through the code-length Huffman table;
    // symbols 16/17/18 carry 2/3/7 extra bits respectively.
    let mut packed_code_size_index = 0 as usize;
    let packed_code_sizes = packed_code_sizes_cursor.get_ref();
    while packed_code_size_index < packed_code_sizes_cursor.position() as usize {
        let code = packed_code_sizes[packed_code_size_index] as usize;
        packed_code_size_index += 1;
        assert!(code < TDEFL_MAX_HUFF_SYMBOLS_2);
        output.put_bits(h.codes[2][code] as u32, h.code_sizes[2][code] as u32)?;
        if code >= 16 {
            output.put_bits(packed_code_sizes[packed_code_size_index] as u32,
                [2, 3, 7][code - 16])?;
            packed_code_size_index += 1;
        }
    }
    Ok(())
}
/// Loads the fixed literal/length and distance code lengths defined by
/// RFC 1951 for static-Huffman blocks into `h`, builds the canonical codes,
/// and writes the two-bit block-type header.
pub fn tdefl_start_static_block_oxide(
    h: &mut HuffmanOxide,
    output: &mut OutputBufferOxide
) -> io::Result<()> {
    // Fixed literal/length code lengths per the DEFLATE spec.
    for size in h.code_sizes[0][0..144].iter_mut() { *size = 8; }
    for size in h.code_sizes[0][144..256].iter_mut() { *size = 9; }
    for size in h.code_sizes[0][256..280].iter_mut() { *size = 7; }
    for size in h.code_sizes[0][280..288].iter_mut() { *size = 8; }
    // All 32 distance codes use 5 bits.
    for size in h.code_sizes[1][..32].iter_mut() { *size = 5; }
    // Derive the canonical codes from the fixed lengths.
    tdefl_optimize_huffman_table_oxide(h, 0, 288, 15, true);
    tdefl_optimize_huffman_table_oxide(h, 1, 32, 15, true);
    // BTYPE = 01 (static Huffman).
    output.put_bits(1, 2)
}
/// Encodes the buffered LZ codes in `lz_code_buf` with the Huffman tables in
/// `h` and writes them to `output`, terminated by the end-of-block symbol.
///
/// `lz_code_buf` interleaves flag bytes with codes: each flag byte precedes
/// up to 8 entries, a set bit meaning a match (1 length byte + unaligned u16
/// distance) and a clear bit a single literal byte.
pub fn tdefl_compress_lz_codes_oxide(
    h: &mut HuffmanOxide,
    output: &mut OutputBufferOxide,
    lz_code_buf: &[u8]
) -> io::Result<bool> {
    // `flags == 1` is the sentinel meaning "current flag byte exhausted";
    // the 0x100 marker below surfaces as this sentinel after 8 shifts.
    let mut flags = 1;
    // Local 64-bit bit buffer so several codes can be queued per flush.
    let mut bb = BitBuffer {
        bit_buffer: output.bit_buffer as u64,
        bits_in: output.bits_in
    };
    let mut i = 0;
    while i < lz_code_buf.len() {
        if flags == 1 {
            flags = lz_code_buf[i] as u32 | 0x100;
            i += 1;
        }
        if flags & 1 == 1 {
            // Match entry: biased length byte then a u16 distance read
            // unaligned (stored the same way by the recording side).
            let sym;
            let num_extra_bits;
            let match_len = lz_code_buf[i] as usize;
            let match_dist = read_unaligned_dict::<u16>(lz_code_buf, i as isize + 1);
            i += 3;
            assert!(h.code_sizes[0][TDEFL_LEN_SYM[match_len] as usize] != 0);
            bb.put_fast(h.codes[0][TDEFL_LEN_SYM[match_len] as usize] as u64,
                h.code_sizes[0][TDEFL_LEN_SYM[match_len] as usize] as u32);
            // Extra length bits, masked down to the number actually used.
            bb.put_fast(match_len as u64 & MZ_BITMASKS[TDEFL_LEN_EXTRA[match_len] as usize] as u64,
                TDEFL_LEN_EXTRA[match_len] as u32);
            // Short distances index the small table directly; longer ones
            // index the large table by their high byte.
            if match_dist < 512 {
                sym = TDEFL_SMALL_DIST_SYM[match_dist as usize] as usize;
                num_extra_bits = TDEFL_SMALL_DIST_EXTRA[match_dist as usize] as usize;
            } else {
                sym = TDEFL_LARGE_DIST_SYM[(match_dist >> 8) as usize] as usize;
                num_extra_bits = TDEFL_LARGE_DIST_EXTRA[(match_dist >> 8) as usize] as usize;
            }
            assert!(h.code_sizes[1][sym] != 0);
            bb.put_fast(h.codes[1][sym] as u64, h.code_sizes[1][sym] as u32);
            bb.put_fast(match_dist as u64 & MZ_BITMASKS[num_extra_bits as usize] as u64, num_extra_bits as u32);
        } else {
            // Literal; opportunistically emit up to two more literals while
            // the following flag bits are also clear.
            let mut lit = lz_code_buf[i];
            i += 1;
            assert!(h.code_sizes[0][lit as usize] != 0);
            bb.put_fast(h.codes[0][lit as usize] as u64, h.code_sizes[0][lit as usize] as u32);
            if flags & 2 == 0 && i < lz_code_buf.len() {
                flags >>= 1;
                lit = lz_code_buf[i];
                i += 1;
                assert!(h.code_sizes[0][lit as usize] != 0);
                bb.put_fast(h.codes[0][lit as usize] as u64, h.code_sizes[0][lit as usize] as u32);
                if flags & 2 == 0 && i < lz_code_buf.len() {
                    flags >>= 1;
                    lit = lz_code_buf[i];
                    i += 1;
                    assert!(h.code_sizes[0][lit as usize] != 0);
                    bb.put_fast(h.codes[0][lit as usize] as u64, h.code_sizes[0][lit as usize] as u32);
                }
            }
        }
        bb.flush(output)?;
        flags >>= 1;
    }
    // Drain the remainder of the local bit buffer through `output`,
    // 16 bits at a time.
    output.bits_in = 0;
    output.bit_buffer = 0;
    while bb.bits_in != 0 {
        let n = cmp::min(bb.bits_in, 16);
        output.put_bits(bb.bit_buffer as u32 & MZ_BITMASKS[n as usize], n)?;
        bb.bit_buffer >>= n;
        bb.bits_in -= n;
    }
    // End-of-block symbol (256).
    output.put_bits(h.codes[0][256] as u32, h.code_sizes[0][256] as u32)?;
    Ok(true)
}
/// Writes one complete deflate block body: the appropriate block header and
/// Huffman tables, followed by the buffered LZ codes.
pub fn tdefl_compress_block_oxide(
    h: &mut HuffmanOxide,
    output: &mut OutputBufferOxide,
    lz: &LZOxide,
    static_block: bool
) -> io::Result<bool> {
    // Choose and emit the header: fixed tables or per-block optimized ones.
    let header_result = if static_block {
        tdefl_start_static_block_oxide(h, output)
    } else {
        tdefl_start_dynamic_block_oxide(h, output)
    };
    header_result?;
    // Then the recorded literal/match codes themselves.
    tdefl_compress_lz_codes_oxide(h, output, &lz.codes[..lz.code_position])
}
/// Finishes the current block: writes it (compressed, or stored/raw if that
/// is smaller or forced) into an output buffer, resets the per-block LZ and
/// Huffman state, then forwards the produced bytes to the output callback or
/// caller buffer.
///
/// Returns the number of bytes still awaiting flushing (`flush_remaining`),
/// or the `PutBufFailed` status as `c_int` if the callback refused the data.
pub fn tdefl_flush_block_oxide(
    huff: &mut HuffmanOxide,
    lz: &mut LZOxide,
    dict: &mut DictOxide,
    params: &mut ParamsOxide,
    callback: &mut CallbackOxide,
    local_buf: &mut [u8],
    flush: TDEFLFlush
) -> io::Result<c_int> {
    let saved_bits;
    {
        // Assemble into the caller's buffer if roomy enough, else local_buf;
        // restore the partial bit-buffer carried over from the last block.
        let mut output = callback.out.new_output_buffer(local_buf, params.out_buf_ofs);
        output.bit_buffer = params.saved_bit_buffer;
        output.bits_in = params.saved_bits_in;
        // Raw blocks require the uncompressed bytes to still be resident in
        // the dictionary.
        let use_raw_block = (params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0) &&
            (dict.lookahead_pos - dict.code_buf_dict_pos) <= dict.size;
        assert!(params.flush_remaining == 0);
        params.flush_ofs = 0;
        params.flush_remaining = 0;
        lz.init_flag();
        // zlib header bytes (0x78 0x01) before the very first block.
        if params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 && params.block_index == 0 {
            output.put_bits(0x78, 8)?;
            output.put_bits(0x01, 8)?;
        }
        // BFINAL bit.
        output.put_bits((flush == TDEFLFlush::Finish) as u32, 1)?;
        // Remember this position so the block can be rewritten if the
        // compressed form turns out larger than the raw data.
        let saved_buffer = output.save();
        let mut comp_success = false;
        if !use_raw_block {
            // Tiny blocks aren't worth a dynamic table.
            let use_static = (params.flags & TDEFL_FORCE_ALL_STATIC_BLOCKS != 0) || (lz.total_bytes < 48);
            comp_success = tdefl_compress_block_oxide(huff, &mut output, lz, use_static)?;
        }
        // Expansion check: compressed output not smaller than the input and
        // the raw bytes still available in the dictionary.
        let expanded = (lz.total_bytes != 0) &&
            (output.inner.position() - saved_buffer.pos + 1 >= lz.total_bytes as u64) &&
            (dict.lookahead_pos - dict.code_buf_dict_pos <= dict.size);
        if use_raw_block || expanded {
            // Stored block: BTYPE = 00, byte-align, then LEN and NLEN
            // (the XOR with 0xFFFF produces the one's complement).
            output.load(saved_buffer);
            output.put_bits(0, 2)?;
            output.pad_to_bytes()?;
            for _ in 0..2 {
                output.put_bits(lz.total_bytes & 0xFFFF, 16)?;
                lz.total_bytes ^= 0xFFFF;
            }
            for i in 0..lz.total_bytes {
                let pos = (dict.code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK;
                output.put_bits(dict.dict[pos as usize] as u32, 8)?;
            }
        } else if !comp_success {
            // Compression reported failure: rewind and retry with the
            // static tables.
            output.load(saved_buffer);
            tdefl_compress_block_oxide(huff, &mut output, lz, true)?;
        }
        if flush != TDEFLFlush::None {
            if flush == TDEFLFlush::Finish {
                output.pad_to_bytes()?;
                // Trailing big-endian Adler-32 for the zlib wrapper.
                if params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 {
                    let mut adler = params.adler32;
                    for _ in 0..4 {
                        output.put_bits((adler >> 24) & 0xFF, 8)?;
                        adler <<= 8;
                    }
                }
            } else {
                // Sync flush: an empty stored block forces byte alignment.
                output.put_bits(0, 3)?;
                output.pad_to_bytes()?;
                output.put_bits(0, 16)?;
                output.put_bits(0xFFFF, 16)?;
            }
        }
        // Reset per-block statistics and the LZ code buffer.
        memset(&mut huff.count[0][..TDEFL_MAX_HUFF_SYMBOLS_0], 0);
        memset(&mut huff.count[1][..TDEFL_MAX_HUFF_SYMBOLS_1], 0);
        lz.code_position = 1;
        lz.flag_position = 0;
        lz.num_flags_left = 8;
        dict.code_buf_dict_pos += lz.total_bytes;
        lz.total_bytes = 0;
        params.block_index += 1;
        saved_bits = output.save();
    }
    let mut pos = saved_bits.pos;
    let local = saved_bits.local;
    // Preserve the bit-buffer tail for the next block.
    params.saved_bit_buffer = saved_bits.bit_buffer;
    params.saved_bits_in = saved_bits.bits_in;
    if pos != 0 {
        match callback.out {
            CallbackOut::Func(ref mut cf) => {
                // TODO: callback about buf_in_size before put_buf_func
                // Hands the assembled bytes to the user-supplied C callback.
                let call_success = unsafe {
                    (cf.put_buf_func)(
                        &local_buf[0] as *const u8 as *const c_void,
                        pos as c_int,
                        cf.put_buf_user
                    )
                };
                if !call_success {
                    params.prev_return_status = TDEFLStatus::PutBufFailed;
                    return Ok(params.prev_return_status as c_int);
                }
            },
            CallbackOut::Buf(ref mut cb) => {
                if local {
                    // Copy as much of local_buf as fits; any remainder is
                    // drained later via flush_ofs/flush_remaining.
                    let n = cmp::min(pos as usize, cb.out_buf.len() - params.out_buf_ofs);
                    (&mut cb.out_buf[params.out_buf_ofs..params.out_buf_ofs + n]).copy_from_slice(
                        &local_buf[..n]
                    );
                    params.out_buf_ofs += n;
                    pos -= n as u64;
                    if pos != 0 {
                        params.flush_ofs = n as c_uint;
                        params.flush_remaining = pos as c_uint;
                    }
                } else {
                    // Output was assembled directly in the caller's buffer.
                    params.out_buf_ofs += pos as usize;
                }
            },
        }
    }
    Ok(params.flush_remaining as c_int)
}
/// Reads a value of type `T` from `dict` at byte offset `pos`, with no
/// alignment requirement on the source bytes.
fn read_unaligned_dict<T>(dict: &[u8], pos: isize) -> T {
    // SAFETY: the caller must guarantee that `pos` is non-negative and that
    // `pos + size_of::<T>()` does not exceed `dict.len()`; no bounds
    // checking is performed here.
    unsafe { ptr::read_unaligned(dict.as_ptr().offset(pos) as *const T) }
}
/// Searches the LZ dictionary hash chains for the longest match of the data
/// at `lookahead_pos`, starting from an existing best candidate
/// (`match_dist`/`match_len`). Returns the possibly-improved
/// `(match_dist, match_len)` pair.
pub fn tdefl_find_match_oxide(
    dict: &DictOxide,
    lookahead_pos: c_uint,
    max_dist: c_uint,
    max_match_len: c_uint,
    mut match_dist: c_uint,
    mut match_len: c_uint
) -> (c_uint, c_uint) {
    assert!(max_match_len as usize <= TDEFL_MAX_MATCH_LEN);
    let pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    let mut probe_pos = pos;
    // Spend fewer probes once a reasonably long match is already held.
    let mut num_probes_left = dict.max_probes[(match_len >= 32) as usize];
    // c01: the two bytes at the tail of the current best match -- a cheap
    // reject filter, since a longer match must also agree there.
    // s01: the first two bytes at `pos`.
    let mut c01: u16 = read_unaligned_dict(&dict.dict[..], (pos + match_len - 1) as isize);
    let s01: u16 = read_unaligned_dict(&dict.dict[..], pos as isize);
    if max_match_len <= match_len { return (match_dist, match_len) }
    loop {
        let mut dist = 0;
        'found: loop {
            num_probes_left -= 1;
            if num_probes_left == 0 { return (match_dist, match_len) }
            // Outcome of one hash-chain step.
            pub enum ProbeResult {
                OutOfBounds,
                Found,
                NotFound
            }
            // Follows the chain one link and applies the tail-byte filter.
            let mut tdefl_probe = || -> ProbeResult {
                let next_probe_pos = dict.next[probe_pos as usize] as c_uint;
                dist = ((lookahead_pos - next_probe_pos) & 0xFFFF) as c_uint;
                if next_probe_pos == 0 || dist > max_dist {
                    return ProbeResult::OutOfBounds
                }
                probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;
                if read_unaligned_dict::<u16>(&dict.dict[..], (probe_pos + match_len - 1) as isize) == c01 {
                    ProbeResult::Found
                } else {
                    ProbeResult::NotFound
                }
            };
            // Three chain steps per pass; keep looping until a candidate is
            // found or the chain/probe budget is exhausted.
            for _ in 0..3 {
                match tdefl_probe() {
                    ProbeResult::OutOfBounds => return (match_dist, match_len),
                    ProbeResult::Found => break 'found,
                    ProbeResult::NotFound => ()
                }
            }
        }
        if dist == 0 { return (match_dist, match_len) }
        // A candidate must also start with the same two bytes.
        if read_unaligned_dict::<u16>(&dict.dict[..], probe_pos as isize) != s01 { continue }
        // Extend the match two bytes at a time (the first pair was verified
        // via s01); 32 rounds of 4 pairs covers up to 256 bytes.
        let mut probe_len = 32;
        let mut p = pos as isize;
        let mut q = probe_pos as isize;
        'probe: loop {
            for _ in 0..4 {
                p += 2;
                q += 2;
                if read_unaligned_dict::<u16>(&dict.dict[..], p) != read_unaligned_dict(&dict.dict[..], q) {
                    break 'probe;
                }
            }
            probe_len -= 1;
            if probe_len == 0 {
                // No mismatch within the full comparison span: the match is
                // maximal, capped to the caller's limit.
                return (dist, cmp::min(max_match_len, TDEFL_MAX_MATCH_LEN as c_uint))
            }
        }
        // Length up to the mismatching pair, plus one if the first byte of
        // that pair still matched.
        probe_len = (p - pos as isize + (dict.dict[p as usize] == dict.dict[q as usize]) as isize) as c_uint;
        if probe_len > match_len {
            match_dist = dist;
            match_len = cmp::min(max_match_len, probe_len);
            if match_len == max_match_len {
                return (match_dist, match_len);
            }
            // Refresh the tail filter for the new, longer best match.
            c01 = read_unaligned_dict(&dict.dict[..], (pos + match_len - 1) as isize);
        }
    }
}
/// Appends one literal byte to the LZ code buffer and counts it in the
/// literal/length histogram.
pub fn tdefl_record_literal_oxide(h: &mut HuffmanOxide, lz: &mut LZOxide, lit: u8) {
    lz.total_bytes += 1;
    lz.write_code(lit);
    // Shift a 0 into the current flag byte: this entry is a literal,
    // not a match.
    *lz.get_flag() >>= 1;
    lz.consume_flag();
    // Frequency statistics feed the later Huffman table construction.
    h.count[0][usize::from(lit)] += 1;
}
/// Records a match of `match_len` bytes at distance `match_dist` into the LZ
/// code buffer (one biased length byte plus two distance bytes, low byte
/// first) and updates the length and distance histograms.
pub fn tdefl_record_match_oxide(
    h: &mut HuffmanOxide,
    lz: &mut LZOxide,
    mut match_len: c_uint,
    mut match_dist: c_uint
) {
    assert!(match_len >= TDEFL_MIN_MATCH_LEN);
    assert!(match_dist >= 1);
    assert!(match_dist as usize <= TDEFL_LZ_DICT_SIZE);
    lz.total_bytes += match_len;
    // Stored biased: distance - 1 and length - TDEFL_MIN_MATCH_LEN.
    match_dist -= 1;
    match_len -= TDEFL_MIN_MATCH_LEN as u32;
    lz.write_code(match_len as u8);
    lz.write_code(match_dist as u8);
    lz.write_code((match_dist >> 8) as u8);
    // Shift a 1 into the flag byte: this entry is a match.
    *lz.get_flag() >>= 1;
    *lz.get_flag() |= 0x80;
    lz.consume_flag();
    // Distance symbol: small table for < 512, large table indexed by the
    // high byte otherwise.
    let symbol = if match_dist < 512 {
        TDEFL_SMALL_DIST_SYM[match_dist as usize]
    } else {
        TDEFL_LARGE_DIST_SYM[((match_dist >> 8) & 127) as usize]
    } as usize;
    h.count[1][symbol] += 1;
    h.count[0][TDEFL_LEN_SYM[match_len as usize] as usize] += 1;
}
/// The "normal" compression path with optional lazy (one-byte deferred)
/// match parsing: streams input into the dictionary, searches for matches,
/// records literals/matches, and flushes a block when the LZ buffer fills.
///
/// Returns `false` if a block flush failed; `params.src_pos` and
/// `params.src_buf_left` are updated to reflect consumed input either way.
pub fn tdefl_compress_normal_oxide(
    huff: &mut HuffmanOxide,
    lz: &mut LZOxide,
    dict: &mut DictOxide,
    params: &mut ParamsOxide,
    callback: &mut CallbackOxide,
    local_buf: &mut [u8]
) -> bool {
    let mut src_pos = params.src_pos;
    let mut src_buf_left = params.src_buf_left;
    // Keep going while input remains, or while flushing drains the lookahead.
    while src_buf_left != 0 || (params.flush != TDEFLFlush::None && dict.lookahead_size != 0) {
        let in_buf = callback.in_buf.expect("Unexpected null in_buf"); // TODO: make connection params.src_buf_left <-> in_buf
        // Top the lookahead up to one maximal match length.
        let num_bytes_to_process = cmp::min(src_buf_left, TDEFL_MAX_MATCH_LEN - dict.lookahead_size as usize);
        src_buf_left -= num_bytes_to_process;
        if dict.lookahead_size + dict.size >= TDEFL_MIN_MATCH_LEN - 1 {
            // Enough history: maintain the rolling hash incrementally while
            // copying the new bytes into the dictionary.
            let mut dst_pos = (dict.lookahead_pos + dict.lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
            let mut ins_pos = dict.lookahead_pos + dict.lookahead_size - 2;
            let mut hash = ((dict.dict[(ins_pos & TDEFL_LZ_DICT_SIZE_MASK) as usize] as c_uint) << TDEFL_LZ_HASH_SHIFT) ^
                (dict.dict[((ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK) as usize] as c_uint);
            dict.lookahead_size += num_bytes_to_process as c_uint;
            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
                dict.dict[dst_pos as usize] = c;
                // Mirror the window's first bytes past the end so that
                // unaligned multi-byte reads never need to wrap.
                if (dst_pos as usize) < TDEFL_MAX_MATCH_LEN - 1 {
                    dict.dict[TDEFL_LZ_DICT_SIZE + dst_pos as usize] = c;
                }
                hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ (c as c_uint)) & (TDEFL_LZ_HASH_SIZE as c_uint - 1);
                dict.next[(ins_pos & TDEFL_LZ_DICT_SIZE_MASK) as usize] = dict.hash[hash as usize];
                dict.hash[hash as usize] = ins_pos as u16;
                dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
                ins_pos += 1;
            }
            src_pos += num_bytes_to_process;
        } else {
            // Cold start: insert one byte at a time, hashing only once at
            // least TDEFL_MIN_MATCH_LEN bytes are available.
            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
                let dst_pos = (dict.lookahead_pos + dict.lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
                dict.dict[dst_pos as usize] = c;
                if (dst_pos as usize) < TDEFL_MAX_MATCH_LEN - 1 {
                    dict.dict[TDEFL_LZ_DICT_SIZE + dst_pos as usize] = c;
                }
                dict.lookahead_size += 1;
                if dict.lookahead_size + dict.size >= TDEFL_MIN_MATCH_LEN {
                    let ins_pos = dict.lookahead_pos + dict.lookahead_size - 3;
                    let hash = (((dict.dict[(ins_pos & TDEFL_LZ_DICT_SIZE_MASK) as usize] as c_uint) << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                        (((dict.dict[((ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK) as usize] as c_uint) << TDEFL_LZ_HASH_SHIFT) ^ (c as c_uint))) &
                        (TDEFL_LZ_HASH_SIZE as c_uint - 1);
                    dict.next[(ins_pos & TDEFL_LZ_DICT_SIZE_MASK) as usize] = dict.hash[hash as usize];
                    dict.hash[hash as usize] = ins_pos as u16;
                }
            }
            src_pos += num_bytes_to_process;
        }
        dict.size = cmp::min(TDEFL_LZ_DICT_SIZE as c_uint - dict.lookahead_size, dict.size);
        // Without a flush request, wait for a full match length of lookahead.
        if params.flush == TDEFLFlush::None && (dict.lookahead_size as usize) < TDEFL_MAX_MATCH_LEN { break }
        let mut len_to_move = 1;
        let mut cur_match_dist = 0;
        // Resume from a deferred (lazy) match if one was saved last round.
        let mut cur_match_len = if params.saved_match_len != 0 { params.saved_match_len } else { TDEFL_MIN_MATCH_LEN - 1 };
        let cur_pos = dict.lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
        if params.flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS) != 0 {
            // RLE mode: only consider a distance-1 run of the previous byte.
            // Raw mode: no match search at all.
            if dict.size != 0 && params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS == 0 {
                let c = dict.dict[((cur_pos.wrapping_sub(1)) & TDEFL_LZ_DICT_SIZE_MASK) as usize];
                cur_match_len = dict.dict[cur_pos as usize..(cur_pos + dict.lookahead_size) as usize]
                    .iter().take_while(|&x| *x == c).count() as c_uint;
                if cur_match_len < TDEFL_MIN_MATCH_LEN { cur_match_len = 0 } else { cur_match_dist = 1 }
            }
        } else {
            // Full hash-chain search.
            let dist_len = tdefl_find_match_oxide(
                dict,
                dict.lookahead_pos,
                dict.size,
                dict.lookahead_size,
                cur_match_dist,
                cur_match_len
            );
            cur_match_dist = dist_len.0;
            cur_match_len = dist_len.1;
        }
        // Reject minimum-length matches that are far away (poor bit
        // trade-off), optionally all short matches, and degenerate
        // self-referencing matches.
        let far_and_small = cur_match_len == TDEFL_MIN_MATCH_LEN && cur_match_dist >= 8 * 1024;
        let filter_small = params.flags & TDEFL_FILTER_MATCHES != 0 && cur_match_len <= 5;
        if far_and_small || filter_small || cur_pos == cur_match_dist {
            cur_match_dist = 0;
            cur_match_len = 0;
        }
        if params.saved_match_len != 0 {
            // Lazy parsing: compare against the match found one byte ago.
            if cur_match_len > params.saved_match_len {
                // The new match is better: release the held-back literal.
                tdefl_record_literal_oxide(huff, lz, params.saved_lit);
                if cur_match_len >= 128 {
                    // Long enough to accept without further deferral.
                    tdefl_record_match_oxide(huff, lz, cur_match_len, cur_match_dist);
                    params.saved_match_len = 0;
                    len_to_move = cur_match_len;
                } else {
                    // Defer again: the next position might do even better.
                    params.saved_lit = dict.dict[cur_pos as usize];
                    params.saved_match_dist = cur_match_dist;
                    params.saved_match_len = cur_match_len;
                }
            } else {
                // The earlier match wins; we already advanced one byte past
                // its start, hence len - 1.
                tdefl_record_match_oxide(huff, lz, params.saved_match_len, params.saved_match_dist);
                len_to_move = params.saved_match_len - 1;
                params.saved_match_len = 0;
            }
        } else if cur_match_dist == 0 {
            // No match: emit a plain literal.
            tdefl_record_literal_oxide(huff, lz, dict.dict[cmp::min(cur_pos as usize, dict.dict.len() - 1)]);
        } else if params.greedy_parsing || (params.flags & TDEFL_RLE_MATCHES != 0) || cur_match_len >= 128 {
            // Greedy: take the match immediately.
            tdefl_record_match_oxide(huff, lz, cur_match_len, cur_match_dist);
            len_to_move = cur_match_len;
        } else {
            // Lazy: hold the match back one byte.
            params.saved_lit = dict.dict[cmp::min(cur_pos as usize, dict.dict.len() - 1)];
            params.saved_match_dist = cur_match_dist;
            params.saved_match_len = cur_match_len;
        }
        dict.lookahead_pos += len_to_move;
        assert!(dict.lookahead_size >= len_to_move);
        dict.lookahead_size -= len_to_move;
        dict.size = cmp::min(dict.size + len_to_move, TDEFL_LZ_DICT_SIZE as c_uint);
        // Flush early when the LZ buffer is nearly full, or when the data
        // looks incompressible / raw blocks are forced.
        let lz_buf_tight = lz.code_position > TDEFL_LZ_CODE_BUF_SIZE - 8;
        let raw = params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0;
        let fat = ((lz.code_position * 115) >> 7) >= lz.total_bytes as usize;
        let fat_or_raw = (lz.total_bytes > 31 * 1024) && (fat || raw);
        if lz_buf_tight || fat_or_raw {
            params.src_pos = src_pos;
            params.src_buf_left = src_buf_left;
            let n = tdefl_flush_block_oxide(
                huff,
                lz,
                dict,
                params,
                callback,
                local_buf,
                TDEFLFlush::None,
            ).unwrap_or(TDEFLStatus::PutBufFailed as c_int);
            // Non-zero: either output is full (positive) or the callback
            // failed (negative) -- stop here either way.
            if n != 0 { return n > 0 }
        }
    }
    params.src_pos = src_pos;
    params.src_buf_left = src_buf_left;
    true
}
/// Lookahead buffer size (in bytes) used by the fast compression path.
const TDEFL_COMP_FAST_LOOKAHEAD_SIZE: c_uint = 4096;
/// Fast path used for single-probe greedy settings: trigram hashing, one
/// hash probe per position, and match codes written straight into the LZ
/// buffer.
///
/// Returns `false` if a block flush failed (the callback refused the data).
pub fn tdefl_compress_fast_oxide(
    huff: &mut HuffmanOxide,
    lz: &mut LZOxide,
    dict: &mut DictOxide,
    params: &mut ParamsOxide,
    callback: &mut CallbackOxide,
    local_buf: &mut [u8]
) -> bool {
    let mut cur_pos = dict.lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    let in_buf = callback.in_buf.expect("Unexpected null in_buf"); // TODO: make connection params.src_buf_left <-> in_buf
    while params.src_buf_left > 0 || (params.flush != TDEFLFlush::None && dict.lookahead_size > 0) {
        // Bulk-copy input into the circular dictionary, chunked at the wrap
        // point, keeping the mirrored prefix past the end in sync.
        let mut dst_pos = ((dict.lookahead_pos + dict.lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK) as usize;
        let mut num_bytes_to_process = cmp::min(params.src_buf_left, (TDEFL_COMP_FAST_LOOKAHEAD_SIZE - dict.lookahead_size) as usize);
        params.src_buf_left -= num_bytes_to_process;
        dict.lookahead_size += num_bytes_to_process as c_uint;
        while num_bytes_to_process != 0 {
            let n = cmp::min(TDEFL_LZ_DICT_SIZE - dst_pos , num_bytes_to_process);
            &mut dict.dict[dst_pos..dst_pos + n]
                .copy_from_slice(&in_buf[params.src_pos..params.src_pos + n]);
            if dst_pos < TDEFL_MAX_MATCH_LEN - 1 {
                let m = cmp::min(n, TDEFL_MAX_MATCH_LEN - 1 - dst_pos);
                &mut dict.dict[dst_pos + TDEFL_LZ_DICT_SIZE..dst_pos + TDEFL_LZ_DICT_SIZE + m]
                    .copy_from_slice(&in_buf[params.src_pos..params.src_pos + m]);
            }
            params.src_pos += n;
            dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK as usize;
            num_bytes_to_process -= n;
        }
        dict.size = cmp::min(TDEFL_LZ_DICT_SIZE as c_uint - dict.lookahead_size, dict.size);
        // Without a flush request, wait for a full fast-path lookahead.
        if params.flush == TDEFLFlush::None && dict.lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE {
            break;
        }
        // Main match loop; needs at least 4 buffered bytes (trigram + 1).
        while dict.lookahead_size >= 4 {
            let mut cur_match_len = 1;
            // Hash the three bytes at the current position and probe once.
            let first_trigram = read_unaligned_dict::<u32>(&dict.dict[..], cur_pos as isize) & 0xFFFFFF;
            let hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK;
            let mut probe_pos = dict.hash[hash as usize] as u32;
            dict.hash[hash as usize] = dict.lookahead_pos as u16;
            let mut cur_match_dist = (dict.lookahead_pos - probe_pos) as u16;
            // NOTE: when the probe is outside the window nothing is emitted
            // this iteration; the hash entry just stored makes the next
            // iteration hit the distance-0 path below, which emits a literal.
            if cur_match_dist as u32 <= dict.size {
                probe_pos &= TDEFL_LZ_DICT_SIZE_MASK;
                let trigram = read_unaligned_dict::<u32>(&dict.dict[..], probe_pos as isize) & 0xFFFFFF;
                if first_trigram == trigram {
                    // Trigram hit: extend two bytes at a time, at most
                    // 32 rounds of 4 pairs (256 bytes).
                    let mut p = cur_pos as isize;
                    let mut q = probe_pos as isize;
                    let mut probe_len = 32;
                    'probe: loop {
                        for _ in 0..4 {
                            p += 2;
                            q += 2;
                            if read_unaligned_dict::<u16>(&dict.dict[..], p) != read_unaligned_dict(&dict.dict[..], q) {
                                cur_match_len = (p as u32 - cur_pos) + (dict.dict[p as usize] == dict.dict[q as usize]) as u32;
                                break 'probe;
                            }
                        }
                        probe_len -= 1;
                        if probe_len == 0 {
                            // No mismatch found: maximal match, except for
                            // the degenerate self-match (distance 0), which
                            // yields length 0 and falls into the literal path.
                            cur_match_len = if cur_match_dist == 0 {
                                0
                            } else {
                                TDEFL_MAX_MATCH_LEN as u32
                            };
                            break 'probe;
                        }
                    }
                    if cur_match_len < TDEFL_MIN_MATCH_LEN || (cur_match_len == TDEFL_MIN_MATCH_LEN && cur_match_dist >= 8 * 1024) {
                        // Too short, or a poor far/short trade-off: emit the
                        // first byte as a literal instead.
                        cur_match_len = 1;
                        lz.write_code(first_trigram as u8);
                        *lz.get_flag() >>= 1;
                        huff.count[0][first_trigram as u8 as usize] += 1;
                    } else {
                        cur_match_len = cmp::min(cur_match_len, dict.lookahead_size);
                        assert!(cur_match_len >= TDEFL_MIN_MATCH_LEN);
                        assert!(cur_match_dist >= 1);
                        assert!(cur_match_dist as usize <= TDEFL_LZ_DICT_SIZE);
                        cur_match_dist -= 1;
                        // Biased length byte plus a u16 distance written
                        // unaligned straight into the LZ code buffer.
                        lz.write_code((cur_match_len - TDEFL_MIN_MATCH_LEN) as u8);
                        unsafe {
                            ptr::write_unaligned(
                                (&mut lz.codes[0] as *mut u8).offset(lz.code_position as isize) as *mut u16,
                                cur_match_dist as u16
                            );
                            lz.code_position += 2;
                        }
                        // Flag bit 1: match entry.
                        *lz.get_flag() >>= 1;
                        *lz.get_flag() |= 0x80;
                        if cur_match_dist < 512 {
                            huff.count[1][TDEFL_SMALL_DIST_SYM[cur_match_dist as usize] as usize] += 1;
                        } else {
                            huff.count[1][TDEFL_LARGE_DIST_SYM[(cur_match_dist >> 8) as usize] as usize] += 1;
                        }
                        huff.count[0][TDEFL_LEN_SYM[(cur_match_len - TDEFL_MIN_MATCH_LEN) as usize] as usize] += 1;
                    }
                } else {
                    // Trigram hash collision: emit a literal.
                    lz.write_code(first_trigram as u8);
                    *lz.get_flag() >>= 1;
                    huff.count[0][first_trigram as u8 as usize] += 1;
                }
                lz.consume_flag();
                lz.total_bytes += cur_match_len;
                dict.lookahead_pos += cur_match_len;
                dict.size = cmp::min(dict.size + cur_match_len, TDEFL_LZ_DICT_SIZE as u32);
                cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
                assert!(dict.lookahead_size >= cur_match_len);
                dict.lookahead_size -= cur_match_len;
                let lz_buf_tight = lz.code_position > TDEFL_LZ_CODE_BUF_SIZE - 8;
                if lz_buf_tight {
                    // Mid-stream flush; lookahead/dict state is restored
                    // afterwards since the flush only consumes LZ codes.
                    let saved_lookahead_pos = dict.lookahead_pos; // TODO
                    let saved_lookahead_size = dict.lookahead_size;
                    let saved_dict_size = dict.size;
                    let n = match tdefl_flush_block_oxide(
                        huff,
                        lz,
                        dict,
                        params,
                        callback,
                        local_buf,
                        TDEFLFlush::None
                    ) {
                        Err(_) => {params.prev_return_status = TDEFLStatus::PutBufFailed; -1},
                        Ok(status) => status
                    };
                    if n != 0 { return n > 0 }
                    dict.lookahead_pos = saved_lookahead_pos;
                    dict.lookahead_size = saved_lookahead_size;
                    dict.size = saved_dict_size;
                }
            }
        }
        // Drain the remaining < 4 lookahead bytes as literals.
        while dict.lookahead_size != 0 {
            let lit = dict.dict[cur_pos as usize];
            lz.total_bytes += 1;
            lz.write_code(lit);
            *lz.get_flag() >>= 1;
            lz.consume_flag();
            huff.count[0][lit as usize] += 1;
            dict.lookahead_pos += 1;
            dict.size = cmp::min(dict.size + 1, TDEFL_LZ_DICT_SIZE as u32);
            cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
            dict.lookahead_size -= 1;
            let lz_buf_tight = lz.code_position > TDEFL_LZ_CODE_BUF_SIZE - 8;
            if lz_buf_tight {
                let saved_lookahead_pos = dict.lookahead_pos; // TODO
                let saved_lookahead_size = dict.lookahead_size;
                let saved_dict_size = dict.size;
                let n = match tdefl_flush_block_oxide(
                    huff,
                    lz,
                    dict,
                    params,
                    callback,
                    local_buf,
                    TDEFLFlush::None
                ) {
                    Err(_) => {params.prev_return_status = TDEFLStatus::PutBufFailed; -1},
                    Ok(status) => status
                };
                if n != 0 { return n > 0 }
                dict.lookahead_pos = saved_lookahead_pos;
                dict.lookahead_size = saved_lookahead_size;
                dict.size = saved_dict_size;
            }
        }
    }
    true
}
/// Copies pending bytes from `local_buf` into the caller's output buffer
/// (when one is in use) and reports progress.
///
/// Returns `(status, input_bytes_consumed, output_bytes_produced)`; status
/// becomes `Done` once the stream is finished and fully drained.
pub fn tdefl_flush_output_buffer_oxide(
    c: &mut CallbackOxide,
    p: &mut ParamsOxide,
    local_buf: &[u8]
) -> (TDEFLStatus, usize, usize) {
    let mut out_pos = 0;
    if let CallbackOut::Buf(ref mut cb) = c.out {
        // Move as much of the pending data as the output buffer can take.
        let n = cmp::min(cb.out_buf.len() - p.out_buf_ofs, p.flush_remaining as usize);
        if n != 0 {
            let flush_ofs = p.flush_ofs as usize;
            cb.out_buf[p.out_buf_ofs..p.out_buf_ofs + n]
                .copy_from_slice(&local_buf[flush_ofs..flush_ofs + n]);
        }
        p.flush_ofs += n as c_uint;
        p.flush_remaining -= n as c_uint;
        p.out_buf_ofs += n;
        out_pos = p.out_buf_ofs;
    }
    let status = if p.finished && p.flush_remaining == 0 {
        TDEFLStatus::Done
    } else {
        TDEFLStatus::Okay
    };
    (status, p.src_pos, out_pos)
}
/// Top-level compression entry point: validates the flush-state transition,
/// runs the fast or normal compressor, updates the Adler-32, flushes the
/// final block once the input is exhausted, and drains pending output.
///
/// Returns `(status, input_bytes_consumed, output_bytes_produced)`.
pub fn tdefl_compress_oxide(
    d: &mut CompressorOxide,
    callback: &mut CallbackOxide,
    flush: TDEFLFlush
) -> (TDEFLStatus, usize, usize) {
    d.params.src_buf_left = callback.in_buf.map_or(0, |buf| buf.len());
    d.params.out_buf_ofs = 0;
    d.params.src_pos = 0;
    let prev_ok = d.params.prev_return_status == TDEFLStatus::Okay;
    // Once Finish was requested, only further Finish calls are legal.
    let flush_finish_once = d.params.flush != TDEFLFlush::Finish ||
        flush == TDEFLFlush::Finish;
    d.params.flush = flush;
    if !prev_ok || !flush_finish_once {
        d.params.prev_return_status = TDEFLStatus::BadParam;
        return (d.params.prev_return_status, 0, 0);
    }
    // Output from a previous call is still pending (or the stream is done):
    // just keep draining it.
    if d.params.flush_remaining != 0 || d.params.finished {
        let res = tdefl_flush_output_buffer_oxide(
            callback,
            &mut d.params,
            &d.local_buf[..]
        );
        d.params.prev_return_status = res.0;
        return res;
    }
    // The fast path requires exactly one probe, greedy parsing, and none of
    // the filter/raw/RLE special modes.
    let one_probe = d.params.flags & TDEFL_MAX_PROBES_MASK as u32 == 1;
    let greedy = d.params.flags & TDEFL_GREEDY_PARSING_FLAG != 0;
    let filter_or_rle_or_raw = d.params.flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES) != 0;
    let compress_success = if one_probe && greedy && !filter_or_rle_or_raw {
        tdefl_compress_fast_oxide(
            &mut d.huff,
            &mut d.lz,
            &mut d.dict,
            &mut d.params,
            callback,
            &mut d.local_buf[..]
        )
    } else {
        tdefl_compress_normal_oxide(
            &mut d.huff,
            &mut d.lz,
            &mut d.dict,
            &mut d.params,
            callback,
            &mut d.local_buf[..]
        )
    };
    if !compress_success {
        return (d.params.prev_return_status, d.params.src_pos, d.params.out_buf_ofs);
    }
    // Fold the consumed input into the running Adler-32 when a zlib wrapper
    // or explicit checksum was requested.
    if let Some(in_buf) = callback.in_buf {
        if d.params.flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32) != 0 {
            d.params.adler32 = ::mz_adler32_oxide(
                d.params.adler32,
                &in_buf[..d.params.src_pos]
            );
        }
    }
    // A flush was requested and everything buffered has been consumed:
    // emit the final (or sync) block now.
    let flush_none = d.params.flush == TDEFLFlush::None;
    let remaining = d.params.src_buf_left != 0 || d.params.flush_remaining != 0;
    if !flush_none && d.dict.lookahead_size == 0 && !remaining {
        let flush = d.params.flush;
        match tdefl_flush_block_oxide(
            &mut d.huff,
            &mut d.lz,
            &mut d.dict,
            &mut d.params,
            callback,
            &mut d.local_buf[..],
            flush
        ) {
            Err(_) => {
                d.params.prev_return_status = TDEFLStatus::PutBufFailed;
                return (d.params.prev_return_status, d.params.src_pos, d.params.out_buf_ofs);
            },
            Ok(x) if x < 0 => return (d.params.prev_return_status, d.params.src_pos, d.params.out_buf_ofs),
            _ => {
                d.params.finished = d.params.flush == TDEFLFlush::Finish;
                // A full flush also resets the dictionary.
                if d.params.flush == TDEFLFlush::Full {
                    memset(&mut d.dict.hash[..], 0);
                    memset(&mut d.dict.next[..], 0);
                    d.dict.size = 0;
                }
            },
        }
    }
    let res = tdefl_flush_output_buffer_oxide(
        callback,
        &mut d.params,
        &d.local_buf[..]
    );
    d.params.prev_return_status = res.0;
    res
}
/// Returns the running Adler-32 checksum of the input processed so far.
pub fn tdefl_get_adler32_oxide(d: &CompressorOxide) -> c_uint {
    d.params.adler32
}
/// Returns the status reported by the most recent compression call.
pub fn tdefl_get_prev_return_status_oxide(d: &CompressorOxide) -> TDEFLStatus {
    d.params.prev_return_status
}
/// Returns the compressor's flag word, cast to a C `int`.
pub fn tdefl_get_flags_oxide(d: &CompressorOxide) -> c_int {
    d.params.flags as c_int
}
/// Translates zlib-style `(level, window_bits, strategy)` parameters into a
/// tdefl compressor flag word.
pub fn tdefl_create_comp_flags_from_zip_params_oxide(
    level: c_int,
    window_bits: c_int,
    strategy: c_int
) -> c_uint {
    // Negative level selects the default; levels above 10 are clamped to
    // the last probe-table entry.
    let num_probes = (if level >= 0 {
        cmp::min(10, level)
    } else {
        ::CompressionLevel::DefaultLevel as c_int
    }) as usize;
    // Low levels (<= 3) use greedy (non-lazy) parsing for speed.
    let greedy = if level <= 3 { TDEFL_GREEDY_PARSING_FLAG } else { 0 } as c_uint;
    let mut comp_flags = TDEFL_NUM_PROBES[num_probes] | greedy;
    // Positive window_bits requests the zlib wrapper.
    if window_bits > 0 {
        comp_flags |= TDEFL_WRITE_ZLIB_HEADER as c_uint;
    }
    if level == 0 {
        // Level 0 stores data uncompressed; this takes precedence over any
        // strategy selection.
        comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
    } else if strategy == ::CompressionStrategy::Filtered as c_int {
        comp_flags |= TDEFL_FILTER_MATCHES;
    } else if strategy == ::CompressionStrategy::HuffmanOnly as c_int {
        // Zero probes: Huffman coding only, no match searching.
        comp_flags &= !TDEFL_MAX_PROBES_MASK as c_uint;
    } else if strategy == ::CompressionStrategy::Fixed as c_int {
        comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
    } else if strategy == ::CompressionStrategy::RLE as c_int {
        comp_flags |= TDEFL_RLE_MATCHES;
    }
    comp_flags
}
// style; wrapping_add
use super::*;
/// Fills every element of `slice` with a clone of `val` -- a generic
/// analogue of C's `memset` for any `Clone` type.
pub fn memset<T: Clone>(slice: &mut [T], val: T) {
    slice.iter_mut().for_each(|x| *x = val.clone());
}
/// Complete compressor state for the Rust-side ("oxide") implementation.
pub struct CompressorOxide {
    // Buffered LZ codes and flag bytes for the block being built.
    pub lz: LZOxide,
    // Flags, flush state, saved bit-buffer tail, Adler-32, etc.
    pub params: ParamsOxide,
    // Optional C-style output callback.
    pub callback: Option<CallbackFunc>,
    // Scratch output buffer used when the caller's buffer is too small.
    pub local_buf: [u8; TDEFL_OUT_BUF_SIZE],
    // Symbol statistics and generated Huffman tables.
    pub huff: HuffmanOxide,
    // LZ dictionary window plus hash chains.
    pub dict: DictOxide,
}
/// A C-style output callback invoked with each chunk of compressed bytes.
#[derive(Copy, Clone)]
pub struct CallbackFunc {
    // Non-null function pointer receiving (buf, len, user_data).
    pub put_buf_func: PutBufFuncPtrNotNull,
    // Opaque user pointer forwarded verbatim to `put_buf_func`.
    pub put_buf_user: *mut c_void,
}
/// Output destination backed by a caller-provided byte buffer.
pub struct CallbackBuf<'a> {
    pub out_buf: &'a mut [u8],
}
/// Where compressed output goes: a user callback or a plain buffer.
pub enum CallbackOut<'a> {
    Func(CallbackFunc),
    Buf(CallbackBuf<'a>),
}
impl<'a> CallbackOut<'a> {
    /// Chooses where the next block's bytes are assembled: directly in the
    /// caller's output buffer when it still has at least TDEFL_OUT_BUF_SIZE
    /// bytes of room, otherwise in `local_buf` (copied out after the block
    /// is finished).
    pub fn new_output_buffer<'b>(
        &'b mut self,
        local_buf: &'b mut [u8],
        out_buf_ofs: usize
    ) -> OutputBufferOxide<'b> {
        let is_local;
        // 16 bytes of headroom are reserved beyond the usable length.
        // NOTE(review): presumably this matches the bit-buffer writer's
        // maximum overrun -- confirm against OutputBufferOxide/BitBuffer.
        let buf_len = TDEFL_OUT_BUF_SIZE - 16;
        let chosen_buffer = match *self {
            CallbackOut::Buf(ref mut cb) if cb.out_buf.len() - out_buf_ofs >= TDEFL_OUT_BUF_SIZE => {
                is_local = false;
                &mut cb.out_buf[out_buf_ofs..out_buf_ofs + buf_len]
            },
            _ => {
                is_local = true;
                &mut local_buf[..buf_len]
            },
        };
        let cursor = Cursor::new(chosen_buffer);
        OutputBufferOxide {
            inner: cursor,
            local: is_local,
            bit_buffer: 0,
            bits_in: 0,
        }
    }
}
/// Bundles the (optional) input slice with the chosen output destination
/// for one compression call.
pub struct CallbackOxide<'a> {
    /// Input data to compress; `None` when the caller passed a null pointer.
    pub in_buf: Option<&'a [u8]>,
    /// Destination for compressed output.
    pub out: CallbackOut<'a>,
}
impl<'a> CallbackOxide<'a> {
    /// Validate raw C-API pointers and wrap them into safe slices.
    ///
    /// Exactly one output destination must be supplied: either a callback
    /// function (then `out_buf` must be null and `out_size` 0), or a
    /// non-null `out_buf`. A null `in_buf` is only allowed with
    /// `in_size == 0`. Returns `TDEFLStatus::BadParam` otherwise.
    ///
    /// # Safety
    /// Non-null `in_buf`/`out_buf` must point to valid, live memory of at
    /// least `in_size`/`out_size` bytes for the lifetime `'a`.
    pub unsafe fn new(
        callback_func: Option<CallbackFunc>,
        in_buf: *const c_void,
        in_size: usize,
        out_buf: *mut c_void,
        out_size: usize,
    ) -> Result<Self, TDEFLStatus> {
        let out = match callback_func {
            // No callback: the output pointer must be non-null.
            None => CallbackOut::Buf(CallbackBuf {
                out_buf: slice::from_raw_parts_mut(
                    (out_buf as *mut u8).as_mut().ok_or(TDEFLStatus::BadParam)?,
                    out_size
                ),
            }),
            // Callback supplied: reject any simultaneous output buffer.
            Some(func) => {
                if out_size > 0 || out_buf.as_mut().is_some() {
                    return Err(TDEFLStatus::BadParam);
                }
                CallbackOut::Func(func)
            },
        };
        if in_size > 0 && in_buf.is_null() {
            return Err(TDEFLStatus::BadParam);
        }
        Ok(CallbackOxide {
            // A null input pointer simply maps to `None`.
            in_buf: (in_buf as *const u8).as_ref().map(|in_buf|
                slice::from_raw_parts(in_buf, in_size)
            ),
            out: out,
        })
    }
}
impl CompressorOxide {
    /// Build a fresh compressor; `flags` selects probe count, parsing mode
    /// and header options (see `tdefl_create_comp_flags_from_zip_params_oxide`).
    /// The body itself only performs plain struct initialization; the
    /// `unsafe` marker mirrors the C-style API this backs.
    pub unsafe fn new(callback: Option<CallbackFunc>, flags: c_uint) -> Self {
        CompressorOxide {
            huff: HuffmanOxide::new(),
            dict: DictOxide::new(flags),
            lz: LZOxide::new(),
            params: ParamsOxide::new(flags),
            callback: callback,
            local_buf: [0; TDEFL_OUT_BUF_SIZE],
        }
    }
}
/// Huffman coding state for all tables (literal/length, distance and
/// code-length tables).
pub struct HuffmanOxide {
    /// Symbol frequency counts, per table.
    pub count: [[u16; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
    /// Bit-reversed canonical Huffman codes, per table.
    pub codes: [[u16; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
    /// Code length in bits per symbol, per table; 0 means the symbol is unused.
    pub code_sizes: [[u8; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
}
impl HuffmanOxide {
    /// All-zero Huffman state; tables are (re)built per block.
    pub fn new() -> Self {
        HuffmanOxide {
            count: [[0; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
            codes: [[0; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
            code_sizes: [[0; TDEFL_MAX_HUFF_SYMBOLS]; TDEFL_MAX_HUFF_TABLES],
        }
    }
}
/// Byte-oriented output writer with a small bit accumulator for sub-byte
/// Huffman codes.
pub struct OutputBufferOxide<'a> {
    /// Destination byte buffer plus current write position.
    pub inner: Cursor<&'a mut [u8]>,
    /// True when writing into the compressor's local scratch buffer rather
    /// than a caller-provided buffer.
    pub local: bool,
    /// Pending bits not yet flushed to `inner`.
    pub bit_buffer: u32,
    /// Number of valid bits in `bit_buffer` (kept below 8 by `put_bits`).
    pub bits_in: u32,
}
/// Wide (64-bit) bit accumulator used on the hot path of
/// `tdefl_compress_lz_codes_oxide`, flushed with unaligned 8-byte stores.
pub struct BitBuffer {
    /// Accumulated bits, least-significant bit first.
    pub bit_buffer: u64,
    /// Number of valid bits currently in `bit_buffer`.
    pub bits_in: u32,
}
impl BitBuffer {
    /// Append `len` bits. No overflow check: the caller must flush often
    /// enough that `bits_in` stays within 64.
    pub fn put_fast(&mut self, bits: u64, len: u32) {
        self.bit_buffer |= bits << self.bits_in;
        self.bits_in += len;
    }
    /// Flush all whole bytes with one unaligned 8-byte store, advance the
    /// cursor by `bits_in / 8` bytes, and keep the remaining 0-7 bits.
    pub fn flush(&mut self, output: &mut OutputBufferOxide) -> io::Result<()> {
        let pos = output.inner.position() as usize;
        let inner = &mut((*output.inner.get_mut())[pos]) as *mut u8 as *mut u64;
        // NOTE(review): this always stores 8 bytes at `pos`; it appears to
        // rely on the output buffer being created with 16 bytes of slack
        // (see `new_output_buffer`) so the store stays in bounds — confirm.
        unsafe {
            ptr::write_unaligned(inner, self.bit_buffer);
        }
        output.inner.seek(SeekFrom::Current((self.bits_in >> 3) as i64))?;
        // Drop the flushed whole bytes, keep the sub-byte remainder.
        self.bit_buffer >>= self.bits_in & !7;
        self.bits_in &= 7;
        Ok(())
    }
}
/// LZ77 sliding dictionary with hash chains for match searching.
pub struct DictOxide {
    /// Probe budgets; index 1 is selected once the current best match
    /// length reaches 32 (see `tdefl_find_match_oxide`).
    pub max_probes: [c_uint; 2],
    /// Dictionary bytes; the extra `TDEFL_MAX_MATCH_LEN - 1` tail mirrors
    /// the start of the buffer so match comparisons can read past the wrap
    /// point without masking every access.
    pub dict: [u8; TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1],
    /// Hash-chain links: next older position with the same hash.
    pub next: [u16; TDEFL_LZ_DICT_SIZE],
    /// Hash-bucket heads: most recent position for each hash value.
    pub hash: [u16; TDEFL_LZ_DICT_SIZE],
    /// Dictionary position corresponding to the start of the LZ code buffer.
    pub code_buf_dict_pos: c_uint,
    /// Bytes buffered ahead of the current compression position.
    pub lookahead_size: c_uint,
    /// Absolute (unmasked) position of the lookahead window.
    pub lookahead_pos: c_uint,
    /// Number of valid dictionary bytes available for matching.
    pub size: c_uint,
}
impl DictOxide {
    /// Fresh, empty dictionary. The low 12 bits of `flags` encode the
    /// match-search probe budget, split into the two `max_probes` limits.
    pub fn new(flags: c_uint) -> Self {
        let probe_bits = flags & 0xFFF;
        DictOxide {
            max_probes: [
                1 + (probe_bits + 2) / 3,
                1 + ((probe_bits >> 2) + 2) / 3
            ],
            dict: [0; TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1],
            next: [0; TDEFL_LZ_DICT_SIZE],
            hash: [0; TDEFL_LZ_DICT_SIZE],
            code_buf_dict_pos: 0,
            lookahead_size: 0,
            lookahead_pos: 0,
            size: 0,
        }
    }
}
/// Buffer of encoded LZ codes (literals and matches) plus flag bytes,
/// accumulated until a deflate block is flushed.
pub struct LZOxide {
    /// Code bytes; each group of up to 8 codes shares one flag byte whose
    /// bits mark literal (0) vs. match (1).
    pub codes: [u8; TDEFL_LZ_CODE_BUF_SIZE],
    /// Next free index in `codes`.
    pub code_position: usize,
    /// Index of the current flag byte inside `codes`.
    pub flag_position: usize,
    /// Number of uncompressed bytes represented by the buffered codes.
    pub total_bytes: c_uint,
    /// Flag bits still unused in the current flag byte (8 = fresh byte).
    pub num_flags_left: c_uint,
}
pub struct ParamsOxide {
pub flags: c_uint,
pub greedy_parsing: bool,
pub block_index: c_uint,
pub saved_match_dist: c_uint,
pub saved_match_len: libc::c_uint,
pub saved_lit: u8,
pub flush: TDEFLFlush,
pub flush_ofs: c_uint,
pub flush_remaining: c_uint,
pub finished: bool,
pub adler32: c_uint,
pub src_pos: usize,
pub src_buf_left: usize,
pub out_buf_ofs: usize,
pub prev_return_status: TDEFLStatus,
pub saved_bit_buffer: u32,
pub saved_bits_in: u32,
}
impl ParamsOxide {
    /// Initial parameter state for a brand-new compression stream.
    pub fn new(flags: c_uint) -> Self {
        // Greedy vs. lazy parsing is fixed by a flag bit at creation time.
        let greedy = flags & TDEFL_GREEDY_PARSING_FLAG != 0;
        ParamsOxide {
            flags: flags,
            greedy_parsing: greedy,
            block_index: 0,
            // No deferred (lazy) match or literal yet.
            saved_match_dist: 0,
            saved_match_len: 0,
            saved_lit: 0,
            flush: TDEFLFlush::None,
            flush_ofs: 0,
            flush_remaining: 0,
            finished: false,
            // Seed for the running zlib Adler-32 checksum.
            adler32: ::MZ_ADLER32_INIT as c_uint,
            src_pos: 0,
            src_buf_left: 0,
            out_buf_ofs: 0,
            prev_return_status: TDEFLStatus::Okay,
            saved_bit_buffer: 0,
            saved_bits_in: 0,
        }
    }
}
impl LZOxide {
    /// Empty code buffer; `code_position` starts at 1 because index 0 is
    /// reserved for the first flag byte.
    pub fn new() -> Self {
        LZOxide {
            codes: [0; TDEFL_LZ_CODE_BUF_SIZE],
            code_position: 1,
            flag_position: 0,
            total_bytes: 0,
            num_flags_left: 8,
        }
    }
    /// Append one code byte to the buffer.
    pub fn write_code(&mut self, val: u8) {
        self.codes[self.code_position] = val;
        self.code_position += 1;
    }
    /// Finalize the current flag byte before a block flush: a completely
    /// unused flag byte is removed, a partially filled one is
    /// right-aligned by shifting out the unused bits.
    pub fn init_flag(&mut self) {
        if self.num_flags_left == 8 {
            *self.get_flag() = 0;
            self.code_position -= 1;
        } else {
            *self.get_flag() >>= self.num_flags_left;
        }
    }
    /// Mutable access to the current flag byte.
    pub fn get_flag(&mut self) -> &mut u8 {
        &mut self.codes[self.flag_position]
    }
    /// Reserve the next byte in `codes` as the new flag byte.
    pub fn plant_flag(&mut self) {
        self.flag_position = self.code_position;
        self.code_position += 1;
    }
    /// Use up one flag bit; when the byte is exhausted, start a fresh one.
    pub fn consume_flag(&mut self) {
        self.num_flags_left -= 1;
        if self.num_flags_left == 0 {
            self.num_flags_left = 8;
            self.plant_flag();
        }
    }
}
/// Snapshot of an `OutputBufferOxide`'s position and bit state, used to
/// roll back a block attempt or carry state across calls.
pub struct SavedOutputBufferOxide {
    /// Saved cursor position.
    pub pos: u64,
    /// Saved pending bits.
    pub bit_buffer: u32,
    /// Saved count of pending bits.
    pub bits_in: u32,
    /// Whether the buffer was the local scratch buffer.
    pub local: bool,
}
impl<'a> OutputBufferOxide<'a> {
    /// Append the low `len` bits of `bits`, flushing whole bytes to the
    /// underlying cursor. Panics if `bits` has bits set above `len`.
    fn put_bits(&mut self, bits: u32, len: u32) -> io::Result<()> {
        assert!(bits <= ((1u32 << len) - 1u32));
        self.bit_buffer |= bits << self.bits_in;
        self.bits_in += len;
        while self.bits_in >= 8 {
            // NOTE(review): the byte count returned by `write` is ignored;
            // a full cursor would silently drop bits here — presumably the
            // buffers are always sized with slack, confirm.
            self.inner.write(&[self.bit_buffer as u8][..])?;
            self.bit_buffer >>= 8;
            self.bits_in -= 8;
        }
        Ok(())
    }
    /// Snapshot position and bit state for a possible rollback.
    fn save(&self) -> SavedOutputBufferOxide {
        SavedOutputBufferOxide {
            pos: self.inner.position(),
            bit_buffer: self.bit_buffer,
            bits_in: self.bits_in,
            local: self.local,
        }
    }
    /// Restore a snapshot taken with `save`.
    fn load(&mut self, saved: SavedOutputBufferOxide) {
        self.inner.set_position(saved.pos);
        self.bit_buffer = saved.bit_buffer;
        self.bits_in = saved.bits_in;
        self.local = saved.local;
    }
    /// Restore only the bit-accumulator part of a snapshot.
    fn load_bits(&mut self, saved: &SavedOutputBufferOxide) {
        self.bit_buffer = saved.bit_buffer;
        self.bits_in = saved.bits_in;
    }
    /// Zero-pad the pending bits up to the next byte boundary.
    fn pad_to_bytes(&mut self) -> io::Result<()> {
        if self.bits_in != 0 {
            let len = 8 - self.bits_in;
            self.put_bits(0, len)?;
        }
        Ok(())
    }
}
/// Stable two-pass (byte-wise) radix sort of symbol/frequency pairs by
/// `m_key`, using `symbols1` as scratch. Returns whichever slice holds
/// the sorted result after the passes.
pub fn tdefl_radix_sort_syms_oxide<'a>(
    symbols0: &'a mut [tdefl_sym_freq],
    symbols1: &'a mut [tdefl_sym_freq]
) -> &'a mut [tdefl_sym_freq] {
    // Histograms of the low and high bytes of every 16-bit key.
    let mut hist = [[0; 256]; 2];
    for freq in symbols0.iter() {
        hist[0][(freq.m_key & 0xFF) as usize] += 1;
        hist[1][((freq.m_key >> 8) & 0xFF) as usize] += 1;
    }
    let mut n_passes = 2;
    // If every key's high byte is zero, a single pass suffices.
    if symbols0.len() == hist[1][0] {
        n_passes -= 1;
    }
    let mut current_symbols = symbols0;
    let mut new_symbols = symbols1;
    for pass in 0..n_passes {
        // Prefix sums turn the histogram into destination offsets.
        let mut offsets = [0; 256];
        let mut offset = 0;
        for i in 0..256 {
            offsets[i] = offset;
            offset += hist[pass][i];
        }
        // Scatter each symbol to its bucket for this byte of the key.
        for sym in current_symbols.iter() {
            let j = ((sym.m_key >> (pass * 8)) & 0xFF) as usize;
            new_symbols[offsets[j]] = *sym;
            offsets[j] += 1;
        }
        mem::swap(&mut current_symbols, &mut new_symbols);
    }
    current_symbols
}
/// Compute minimum-redundancy (Huffman) code lengths in place from symbol
/// frequencies sorted in increasing order; on return each `m_key` holds
/// that symbol's code length. Appears to be the classic in-place
/// Moffat/Katajainen algorithm — TODO confirm against its description.
pub fn tdefl_calculate_minimum_redundancy_oxide(symbols: &mut [tdefl_sym_freq]) {
    match symbols.len() {
        0 => (),
        // A single symbol gets a 1-bit code.
        1 => symbols[0].m_key = 1,
        n => {
            // Phase 1: build the tree in place; `m_key` alternates between
            // node weight and parent index as nodes are merged.
            symbols[0].m_key += symbols[1].m_key;
            let mut root = 0;
            let mut leaf = 2;
            for next in 1..n - 1 {
                if (leaf >= n) || (symbols[root].m_key < symbols[leaf].m_key) {
                    symbols[next].m_key = symbols[root].m_key;
                    symbols[root].m_key = next as u16;
                    root += 1;
                } else {
                    symbols[next].m_key = symbols[leaf].m_key;
                    leaf += 1;
                }
                if (leaf >= n) || (root < next && symbols[root].m_key < symbols[leaf].m_key) {
                    symbols[next].m_key = symbols[next].m_key.wrapping_add(symbols[root].m_key);
                    symbols[root].m_key = next as u16;
                    root += 1;
                } else {
                    symbols[next].m_key = symbols[next].m_key.wrapping_add(symbols[leaf].m_key);
                    leaf += 1;
                }
            }
            // Phase 2: convert parent links into node depths.
            symbols[n - 2].m_key = 0;
            for next in (0..n - 2).rev() {
                symbols[next].m_key = symbols[symbols[next].m_key as usize].m_key + 1;
            }
            // Phase 3: assign final code lengths level by level.
            let mut avbl = 1;
            let mut used = 0;
            let mut dpth = 0;
            let mut root = (n - 2) as i32;
            let mut next = (n - 1) as i32;
            while avbl > 0 {
                while (root >= 0) && (symbols[root as usize].m_key == dpth) {
                    used += 1;
                    root -= 1;
                }
                while avbl > used {
                    symbols[next as usize].m_key = dpth;
                    next -= 1;
                    avbl -= 1;
                }
                avbl = 2 * used;
                dpth += 1;
                used = 0;
            }
        }
    }
}
/// Clamp a code-length histogram (`num_codes[i]` = number of codes of
/// length `i`) so no code exceeds `max_code_size`, rebalancing so the
/// Kraft inequality still holds. Entries above `max_code_size` are left
/// stale; callers only read lengths `1..=max_code_size`.
pub fn tdefl_huffman_enforce_max_code_size_oxide(
    num_codes: &mut [c_int],
    code_list_len: usize,
    max_code_size: usize
) {
    // Nothing to enforce with zero or one code.
    if code_list_len <= 1 {
        return;
    }
    // Fold every count for a length above the limit into the limit bucket.
    let over_limit: c_int = num_codes[max_code_size + 1..].iter().sum();
    num_codes[max_code_size] += over_limit;
    // Kraft sum scaled by 2^max_code_size: a length-i code contributes
    // 2^(max_code_size - i).
    let mut total = 0u32;
    for (shift, &count) in num_codes[1..max_code_size + 1].iter().rev().enumerate() {
        total += (count as u32) << shift;
    }
    // While over-subscribed, drop one max-length code and split the
    // shortest available shorter code into two one-bit-longer children.
    // Each step lowers the scaled Kraft sum by exactly one.
    while total > (1u32 << max_code_size) {
        num_codes[max_code_size] -= 1;
        for i in (1..max_code_size).rev() {
            if num_codes[i] != 0 {
                num_codes[i] -= 1;
                num_codes[i + 1] += 2;
                break;
            }
        }
        total -= 1;
    }
}
/// Build the canonical, bit-reversed Huffman codes for table `table_num`.
/// When `static_table` is set, the existing `code_sizes` are taken as-is;
/// otherwise optimal code sizes are derived from the frequency counts and
/// clamped to `code_size_limit`.
pub fn tdefl_optimize_huffman_table_oxide(
    h: &mut HuffmanOxide,
    table_num: usize,
    table_len: usize,
    code_size_limit: usize,
    static_table: bool
) {
    let mut num_codes = [0 as c_int; TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
    let mut next_code = [0 as c_uint; TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
    if static_table {
        // Static tables: just histogram the preset code sizes.
        for &code_size in &h.code_sizes[table_num][..table_len] {
            num_codes[code_size as usize] += 1;
        }
    } else {
        // Gather the symbols actually used, sort them by frequency, and
        // compute minimum-redundancy code lengths.
        let mut symbols0 = [tdefl_sym_freq { m_key: 0, m_sym_index: 0 }; TDEFL_MAX_HUFF_SYMBOLS];
        let mut symbols1 = [tdefl_sym_freq { m_key: 0, m_sym_index: 0 }; TDEFL_MAX_HUFF_SYMBOLS];
        let mut num_used_symbols = 0;
        for i in 0..table_len {
            if h.count[table_num][i] != 0 {
                symbols0[num_used_symbols] = tdefl_sym_freq {
                    m_key: h.count[table_num][i],
                    m_sym_index: i as u16
                };
                num_used_symbols += 1;
            }
        }
        let mut symbols = tdefl_radix_sort_syms_oxide(&mut symbols0[..num_used_symbols],
                                                      &mut symbols1[..num_used_symbols]);
        tdefl_calculate_minimum_redundancy_oxide(symbols);
        for symbol in symbols.iter() {
            num_codes[symbol.m_key as usize] += 1;
        }
        tdefl_huffman_enforce_max_code_size_oxide(&mut num_codes, num_used_symbols, code_size_limit);
        // Re-derive per-symbol code sizes from the (clamped) histogram;
        // the sorted order assigns longer codes to rarer symbols.
        memset(&mut h.code_sizes[table_num][..], 0);
        memset(&mut h.codes[table_num][..], 0);
        let mut last = num_used_symbols;
        for i in 1..code_size_limit + 1 {
            let first = last - num_codes[i] as usize;
            for symbol in &symbols[first..last] {
                h.code_sizes[table_num][symbol.m_sym_index as usize] = i as u8;
            }
            last = first;
        }
    }
    // Canonical code assignment: first code of each length from the
    // histogram prefix sums.
    let mut j = 0;
    next_code[1] = 0;
    for i in 2..code_size_limit + 1 {
        j = (j + num_codes[i - 1]) << 1;
        next_code[i] = j as c_uint;
    }
    for (&code_size, huff_code) in h.code_sizes[table_num].iter().take(table_len)
                                    .zip(h.codes[table_num].iter_mut().take(table_len))
    {
        if code_size == 0 { continue }
        let mut code = next_code[code_size as usize];
        next_code[code_size as usize] += 1;
        // Deflate emits Huffman codes most-significant bit first, so store
        // the bit-reversed code.
        let mut rev_code = 0;
        for _ in 0..code_size { // TODO reverse u32 faster?
            rev_code = (rev_code << 1) | (code & 1);
            code >>= 1;
        }
        *huff_code = rev_code as u16;
    }
}
/// Order in which code-length-code lengths are transmitted in a dynamic
/// block header (the fixed permutation from RFC 1951, section 3.2.7).
const TDEFL_PACKED_CODE_SIZE_SYMS_SWIZZLE: [u8; 19] =
    [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];
/// Emit a dynamic-Huffman block header: build optimal literal/length and
/// distance tables, run-length-encode their code sizes, and write the
/// header plus code-length table (RFC 1951, section 3.2.7).
pub fn tdefl_start_dynamic_block_oxide(
    h: &mut HuffmanOxide,
    output: &mut OutputBufferOxide
) -> io::Result<()> {
    // The end-of-block symbol (256) always occurs exactly once.
    h.count[0][256] = 1;
    tdefl_optimize_huffman_table_oxide(h, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, false);
    tdefl_optimize_huffman_table_oxide(h, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, false);
    // Trim trailing unused symbols; at least 257 literal/length and 1
    // distance code are always transmitted. (The previous version took a
    // no-op `&` reference of the counts here.)
    let num_lit_codes = 286 - h.code_sizes[0][257..286]
        .iter().rev().take_while(|&x| *x == 0).count();
    let num_dist_codes = 30 - h.code_sizes[1][1..30]
        .iter().rev().take_while(|&x| *x == 0).count();
    let mut code_sizes_to_pack = [0u8; TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1];
    let mut packed_code_sizes = [0u8; TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1];
    let total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
    // Concatenate both tables' code sizes for joint RLE packing. (The
    // leading `&` on these statements was a no-op reference to `()`.)
    code_sizes_to_pack[..num_lit_codes]
        .copy_from_slice(&h.code_sizes[0][..num_lit_codes]);
    code_sizes_to_pack[num_lit_codes..total_code_sizes_to_pack]
        .copy_from_slice(&h.code_sizes[1][..num_dist_codes]);
    // Running RLE state for the code-size packing below.
    struct RLE {
        pub rle_z_count: u32,
        pub rle_repeat_count: u32,
        pub prev_code_size: u8,
    }
    let mut rle = RLE {
        rle_z_count: 0,
        rle_repeat_count: 0,
        prev_code_size: 0xFF,
    };
    // Flush a pending run of repeated non-zero code sizes: short runs are
    // emitted literally, runs of 3..=6 use symbol 16.
    let tdefl_rle_prev_code_size = |
        rle: &mut RLE,
        packed_code_sizes: &mut Cursor<&mut [u8]>,
        h: &mut HuffmanOxide,
    | -> io::Result<()> {
        if rle.rle_repeat_count != 0 {
            if rle.rle_repeat_count < 3 {
                h.count[2][rle.prev_code_size as usize] = h.count[2][rle.prev_code_size as usize].wrapping_add(rle.rle_repeat_count as u16);
                while rle.rle_repeat_count != 0 {
                    rle.rle_repeat_count -= 1;
                    packed_code_sizes.write(&[rle.prev_code_size][..])?;
                }
            } else {
                h.count[2][16] = h.count[2][16].wrapping_add(1);
                packed_code_sizes.write(&[16, (rle.rle_repeat_count - 3) as u8][..])?;
            }
            rle.rle_repeat_count = 0;
        }
        Ok(())
    };
    // Flush a pending run of zero code sizes: symbol 17 covers 3..=10
    // zeros, symbol 18 covers 11..=138.
    let tdefl_rle_zero_code_size = |
        rle: &mut RLE,
        packed_code_sizes: &mut Cursor<&mut [u8]>,
        h: &mut HuffmanOxide,
    | -> io::Result<()> {
        if rle.rle_z_count != 0 {
            if rle.rle_z_count < 3 {
                h.count[2][0] = h.count[2][0].wrapping_add(rle.rle_z_count as u16);
                while rle.rle_z_count != 0 {
                    rle.rle_z_count -= 1;
                    packed_code_sizes.write(&[0][..])?;
                }
            } else if rle.rle_z_count <= 10 {
                h.count[2][17] = h.count[2][17].wrapping_add(1);
                packed_code_sizes.write(&[17, (rle.rle_z_count - 3) as u8][..])?;
            } else {
                h.count[2][18] = h.count[2][18].wrapping_add(1);
                packed_code_sizes.write(&[18, (rle.rle_z_count - 11) as u8][..])?;
            }
            rle.rle_z_count = 0;
        }
        Ok(())
    };
    memset(&mut h.count[2][..TDEFL_MAX_HUFF_SYMBOLS_2], 0);
    let mut packed_code_sizes_cursor = Cursor::new(&mut packed_code_sizes[..]);
    for &code_size in &code_sizes_to_pack[..total_code_sizes_to_pack] {
        if code_size == 0 {
            tdefl_rle_prev_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
            rle.rle_z_count += 1;
            // 138 is the maximum run symbol 18 can represent.
            if rle.rle_z_count == 138 {
                tdefl_rle_zero_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
            }
        } else {
            tdefl_rle_zero_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
            if code_size != rle.prev_code_size {
                tdefl_rle_prev_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
                h.count[2][code_size as usize] = h.count[2][code_size as usize].wrapping_add(1);
                packed_code_sizes_cursor.write(&[code_size][..])?;
            } else {
                rle.rle_repeat_count += 1;
                // 6 is the maximum run symbol 16 can represent.
                if rle.rle_repeat_count == 6 {
                    tdefl_rle_prev_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
                }
            }
        }
        rle.prev_code_size = code_size;
    }
    // Flush whichever run is still pending.
    if rle.rle_repeat_count != 0 {
        tdefl_rle_prev_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
    } else {
        tdefl_rle_zero_code_size(&mut rle, &mut packed_code_sizes_cursor, h)?;
    }
    // Build the code-length-code table and write the block header.
    tdefl_optimize_huffman_table_oxide(h, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, false);
    output.put_bits(2, 2)?;
    output.put_bits((num_lit_codes - 257) as u32, 5)?;
    output.put_bits((num_dist_codes - 1) as u32, 5)?;
    let mut num_bit_lengths = 18 - TDEFL_PACKED_CODE_SIZE_SYMS_SWIZZLE
        .iter().rev().take_while(|&swizzle| h.code_sizes[2][*swizzle as usize] == 0).count();
    num_bit_lengths = cmp::max(4, num_bit_lengths + 1);
    output.put_bits(num_bit_lengths as u32 - 4, 4)?;
    for &swizzle in &TDEFL_PACKED_CODE_SIZE_SYMS_SWIZZLE[..num_bit_lengths] {
        output.put_bits(h.code_sizes[2][swizzle as usize] as u32, 3)?;
    }
    // Emit the packed code sizes themselves; symbols 16/17/18 carry
    // 2/3/7 extra bits respectively.
    let mut packed_code_size_index = 0 as usize;
    let packed_code_sizes = packed_code_sizes_cursor.get_ref();
    while packed_code_size_index < packed_code_sizes_cursor.position() as usize {
        let code = packed_code_sizes[packed_code_size_index] as usize;
        packed_code_size_index += 1;
        assert!(code < TDEFL_MAX_HUFF_SYMBOLS_2);
        output.put_bits(h.codes[2][code] as u32, h.code_sizes[2][code] as u32)?;
        if code >= 16 {
            output.put_bits(packed_code_sizes[packed_code_size_index] as u32,
                            [2, 3, 7][code - 16])?;
            packed_code_size_index += 1;
        }
    }
    Ok(())
}
/// Emit a static-Huffman block header and load the fixed code lengths
/// from RFC 1951, section 3.2.6 into the tables.
pub fn tdefl_start_static_block_oxide(
    h: &mut HuffmanOxide,
    output: &mut OutputBufferOxide
) -> io::Result<()> {
    // Fixed literal/length code lengths: 0-143 -> 8 bits, 144-255 -> 9,
    // 256-279 -> 7, 280-287 -> 8.
    let lit_len_ranges: [(usize, usize, u8); 4] =
        [(0, 144, 8), (144, 256, 9), (256, 280, 7), (280, 288, 8)];
    for &(start, end, bits) in lit_len_ranges.iter() {
        memset(&mut h.code_sizes[0][start..end], bits);
    }
    // All 32 distance codes use 5 bits.
    memset(&mut h.code_sizes[1][..32], 5);
    tdefl_optimize_huffman_table_oxide(h, 0, 288, 15, true);
    tdefl_optimize_huffman_table_oxide(h, 1, 32, 15, true);
    // Block type 0b01 = compressed with fixed Huffman codes.
    output.put_bits(1, 2)
}
/// Encode the buffered LZ codes with the current Huffman tables and write
/// them to `output`, finishing with the end-of-block symbol (256).
/// `lz_code_buf` interleaves flag bytes with literal bytes and 3-byte
/// (len, dist16) match records.
pub fn tdefl_compress_lz_codes_oxide(
    h: &mut HuffmanOxide,
    output: &mut OutputBufferOxide,
    lz_code_buf: &[u8]
) -> io::Result<bool> {
    // `flags` acts as a shift register; the 0x100 sentinel marks when a
    // fresh flag byte must be loaded.
    let mut flags = 1;
    let mut bb = BitBuffer {
        bit_buffer: output.bit_buffer as u64,
        bits_in: output.bits_in
    };
    let mut i = 0;
    while i < lz_code_buf.len() {
        if flags == 1 {
            flags = lz_code_buf[i] as u32 | 0x100;
            i += 1;
        }
        if flags & 1 == 1 {
            // Flag bit 1: a match record (1 length byte + 2 distance bytes).
            flags >>= 1;
            let sym;
            let num_extra_bits;
            let match_len = lz_code_buf[i] as usize;
            let match_dist = read_unaligned_dict::<u16>(lz_code_buf, i as isize + 1);
            i += 3;
            assert!(h.code_sizes[0][TDEFL_LEN_SYM[match_len] as usize] != 0);
            bb.put_fast(h.codes[0][TDEFL_LEN_SYM[match_len] as usize] as u64,
                        h.code_sizes[0][TDEFL_LEN_SYM[match_len] as usize] as u32);
            bb.put_fast(match_len as u64 & MZ_BITMASKS[TDEFL_LEN_EXTRA[match_len] as usize] as u64,
                        TDEFL_LEN_EXTRA[match_len] as u32);
            // Short distances index the small table directly; longer ones
            // index the large table by their high byte.
            if match_dist < 512 {
                sym = TDEFL_SMALL_DIST_SYM[match_dist as usize] as usize;
                num_extra_bits = TDEFL_SMALL_DIST_EXTRA[match_dist as usize] as usize;
            } else {
                sym = TDEFL_LARGE_DIST_SYM[(match_dist >> 8) as usize] as usize;
                num_extra_bits = TDEFL_LARGE_DIST_EXTRA[(match_dist >> 8) as usize] as usize;
            }
            assert!(h.code_sizes[1][sym] != 0);
            bb.put_fast(h.codes[1][sym] as u64, h.code_sizes[1][sym] as u32);
            bb.put_fast(match_dist as u64 & MZ_BITMASKS[num_extra_bits as usize] as u64, num_extra_bits as u32);
        } else {
            // Flag bit 0: up to three literal bytes before re-checking flags.
            for _ in 0..3 {
                flags >>= 1;
                let lit = lz_code_buf[i];
                i += 1;
                assert!(h.code_sizes[0][lit as usize] != 0);
                bb.put_fast(h.codes[0][lit as usize] as u64, h.code_sizes[0][lit as usize] as u32);
                if flags & 1 == 1 || i >= lz_code_buf.len() {
                    break;
                }
            }
        }
        bb.flush(output)?;
    }
    // Move any leftover bits back through the byte-oriented writer.
    output.bits_in = 0;
    output.bit_buffer = 0;
    while bb.bits_in != 0 {
        let n = cmp::min(bb.bits_in, 16);
        output.put_bits(bb.bit_buffer as u32 & MZ_BITMASKS[n as usize], n)?;
        bb.bit_buffer >>= n;
        bb.bits_in -= n;
    }
    // End-of-block symbol.
    output.put_bits(h.codes[0][256] as u32, h.code_sizes[0][256] as u32)?;
    Ok(true)
}
/// Write one complete deflate block: header plus Huffman tables (static
/// or dynamic), followed by the encoded LZ symbol stream.
pub fn tdefl_compress_block_oxide(
    h: &mut HuffmanOxide,
    output: &mut OutputBufferOxide,
    lz: &LZOxide,
    static_block: bool
) -> io::Result<bool> {
    match static_block {
        true => tdefl_start_static_block_oxide(h, output)?,
        false => tdefl_start_dynamic_block_oxide(h, output)?,
    }
    tdefl_compress_lz_codes_oxide(h, output, &lz.codes[..lz.code_position])
}
/// Finish the current deflate block: compress it (falling back to a raw
/// stored block if compression expanded the data), apply flush/finish
/// framing, reset per-block state, and deliver the output either to the
/// user callback or into the caller's buffer.
///
/// Returns `Ok(flush_remaining)` — the number of output bytes that did
/// not fit the caller's buffer — or `Ok(TDEFLStatus::PutBufFailed as
/// c_int)` when the callback rejects the data.
pub fn tdefl_flush_block_oxide(
    huff: &mut HuffmanOxide,
    lz: &mut LZOxide,
    dict: &mut DictOxide,
    params: &mut ParamsOxide,
    callback: &mut CallbackOxide,
    local_buf: &mut [u8],
    flush: TDEFLFlush
) -> io::Result<c_int> {
    let saved_bits;
    {
        let mut output = callback.out.new_output_buffer(local_buf, params.out_buf_ofs);
        // Resume the bit stream exactly where the previous block left off.
        output.bit_buffer = params.saved_bit_buffer;
        output.bits_in = params.saved_bits_in;
        let use_raw_block = (params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0) &&
            (dict.lookahead_pos - dict.code_buf_dict_pos) <= dict.size;
        assert!(params.flush_remaining == 0);
        params.flush_ofs = 0;
        params.flush_remaining = 0;
        lz.init_flag();
        // First block of a zlib stream gets the 2-byte zlib header (0x78 0x01).
        if params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 && params.block_index == 0 {
            output.put_bits(0x78, 8)?;
            output.put_bits(0x01, 8)?;
        }
        // BFINAL bit.
        output.put_bits((flush == TDEFLFlush::Finish) as u32, 1)?;
        let saved_buffer = output.save();
        let mut comp_success = false;
        if !use_raw_block {
            let use_static = (params.flags & TDEFL_FORCE_ALL_STATIC_BLOCKS != 0) || (lz.total_bytes < 48);
            comp_success = tdefl_compress_block_oxide(huff, &mut output, lz, use_static)?;
        }
        // If the compressed form is not smaller than the raw data (and the
        // raw bytes are still in the dictionary), emit a stored block instead.
        let expanded = (lz.total_bytes != 0) &&
            (output.inner.position() - saved_buffer.pos + 1 >= lz.total_bytes as u64) &&
            (dict.lookahead_pos - dict.code_buf_dict_pos <= dict.size);
        if use_raw_block || expanded {
            output.load(saved_buffer);
            // Stored block: type 00, byte-aligned LEN and one's-complement NLEN.
            output.put_bits(0, 2)?;
            output.pad_to_bytes()?;
            for _ in 0..2 {
                output.put_bits(lz.total_bytes & 0xFFFF, 16)?;
                lz.total_bytes ^= 0xFFFF;
            }
            for i in 0..lz.total_bytes {
                let pos = (dict.code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK;
                output.put_bits(dict.dict[pos as usize] as u32, 8)?;
            }
        } else if !comp_success {
            // Dynamic-block attempt failed; retry with the static tables.
            output.load(saved_buffer);
            tdefl_compress_block_oxide(huff, &mut output, lz, true)?;
        }
        if flush != TDEFLFlush::None {
            if flush == TDEFLFlush::Finish {
                output.pad_to_bytes()?;
                // zlib trailer: big-endian Adler-32 of the input.
                if params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 {
                    let mut adler = params.adler32;
                    for _ in 0..4 {
                        output.put_bits((adler >> 24) & 0xFF, 8)?;
                        adler <<= 8;
                    }
                }
            } else {
                // Sync flush: empty stored block (00 00 FF FF marker).
                output.put_bits(0, 3)?;
                output.pad_to_bytes()?;
                output.put_bits(0, 16)?;
                output.put_bits(0xFFFF, 16)?;
            }
        }
        // Reset per-block state for the next block.
        memset(&mut huff.count[0][..TDEFL_MAX_HUFF_SYMBOLS_0], 0);
        memset(&mut huff.count[1][..TDEFL_MAX_HUFF_SYMBOLS_1], 0);
        lz.code_position = 1;
        lz.flag_position = 0;
        lz.num_flags_left = 8;
        dict.code_buf_dict_pos += lz.total_bytes;
        lz.total_bytes = 0;
        params.block_index += 1;
        saved_bits = output.save();
    }
    let mut pos = saved_bits.pos;
    let local = saved_bits.local;
    // Carry the sub-byte bit state over to the next block.
    params.saved_bit_buffer = saved_bits.bit_buffer;
    params.saved_bits_in = saved_bits.bits_in;
    if pos != 0 {
        match callback.out {
            CallbackOut::Func(ref mut cf) => {
                // TODO: callback about buf_in_size before put_buf_func
                let call_success = unsafe {
                    (cf.put_buf_func)(
                        &local_buf[0] as *const u8 as *const c_void,
                        pos as c_int,
                        cf.put_buf_user
                    )
                };
                if !call_success {
                    params.prev_return_status = TDEFLStatus::PutBufFailed;
                    return Ok(params.prev_return_status as c_int);
                }
            },
            CallbackOut::Buf(ref mut cb) => {
                if local {
                    // Output landed in the scratch buffer: copy as much as
                    // fits into the caller's buffer; the rest stays pending.
                    let n = cmp::min(pos as usize, cb.out_buf.len() - params.out_buf_ofs);
                    (&mut cb.out_buf[params.out_buf_ofs..params.out_buf_ofs + n]).copy_from_slice(
                        &local_buf[..n]
                    );
                    params.out_buf_ofs += n;
                    pos -= n as u64;
                    if pos != 0 {
                        params.flush_ofs = n as c_uint;
                        params.flush_remaining = pos as c_uint;
                    }
                } else {
                    // Output was written directly into the caller's buffer.
                    params.out_buf_ofs += pos as usize;
                }
            },
        }
    }
    Ok(params.flush_remaining as c_int)
}
/// Read a possibly unaligned `T` out of `dict` at byte offset `pos`.
///
/// No bounds check is performed: the caller must guarantee that
/// `pos..pos + size_of::<T>()` lies within `dict` (the dictionary buffers
/// carry `TDEFL_MAX_MATCH_LEN - 1` bytes of tail padding for this).
fn read_unaligned_dict<T>(dict: &[u8], pos: isize) -> T {
    unsafe {
        // SAFETY: in-bounds access is the caller's responsibility, per the
        // contract above; `read_unaligned` handles any alignment.
        ptr::read_unaligned(dict.as_ptr().offset(pos) as *const T)
    }
}
/// Walk the hash chains looking for the longest match at `lookahead_pos`,
/// starting from an existing candidate (`match_dist`, `match_len`).
/// Returns the possibly improved `(match_dist, match_len)` pair.
pub fn tdefl_find_match_oxide(
    dict: &DictOxide,
    lookahead_pos: c_uint,
    max_dist: c_uint,
    max_match_len: c_uint,
    mut match_dist: c_uint,
    mut match_len: c_uint
) -> (c_uint, c_uint) {
    assert!(max_match_len as usize <= TDEFL_MAX_MATCH_LEN);
    let pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    let mut probe_pos = pos;
    // Budget shrinks once a decent (>= 32 byte) match is already in hand.
    let mut num_probes_left = dict.max_probes[(match_len >= 32) as usize];
    // Quick-reject pairs: the two bytes that would extend the current best
    // match (`c01`) and the first two bytes at `pos` (`s01`).
    let mut c01: u16 = read_unaligned_dict(&dict.dict[..], (pos + match_len - 1) as isize);
    let s01: u16 = read_unaligned_dict(&dict.dict[..], pos as isize);
    if max_match_len <= match_len { return (match_dist, match_len) }
    loop {
        let mut dist = 0;
        'found: loop {
            num_probes_left -= 1;
            if num_probes_left == 0 { return (match_dist, match_len) }
            pub enum ProbeResult {
                OutOfBounds,
                Found,
                NotFound
            }
            // Follow one hash-chain link; Found means the candidate could
            // extend the current best match (tail bytes agree).
            let mut tdefl_probe = || -> ProbeResult {
                let next_probe_pos = dict.next[probe_pos as usize] as c_uint;
                dist = ((lookahead_pos - next_probe_pos) & 0xFFFF) as c_uint;
                if next_probe_pos == 0 || dist > max_dist {
                    return ProbeResult::OutOfBounds
                }
                probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;
                if read_unaligned_dict::<u16>(&dict.dict[..], (probe_pos + match_len - 1) as isize) == c01 {
                    ProbeResult::Found
                } else {
                    ProbeResult::NotFound
                }
            };
            // Probe in groups of three before re-checking the budget.
            for _ in 0..3 {
                match tdefl_probe() {
                    ProbeResult::OutOfBounds => return (match_dist, match_len),
                    ProbeResult::Found => break 'found,
                    ProbeResult::NotFound => ()
                }
            }
        }
        if dist == 0 { return (match_dist, match_len) }
        // Reject candidates whose first two bytes don't even match.
        if read_unaligned_dict::<u16>(&dict.dict[..], probe_pos as isize) != s01 { continue }
        // Compare two bytes at a time, up to 32 * 8 = 256 bytes.
        let mut probe_len = 32;
        let mut p = pos as isize;
        let mut q = probe_pos as isize;
        'probe: loop {
            for _ in 0..4 {
                p += 2;
                q += 2;
                if read_unaligned_dict::<u16>(&dict.dict[..], p) != read_unaligned_dict(&dict.dict[..], q) {
                    break 'probe;
                }
            }
            probe_len -= 1;
            if probe_len == 0 {
                // Fully matched out to the compare horizon: best possible.
                return (dist, cmp::min(max_match_len, TDEFL_MAX_MATCH_LEN as c_uint))
            }
        }
        // Length = matched pairs plus possibly one extra odd byte.
        probe_len = (p - pos as isize + (dict.dict[p as usize] == dict.dict[q as usize]) as isize) as c_uint;
        if probe_len > match_len {
            match_dist = dist;
            match_len = cmp::min(max_match_len, probe_len);
            if match_len == max_match_len {
                return (match_dist, match_len);
            }
            // New tail pair for the quick-reject test.
            c01 = read_unaligned_dict(&dict.dict[..], (pos + match_len - 1) as isize);
        }
    }
}
/// Record one literal byte into the LZ code buffer and update the
/// literal/length frequency table.
pub fn tdefl_record_literal_oxide(h: &mut HuffmanOxide, lz: &mut LZOxide, lit: u8) {
    // Frequency bookkeeping for the literal/length Huffman table.
    h.count[0][lit as usize] += 1;
    lz.total_bytes += 1;
    lz.write_code(lit);
    // Shift in a 0 flag bit: this code is a literal, not a match.
    *lz.get_flag() >>= 1;
    lz.consume_flag();
}
/// Record one LZ match (length, distance) into the code buffer and update
/// the length and distance frequency tables.
pub fn tdefl_record_match_oxide(
    h: &mut HuffmanOxide,
    lz: &mut LZOxide,
    match_len: c_uint,
    match_dist: c_uint
) {
    assert!(match_len >= TDEFL_MIN_MATCH_LEN);
    assert!(match_dist >= 1);
    assert!(match_dist as usize <= TDEFL_LZ_DICT_SIZE);

    lz.total_bytes += match_len;
    // Stored biased: length relative to the minimum, distance minus one.
    let len_code = match_len - TDEFL_MIN_MATCH_LEN as u32;
    let dist_code = match_dist - 1;
    lz.write_code(len_code as u8);
    lz.write_code(dist_code as u8);
    lz.write_code((dist_code >> 8) as u8);
    // Shift in a 1 flag bit marking this entry as a match.
    *lz.get_flag() >>= 1;
    *lz.get_flag() |= 0x80;
    lz.consume_flag();

    // Frequency bookkeeping: distance symbol table picked by magnitude.
    let dist_sym = if dist_code < 512 {
        TDEFL_SMALL_DIST_SYM[dist_code as usize]
    } else {
        TDEFL_LARGE_DIST_SYM[((dist_code >> 8) & 127) as usize]
    } as usize;
    h.count[1][dist_sym] += 1;
    h.count[0][TDEFL_LEN_SYM[len_code as usize] as usize] += 1;
}
/// Main (non-fast) compression loop: feed input into the dictionary and
/// hash chains, find matches (lazy unless greedy parsing is enabled),
/// record literals/matches, and flush a block whenever the LZ code buffer
/// gets close to full. Returns `false` when a mid-loop flush reports
/// failure (negative status from `tdefl_flush_block_oxide`).
pub fn tdefl_compress_normal_oxide(
    huff: &mut HuffmanOxide,
    lz: &mut LZOxide,
    dict: &mut DictOxide,
    params: &mut ParamsOxide,
    callback: &mut CallbackOxide,
    local_buf: &mut [u8]
) -> bool {
    let mut src_pos = params.src_pos;
    let mut src_buf_left = params.src_buf_left;
    while src_buf_left != 0 || (params.flush != TDEFLFlush::None && dict.lookahead_size != 0) {
        let in_buf = callback.in_buf.expect("Unexpected null in_buf"); // TODO: make connection params.src_buf_left <-> in_buf
        let num_bytes_to_process = cmp::min(src_buf_left, TDEFL_MAX_MATCH_LEN - dict.lookahead_size as usize);
        src_buf_left -= num_bytes_to_process;
        if dict.lookahead_size + dict.size >= TDEFL_MIN_MATCH_LEN - 1 {
            // Common path: enough context exists to hash as we copy bytes in.
            let mut dst_pos = (dict.lookahead_pos + dict.lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
            let mut ins_pos = dict.lookahead_pos + dict.lookahead_size - 2;
            let mut hash = ((dict.dict[(ins_pos & TDEFL_LZ_DICT_SIZE_MASK) as usize] as c_uint) << TDEFL_LZ_HASH_SHIFT) ^
                (dict.dict[((ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK) as usize] as c_uint);
            dict.lookahead_size += num_bytes_to_process as c_uint;
            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
                dict.dict[dst_pos as usize] = c;
                // Mirror the first bytes into the padded tail so matches
                // can read across the wrap point.
                if (dst_pos as usize) < TDEFL_MAX_MATCH_LEN - 1 {
                    dict.dict[TDEFL_LZ_DICT_SIZE + dst_pos as usize] = c;
                }
                // Rolling 3-byte hash; push this position onto its chain.
                hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ (c as c_uint)) & (TDEFL_LZ_HASH_SIZE as c_uint - 1);
                dict.next[(ins_pos & TDEFL_LZ_DICT_SIZE_MASK) as usize] = dict.hash[hash as usize];
                dict.hash[hash as usize] = ins_pos as u16;
                dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
                ins_pos += 1;
            }
            src_pos += num_bytes_to_process;
        } else {
            // Startup path: not enough bytes yet to form a 3-byte hash;
            // copy byte-by-byte and start hashing once possible.
            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
                let dst_pos = (dict.lookahead_pos + dict.lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
                dict.dict[dst_pos as usize] = c;
                if (dst_pos as usize) < TDEFL_MAX_MATCH_LEN - 1 {
                    dict.dict[TDEFL_LZ_DICT_SIZE + dst_pos as usize] = c;
                }
                dict.lookahead_size += 1;
                if dict.lookahead_size + dict.size >= TDEFL_MIN_MATCH_LEN {
                    let ins_pos = dict.lookahead_pos + dict.lookahead_size - 3;
                    let hash = (((dict.dict[(ins_pos & TDEFL_LZ_DICT_SIZE_MASK) as usize] as c_uint) << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                        (((dict.dict[((ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK) as usize] as c_uint) << TDEFL_LZ_HASH_SHIFT) ^ (c as c_uint))) &
                        (TDEFL_LZ_HASH_SIZE as c_uint - 1);
                    dict.next[(ins_pos & TDEFL_LZ_DICT_SIZE_MASK) as usize] = dict.hash[hash as usize];
                    dict.hash[hash as usize] = ins_pos as u16;
                }
            }
            src_pos += num_bytes_to_process;
        }
        dict.size = cmp::min(TDEFL_LZ_DICT_SIZE as c_uint - dict.lookahead_size, dict.size);
        // Unless flushing, wait for a full lookahead before matching.
        if params.flush == TDEFLFlush::None && (dict.lookahead_size as usize) < TDEFL_MAX_MATCH_LEN { break }
        let mut len_to_move = 1;
        let mut cur_match_dist = 0;
        let mut cur_match_len = if params.saved_match_len != 0 { params.saved_match_len } else { TDEFL_MIN_MATCH_LEN - 1 };
        let cur_pos = dict.lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
        if params.flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS) != 0 {
            // RLE mode: only look for a run of the previous byte.
            if dict.size != 0 && params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS == 0 {
                let c = dict.dict[((cur_pos.wrapping_sub(1)) & TDEFL_LZ_DICT_SIZE_MASK) as usize];
                cur_match_len = dict.dict[cur_pos as usize..(cur_pos + dict.lookahead_size) as usize]
                    .iter().take_while(|&x| *x == c).count() as c_uint;
                if cur_match_len < TDEFL_MIN_MATCH_LEN { cur_match_len = 0 } else { cur_match_dist = 1 }
            }
        } else {
            // Normal mode: full hash-chain match search.
            let dist_len = tdefl_find_match_oxide(
                dict,
                dict.lookahead_pos,
                dict.size,
                dict.lookahead_size,
                cur_match_dist,
                cur_match_len
            );
            cur_match_dist = dist_len.0;
            cur_match_len = dist_len.1;
        }
        // Drop marginal matches: minimum-length matches that are far away,
        // short matches when filtering, or degenerate self-matches.
        let far_and_small = cur_match_len == TDEFL_MIN_MATCH_LEN && cur_match_dist >= 8 * 1024;
        let filter_small = params.flags & TDEFL_FILTER_MATCHES != 0 && cur_match_len <= 5;
        if far_and_small || filter_small || cur_pos == cur_match_dist {
            cur_match_dist = 0;
            cur_match_len = 0;
        }
        if params.saved_match_len != 0 {
            // Lazy parsing: compare against the match deferred last round.
            if cur_match_len > params.saved_match_len {
                tdefl_record_literal_oxide(huff, lz, params.saved_lit);
                if cur_match_len >= 128 {
                    // Long enough to take immediately without re-deferring.
                    tdefl_record_match_oxide(huff, lz, cur_match_len, cur_match_dist);
                    params.saved_match_len = 0;
                    len_to_move = cur_match_len;
                } else {
                    params.saved_lit = dict.dict[cur_pos as usize];
                    params.saved_match_dist = cur_match_dist;
                    params.saved_match_len = cur_match_len;
                }
            } else {
                // The deferred match wins.
                tdefl_record_match_oxide(huff, lz, params.saved_match_len, params.saved_match_dist);
                len_to_move = params.saved_match_len - 1;
                params.saved_match_len = 0;
            }
        } else if cur_match_dist == 0 {
            // No match found: emit the current byte as a literal.
            tdefl_record_literal_oxide(huff, lz, dict.dict[cmp::min(cur_pos as usize, dict.dict.len() - 1)]);
        } else if params.greedy_parsing || (params.flags & TDEFL_RLE_MATCHES != 0) || cur_match_len >= 128 {
            // Greedy: take the match now.
            tdefl_record_match_oxide(huff, lz, cur_match_len, cur_match_dist);
            len_to_move = cur_match_len;
        } else {
            // Lazy: defer this match and re-evaluate at the next position.
            params.saved_lit = dict.dict[cmp::min(cur_pos as usize, dict.dict.len() - 1)];
            params.saved_match_dist = cur_match_dist;
            params.saved_match_len = cur_match_len;
        }
        // Advance past whatever was consumed.
        dict.lookahead_pos += len_to_move;
        assert!(dict.lookahead_size >= len_to_move);
        dict.lookahead_size -= len_to_move;
        dict.size = cmp::min(dict.size + len_to_move, TDEFL_LZ_DICT_SIZE as c_uint);
        // Flush a block if the LZ buffer is nearly full, or if raw/poorly
        // compressing output has accumulated enough bytes.
        let lz_buf_tight = lz.code_position > TDEFL_LZ_CODE_BUF_SIZE - 8;
        let raw = params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0;
        let fat = ((lz.code_position * 115) >> 7) >= lz.total_bytes as usize;
        let fat_or_raw = (lz.total_bytes > 31 * 1024) && (fat || raw);
        if lz_buf_tight || fat_or_raw {
            params.src_pos = src_pos;
            params.src_buf_left = src_buf_left;
            let n = tdefl_flush_block_oxide(
                huff,
                lz,
                dict,
                params,
                callback,
                local_buf,
                TDEFLFlush::None,
            ).unwrap_or(TDEFLStatus::PutBufFailed as c_int);
            if n != 0 { return n > 0 }
        }
    }
    params.src_pos = src_pos;
    params.src_buf_left = src_buf_left;
    true
}
const TDEFL_COMP_FAST_LOOKAHEAD_SIZE: c_uint = 4096;
/// Fast (level-1) compression path: greedy parsing with a single hash probe
/// over 3-byte "trigrams". `tdefl_compress_oxide` selects this path only when
/// exactly one probe, greedy parsing, and none of the filter/RLE/raw flags
/// are requested.
///
/// Returns `true` on success, `false` if flushing an LZ block failed.
pub fn tdefl_compress_fast_oxide(
    huff: &mut HuffmanOxide,
    lz: &mut LZOxide,
    dict: &mut DictOxide,
    params: &mut ParamsOxide,
    callback: &mut CallbackOxide,
    local_buf: &mut [u8]
) -> bool {
    // Position of the next byte to code, masked into the ring buffer.
    let mut cur_pos = dict.lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    let in_buf = callback.in_buf.expect("Unexpected null in_buf"); // TODO: make connection params.src_buf_left <-> in_buf
    // Keep going while input remains, or a pending flush still has buffered
    // lookahead bytes to drain.
    while params.src_buf_left > 0 || (params.flush != TDEFLFlush::None && dict.lookahead_size > 0) {
        let mut dst_pos = ((dict.lookahead_pos + dict.lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK) as usize;
        // Top the lookahead up to TDEFL_COMP_FAST_LOOKAHEAD_SIZE bytes.
        let mut num_bytes_to_process = cmp::min(params.src_buf_left, (TDEFL_COMP_FAST_LOOKAHEAD_SIZE - dict.lookahead_size) as usize);
        params.src_buf_left -= num_bytes_to_process;
        dict.lookahead_size += num_bytes_to_process as c_uint;
        // Copy the new input into the ring buffer, wrapping as needed.
        while num_bytes_to_process != 0 {
            let n = cmp::min(TDEFL_LZ_DICT_SIZE - dst_pos , num_bytes_to_process);
            // NOTE(review): the leading `&mut` borrows the `()` returned by
            // `copy_from_slice` — it is a harmless no-op and could be removed.
            &mut dict.dict[dst_pos..dst_pos + n]
                .copy_from_slice(&in_buf[params.src_pos..params.src_pos + n]);
            // Mirror the first TDEFL_MAX_MATCH_LEN - 1 bytes past the end of
            // the ring so match reads never have to wrap.
            if dst_pos < TDEFL_MAX_MATCH_LEN - 1 {
                let m = cmp::min(n, TDEFL_MAX_MATCH_LEN - 1 - dst_pos);
                &mut dict.dict[dst_pos + TDEFL_LZ_DICT_SIZE..dst_pos + TDEFL_LZ_DICT_SIZE + m]
                    .copy_from_slice(&in_buf[params.src_pos..params.src_pos + m]);
            }
            params.src_pos += n;
            dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK as usize;
            num_bytes_to_process -= n;
        }
        dict.size = cmp::min(TDEFL_LZ_DICT_SIZE as c_uint - dict.lookahead_size, dict.size);
        // Without a pending flush, wait for a full lookahead before matching.
        if params.flush == TDEFLFlush::None && dict.lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE {
            break;
        }
        // Match loop: needs at least 4 bytes (trigram read + one spare).
        while dict.lookahead_size >= 4 {
            let mut cur_match_len = 1;
            let first_trigram = read_unaligned_dict::<u32>(&dict.dict[..], cur_pos as isize) & 0xFFFFFF;
            let hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK;
            let mut probe_pos = dict.hash[hash as usize] as u32;
            dict.hash[hash as usize] = dict.lookahead_pos as u16;
            let mut cur_match_dist = (dict.lookahead_pos - probe_pos) as u16;
            // NOTE(review): when `cur_match_dist as u32 > dict.size` this
            // iteration emits nothing and does not advance `cur_pos`; upstream
            // miniz emits a literal in that case — verify this cannot spin.
            if cur_match_dist as u32 <= dict.size {
                probe_pos &= TDEFL_LZ_DICT_SIZE_MASK;
                let trigram = read_unaligned_dict::<u32>(&dict.dict[..], probe_pos as isize) & 0xFFFFFF;
                if first_trigram == trigram {
                    // Trigram matched: extend the match two bytes at a time,
                    // up to 32 * 8 = 256 bytes (TDEFL_MAX_MATCH_LEN - 2).
                    let mut p = cur_pos as isize;
                    let mut q = probe_pos as isize;
                    let mut probe_len = 32;
                    'probe: loop {
                        for _ in 0..4 {
                            p += 2;
                            q += 2;
                            if read_unaligned_dict::<u16>(&dict.dict[..], p) != read_unaligned_dict(&dict.dict[..], q) {
                                // Mismatch inside the u16 pair: count the odd
                                // leading byte if it still matches.
                                cur_match_len = (p as u32 - cur_pos) + (dict.dict[p as usize] == dict.dict[q as usize]) as u32;
                                break 'probe;
                            }
                        }
                        probe_len -= 1;
                        if probe_len == 0 {
                            // Exhausted the probe: maximum-length match, unless
                            // the probe pointed at the current position itself.
                            cur_match_len = if cur_match_dist == 0 {
                                0
                            } else {
                                TDEFL_MAX_MATCH_LEN as u32
                            };
                            break 'probe;
                        }
                    }
                    // Reject too-short matches, and minimum-length matches that
                    // are too far away to be worth a (len, dist) pair.
                    if cur_match_len < TDEFL_MIN_MATCH_LEN || (cur_match_len == TDEFL_MIN_MATCH_LEN && cur_match_dist >= 8 * 1024) {
                        cur_match_len = 1;
                        lz.write_code(first_trigram as u8);
                        *lz.get_flag() >>= 1;
                        huff.count[0][first_trigram as u8 as usize] += 1;
                    } else {
                        cur_match_len = cmp::min(cur_match_len, dict.lookahead_size);
                        assert!(cur_match_len >= TDEFL_MIN_MATCH_LEN);
                        assert!(cur_match_dist >= 1);
                        assert!(cur_match_dist as usize <= TDEFL_LZ_DICT_SIZE);
                        cur_match_dist -= 1;
                        // Emit the match: one length byte, then the distance as
                        // an unaligned little-endian u16 in the code buffer.
                        lz.write_code((cur_match_len - TDEFL_MIN_MATCH_LEN) as u8);
                        unsafe {
                            ptr::write_unaligned(
                                (&mut lz.codes[0] as *mut u8).offset(lz.code_position as isize) as *mut u16,
                                cur_match_dist as u16
                            );
                            lz.code_position += 2;
                        }
                        *lz.get_flag() >>= 1;
                        *lz.get_flag() |= 0x80;
                        // Update distance/length symbol frequencies for the
                        // Huffman table builder.
                        if cur_match_dist < 512 {
                            huff.count[1][TDEFL_SMALL_DIST_SYM[cur_match_dist as usize] as usize] += 1;
                        } else {
                            huff.count[1][TDEFL_LARGE_DIST_SYM[(cur_match_dist >> 8) as usize] as usize] += 1;
                        }
                        huff.count[0][TDEFL_LEN_SYM[(cur_match_len - TDEFL_MIN_MATCH_LEN) as usize] as usize] += 1;
                    }
                } else {
                    // Hash hit but trigram differs: emit a literal.
                    lz.write_code(first_trigram as u8);
                    *lz.get_flag() >>= 1;
                    huff.count[0][first_trigram as u8 as usize] += 1;
                }
                lz.consume_flag();
                lz.total_bytes += cur_match_len;
                dict.lookahead_pos += cur_match_len;
                dict.size = cmp::min(dict.size + cur_match_len, TDEFL_LZ_DICT_SIZE as u32);
                cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
                assert!(dict.lookahead_size >= cur_match_len);
                dict.lookahead_size -= cur_match_len;
                // Flush when the code buffer is nearly full.
                if lz.code_position > TDEFL_LZ_CODE_BUF_SIZE - 8 {
                    let n = match tdefl_flush_block_oxide(
                        huff,
                        lz,
                        dict,
                        params,
                        callback,
                        local_buf,
                        TDEFLFlush::None
                    ) {
                        Err(_) => {params.prev_return_status = TDEFLStatus::PutBufFailed; -1},
                        Ok(status) => status
                    };
                    if n != 0 { return n > 0 }
                }
            }
        }
        // Drain the remaining (< 4) lookahead bytes as literals.
        while dict.lookahead_size != 0 {
            let lit = dict.dict[cur_pos as usize];
            lz.total_bytes += 1;
            lz.write_code(lit);
            *lz.get_flag() >>= 1;
            lz.consume_flag();
            huff.count[0][lit as usize] += 1;
            dict.lookahead_pos += 1;
            dict.size = cmp::min(dict.size + 1, TDEFL_LZ_DICT_SIZE as u32);
            cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
            dict.lookahead_size -= 1;
            if lz.code_position > TDEFL_LZ_CODE_BUF_SIZE - 8 {
                let n = match tdefl_flush_block_oxide(
                    huff,
                    lz,
                    dict,
                    params,
                    callback,
                    local_buf,
                    TDEFLFlush::None
                ) {
                    Err(_) => {params.prev_return_status = TDEFLStatus::PutBufFailed; -1},
                    Ok(status) => status
                };
                if n != 0 { return n > 0 }
            }
        }
    }
    true
}
/// Move pending flush bytes from `local_buf` into the caller-supplied output
/// buffer (when the callback writes to a buffer rather than a function).
///
/// Returns `(status, bytes_of_input_consumed, bytes_written_to_out_buf)`;
/// the status is `Done` once the compressor has finished and nothing remains
/// to flush, `Okay` otherwise.
pub fn tdefl_flush_output_buffer_oxide(
    c: &mut CallbackOxide,
    p: &mut ParamsOxide,
    local_buf: &[u8]
) -> (TDEFLStatus, usize, usize) {
    let mut status = TDEFLStatus::Okay;
    let mut out_pos = 0;
    if let CallbackOut::Buf(ref mut cb) = c.out {
        // Copy as much of the pending flush data as the output buffer can take.
        let bytes_to_copy = cmp::min(cb.out_buf.len() - p.out_buf_ofs, p.flush_remaining as usize);
        if bytes_to_copy > 0 {
            let src_start = p.flush_ofs as usize;
            (&mut cb.out_buf[p.out_buf_ofs..p.out_buf_ofs + bytes_to_copy]).copy_from_slice(
                &local_buf[src_start..src_start + bytes_to_copy]
            );
        }
        // Advance both the flush cursor and the output cursor.
        p.flush_ofs += bytes_to_copy as c_uint;
        p.flush_remaining -= bytes_to_copy as c_uint;
        p.out_buf_ofs += bytes_to_copy;
        out_pos = p.out_buf_ofs;
    }
    if p.finished && p.flush_remaining == 0 {
        status = TDEFLStatus::Done;
    }
    (status, p.src_pos, out_pos)
}
/// Main compression entry point: validates state, runs the fast or normal
/// compressor, updates the adler32 checksum, performs end-of-stream block
/// flushing, and drains the output buffer.
///
/// Returns `(status, input_bytes_consumed, output_bytes_written)`.
pub fn tdefl_compress_oxide(
    d: &mut CompressorOxide,
    callback: &mut CallbackOxide,
    flush: TDEFLFlush
) -> (TDEFLStatus, usize, usize) {
    d.params.src_buf_left = callback.in_buf.map_or(0, |buf| buf.len());
    d.params.out_buf_ofs = 0;
    d.params.src_pos = 0;
    // Reject calls after an error, and reject any flush change once Finish
    // has been requested (Finish may only be followed by Finish).
    let prev_ok = d.params.prev_return_status == TDEFLStatus::Okay;
    let flush_finish_once = d.params.flush != TDEFLFlush::Finish ||
        flush == TDEFLFlush::Finish;
    d.params.flush = flush;
    if !prev_ok || !flush_finish_once {
        d.params.prev_return_status = TDEFLStatus::BadParam;
        return (d.params.prev_return_status, 0, 0);
    }
    // If output from a previous call is still pending (or we are done), just
    // keep draining it.
    if d.params.flush_remaining != 0 || d.params.finished {
        let res = tdefl_flush_output_buffer_oxide(
            callback,
            &mut d.params,
            &d.local_buf[..]
        );
        d.params.prev_return_status = res.0;
        return res;
    }
    // The fast path requires exactly one probe, greedy parsing, and none of
    // the filter/raw/RLE flags.
    let one_probe = d.params.flags & TDEFL_MAX_PROBES_MASK as u32 == 1;
    let greedy = d.params.flags & TDEFL_GREEDY_PARSING_FLAG != 0;
    let filter_or_rle_or_raw = d.params.flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES) != 0;
    let compress_success = if one_probe && greedy && !filter_or_rle_or_raw {
        tdefl_compress_fast_oxide(
            &mut d.huff,
            &mut d.lz,
            &mut d.dict,
            &mut d.params,
            callback,
            &mut d.local_buf[..]
        )
    } else {
        tdefl_compress_normal_oxide(
            &mut d.huff,
            &mut d.lz,
            &mut d.dict,
            &mut d.params,
            callback,
            &mut d.local_buf[..]
        )
    };
    if !compress_success {
        return (d.params.prev_return_status, d.params.src_pos, d.params.out_buf_ofs);
    }
    // Fold the consumed input into the adler32 checksum when requested.
    if let Some(in_buf) = callback.in_buf {
        if d.params.flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32) != 0 {
            d.params.adler32 = ::mz_adler32_oxide(
                d.params.adler32,
                &in_buf[..d.params.src_pos]
            );
        }
    }
    // A flush was requested and everything buffered has been consumed:
    // emit the final/flush block now.
    let flush_none = d.params.flush == TDEFLFlush::None;
    let remaining = d.params.src_buf_left != 0 || d.params.flush_remaining != 0;
    if !flush_none && d.dict.lookahead_size == 0 && !remaining {
        let flush = d.params.flush;
        match tdefl_flush_block_oxide(
            &mut d.huff,
            &mut d.lz,
            &mut d.dict,
            &mut d.params,
            callback,
            &mut d.local_buf[..],
            flush
        ) {
            Err(_) => {
                d.params.prev_return_status = TDEFLStatus::PutBufFailed;
                return (d.params.prev_return_status, d.params.src_pos, d.params.out_buf_ofs);
            },
            Ok(x) if x < 0 => return (d.params.prev_return_status, d.params.src_pos, d.params.out_buf_ofs),
            _ => {
                d.params.finished = d.params.flush == TDEFLFlush::Finish;
                // A full flush also resets the dictionary so the next block
                // is independent of prior data.
                if d.params.flush == TDEFLFlush::Full {
                    memset(&mut d.dict.hash[..], 0);
                    memset(&mut d.dict.next[..], 0);
                    d.dict.size = 0;
                }
            },
        }
    }
    let res = tdefl_flush_output_buffer_oxide(
        callback,
        &mut d.params,
        &d.local_buf[..]
    );
    d.params.prev_return_status = res.0;
    res
}
/// Return the current adler32 checksum of the input consumed so far.
pub fn tdefl_get_adler32_oxide(d: &CompressorOxide) -> c_uint {
    d.params.adler32
}
/// Return the status reported by the most recent compression call.
pub fn tdefl_get_prev_return_status_oxide(d: &CompressorOxide) -> TDEFLStatus {
    d.params.prev_return_status
}
/// Return the compressor's flag word as a C int (for the C API surface).
pub fn tdefl_get_flags_oxide(d: &CompressorOxide) -> c_int {
    d.params.flags as c_int
}
/// Translate zlib-style `(level, window_bits, strategy)` parameters into a
/// tdefl compressor flag word.
///
/// Negative `level` selects the default level; `window_bits > 0` requests a
/// zlib header; `level == 0` forces stored (raw) blocks; otherwise the
/// strategy picks filtered/huffman-only/fixed/RLE behaviour.
pub fn tdefl_create_comp_flags_from_zip_params_oxide(
    level: c_int,
    window_bits: c_int,
    strategy: c_int
) -> c_uint {
    // Probe count is looked up per level, clamped to the table's range.
    let probe_index = if level >= 0 {
        cmp::min(10, level) as usize
    } else {
        ::CompressionLevel::DefaultLevel as c_int as usize
    };
    let mut comp_flags = TDEFL_NUM_PROBES[probe_index];
    // Low levels use greedy (non-lazy) parsing.
    if level <= 3 {
        comp_flags |= TDEFL_GREEDY_PARSING_FLAG as c_uint;
    }
    if window_bits > 0 {
        comp_flags |= TDEFL_WRITE_ZLIB_HEADER as c_uint;
    }
    if level == 0 {
        comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
    } else if strategy == ::CompressionStrategy::Filtered as c_int {
        comp_flags |= TDEFL_FILTER_MATCHES;
    } else if strategy == ::CompressionStrategy::HuffmanOnly as c_int {
        // Huffman-only: zero out the probe count so no matching is done.
        comp_flags &= !TDEFL_MAX_PROBES_MASK as c_uint;
    } else if strategy == ::CompressionStrategy::Fixed as c_int {
        comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
    } else if strategy == ::CompressionStrategy::RLE as c_int {
        comp_flags |= TDEFL_RLE_MATCHES;
    }
    comp_flags
}
|
//! Plugin specific structures.
use std::{mem, ptr};
use libc::c_void;
use channels::ChannelInfo;
use host::{self, Host};
use api::{AEffect, HostCallbackProc, Supported};
use api::consts::VST_MAGIC;
use buffer::AudioBuffer;
use editor::Editor;
use event::Event;
/// Plugin type. Generally either Effect or Synth.
///
/// Other types are not necessary to build a plugin and are only useful for the host to categorize
/// the plugin.
#[repr(usize)]
#[derive(Clone, Copy, Debug)]
pub enum Category {
    /// Unknown / not implemented
    Unknown,
    /// Any effect
    Effect,
    /// VST instrument
    Synth,
    /// Scope, tuner, spectrum analyser, etc.
    Analysis,
    /// Dynamics, etc.
    Mastering,
    /// Panners, etc.
    // NOTE(review): variant name appears to be a misspelling of "Spatializer";
    // kept as-is since renaming would break the public API.
    Spacializer,
    /// Delays and Reverbs
    RoomFx,
    /// Dedicated surround processor.
    SurroundFx,
    /// Denoiser, etc.
    Restoration,
    /// Offline processing.
    OfflineProcess,
    /// Contains other plugins.
    Shell,
    /// Tone generator, etc.
    Generator
}
impl_clike!(Category);
// Host-to-plugin dispatcher opcodes (VST 2.4 `effOpcode` values, in order).
// The bracketed tags describe which dispatcher argument each opcode uses:
// [index], [value], [ptr], [opt], and what is returned in [return].
#[repr(usize)]
#[derive(Clone, Copy, Debug)]
#[doc(hidden)]
pub enum OpCode {
    /// Called when plugin is initialized.
    Initialize,
    /// Called when plugin is being shut down.
    Shutdown,
    /// [value]: preset number to change to.
    ChangePreset,
    /// [return]: current preset number.
    GetCurrentPresetNum,
    /// [ptr]: char array with new preset name, limited to `consts::MAX_PRESET_NAME_LEN`.
    SetCurrentPresetName,
    /// [ptr]: char buffer for current preset name, limited to `consts::MAX_PRESET_NAME_LEN`.
    GetCurrentPresetName,
    /// [index]: parameter
    /// [ptr]: char buffer, limited to `consts::MAX_PARAM_STR_LEN` (e.g. "db", "ms", etc)
    GetParameterLabel,
    /// [index]: parameter
    /// [ptr]: char buffer, limited to `consts::MAX_PARAM_STR_LEN` (e.g. "0.5", "ROOM", etc).
    GetParameterDisplay,
    /// [index]: parameter
    /// [ptr]: char buffer, limited to `consts::MAX_PARAM_STR_LEN` (e.g. "Release", "Gain")
    GetParameterName,
    /// Deprecated.
    _GetVu,
    /// [opt]: new sample rate.
    SetSampleRate,
    /// [value]: new maximum block size.
    SetBlockSize,
    /// [value]: 1 when plugin enabled, 0 when disabled.
    StateChanged,
    /// [ptr]: Rect** receiving pointer to editor size.
    EditorGetRect,
    /// [ptr]: system dependent window pointer, eg HWND on Windows.
    EditorOpen,
    /// Close editor. No arguments.
    EditorClose,
    /// Deprecated.
    _EditorDraw,
    /// Deprecated.
    _EditorMouse,
    /// Deprecated.
    _EditorKey,
    /// Idle call from host.
    EditorIdle,
    /// Deprecated.
    _EditorTop,
    /// Deprecated.
    _EditorSleep,
    /// Deprecated.
    _EditorIdentify,
    /// [ptr]: pointer for chunk data address (void**).
    /// [index]: 0 for bank, 1 for program
    GetData,
    /// [ptr]: data (void*)
    /// [value]: data size in bytes
    /// [index]: 0 for bank, 1 for program
    SetData,
    /// [ptr]: VstEvents* TODO: Events
    ProcessEvents,
    /// [index]: param index
    /// [return]: 1=true, 0=false
    CanBeAutomated,
    /// [index]: param index
    /// [ptr]: parameter string
    /// [return]: true for success
    StringToParameter,
    /// Deprecated.
    _GetNumCategories,
    /// [index]: program name
    /// [ptr]: char buffer for name, limited to `consts::MAX_PRESET_NAME_LEN`
    /// [return]: true for success
    GetPresetName,
    /// Deprecated.
    _CopyPreset,
    /// Deprecated.
    _ConnectIn,
    /// Deprecated.
    _ConnectOut,
    /// [index]: input index
    /// [ptr]: `VstPinProperties`
    /// [return]: 1 if supported
    GetInputInfo,
    /// [index]: output index
    /// [ptr]: `VstPinProperties`
    /// [return]: 1 if supported
    GetOutputInfo,
    /// [return]: `PluginCategory` category.
    GetCategory,
    /// Deprecated.
    _GetCurrentPosition,
    /// Deprecated.
    _GetDestinationBuffer,
    /// [ptr]: `VstAudioFile` array
    /// [value]: count
    /// [index]: start flag
    OfflineNotify,
    /// [ptr]: `VstOfflineTask` array
    /// [value]: count
    OfflinePrepare,
    /// [ptr]: `VstOfflineTask` array
    /// [value]: count
    OfflineRun,
    /// [ptr]: `VstVariableIo`
    /// [use]: used for variable I/O processing (offline e.g. timestretching)
    ProcessVarIo,
    /// TODO: implement
    /// [value]: input `*mut VstSpeakerArrangement`.
    /// [ptr]: output `*mut VstSpeakerArrangement`.
    SetSpeakerArrangement,
    /// Deprecated.
    _SetBlocksizeAndSampleRate,
    /// Soft bypass (automatable).
    /// [value]: 1 = bypass, 0 = nobypass.
    SoftBypass,
    /// [ptr]: buffer for effect name, limited to `kVstMaxEffectNameLen`
    GetEffectName,
    /// Deprecated.
    _GetErrorText,
    /// [ptr]: buffer for vendor name, limited to `consts::MAX_VENDOR_STR_LEN`.
    GetVendorName,
    /// [ptr]: buffer for product name, limited to `consts::MAX_PRODUCT_STR_LEN`.
    GetProductName,
    /// [return]: vendor specific version.
    GetVendorVersion,
    /// no definition, vendor specific.
    VendorSpecific,
    /// [ptr]: "Can do" string.
    /// [return]: 1 = yes, 0 = maybe, -1 = no.
    CanDo,
    /// [return]: tail size (e.g. reverb time). 0 is default, 1 means no tail.
    GetTailSize,
    /// Deprecated.
    _Idle,
    /// Deprecated.
    _GetIcon,
    /// Deprecated.
    _SetVewPosition,
    /// [index]: param index
    /// [ptr]: `*mut VstParamInfo` //TODO: Implement
    /// [return]: 1 if supported
    GetParamInfo,
    /// Deprecated.
    _KeysRequired,
    /// [return]: 2400 for vst 2.4.
    GetApiVersion,
    /// [index]: ASCII char.
    /// [value]: `Key` keycode.
    /// [opt]: `flags::modifier_key` bitmask.
    /// [return]: 1 if used.
    EditorKeyDown,
    /// [index]: ASCII char.
    /// [value]: `Key` keycode.
    /// [opt]: `flags::modifier_key` bitmask.
    /// [return]: 1 if used.
    EditorKeyUp,
    /// [value]: 0 = circular, 1 = circular relative, 2 = linear.
    EditorSetKnobMode,
    /// [index]: MIDI channel.
    /// [ptr]: `*mut MidiProgramName`. //TODO: Implement
    /// [return]: number of used programs, 0 = unsupported.
    GetMidiProgramName,
    /// [index]: MIDI channel.
    /// [ptr]: `*mut MidiProgramName`. //TODO: Implement
    /// [return]: index of current program.
    GetCurrentMidiProgram,
    /// [index]: MIDI channel.
    /// [ptr]: `*mut MidiProgramCategory`. //TODO: Implement
    /// [return]: number of used categories.
    GetMidiProgramCategory,
    /// [index]: MIDI channel.
    /// [return]: 1 if `MidiProgramName` or `MidiKeyName` has changed. //TODO: Implement
    HasMidiProgramsChanged,
    /// [index]: MIDI channel.
    /// [ptr]: `*mut MidiKeyName`. //TODO: Implement
    /// [return]: 1 = supported 0 = not.
    GetMidiKeyName,
    /// Called before a preset is loaded.
    BeginSetPreset,
    /// Called after a preset is loaded.
    EndSetPreset,
    /// [value]: inputs `*mut VstSpeakerArrangement` //TODO: Implement
    /// [ptr]: Outputs `*mut VstSpeakerArrangement`
    GetSpeakerArrangement,
    /// [ptr]: buffer for plugin name, limited to `consts::MAX_PRODUCT_STR_LEN`.
    /// [return]: next plugin's uniqueID.
    ShellGetNextPlugin,
    /// No args. Called once before start of process call. This indicates that the process call
    /// will be interrupted (e.g. Host reconfiguration or bypass when plugin doesn't support
    /// SoftBypass)
    StartProcess,
    /// No arguments. Called after stop of process call.
    StopProcess,
    /// [value]: number of samples to process. Called in offline mode before process.
    SetTotalSampleToProcess,
    /// [value]: pan law `PanLaw`. //TODO: Implement
    /// [opt]: gain.
    SetPanLaw,
    /// [ptr]: `*mut VstPatchChunkInfo`. //TODO: Implement
    /// [return]: -1 = bank cant be loaded, 1 = can be loaded, 0 = unsupported.
    BeginLoadBank,
    /// [ptr]: `*mut VstPatchChunkInfo`. //TODO: Implement
    /// [return]: -1 = bank cant be loaded, 1 = can be loaded, 0 = unsupported.
    BeginLoadPreset,
    /// [value]: 0 if 32 bit, anything else if 64 bit.
    SetPrecision,
    /// [return]: number of used MIDI Inputs (1-15).
    GetNumMidiInputs,
    /// [return]: number of used MIDI Outputs (1-15).
    GetNumMidiOutputs,
}
impl_clike!(OpCode);
/// A structure representing static plugin information.
#[derive(Clone, Debug)]
pub struct Info {
    /// Plugin Name.
    pub name: String,
    /// Plugin Vendor.
    pub vendor: String,
    /// Number of different presets.
    pub presets: i32,
    /// Number of parameters.
    pub parameters: i32,
    /// Number of inputs.
    pub inputs: i32,
    /// Number of outputs.
    pub outputs: i32,
    /// Unique plugin ID. Can be registered with Steinberg to prevent conflicts with other plugins.
    ///
    /// This ID is used to identify a plugin during save and load of a preset and project.
    pub unique_id: i32,
    /// Plugin version (e.g. 0001 = `v0.0.0.1`, 1283 = `v1.2.8.3`).
    ///
    /// Each decimal digit encodes one component of the version number.
    pub version: i32,
    /// Plugin category. Possible values are found in `enums::PluginCategory`.
    pub category: Category,
    /// Latency of the plugin in samples.
    ///
    /// This reports how many samples it takes for the plugin to create an output (group delay).
    pub initial_delay: i32,
    /// Indicates that preset data is handled in formatless chunks.
    ///
    /// If false, host saves and restores plugin by reading/writing parameter data. If true, it is
    /// up to the plugin to manage saving preset data by implementing the
    /// `{get, load}_{preset, bank}_chunks()` methods. Default is `false`.
    pub preset_chunks: bool,
    /// Indicates whether this plugin can process f64 based `AudioBuffer` buffers.
    ///
    /// Default is `true`.
    pub f64_precision: bool,
    /// If this is true, the plugin will not produce sound when the input is silence.
    ///
    /// Default is `false`.
    pub silent_when_stopped: bool,
}
impl Default for Info {
    /// Conservative defaults for a stereo effect plugin.
    ///
    /// `unique_id` defaults to 0 and MUST be overridden by every real plugin,
    /// since hosts use it to identify the plugin in presets and projects.
    fn default() -> Info {
        Info {
            name: "VST".to_string(),
            vendor: String::new(),
            presets: 1, // default preset
            parameters: 0,
            inputs: 2, // Stereo in,out
            outputs: 2,
            unique_id: 0, // This must be changed.
            // v0.0.0.1 — was written as the zero-prefixed literal `0001`,
            // which is still decimal 1 in Rust but reads like octal
            // (clippy: zero_prefixed_literal).
            version: 1,
            category: Category::Effect,
            initial_delay: 0,
            preset_chunks: false,
            f64_precision: true,
            silent_when_stopped: false,
        }
    }
}
/// Features which are optionally supported by a plugin. These are queried by the host at run time.
///
/// Variants map one-to-one onto the raw VST "can do" strings; see the
/// `FromStr` and string-conversion impls below for the exact spellings.
#[derive(Debug)]
#[allow(missing_docs)]
pub enum CanDo {
    SendEvents,
    SendMidiEvent,
    ReceiveEvents,
    ReceiveMidiEvent,
    ReceiveTimeInfo,
    Offline,
    MidiProgramNames,
    Bypass,
    ReceiveSysExEvent,
    // Bitwig specific? (unverified)
    MidiSingleNoteTuningChange,
    MidiKeyBasedInstrumentControl,
    /// Any "can do" string not covered by the variants above, carried verbatim.
    Other(String)
}
use std::str::FromStr;
impl FromStr for CanDo {
    type Err = String;

    /// Map a raw VST "can do" string received from the host onto a `CanDo`
    /// variant. Unrecognized strings are preserved verbatim in `CanDo::Other`,
    /// so this never actually returns `Err`.
    fn from_str(s: &str) -> Result<CanDo, String> {
        use self::CanDo::*;
        Ok(match s {
            "sendVstEvents" => SendEvents,
            "sendVstMidiEvent" => SendMidiEvent,
            "receiveVstEvents" => ReceiveEvents,
            "receiveVstMidiEvent" => ReceiveMidiEvent,
            "receiveVstTimeInfo" => ReceiveTimeInfo,
            "offline" => Offline,
            "midiProgramNames" => MidiProgramNames,
            "bypass" => Bypass,
            "receiveVstSysexEvent" => ReceiveSysExEvent,
            "midiSingleNoteTuningChange" => MidiSingleNoteTuningChange,
            "midiKeyBasedInstrumentControl" => MidiKeyBasedInstrumentControl,
            otherwise => Other(otherwise.to_string())
        })
    }
}
impl From<CanDo> for String {
    /// Convert the `CanDo` variant back into the raw VST "can do" string.
    ///
    /// Implemented as `From` rather than a direct `Into` impl (the idiomatic
    /// direction): the standard blanket impl still provides
    /// `Into<String> for CanDo`, so existing `.into()` callers keep working.
    fn from(can_do: CanDo) -> String {
        use self::CanDo::*;
        match can_do {
            SendEvents => "sendVstEvents".to_string(),
            SendMidiEvent => "sendVstMidiEvent".to_string(),
            ReceiveEvents => "receiveVstEvents".to_string(),
            ReceiveMidiEvent => "receiveVstMidiEvent".to_string(),
            ReceiveTimeInfo => "receiveVstTimeInfo".to_string(),
            Offline => "offline".to_string(),
            MidiProgramNames => "midiProgramNames".to_string(),
            Bypass => "bypass".to_string(),
            ReceiveSysExEvent => "receiveVstSysexEvent".to_string(),
            MidiSingleNoteTuningChange => "midiSingleNoteTuningChange".to_string(),
            MidiKeyBasedInstrumentControl => "midiKeyBasedInstrumentControl".to_string(),
            Other(other) => other
        }
    }
}
/// Must be implemented by all VST plugins.
///
/// All methods except `get_info` provide a default implementation which does nothing and can be
/// safely overridden.
#[allow(unused_variables)]
pub trait Plugin {
    /// This method must return an `Info` struct.
    fn get_info(&self) -> Info;

    /// Called during initialization to pass a `HostCallback` to the plugin.
    ///
    /// This method can be overridden to set `host` as a field in the plugin struct.
    ///
    /// # Example
    ///
    /// ```
    /// // ...
    /// # extern crate vst2;
    /// # #[macro_use] extern crate log;
    /// # use vst2::plugin::{Plugin, Info};
    /// use vst2::plugin::HostCallback;
    ///
    /// # #[derive(Default)]
    /// struct ExamplePlugin {
    ///     host: HostCallback
    /// }
    ///
    /// impl Plugin for ExamplePlugin {
    ///     fn new(host: HostCallback) -> ExamplePlugin {
    ///         ExamplePlugin {
    ///             host: host
    ///         }
    ///     }
    ///
    ///     fn init(&mut self) {
    ///         info!("loaded with host vst version: {}", self.host.vst_version());
    ///     }
    ///
    ///     // ...
    /// #     fn get_info(&self) -> Info {
    /// #         Info {
    /// #             name: "Example Plugin".to_string(),
    /// #             ..Default::default()
    /// #         }
    /// #     }
    /// }
    ///
    /// # fn main() {}
    /// ```
    fn new(host: HostCallback) -> Self where Self: Sized + Default {
        Default::default()
    }

    /// Called when plugin is fully initialized.
    fn init(&mut self) { trace!("Initialized vst plugin."); }

    /// Set the current preset to the index specified by `preset`.
    fn change_preset(&mut self, preset: i32) { }

    /// Get the current preset index.
    fn get_preset_num(&self) -> i32 { 0 }

    /// Set the current preset name.
    fn set_preset_name(&mut self, name: String) { }

    /// Get the name of the preset at the index specified by `preset`.
    fn get_preset_name(&self, preset: i32) -> String { "".to_string() }

    /// Get parameter label for parameter at `index` (e.g. "db", "sec", "ms", "%").
    fn get_parameter_label(&self, index: i32) -> String { "".to_string() }

    /// Get the parameter value for parameter at `index` (e.g. "1.0", "150", "Plate", "Off").
    fn get_parameter_text(&self, index: i32) -> String {
        format!("{:.3}", self.get_parameter(index))
    }

    /// Get the name of parameter at `index`.
    fn get_parameter_name(&self, index: i32) -> String { format!("Param {}", index) }

    /// Get the value of parameter at `index`. Should be value between 0.0 and 1.0.
    fn get_parameter(&self, index: i32) -> f32 { 0.0 }

    /// Set the value of parameter at `index`. `value` is between 0.0 and 1.0.
    fn set_parameter(&mut self, index: i32, value: f32) { }

    /// Return whether parameter at `index` can be automated.
    fn can_be_automated(&self, index: i32) -> bool { false }

    /// Use String as input for parameter value. Used by host to provide an editable field to
    /// adjust a parameter value. E.g. "100" may be interpreted as 100hz for parameter. Returns if
    /// the input string was used.
    fn string_to_parameter(&mut self, index: i32, text: String) -> bool { false }

    /// Called when sample rate is changed by host.
    fn set_sample_rate(&mut self, rate: f32) { }

    /// Called when block size is changed by host.
    fn set_block_size(&mut self, size: i64) { }

    /// Called when plugin is turned on.
    fn resume(&mut self) { }

    /// Called when plugin is turned off.
    fn suspend(&mut self) { }

    /// Vendor specific handling.
    fn vendor_specific(&mut self, index: i32, value: isize, ptr: *mut c_void, opt: f32) -> isize { 0 }

    /// Return whether plugin supports specified action.
    fn can_do(&self, can_do: CanDo) -> Supported {
        info!("Host is asking if plugin can: {:?}.", can_do);
        Supported::Maybe
    }

    /// Get the tail size of plugin when it is stopped. Used in offline processing as well.
    fn get_tail_size(&self) -> isize { 0 }

    /// Process an audio buffer containing `f32` values.
    ///
    /// # Example
    /// ```no_run
    /// # use vst2::plugin::{Info, Plugin};
    /// # use vst2::buffer::AudioBuffer;
    /// #
    /// # struct ExamplePlugin;
    /// # impl Plugin for ExamplePlugin {
    /// #     fn get_info(&self) -> Info { Default::default() }
    /// #
    /// // Processor that clips samples above 0.4 or below -0.4:
    /// fn process(&mut self, buffer: AudioBuffer<f32>){
    ///     let (inputs, mut outputs) = buffer.split();
    ///
    ///     for (channel, ibuf) in inputs.iter().enumerate() {
    ///         for (i, sample) in ibuf.iter().enumerate() {
    ///             outputs[channel][i] = if *sample > 0.4 {
    ///                 0.4
    ///             } else if *sample < -0.4 {
    ///                 -0.4
    ///             } else {
    ///                 *sample
    ///             };
    ///         }
    ///     }
    /// }
    /// # }
    /// ```
    fn process(&mut self, buffer: AudioBuffer<f32>) {
        // For each input and output
        for (input, output) in buffer.zip() {
            // For each input sample and output sample in buffer
            for (in_frame, out_frame) in input.into_iter().zip(output.into_iter()) {
                *out_frame = *in_frame;
            }
        }
    }

    /// Process an audio buffer containing `f64` values.
    ///
    /// # Example
    /// ```no_run
    /// # use vst2::plugin::{Info, Plugin};
    /// # use vst2::buffer::AudioBuffer;
    /// #
    /// # struct ExamplePlugin;
    /// # impl Plugin for ExamplePlugin {
    /// #     fn get_info(&self) -> Info { Default::default() }
    /// #
    /// // Processor that clips samples above 0.4 or below -0.4:
    /// fn process_f64(&mut self, buffer: AudioBuffer<f64>){
    ///     let (inputs, mut outputs) = buffer.split();
    ///
    ///     for (channel, ibuf) in inputs.iter().enumerate() {
    ///         for (i, sample) in ibuf.iter().enumerate() {
    ///             outputs[channel][i] = if *sample > 0.4 {
    ///                 0.4
    ///             } else if *sample < -0.4 {
    ///                 -0.4
    ///             } else {
    ///                 *sample
    ///             };
    ///         }
    ///     }
    /// }
    /// # }
    /// ```
    fn process_f64(&mut self, buffer: AudioBuffer<f64>) {
        // For each input and output
        for (input, output) in buffer.zip() {
            // For each input sample and output sample in buffer
            for (in_frame, out_frame) in input.into_iter().zip(output.into_iter()) {
                *out_frame = *in_frame;
            }
        }
    }

    /// Handle incoming events sent from the host.
    ///
    /// This is always called before the start of `process` or `process_f64`.
    fn process_events(&mut self, events: Vec<Event>) {}

    /// Return handle to plugin editor if supported.
    fn get_editor(&mut self) -> Option<&mut Editor> { None }

    /// If `preset_chunks` is set to true in plugin info, this should return the raw chunk data for
    /// the current preset.
    fn get_preset_data(&mut self) -> Vec<u8> { Vec::new() }

    /// If `preset_chunks` is set to true in plugin info, this should return the raw chunk data for
    /// the current plugin bank.
    fn get_bank_data(&mut self) -> Vec<u8> { Vec::new() }

    /// If `preset_chunks` is set to true in plugin info, this should load a preset from the given
    /// chunk data.
    fn load_preset_data(&mut self, data: &[u8]) {}

    /// If `preset_chunks` is set to true in plugin info, this should load a preset bank from the
    /// given chunk data.
    fn load_bank_data(&mut self, data: &[u8]) {}

    /// Get information about an input channel. Only used by some hosts.
    fn get_input_info(&self, input: i32) -> ChannelInfo {
        ChannelInfo::new(format!("Input channel {}", input),
                         Some(format!("In {}", input)),
                         true, None)
    }

    /// Get information about an output channel. Only used by some hosts.
    fn get_output_info(&self, output: i32) -> ChannelInfo {
        ChannelInfo::new(format!("Output channel {}", output),
                         Some(format!("Out {}", output)),
                         true, None)
    }
}
/// A reference to the host which allows the plugin to call back and access information.
///
/// # Panics
///
/// All methods in this struct will panic if the plugin has not yet been initialized. In practice,
/// this can only occur if the plugin queries the host for information when `Default::default()` is
/// called.
///
/// ```should_panic
/// # use vst2::plugin::{Info, Plugin, HostCallback};
/// struct ExamplePlugin;
///
/// impl Default for ExamplePlugin {
///     fn default() -> ExamplePlugin {
///         // Will panic, don't do this. If needed, you can query
///         // the host during initialization via Vst::new()
///         let host: HostCallback = Default::default();
///         let version = host.vst_version();
///
///         // ...
/// #         ExamplePlugin
///     }
/// }
/// #
/// # impl Plugin for ExamplePlugin {
/// #     fn get_info(&self) -> Info { Default::default() }
/// # }
/// # fn main() { let plugin: ExamplePlugin = Default::default(); }
/// ```
pub struct HostCallback {
    // Raw host dispatcher; `None` until `HostCallback::wrap` is called, and
    // every method panics while it is `None` (see the panic docs above).
    callback: Option<HostCallbackProc>,
    // `AEffect` pointer handed back to the host with every callback dispatch.
    effect: *mut AEffect,
}
/// `HostCallback` implements `Default` so that the plugin can implement `Default` and have a
/// `HostCallback` field.
impl Default for HostCallback {
    /// An uninitialized callback: any host query through it will panic
    /// (see the struct-level documentation).
    fn default() -> HostCallback {
        HostCallback { callback: None, effect: ptr::null_mut() }
    }
}
impl HostCallback {
    /// Wrap callback in a function to avoid using fn pointer notation.
    #[doc(hidden)]
    fn callback(&self,
                effect: *mut AEffect,
                opcode: host::OpCode,
                index: i32,
                value: isize,
                ptr: *mut c_void,
                opt: f32)
                -> isize {
        // Panics if the plugin queries the host before `wrap` was called
        // (e.g. from `Default::default()`); `expect` replaces the previous
        // `unwrap_or_else(|| panic!(..))` with the same message.
        let callback = self.callback.expect("Host not yet initialized.");
        callback(effect, opcode.into(), index, value, ptr, opt)
    }

    /// Check whether the plugin has been initialized.
    #[doc(hidden)]
    fn is_effect_valid(&self) -> bool {
        // A null pointer can never reference a valid AEffect; dereferencing
        // it below would be undefined behaviour, so bail out first.
        if self.effect.is_null() {
            return false;
        }
        // Check whether `effect` points to a valid AEffect struct by reading
        // its leading magic number. A plain pointer cast replaces the former
        // `mem::transmute`, which is unnecessary for pointer-to-pointer casts.
        unsafe { *(self.effect as *const i32) == VST_MAGIC }
    }

    /// Create a new Host structure wrapping a host callback.
    #[doc(hidden)]
    pub fn wrap(callback: HostCallbackProc, effect: *mut AEffect) -> HostCallback {
        HostCallback {
            callback: Some(callback),
            effect: effect,
        }
    }

    /// Get the VST API version supported by the host e.g. `2400 = VST 2.4`.
    pub fn vst_version(&self) -> i32 {
        self.callback(self.effect, host::OpCode::Version,
                      0, 0, ptr::null_mut(), 0.0) as i32
    }

    /// Read a host string for `opcode` with zeroed index/value/opt arguments.
    fn read_string(&self, opcode: host::OpCode, max: usize) -> String {
        self.read_string_param(opcode, 0, 0, 0.0, max)
    }

    /// Dispatch `opcode` with a `max`-byte scratch buffer and decode the
    /// NUL-terminated string the host wrote into it (lossy UTF-8).
    fn read_string_param(&self,
                         opcode: host::OpCode,
                         index: i32,
                         value: isize,
                         opt: f32,
                         max: usize)
                         -> String {
        let mut buf = vec![0; max];
        self.callback(self.effect, opcode, index, value, buf.as_mut_ptr() as *mut c_void, opt);
        String::from_utf8_lossy(&buf).chars().take_while(|c| *c != '\0').collect()
    }
}
impl Host for HostCallback {
    /// Notify the host that parameter `index` changed to `value`
    /// (e.g. from the plugin's own editor).
    fn automate(&mut self, index: i32, value: f32) {
        if self.is_effect_valid() { // TODO: Investigate removing this check, should be up to host
            self.callback(self.effect, host::OpCode::Automate,
                          index, 0, ptr::null_mut(), value);
        }
    }

    /// Ask the host for the unique ID of the plugin it is currently loading.
    fn get_plugin_id(&self) -> i32 {
        self.callback(self.effect, host::OpCode::CurrentId,
                      0, 0, ptr::null_mut(), 0.0) as i32
    }

    /// Tell the host the plugin is idle (legacy UI-update hint).
    fn idle(&self) {
        self.callback(self.effect, host::OpCode::Idle,
                      0, 0, ptr::null_mut(), 0.0);
    }

    /// Query `(version, vendor, product)` strings from the host.
    fn get_info(&self) -> (isize, String, String) {
        use api::consts::*;
        // NOTE(review): this dispatches `OpCode::CurrentId` for the version
        // slot — confirm whether `OpCode::Version` was intended here.
        let version = self.callback(self.effect, host::OpCode::CurrentId, 0, 0, ptr::null_mut(), 0.0) as isize;
        let vendor_name = self.read_string(host::OpCode::GetVendorString, MAX_VENDOR_STR_LEN);
        let product_name = self.read_string(host::OpCode::GetProductString, MAX_PRODUCT_STR_LEN);
        (version, vendor_name, product_name)
    }

    /// Send `events` (e.g. MIDI) to the host: `interfaces::process_events`
    /// packs them into a raw `VstEvents` pointer which is forwarded via the
    /// `ProcessEvents` opcode.
    fn process_events(&mut self, events: Vec<Event>) {
        use interfaces;
        interfaces::process_events(
            events,
            |ptr| {
                self.callback(
                    self.effect,
                    host::OpCode::ProcessEvents,
                    0,
                    0,
                    ptr,
                    0.0
                );
            }
        );
    }
}
#[cfg(test)]
mod tests {
    use std::ptr;

    use plugin;

    /// Create a plugin instance.
    ///
    /// This is a macro to allow you to specify attributes on the created struct.
    macro_rules! make_plugin {
        ($($attr:meta) *) => {
            use libc::c_void;

            use main;
            use api::AEffect;
            use host::{Host, OpCode};
            use plugin::{HostCallback, Info, Plugin};

            $(#[$attr]) *
            struct TestPlugin {
                host: HostCallback
            }

            impl Plugin for TestPlugin {
                fn get_info(&self) -> Info {
                    Info {
                        name: "Test Plugin".to_string(),
                        ..Default::default()
                    }
                }

                fn new(host: HostCallback) -> TestPlugin {
                    TestPlugin {
                        host: host
                    }
                }

                fn init(&mut self) {
                    info!("Loaded with host vst version: {}", self.host.vst_version());
                    assert_eq!(2400, self.host.vst_version());
                    assert_eq!(9876, self.host.get_plugin_id());
                    // Callback will assert these.
                    self.host.automate(123, 12.3);
                    self.host.idle();
                }
            }

            // Builds an AEffect backed by a fake host callback that answers
            // the opcodes `init` above exercises.
            #[allow(dead_code)]
            fn instance() -> *mut AEffect {
                fn host_callback(_effect: *mut AEffect,
                                 opcode: i32,
                                 index: i32,
                                 _value: isize,
                                 _ptr: *mut c_void,
                                 opt: f32)
                                 -> isize {
                    let opcode = OpCode::from(opcode);
                    match opcode {
                        OpCode::Automate => {
                            assert_eq!(index, 123);
                            assert_eq!(opt, 12.3);
                            0
                        }
                        OpCode::Version => 2400,
                        OpCode::CurrentId => 9876,
                        OpCode::Idle => 0,
                        _ => 0
                    }
                }

                main::<TestPlugin>(host_callback)
            }
        }
    }

    make_plugin!(derive(Default));

    #[test]
    #[should_panic]
    fn null_panic() {
        make_plugin!(/* no `derive(Default)` */);

        impl Default for TestPlugin {
            fn default() -> TestPlugin {
                let plugin = TestPlugin { host: Default::default() };

                // Should panic
                let version = plugin.host.vst_version();
                info!("Loaded with host vst version: {}", version);

                plugin
            }
        }

        TestPlugin::default();
    }

    #[test]
    fn host_callbacks() {
        let aeffect = instance();
        // Drive the plugin through its raw dispatcher, as a host would.
        (unsafe { (*aeffect).dispatcher })(aeffect, plugin::OpCode::Initialize.into(),
                                           0, 0, ptr::null_mut(), 0.0);
    }
}
Document `process_events` on `HostCallback`
//! Plugin specific structures.
use std::{mem, ptr};
use libc::c_void;
use channels::ChannelInfo;
use host::{self, Host};
use api::{AEffect, HostCallbackProc, Supported};
use api::consts::VST_MAGIC;
use buffer::AudioBuffer;
use editor::Editor;
use event::Event;
/// Plugin type. Generally either Effect or Synth.
///
/// Other types are not necessary to build a plugin and are only useful for the host to categorize
/// the plugin.
#[repr(usize)]
#[derive(Clone, Copy, Debug)]
pub enum Category {
    /// Unknown / not implemented
    Unknown,
    /// Any effect
    Effect,
    /// VST instrument
    Synth,
    /// Scope, tuner, spectrum analyser, etc.
    Analysis,
    /// Dynamics, etc.
    Mastering,
    /// Panners, etc.
    // NOTE(review): variant name appears to be a misspelling of "Spatializer";
    // kept as-is since renaming would break the public API.
    Spacializer,
    /// Delays and Reverbs
    RoomFx,
    /// Dedicated surround processor.
    SurroundFx,
    /// Denoiser, etc.
    Restoration,
    /// Offline processing.
    OfflineProcess,
    /// Contains other plugins.
    Shell,
    /// Tone generator, etc.
    Generator
}
impl_clike!(Category);
// Dispatcher opcodes sent from the host to the plugin. `repr(usize)` +
// declaration order define the numeric values (converted via `impl_clike!`
// below); do not reorder variants.
#[repr(usize)]
#[derive(Clone, Copy, Debug)]
#[doc(hidden)]
pub enum OpCode {
    /// Called when plugin is initialized.
    Initialize,
    /// Called when plugin is being shut down.
    Shutdown,

    /// [value]: preset number to change to.
    ChangePreset,
    /// [return]: current preset number.
    GetCurrentPresetNum,
    /// [ptr]: char array with new preset name, limited to `consts::MAX_PRESET_NAME_LEN`.
    SetCurrentPresetName,
    /// [ptr]: char buffer for current preset name, limited to `consts::MAX_PRESET_NAME_LEN`.
    GetCurrentPresetName,

    /// [index]: parameter
    /// [ptr]: char buffer, limited to `consts::MAX_PARAM_STR_LEN` (e.g. "db", "ms", etc)
    GetParameterLabel,
    /// [index]: parameter
    /// [ptr]: char buffer, limited to `consts::MAX_PARAM_STR_LEN` (e.g. "0.5", "ROOM", etc).
    GetParameterDisplay,
    /// [index]: parameter
    /// [ptr]: char buffer, limited to `consts::MAX_PARAM_STR_LEN` (e.g. "Release", "Gain")
    GetParameterName,

    /// Deprecated.
    _GetVu,

    /// [opt]: new sample rate.
    SetSampleRate,
    /// [value]: new maximum block size.
    SetBlockSize,
    /// [value]: 1 when plugin enabled, 0 when disabled.
    StateChanged,

    /// [ptr]: Rect** receiving pointer to editor size.
    EditorGetRect,
    /// [ptr]: system dependent window pointer, eg HWND on Windows.
    EditorOpen,
    /// Close editor. No arguments.
    EditorClose,

    /// Deprecated.
    _EditorDraw,
    /// Deprecated.
    _EditorMouse,
    /// Deprecated.
    _EditorKey,

    /// Idle call from host.
    EditorIdle,

    /// Deprecated.
    _EditorTop,
    /// Deprecated.
    _EditorSleep,
    /// Deprecated.
    _EditorIdentify,

    /// [ptr]: pointer for chunk data address (void**).
    /// [index]: 0 for bank, 1 for program
    GetData,
    /// [ptr]: data (void*)
    /// [value]: data size in bytes
    /// [index]: 0 for bank, 1 for program
    SetData,

    /// [ptr]: VstEvents* TODO: Events
    ProcessEvents,
    /// [index]: param index
    /// [return]: 1=true, 0=false
    CanBeAutomated,
    /// [index]: param index
    /// [ptr]: parameter string
    /// [return]: true for success
    StringToParameter,

    /// Deprecated.
    _GetNumCategories,

    /// [index]: program name
    /// [ptr]: char buffer for name, limited to `consts::MAX_PRESET_NAME_LEN`
    /// [return]: true for success
    GetPresetName,

    /// Deprecated.
    _CopyPreset,
    /// Deprecated.
    _ConnectIn,
    /// Deprecated.
    _ConnectOut,

    /// [index]: input index
    /// [ptr]: `VstPinProperties`
    /// [return]: 1 if supported
    GetInputInfo,
    /// [index]: output index
    /// [ptr]: `VstPinProperties`
    /// [return]: 1 if supported
    GetOutputInfo,
    /// [return]: `PluginCategory` category.
    GetCategory,

    /// Deprecated.
    _GetCurrentPosition,
    /// Deprecated.
    _GetDestinationBuffer,

    /// [ptr]: `VstAudioFile` array
    /// [value]: count
    /// [index]: start flag
    OfflineNotify,
    /// [ptr]: `VstOfflineTask` array
    /// [value]: count
    OfflinePrepare,
    /// [ptr]: `VstOfflineTask` array
    /// [value]: count
    OfflineRun,

    /// [ptr]: `VstVariableIo`
    /// [use]: used for variable I/O processing (offline e.g. timestretching)
    ProcessVarIo,
    /// TODO: implement
    /// [value]: input `*mut VstSpeakerArrangement`.
    /// [ptr]: output `*mut VstSpeakerArrangement`.
    SetSpeakerArrangement,

    /// Deprecated.
    _SetBlocksizeAndSampleRate,

    /// Soft bypass (automatable).
    /// [value]: 1 = bypass, 0 = no bypass.
    SoftBypass,
    /// [ptr]: buffer for effect name, limited to `kVstMaxEffectNameLen`
    GetEffectName,

    /// Deprecated.
    _GetErrorText,

    /// [ptr]: buffer for vendor name, limited to `consts::MAX_VENDOR_STR_LEN`.
    GetVendorName,
    /// [ptr]: buffer for product name, limited to `consts::MAX_PRODUCT_STR_LEN`.
    GetProductName,
    /// [return]: vendor specific version.
    GetVendorVersion,
    /// no definition, vendor specific.
    VendorSpecific,
    /// [ptr]: "Can do" string.
    /// [return]: 1 = yes, 0 = maybe, -1 = no.
    CanDo,
    /// [return]: tail size (e.g. reverb time). 0 is default, 1 means no tail.
    GetTailSize,

    /// Deprecated.
    _Idle,
    /// Deprecated.
    _GetIcon,
    /// Deprecated.
    _SetVewPosition,

    /// [index]: param index
    /// [ptr]: `*mut VstParamInfo` //TODO: Implement
    /// [return]: 1 if supported
    GetParamInfo,

    /// Deprecated.
    _KeysRequired,

    /// [return]: 2400 for vst 2.4.
    GetApiVersion,

    /// [index]: ASCII char.
    /// [value]: `Key` keycode.
    /// [opt]: `flags::modifier_key` bitmask.
    /// [return]: 1 if used.
    EditorKeyDown,
    /// [index]: ASCII char.
    /// [value]: `Key` keycode.
    /// [opt]: `flags::modifier_key` bitmask.
    /// [return]: 1 if used.
    EditorKeyUp,
    /// [value]: 0 = circular, 1 = circular relative, 2 = linear.
    EditorSetKnobMode,

    /// [index]: MIDI channel.
    /// [ptr]: `*mut MidiProgramName`. //TODO: Implement
    /// [return]: number of used programs, 0 = unsupported.
    GetMidiProgramName,
    /// [index]: MIDI channel.
    /// [ptr]: `*mut MidiProgramName`. //TODO: Implement
    /// [return]: index of current program.
    GetCurrentMidiProgram,
    /// [index]: MIDI channel.
    /// [ptr]: `*mut MidiProgramCategory`. //TODO: Implement
    /// [return]: number of used categories.
    GetMidiProgramCategory,
    /// [index]: MIDI channel.
    /// [return]: 1 if `MidiProgramName` or `MidiKeyName` has changed. //TODO: Implement
    HasMidiProgramsChanged,
    /// [index]: MIDI channel.
    /// [ptr]: `*mut MidiKeyName`. //TODO: Implement
    /// [return]: 1 = supported 0 = not.
    GetMidiKeyName,

    /// Called before a preset is loaded.
    BeginSetPreset,
    /// Called after a preset is loaded.
    EndSetPreset,

    /// [value]: inputs `*mut VstSpeakerArrangement` //TODO: Implement
    /// [ptr]: Outputs `*mut VstSpeakerArrangement`
    GetSpeakerArrangement,
    /// [ptr]: buffer for plugin name, limited to `consts::MAX_PRODUCT_STR_LEN`.
    /// [return]: next plugin's uniqueID.
    ShellGetNextPlugin,

    /// No args. Called once before start of process call. This indicates that the process call
    /// will be interrupted (e.g. Host reconfiguration or bypass when plugin doesn't support
    /// SoftBypass)
    StartProcess,
    /// No arguments. Called after stop of process call.
    StopProcess,
    /// [value]: number of samples to process. Called in offline mode before process.
    SetTotalSampleToProcess,
    /// [value]: pan law `PanLaw`. //TODO: Implement
    /// [opt]: gain.
    SetPanLaw,

    /// [ptr]: `*mut VstPatchChunkInfo`. //TODO: Implement
    /// [return]: -1 = bank cant be loaded, 1 = can be loaded, 0 = unsupported.
    BeginLoadBank,
    /// [ptr]: `*mut VstPatchChunkInfo`. //TODO: Implement
    /// [return]: -1 = bank cant be loaded, 1 = can be loaded, 0 = unsupported.
    BeginLoadPreset,

    /// [value]: 0 if 32 bit, anything else if 64 bit.
    SetPrecision,

    /// [return]: number of used MIDI Inputs (1-15).
    GetNumMidiInputs,
    /// [return]: number of used MIDI Outputs (1-15).
    GetNumMidiOutputs,
}
impl_clike!(OpCode);
/// A structure representing static plugin information.
///
/// Returned by `Plugin::get_info`; the host uses it to describe and
/// categorize the plugin.
#[derive(Clone, Debug)]
pub struct Info {
    /// Plugin Name.
    pub name: String,

    /// Plugin Vendor.
    pub vendor: String,

    /// Number of different presets.
    pub presets: i32,

    /// Number of parameters.
    pub parameters: i32,

    /// Number of inputs.
    pub inputs: i32,

    /// Number of outputs.
    pub outputs: i32,

    /// Unique plugin ID. Can be registered with Steinberg to prevent conflicts with other plugins.
    ///
    /// This ID is used to identify a plugin during save and load of a preset and project.
    pub unique_id: i32,

    /// Plugin version (e.g. 0001 = `v0.0.0.1`, 1283 = `v1.2.8.3`).
    pub version: i32,

    /// Plugin category. Possible values are found in `enums::PluginCategory`.
    pub category: Category,

    /// Latency of the plugin in samples.
    ///
    /// This reports how many samples it takes for the plugin to create an output (group delay).
    pub initial_delay: i32,

    /// Indicates that preset data is handled in formatless chunks.
    ///
    /// If false, host saves and restores plugin by reading/writing parameter data. If true, it is
    /// up to the plugin to manage saving preset data by implementing the
    /// `{get, load}_{preset, bank}_chunks()` methods. Default is `false`.
    pub preset_chunks: bool,

    /// Indicates whether this plugin can process f64 based `AudioBuffer` buffers.
    ///
    /// Default is `true`.
    pub f64_precision: bool,

    /// If this is true, the plugin will not produce sound when the input is silence.
    ///
    /// Default is `false`.
    pub silent_when_stopped: bool,
}
impl Default for Info {
    /// Sensible defaults for a stereo effect plugin.
    ///
    /// `unique_id` must be overridden by every real plugin.
    fn default() -> Info {
        Info {
            name: "VST".to_string(),
            vendor: String::new(),
            presets: 1, // default preset
            parameters: 0,
            inputs: 2, // Stereo in,out
            outputs: 2,
            unique_id: 0, // This must be changed.
            version: 1, // v0.0.0.1 (was written `0001`; zero-prefixed literals are misleading)
            category: Category::Effect,
            initial_delay: 0,
            preset_chunks: false,
            f64_precision: true,
            silent_when_stopped: false,
        }
    }
}
/// Features which are optionally supported by a plugin. These are queried by the host at run time.
///
/// Variant names mirror the corresponding VST "can do" strings; see the
/// `FromStr` implementation for the exact mapping.
#[derive(Debug)]
#[allow(missing_docs)]
pub enum CanDo {
    SendEvents,
    SendMidiEvent,
    ReceiveEvents,
    ReceiveMidiEvent,
    ReceiveTimeInfo,
    Offline,
    MidiProgramNames,
    Bypass,
    ReceiveSysExEvent,

    //Bitwig specific?
    MidiSingleNoteTuningChange,
    MidiKeyBasedInstrumentControl,

    /// Any "can do" string not covered by the variants above, stored verbatim.
    Other(String)
}
use std::str::FromStr;
impl FromStr for CanDo {
    type Err = String;

    /// Parse a host "can do" query string into a `CanDo` variant.
    ///
    /// Unrecognized strings are preserved verbatim in `CanDo::Other`, so this
    /// never returns `Err` (the `Err` type is effectively unused).
    fn from_str(s: &str) -> Result<CanDo, String> {
        use self::CanDo::*;
        Ok(match s {
            "sendVstEvents" => SendEvents,
            "sendVstMidiEvent" => SendMidiEvent,
            "receiveVstEvents" => ReceiveEvents,
            "receiveVstMidiEvent" => ReceiveMidiEvent,
            "receiveVstTimeInfo" => ReceiveTimeInfo,
            "offline" => Offline,
            "midiProgramNames" => MidiProgramNames,
            "bypass" => Bypass,

            "receiveVstSysexEvent" => ReceiveSysExEvent,
            "midiSingleNoteTuningChange" => MidiSingleNoteTuningChange,
            "midiKeyBasedInstrumentControl" => MidiKeyBasedInstrumentControl,
            otherwise => Other(otherwise.to_string())
        })
    }
}
/// Convert a `CanDo` back into its VST "can do" string.
///
/// Implemented as `From` rather than `Into` (the standard-library blanket impl
/// provides `Into<String> for CanDo` automatically, so existing `.into()`
/// callers are unaffected).
impl From<CanDo> for String {
    fn from(can_do: CanDo) -> String {
        use self::CanDo::*;
        match can_do {
            SendEvents => "sendVstEvents".to_string(),
            SendMidiEvent => "sendVstMidiEvent".to_string(),
            ReceiveEvents => "receiveVstEvents".to_string(),
            ReceiveMidiEvent => "receiveVstMidiEvent".to_string(),
            ReceiveTimeInfo => "receiveVstTimeInfo".to_string(),
            Offline => "offline".to_string(),
            MidiProgramNames => "midiProgramNames".to_string(),
            Bypass => "bypass".to_string(),

            ReceiveSysExEvent => "receiveVstSysexEvent".to_string(),
            MidiSingleNoteTuningChange => "midiSingleNoteTuningChange".to_string(),
            MidiKeyBasedInstrumentControl => "midiKeyBasedInstrumentControl".to_string(),
            Other(other) => other
        }
    }
}
/// Must be implemented by all VST plugins.
///
/// All methods except `get_info` provide a default implementation which does nothing and can be
/// safely overridden.
#[allow(unused_variables)]
pub trait Plugin {
    /// This method must return an `Info` struct.
    fn get_info(&self) -> Info;

    /// Called during initialization to pass a `HostCallback` to the plugin.
    ///
    /// This method can be overridden to set `host` as a field in the plugin struct.
    ///
    /// # Example
    ///
    /// ```
    /// // ...
    /// # extern crate vst2;
    /// # #[macro_use] extern crate log;
    /// # use vst2::plugin::{Plugin, Info};
    /// use vst2::plugin::HostCallback;
    ///
    /// # #[derive(Default)]
    /// struct ExamplePlugin {
    ///     host: HostCallback
    /// }
    ///
    /// impl Plugin for ExamplePlugin {
    ///     fn new(host: HostCallback) -> ExamplePlugin {
    ///         ExamplePlugin {
    ///             host: host
    ///         }
    ///     }
    ///
    ///     fn init(&mut self) {
    ///         info!("loaded with host vst version: {}", self.host.vst_version());
    ///     }
    ///
    ///     // ...
    /// #     fn get_info(&self) -> Info {
    /// #         Info {
    /// #             name: "Example Plugin".to_string(),
    /// #             ..Default::default()
    /// #         }
    /// #     }
    /// }
    ///
    /// # fn main() {}
    /// ```
    fn new(host: HostCallback) -> Self where Self: Sized + Default {
        Default::default()
    }

    /// Called when plugin is fully initialized.
    fn init(&mut self) { trace!("Initialized vst plugin."); }

    /// Set the current preset to the index specified by `preset`.
    fn change_preset(&mut self, preset: i32) { }

    /// Get the current preset index.
    fn get_preset_num(&self) -> i32 { 0 }

    /// Set the current preset name.
    fn set_preset_name(&mut self, name: String) { }

    /// Get the name of the preset at the index specified by `preset`.
    fn get_preset_name(&self, preset: i32) -> String { "".to_string() }

    /// Get parameter label for parameter at `index` (e.g. "db", "sec", "ms", "%").
    fn get_parameter_label(&self, index: i32) -> String { "".to_string() }

    /// Get the parameter value for parameter at `index` (e.g. "1.0", "150", "Plate", "Off").
    // Default renders the raw parameter value with 3 decimal places.
    fn get_parameter_text(&self, index: i32) -> String {
        format!("{:.3}", self.get_parameter(index))
    }

    /// Get the name of parameter at `index`.
    fn get_parameter_name(&self, index: i32) -> String { format!("Param {}", index) }

    /// Get the value of parameter at `index`. Should be value between 0.0 and 1.0.
    fn get_parameter(&self, index: i32) -> f32 { 0.0 }

    /// Set the value of parameter at `index`. `value` is between 0.0 and 1.0.
    fn set_parameter(&mut self, index: i32, value: f32) { }

    /// Return whether parameter at `index` can be automated.
    fn can_be_automated(&self, index: i32) -> bool { false }

    /// Use String as input for parameter value. Used by host to provide an editable field to
    /// adjust a parameter value. E.g. "100" may be interpreted as 100hz for parameter. Returns if
    /// the input string was used.
    fn string_to_parameter(&mut self, index: i32, text: String) -> bool { false }

    /// Called when sample rate is changed by host.
    fn set_sample_rate(&mut self, rate: f32) { }

    /// Called when block size is changed by host.
    fn set_block_size(&mut self, size: i64) { }

    /// Called when plugin is turned on.
    fn resume(&mut self) { }

    /// Called when plugin is turned off.
    fn suspend(&mut self) { }

    /// Vendor specific handling.
    fn vendor_specific(&mut self, index: i32, value: isize, ptr: *mut c_void, opt: f32) -> isize { 0 }

    /// Return whether plugin supports specified action.
    // Default answers `Maybe` to every query; override to advertise real capabilities.
    fn can_do(&self, can_do: CanDo) -> Supported {
        info!("Host is asking if plugin can: {:?}.", can_do);
        Supported::Maybe
    }

    /// Get the tail size of plugin when it is stopped. Used in offline processing as well.
    fn get_tail_size(&self) -> isize { 0 }

    /// Process an audio buffer containing `f32` values.
    ///
    /// # Example
    /// ```no_run
    /// # use vst2::plugin::{Info, Plugin};
    /// # use vst2::buffer::AudioBuffer;
    /// #
    /// # struct ExamplePlugin;
    /// # impl Plugin for ExamplePlugin {
    /// #     fn get_info(&self) -> Info { Default::default() }
    /// #
    /// // Processor that clips samples above 0.4 or below -0.4:
    /// fn process(&mut self, buffer: AudioBuffer<f32>){
    ///     let (inputs, mut outputs) = buffer.split();
    ///
    ///     for (channel, ibuf) in inputs.iter().enumerate() {
    ///         for (i, sample) in ibuf.iter().enumerate() {
    ///             outputs[channel][i] = if *sample > 0.4 {
    ///                 0.4
    ///             } else if *sample < -0.4 {
    ///                 -0.4
    ///             } else {
    ///                 *sample
    ///             };
    ///         }
    ///     }
    /// }
    /// # }
    /// ```
    // Default implementation passes input through to output unchanged.
    fn process(&mut self, buffer: AudioBuffer<f32>) {
        // For each input and output
        for (input, output) in buffer.zip() {
            // For each input sample and output sample in buffer
            for (in_frame, out_frame) in input.into_iter().zip(output.into_iter()) {
                *out_frame = *in_frame;
            }
        }
    }

    /// Process an audio buffer containing `f64` values.
    ///
    /// # Example
    /// ```no_run
    /// # use vst2::plugin::{Info, Plugin};
    /// # use vst2::buffer::AudioBuffer;
    /// #
    /// # struct ExamplePlugin;
    /// # impl Plugin for ExamplePlugin {
    /// #     fn get_info(&self) -> Info { Default::default() }
    /// #
    /// // Processor that clips samples above 0.4 or below -0.4:
    /// fn process_f64(&mut self, buffer: AudioBuffer<f64>){
    ///     let (inputs, mut outputs) = buffer.split();
    ///
    ///     for (channel, ibuf) in inputs.iter().enumerate() {
    ///         for (i, sample) in ibuf.iter().enumerate() {
    ///             outputs[channel][i] = if *sample > 0.4 {
    ///                 0.4
    ///             } else if *sample < -0.4 {
    ///                 -0.4
    ///             } else {
    ///                 *sample
    ///             };
    ///         }
    ///     }
    /// }
    /// # }
    /// ```
    // Default implementation passes input through to output unchanged.
    fn process_f64(&mut self, buffer: AudioBuffer<f64>) {
        // For each input and output
        for (input, output) in buffer.zip() {
            // For each input sample and output sample in buffer
            for (in_frame, out_frame) in input.into_iter().zip(output.into_iter()) {
                *out_frame = *in_frame;
            }
        }
    }

    /// Handle incoming events sent from the host.
    ///
    /// This is always called before the start of `process` or `process_f64`.
    fn process_events(&mut self, events: Vec<Event>) {}

    /// Return handle to plugin editor if supported.
    fn get_editor(&mut self) -> Option<&mut Editor> { None }

    /// If `preset_chunks` is set to true in plugin info, this should return the raw chunk data for
    /// the current preset.
    fn get_preset_data(&mut self) -> Vec<u8> { Vec::new() }

    /// If `preset_chunks` is set to true in plugin info, this should return the raw chunk data for
    /// the current plugin bank.
    fn get_bank_data(&mut self) -> Vec<u8> { Vec::new() }

    /// If `preset_chunks` is set to true in plugin info, this should load a preset from the given
    /// chunk data.
    fn load_preset_data(&mut self, data: &[u8]) {}

    /// If `preset_chunks` is set to true in plugin info, this should load a preset bank from the
    /// given chunk data.
    fn load_bank_data(&mut self, data: &[u8]) {}

    /// Get information about an input channel. Only used by some hosts.
    fn get_input_info(&self, input: i32) -> ChannelInfo {
        ChannelInfo::new(format!("Input channel {}", input),
                         Some(format!("In {}", input)),
                         true, None)
    }

    /// Get information about an output channel. Only used by some hosts.
    fn get_output_info(&self, output: i32) -> ChannelInfo {
        ChannelInfo::new(format!("Output channel {}", output),
                         Some(format!("Out {}", output)),
                         true, None)
    }
}
/// A reference to the host which allows the plugin to call back and access information.
///
/// # Panics
///
/// All methods in this struct will panic if the plugin has not yet been initialized. In practice,
/// this can only occur if the plugin queries the host for information when `Default::default()` is
/// called.
///
/// ```should_panic
/// # use vst2::plugin::{Info, Plugin, HostCallback};
/// struct ExamplePlugin;
///
/// impl Default for ExamplePlugin {
///     fn default() -> ExamplePlugin {
///         // Will panic, don't do this. If needed, you can query
///         // the host during initialization via Vst::new()
///         let host: HostCallback = Default::default();
///         let version = host.vst_version();
///
///         // ...
///         # ExamplePlugin
///     }
/// }
/// #
/// # impl Plugin for ExamplePlugin {
/// #     fn get_info(&self) -> Info { Default::default() }
/// # }
/// # fn main() { let plugin: ExamplePlugin = Default::default(); }
/// ```
pub struct HostCallback {
    // `None` until the host supplies its dispatcher via `wrap()`.
    callback: Option<HostCallbackProc>,
    // Pointer to this plugin's `AEffect` instance; null when default-constructed.
    effect: *mut AEffect,
}
/// `HostCallback` implements `Default` so that the plugin can implement `Default` and have a
/// `HostCallback` field.
impl Default for HostCallback {
    /// Produce an uninitialized callback: no host dispatcher and a null
    /// `AEffect` pointer. Querying the host through this value will panic.
    fn default() -> HostCallback {
        let callback = None;
        let effect = ptr::null_mut();
        HostCallback { callback, effect }
    }
}
impl HostCallback {
    /// Wrap callback in a function to avoid using fn pointer notation.
    ///
    /// Panics if the host callback was never provided (i.e. the struct was
    /// built via `Default` instead of `wrap`).
    #[doc(hidden)]
    fn callback(&self,
                effect: *mut AEffect,
                opcode: host::OpCode,
                index: i32,
                value: isize,
                ptr: *mut c_void,
                opt: f32)
                -> isize {
        // `expect` replaces the former `unwrap_or_else(|| panic!(..))`; the
        // panic message is unchanged.
        let callback = self.callback.expect("Host not yet initialized.");
        callback(effect, opcode.into(), index, value, ptr, opt)
    }

    /// Check whether the plugin has been initialized.
    #[doc(hidden)]
    fn is_effect_valid(&self) -> bool {
        // A default-constructed `HostCallback` holds a null `effect` pointer;
        // dereferencing it (as the old `transmute` + deref did) was undefined
        // behaviour. Treat null as "not initialized" instead.
        if self.effect.is_null() {
            return false;
        }
        // An initialized `AEffect` struct begins with the VST magic number;
        // read just the leading i32 via a plain pointer cast (no `transmute`).
        unsafe { *(self.effect as *mut i32) == VST_MAGIC }
    }

    /// Create a new Host structure wrapping a host callback.
    #[doc(hidden)]
    pub fn wrap(callback: HostCallbackProc, effect: *mut AEffect) -> HostCallback {
        HostCallback {
            callback: Some(callback),
            effect: effect,
        }
    }

    /// Get the VST API version supported by the host e.g. `2400 = VST 2.4`.
    pub fn vst_version(&self) -> i32 {
        self.callback(self.effect, host::OpCode::Version,
                      0, 0, ptr::null_mut(), 0.0) as i32
    }

    /// Read a host string for an opcode that takes no index/value/opt arguments.
    fn read_string(&self, opcode: host::OpCode, max: usize) -> String {
        self.read_string_param(opcode, 0, 0, 0.0, max)
    }

    /// Ask the host to fill a buffer of up to `max` bytes and return it as a
    /// `String`, truncated at the first NUL byte (invalid UTF-8 is replaced
    /// lossily).
    fn read_string_param(&self,
                         opcode: host::OpCode,
                         index: i32,
                         value: isize,
                         opt: f32,
                         max: usize)
                         -> String {
        let mut buf = vec![0; max];
        self.callback(self.effect, opcode, index, value, buf.as_mut_ptr() as *mut c_void, opt);
        String::from_utf8_lossy(&buf).chars().take_while(|c| *c != '\0').collect()
    }
}
impl Host for HostCallback {
    // Tell the host a parameter changed; silently dropped if the plugin is
    // not yet initialized.
    fn automate(&mut self, index: i32, value: f32) {
        if self.is_effect_valid() { // TODO: Investigate removing this check, should be up to host
            self.callback(self.effect, host::OpCode::Automate,
                          index, 0, ptr::null_mut(), value);
        }
    }

    // Ask the host which unique plugin ID it is currently loading.
    fn get_plugin_id(&self) -> i32 {
        self.callback(self.effect, host::OpCode::CurrentId,
                      0, 0, ptr::null_mut(), 0.0) as i32
    }

    fn idle(&self) {
        self.callback(self.effect, host::OpCode::Idle,
                      0, 0, ptr::null_mut(), 0.0);
    }

    fn get_info(&self) -> (isize, String, String) {
        use api::consts::*;
        // NOTE(review): this issues `OpCode::CurrentId` yet labels the result
        // "version" — confirm whether a vendor-version opcode was intended here.
        let version = self.callback(self.effect, host::OpCode::CurrentId, 0, 0, ptr::null_mut(), 0.0) as isize;
        let vendor_name = self.read_string(host::OpCode::GetVendorString, MAX_VENDOR_STR_LEN);
        let product_name = self.read_string(host::OpCode::GetProductString, MAX_PRODUCT_STR_LEN);
        (version, vendor_name, product_name)
    }

    /// Send events to the host.
    ///
    /// This should only be called within [`process`] or [`process_f64`]. Calling `process_events`
    /// anywhere else is undefined behaviour and may crash some hosts.
    ///
    /// [`process`]: trait.Plugin.html#method.process
    /// [`process_f64`]: trait.Plugin.html#method.process_f64
    fn process_events(&mut self, events: Vec<Event>) {
        use interfaces;
        // `interfaces::process_events` packs the events into the C ABI layout
        // and hands us a raw pointer to forward to the host dispatcher.
        interfaces::process_events(
            events,
            |ptr| {
                self.callback(
                    self.effect,
                    host::OpCode::ProcessEvents,
                    0,
                    0,
                    ptr,
                    0.0
                );
            }
        );
    }
}
#[cfg(test)]
mod tests {
    use std::ptr;

    use plugin;

    /// Create a plugin instance.
    ///
    /// This is a macro to allow you to specify attributes on the created struct.
    macro_rules! make_plugin {
        ($($attr:meta) *) => {
            use libc::c_void;

            use main;
            use api::AEffect;
            use host::{Host, OpCode};
            use plugin::{HostCallback, Info, Plugin};

            $(#[$attr]) *
            struct TestPlugin {
                host: HostCallback
            }

            impl Plugin for TestPlugin {
                fn get_info(&self) -> Info {
                    Info {
                        name: "Test Plugin".to_string(),
                        ..Default::default()
                    }
                }

                fn new(host: HostCallback) -> TestPlugin {
                    TestPlugin {
                        host: host
                    }
                }

                fn init(&mut self) {
                    info!("Loaded with host vst version: {}", self.host.vst_version());
                    assert_eq!(2400, self.host.vst_version());
                    assert_eq!(9876, self.host.get_plugin_id());
                    // Callback will assert these.
                    self.host.automate(123, 12.3);
                    self.host.idle();
                }
            }

            // Build an `AEffect` wired to a fake host callback that answers
            // the opcodes exercised by `TestPlugin::init` above.
            #[allow(dead_code)]
            fn instance() -> *mut AEffect {
                fn host_callback(_effect: *mut AEffect,
                                 opcode: i32,
                                 index: i32,
                                 _value: isize,
                                 _ptr: *mut c_void,
                                 opt: f32)
                                 -> isize {
                    let opcode = OpCode::from(opcode);
                    match opcode {
                        OpCode::Automate => {
                            assert_eq!(index, 123);
                            assert_eq!(opt, 12.3);
                            0
                        }
                        OpCode::Version => 2400,
                        OpCode::CurrentId => 9876,
                        OpCode::Idle => 0,
                        _ => 0
                    }
                }

                main::<TestPlugin>(host_callback)
            }
        }
    }

    make_plugin!(derive(Default));

    // Querying the host from `Default::default()` must panic (the callback is None).
    #[test]
    #[should_panic]
    fn null_panic() {
        make_plugin!(/* no `derive(Default)` */);

        impl Default for TestPlugin {
            fn default() -> TestPlugin {
                let plugin = TestPlugin { host: Default::default() };

                // Should panic
                let version = plugin.host.vst_version();
                info!("Loaded with host vst version: {}", version);

                plugin
            }
        }

        TestPlugin::default();
    }

    // Dispatching Initialize triggers `TestPlugin::init`, whose asserts
    // verify every host callback round-trip.
    #[test]
    fn host_callbacks() {
        let aeffect = instance();
        (unsafe { (*aeffect).dispatcher })(aeffect, plugin::OpCode::Initialize.into(),
                                           0, 0, ptr::null_mut(), 0.0);
    }
}
|
extern crate nalgebra as na;
#[allow(unused_imports)]
use na::{Vector,Dim,Real,Vector4,Vector3,Vector2,U1,Matrix,DVector,Dynamic,VecStorage};
use na::storage::{Storage};
use std::cmp;
enum ConvolveMode{
Full,
Valid,
Same
}
/// Full discrete convolution of `vector` with `kernel`.
///
/// Output length is `vec + ker - 1`. Assumes both inputs are non-empty and
/// `vector` is at least as long as `kernel` (guaranteed by `convolve`) —
/// TODO confirm for direct callers.
fn convolve_full<R: Real, D: Dim, E: Dim, S: Storage<R, D>, Q: Storage<R, E>>(
    vector: Vector<R, D, S>,
    kernel: Vector<R, E, Q>,
) -> Matrix<R, Dynamic, U1, VecStorage<R, Dynamic, U1>> {
    let vec = vector.len();
    let ker = kernel.len();
    let len = vec + ker - 1;
    let mut conv = DVector::<R>::zeros(len);

    // All index arithmetic stays in usize: the previous `as i8` casts
    // silently overflowed for inputs longer than 127 elements.
    for i in 0..len {
        // Range of u such that 0 <= u < vec and 0 <= i - u < ker; with these
        // bounds no per-iteration kernel-range guard is needed.
        let u_lo = if i >= ker { i - ker + 1 } else { 0 };
        let u_hi = cmp::min(i, vec - 1);
        for u in u_lo..(u_hi + 1) {
            conv[i] += vector[u] * kernel[i - u];
        }
    }
    conv
}
/// Valid-mode convolution: only positions where the kernel fully overlaps
/// the vector, giving `vec - ker + 1` output samples.
fn convolve_valid<R: Real, D: Dim, E: Dim, S: Storage<R, D>, Q: Storage<R, E>>(
    vector: Vector<R, D, S>,
    kernel: Vector<R, E, Q>,
) -> Matrix<R, Dynamic, U1, VecStorage<R, Dynamic, U1>> {
    let ker = kernel.len();
    let out_len = vector.len() - ker + 1;
    let mut conv = DVector::<R>::zeros(out_len);

    // Sweep one kernel tap at a time across all output positions; each output
    // element still accumulates its taps in the same ascending order as before.
    for tap in 0..ker {
        let coeff = kernel[ker - tap - 1];
        for out in 0..out_len {
            conv[out] += vector[out + tap] * coeff;
        }
    }
    conv
}
/// Same-mode convolution: the centered `vec`-length middle of the full
/// convolution (matching the conventional "same" definition).
///
/// The previous body was a verbatim copy of `convolve_full`, so `Same` mode
/// returned a `vec + ker - 1`-length result identical to `Full`; it also used
/// `as i8` casts that overflowed for inputs longer than 127 elements.
fn convolve_same<R: Real, D: Dim, E: Dim, S: Storage<R, D>, Q: Storage<R, E>>(
    vector: Vector<R, D, S>,
    kernel: Vector<R, E, Q>,
) -> Matrix<R, Dynamic, U1, VecStorage<R, Dynamic, U1>> {
    let vec = vector.len();
    let ker = kernel.len();
    // Offset of the "same" window inside the full convolution.
    let shift = (ker - 1) / 2;
    let mut conv = DVector::<R>::zeros(vec);

    for i in 0..vec {
        let t = i + shift; // corresponding index in the full convolution
        let u_lo = if t >= ker { t - ker + 1 } else { 0 };
        let u_hi = cmp::min(t, vec - 1);
        for u in u_lo..(u_hi + 1) {
            conv[i] += vector[u] * kernel[t - u];
        }
    }
    conv
}
/// Convolve `vector` with `kernel`, dispatching on `mode` (`Full` when `None`).
fn convolve<R: Real, D: Dim, E: Dim, S: Storage<R, D>, Q: Storage<R, E>>(
    vector: Vector<R, D, S>,
    kernel: Vector<R, E, Q>,
    mode: Option<ConvolveMode>,
) -> Matrix<R, Dynamic, U1, VecStorage<R, Dynamic, U1>> {
    // Convolution is commutative: always treat the longer operand as the
    // signal and the shorter one as the kernel.
    if kernel.len() > vector.len() {
        return convolve(kernel, vector, mode);
    }

    // Match is an expression — no per-arm `return` needed.
    match mode.unwrap_or(ConvolveMode::Full) {
        ConvolveMode::Full => convolve_full(vector, kernel),
        ConvolveMode::Valid => convolve_valid(vector, kernel),
        ConvolveMode::Same => convolve_same(vector, kernel),
    }
}
fn main() {
    // The kernel argument is longer than the signal here; `convolve` swaps
    // the operands internally so the shorter one acts as the kernel.
    let signal = Vector2::new(3.0, 1.0);
    let weights = Vector4::new(1.0, 2.0, 5.0, 9.0);
    let result = convolve(signal, weights, Some(ConvolveMode::Valid));
    println!("{:?}", result)
}
Cleaned up some labels and steps
extern crate nalgebra as na;
use na::storage::Storage;
#[allow(unused_imports)]
use na::{
DMatrix, DVector, Dim, Dynamic, Matrix, Matrix2x3, Real, VecStorage, Vector, Vector2, Vector3,
Vector4, U1,
};
use std::cmp;
/// Output mode for `convolve` (the conventional full/valid/same trio).
enum ConvolveMode {
    Full,
    Valid,
    Same,
}
fn convolve_full<R: Real, D: Dim, E: Dim, S: Storage<R, D>, Q: Storage<R, E>>(
vector: Vector<R, D, S>,
kernel: Vector<R, E, Q>,
) -> Matrix<R, Dynamic, U1, VecStorage<R, Dynamic, U1>> {
let vec = vector.len();
let ker = kernel.len();
let newlen = vec + ker - 1;
let mut conv = DVector::<R>::zeros(newlen);
for i in 0..newlen {
let u_i = if i > ker {i - ker} else {0};
let u_f = cmp::min(i, vec - 1);
if u_i == u_f {
conv[i] += vector[u_i] * kernel[(i - u_i)];
} else {
for u in u_i..(u_f + 1) {
if i - u < ker {
conv[i] += vector[u] * kernel[(i - u)];
}
}
}
}
conv
}
/// Valid-mode convolution: only positions where the kernel fully overlaps
/// the vector, giving `vec - ker + 1` output samples.
fn convolve_valid<R: Real, D: Dim, E: Dim, S: Storage<R, D>, Q: Storage<R, E>>(
    vector: Vector<R, D, S>,
    kernel: Vector<R, E, Q>,
) -> Matrix<R, Dynamic, U1, VecStorage<R, Dynamic, U1>> {
    let ker = kernel.len();
    let out_len = vector.len() - ker + 1;
    let mut conv = DVector::<R>::zeros(out_len);

    // Sweep one kernel tap at a time across all output positions; each output
    // element still accumulates its taps in the same ascending order as before.
    for tap in 0..ker {
        let coeff = kernel[ker - tap - 1];
        for out in 0..out_len {
            conv[out] += vector[out + tap] * coeff;
        }
    }
    conv
}
fn convolve_same<R: Real, D: Dim, E: Dim, S: Storage<R, D>, Q: Storage<R, E>>(
vector: Vector<R, D, S>,
kernel: Vector<R, E, Q>,
) -> Matrix<R, Dynamic, U1, VecStorage<R, Dynamic, U1>> {
let vec = vector.len();
let ker = kernel.len();
let newlen = vec + ker - 1;
let mut conv = DVector::<R>::zeros(newlen);
for i in 0..newlen {
// let u_i = cmp::max(0, i - k);
// let u_f = cmp::min(i, v - 1);
// if u_i == u_f {
// conv[i as usize] += vector[u_i as usize] * kernel[(i - u_i) as usize];
// } else {
// for u in u_i..(u_f + 1) {
// if i - u < k {
// conv[i as usize] += vector[u as usize] * kernel[(i - u) as usize];
// }
// }
// }
}
conv
}
/// Convolve `vector` with `kernel`, dispatching on `mode` (`Full` when `None`).
fn convolve<R: Real, D: Dim, E: Dim, S: Storage<R, D>, Q: Storage<R, E>>(
    vector: Vector<R, D, S>,
    kernel: Vector<R, E, Q>,
    mode: Option<ConvolveMode>,
) -> Matrix<R, Dynamic, U1, VecStorage<R, Dynamic, U1>> {
    // Convolution is commutative: always treat the longer operand as the
    // signal and the shorter one as the kernel.
    if kernel.len() > vector.len() {
        return convolve(kernel, vector, mode);
    }

    // Match is an expression — no per-arm `return` needed.
    match mode.unwrap_or(ConvolveMode::Full) {
        ConvolveMode::Full => convolve_full(vector, kernel),
        ConvolveMode::Valid => convolve_valid(vector, kernel),
        ConvolveMode::Same => convolve_same(vector, kernel),
    }
}
fn main() {
    // Convolve a 2-element signal with a 4-element kernel in Valid mode.
    let v1 = Vector2::new(3.0, 1.0);
    let v2 = Vector4::new(1.0, 2.0, 5.0, 9.0);
    let x = convolve(v1, v2, Some(ConvolveMode::Valid));
    println!("{:?}", x);

    // Walk the anti-diagonal of a 2x3 matrix. Safe indexing replaces the
    // previous unnecessary `unsafe { get_unchecked_mut(..) }` for a plain
    // read (dead commented-out experiments removed).
    let m = Matrix2x3::new(1.1, 1.2, 1.3,
                           2.1, 2.2, 2.3);
    println!("m={:?}", m);
    for i in 0..std::cmp::min(m.nrows(), m.ncols()) {
        let j = m.ncols() - i - 1;
        // Each element was previously printed twice (once via safe indexing,
        // once via the unchecked pointer); output is preserved.
        println!("m({:?},{:?})={:?}", i, j, m[(i, j)]);
        println!("m({:?},{:?})={:?}", i, j, m[(i, j)]);
    }
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use autocxx::include_cpp;
use autocxx::subclass::{is_subclass, CppSubclass};
use cxx::CxxString;
use std::cell::RefCell;
include_cpp! {
// C++ headers we want to include.
#include "messages.h"
// Safety policy. We are marking that this whole C++ inclusion is unsafe
// which means the functions themselves do not need to be marked
// as unsafe. Other policies are possible.
safety!(unsafe)
// What types and functions we want to generate
generate!("run_demo")
generate!("register_cpp_thingies")
generate!("register_producer")
generate!("register_displayer")
// Declare some C++ classes have Rust subclasses.
subclass!("MessageDisplayer",UwuDisplayer)
subclass!("MessageDisplayer",BoxDisplayer)
subclass!("MessageProducer",QuoteProducer)
}
static SHAKESPEARE_QUOTES: [&str; 10] = [
"All that glitters is not gold",
"Hell is empty and all the devils are here.",
"Good night, good night! parting is such sweet sorrow, That I shall say good night till it be morrow.",
"These violent delights have violent ends...",
"Something is rotten in the state of Denmark.",
"Love all, trust a few, do wrong to none.",
"The lady doth protest too much, methinks.",
"Brevity is the soul of wit.",
"Uneasy lies the head that wears a crown.",
"Now is the winter of our discontent.",
];
// The following lines define a subclass of MessageDisplayer,
// together with the "subclass!" directive above in the include_cpp!
// macro. See the main function at the bottom for how this subclass
// is instantiated.
#[is_subclass]
#[derive(Default)]
pub struct UwuDisplayer {}

impl ffi::MessageDisplayer_methods for UwuDisplayer {
    // Implements the C++ `display_message` virtual: uwuifies the text and
    // prints it to stdout.
    fn display_message(&self, msg: &CxxString) {
        // NOTE(review): `to_str().unwrap()` panics if the C++ string is not
        // valid UTF-8 — confirm messages are always UTF-8.
        let uwu = uwuifier::uwuify_str_sse(msg.to_str().unwrap());
        println!("{}", uwu);
    }
}
// And here's a different pure virtual class.
// This one is notable only in that the interface of the C++ class
// involves std::string, yet in Rust the subclass uses
// std::unique_ptr<std::string> (for all the normal reasons in autocxx -
// for now, at least, we can't hold non-trivial C++ objects on the Rust stack.)
// All the boxing and unboxing is done automatically by autocxx layers.
#[is_subclass]
#[derive(Default)]
pub struct QuoteProducer {}

impl ffi::MessageProducer_methods for QuoteProducer {
    // Implements the C++ `get_message` virtual: picks a random quote from
    // SHAKESPEARE_QUOTES and converts it to a heap-allocated C++ std::string.
    fn get_message(&self) -> cxx::UniquePtr<CxxString> {
        use ffi::ToCppString;
        SHAKESPEARE_QUOTES[fastrand::usize(0..SHAKESPEARE_QUOTES.len())].into_cpp()
    }
}
// Here's another subclass of the same 'displayer' class.
// This one is more complex in two ways.
//
// First, we actually want to store some data here in our subclass.
// That means we can't just allocate ourselves with Default::default().
// And that means we need to be aware of the cpp_peer field which is
// added by the #[subclass] macro.
//
// Second, we're going to simulate the observer/listener type pattern
// in C++ where a const* is used to send messages around a codebase yet
// recipients need to react by mutating themselves or otherwise actively
// doing stuff. In C++ you'd probably need a const_cast. Here we use
// interior mutability.
#[is_subclass]
pub struct BoxDisplayer {
    // Interior-mutable counter: display_message only receives &self but
    // must increment this on every call.
    message_count: RefCell<usize>,
}

impl BoxDisplayer {
    // Hand-rolled constructor (rather than #[derive(Default)]) because we
    // want the counter to start at 1, and because we must populate the
    // macro-inserted cpp_peer field explicitly.
    fn new() -> Self {
        Self {
            // As we're allocating this class ourselves instead of using [`Default`]
            // we need to initialize the `cpp_peer` member ourselves. This member is
            // inserted by the `#[is_subclass]` annotation. autocxx will
            // later use this to store a pointer back to the C++ peer.
            cpp_peer: Default::default(),
            message_count: RefCell::new(1usize),
        }
    }
}
impl ffi::MessageDisplayer_methods for BoxDisplayer {
    /// Prints the message wrapped to 70 columns inside a 74-column box of
    /// `#` characters, preceded by an incrementing "Message N" header.
    ///
    /// `message_count` lives in a `RefCell` because this method only gets
    /// `&self` yet must bump the counter (interior mutability, see the
    /// comment on the struct).
    fn display_message(&self, msg: &CxxString) {
        let msg = textwrap::fill(msg.to_str().unwrap(), 70);
        // str::repeat is the idiomatic (and simpler) replacement for the
        // iter::repeat(..).take(..).collect::<String>() chain.
        let horz_line = "#".repeat(74);
        println!("{}", horz_line);
        let msgmsg = format!("Message {}", self.message_count.borrow());
        self.message_count.replace_with(|old| *old + 1usize);
        println!("# {:^70} #", msgmsg);
        println!("{}", horz_line);
        for l in msg.lines() {
            println!("# {:^70} #", l);
        }
        println!("{}", horz_line);
    }
}
fn main() {
    // Register some C++ example classes.
    ffi::register_cpp_thingies();
    // Create a message displayer. We create and pass an instance of
    // the subclass on the Rust side - that's the first parameter -
    // and also a closure which calls an appropriate C++ constructor for
    // the C++ side peer object. Constructors exist for each superclass
    // constructor, so effectively this is a call to the C++ superclass
    // constructor.
    let uwu =
        UwuDisplayer::new_rust_owned(UwuDisplayer::default(), ffi::UwuDisplayerCpp::make_unique);
    // The next line casts the &UwuDisplayerCpp to a &MessageDisplayer.
    ffi::register_displayer(uwu.as_ref().borrow().as_ref());
    // Same again for the next message displayer object.
    // In each case, we could have said `new_cpp_owned` instead, which would
    // have given us a cxx::UniquePtr that we could have passed to C++.
    let boxd = BoxDisplayer::new_rust_owned(BoxDisplayer::new(), ffi::BoxDisplayerCpp::make_unique);
    ffi::register_displayer(boxd.as_ref().borrow().as_ref());
    // Let's register a producer too.
    let shakespeare =
        QuoteProducer::new_rust_owned(QuoteProducer::default(), ffi::QuoteProducerCpp::make_unique);
    ffi::register_producer(shakespeare.as_ref().borrow().as_ref());
    // Run the C++-driven demo twice; each run calls back into the Rust
    // subclasses registered above.
    ffi::run_demo();
    ffi::run_demo();
}
Allow example to build on non-Intel platforms
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use autocxx::include_cpp;
use autocxx::subclass::{is_subclass, CppSubclass};
use cxx::CxxString;
use std::cell::RefCell;
// autocxx bridge: generates the `ffi` module used throughout this file.
include_cpp! {
    // C++ headers we want to include.
    #include "messages.h"
    // Safety policy. We are marking that this whole C++ inclusion is unsafe
    // which means the functions themselves do not need to be marked
    // as unsafe. Other policies are possible.
    safety!(unsafe)
    // What types and functions we want to generate
    generate!("run_demo")
    generate!("register_cpp_thingies")
    generate!("register_producer")
    generate!("register_displayer")
    // Declare some C++ classes have Rust subclasses.
    subclass!("MessageDisplayer",UwuDisplayer)
    subclass!("MessageDisplayer",BoxDisplayer)
    subclass!("MessageProducer",QuoteProducer)
}
// Fixed pool of ten quotes; `QuoteProducer::get_message` below picks one
// uniformly at random on each call.
static SHAKESPEARE_QUOTES: [&str; 10] = [
    "All that glitters is not gold",
    "Hell is empty and all the devils are here.",
    "Good night, good night! parting is such sweet sorrow, That I shall say good night till it be morrow.",
    "These violent delights have violent ends...",
    "Something is rotten in the state of Denmark.",
    "Love all, trust a few, do wrong to none.",
    "The lady doth protest too much, methinks.",
    "Brevity is the soul of wit.",
    "Uneasy lies the head that wears a crown.",
    "Now is the winter of our discontent.",
];
// The following lines define a subclass of MessageDisplayer,
// together with the "subclass!" directive above in the include_cpp!
// macro. See the main function at the bottom for how this subclass
// is instantiated.
#[is_subclass]
#[derive(Default)]
pub struct UwuDisplayer {}

impl ffi::MessageDisplayer_methods for UwuDisplayer {
    // Prints the message in "uwuified" form. uwuify_str_sse uses x86 SSE
    // intrinsics, hence the target_arch gates: non-Intel builds fall back
    // to a fixed string instead of failing to compile.
    fn display_message(&self, _msg: &CxxString) {
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        let uwu = uwuifier::uwuify_str_sse(_msg.to_str().unwrap());
        #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
        let uwu = "uwuification is unavailable for this pwatform :(";
        println!("{}", uwu);
    }
}
// And here's a different pure virtual class.
// This one is notable only in that the interface of the C++ class
// involves std::string, yet in Rust the subclass uses
// std::unique_ptr<std::string> (for all the normal reasons in autocxx -
// for now, at least, we can't hold non-trivial C++ objects on the Rust stack.)
// All the boxing and unboxing is done automatically by autocxx layers.
#[is_subclass]
#[derive(Default)]
pub struct QuoteProducer {}

impl ffi::MessageProducer_methods for QuoteProducer {
    // Returns one random quote from SHAKESPEARE_QUOTES, converted to a
    // heap-allocated C++ std::string via autocxx's ToCppString helper.
    fn get_message(&self) -> cxx::UniquePtr<CxxString> {
        use ffi::ToCppString;
        SHAKESPEARE_QUOTES[fastrand::usize(0..SHAKESPEARE_QUOTES.len())].into_cpp()
    }
}
// Here's another subclass of the same 'displayer' class.
// This one is more complex in two ways.
//
// First, we actually want to store some data here in our subclass.
// That means we can't just allocate ourselves with Default::default().
// And that means we need to be aware of the cpp_peer field which is
// added by the #[subclass] macro.
//
// Second, we're going to simulate the observer/listener type pattern
// in C++ where a const* is used to send messages around a codebase yet
// recipients need to react by mutating themselves or otherwise actively
// doing stuff. In C++ you'd probably need a const_cast. Here we use
// interior mutability.
#[is_subclass]
pub struct BoxDisplayer {
    // Interior-mutable counter: display_message only receives &self but
    // must increment this on every call.
    message_count: RefCell<usize>,
}

impl BoxDisplayer {
    // Hand-rolled constructor (rather than #[derive(Default)]) because we
    // want the counter to start at 1, and because we must populate the
    // macro-inserted cpp_peer field explicitly.
    fn new() -> Self {
        Self {
            // As we're allocating this class ourselves instead of using [`Default`]
            // we need to initialize the `cpp_peer` member ourselves. This member is
            // inserted by the `#[is_subclass]` annotation. autocxx will
            // later use this to store a pointer back to the C++ peer.
            cpp_peer: Default::default(),
            message_count: RefCell::new(1usize),
        }
    }
}
impl ffi::MessageDisplayer_methods for BoxDisplayer {
    /// Prints the message wrapped to 70 columns inside a 74-column box of
    /// `#` characters, preceded by an incrementing "Message N" header.
    ///
    /// `message_count` lives in a `RefCell` because this method only gets
    /// `&self` yet must bump the counter (interior mutability, see the
    /// comment on the struct).
    fn display_message(&self, msg: &CxxString) {
        let msg = textwrap::fill(msg.to_str().unwrap(), 70);
        // str::repeat is the idiomatic (and simpler) replacement for the
        // iter::repeat(..).take(..).collect::<String>() chain.
        let horz_line = "#".repeat(74);
        println!("{}", horz_line);
        let msgmsg = format!("Message {}", self.message_count.borrow());
        self.message_count.replace_with(|old| *old + 1usize);
        println!("# {:^70} #", msgmsg);
        println!("{}", horz_line);
        for l in msg.lines() {
            println!("# {:^70} #", l);
        }
        println!("{}", horz_line);
    }
}
fn main() {
    // Register some C++ example classes.
    ffi::register_cpp_thingies();
    // Create a message displayer. We create and pass an instance of
    // the subclass on the Rust side - that's the first parameter -
    // and also a closure which calls an appropriate C++ constructor for
    // the C++ side peer object. Constructors exist for each superclass
    // constructor, so effectively this is a call to the C++ superclass
    // constructor.
    let uwu =
        UwuDisplayer::new_rust_owned(UwuDisplayer::default(), ffi::UwuDisplayerCpp::make_unique);
    // The next line casts the &UwuDisplayerCpp to a &MessageDisplayer.
    ffi::register_displayer(uwu.as_ref().borrow().as_ref());
    // Same again for the next message displayer object.
    // In each case, we could have said `new_cpp_owned` instead, which would
    // have given us a cxx::UniquePtr that we could have passed to C++.
    let boxd = BoxDisplayer::new_rust_owned(BoxDisplayer::new(), ffi::BoxDisplayerCpp::make_unique);
    ffi::register_displayer(boxd.as_ref().borrow().as_ref());
    // Let's register a producer too.
    let shakespeare =
        QuoteProducer::new_rust_owned(QuoteProducer::default(), ffi::QuoteProducerCpp::make_unique);
    ffi::register_producer(shakespeare.as_ref().borrow().as_ref());
    // Run the C++-driven demo twice; each run calls back into the Rust
    // subclasses registered above.
    ffi::run_demo();
    ffi::run_demo();
}
|
//! https://arkada38.github.io/2018/01/14/creating-a-turtle/
//!
//! This is inspired by a child's drawing of a turtle.
//! We are going to draw the turtle using arcs and straight lines.
//! To draw arcs, we use multiple Rust for-loops to create different tilt angles and
//! lengths. The more sophisticated the figure is, the more loops we need to make it.
extern crate turtle;
use turtle::{Turtle, Color, color};
// Global scale factor for every stroke length below.
const SIZE: f64 = 1.0;
// Colors use 0-255 component values (turtle crate convention — TODO confirm).
const SHELL_COLOR: Color = Color {red: 62.0, green: 114.0, blue: 29.0, alpha: 1.0};
const BODY_COLOR: Color = Color {red: 119.0, green: 178.0, blue: 85.0, alpha: 1.0};
const EYE_COLOR: Color = color::BLACK;
/// Draws the whole turtle: shell, tail, legs, neck, head, eye, and three
/// shell highlights. The drawing is a single choreographed sequence — each
/// `right`/`left`/`forward` positions the pen for the next `draw_*` call,
/// so the order of statements matters.
fn main() {
    let mut turtle = Turtle::new();
    turtle.set_speed(8);
    // Move to the starting point without drawing.
    turtle.pen_up();
    turtle.set_x(-280.0);
    turtle.set_y(-90.0);
    draw_shell(&mut turtle);
    draw_tail(&mut turtle);
    turtle.pen_up();
    turtle.right(55.0);
    turtle.forward(SIZE * 10.0);
    turtle.right(55.0);
    // Back leg
    draw_leg(&mut turtle);
    turtle.right(86.5);
    turtle.forward(SIZE * 55.0);
    turtle.right(86.0);
    // Front leg
    draw_leg(&mut turtle);
    turtle.right(54.5);
    turtle.forward(SIZE * 8.0);
    draw_neck(&mut turtle);
    turtle.right(172.6);
    turtle.forward(SIZE * 40.0);
    turtle.right(110.0);
    draw_head(&mut turtle);
    turtle.left(128.0);
    turtle.forward(SIZE * 15.0);
    draw_eye(&mut turtle);
    turtle.set_fill_color(BODY_COLOR);
    turtle.left(175.0);
    turtle.forward(SIZE * 43.0);
    // Here we start to draw highlights on the shell.
    // We have 3 highlights: on the right, in the middle and on the left.
    draw_right_highlight(&mut turtle);
    turtle.right(18.0);
    turtle.forward(SIZE * 37.0);
    turtle.set_heading(180.0);
    draw_middle_highlight(&mut turtle);
    turtle.left(24.0);
    turtle.forward(SIZE * 36.0);
    turtle.set_heading(180.0);
    // Dropped the stray empty `/**/` comment that trailed this call.
    draw_left_highlight(&mut turtle);
}
/// Outlines and fills the shell: a large half-circle arc, two tight
/// quarter-circle corners, and a flat base whose length is derived from
/// the arc's diameter.
fn draw_shell(t: &mut Turtle) {
    t.set_fill_color(SHELL_COLOR);
    t.begin_fill();
    // Upper arc: 180 one-degree steps.
    (0..180).for_each(|_| {
        t.forward(SIZE);
        t.right(1.0);
    });
    // First tight corner down toward the base.
    (0..90).for_each(|_| {
        t.forward(SIZE / 3.0);
        t.right(1.0);
    });
    t.set_speed(5);
    // Base length: two-thirds of the arc's diameter.
    let diameter = SIZE * 360.0 / std::f64::consts::PI;
    t.forward(diameter - diameter / 3.0);
    t.set_speed(10);
    // Mirror corner closing the outline.
    (0..90).for_each(|_| {
        t.forward(SIZE / 3.0);
        t.right(1.0);
    });
    t.end_fill();
}

/// Draws and fills the tail as a gentle curve ending in a short hook.
fn draw_tail(t: &mut Turtle) {
    t.set_fill_color(BODY_COLOR);
    t.begin_fill();
    t.left(90.0);
    t.forward(SIZE);
    // Long shallow curve...
    (0..45).for_each(|_| {
        t.forward(SIZE / 3.0);
        t.left(1.0);
    });
    t.forward(SIZE * 3.0);
    // ...then a sharp turn at the tip.
    (0..25).for_each(|_| {
        t.forward(SIZE / 3.0);
        t.left(6.0);
    });
    t.forward(SIZE * 9.0);
    t.right(6.0);
    t.forward(SIZE * 8.0);
    t.end_fill();
}

/// Draws one leg: two straight sides joined by two rounded "foot" corners.
/// Fill color is whatever was last set by the caller.
fn draw_leg(t: &mut Turtle) {
    t.begin_fill();
    (0..15).for_each(|_| {
        t.forward(SIZE);
        t.left(0.5);
    });
    (0..90).for_each(|_| {
        t.forward(SIZE / 6.0);
        t.left(1.0);
    });
    t.forward(SIZE * 3.0);
    (0..90).for_each(|_| {
        t.forward(SIZE / 6.0);
        t.left(1.0);
    });
    t.forward(SIZE * 14.5);
    t.end_fill();
}

/// Draws the neck in two filled passes: a broad curve, then the narrower
/// strip connecting it to the head.
fn draw_neck(t: &mut Turtle) {
    t.begin_fill();
    (0..15).for_each(|_| {
        t.forward(SIZE * 3.0);
        t.left(1.5);
    });
    t.end_fill();
    t.begin_fill();
    t.left(100.0);
    t.forward(SIZE * 20.0);
    t.left(80.0);
    (0..4).for_each(|_| {
        t.forward(SIZE * 3.7);
        t.left(1.5);
    });
    t.left(30.0);
    (0..27).for_each(|_| {
        t.forward(SIZE);
        t.right(1.0);
    });
    t.end_fill();
}
// The head is the most intricate part: seven arc segments of varying
// radius approximate the snout and jawline, rather than a single circle,
// to make it look more like a real turtle's head.
fn draw_head(t: &mut Turtle) {
    t.begin_fill();
    // Each (steps, step_length, turn) triple is one arc segment of the
    // head outline, traced in order.
    let segments: [(u32, f64, f64); 7] = [
        (20, SIZE * 1.2, 1.0),
        (10, SIZE * 1.2, 4.0),
        (10, SIZE / 1.5, 7.0),
        (10, SIZE, 2.0),
        (50, SIZE / 2.5, 1.0),
        (30, SIZE / 3.0, 1.8),
        (10, SIZE / 1.5, 1.8),
    ];
    for &(steps, length, turn) in segments.iter() {
        for _ in 0..steps {
            t.forward(length);
            t.left(turn);
        }
    }
    t.end_fill();
}

/// Draws the eye as a small filled circle (90 four-degree steps).
fn draw_eye(t: &mut Turtle) {
    t.set_fill_color(EYE_COLOR);
    t.begin_fill();
    (0..90).for_each(|_| {
        t.forward(SIZE / 3.0);
        t.right(4.0);
    });
    t.end_fill();
}
/// Draws the rightmost highlight stripe on the shell: rounded end caps
/// joined by a straight edge and a long shallow arc.
fn draw_right_highlight(t: &mut Turtle) {
    t.begin_fill();
    (0..39).for_each(|_| {
        t.forward(SIZE / 5.0);
        t.left(2.0);
    });
    t.forward(SIZE * 36.0);
    (0..90).for_each(|_| {
        t.forward(SIZE / 2.5);
        t.left(2.0);
    });
    (0..42).for_each(|_| {
        t.forward(SIZE);
        t.left(0.9);
    });
    (0..26).for_each(|_| {
        t.forward(SIZE / 5.0);
        t.left(2.0);
    });
    t.end_fill();
}

/// Draws the (symmetric) middle highlight stripe on the shell.
fn draw_middle_highlight(t: &mut Turtle) {
    t.begin_fill();
    (0..40).for_each(|_| {
        t.forward(SIZE / 4.0);
        t.left(2.0);
    });
    t.forward(SIZE * 47.0);
    t.left(8.5);
    (0..90).for_each(|_| {
        t.forward(SIZE / 2.0);
        t.left(2.0);
    });
    t.forward(SIZE / 2.0);
    t.left(8.5);
    t.forward(SIZE * 47.0);
    (0..40).for_each(|_| {
        t.left(2.0);
        t.forward(SIZE / 4.0);
    });
    t.end_fill();
}

/// Draws the leftmost highlight stripe — the mirror image of
/// `draw_right_highlight`, traced in the opposite order.
fn draw_left_highlight(t: &mut Turtle) {
    t.begin_fill();
    (0..26).for_each(|_| {
        t.forward(SIZE / 5.0);
        t.left(2.0);
    });
    (0..42).for_each(|_| {
        t.forward(SIZE);
        t.left(0.9);
    });
    (0..90).for_each(|_| {
        t.forward(SIZE / 2.5);
        t.left(2.0);
    });
    t.forward(SIZE * 36.0);
    (0..39).for_each(|_| {
        t.forward(SIZE / 5.0);
        t.left(2.0);
    });
    t.end_fill();
}
Removing empty comment
//! https://arkada38.github.io/2018/01/14/creating-a-turtle/
//!
//! This is inspired by a child's drawing of a turtle.
//! We are going to draw the turtle using arcs and straight lines.
//! To draw arcs, we use multiple Rust for-loops to create different tilt angles and
//! lengths. The more sophisticated the figure is, the more loops we need to make it.
extern crate turtle;
use turtle::{Turtle, Color, color};
// Global scale factor for every stroke length below.
const SIZE: f64 = 1.0;
// Colors use 0-255 component values (turtle crate convention — TODO confirm).
const SHELL_COLOR: Color = Color {red: 62.0, green: 114.0, blue: 29.0, alpha: 1.0};
const BODY_COLOR: Color = Color {red: 119.0, green: 178.0, blue: 85.0, alpha: 1.0};
const EYE_COLOR: Color = color::BLACK;
// Draws the whole turtle. The drawing is one choreographed sequence: each
// right/left/forward positions the pen for the next draw_* call, so the
// statement order matters.
fn main() {
    let mut turtle = Turtle::new();
    turtle.set_speed(8);
    // Move to the starting point without drawing.
    turtle.pen_up();
    turtle.set_x(-280.0);
    turtle.set_y(-90.0);
    draw_shell(&mut turtle);
    draw_tail(&mut turtle);
    turtle.pen_up();
    turtle.right(55.0);
    turtle.forward(SIZE * 10.0);
    turtle.right(55.0);
    // Back leg
    draw_leg(&mut turtle);
    turtle.right(86.5);
    turtle.forward(SIZE * 55.0);
    turtle.right(86.0);
    // Front leg
    draw_leg(&mut turtle);
    turtle.right(54.5);
    turtle.forward(SIZE * 8.0);
    draw_neck(&mut turtle);
    turtle.right(172.6);
    turtle.forward(SIZE * 40.0);
    turtle.right(110.0);
    draw_head(&mut turtle);
    turtle.left(128.0);
    turtle.forward(SIZE * 15.0);
    draw_eye(&mut turtle);
    turtle.set_fill_color(BODY_COLOR);
    turtle.left(175.0);
    turtle.forward(SIZE * 43.0);
    // Here we start to draw highlights on the shell.
    // We have 3 highlights: on the right, in the middle and on the left.
    draw_right_highlight(&mut turtle);
    turtle.right(18.0);
    turtle.forward(SIZE * 37.0);
    turtle.set_heading(180.0);
    draw_middle_highlight(&mut turtle);
    turtle.left(24.0);
    turtle.forward(SIZE * 36.0);
    turtle.set_heading(180.0);
    draw_left_highlight(&mut turtle);
}
/// Outlines and fills the shell: a large half-circle arc, two tight
/// quarter-circle corners, and a flat base whose length is derived from
/// the arc's diameter.
fn draw_shell(t: &mut Turtle) {
    t.set_fill_color(SHELL_COLOR);
    t.begin_fill();
    // Upper arc: 180 one-degree steps.
    (0..180).for_each(|_| {
        t.forward(SIZE);
        t.right(1.0);
    });
    // First tight corner down toward the base.
    (0..90).for_each(|_| {
        t.forward(SIZE / 3.0);
        t.right(1.0);
    });
    t.set_speed(5);
    // Base length: two-thirds of the arc's diameter.
    let diameter = SIZE * 360.0 / std::f64::consts::PI;
    t.forward(diameter - diameter / 3.0);
    t.set_speed(10);
    // Mirror corner closing the outline.
    (0..90).for_each(|_| {
        t.forward(SIZE / 3.0);
        t.right(1.0);
    });
    t.end_fill();
}

/// Draws and fills the tail as a gentle curve ending in a short hook.
fn draw_tail(t: &mut Turtle) {
    t.set_fill_color(BODY_COLOR);
    t.begin_fill();
    t.left(90.0);
    t.forward(SIZE);
    (0..45).for_each(|_| {
        t.forward(SIZE / 3.0);
        t.left(1.0);
    });
    t.forward(SIZE * 3.0);
    (0..25).for_each(|_| {
        t.forward(SIZE / 3.0);
        t.left(6.0);
    });
    t.forward(SIZE * 9.0);
    t.right(6.0);
    t.forward(SIZE * 8.0);
    t.end_fill();
}

/// Draws one leg: two straight sides joined by two rounded "foot" corners.
/// Fill color is whatever was last set by the caller.
fn draw_leg(t: &mut Turtle) {
    t.begin_fill();
    (0..15).for_each(|_| {
        t.forward(SIZE);
        t.left(0.5);
    });
    (0..90).for_each(|_| {
        t.forward(SIZE / 6.0);
        t.left(1.0);
    });
    t.forward(SIZE * 3.0);
    (0..90).for_each(|_| {
        t.forward(SIZE / 6.0);
        t.left(1.0);
    });
    t.forward(SIZE * 14.5);
    t.end_fill();
}

/// Draws the neck in two filled passes: a broad curve, then the narrower
/// strip connecting it to the head.
fn draw_neck(t: &mut Turtle) {
    t.begin_fill();
    (0..15).for_each(|_| {
        t.forward(SIZE * 3.0);
        t.left(1.5);
    });
    t.end_fill();
    t.begin_fill();
    t.left(100.0);
    t.forward(SIZE * 20.0);
    t.left(80.0);
    (0..4).for_each(|_| {
        t.forward(SIZE * 3.7);
        t.left(1.5);
    });
    t.left(30.0);
    (0..27).for_each(|_| {
        t.forward(SIZE);
        t.right(1.0);
    });
    t.end_fill();
}
// The head is the most intricate part: seven arc segments of varying
// radius approximate the snout and jawline, rather than a single circle,
// to make it look more like a real turtle's head.
fn draw_head(t: &mut Turtle) {
    t.begin_fill();
    // Each (steps, step_length, turn) triple is one arc segment of the
    // head outline, traced in order.
    let segments: [(u32, f64, f64); 7] = [
        (20, SIZE * 1.2, 1.0),
        (10, SIZE * 1.2, 4.0),
        (10, SIZE / 1.5, 7.0),
        (10, SIZE, 2.0),
        (50, SIZE / 2.5, 1.0),
        (30, SIZE / 3.0, 1.8),
        (10, SIZE / 1.5, 1.8),
    ];
    for &(steps, length, turn) in segments.iter() {
        for _ in 0..steps {
            t.forward(length);
            t.left(turn);
        }
    }
    t.end_fill();
}

/// Draws the eye as a small filled circle (90 four-degree steps).
fn draw_eye(t: &mut Turtle) {
    t.set_fill_color(EYE_COLOR);
    t.begin_fill();
    (0..90).for_each(|_| {
        t.forward(SIZE / 3.0);
        t.right(4.0);
    });
    t.end_fill();
}
/// Draws the rightmost highlight stripe on the shell: rounded end caps
/// joined by a straight edge and a long shallow arc.
fn draw_right_highlight(t: &mut Turtle) {
    t.begin_fill();
    (0..39).for_each(|_| {
        t.forward(SIZE / 5.0);
        t.left(2.0);
    });
    t.forward(SIZE * 36.0);
    (0..90).for_each(|_| {
        t.forward(SIZE / 2.5);
        t.left(2.0);
    });
    (0..42).for_each(|_| {
        t.forward(SIZE);
        t.left(0.9);
    });
    (0..26).for_each(|_| {
        t.forward(SIZE / 5.0);
        t.left(2.0);
    });
    t.end_fill();
}

/// Draws the (symmetric) middle highlight stripe on the shell.
fn draw_middle_highlight(t: &mut Turtle) {
    t.begin_fill();
    (0..40).for_each(|_| {
        t.forward(SIZE / 4.0);
        t.left(2.0);
    });
    t.forward(SIZE * 47.0);
    t.left(8.5);
    (0..90).for_each(|_| {
        t.forward(SIZE / 2.0);
        t.left(2.0);
    });
    t.forward(SIZE / 2.0);
    t.left(8.5);
    t.forward(SIZE * 47.0);
    (0..40).for_each(|_| {
        t.left(2.0);
        t.forward(SIZE / 4.0);
    });
    t.end_fill();
}

/// Draws the leftmost highlight stripe — the mirror image of
/// `draw_right_highlight`, traced in the opposite order.
fn draw_left_highlight(t: &mut Turtle) {
    t.begin_fill();
    (0..26).for_each(|_| {
        t.forward(SIZE / 5.0);
        t.left(2.0);
    });
    (0..42).for_each(|_| {
        t.forward(SIZE);
        t.left(0.9);
    });
    (0..90).for_each(|_| {
        t.forward(SIZE / 2.5);
        t.left(2.0);
    });
    t.forward(SIZE * 36.0);
    (0..39).for_each(|_| {
        t.forward(SIZE / 5.0);
        t.left(2.0);
    });
    t.end_fill();
}
|
#![crate_name = "seax_svm"]
#![crate_type = "lib"]
#![feature(box_syntax)]
/// Contains the Seax Virtual Machine (SVM) and miscellaneous
/// support code.
pub mod svm {
use svm::slist::List;
use svm::slist::List::{Cons,Nil};
use svm::slist::Stack;
use std::iter::IteratorExt;
use std::fmt;
/// Singly-linked list and stack implementations.
///
/// `List<T>` is a singly-linked cons list with boxed items. `Stack<T>` is
/// defined as a trait providing stack operations(`push()`, `pop()`, and
/// `peek()`), and an implementation for `List`.
#[macro_use]
pub mod slist {
use svm::slist::List::{Cons,Nil};
use std::fmt;
use std::ops::Index;
/// Common functions for an immutable Stack abstract data type.
///
/// All operations consume or borrow `self`; `push`/`pop` return a new
/// stack value rather than mutating in place.
pub trait Stack<T> {
    /// Push an item to the top of the stack, returning a new stack
    fn push(self, item : T) -> Self;
    /// Pop the top element of the stack. Returns an Option on a T and
    /// a new Stack<T> to replace this.
    fn pop(self) -> Option<(T, Self)>;
    /// Peek at the top item of the stack.
    ///
    /// Returns Some<T> if there is an item on top of the stack,
    /// and None if the stack is empty.
    fn peek(&self) -> Option<&T>;
    /// Returns an empty stack.
    fn empty() -> Self;
}
/// Stack implementation backed by the cons list: the list head is the
/// stack top, so every operation is O(1).
impl<T> Stack<T> for List<T> {
    /// Push an item by consing it onto the front of the list.
    fn push(self, item: T) -> List<T> {
        Cons(item, Box::new(self))
    }

    /// Pop the head, yielding the item plus the remaining stack, or
    /// `None` when the stack is empty.
    fn pop(self) -> Option<(T, List<T>)> {
        if let Cons(item, rest) = self {
            Some((item, *rest))
        } else {
            None
        }
    }

    /// Borrow the top item without consuming the stack; `None` if empty.
    fn peek(&self) -> Option<&T> {
        match *self {
            Cons(ref item, _) => Some(item),
            Nil => None,
        }
    }

    /// The empty stack is just `Nil`.
    fn empty() -> List<T> {
        Nil
    }
}
/// Singly-linked cons list.
///
/// This is used internally to represent list primitives in the
/// machine. Each node owns its tail via a `Box`, so the list is a
/// straight chain of heap allocations.
#[derive(PartialEq,Clone,Debug)]
pub enum List<T> {
    /// Cons cell containing a `T` and a link to the tail
    Cons(T, Box<List<T>>),
    /// The empty list.
    Nil,
}
/// Public implementation for List.
impl<T> List<T> {
    /// Creates a new empty list
    pub fn new() -> List<T> {
        Nil
    }
    /// Prepends the given item to the list.
    ///
    /// Returns the list containing the new head item.
    /// This is an O(1) operation.
    pub fn prepend(self, it: T) -> List<T> {
        Cons(it, box self)
    }
    /// Appends an item to the end of the list.
    ///
    /// This is an O(n) operation.
    ///
    /// NOTE(review): still a stub (`unimplemented!`), and the signature
    /// returns `()` while consuming `self` — if implemented as declared,
    /// the resulting list would be dropped. Probably should return
    /// `List<T>`; confirm intent before implementing.
    pub fn append(self, it: T) {
        unimplemented!()
    }
    /// Returns the length of the list.
    ///
    /// Recursive walk over the chain: O(n) time and O(n) stack depth.
    pub fn length (&self) -> usize {
        match *self {
            Cons(_, ref tail) => 1 + tail.length(),
            Nil => 0
        }
    }
    /// Provide a forward iterator
    #[inline]
    pub fn iter<'a>(&'a self) -> ListIterator<'a, T> {
        ListIterator{current: self}
    }
}
/// Formats the list as nested pairs, e.g. `(1, (2, (3, nil)))`
/// (the representation the unit tests below assert on).
///
/// Fix: the impl previously declared a lifetime parameter `'a` that was
/// never used; unconstrained impl parameters are rejected by modern rustc.
impl<T> fmt::Display for List<T> where T: fmt::Display {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Cons(ref head, ref tail) => write!(f, "({}, {})", head, tail),
            Nil => write!(f, "nil")
        }
    }
}
/// Wraps a List<T> to allow it to be used as an Iterator<T>
pub struct ListIterator<'a, T:'a> {
    // Cursor: the remaining (not yet yielded) portion of the list.
    current: &'a List<T>
}
/// Implementation of Iterator for List. This allows iteration by
/// link hopping.
impl<'a, T> Iterator for ListIterator<'a, T> {
    type Item = &'a T;
    /// Get the next element from the list. Returns a Some<T>, or Nil
    /// if at the end of the list.
    ///
    /// Uses a `box` pattern (nightly feature) to borrow through the
    /// boxed tail; the cursor then advances to that tail.
    fn next(&mut self) -> Option<&'a T> {
        match self.current {
            &Cons(ref head, box ref tail) => { self.current = tail; Some(head) },
            &Nil => None
        }
    }
}
// len() reports the number of elements left to yield. NOTE(review):
// List::length is O(n), so this len() is O(n) rather than the O(1)
// callers of ExactSizeIterator usually expect.
impl<'a, T> ExactSizeIterator for ListIterator<'a, T> {
    fn len(&self) -> usize {
        self.current.length()
    }
}
// Indexing via iterator walk (old pre-1.0 `Index` signature taking the
// index by reference). NOTE(review): the loop skips `*_index - 1`
// elements and then yields the next one, so this is effectively
// 1-based indexing, and `list[0]` underflows the `usize` subtraction
// (panic in debug builds). Confirm the intended convention; also panics
// via `unwrap()` when the index is past the end.
impl<T> Index<usize> for List<T> {
    type Output = T;
    fn index<'a>(&'a self, _index: &usize) -> &'a T {
        let mut it = self.iter();
        for _ in 0..*_index-1 {
            it.next();
        }
        it.next().unwrap()
    }
}
// Same 1-based walk as the usize impl above, for signed indices.
// NOTE(review): an index of 0 makes the range `0..-1` (empty, so it
// returns the head), and negative indices also yield the head rather
// than erroring — confirm whether that is intended.
impl<T> Index<isize> for List<T> {
    type Output = T;
    fn index<'a>(&'a self, _index: &isize) -> &'a T {
        let mut it = self.iter();
        for _ in 0..*_index-1 {
            it.next();
        }
        it.next().unwrap()
    }
}
/// Convenience macro for making lists.
///
/// # Example:
///
/// ```
/// # #[macro_use] extern crate seax_svm;
/// # use seax_svm::svm::slist;
/// # use seax_svm::svm::slist::List::{Cons, Nil};
/// # fn main () {
/// assert_eq!(
///     list!(1i32, 2i32, 3i32),
///     Cons(1i32, Box::new(Cons(2i32, Box::new(Cons(3i32, Box::new(Nil))))))
/// );
/// # }
/// ```
#[macro_export]
macro_rules! list(
    ( $e:expr, $($rest:expr),+ ) => ( Cons($e, Box::new(list!( $( $rest ),+ )) ));
    ( $e:expr ) => ( Cons($e, Box::new(Nil)) );
    // Fix: the empty-invocation arm previously expanded to the bare token
    // `@Empty`, which is not a valid expression, so `list!()` could never
    // compile. It now expands to the empty list.
    () => ( Nil )
);
// Unit tests for the cons list and its Stack implementation.
// NOTE(review): these use pre-1.0 APIs (`String::as_slice`) that no
// longer exist; the whole crate targets an old nightly toolchain.
#[cfg(test)]
mod tests {
    use super::{List, Stack};
    use super::List::{Cons,Nil};
    #[test]
    fn test_list_length() {
        let full_list: List<i32> = list!(1i32, 2i32, 3i32);
        let empty_list: List<i32> = List::new();
        assert_eq!(full_list.length(), 3);
        assert_eq!(empty_list.length(), 0);
    }
    #[test]
    fn test_list_to_string() {
        // Pins the nested-pair Display format.
        let l: List<i32> = Cons(1, Box::new(Cons(2, Box::new(Cons(3, Box::new(Nil))))));
        assert_eq!(l.to_string(), "(1, (2, (3, nil)))");
    }
    #[test]
    fn test_stack_length() {
        let full_stack: List<i32> = list!(1i32, 2i32, 3i32);
        let empty_stack: List<i32> = Stack::empty();
        assert_eq!(full_stack.length(), 3);
        assert_eq!(empty_stack.length(), 0);
    }
    #[test]
    fn test_stack_peek() {
        // The list head is the stack top, so peek() sees the first element.
        let full_stack: List<i32> = list!(1i32, 2i32, 3i32);
        let empty_stack: List<i32> = Stack::empty();
        assert_eq!(full_stack.peek(), Some(&1));
        assert_eq!(empty_stack.peek(), None);
    }
    #[test]
    fn test_stack_push() {
        let mut s: List<i32> = Stack::empty();
        assert_eq!(s.peek(), None);
        s = s.push(1);
        assert_eq!(s.peek(), Some(&1));
        s = s.push(6);
        assert_eq!(s.peek(), Some(&6));
    }
    #[test]
    fn test_stack_pop() {
        let mut s: List<i32> = Stack::empty();
        assert_eq!(s.peek(), None);
        s = s.push(1);
        assert_eq!(s.peek(), Some(&1));
        s = s.push(6);
        assert_eq!(s.peek(), Some(&6));
        let pop_result = s.pop().unwrap(); // should not break
        s = pop_result.1;
        assert_eq!(s.peek(), Some(&1));
        assert_eq!(pop_result.0, 6);
    }
    #[test]
    fn test_list_macro() {
        let l: List<i32> = list!(1i32, 2i32, 3i32);
        assert_eq!(l.to_string(), "(1, (2, (3, nil)))")
    }
    #[test]
    fn test_list_iter() {
        let l: List<isize> = list!(1,2,3,4,5,6);
        let mut string = String::new();
        for item in l.iter() {
            string.push_str((item.to_string() + ", ").as_slice());
        }
        assert_eq!(string.as_slice(), "1, 2, 3, 4, 5, 6, ")
    }
}
}
/// SVM cell types.
///
/// A cell in the VM can be either an atom (single item, either unsigned
/// int, signed int, float, or string) or a pointer to a list cell.
#[derive(PartialEq,Clone,Debug)]
pub enum SVMCell {
    AtomCell(Atom),
    ListCell(Box<List<SVMCell>>)
}
impl fmt::Display for SVMCell {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{}]", self)
}
}
/// SVM atom types.
///
/// A VM atom can be either an unsigned int, signed int, float,
/// char, or string.
///
/// TODO: Strings could be implemented as char lists rather than
/// Rust strings.
#[derive(PartialEq,Clone,Debug)]
pub enum Atom {
    UInt(usize),
    SInt(isize),
    Float(f64),
    Char(char),
    Str(String),
}
/// Formats an atom with a type-revealing suffix or delimiter:
/// `{n}us` / `{n}is` / `{n}f64` for numbers, single quotes for chars,
/// double quotes for strings.
impl fmt::Display for Atom {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Atom::UInt(n) => write!(f, "{}us", n),
            Atom::SInt(n) => write!(f, "{}is", n),
            Atom::Float(x) => write!(f, "{}f64", x),
            Atom::Char(c) => write!(f, "'{}'", c),
            Atom::Str(ref s) => write!(f, "\"{}\"", s),
        }
    }
}
/// SVM instruction types
///
/// The instruction set follows the classic SECD machine: stack (`S`),
/// environment (`E`), control (`C`), and dump (`D`) registers referenced
/// in the per-variant docs below.
pub enum SVMInstruction {
    /// `nil`
    ///
    /// Pushes an empty list (nil) onto the stack
    InstNIL,
    /// `ldc`: `L`oa`d` `C`onstant. Loads a constant (atom)
    InstLDC(Atom),
    /// `ld`: `L`oa`d`. Pushes a variable onto the stack.
    ///
    /// The variable is indicated by the argument, a pair.
    /// The pair's `car` specifies the level, the `cdr` the position.
    /// So `(1 . 3)` gives the current function's (level 1) third
    /// parameter.
    InstLD,
    /// `ldf`: `L`oa`d` `F`unction.
    ///
    /// Takes one list argument representing a function and constructs
    /// a closure (a pair containing the function and the current
    /// environment) and pushes that onto the stack.
    InstLDF,
    /// `join`
    ///
    /// Pops a list reference from the dump and makes this the new value
    /// of `C`. This instruction occurs at the end of both alternatives of
    /// a `sel`.
    InstJOIN,
    /// `ap`: `Ap`ply.
    ///
    /// Pops a closure and a list of parameter values from the stack.
    /// The closure is applied to the parameters by installing its
    /// environment as the current one, pushing the parameter list
    /// in front of that, clearing the stack, and setting `C` to the
    /// closure's function pointer. The previous values of `S`, `E`,
    /// and the next value of `C` are saved on the dump.
    InstAP,
    /// `ret`: `Ret`urn.
    ///
    /// Pops one return value from the stack, restores
    /// `S`, `E`, and `C` from the dump, and pushes
    /// the return value onto the now-current stack.
    InstRET,
    /// `dum`: `Dum`my.
    ///
    /// Pops a dummy environment (an empty list) onto the `E` stack.
    InstDUM,
    /// `rap`: `R`ecursive `Ap`ply.
    /// Works like `ap`, only that it replaces an occurrence of a
    /// dummy environment with the current one, thus making recursive
    /// functions possible.
    InstRAP,
    /// `sel`: `Sel`ect
    ///
    /// Expects two list arguments, and pops a value from the stack.
    /// The first list is executed if the popped value was non-nil,
    /// the second list otherwise. Before one of these list pointers
    /// is made the new `C`, a pointer to the instruction following
    /// `sel` is saved on the dump.
    InstSEL,
    /// `add`
    ///
    /// Pops two numbers off of the stack and adds them, pushing the
    /// result onto the stack. This will up-convert integers to floating
    /// point if necessary.
    ///
    /// TODO: figure out what happens when you try to add things that aren't
    /// numbers (maybe the compiler won't let this happen?).
    InstADD,
    /// `sub`: `Sub`tract
    ///
    /// Pops two numbers off of the stack and subtracts the first from the
    /// second, pushing the result onto the stack. This will up-convert
    /// integers to floating point if necessary.
    ///
    /// TODO: figure out what happens when you try to subtract things that
    /// aren't numbers (maybe the compiler won't let this happen?).
    InstSUB,
    /// `mul`: `Mul`tiply
    ///
    /// Pops two numbers off of the stack and multiplies them, pushing the
    /// result onto the stack. This will up-convert integers to floating
    /// point if necessary.
    ///
    /// TODO: figure out what happens when you try to multiply things that
    /// aren't numbers (maybe the compiler won't let this happen?).
    InstMUL,
    /// `div`: `Div`ide
    ///
    /// Pops two numbers off of the stack and divides the first by the second,
    /// pushing the result onto the stack. This performs integer division.
    ///
    /// TODO: figure out what happens when you try to divide things that
    /// aren't numbers (maybe the compiler won't let this happen?).
    InstDIV,
    /// `fdiv`: `F`loating-point `div`ide
    ///
    /// Pops two numbers off of the stack and divides the first by the second,
    /// pushing the result onto the stack. This performs float division.
    ///
    /// TODO: figure out what happens when you try to divide things that
    /// aren't numbers (maybe the compiler won't let this happen?).
    ///
    /// TODO: Not sure if there should be separate float and int divide words
    /// I guess the compiler can figure this out
    InstFDIV,
    /// `mod`: `Mod`ulo
    ///
    /// Pops two numbers off of the stack and divides the first by the second,
    /// pushing the remainder onto the stack.
    ///
    /// TODO: figure out what happens when you try to modulo things that
    /// aren't numbers (maybe the compiler won't let this happen?).
    InstMOD
    // TODO: add some hardcoded I/O instructions here so that you can
    // do I/O without farming everything out to `stdio`
    // TODO: add `cons` and `cdr` words
}
/// Represents a SVM machine state
///
/// Holds the four SECD registers; each is a cons `List` used as a stack.
pub struct State {
    stack: List<SVMCell>,   // (S) working stack for intermediate values
    env: List<SVMCell>,     // (E) environment: list of binding frames
    control: List<SVMCell>, // (C) remaining program to execute
    dump: List<SVMCell>     // (D) saved machine state for returns
}
impl State {
    /// Creates a new empty state
    ///
    /// All four registers start as the empty list.
    fn new() -> State {
        State {
            stack: Stack::empty(),
            env: Stack::empty(),
            control: Stack::empty(),
            dump: Stack::empty()
        }
    }
    /// Evaluates an instruction.
    ///
    /// Evaluates an instruction against a state, returning a new state.
    /// Consumes `self`; the registers not touched by the instruction are
    /// moved unchanged into the returned state.
    pub fn eval(self, inst: SVMInstruction) -> State {
        match inst {
            // nil: push an empty list onto the stack.
            SVMInstruction::InstNIL => {
                State {
                    stack: self.stack.push(SVMCell::ListCell(box List::new())),
                    env: self.env,
                    control: self.control,
                    dump: self.dump
                }
            }
            // ldc: push the constant operand onto the stack.
            SVMInstruction::InstLDC(atom) => {
                State {
                    stack: self.stack.push(SVMCell::AtomCell(atom)),
                    env: self.env,
                    control: self.control,
                    dump: self.dump
                }
            },
            // ld: pop a `(level . pos)` pair of SInts from the stack and
            // push the environment entry it names.
            // Panics if the stack is empty or the popped cell is not such
            // a pair (see TODO below).
            SVMInstruction::InstLD => {
                let pop = self.stack.pop().unwrap();
                match pop.0 {
                    SVMCell::ListCell(box Cons(
                        SVMCell::AtomCell(
                            Atom::SInt(level)
                        ),
                        box Cons(
                            SVMCell::AtomCell(
                                Atom::SInt(pos)
                            ),
                            box Nil
                        )
                    )
                ) => {
                        // NOTE(review): slist's Index is 1-based (l[1] is the
                        // head) yet `level-1`/`pos-1` are subtracted here —
                        // confirm which convention `ld` operands follow.
                        let environment = match self.env[level-1] {
                            SVMCell::ListCell(ref l) => l.clone(),
                            _ => panic!()
                        };
                        State {
                            stack: pop.1.push(environment[pos-1].clone()),
                            env: self.env,
                            control: self.control,
                            dump: self.dump
                        }
                    },
                    _ => panic!() //TODO: put error on stack instead
                }
            },
            // Remaining instructions are not yet implemented.
            _ => { unimplemented!() }
        }
    }
}
/*
/// Evaluates a program.
///
/// Evaluates a program represented as an `Iterator` of `SVMInstruction`s.
/// Returns the final machine state at the end of execution.
pub fn evalProgram<I>(insts: I) -> State where I: Iterator<Item=SVMInstruction> {
    insts.fold(State::new(), |last_state: State, inst: SVMInstruction| last_state.eval(inst))
}*/
#[cfg(test)]
mod tests {
    use super::slist;
    use super::slist::Stack;
    use super::slist::List::{Cons,Nil};
    use super::State;
    use super::{SVMInstruction, SVMCell, Atom};
    use super::SVMCell::{AtomCell, ListCell};
    use super::Atom::{SInt, Char, Float, Str};
    /// A fresh state has all four registers empty.
    #[test]
    fn test_empty_state() {
        let state = State::new();
        assert_eq!(state.stack.length(), 0);
        assert_eq!(state.env.length(), 0);
        assert_eq!(state.control.length(), 0);
        assert_eq!(state.dump.length(), 0);
    }
    /// `nil` pushes an empty list cell onto the stack.
    #[test]
    fn test_eval_nil () {
        let mut state = State::new();
        assert_eq!(state.stack.peek(), None);
        state = state.eval(SVMInstruction::InstNIL);
        assert_eq!(state.stack.peek(), Some(&SVMCell::ListCell(box Nil)));
    }
    /// `ldc` pushes its constant operand (any atom type) onto the stack.
    #[test]
    fn test_eval_ldc () {
        let mut state = State::new();
        assert_eq!(state.stack.peek(), None);
        state = state.eval(SVMInstruction::InstLDC(SInt(1)));
        assert_eq!(state.stack.peek(), Some(&AtomCell(SInt(1))));
        state = state.eval(SVMInstruction::InstLDC(Char('a')));
        assert_eq!(state.stack.peek(), Some(&AtomCell(Char('a'))));
        state = state.eval(SVMInstruction::InstLDC(Float(1.0f64)));
        assert_eq!(state.stack.peek(), Some(&AtomCell(Float(1.0f64))));
    }
    /// `ld` pops the pair `(1 . 2)` from the stack and pushes the named
    /// entry of the first environment frame.
    ///
    /// Previously this test built a state but asserted nothing; it now
    /// exercises the instruction and checks the loaded value.
    #[test]
    fn test_eval_ld () {
        let mut state = State {
            stack: list!(ListCell(box list!(AtomCell(SInt(1)),AtomCell(SInt(2))))),
            env: list!(ListCell(box list!(AtomCell(Str(String::from_str("load me!"))),AtomCell(Str(String::from_str("don't load me!")))))),
            control: Stack::empty(),
            dump: Stack::empty()
        };
        state = state.eval(SVMInstruction::InstLD);
        assert_eq!(state.stack.peek(), Some(&AtomCell(Str(String::from_str("load me!")))));
    }
    /// Each atom variant formats with its type-revealing suffix/delimiter.
    #[test]
    fn test_atom_show () {
        let mut a: Atom;
        a = Atom::Char('a');
        assert_eq!(format!("{}", a), "'a'");
        a = Atom::UInt(1us);
        assert_eq!(format!("{}", a), "1us");
        a = Atom::SInt(42is);
        assert_eq!(format!("{}", a), "42is");
        a = Atom::Float(5.55f64);
        assert_eq!(format!("{}", a), "5.55f64");
        a = Atom::Str(String::from_str("help I'm trapped in a SECD virtual machine!"));
        assert_eq!(format!("{}", a), "\"help I'm trapped in a SECD virtual machine!\"");
    }
}
}
// Finish test for LD instruction
#![crate_name = "seax_svm"]
#![crate_type = "lib"]
#![feature(box_syntax)]
/// Contains the Seax Virtual Machine (SVM) and miscellaneous
/// support code.
pub mod svm {
use svm::slist::List;
use svm::slist::List::{Cons,Nil};
use svm::slist::Stack;
use std::iter::IteratorExt;
use std::fmt;
/// Singly-linked list and stack implementations.
///
/// `List<T>` is a singly-linked cons list with boxed items. `Stack<T>` is
/// defined as a trait providing stack operations(`push()`, `pop()`, and
/// `peek()`), and an implementation for `List`.
#[macro_use]
pub mod slist {
use svm::slist::List::{Cons,Nil};
use std::fmt;
use std::ops::Index;
/// Common functions for an immutable Stack abstract data type.
///
/// All mutating operations consume the stack and return a new one,
/// giving persistent (value-semantics) stack behaviour.
pub trait Stack<T> {
    /// Push an item to the top of the stack, returning a new stack
    fn push(self, item : T) -> Self;
    /// Pop the top element of the stack. Returns an Option on a T and
    /// a new Stack<T> to replace this.
    fn pop(self) -> Option<(T, Self)>;
    /// Peek at the top item of the stack.
    ///
    /// Returns Some<T> if there is an item on top of the stack,
    /// and None if the stack is empty.
    fn peek(&self) -> Option<&T>;
    /// Returns an empty stack.
    fn empty() -> Self;
}
/// `Stack` operations provided by the cons list: the list head is the
/// top of the stack, so every operation is O(1).
impl<T> Stack<T> for List<T> {
    /// Pushes `item`, which becomes the new head of the list.
    ///
    /// # Examples:
    /// ```
    /// use seax_svm::svm::slist::{List,Stack};
    ///
    /// let mut s: List<isize> = Stack::empty();
    /// assert_eq!(s.peek(), None);
    /// s = s.push(1);
    /// assert_eq!(s.peek(), Some(&1));
    /// s = s.push(6);
    /// assert_eq!(s.peek(), Some(&6));
    /// ```
    fn push(self, item: T) -> List<T> {
        Cons(item, box self)
    }
    /// Removes the head of the list.
    ///
    /// Yields `Some((head, rest))` when the list is non-empty, or `None`
    /// when there is nothing left to pop.
    ///
    /// # Examples:
    /// ```
    /// # use seax_svm::svm::slist::{List,Stack};
    ///
    /// let mut s: List<isize> = Stack::empty();
    /// s = s.push(2);
    /// s = s.push(1);
    /// let pop_result = s.pop().unwrap();
    /// s = pop_result.1;
    /// assert_eq!(s.peek(), Some(&2));
    /// assert_eq!(pop_result.0, 1);
    /// ```
    fn pop(self) -> Option<(T,List<T>)> {
        if let Cons(head, rest) = self {
            Some((head, *rest))
        } else {
            None
        }
    }
    /// The empty stack is simply `Nil`.
    fn empty() -> List<T> {
        Nil
    }
    /// Borrows the head of the list without removing it.
    ///
    /// Returns `Some(&head)` for a non-empty list and `None` otherwise.
    ///
    /// # Examples:
    /// ```
    /// # use seax_svm::svm::slist::{List,Stack};
    ///
    /// let mut s: List<isize> = Stack::empty();
    /// assert_eq!(s.peek(), None);
    /// s = s.push(2);
    /// assert_eq!(s.peek(), Some(&2));
    /// ```
    fn peek(&self) -> Option<&T> {
        match *self {
            Nil => None,
            Cons(ref head, _) => Some(head)
        }
    }
}
/// Singly-linked cons list.
///
/// This is used internally to represent list primitives in the
/// machine. The tail is boxed, so each cell is a separate heap
/// allocation.
#[derive(PartialEq,Clone,Debug)]
pub enum List<T> {
    /// Cons cell containing a `T` and a link to the tail
    Cons(T, Box<List<T>>),
    /// The empty list.
    Nil,
}
/// Public implementation for List.
impl<T> List<T> {
    /// Constructs a new, empty list.
    pub fn new() -> List<T> {
        Nil
    }
    /// Pushes the given item onto the front of the list.
    ///
    /// Returns the list whose head is the new item. O(1).
    pub fn prepend(self, it: T) -> List<T> {
        Cons(it, box self)
    }
    /// Appends an item to the end of the list. O(n).
    ///
    /// NOTE(review): still a stub — calling this panics via
    /// `unimplemented!()`.
    pub fn append(self, it: T) {
        unimplemented!()
    }
    /// Counts the elements of the list by walking the links.
    pub fn length (&self) -> usize {
        match *self {
            Nil => 0,
            Cons(_, ref rest) => rest.length() + 1
        }
    }
    /// Provide a forward iterator over borrowed elements
    #[inline]
    pub fn iter<'a>(&'a self) -> ListIterator<'a, T> {
        ListIterator { current: self }
    }
}
/// Formats a list as nested pairs, e.g. `(1, (2, (3, nil)))`.
///
/// NOTE(review): dropped the unused lifetime parameter `'a` that was
/// declared on this impl but never constrained, and the stale TODO.
impl<T> fmt::Display for List<T> where T: fmt::Display{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Cons(ref head, ref tail) => write!(f, "({}, {})", head, tail),
            Nil => write!(f,"nil")
        }
    }
}
/// Wraps a List<T> to allow it to be used as an Iterator<T>
pub struct ListIterator<'a, T:'a> {
    current: &'a List<T> // the cell whose head the iterator yields next
}
/// Implementation of Iterator for List. This allows iteration by
/// link hopping.
impl<'a, T> Iterator for ListIterator<'a, T> {
    type Item = &'a T;
    /// Advances the iterator one cell down the list.
    ///
    /// Yields a borrowed head element, or `None` once `Nil` is reached.
    fn next(&mut self) -> Option<&'a T> {
        match *self.current {
            Nil => None,
            Cons(ref head, ref tail) => {
                self.current = &**tail;
                Some(head)
            }
        }
    }
}
impl<'a, T> ExactSizeIterator for ListIterator<'a, T> {
    /// Exact number of elements remaining: the length of the unvisited
    /// portion of the list (an O(n) walk, not a cached count).
    fn len(&self) -> usize {
        self.current.length()
    }
}
/// 1-based indexing into a list: `l[1]` is the head.
///
/// NOTE(review): the previous loop bound `0..*_index-1` underflows
/// `usize` when indexing with 0; `1..*_index` skips exactly the same
/// number of elements for every index >= 1, and index 0 now degrades
/// to the head instead of wrapping around.
impl<T> Index<usize> for List<T> {
    type Output = T;

    /// Returns a borrow of the element at the given 1-based position.
    ///
    /// Panics (on `unwrap`) if the index is past the end of the list.
    fn index<'a>(&'a self, _index: &usize) -> &'a T {
        let mut it = self.iter();
        // Skip the `_index - 1` elements before the requested one.
        for _ in 1..*_index {
            it.next();
        }
        it.next().unwrap()
    }
}
/// 1-based indexing into a list with a signed index: `l[1is]` is the head.
///
/// NOTE(review): rewritten as `1..*_index` for consistency with the
/// `usize` impl; for `isize` this range is element-for-element identical
/// to the old `0..*_index-1` (both are empty for indices <= 1).
impl<T> Index<isize> for List<T> {
    type Output = T;

    /// Returns a borrow of the element at the given 1-based position.
    ///
    /// Panics (on `unwrap`) if the index is past the end of the list.
    fn index<'a>(&'a self, _index: &isize) -> &'a T {
        let mut it = self.iter();
        // Skip the `_index - 1` elements before the requested one.
        for _ in 1..*_index {
            it.next();
        }
        it.next().unwrap()
    }
}
/// Convenience macro for making lists.
///
/// An empty invocation, `list!()`, expands to `Nil`.
///
/// NOTE(review): the empty arm previously expanded to `@Empty`, which is
/// not a valid expression, so `list!()` could never compile.
///
/// # Example:
///
/// ```
/// # #[macro_use] extern crate seax_svm;
/// # use seax_svm::svm::slist;
/// # use seax_svm::svm::slist::List::{Cons, Nil};
/// # fn main () {
/// assert_eq!(
///     list!(1i32, 2i32, 3i32),
///     Cons(1i32, Box::new(Cons(2i32, Box::new(Cons(3i32, Box::new(Nil))))))
///     );
/// # }
/// ```
#[macro_export]
macro_rules! list(
    ( $e:expr, $($rest:expr),+ ) => ( Cons($e, Box::new(list!( $( $rest ),+ )) ));
    ( $e:expr ) => ( Cons($e, Box::new(Nil)) );
    () => ( Nil )
);
#[cfg(test)]
mod tests {
    use super::{List, Stack};
    use super::List::{Cons,Nil};
    // `length` counts cons cells; the empty list has length 0.
    #[test]
    fn test_list_length() {
        let full_list: List<i32> = list!(1i32, 2i32, 3i32);
        let empty_list: List<i32> = List::new();
        assert_eq!(full_list.length(), 3);
        assert_eq!(empty_list.length(), 0);
    }
    // Display renders a list as nested pairs ending in "nil".
    #[test]
    fn test_list_to_string() {
        let l: List<i32> = Cons(1, Box::new(Cons(2, Box::new(Cons(3, Box::new(Nil))))));
        assert_eq!(l.to_string(), "(1, (2, (3, nil)))");
    }
    // Stack::empty() produces the same empty list as List::new().
    #[test]
    fn test_stack_length() {
        let full_stack: List<i32> = list!(1i32, 2i32, 3i32);
        let empty_stack: List<i32> = Stack::empty();
        assert_eq!(full_stack.length(), 3);
        assert_eq!(empty_stack.length(), 0);
    }
    // peek borrows the head (macro puts the first argument at the head).
    #[test]
    fn test_stack_peek() {
        let full_stack: List<i32> = list!(1i32, 2i32, 3i32);
        let empty_stack: List<i32> = Stack::empty();
        assert_eq!(full_stack.peek(), Some(&1));
        assert_eq!(empty_stack.peek(), None);
    }
    // push prepends, so the most recent push is always the head.
    #[test]
    fn test_stack_push() {
        let mut s: List<i32> = Stack::empty();
        assert_eq!(s.peek(), None);
        s = s.push(1);
        assert_eq!(s.peek(), Some(&1));
        s = s.push(6);
        assert_eq!(s.peek(), Some(&6));
    }
    // pop returns (top, rest) in LIFO order.
    #[test]
    fn test_stack_pop() {
        let mut s: List<i32> = Stack::empty();
        assert_eq!(s.peek(), None);
        s = s.push(1);
        assert_eq!(s.peek(), Some(&1));
        s = s.push(6);
        assert_eq!(s.peek(), Some(&6));
        let pop_result = s.pop().unwrap(); // should not break
        s = pop_result.1;
        assert_eq!(s.peek(), Some(&1));
        assert_eq!(pop_result.0, 6);
    }
    // list! builds the same structure as hand-written Cons cells.
    #[test]
    fn test_list_macro() {
        let l: List<i32> = list!(1i32, 2i32, 3i32);
        assert_eq!(l.to_string(), "(1, (2, (3, nil)))")
    }
    // iter() walks the list front-to-back.
    #[test]
    fn test_list_iter() {
        let l: List<isize> = list!(1,2,3,4,5,6);
        let mut string = String::new();
        for item in l.iter() {
            string.push_str((item.to_string() + ", ").as_slice());
        }
        assert_eq!(string.as_slice(), "1, 2, 3, 4, 5, 6, ")
    }
}
}
/// SVM cell types.
///
/// A cell in the VM can be either an atom (single item, either unsigned
/// int, signed int, float, or string) or a pointer to a list cell.
#[derive(PartialEq,Clone,Debug)]
pub enum SVMCell {
    /// A single scalar value (see `Atom`).
    AtomCell(Atom),
    /// A boxed pointer to a cons list of cells.
    ListCell(Box<List<SVMCell>>)
}
impl fmt::Display for SVMCell {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{}]", self)
}
}
/// SVM atom types.
///
/// A VM atom can be either an unsigned int, signed int, float,
/// char, or string.
///
/// TODO: Strings could be implemented as char lists rather than
/// Rust strings.
#[derive(PartialEq,Clone,Debug)]
pub enum Atom {
    /// Unsigned integer (machine word)
    UInt(usize),
    /// Signed integer (machine word)
    SInt(isize),
    /// 64-bit floating point number
    Float(f64),
    /// Single character
    Char(char),
    /// Heap-allocated string
    Str(String),
}
/// Displays an atom in a literal-like form that reveals its type:
/// numeric values carry a suffix (`us`, `is`, `f64`), chars are quoted
/// with `'` and strings with `"`.
impl fmt::Display for Atom {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Atom::UInt(value) => write!(f, "{}us", value),
            Atom::SInt(value) => write!(f, "{}is", value),
            Atom::Float(value) => write!(f, "{}f64", value),
            Atom::Char(value) => write!(f, "'{}'", value),
            Atom::Str(ref value) => write!(f, "\"{}\"", value)
        }
    }
}
/// SVM instruction types
///
/// The instruction set of the SECD machine. `S`, `E`, `C`, and `D`
/// refer to the stack, environment, control, and dump registers.
pub enum SVMInstruction {
    /// `nil`
    ///
    /// Pushes an empty list (nil) onto the stack
    InstNIL,
    /// `ldc`: `L`oa`d` `C`onstant. Loads a constant (atom)
    InstLDC(Atom),
    /// `ld`: `L`oa`d`. Pushes a variable onto the stack.
    ///
    /// The variable is indicated by the argument, a pair.
    /// The pair's `car` specifies the level, the `cdr` the position.
    /// So `(1 . 3)` gives the current function's (level 1) third
    /// parameter.
    InstLD,
    /// `ldf`: `L`oa`d` `F`unction.
    ///
    /// Takes one list argument representing a function and constructs
    /// a closure (a pair containing the function and the current
    /// environment) and pushes that onto the stack.
    InstLDF,
    /// `join`
    ///
    /// Pops a list reference from the dump and makes this the new value
    /// of `C`. This instruction occurs at the end of both alternatives of
    /// a `sel`.
    InstJOIN,
    /// `ap`: `Ap`ply.
    ///
    /// Pops a closure and a list of parameter values from the stack.
    /// The closure is applied to the parameters by installing its
    /// environment as the current one, pushing the parameter list
    /// in front of that, clearing the stack, and setting `C` to the
    /// closure's function pointer. The previous values of `S`, `E`,
    /// and the next value of `C` are saved on the dump.
    InstAP,
    /// `ret`: `Ret`urn.
    ///
    /// Pops one return value from the stack, restores
    /// `S`, `E`, and `C` from the dump, and pushes
    /// the return value onto the now-current stack.
    InstRET,
    /// `dum`: `Dum`my.
    ///
    /// Pushes a dummy environment (an empty list) onto the `E` stack.
    InstDUM,
    /// `rap`: `R`ecursive `Ap`ply.
    /// Works like `ap`, only that it replaces an occurrence of a
    /// dummy environment with the current one, thus making recursive
    /// functions possible.
    InstRAP,
    /// `sel`: `Sel`ect
    ///
    /// Expects two list arguments, and pops a value from the stack.
    /// The first list is executed if the popped value was non-nil,
    /// the second list otherwise. Before one of these list pointers
    /// is made the new `C`, a pointer to the instruction following
    /// `sel` is saved on the dump.
    InstSEL,
    /// `add`
    ///
    /// Pops two numbers off of the stack and adds them, pushing the
    /// result onto the stack. This will up-convert integers to floating
    /// point if necessary.
    ///
    /// TODO: figure out what happens when you try to add things that aren't
    /// numbers (maybe the compiler won't let this happen?).
    InstADD,
    /// `sub`: `Sub`tract
    ///
    /// Pops two numbers off of the stack and subtracts the first from the
    /// second, pushing the result onto the stack. This will up-convert
    /// integers to floating point if necessary.
    ///
    /// TODO: figure out what happens when you try to subtract things that
    /// aren't numbers (maybe the compiler won't let this happen?).
    InstSUB,
    /// `mul`: `Mul`tiply
    ///
    /// Pops two numbers off of the stack and multiplies them, pushing the
    /// result onto the stack. This will up-convert integers to floating
    /// point if necessary.
    ///
    /// TODO: figure out what happens when you try to multiply things that
    /// aren't numbers (maybe the compiler won't let this happen?).
    InstMUL,
    /// `div`: `Div`ide
    ///
    /// Pops two numbers off of the stack and divides the first by the second,
    /// pushing the result onto the stack. This performs integer division.
    ///
    /// TODO: figure out what happens when you try to divide things that
    /// aren't numbers (maybe the compiler won't let this happen?).
    InstDIV,
    /// `fdiv`: `F`loating-point `div`ide
    ///
    /// Pops two numbers off of the stack and divides the first by the second,
    /// pushing the result onto the stack. This performs float division.
    ///
    /// TODO: figure out what happens when you try to divide things that
    /// aren't numbers (maybe the compiler won't let this happen?).
    ///
    /// TODO: Not sure if there should be separate float and int divide words
    /// I guess the compiler can figure this out
    InstFDIV,
    /// `mod`: `Mod`ulo
    ///
    /// Pops two numbers off of the stack and divides the first by the second,
    /// pushing the remainder onto the stack.
    ///
    /// TODO: figure out what happens when you try to modulo things that
    /// aren't numbers (maybe the compiler won't let this happen?).
    InstMOD
    // TODO: add some hardcoded I/O instructions here so that you can
    //  do I/O without farming everything out to `stdio`
    // TODO: add `cons` and `cdr` words
}
/// Represents a SVM machine state
///
/// Holds the four SECD registers; each is a cons `List` used as a stack.
pub struct State {
    stack: List<SVMCell>,   // (S) working stack for intermediate values
    env: List<SVMCell>,     // (E) environment: list of binding frames
    control: List<SVMCell>, // (C) remaining program to execute
    dump: List<SVMCell>     // (D) saved machine state for returns
}
impl State {
    /// Creates a new empty state
    ///
    /// All four registers start as the empty list.
    fn new() -> State {
        State {
            stack: Stack::empty(),
            env: Stack::empty(),
            control: Stack::empty(),
            dump: Stack::empty()
        }
    }
    /// Evaluates an instruction.
    ///
    /// Evaluates an instruction against a state, returning a new state.
    /// Consumes `self`; the registers not touched by the instruction are
    /// moved unchanged into the returned state.
    pub fn eval(self, inst: SVMInstruction) -> State {
        match inst {
            // nil: push an empty list onto the stack.
            SVMInstruction::InstNIL => {
                State {
                    stack: self.stack.push(SVMCell::ListCell(box List::new())),
                    env: self.env,
                    control: self.control,
                    dump: self.dump
                }
            }
            // ldc: push the constant operand onto the stack.
            SVMInstruction::InstLDC(atom) => {
                State {
                    stack: self.stack.push(SVMCell::AtomCell(atom)),
                    env: self.env,
                    control: self.control,
                    dump: self.dump
                }
            },
            // ld: pop a `(level . pos)` pair of SInts from the stack and
            // push the environment entry it names.
            // Panics if the stack is empty or the popped cell is not such
            // a pair (see TODO below).
            SVMInstruction::InstLD => {
                let pop = self.stack.pop().unwrap();
                match pop.0 {
                    SVMCell::ListCell(box Cons(
                        SVMCell::AtomCell(
                            Atom::SInt(level)
                        ),
                        box Cons(
                            SVMCell::AtomCell(
                                Atom::SInt(pos)
                            ),
                            box Nil
                        )
                    )
                ) => {
                        // NOTE(review): slist's Index is 1-based (l[1] is the
                        // head) yet `level-1`/`pos-1` are subtracted here —
                        // confirm which convention `ld` operands follow.
                        let environment = match self.env[level-1] {
                            SVMCell::ListCell(ref l) => l.clone(),
                            _ => panic!()
                        };
                        State {
                            stack: pop.1.push(environment[pos-1].clone()),
                            env: self.env,
                            control: self.control,
                            dump: self.dump
                        }
                    },
                    _ => panic!() //TODO: put error on stack instead
                }
            },
            // Remaining instructions are not yet implemented.
            _ => { unimplemented!() }
        }
    }
}
/*
/// Evaluates a program.
///
/// Evaluates a program represented as an `Iterator` of `SVMInstruction`s.
/// Returns the final machine state at the end of execution.
pub fn evalProgram<I>(insts: I) -> State where I: Iterator<Item=SVMInstruction> {
    insts.fold(State::new(), |last_state: State, inst: SVMInstruction| last_state.eval(inst))
}*/
#[cfg(test)]
mod tests {
    use super::slist;
    use super::slist::Stack;
    use super::slist::List::{Cons,Nil};
    use super::State;
    use super::{SVMInstruction, SVMCell, Atom};
    use super::SVMCell::{AtomCell, ListCell};
    use super::Atom::{SInt, Char, Float, Str};
    // A fresh state has all four registers empty.
    #[test]
    fn test_empty_state() {
        let state = State::new();
        assert_eq!(state.stack.length(), 0);
        assert_eq!(state.env.length(), 0);
        assert_eq!(state.control.length(), 0);
        assert_eq!(state.dump.length(), 0);
    }
    // `nil` pushes an empty list cell onto the stack.
    #[test]
    fn test_eval_nil () {
        let mut state = State::new();
        assert_eq!(state.stack.peek(), None);
        state = state.eval(SVMInstruction::InstNIL);
        assert_eq!(state.stack.peek(), Some(&SVMCell::ListCell(box Nil)));
    }
    // `ldc` pushes its constant operand (any atom type) onto the stack.
    #[test]
    fn test_eval_ldc () {
        let mut state = State::new();
        assert_eq!(state.stack.peek(), None);
        state = state.eval(SVMInstruction::InstLDC(SInt(1)));
        assert_eq!(state.stack.peek(), Some(&AtomCell(SInt(1))));
        state = state.eval(SVMInstruction::InstLDC(Char('a')));
        assert_eq!(state.stack.peek(), Some(&AtomCell(Char('a'))));
        state = state.eval(SVMInstruction::InstLDC(Float(1.0f64)));
        assert_eq!(state.stack.peek(), Some(&AtomCell(Float(1.0f64))));
    }
    // `ld` pops the pair (1 . 2) from the stack and loads the named
    // entry of the first environment frame onto the stack.
    #[test]
    fn test_eval_ld () {
        let mut state = State {
            stack: list!(ListCell(box list!(AtomCell(SInt(1)),AtomCell(SInt(2))))),
            env: list!(ListCell(box list!(AtomCell(Str(String::from_str("load me!"))),AtomCell(Str(String::from_str("don't load me!")))))),
            control: Stack::empty(),
            dump: Stack::empty()
        };
        state = state.eval(SVMInstruction::InstLD);
        assert_eq!(state.stack.peek(), Some(&AtomCell(Str(String::from_str("load me!")))));
    }
    // Each atom variant formats with its type-revealing suffix/delimiter.
    #[test]
    fn test_atom_show () {
        let mut a: Atom;
        a = Atom::Char('a');
        assert_eq!(format!("{}", a), "'a'");
        a = Atom::UInt(1us);
        assert_eq!(format!("{}", a), "1us");
        a = Atom::SInt(42is);
        assert_eq!(format!("{}", a), "42is");
        a = Atom::Float(5.55f64);
        assert_eq!(format!("{}", a), "5.55f64");
        a = Atom::Str(String::from_str("help I'm trapped in a SECD virtual machine!"));
        assert_eq!(format!("{}", a), "\"help I'm trapped in a SECD virtual machine!\"");
    }
}
}
// |
#![crate_name = "seax_svm"]
#![crate_type = "lib"]
#![feature(box_syntax,box_patterns,core)]
/// Contains the Seax Virtual Machine (SVM) and miscellaneous
/// support code.
pub mod svm {
pub use self::slist::List;
pub use self::slist::List::{Cons,Nil};
pub use self::slist::Stack;
pub use self::cell::{SVMCell,Atom,Inst};
use self::cell::SVMCell::*;
use self::cell::Atom::*;
use self::cell::Inst::*;
/// Singly-linked list and stack implementations.
///
/// `List<T>` is a singly-linked cons list with boxed items. `Stack<T>` is
/// defined as a trait providing stack operations(`push()`, `pop()`, and
/// `peek()`), and an implementation for `List`.
#[macro_use]
pub mod slist;
/// SVM cell types.
///
/// A cell in the VM can be either an atom (single item, either unsigned
/// int, signed int, float, or string), a pointer to a list cell, or an
/// instruction.
pub mod cell;
#[cfg(test)]
mod tests;
/// Represents a SVM machine state
///
/// The four SECD registers; each is a cons `List` used as a stack.
#[derive(PartialEq,Clone,Debug)]
pub struct State {
    stack: List<SVMCell>,   // (S) working stack for intermediate values
    env: List<SVMCell>,     // (E) environment: list of binding frames
    control: List<SVMCell>, // (C) remaining program to execute
    dump: List<SVMCell>     // (D) saved machine state for returns
}
impl State {
/// Constructs the initial machine state, with every register empty.
pub fn new() -> State {
    State {
        dump: Stack::empty(),
        control: Stack::empty(),
        env: Stack::empty(),
        stack: Stack::empty()
    }
}
/// Evaluates an instruction.
///
/// Evaluates an instruction against a state, returning a new state.
/// TODO: rewrite me to use the next instruction on the control stack,
/// rather than a parameter.
pub fn eval(self) -> State {
match self.control.pop() {
// NIL: pop an empty list onto the stack
Some((InstCell(NIL), new_control @ _)) => {
State {
stack: self.stack.push(ListCell(box List::new())),
env: self.env,
control: new_control,
dump: self.dump
}
}
// LDC: load constant
Some((InstCell(LDC), new_control @ _)) => {
let (atom,newer_control) = new_control.pop().unwrap();
State {
stack: self.stack.push(atom),
env: self.env,
control: newer_control,
dump: self.dump
}
},
// LD: load variable
Some((InstCell(LD), new_control @ _)) => {
match new_control.pop() {
Some((ListCell(
box Cons(AtomCell(SInt(level)),
box Cons(AtomCell(SInt(pos)),
box Nil))
), newer_control @ _)) => {
let environment = match self.env[level] {
SVMCell::ListCell(ref l) => l.clone(),
_ => panic!("[LD]: Fatal: expected list in $e, found {:?}",self.env[level])
};
State {
stack: self.stack.push(environment[pos].clone()),
env: self.env,
control: newer_control,
dump: self.dump
}
},
it @ _ => panic!("[LD] Fatal: expected pair, found {:?}", it)
}
},
// LDF: load function
Some((InstCell(LDF), new_control @ _)) => {
let (func, newer_control) = new_control.pop().unwrap();
State {
stack: self.stack.push(ListCell(box list!(func,self.env[0usize].clone()))),
env: self.env,
control: newer_control,
dump: self.dump
}
},
Some((InstCell(JOIN), new_control @ _)) => {
let (top, new_dump) = self.dump.pop().unwrap();
State {
stack: self.stack,
env: self.env,
control: match top {
ListCell(box Nil) => new_control,
_ => new_control.push(top)
},
dump: new_dump
}
},
Some((InstCell(ADD), new_control @ _)) => {
let (op1, new_stack) = self.stack.pop().unwrap();
match op1 {
AtomCell(a) => {
let (op2, newer_stack) = new_stack.pop().unwrap();
match op2 {
AtomCell(b) => State {
stack: newer_stack.push(AtomCell(a + b)),
env: self.env,
control: new_control,
dump: self.dump
},
b => panic!("[ADD] TypeError: expected compatible operands, found (ADD {:?} {:?})", a, b)
}
},
_ => panic!("[ADD]: Expected first operand to be atom, found list or instruction"),
}
},
Some((InstCell(SUB), new_control @ _)) => {
let (op1, new_stack) = self.stack.pop().unwrap();
match op1 {
AtomCell(a) => {
let (op2, newer_stack) = new_stack.pop().unwrap();
match op2 {
AtomCell(b) => State {
stack: newer_stack.push(AtomCell(a - b)),
env: self.env,
control: new_control,
dump: self.dump
},
b => panic!("[SUB] TypeError: expected compatible operands, found (SUB {:?} {:?})", a, b)
}
},
_ => panic!("[SUB]: Expected first operand to be atom, found list or instruction"),
}
},
Some((InstCell(FDIV), new_control @ _)) => {
let (op1, new_stack) = self.stack.pop().unwrap();
match op1 {
AtomCell(a) => {
let (op2, newer_stack) = new_stack.pop().unwrap();
match op2 {
AtomCell(b) => State {
stack: newer_stack.push(AtomCell(
match (a, b) {
// same type: coerce to float
(SInt(a), SInt(b)) => Float(a as f64 / b as f64),
(UInt(a), UInt(b)) => Float(a as f64 / b as f64),
(Float(a), Float(b)) => Float(a / b),
// float + int: coerce to float
(Float(a), SInt(b)) => Float(a / b as f64),
(Float(a), UInt(b)) => Float(a / b as f64),
(SInt(a), Float(b)) => Float(a as f64 / b),
(UInt(a), Float(b)) => Float(a as f64 / b),
// uint + sint: coerce to float
(UInt(a), SInt(b)) => Float(a as f64 / b as f64),
(SInt(a), UInt(b)) => Float(a as f64 / b as f64),
// char + any: coerce to int -> float
// but if you ever actually do this, then ...wat?
(Char(a), Char(b)) => Float(a as u8 as f64 / b as u8 as f64),
(Char(a), UInt(b)) => Float(a as u8 as f64 / b as f64),
(Char(a), SInt(b)) => Float(a as u8 as f64 / b as f64),
(Char(a), Float(b)) => Float(a as u8 as f64 / b as f64),
(UInt(a), Char(b)) => Float(a as f64 / b as u8 as f64),
(SInt(a), Char(b)) => Float(a as f64 / b as u8 as f64),
(Float(a), Char(b)) => Float(a as f64 / b as u8 as f64)
}
)),
env: self.env,
control: new_control,
dump: self.dump
},
b => panic!("[FDIV] TypeError: expected compatible operands, found (FDIV {:?} {:?})", a, b)
}
},
_ => panic!("[FDIV]: Expected first operand to be atom, found list or instruction"),
}
},
Some((InstCell(DIV), new_control @ _)) => {
let (op1, new_stack) = self.stack.pop().unwrap();
match op1 {
AtomCell(a) => {
let (op2, newer_stack) = new_stack.pop().unwrap();
match op2 {
AtomCell(b) => State {
stack: newer_stack.push(AtomCell(a / b)),
env: self.env,
control: new_control,
dump: self.dump
},
b => panic!("[DIV] TypeError: expected compatible operands, found (DIV {:?} {:?})", a, b)
}
},
_ => panic!("[DIV]: Expected first operand to be atom, found list or instruction"),
}
},
Some((InstCell(MUL), new_control @ _)) => {
let (op1, new_stack) = self.stack.pop().unwrap();
match op1 {
AtomCell(a) => {
let (op2, newer_stack) = new_stack.pop().unwrap();
match op2 {
AtomCell(b) => State {
stack: newer_stack.push(AtomCell(a * b)),
env: self.env,
control: new_control,
dump: self.dump
},
b => panic!("[MUL] TypeError: expected compatible operands, found (MUL {:?} {:?})", a, b)
}
},
_ => panic!("[MUL]: Expected first operand to be atom, found list or instruction"),
}
},
Some((InstCell(MOD), new_control @ _)) => {
let (op1, new_stack) = self.stack.pop().unwrap();
match op1 {
AtomCell(a) => {
let (op2, newer_stack) = new_stack.pop().unwrap();
match op2 {
AtomCell(b) => State {
stack: newer_stack.push(AtomCell(a % b)),
env: self.env,
control: new_control,
dump: self.dump
},
b => panic!("[MOD] TypeError: expected compatible operands, found (MOD {:?} {:?})", a, b)
}
},
_ => panic!("[MOD]: Expected first operand to be atom, found list or instruction"),
}
},
Some((InstCell(EQ), new_control @ _)) => {
let (op1, new_stack) = self.stack.pop().unwrap();
let (op2, newer_stack) = new_stack.pop().unwrap();
match (op1,op2) {
(AtomCell(a), AtomCell(b)) => State {
stack: newer_stack.push(
match a == b {
true => ListCell(box list!(AtomCell(SInt(1)))),
false => ListCell(box Nil)
}
),
env: self.env,
control: new_control,
dump: self.dump
},
(_,_) => unimplemented!()
}
},
Some((InstCell(GT), new_control @ _)) => {
let (op1, new_stack) = self.stack.pop().unwrap();
let (op2, newer_stack) = new_stack.pop().unwrap();
match (op1,op2) {
(AtomCell(a), AtomCell(b)) => State {
stack: newer_stack.push(
match a > b {
true => ListCell(box list!(AtomCell(SInt(1)))),
false => ListCell(box Nil)
}
),
env: self.env,
control: new_control,
dump: self.dump
},
(_,_) => unimplemented!()
}
},
Some((InstCell(GTE), new_control @ _)) => {
let (op1, new_stack) = self.stack.pop().unwrap();
let (op2, newer_stack) = new_stack.pop().unwrap();
match (op1,op2) {
(AtomCell(a), AtomCell(b)) => State { stack: newer_stack.push(
match a >= b {
true => ListCell(box list!(AtomCell(SInt(1)))),
false => ListCell(box Nil)
}
),
env: self.env,
control: new_control,
dump: self.dump
},
(_,_) => unimplemented!()
}
},
Some((InstCell(LT), new_control @ _)) => {
let (op1, new_stack) = self.stack.pop().unwrap();
let (op2, newer_stack) = new_stack.pop().unwrap();
match (op1,op2) {
(AtomCell(a), AtomCell(b)) => State {
stack: newer_stack.push(
match a < b {
true => ListCell(box list!(AtomCell(SInt(1)))),
false => ListCell(box Nil)
}
),
env: self.env,
control: new_control,
dump: self.dump
},
(_,_) => unimplemented!()
}
},
Some((InstCell(LTE), new_control @ _)) => {
let (op1, new_stack) = self.stack.pop().unwrap();
let (op2, newer_stack) = new_stack.pop().unwrap();
match (op1,op2) {
(AtomCell(a), AtomCell(b)) => State {
stack: newer_stack.push(
match a <= b {
true => ListCell(box list!(AtomCell(SInt(1)))),
false => ListCell(box Nil)
}
),
env: self.env,
control: new_control,
dump: self.dump
},
(_,_) => unimplemented!()
}
},
Some((InstCell(ATOM), new_control @ _)) => {
let (target, new_stack) = self.stack.pop().unwrap();
State {
stack: new_stack.push(
match target {
AtomCell(_) => ListCell(box list!(AtomCell(SInt(1)))),
_ => ListCell(box Nil)
}
),
env: self.env,
control: new_control,
dump: self.dump
}
},
Some((InstCell(AP), new_control @ _)) => {
match self.stack.pop().unwrap() {
(ListCell(box Cons(ListCell(box func), box Cons(ListCell(box params), box Nil))), new_stack) => State {
stack: new_stack,
env: params,
control: func,
dump: self.dump.push(ListCell(box self.env)).push(ListCell(box new_control))
},
(_, thing) => panic!("[AP]: Fatal: Expected closure on stack, got:\n{:?}", thing)
}
},
Some((InstCell(RAP), new_control @ _)) => {
match self.stack.pop().unwrap() {
(ListCell(box Cons(ListCell(box func), box Cons(ListCell(box params), box Nil))), new_stack) => {
match new_stack.pop() {
Some((v @ ListCell(_), newer_stack)) => {
State {
stack: Stack::empty(),
env: params.push(v),
control: func,
dump: self.dump
.push(ListCell(box new_control))
.push(ListCell(box self.env.pop().unwrap().1))
.push(ListCell(box newer_stack))
}
},
Some((thing, _)) => panic!("[AP]: Fatal: Expected closure on stack, got:\n{:?}", thing),
None => panic!("[AP]: Fatal: expected non-empty stack")
}
},
(_, thing) => panic!("[AP]: Fatal: Expected closure on stack, got:\n{:?}", thing)
}
},
Some((InstCell(RET), _)) => {
let (head, _) = self.stack.pop().unwrap();
let (new_stack, new_dump) = {
match self.dump.pop().unwrap() {
(ListCell(s), d @ _) => (*s, d),
it @ (AtomCell(_),_) => (list!(it.0), it.1),
_ => panic!("[RET]: Expected non-empty stack")
}
};
let (new_env, newer_dump) = {
match new_dump.pop().unwrap() {
(ListCell(e), d @ _) => (*e, d),
_ => panic!("[RET]: Expected new environment on dump stack")
}
};
let (newer_control, newest_dump) = {
match newer_dump.pop().unwrap() {
(ListCell(c), d @ _) => (*c, d),
it @ (InstCell(_),_) => (list!(it.0), it.1),
_ => panic!("[RET]: Expected new control stack on dump stack")
}
};
State {
stack: new_stack.push(head),
env: new_env,
control: newer_control,
dump: newest_dump
}
},
Some((InstCell(DUM), new_control @ _)) => {
State {
stack: self.stack,
env: self.env.push(ListCell(list!())),
control: new_control,
dump: self.dump
}
},
Some((InstCell(SEL), new_control @ _)) => {
match new_control.pop() {
Some((ListCell(box true_case), newer_control)) => {
match newer_control.pop() {
Some((ListCell(box false_case), newest_control)) => {
match self.stack.pop() {
Some((ListCell(box Nil), new_stack)) => { // false case
State {
stack: new_stack,
env: self.env,
control: false_case,
dump: self.dump.push(ListCell(box newest_control))
}
},
Some((_, new_stack)) => { // true case
State {
stack: new_stack,
env: self.env,
control: true_case,
dump: self.dump.push(ListCell(box newest_control))
}
},
None => panic!("[SEL]: expected non-empty stack")
}
},
Some((thing, _)) => panic!("[SEL]: expected list on control, found {:?}", thing),
None => panic!("[SEL]: expected list on control, found nothing")
}
},
Some((thing, _)) => panic!("[SEL]: expected list on control, found {:?}", thing),
None => panic!("[SEL]: expected list on control, found nothing")
}
},
Some((InstCell(CAR), new_control @ _)) => {
match self.stack.pop() {
Some((ListCell(box Cons(car, _)), new_stack)) => State {
stack: new_stack.push(car),
env: self.env,
control: new_control,
dump: self.dump
},
Some((ListCell(box Nil), _)) => panic!("[CAR]: expected non-empty list, found Nil"),
Some((thing, _)) => panic!("[CAR]: expected non-empty list, found {:?}", thing),
None => panic!("[CAR]: Expected non-empty list, found nothing")
}
},
Some((InstCell(CDR), new_control @ _)) => {
match self.stack.pop() {
Some((ListCell(box Cons(_, cdr)), new_stack)) => State {
stack: new_stack.push(ListCell(cdr)),
env: self.env,
control: new_control,
dump: self.dump
},
Some((ListCell(box Nil), _)) => panic!("[CDR]: expected non-empty list, found Nil"),
Some((thing, _)) => panic!("[CDR]: expected non-empty list, found {:?}", thing),
None => panic!("[CDR]: Expected non-empty list, found nothing")
}
},
Some((InstCell(CONS), new_control @ _)) => {
match self.stack.pop() {
Some((thing, new_stack)) => {
match new_stack.pop() {
Some((ListCell(list), newer_stack)) => {
State {
stack: newer_stack.push(ListCell(box Cons(thing, list))),
env: self.env,
control: new_control,
dump: self.dump
}
},
Some((thing_else, _)) => panic!("[CONS]: Expected a list on the stack, found {:?}", thing_else),
None => panic!("[CONS]: Expected a list on the stack, found nothing.")
}
},
None => panic!("[CONS]: Expected an item on the stack, found nothing")
}
},
Some((InstCell(NULL), new_control @ _)) => {
unimplemented!()
},
None => {panic!("[eval]: expected an instruction on control stack")}
Some((thing, new_control)) => {
panic!("[fatal]: Tried to evaluate an unsupported cell type {:?}.\n[fatal]: State dump:\n[fatal]:\tstack: {:?}\n[fatal]:\tenv: {:?}\n[fatal]:\tcontrol: {:?}\n[fatal]:\tdump: {:?}",
thing, self.stack, self.env, new_control.push(thing.clone()), self.dump) }
}
}
}
/// Evaluates a program.
///
/// Evaluates a program (control stack) and returns the final state.
/// TODO: add (optional?) parameters for stdin and stdout
/// Evaluates a program.
///
/// Runs a program (a control stack) on a freshly-initialised machine and
/// returns the final data stack once no instructions remain.
/// TODO: add (optional?) parameters for stdin and stdout
pub fn eval_program(program: List<SVMCell>) -> List<SVMCell> {
    let mut machine = State {
        stack: Stack::empty(),
        env: Stack::empty(),
        control: program,
        dump: Stack::empty()
    };
    // Step the machine one instruction at a time until the control stack
    // runs dry. //TODO: this is kinda heavyweight
    while machine.control.peek().is_some() {
        machine = machine.eval();
    }
    machine.stack
}
}
Implement NULL
Closes #9
#![crate_name = "seax_svm"]
#![crate_type = "lib"]
#![feature(box_syntax,box_patterns,core)]
/// Contains the Seax Virtual Machine (SVM) and miscellaneous
/// support code.
pub mod svm {
pub use self::slist::List;
pub use self::slist::List::{Cons,Nil};
pub use self::slist::Stack;
pub use self::cell::{SVMCell,Atom,Inst};
use self::cell::SVMCell::*;
use self::cell::Atom::*;
use self::cell::Inst::*;
/// Singly-linked list and stack implementations.
///
/// `List<T>` is a singly-linked cons list with boxed items. `Stack<T>` is
/// defined as a trait providing stack operations(`push()`, `pop()`, and
/// `peek()`), and an implementation for `List`.
#[macro_use]
pub mod slist;
/// SVM cell types.
///
/// A cell in the VM can be either an atom (single item, either unsigned
/// int, signed int, float, or string), a pointer to a list cell, or an
/// instruction.
pub mod cell;
#[cfg(test)]
mod tests;
/// Represents a SVM machine state
#[derive(PartialEq,Clone,Debug)]
pub struct State {
    // $s: data stack — holds intermediate values during evaluation.
    stack: List<SVMCell>,
    // $e: environment stack — holds bindings referenced by LD.
    env: List<SVMCell>,
    // $c: control stack — the instructions remaining to execute.
    control: List<SVMCell>,
    // $d: dump stack — saved machine state for returning from calls/branches.
    dump: List<SVMCell>
}
impl State {
/// Creates a new empty state
pub fn new() -> State {
    // All four registers (stack, env, control, dump) start out empty.
    State {
        stack: Stack::empty(),
        env: Stack::empty(),
        control: Stack::empty(),
        dump: Stack::empty()
    }
}
/// Evaluates an instruction.
///
/// Evaluates an instruction against a state, returning a new state.
/// TODO: rewrite me to use the next instruction on the control stack,
/// rather than a parameter.
pub fn eval(self) -> State {
    // Dispatch on the next instruction popped from the control stack.
    // Each arm consumes `self` and returns the successor machine state.
    // Fix: the RAP arm's panic messages were mis-tagged "[AP]" (copy-paste
    // from the AP arm); they now correctly say "[RAP]".
    match self.control.pop() {
        // NIL: pop an empty list onto the stack
        Some((InstCell(NIL), new_control @ _)) => {
            State {
                stack: self.stack.push(ListCell(box List::new())),
                env: self.env,
                control: new_control,
                dump: self.dump
            }
        }
        // LDC: load constant
        Some((InstCell(LDC), new_control @ _)) => {
            let (atom,newer_control) = new_control.pop().unwrap();
            State {
                stack: self.stack.push(atom),
                env: self.env,
                control: newer_control,
                dump: self.dump
            }
        },
        // LD: load variable — expects a (level . position) pair on the
        // control stack addressing into the environment.
        Some((InstCell(LD), new_control @ _)) => {
            match new_control.pop() {
                Some((ListCell(
                    box Cons(AtomCell(SInt(level)),
                    box Cons(AtomCell(SInt(pos)),
                    box Nil))
                ), newer_control @ _)) => {
                    let environment = match self.env[level] {
                        SVMCell::ListCell(ref l) => l.clone(),
                        _ => panic!("[LD]: Fatal: expected list in $e, found {:?}",self.env[level])
                    };
                    State {
                        stack: self.stack.push(environment[pos].clone()),
                        env: self.env,
                        control: newer_control,
                        dump: self.dump
                    }
                },
                it @ _ => panic!("[LD]: Fatal: expected pair, found {:?}", it)
            }
        },
        // LDF: load function — pushes a closure (code . current env frame)
        Some((InstCell(LDF), new_control @ _)) => {
            let (func, newer_control) = new_control.pop().unwrap();
            State {
                stack: self.stack.push(ListCell(box list!(func,self.env[0usize].clone()))),
                env: self.env,
                control: newer_control,
                dump: self.dump
            }
        },
        // JOIN: end of a SEL branch — resume the control saved on the dump.
        Some((InstCell(JOIN), new_control @ _)) => {
            let (top, new_dump) = self.dump.pop().unwrap()
;
            State {
                stack: self.stack,
                env: self.env,
                control: match top {
                    ListCell(box Nil) => new_control,
                    _ => new_control.push(top)
                },
                dump: new_dump
            }
        },
        // ADD: pop two atoms, push their sum.
        Some((InstCell(ADD), new_control @ _)) => {
            let (op1, new_stack) = self.stack.pop().unwrap();
            match op1 {
                AtomCell(a) => {
                    let (op2, newer_stack) = new_stack.pop().unwrap();
                    match op2 {
                        AtomCell(b) => State {
                            stack: newer_stack.push(AtomCell(a + b)),
                            env: self.env,
                            control: new_control,
                            dump: self.dump
                        },
                        b => panic!("[ADD] TypeError: expected compatible operands, found (ADD {:?} {:?})", a, b)
                    }
                },
                _ => panic!("[ADD]: Expected first operand to be atom, found list or instruction"),
            }
        },
        // SUB: pop two atoms, push their difference.
        Some((InstCell(SUB), new_control @ _)) => {
            let (op1, new_stack) = self.stack.pop().unwrap();
            match op1 {
                AtomCell(a) => {
                    let (op2, newer_stack) = new_stack.pop().unwrap();
                    match op2 {
                        AtomCell(b) => State {
                            stack: newer_stack.push(AtomCell(a - b)),
                            env: self.env,
                            control: new_control,
                            dump: self.dump
                        },
                        b => panic!("[SUB] TypeError: expected compatible operands, found (SUB {:?} {:?})", a, b)
                    }
                },
                _ => panic!("[SUB]: Expected first operand to be atom, found list or instruction"),
            }
        },
        // FDIV: floating-point division — every operand combination is
        // coerced to f64 before dividing.
        Some((InstCell(FDIV), new_control @ _)) => {
            let (op1, new_stack) = self.stack.pop().unwrap();
            match op1 {
                AtomCell(a) => {
                    let (op2, newer_stack) = new_stack.pop().unwrap();
                    match op2 {
                        AtomCell(b) => State {
                            stack: newer_stack.push(AtomCell(
                                match (a, b) {
                                    // same type: coerce to float
                                    (SInt(a), SInt(b)) => Float(a as f64 / b as f64),
                                    (UInt(a), UInt(b)) => Float(a as f64 / b as f64),
                                    (Float(a), Float(b)) => Float(a / b),
                                    // float + int: coerce to float
                                    (Float(a), SInt(b)) => Float(a / b as f64),
                                    (Float(a), UInt(b)) => Float(a / b as f64),
                                    (SInt(a), Float(b)) => Float(a as f64 / b),
                                    (UInt(a), Float(b)) => Float(a as f64 / b),
                                    // uint + sint: coerce to float
                                    (UInt(a), SInt(b)) => Float(a as f64 / b as f64),
                                    (SInt(a), UInt(b)) => Float(a as f64 / b as f64),
                                    // char + any: coerce to int -> float
                                    // but if you ever actually do this, then ...wat?
                                    (Char(a), Char(b)) => Float(a as u8 as f64 / b as u8 as f64),
                                    (Char(a), UInt(b)) => Float(a as u8 as f64 / b as f64),
                                    (Char(a), SInt(b)) => Float(a as u8 as f64 / b as f64),
                                    (Char(a), Float(b)) => Float(a as u8 as f64 / b as f64),
                                    (UInt(a), Char(b)) => Float(a as f64 / b as u8 as f64),
                                    (SInt(a), Char(b)) => Float(a as f64 / b as u8 as f64),
                                    (Float(a), Char(b)) => Float(a as f64 / b as u8 as f64)
                                }
                            )),
                            env: self.env,
                            control: new_control,
                            dump: self.dump
                        },
                        b => panic!("[FDIV] TypeError: expected compatible operands, found (FDIV {:?} {:?})", a, b)
                    }
                },
                _ => panic!("[FDIV]: Expected first operand to be atom, found list or instruction"),
            }
        },
        // DIV: pop two atoms, push their quotient.
        Some((InstCell(DIV), new_control @ _)) => {
            let (op1, new_stack) = self.stack.pop().unwrap();
            match op1 {
                AtomCell(a) => {
                    let (op2, newer_stack) = new_stack.pop().unwrap();
                    match op2 {
                        AtomCell(b) => State {
                            stack: newer_stack.push(AtomCell(a / b)),
                            env: self.env,
                            control: new_control,
                            dump: self.dump
                        },
                        b => panic!("[DIV] TypeError: expected compatible operands, found (DIV {:?} {:?})", a, b)
                    }
                },
                _ => panic!("[DIV]: Expected first operand to be atom, found list or instruction"),
            }
        },
        // MUL: pop two atoms, push their product.
        Some((InstCell(MUL), new_control @ _)) => {
            let (op1, new_stack) = self.stack.pop().unwrap();
            match op1 {
                AtomCell(a) => {
                    let (op2, newer_stack) = new_stack.pop().unwrap();
                    match op2 {
                        AtomCell(b) => State {
                            stack: newer_stack.push(AtomCell(a * b)),
                            env: self.env,
                            control: new_control,
                            dump: self.dump
                        },
                        b => panic!("[MUL] TypeError: expected compatible operands, found (MUL {:?} {:?})", a, b)
                    }
                },
                _ => panic!("[MUL]: Expected first operand to be atom, found list or instruction"),
            }
        },
        // MOD: pop two atoms, push the remainder.
        Some((InstCell(MOD), new_control @ _)) => {
            let (op1, new_stack) = self.stack.pop().unwrap();
            match op1 {
                AtomCell(a) => {
                    let (op2, newer_stack) = new_stack.pop().unwrap();
                    match op2 {
                        AtomCell(b) => State {
                            stack: newer_stack.push(AtomCell(a % b)),
                            env: self.env,
                            control: new_control,
                            dump: self.dump
                        },
                        b => panic!("[MOD] TypeError: expected compatible operands, found (MOD {:?} {:?})", a, b)
                    }
                },
                _ => panic!("[MOD]: Expected first operand to be atom, found list or instruction"),
            }
        },
        // EQ: comparison — pushes (1) for true, NIL for false.
        Some((InstCell(EQ), new_control @ _)) => {
            let (op1, new_stack) = self.stack.pop().unwrap();
            let (op2, newer_stack) = new_stack.pop().unwrap();
            match (op1,op2) {
                (AtomCell(a), AtomCell(b)) => State {
                    stack: newer_stack.push(
                        match a == b {
                            true => ListCell(box list!(AtomCell(SInt(1)))),
                            false => ListCell(box Nil)
                        }
                    ),
                    env: self.env,
                    control: new_control,
                    dump: self.dump
                },
                (_,_) => unimplemented!()
            }
        },
        // GT: greater-than comparison.
        Some((InstCell(GT), new_control @ _)) => {
            let (op1, new_stack) = self.stack.pop().unwrap();
            let (op2, newer_stack) = new_stack.pop().unwrap();
            match (op1,op2) {
                (AtomCell(a), AtomCell(b)) => State {
                    stack: newer_stack.push(
                        match a > b {
                            true => ListCell(box list!(AtomCell(SInt(1)))),
                            false => ListCell(box Nil)
                        }
                    ),
                    env: self.env,
                    control: new_control,
                    dump: self.dump
                },
                (_,_) => unimplemented!()
            }
        },
        // GTE: greater-than-or-equal comparison.
        Some((InstCell(GTE), new_control @ _)) => {
            let (op1, new_stack) = self.stack.pop().unwrap();
            let (op2, newer_stack) = new_stack.pop().unwrap();
            match (op1,op2) {
                (AtomCell(a), AtomCell(b)) => State {
                    stack: newer_stack.push(
                        match a >= b {
                            true => ListCell(box list!(AtomCell(SInt(1)))),
                            false => ListCell(box Nil)
                        }
                    ),
                    env: self.env,
                    control: new_control,
                    dump: self.dump
                },
                (_,_) => unimplemented!()
            }
        },
        // LT: less-than comparison.
        Some((InstCell(LT), new_control @ _)) => {
            let (op1, new_stack) = self.stack.pop().unwrap();
            let (op2, newer_stack) = new_stack.pop().unwrap();
            match (op1,op2) {
                (AtomCell(a), AtomCell(b)) => State {
                    stack: newer_stack.push(
                        match a < b {
                            true => ListCell(box list!(AtomCell(SInt(1)))),
                            false => ListCell(box Nil)
                        }
                    ),
                    env: self.env,
                    control: new_control,
                    dump: self.dump
                },
                (_,_) => unimplemented!()
            }
        },
        // LTE: less-than-or-equal comparison.
        Some((InstCell(LTE), new_control @ _)) => {
            let (op1, new_stack) = self.stack.pop().unwrap();
            let (op2, newer_stack) = new_stack.pop().unwrap();
            match (op1,op2) {
                (AtomCell(a), AtomCell(b)) => State {
                    stack: newer_stack.push(
                        match a <= b {
                            true => ListCell(box list!(AtomCell(SInt(1)))),
                            false => ListCell(box Nil)
                        }
                    ),
                    env: self.env,
                    control: new_control,
                    dump: self.dump
                },
                (_,_) => unimplemented!()
            }
        },
        // ATOM: pushes (1) if the top of the stack is an atom, else NIL.
        Some((InstCell(ATOM), new_control @ _)) => {
            let (target, new_stack) = self.stack.pop().unwrap();
            State {
                stack: new_stack.push(
                    match target {
                        AtomCell(_) => ListCell(box list!(AtomCell(SInt(1)))),
                        _ => ListCell(box Nil)
                    }
                ),
                env: self.env,
                control: new_control,
                dump: self.dump
            }
        },
        // AP: apply — enter a closure, saving env and control on the dump.
        Some((InstCell(AP), new_control @ _)) => {
            match self.stack.pop().unwrap() {
                (ListCell(box Cons(ListCell(box func), box Cons(ListCell(box params), box Nil))), new_stack) => State {
                    stack: new_stack,
                    env: params,
                    control: func,
                    dump: self.dump.push(ListCell(box self.env)).push(ListCell(box new_control))
                },
                (_, thing) => panic!("[AP]: Fatal: Expected closure on stack, got:\n{:?}", thing)
            }
        },
        // RAP: recursive apply — like AP, but patches the dummy environment
        // pushed by DUM so the closure can see itself.
        Some((InstCell(RAP), new_control @ _)) => {
            match self.stack.pop().unwrap() {
                (ListCell(box Cons(ListCell(box func), box Cons(ListCell(box params), box Nil))), new_stack) => {
                    match new_stack.pop() {
                        Some((v @ ListCell(_), newer_stack)) => {
                            State {
                                stack: Stack::empty(),
                                env: params.push(v),
                                control: func,
                                dump: self.dump
                                    .push(ListCell(box new_control))
                                    .push(ListCell(box self.env.pop().unwrap().1))
                                    .push(ListCell(box newer_stack))
                            }
                        },
                        Some((thing, _)) => panic!("[RAP]: Fatal: Expected closure on stack, got:\n{:?}", thing),
                        None => panic!("[RAP]: Fatal: expected non-empty stack")
                    }
                },
                (_, thing) => panic!("[RAP]: Fatal: Expected closure on stack, got:\n{:?}", thing)
            }
        },
        // RET: return — restore stack, env, and control from the dump and
        // push the callee's result onto the restored stack.
        Some((InstCell(RET), _)) => {
            let (head, _) = self.stack.pop().unwrap();
            let (new_stack, new_dump) = {
                match self.dump.pop().unwrap() {
                    (ListCell(s), d @ _) => (*s, d),
                    it @ (AtomCell(_),_) => (list!(it.0), it.1),
                    _ => panic!("[RET]: Expected non-empty stack")
                }
            };
            let (new_env, newer_dump) = {
                match new_dump.pop().unwrap() {
                    (ListCell(e), d @ _) => (*e, d),
                    _ => panic!("[RET]: Expected new environment on dump stack")
                }
            };
            let (newer_control, newest_dump) = {
                match newer_dump.pop().unwrap() {
                    (ListCell(c), d @ _) => (*c, d),
                    it @ (InstCell(_),_) => (list!(it.0), it.1),
                    _ => panic!("[RET]: Expected new control stack on dump stack")
                }
            };
            State {
                stack: new_stack.push(head),
                env: new_env,
                control: newer_control,
                dump: newest_dump
            }
        },
        // DUM: push a dummy (empty) environment frame, later patched by RAP.
        Some((InstCell(DUM), new_control @ _)) => {
            State {
                stack: self.stack,
                env: self.env.push(ListCell(list!())),
                control: new_control,
                dump: self.dump
            }
        },
        // SEL: conditional — pops the condition off the stack and selects
        // between the two list branches on the control stack; the
        // continuation is saved on the dump for JOIN.
        Some((InstCell(SEL), new_control @ _)) => {
            match new_control.pop() {
                Some((ListCell(box true_case), newer_control)) => {
                    match newer_control.pop() {
                        Some((ListCell(box false_case), newest_control)) => {
                            match self.stack.pop() {
                                Some((ListCell(box Nil), new_stack)) => { // false case
                                    State {
                                        stack: new_stack,
                                        env: self.env,
                                        control: false_case,
                                        dump: self.dump.push(ListCell(box newest_control))
                                    }
                                },
                                Some((_, new_stack)) => { // true case
                                    State {
                                        stack: new_stack,
                                        env: self.env,
                                        control: true_case,
                                        dump: self.dump.push(ListCell(box newest_control))
                                    }
                                },
                                None => panic!("[SEL]: expected non-empty stack")
                            }
                        },
                        Some((thing, _)) => panic!("[SEL]: expected list on control, found {:?}", thing),
                        None => panic!("[SEL]: expected list on control, found nothing")
                    }
                },
                Some((thing, _)) => panic!("[SEL]: expected list on control, found {:?}", thing),
                None => panic!("[SEL]: expected list on control, found nothing")
            }
        },
        // CAR: push the head of the list on top of the stack.
        Some((InstCell(CAR), new_control @ _)) => {
            match self.stack.pop() {
                Some((ListCell(box Cons(car, _)), new_stack)) => State {
                    stack: new_stack.push(car),
                    env: self.env,
                    control: new_control,
                    dump: self.dump
                },
                Some((ListCell(box Nil), _)) => panic!("[CAR]: expected non-empty list, found Nil"),
                Some((thing, _)) => panic!("[CAR]: expected non-empty list, found {:?}", thing),
                None => panic!("[CAR]: Expected non-empty list, found nothing")
            }
        },
        // CDR: push the tail of the list on top of the stack.
        Some((InstCell(CDR), new_control @ _)) => {
            match self.stack.pop() {
                Some((ListCell(box Cons(_, cdr)), new_stack)) => State {
                    stack: new_stack.push(ListCell(cdr)),
                    env: self.env,
                    control: new_control,
                    dump: self.dump
                },
                Some((ListCell(box Nil), _)) => panic!("[CDR]: expected non-empty list, found Nil"),
                Some((thing, _)) => panic!("[CDR]: expected non-empty list, found {:?}", thing),
                None => panic!("[CDR]: Expected non-empty list, found nothing")
            }
        },
        // CONS: prepend the top stack item onto the list below it.
        Some((InstCell(CONS), new_control @ _)) => {
            match self.stack.pop() {
                Some((thing, new_stack)) => {
                    match new_stack.pop() {
                        Some((ListCell(list), newer_stack)) => {
                            State {
                                stack: newer_stack.push(ListCell(box Cons(thing, list))),
                                env: self.env,
                                control: new_control,
                                dump: self.dump
                            }
                        },
                        Some((thing_else, _)) => panic!("[CONS]: Expected a list on the stack, found {:?}", thing_else),
                        None => panic!("[CONS]: Expected a list on the stack, found nothing.")
                    }
                },
                None => panic!("[CONS]: Expected an item on the stack, found nothing")
            }
        },
        // NULL: pushes (1) if the top of the stack is NIL, else NIL.
        Some((InstCell(NULL), new_control @ _)) => {
            let (target, new_stack) = self.stack.pop().unwrap();
            State {
                stack: new_stack.push(
                    match target {
                        ListCell(box Nil) => ListCell(box list!(AtomCell(SInt(1)))),
                        _ => ListCell(box Nil)
                    }
                ),
                env: self.env,
                control: new_control,
                dump: self.dump
            }
        },
        None => {panic!("[eval]: expected an instruction on control stack")}
        // Anything that is not an instruction cannot be evaluated.
        Some((thing, new_control)) => {
            panic!("[fatal]: Tried to evaluate an unsupported cell type {:?}.\n[fatal]: State dump:\n[fatal]:\tstack: {:?}\n[fatal]:\tenv: {:?}\n[fatal]:\tcontrol: {:?}\n[fatal]:\tdump: {:?}",
                thing, self.stack, self.env, new_control.push(thing.clone()), self.dump) }
    }
}
}
/// Evaluates a program.
///
/// Evaluates a program (control stack) and returns the final state.
/// TODO: add (optional?) parameters for stdin and stdout
/// Evaluates a program.
///
/// Boots a machine whose control stack is the given program and steps it
/// to completion, yielding the final data stack.
/// TODO: add (optional?) parameters for stdin and stdout
pub fn eval_program(program: List<SVMCell>) -> List<SVMCell> {
    let mut machine = State {
        stack: Stack::empty(),
        env: Stack::empty(),
        control: program,
        dump: Stack::empty()
    };
    // Run until no instructions remain. //TODO: this is kinda heavyweight
    loop {
        if machine.control.peek() == None {
            break;
        }
        machine = machine.eval();
    }
    machine.stack
}
}
|
// Copyright (c) <2015> <lummax>
// Licensed under MIT (http://opensource.org/licenses/MIT)
extern crate wayland_client;
use wayland_client::client::{FromPrimitive, Display,
Registry, RegistryEventHandler,
Seat, SeatEventHandler, SeatCapability,
Shm, ShmEventHandler, ShmFormat,
Output, OutputEventHandler, OutputSubpixel,
OutputTransform, OutputMode};
use std::collections::HashMap;
#[derive(Default)]
struct SeatData {
    // Seat name delivered by the `name` event.
    name: String,
    // Human-readable capability labels ("pointer", "keyboard", "touch").
    capabilities: Vec<String>,
}
#[derive(Default)]
struct ShmData {
    // Debug names of the pixel formats advertised by the `format` event.
    formats: Vec<String>,
}
// Geometry and mode information for a wl_output.
// NOTE(review): in this revision no handler writes these fields yet —
// the output handlers only print; confirm this is intended.
#[derive(Default)]
struct OutputData {
    x: i32,
    y: i32,
    physical_width: i32,
    physical_height: i32,
    subpixel: String,
    make: String,
    model: String,
    transform: String,
    modes: Vec<OutputModeData>,
}
// One display mode advertised by an output.
struct OutputModeData {
    width: i32,
    height: i32,
    // Refresh rate as reported by the protocol — presumably millihertz
    // per the Wayland spec; TODO confirm.
    refresh: i32,
    // Space-separated mode flag labels.
    flags: String,
}
struct Info {
    // Registry proxy; all global binds go through it.
    registry: Registry,
    seat: Option<Seat>,
    shm: Option<Shm>,
    output: Option<Output>,
    // Set by handlers to request one more dispatch roundtrip in main().
    pub roundtrip: bool,
    // interface name -> formatted "interface/version/name" description.
    data: HashMap<String, String>,
    seat_data: SeatData,
    shm_data: ShmData,
    output_data: OutputData,
}
impl Info {
    /// Builds an `Info` bound to the given registry. All protocol data
    /// starts out empty, and an initial roundtrip is requested.
    fn new(registry: Registry) -> Info {
        Info {
            registry: registry,
            seat: None,
            shm: None,
            output: None,
            roundtrip: true,
            data: HashMap::new(),
            seat_data: Default::default(),
            shm_data: Default::default(),
            output_data: Default::default(),
        }
    }
}
impl RegistryEventHandler for Info {
    fn get_registry(&mut self) -> &mut Registry {
        &mut self.registry
    }
    /// Records every advertised global, and binds the interfaces we care
    /// about; each new binding requests another dispatch roundtrip.
    fn on_global(&mut self, name: u32, interface: String, version: u32) {
        let description = format!("interface: '{}', version: {}, name: {}",
                                  interface, version, name);
        self.data.insert(interface.clone(), description);
        match &interface[..] {
            "wl_seat" => {
                self.roundtrip = true;
                self.seat = self.registry.bind(name, version).ok();
                SeatEventHandler::connect_dispatcher(self);
            }
            "wl_shm" => {
                self.roundtrip = true;
                self.shm = self.registry.bind(name, version).ok();
                ShmEventHandler::connect_dispatcher(self);
            }
            "wl_output" => {
                self.roundtrip = true;
                self.output = self.registry.bind(name, version).ok();
                OutputEventHandler::connect_dispatcher(self);
            }
            _ => {}
        }
    }
}
impl SeatEventHandler for Info {
    fn get_seat(&mut self) -> &mut Seat {
        self.seat.as_mut().unwrap()
    }
    /// Decodes the capability bitmask into human-readable labels, in the
    /// fixed order pointer, keyboard, touch.
    fn on_capabilities(&mut self, capabilities: u32) {
        let known = [(SeatCapability::Pointer as u32, "pointer"),
                     (SeatCapability::Keyboard as u32, "keyboard"),
                     (SeatCapability::Touch as u32, "touch")];
        for &(bit, label) in known.iter() {
            if capabilities & bit != 0 {
                self.seat_data.capabilities.push(label.to_string());
            }
        }
    }
    fn on_name(&mut self, name: String) {
        self.seat_data.name = name;
    }
}
impl ShmEventHandler for Info {
    fn get_shm(&mut self) -> &mut Shm {
        self.shm.as_mut().unwrap()
    }
    /// Stores the debug name of each advertised shared-memory pixel format.
    fn on_format(&mut self, format: u32) {
        let pretty = format!("{:?}", ShmFormat::from_u32(format).unwrap());
        self.shm_data.formats.push(pretty);
    }
}
// Output handlers in this revision only log the raw events; nothing is
// stored in `output_data` yet.
impl OutputEventHandler for Info {
    fn get_output(&mut self) -> &mut Output {
        return self.output.as_mut().unwrap();
    }
    // Prints the geometry event, decoding the numeric subpixel/transform
    // codes into their enum variants for readability.
    fn on_geometry(&mut self, x: i32, y: i32, physical_width: i32,
                   physical_height: i32, subpixel: i32, make: String, model:
                   String, transform: i32) {
        println!("on_geometry({}, {}, {}, {}, {:?}, {}, {}, {:?})", x, y,
                 physical_width, physical_height,
                 OutputSubpixel::from_u32(subpixel as u32).unwrap(), make, model,
                 OutputTransform::from_u32(transform as u32).unwrap());
    }
    fn on_mode(&mut self, flags: u32, width: i32, height: i32, refresh: i32) {
        println!("on_mode({}, {}, {}, {})", flags, width, height, refresh);
    }
    fn on_done(&mut self) {
        println!("on_done()");
    }
    fn on_scale(&mut self, factor: i32) {
        println!("on_scale({})", factor);
    }
}
fn main() {
    let mut display = Display::connect(None).unwrap();
    let mut info = Info::new(display.get_registry().unwrap());
    RegistryEventHandler::connect_dispatcher(&mut info);
    // Keep doing roundtrips while the previous one bound a new global
    // (each bind sets `roundtrip` again from its handler).
    loop {
        if !info.roundtrip {
            break;
        }
        info.roundtrip = false;
        display.roundtrip();
    }
}
weston-info: gather output data
// Copyright (c) <2015> <lummax>
// Licensed under MIT (http://opensource.org/licenses/MIT)
extern crate wayland_client;
use wayland_client::client::{FromPrimitive, Display,
Registry, RegistryEventHandler,
Seat, SeatEventHandler, SeatCapability,
Shm, ShmEventHandler, ShmFormat,
Output, OutputEventHandler, OutputSubpixel,
OutputTransform, OutputMode};
use std::collections::HashMap;
#[derive(Default)]
struct SeatData {
    // Seat name delivered by the `name` event.
    name: String,
    // Human-readable capability labels ("pointer", "keyboard", "touch").
    capabilities: Vec<String>,
}
#[derive(Default)]
struct ShmData {
    // Debug names of the pixel formats advertised by the `format` event.
    formats: Vec<String>,
}
// Geometry and mode information for a wl_output, populated by
// `on_geometry` and `on_mode`.
#[derive(Default)]
struct OutputData {
    x: i32,
    y: i32,
    physical_width: i32,
    physical_height: i32,
    // Debug-formatted OutputSubpixel variant name.
    subpixel: String,
    make: String,
    model: String,
    // Debug-formatted OutputTransform variant name.
    transform: String,
    modes: Vec<OutputModeData>,
}
// One display mode advertised by an output (stored by `on_mode`).
struct OutputModeData {
    width: i32,
    height: i32,
    // Refresh rate as reported by the protocol — presumably millihertz
    // per the Wayland spec; TODO confirm.
    refresh: i32,
    // Space-separated flag labels ("current", "preferred").
    flags: String,
}
struct Info {
    // Registry proxy; all global binds go through it.
    registry: Registry,
    seat: Option<Seat>,
    shm: Option<Shm>,
    output: Option<Output>,
    // Set by handlers to request one more dispatch roundtrip in main().
    pub roundtrip: bool,
    // interface name -> formatted "interface/version/name" description.
    data: HashMap<String, String>,
    seat_data: SeatData,
    shm_data: ShmData,
    output_data: OutputData,
}
impl Info {
    /// Builds an `Info` bound to the given registry; protocol data starts
    /// empty and one initial roundtrip is requested.
    fn new(registry: Registry) -> Info {
        Info {
            registry: registry,
            seat: None,
            shm: None,
            output: None,
            roundtrip: true,
            data: HashMap::new(),
            seat_data: Default::default(),
            shm_data: Default::default(),
            output_data: Default::default(),
        }
    }
}
impl RegistryEventHandler for Info {
    fn get_registry(&mut self) -> &mut Registry {
        return &mut self.registry;
    }
    // Invoked once per advertised global: record its description, and bind
    // the interfaces we care about. Every new binding requests another
    // dispatch roundtrip so the bound object's events get delivered.
    fn on_global(&mut self, name: u32, interface: String, version: u32) {
        self.data.insert(interface.clone(),
                         format!("interface: '{}', version: {}, name: {}",
                                 interface, version, name));
        if interface == "wl_seat" {
            self.roundtrip = true;
            self.seat = self.registry.bind(name, version).ok();
            SeatEventHandler::connect_dispatcher(self);
        } else if interface == "wl_shm" {
            self.roundtrip = true;
            self.shm = self.registry.bind(name, version).ok();
            ShmEventHandler::connect_dispatcher(self);
        } else if interface == "wl_output" {
            self.roundtrip = true;
            self.output = self.registry.bind(name, version).ok();
            OutputEventHandler::connect_dispatcher(self);
        }
    }
}
impl SeatEventHandler for Info {
    fn get_seat(&mut self) -> &mut Seat {
        return self.seat.as_mut().unwrap();
    }
    // Decodes the capability bitmask into human-readable labels.
    fn on_capabilities(&mut self, capabilities: u32) {
        if capabilities & (SeatCapability::Pointer as u32) != 0 {
            self.seat_data.capabilities.push("pointer".to_string());
        }
        if capabilities & (SeatCapability::Keyboard as u32) != 0 {
            self.seat_data.capabilities.push("keyboard".to_string());
        }
        if capabilities & (SeatCapability::Touch as u32) != 0 {
            self.seat_data.capabilities.push("touch".to_string());
        }
    }
    fn on_name(&mut self, name: String) {
        self.seat_data.name = name;
    }
}
impl ShmEventHandler for Info {
    fn get_shm(&mut self) -> &mut Shm {
        return self.shm.as_mut().unwrap();
    }
    // Records the debug name of each advertised shared-memory pixel format.
    fn on_format(&mut self, format: u32) {
        self.shm_data.formats.push(format!("{:?}", ShmFormat::from_u32(format).unwrap()));
    }
}
impl OutputEventHandler for Info {
    fn get_output(&mut self) -> &mut Output {
        return self.output.as_mut().unwrap();
    }
    // Stores the output geometry; the numeric subpixel/transform codes are
    // decoded to their enum variants and kept as debug-formatted strings.
    fn on_geometry(&mut self, x: i32, y: i32, physical_width: i32,
                   physical_height: i32, subpixel: i32, make: String, model:
                   String, transform: i32) {
        self.output_data.x = x;
        self.output_data.y = y;
        self.output_data.physical_width = physical_width;
        self.output_data.physical_height = physical_height;
        self.output_data.subpixel = format!("{:?}", OutputSubpixel::from_u32(subpixel as u32).unwrap());
        self.output_data.make = make;
        self.output_data.model = model;
        self.output_data.transform = format!("{:?}", OutputTransform::from_u32(transform as u32).unwrap());
    }
    // Records one advertised display mode, translating the known flag bits
    // to labels.
    fn on_mode(&mut self, flags: u32, width: i32, height: i32, refresh: i32) {
        let mut flags_ = Vec::new();
        if flags & (OutputMode::Current as u32) != 0 {
            flags_.push("current".to_string());
        }
        if flags & (OutputMode::Preferred as u32) != 0 {
            flags_.push("preferred".to_string());
        }
        self.output_data.modes.push(OutputModeData {
            width: width,
            height: height,
            refresh: refresh,
            // NOTE(review): `connect` was renamed `join` in Rust 1.3 and
            // later deprecated — confirm the targeted toolchain.
            flags: flags_.connect(" "),
        })
    }
}
fn main() {
    let mut display = Display::connect(None).unwrap();
    let mut info = Info::new(display.get_registry().unwrap());
    RegistryEventHandler::connect_dispatcher(&mut info);
    // Roundtrip repeatedly: each handler that binds a new global sets
    // `roundtrip` again, so we loop until a pass binds nothing new.
    loop {
        if !info.roundtrip {
            break;
        }
        info.roundtrip = false;
        display.roundtrip();
    }
}
|
use std::io::Read;
use errors::{Result, ErrorKind};
use types::{Tag, WireType};
use message::Message;
use byteorder::ReadBytesExt;
use byteorder::LittleEndian as LE;
/// A struct to read protocol binary files
pub struct Reader<R> {
    // Underlying byte source.
    inner: R,
    // Remaining number of bytes this reader is allowed to consume.
    len: usize,
}
impl<R: Read> Reader<R> {
    /// Creates a new protocol buffer reader with the maximum len of bytes to read
    pub fn from_reader(r: R, len: usize) -> Reader<R> {
        Reader { inner: r, len: len }
    }
    /// Reads next tag, `None` if all bytes have been read
    pub fn next_tag(&mut self) -> Option<Result<Tag>> {
        if self.len == 0 {
            None
        } else {
            Some(self.read_varint().map(|i| (i as u32).into()))
        }
    }
    /// Reads the next tag as a raw `u32` value, `None` if all bytes have been read
    pub fn next_tag_value(&mut self) -> Option<Result<u32>> {
        if self.len == 0 {
            None
        } else {
            Some(self.read_varint().map(|i| (i as u32)))
        }
    }
    /// Deducts `n` bytes from the remaining budget, failing with `Eof`
    /// instead of underflowing `usize` when fewer than `n` bytes remain.
    fn consume(&mut self, n: usize) -> Result<()> {
        if self.len < n {
            return Err(ErrorKind::Eof.into());
        }
        self.len -= n;
        Ok(())
    }
    /// Reads a base-128 varint of at most 10 bytes (a full `u64`).
    fn read_varint(&mut self) -> Result<u64> {
        let mut r: u64 = 0;
        let mut i = 0;
        for _ in 0..10 {
            self.consume(1)?;
            let b = self.inner.read_u8()?;
            // TODO: may overflow if i == 9
            r |= ((b & 0x7f) as u64) << i;
            if b < 0x80 {
                return Ok(r);
            }
            i += 7;
        }
        // More than 10 continuation bytes: not a valid varint.
        // (The former `if i == 70` check was dead — i is always 70 here.)
        Err(ErrorKind::Varint.into())
    }
    pub fn read_int32(&mut self) -> Result<i32> {
        self.read_varint().map(|i| i as i32)
    }
    pub fn read_int64(&mut self) -> Result<i64> {
        self.read_varint().map(|i| i as i64)
    }
    pub fn read_uint32(&mut self) -> Result<u32> {
        self.read_varint().map(|i| i as u32)
    }
    pub fn read_uint64(&mut self) -> Result<u64> {
        self.read_varint()
    }
    /// Reads a ZigZag-encoded signed 32-bit integer
    /// (0, -1, 1, -2, ... encode as 0, 1, 2, 3, ...).
    pub fn read_sint32(&mut self) -> Result<i32> {
        self.read_varint().map(|i| {
            let i = i as u32;
            ((i >> 1) as i32) ^ -((i & 1) as i32)
        })
    }
    /// Reads a ZigZag-encoded signed 64-bit integer.
    pub fn read_sint64(&mut self) -> Result<i64> {
        self.read_varint().map(|i| ((i >> 1) as i64) ^ -((i & 1) as i64))
    }
    pub fn read_fixed64(&mut self) -> Result<u64> {
        self.consume(8)?;
        self.inner.read_u64::<LE>().map_err(|e| e.into())
    }
    pub fn read_fixed32(&mut self) -> Result<u32> {
        self.consume(4)?;
        self.inner.read_u32::<LE>().map_err(|e| e.into())
    }
    pub fn read_sfixed64(&mut self) -> Result<i64> {
        self.consume(8)?;
        self.inner.read_i64::<LE>().map_err(|e| e.into())
    }
    pub fn read_sfixed32(&mut self) -> Result<i32> {
        self.consume(4)?;
        self.inner.read_i32::<LE>().map_err(|e| e.into())
    }
    pub fn read_float(&mut self) -> Result<f32> {
        self.consume(4)?;
        self.inner.read_f32::<LE>().map_err(|e| e.into())
    }
    pub fn read_double(&mut self) -> Result<f64> {
        self.consume(8)?;
        self.inner.read_f64::<LE>().map_err(|e| e.into())
    }
    pub fn read_bool(&mut self) -> Result<bool> {
        self.read_varint().map(|i| i != 0)
    }
    pub fn read_enum<E: From<u64>>(&mut self) -> Result<E> {
        self.read_varint().map(|i| i.into())
    }
    /// Reads a length-prefixed byte buffer.
    pub fn read_bytes(&mut self) -> Result<Vec<u8>> {
        let len = self.read_varint()? as usize;
        self.consume(len)?;
        // Zero-initialise the buffer: handing uninitialised memory (via
        // `set_len`) to `read_exact` is unsound.
        let mut vec = vec![0u8; len];
        self.inner.read_exact(&mut vec[..])?;
        Ok(vec)
    }
    pub fn read_string(&mut self) -> Result<String> {
        let vec = self.read_bytes()?;
        String::from_utf8(vec).map_err(|e| e.into())
    }
    pub fn read_packed_repeated_field(&mut self) -> Result<()> {
        unimplemented!()
    }
    /// Reads a length-prefixed embedded message.
    pub fn read_message<M: Message>(&mut self) -> Result<M> {
        let len = self.read_varint()? as usize;
        let cur_len = self.len;
        if len > cur_len {
            return Err(ErrorKind::Eof.into());
        }
        // Restrict the budget to the nested message, parse it, then restore
        // the outer budget minus the bytes the message consumed.
        self.len = len;
        let msg = M::from_reader(self)?;
        self.len = cur_len - len;
        Ok(msg)
    }
    /// Skips over a field of the given wire type.
    pub fn read_unknown(&mut self, wire_type: WireType) -> Result<()> {
        let len = match wire_type {
            WireType::Varint => return self.read_varint().map(|_| ()),
            WireType::Fixed64 => 8,
            WireType::LengthDelimited => {
                let len = self.read_varint()? as usize;
                if len == 0 { return Ok(()); }
                len
            },
            WireType::StartGroup |
            WireType::EndGroup => return Err(ErrorKind::Deprecated("group").into()),
            WireType::Fixed32 => 4,
            WireType::Unknown => return Err(ErrorKind::UnknownWireType.into()),
        };
        self.consume(len)?;
        // Zero-initialised scratch buffer (see read_bytes).
        let mut buf = vec![0u8; len];
        self.inner.read_exact(&mut buf)?;
        Ok(())
    }
    /// Remaining number of bytes this reader is allowed to consume.
    pub fn len(&self) -> usize {
        self.len
    }
}
#[test]
fn test_varint() {
    // 0x96 0x01 is the canonical varint encoding of 150.
    let data: &[u8] = &[0x96, 0x01];
    let mut r = Reader::from_reader(data, data.len());
    assert_eq!(150, r.read_varint().unwrap());
    // All bytes consumed, so no further tag is available.
    assert!(r.next_tag().is_none());
}
#[test]
fn test_next_field() {
    // 0x08 is the tag for field number 1 with varint wire type;
    // 0x96 0x01 is the varint encoding of 150.
    let data: &[u8] = &[0x08, 0x96, 0x01];
    let mut r = Reader::from_reader(data, data.len());
    let tag = r.next_tag().unwrap().unwrap();
    assert_eq!((1, WireType::Varint), tag.unpack());
    assert_eq!(150, r.read_varint().unwrap());
    assert!(r.next_tag().is_none());
}
require Read instead of BufRead
use std::io::Read;
use errors::{Result, ErrorKind};
use types::{Tag, WireType};
use message::Message;
use byteorder::ReadBytesExt;
use byteorder::LittleEndian as LE;
/// A struct to read protocol binary files
pub struct Reader<R> {
    // Underlying byte source.
    inner: R,
    // Remaining number of bytes this reader is allowed to consume.
    len: usize,
}
impl<R: Read> Reader<R> {
/// Creates a new protocol buffer reader with the maximum len of bytes to read
pub fn from_reader(r: R, len: usize) -> Reader<R> {
Reader { inner: r, len: len }
}
/// Reads next tag, `None` if all bytes have been read
pub fn next_tag(&mut self) -> Option<Result<Tag>> {
if self.len == 0 {
None
} else {
Some(self.read_varint().map(|i| (i as u32).into()))
}
}
/// Reads next tag, `None` if all bytes have been read
pub fn next_tag_value(&mut self) -> Option<Result<u32>> {
if self.len == 0 {
None
} else {
Some(self.read_varint().map(|i| (i as u32)))
}
}
/// Reads a base-128 varint of at most 10 bytes (a full `u64`).
fn read_varint(&mut self) -> Result<u64> {
    let mut r: u64 = 0;
    let mut i = 0;
    for _ in 0..10 {
        // Fail cleanly with Eof instead of underflowing `len` (usize
        // panic) when the byte budget runs out mid-varint.
        if self.len == 0 {
            return Err(ErrorKind::Eof.into());
        }
        self.len -= 1;
        let b = self.inner.read_u8()?;
        // TODO: may overflow if i == 9
        r |= ((b & 0x7f) as u64) << i;
        if b < 0x80 {
            return Ok(r);
        }
        i += 7;
    }
    // More than 10 continuation bytes: not a valid varint.
    // (The former `if i == 70` check was dead — i is always 70 here.)
    Err(ErrorKind::Varint.into())
}
pub fn read_int32(&mut self) -> Result<i32> {
self.read_varint().map(|i| i as i32)
}
pub fn read_int64(&mut self) -> Result<i64> {
self.read_varint().map(|i| i as i64)
}
pub fn read_uint32(&mut self) -> Result<u32> {
self.read_varint().map(|i| i as u32)
}
pub fn read_uint64(&mut self) -> Result<u64> {
self.read_varint()
}
pub fn read_sint32(&mut self) -> Result<i32> {
unimplemented!()
}
pub fn read_sint64(&mut self) -> Result<i64> {
unimplemented!()
}
pub fn read_fixed64(&mut self) -> Result<u64> {
self.len -= 8;
self.inner.read_u64::<LE>().map_err(|e| e.into())
}
pub fn read_fixed32(&mut self) -> Result<u32> {
self.len -= 4;
self.inner.read_u32::<LE>().map_err(|e| e.into())
}
pub fn read_sfixed64(&mut self) -> Result<i64> {
self.len -= 8;
self.inner.read_i64::<LE>().map_err(|e| e.into())
}
pub fn read_sfixed32(&mut self) -> Result<i32> {
self.len -= 4;
self.inner.read_i32::<LE>().map_err(|e| e.into())
}
pub fn read_float(&mut self) -> Result<f32> {
self.len -= 4;
self.inner.read_f32::<LE>().map_err(|e| e.into())
}
pub fn read_double(&mut self) -> Result<f64> {
self.len -= 8;
self.inner.read_f64::<LE>().map_err(|e| e.into())
}
pub fn read_bool(&mut self) -> Result<bool> {
self.read_varint().map(|i| i != 0)
}
pub fn read_enum<E: From<u64>>(&mut self) -> Result<E> {
self.read_varint().map(|i| i.into())
}
pub fn read_bytes(&mut self) -> Result<Vec<u8>> {
let len = self.read_varint()? as usize;
self.len -= len;
let mut vec = Vec::with_capacity(len);
unsafe { vec.set_len(len); }
self.inner.read_exact(&mut vec[..])?;
Ok(vec)
}
pub fn read_string(&mut self) -> Result<String> {
let vec = self.read_bytes()?;
String::from_utf8(vec).map_err(|e| e.into())
}
pub fn read_packed_repeated_field(&mut self) -> Result<()> {
unimplemented!()
}
pub fn read_message<M: Message>(&mut self) -> Result<M> {
let len = self.read_varint()? as usize;
let cur_len = self.len;
self.len = len;
let msg = M::from_reader(self)?;
self.len = cur_len - len;
Ok(msg)
}
pub fn read_unknown(&mut self, wire_type: WireType) -> Result<()> {
match wire_type {
WireType::Varint => { self.read_varint()?; },
WireType::Fixed64 => {
self.len -= 8;
let _ = self.inner.read_exact(&mut [0; 8])?;
}
WireType::Fixed32 => {
self.len -= 4;
let _ = self.inner.read_exact(&mut [0; 4])?;
}
WireType::LengthDelimited => {
let len = self.read_varint()? as usize;
if len == 0 { return Ok(()); }
self.len -= len;
let mut buf = Vec::with_capacity(len);
unsafe { buf.set_len(len); }
self.inner.read_exact(&mut buf)?;
},
WireType::StartGroup |
WireType::EndGroup => { return Err(ErrorKind::Deprecated("group").into()); },
WireType::Unknown => { return Err(ErrorKind::UnknownWireType.into()); },
}
Ok(())
}
pub fn len(&self) -> usize {
self.len
}
}
#[test]
fn test_varint() {
    // 0x96 0x01 is the canonical varint encoding of 150.
    let bytes: &[u8] = &[0x96, 0x01];
    let mut reader = Reader::from_reader(bytes, bytes.len());
    assert_eq!(150, reader.read_varint().unwrap());
    // Both bytes consumed: the stream must report exhaustion.
    assert!(reader.next_tag().is_none());
}
#[test]
fn test_next_field() {
    // 0x08 is the tag for field number 1 with the varint wire type,
    // followed by the varint encoding of 150.
    let bytes: &[u8] = &[0x08, 0x96, 0x01];
    let mut reader = Reader::from_reader(bytes, bytes.len());
    let tag = reader.next_tag().unwrap().unwrap();
    assert_eq!((1, WireType::Varint), tag.unpack());
    assert_eq!(150, reader.read_varint().unwrap());
    assert!(reader.next_tag().is_none());
}
|
use collections::HashMap;
use std::from_str::from_str;
use std::slice;
use std::str;
use super::Error;
use super::compile::{Inst, compile};
use super::parse::parse;
use super::vm::{CaptureIndices, run};
/// Regexp is a compiled regular expression.
pub struct Regexp {
    /// The pattern string exactly as supplied to `Regexp::new`.
    orig: ~str,
    /// The compiled instruction sequence executed by `vm::run`.
    prog: Vec<Inst>,
    /// Capture group names; entry `i` names group `i`, when it has one.
    names: Vec<Option<~str>>,
}

impl Regexp {
    /// Creates a new compiled regular expression. Once compiled, it can be
    /// used repeatedly to search, split or replace text in a string.
    pub fn new(regex: &str) -> Result<Regexp, Error> {
        let ast = try!(parse(regex));
        let (insts, cap_names) = compile(ast);
        Ok(Regexp {
            orig: regex.to_owned(),
            prog: insts,
            names: cap_names,
        })
    }

    /// Executes the VM on the string given and converts the positions
    /// returned from Unicode character indices to byte indices.
    fn run(&self, text: &str) -> Option<CaptureIndices> {
        let locs = run(self.prog.as_slice(), text);
        locs.map(|ulocs| to_byte_indices(text, ulocs))
    }

    /// Returns true if and only if the regexp matches the string given.
    pub fn is_match(&self, text: &str) -> bool {
        self.run(text).is_some()
    }

    /// Returns the start and end byte range of the leftmost-longest match in
    /// `text`. If no match exists, then `None` is returned.
    pub fn find(&self, text: &str) -> Option<(uint, uint)> {
        // Capture group 0 is always the overall match.
        self.run(text).map(|locs| *locs.get(0))
    }

    /// Iterates through each successive non-overlapping match in `text`,
    /// returning the start and end byte indices with respect to `text`.
    pub fn find_iter<'r>(&'r self, text: &str) -> FindMatches<'r> {
        FindMatches {
            re: self,
            text: text.to_owned(),
            last_end: 0,
            last_match: 0,
        }
    }

    /// Returns the capture groups corresponding to the leftmost-longest
    /// match in `text`. Capture group `0` always corresponds to the entire
    /// match. If no match is found, then `None` is returned.
    pub fn captures(&self, text: &str) -> Option<Captures> {
        let locs =
            match self.run(text) {
                None => return None,
                Some(locs) => locs,
            };
        // Keep only the prefix of `text` covered by the overall match;
        // the capture locations index into this prefix.
        let &(_, e) = locs.get(0);
        let max_match = text.slice(0, e).to_owned();
        Some(Captures::from_locs(max_match, self.names.as_slice(), locs))
    }

    /// Returns an iterator over all the non-overlapping capture groups matched
    /// in `text`. This is operationally the same as `find_iter` (except it
    /// yields capture groups and not positions).
    pub fn captures_iter<'r>(&'r self, text: &str) -> FindCaptures<'r> {
        FindCaptures {
            re: self,
            text: text.to_owned(),
            last_match: 0,
            last_end: 0,
        }
    }

    /// Returns an iterator of substrings of `text` delimited by a match
    /// of the regular expression.
    /// Namely, each element of the iterator corresponds to text that *isn't*
    /// matched by the regular expression.
    pub fn split<'r, 't>(&'r self, text: &'t str) -> RegexpSplits<'r, 't> {
        RegexpSplits {
            finder: self.find_iter(text),
            text: text,
            last: 0,
        }
    }

    /// Returns an iterator of `limit` substrings of `text` delimited by a
    /// match of the regular expression. (A `limit` of `0` will return no
    /// substrings.)
    /// Namely, each element of the iterator corresponds to text that *isn't*
    /// matched by the regular expression.
    pub fn splitn<'r, 't>(&'r self, text: &'t str, limit: uint)
                         -> RegexpSplitsN<'r, 't> {
        RegexpSplitsN {
            splits: self.split(text),
            cur: 0,
            limit: limit,
        }
    }

    /// Replaces the leftmost-longest match with the replacement provided.
    /// The replacement can be a regular string (where `$N` and `$name` are
    /// expanded to match capture groups) or a function that takes the matches'
    /// `Captures` and returns the replaced string.
    ///
    /// If no match is found, then a copy of the string is returned unchanged.
    pub fn replace<R: Replacer>(&self, text: &str, rep: R) -> ~str {
        let caps =
            match self.captures(text) {
                // No match: return the input unchanged, as documented.
                // (This previously returned an empty string.)
                None => return text.to_owned(),
                Some(caps) => caps,
            };
        let mut new = str::with_capacity(text.len());
        let (s, e) = caps.pos(0);
        new.push_str(text.slice(0, s));
        new.push_str(rep.replace(&caps));
        new.push_str(text.slice(e, text.len()));
        new
    }

    /// Replaces all non-overlapping matches in `text` with the
    /// replacement provided. This is the same as calling `replacen` with
    /// `limit` set to `0`.
    pub fn replace_all<R: Replacer>(&self, text: &str, rep: R) -> ~str {
        self.replacen(text, 0, rep)
    }

    /// Replaces at most `limit` non-overlapping matches in `text` with the
    /// replacement provided. If `limit` is 0, then all non-overlapping matches
    /// are replaced.
    pub fn replacen<R: Replacer>
                   (&self, text: &str, limit: uint, rep: R) -> ~str {
        let mut new = str::with_capacity(text.len());
        let mut last_match = 0u;
        let mut i = 0;
        for cap in self.captures_iter(text) {
            // It'd be nicer to use the 'take' iterator instead, but it seemed
            // awkward given that '0' => no limit.
            if limit > 0 && i >= limit {
                break
            }
            i += 1;
            let (s, e) = cap.pos(0);
            // Copy the unmatched gap, then the expanded replacement.
            new.push_str(text.slice(last_match, s));
            new.push_str(rep.replace(&cap));
            last_match = e;
        }
        new.push_str(text.slice(last_match, text.len()));
        new
    }
}
/// NoExpand indicates literal string replacement.
///
/// It can be used with `replace` and `replace_all` to do a literal
/// string replacement without expanding `$name` to their corresponding
/// capture groups. The wrapped `&'r str` is emitted verbatim.
pub struct NoExpand<'r>(pub &'r str);
/// Expands all instances of `$name` in `text` to the corresponding capture
/// group `name`.
///
/// `name` may be an integer corresponding to the index of the
/// capture group (counted by order of opening parenthesis where `0` is the
/// entire match) or it can be a name (consisting of letters, digits or
/// underscores) corresponding to a named capture group.
///
/// If `name` isn't a valid capture group (whether the name doesn't exist or
/// isn't a valid index), then it is replaced with the empty string.
///
/// To write a literal `$` use `$$`.
pub fn expand(caps: &Captures, text: &str) -> ~str {
    // How evil can you get?
    // FIXME: Don't use regexes for this. It's completely unnecessary.
    // FIXME: Marginal improvement: get a syntax extension re! to prevent
    // recompilation every time.
    // `(^|[^$])` ensures a `$` preceded by another `$` is not expanded.
    // NOTE(review): `$$name` is therefore left verbatim rather than
    // collapsed to `$name` — confirm that matches the documented `$$` escape.
    let re = Regexp::new(r"(^|[^$])\$(\w+)").unwrap();
    re.replace_all(text, |refs: &Captures| -> ~str {
        // Group 1 is the guard character (or "" at the start of the string)
        // and must be re-emitted; group 2 is the capture name or index.
        let (pre, name) = (refs.at(1), refs.at(2));
        pre + match from_str::<uint>(name) {
            // Not an integer: treat it as a named group lookup.
            None => caps.name(name).to_owned(),
            Some(i) => caps.at(i).to_owned(),
        }
    })
}
/// Replacer describes types that can be used to replace matches in a string.
pub trait Replacer {
    // Produces the replacement text for a single match.
    fn replace(&self, caps: &Captures) -> ~str;
}

impl<'r> Replacer for NoExpand<'r> {
    fn replace(&self, _: &Captures) -> ~str {
        // Literal replacement: no `$name` expansion is performed.
        let NoExpand(s) = *self;
        s.to_owned()
    }
}

impl<'r> Replacer for &'r str {
    fn replace(&self, caps: &Captures) -> ~str {
        // Plain strings expand `$name`/`$N` references before replacing.
        expand(caps, *self)
    }
}

impl<'r> Replacer for 'r |&Captures| -> ~str {
    fn replace(&self, caps: &Captures) -> ~str {
        // Closures compute the replacement directly from the captures.
        (*self)(caps)
    }
}
/// Yields all substrings delimited by a regular expression match.
pub struct RegexpSplits<'r, 't> {
    // Underlying match iterator (over an owned copy of the text).
    finder: FindMatches<'r>,
    // The text being split; yielded slices borrow from here.
    text: &'t str,
    // Byte offset just past the previous match, i.e. start of next piece.
    last: uint,
}

impl<'r, 't> Iterator<&'t str> for RegexpSplits<'r, 't> {
    fn next(&mut self) -> Option<&'t str> {
        match self.finder.next() {
            None => {
                // No more matches: yield the trailing text once, then stop.
                if self.last >= self.text.len() {
                    None
                } else {
                    let s = self.text.slice(self.last, self.text.len());
                    self.last = self.text.len();
                    Some(s)
                }
            }
            Some((s, e)) => {
                // Yield the gap between the previous match and this one.
                let text = self.text.slice(self.last, s);
                self.last = e;
                Some(text)
            }
        }
    }
}

/// Yields at most `N` substrings delimited by a regular expression match.
///
/// The last substring will be whatever remains after splitting.
pub struct RegexpSplitsN<'r, 't> {
    splits: RegexpSplits<'r, 't>,
    // Number of substrings yielded so far.
    cur: uint,
    // Maximum number of substrings to yield; `0` yields nothing.
    limit: uint,
}

impl<'r, 't> Iterator<&'t str> for RegexpSplitsN<'r, 't> {
    fn next(&mut self) -> Option<&'t str> {
        if self.cur >= self.limit {
            None
        } else {
            self.cur += 1;
            if self.cur >= self.limit {
                // Final piece: the rest of the text, unsplit.
                // NOTE(review): this may yield an empty final piece when the
                // text was already fully consumed — confirm intended.
                Some(self.splits.text.slice(self.splits.last,
                                            self.splits.text.len()))
            } else {
                self.splits.next()
            }
        }
    }
}
/// Captures represents a group of captured strings for a single match.
///
/// The 0th capture always corresponds to the entire match. Each subsequent
/// index corresponds to the next capture group in the regex.
/// If a capture group is named, then the matched string is *also* available
/// via the `name` method. (Note that the 0th capture is always unnamed and so
/// must be accessed with the `at` method.)
pub struct Captures {
    // Text from the start of the searched string through the end of the
    // overall match; capture locations index into this string.
    max_match: ~str,
    // (start, end) pairs per group; entry 0 is the whole match.
    locs: CaptureIndices,
    // Maps capture group names to their positional index.
    named: HashMap<~str, uint>,
    // Added to the pairs returned by `pos` so they refer to the original
    // string (set by `FindCaptures` when matching from an offset).
    offset: uint,
}

impl Captures {
    /// Creates a new group of captures from the matched string, a list of
    /// capture names and a list of locations.
    fn from_locs(s: ~str, names: &[Option<~str>],
                 locs: CaptureIndices) -> Captures {
        // Build the name -> index table, skipping unnamed groups.
        let mut named = HashMap::new();
        for (i, name) in names.iter().enumerate() {
            match name {
                &None => {},
                &Some(ref name) => {
                    named.insert(name.to_owned(), i);
                }
            }
        }
        Captures {
            max_match: s,
            locs: locs,
            named: named,
            offset: 0,
        }
    }

    /// Adds offset to each location in the captures so that `pos` always
    /// returns byte indices in the original string.
    /// (The offset is stored here and applied lazily in `pos`.)
    fn adjust_locations(&mut self, offset: uint) {
        self.offset = offset;
    }

    /// Returns the matched string for the capture group `i`.
    /// If `i` isn't a valid capture group, then the empty string is returned.
    pub fn at<'r>(&'r self, i: uint) -> &'r str {
        if i >= self.locs.len() {
            return ""
        }
        // Note: `offset` is NOT applied here; the locations already index
        // into the locally stored `max_match`.
        let &(s, e) = self.locs.get(i);
        self.max_match.slice(s, e)
    }

    /// Returns the matched string for the capture group named `name`.
    /// If `name` isn't a valid capture group, then the empty string is
    /// returned.
    pub fn name<'r>(&'r self, name: &str) -> &'r str {
        match self.named.find(&name.to_owned()) {
            None => "",
            Some(i) => self.at(*i),
        }
    }

    /// Returns the start and end positions of the Nth capture group.
    /// Returns `(0, 0)` if `i` is not a valid capture group.
    /// The positions returned are *always* byte indices with respect to the
    /// original string matched.
    pub fn pos(&self, i: uint) -> (uint, uint) {
        if i >= self.locs.len() {
            return (0u, 0u)
        }
        let (s, e) = *self.locs.get(i);
        (s + self.offset, e + self.offset)
    }

    /// Creates an iterator of all the capture groups in order of appearance
    /// in the regular expression.
    pub fn iter<'r>(&'r self) -> SubCaptures<'r> {
        SubCaptures { idx: 0, caps: self, }
    }

    /// Creates an iterator of all the capture group positions in order of
    /// appearance in the regular expression. Positions are byte indices
    /// in terms of the original string matched.
    pub fn iter_pos<'r>(&'r self) -> SubCapturesPos<'r> {
        SubCapturesPos { idx: 0, caps: self, }
    }
}

impl Container for Captures {
    /// The number of capture groups, including the whole-match group 0.
    fn len(&self) -> uint {
        self.locs.len()
    }
}
/// An iterator over capture groups for a particular match of a regular
/// expression.
pub struct SubCaptures<'r> {
    // Next group index to yield.
    idx: uint,
    caps: &'r Captures,
}

impl<'r> Iterator<&'r str> for SubCaptures<'r> {
    fn next(&mut self) -> Option<&'r str> {
        if self.idx < self.caps.len() {
            // Advance first, then yield the group we just passed.
            self.idx += 1;
            Some(self.caps.at(self.idx - 1))
        } else {
            None
        }
    }
}

/// An iterator over capture group positions for a particular match of a
/// regular expression.
///
/// Positions are byte indices in terms of the original string matched.
pub struct SubCapturesPos<'r> {
    // Next group index to yield.
    idx: uint,
    caps: &'r Captures,
}

impl<'r> Iterator<(uint, uint)> for SubCapturesPos<'r> {
    fn next(&mut self) -> Option<(uint, uint)> {
        if self.idx < self.caps.len() {
            self.idx += 1;
            Some(self.caps.pos(self.idx - 1))
        } else {
            None
        }
    }
}
/// An iterator that yields all non-overlapping capture groups matching a
/// particular regular expression.
pub struct FindCaptures<'r> {
    re: &'r Regexp,
    // Owned copy of the searched text.
    text: ~str,
    // End of the most recently yielded match.
    last_match: uint,
    // Offset at which the next search begins.
    last_end: uint,
}

impl<'r> Iterator<Captures> for FindCaptures<'r> {
    fn next(&mut self) -> Option<Captures> {
        if self.last_end > self.text.len() {
            return None
        }
        // Re-run the regexp on the remaining suffix of the text.
        let caps = {
            let t = self.text.slice(self.last_end, self.text.len());
            self.re.captures(t)
        };
        match caps {
            None => None,
            Some(mut caps) => {
                // Positions were relative to the suffix; shift them back.
                caps.adjust_locations(self.last_end);
                // Don't accept empty matches immediately following a match.
                // i.e., no infinite loops please.
                if caps.at(0).len() == 0 && self.last_end == self.last_match {
                    self.last_end += 1;
                    return self.next()
                }
                // `max_match` spans from the suffix start to the match end.
                self.last_end += caps.max_match.len();
                self.last_match = self.last_end;
                Some(caps)
            }
        }
    }
}

/// An iterator over all non-overlapping matches for a particular string.
///
/// The iterator yields a tuple of integers corresponding to the start and end
/// of the match. The indices are byte offsets.
pub struct FindMatches<'r> {
    re: &'r Regexp,
    // Owned copy of the searched text.
    text: ~str,
    // End of the most recently yielded match.
    last_match: uint,
    // Offset at which the next search begins.
    last_end: uint,
}

impl<'r> Iterator<(uint, uint)> for FindMatches<'r> {
    fn next(&mut self) -> Option<(uint, uint)> {
        if self.last_end > self.text.len() {
            return None
        }
        // Re-run the regexp on the remaining suffix of the text.
        let find = {
            let t = self.text.slice(self.last_end, self.text.len());
            self.re.find(t)
        };
        match find {
            None => None,
            Some((mut s, mut e)) => {
                // Positions were relative to the suffix; shift them back.
                s += self.last_end;
                e += self.last_end;
                // Don't accept empty matches immediately following a match.
                // i.e., no infinite loops please.
                if self.last_end == e && self.last_end == self.last_match {
                    self.last_end += 1;
                    return self.next()
                }
                self.last_end = e;
                self.last_match = self.last_end;
                Some((s, e))
            }
        }
    }
}
// Converts capture locations expressed as Unicode character indices into
// byte indices with respect to `s`.
fn to_byte_indices(s: &str, ulocs: CaptureIndices) -> CaptureIndices {
    // FIXME: This seems incredibly slow and unfortunate and I think it can
    // be removed completely.
    // I wonder if there is a way to get the VM to return byte indices easily.
    // Preferably if it can be done without disrupting the fact that everything
    // works at the Unicode `char` granularity.
    // (Maybe keep track of byte index as we move through string?)
    let mut blocs = Vec::from_elem(ulocs.len(), (0u, 0u));
    // `val1()` is the second tuple field: the end of the overall match.
    let biggest = ulocs.get(0).val1(); // first capture is always biggest
    // `char_indices` yields (byte index, char); `enumerate` adds the
    // character index, so `s_uloc` is a char index and `bloc` a byte index.
    for (s_uloc, (bloc, _)) in s.char_indices().enumerate() {
        if s_uloc > biggest {
            // We can stop processing the string once we know we're done
            // mapping to byte indices.
            break
        }
        // `mut0`/`mut1` mutate the first/second tuple component in place.
        for (loci, &(suloc, euloc)) in ulocs.iter().enumerate() {
            if suloc == s_uloc {
                *blocs.get_mut(loci).mut0() = bloc;
            }
            if euloc == s_uloc {
                *blocs.get_mut(loci).mut1() = bloc;
            }
        }
    }
    // We also need to make sure that ending positions that correspond to
    // the character length of 's' are mapped to the byte length.
    let char_len = s.char_len();
    for (loci, &(suloc, euloc)) in ulocs.iter().enumerate() {
        if suloc == char_len {
            *blocs.get_mut(loci).mut0() = s.len();
        }
        if euloc == char_len {
            *blocs.get_mut(loci).mut1() = s.len();
        }
    }
    blocs
}
doco touchup
use collections::HashMap;
use std::from_str::from_str;
use std::slice;
use std::str;
use super::Error;
use super::compile::{Inst, compile};
use super::parse::parse;
use super::vm::{CaptureIndices, run};
/// Regexp is a compiled regular expression.
pub struct Regexp {
    /// The pattern string exactly as supplied to `Regexp::new`.
    orig: ~str,
    /// The compiled instruction sequence executed by `vm::run`.
    prog: Vec<Inst>,
    /// Capture group names; entry `i` names group `i`, when it has one.
    names: Vec<Option<~str>>,
}

impl Regexp {
    /// Creates a new compiled regular expression. Once compiled, it can be
    /// used repeatedly to search, split or replace text in a string.
    pub fn new(regex: &str) -> Result<Regexp, Error> {
        let ast = try!(parse(regex));
        let (insts, cap_names) = compile(ast);
        Ok(Regexp {
            orig: regex.to_owned(),
            prog: insts,
            names: cap_names,
        })
    }

    /// Executes the VM on the string given and converts the positions
    /// returned from Unicode character indices to byte indices.
    fn run(&self, text: &str) -> Option<CaptureIndices> {
        let locs = run(self.prog.as_slice(), text);
        locs.map(|ulocs| to_byte_indices(text, ulocs))
    }

    /// Returns true if and only if the regexp matches the string given.
    pub fn is_match(&self, text: &str) -> bool {
        self.run(text).is_some()
    }

    /// Returns the start and end byte range of the leftmost-longest match in
    /// `text`. If no match exists, then `None` is returned.
    pub fn find(&self, text: &str) -> Option<(uint, uint)> {
        // Capture group 0 is always the overall match.
        self.run(text).map(|locs| *locs.get(0))
    }

    /// Iterates through each successive non-overlapping match in `text`,
    /// returning the start and end byte indices with respect to `text`.
    pub fn find_iter<'r>(&'r self, text: &str) -> FindMatches<'r> {
        FindMatches {
            re: self,
            text: text.to_owned(),
            last_end: 0,
            last_match: 0,
        }
    }

    /// Returns the capture groups corresponding to the leftmost-longest
    /// match in `text`. Capture group `0` always corresponds to the entire
    /// match. If no match is found, then `None` is returned.
    pub fn captures(&self, text: &str) -> Option<Captures> {
        let locs =
            match self.run(text) {
                None => return None,
                Some(locs) => locs,
            };
        // Keep only the prefix of `text` covered by the overall match;
        // the capture locations index into this prefix.
        let &(_, e) = locs.get(0);
        let max_match = text.slice(0, e).to_owned();
        Some(Captures::from_locs(max_match, self.names.as_slice(), locs))
    }

    /// Returns an iterator over all the non-overlapping capture groups matched
    /// in `text`. This is operationally the same as `find_iter` (except it
    /// yields capture groups and not positions).
    pub fn captures_iter<'r>(&'r self, text: &str) -> FindCaptures<'r> {
        FindCaptures {
            re: self,
            text: text.to_owned(),
            last_match: 0,
            last_end: 0,
        }
    }

    /// Returns an iterator of substrings of `text` delimited by a match
    /// of the regular expression.
    /// Namely, each element of the iterator corresponds to text that *isn't*
    /// matched by the regular expression.
    pub fn split<'r, 't>(&'r self, text: &'t str) -> RegexpSplits<'r, 't> {
        RegexpSplits {
            finder: self.find_iter(text),
            text: text,
            last: 0,
        }
    }

    /// Returns an iterator of `limit` substrings of `text` delimited by a
    /// match of the regular expression. (A `limit` of `0` will return no
    /// substrings.)
    /// Namely, each element of the iterator corresponds to text that *isn't*
    /// matched by the regular expression.
    pub fn splitn<'r, 't>(&'r self, text: &'t str, limit: uint)
                         -> RegexpSplitsN<'r, 't> {
        RegexpSplitsN {
            splits: self.split(text),
            cur: 0,
            limit: limit,
        }
    }

    /// Replaces the leftmost-longest match with the replacement provided.
    /// The replacement can be a regular string (where `$N` and `$name` are
    /// expanded to match capture groups) or a function that takes the matches'
    /// `Captures` and returns the replaced string.
    ///
    /// If no match is found, then a copy of the string is returned unchanged.
    pub fn replace<R: Replacer>(&self, text: &str, rep: R) -> ~str {
        let caps =
            match self.captures(text) {
                // No match: return the input unchanged, as documented.
                // (This previously returned an empty string.)
                None => return text.to_owned(),
                Some(caps) => caps,
            };
        let mut new = str::with_capacity(text.len());
        let (s, e) = caps.pos(0);
        new.push_str(text.slice(0, s));
        new.push_str(rep.replace(&caps));
        new.push_str(text.slice(e, text.len()));
        new
    }

    /// Replaces all non-overlapping matches in `text` with the
    /// replacement provided. This is the same as calling `replacen` with
    /// `limit` set to `0`.
    pub fn replace_all<R: Replacer>(&self, text: &str, rep: R) -> ~str {
        self.replacen(text, 0, rep)
    }

    /// Replaces at most `limit` non-overlapping matches in `text` with the
    /// replacement provided. If `limit` is 0, then all non-overlapping matches
    /// are replaced.
    pub fn replacen<R: Replacer>
                   (&self, text: &str, limit: uint, rep: R) -> ~str {
        let mut new = str::with_capacity(text.len());
        let mut last_match = 0u;
        let mut i = 0;
        for cap in self.captures_iter(text) {
            // It'd be nicer to use the 'take' iterator instead, but it seemed
            // awkward given that '0' => no limit.
            if limit > 0 && i >= limit {
                break
            }
            i += 1;
            let (s, e) = cap.pos(0);
            // Copy the unmatched gap, then the expanded replacement.
            new.push_str(text.slice(last_match, s));
            new.push_str(rep.replace(&cap));
            last_match = e;
        }
        new.push_str(text.slice(last_match, text.len()));
        new
    }
}
/// NoExpand indicates literal string replacement.
///
/// It can be used with `replace` and `replace_all` to do a literal
/// string replacement without expanding `$name` to their corresponding
/// capture groups. The wrapped `&'r str` is emitted verbatim.
pub struct NoExpand<'r>(pub &'r str);
/// Expands all instances of `$name` in `text` to the corresponding capture
/// group `name`.
///
/// `name` may be an integer corresponding to the index of the
/// capture group (counted by order of opening parenthesis where `0` is the
/// entire match) or it can be a name (consisting of letters, digits or
/// underscores) corresponding to a named capture group.
///
/// If `name` isn't a valid capture group (whether the name doesn't exist or
/// isn't a valid index), then it is replaced with the empty string.
///
/// To write a literal `$` use `$$`.
pub fn expand(caps: &Captures, text: &str) -> ~str {
    // How evil can you get?
    // FIXME: Don't use regexes for this. It's completely unnecessary.
    // FIXME: Marginal improvement: get a syntax extension re! to prevent
    // recompilation every time.
    // `(^|[^$])` ensures a `$` preceded by another `$` is not expanded.
    // NOTE(review): `$$name` is therefore left verbatim rather than
    // collapsed to `$name` — confirm that matches the documented `$$` escape.
    let re = Regexp::new(r"(^|[^$])\$(\w+)").unwrap();
    re.replace_all(text, |refs: &Captures| -> ~str {
        // Group 1 is the guard character (or "" at the start of the string)
        // and must be re-emitted; group 2 is the capture name or index.
        let (pre, name) = (refs.at(1), refs.at(2));
        pre + match from_str::<uint>(name) {
            // Not an integer: treat it as a named group lookup.
            None => caps.name(name).to_owned(),
            Some(i) => caps.at(i).to_owned(),
        }
    })
}
/// Replacer describes types that can be used to replace matches in a string.
pub trait Replacer {
    // Produces the replacement text for a single match.
    fn replace(&self, caps: &Captures) -> ~str;
}

impl<'r> Replacer for NoExpand<'r> {
    fn replace(&self, _: &Captures) -> ~str {
        // Literal replacement: no `$name` expansion is performed.
        let NoExpand(s) = *self;
        s.to_owned()
    }
}

impl<'r> Replacer for &'r str {
    fn replace(&self, caps: &Captures) -> ~str {
        // Plain strings expand `$name`/`$N` references before replacing.
        expand(caps, *self)
    }
}

impl<'r> Replacer for 'r |&Captures| -> ~str {
    fn replace(&self, caps: &Captures) -> ~str {
        // Closures compute the replacement directly from the captures.
        (*self)(caps)
    }
}
/// Yields all substrings delimited by a regular expression match.
pub struct RegexpSplits<'r, 't> {
    // Underlying match iterator (over an owned copy of the text).
    finder: FindMatches<'r>,
    // The text being split; yielded slices borrow from here.
    text: &'t str,
    // Byte offset just past the previous match, i.e. start of next piece.
    last: uint,
}

impl<'r, 't> Iterator<&'t str> for RegexpSplits<'r, 't> {
    fn next(&mut self) -> Option<&'t str> {
        match self.finder.next() {
            None => {
                // No more matches: yield the trailing text once, then stop.
                if self.last >= self.text.len() {
                    None
                } else {
                    let s = self.text.slice(self.last, self.text.len());
                    self.last = self.text.len();
                    Some(s)
                }
            }
            Some((s, e)) => {
                // Yield the gap between the previous match and this one.
                let text = self.text.slice(self.last, s);
                self.last = e;
                Some(text)
            }
        }
    }
}

/// Yields at most `N` substrings delimited by a regular expression match.
///
/// The last substring will be whatever remains after splitting.
pub struct RegexpSplitsN<'r, 't> {
    splits: RegexpSplits<'r, 't>,
    // Number of substrings yielded so far.
    cur: uint,
    // Maximum number of substrings to yield; `0` yields nothing.
    limit: uint,
}

impl<'r, 't> Iterator<&'t str> for RegexpSplitsN<'r, 't> {
    fn next(&mut self) -> Option<&'t str> {
        if self.cur >= self.limit {
            None
        } else {
            self.cur += 1;
            if self.cur >= self.limit {
                // Final piece: the rest of the text, unsplit.
                // NOTE(review): this may yield an empty final piece when the
                // text was already fully consumed — confirm intended.
                Some(self.splits.text.slice(self.splits.last,
                                            self.splits.text.len()))
            } else {
                self.splits.next()
            }
        }
    }
}
/// Captures represents a group of captured strings for a single match.
///
/// The 0th capture always corresponds to the entire match. Each subsequent
/// index corresponds to the next capture group in the regex.
/// If a capture group is named, then the matched string is *also* available
/// via the `name` method. (Note that the 0th capture is always unnamed and so
/// must be accessed with the `at` method.)
pub struct Captures {
    // Text from the start of the searched string through the end of the
    // overall match; capture locations index into this string.
    max_match: ~str,
    // (start, end) pairs per group; entry 0 is the whole match.
    locs: CaptureIndices,
    // Maps capture group names to their positional index.
    named: HashMap<~str, uint>,
    // Added to the pairs returned by `pos` so they refer to the original
    // string (set by `FindCaptures` when matching from an offset).
    offset: uint,
}

impl Captures {
    /// Creates a new group of captures from the matched string, a list of
    /// capture names and a list of locations.
    fn from_locs(s: ~str, names: &[Option<~str>],
                 locs: CaptureIndices) -> Captures {
        // Build the name -> index table, skipping unnamed groups.
        let mut named = HashMap::new();
        for (i, name) in names.iter().enumerate() {
            match name {
                &None => {},
                &Some(ref name) => {
                    named.insert(name.to_owned(), i);
                }
            }
        }
        Captures {
            max_match: s,
            locs: locs,
            named: named,
            offset: 0,
        }
    }

    /// Adds offset to each location in the captures so that `pos` always
    /// returns byte indices in the original string.
    /// (The offset is stored here and applied lazily in `pos`.)
    fn adjust_locations(&mut self, offset: uint) {
        self.offset = offset;
    }

    /// Returns the matched string for the capture group `i`.
    /// If `i` isn't a valid capture group, then the empty string is returned.
    pub fn at<'r>(&'r self, i: uint) -> &'r str {
        if i >= self.locs.len() {
            return ""
        }
        // Note: `offset` is NOT applied here; the locations already index
        // into the locally stored `max_match`.
        let &(s, e) = self.locs.get(i);
        self.max_match.slice(s, e)
    }

    /// Returns the matched string for the capture group named `name`.
    /// If `name` isn't a valid capture group, then the empty string is
    /// returned.
    pub fn name<'r>(&'r self, name: &str) -> &'r str {
        match self.named.find(&name.to_owned()) {
            None => "",
            Some(i) => self.at(*i),
        }
    }

    /// Returns the start and end positions of the Nth capture group.
    /// Returns `(0, 0)` if `i` is not a valid capture group.
    /// The positions returned are *always* byte indices with respect to the
    /// original string matched.
    pub fn pos(&self, i: uint) -> (uint, uint) {
        if i >= self.locs.len() {
            return (0u, 0u)
        }
        let (s, e) = *self.locs.get(i);
        (s + self.offset, e + self.offset)
    }

    /// Creates an iterator of all the capture groups in order of appearance
    /// in the regular expression.
    pub fn iter<'r>(&'r self) -> SubCaptures<'r> {
        SubCaptures { idx: 0, caps: self, }
    }

    /// Creates an iterator of all the capture group positions in order of
    /// appearance in the regular expression. Positions are byte indices
    /// in terms of the original string matched.
    pub fn iter_pos<'r>(&'r self) -> SubCapturesPos<'r> {
        SubCapturesPos { idx: 0, caps: self, }
    }
}

impl Container for Captures {
    /// The number of capture groups, including the whole-match group 0.
    fn len(&self) -> uint {
        self.locs.len()
    }
}
/// An iterator over capture groups for a particular match of a regular
/// expression.
pub struct SubCaptures<'r> {
    // Next group index to yield.
    idx: uint,
    caps: &'r Captures,
}

impl<'r> Iterator<&'r str> for SubCaptures<'r> {
    fn next(&mut self) -> Option<&'r str> {
        if self.idx < self.caps.len() {
            // Advance first, then yield the group we just passed.
            self.idx += 1;
            Some(self.caps.at(self.idx - 1))
        } else {
            None
        }
    }
}

/// An iterator over capture group positions for a particular match of a
/// regular expression.
///
/// Positions are byte indices in terms of the original string matched.
pub struct SubCapturesPos<'r> {
    // Next group index to yield.
    idx: uint,
    caps: &'r Captures,
}

impl<'r> Iterator<(uint, uint)> for SubCapturesPos<'r> {
    fn next(&mut self) -> Option<(uint, uint)> {
        if self.idx < self.caps.len() {
            self.idx += 1;
            Some(self.caps.pos(self.idx - 1))
        } else {
            None
        }
    }
}
/// An iterator that yields all non-overlapping capture groups matching a
/// particular regular expression.
pub struct FindCaptures<'r> {
    re: &'r Regexp,
    // Owned copy of the searched text.
    text: ~str,
    // End of the most recently yielded match.
    last_match: uint,
    // Offset at which the next search begins.
    last_end: uint,
}

impl<'r> Iterator<Captures> for FindCaptures<'r> {
    fn next(&mut self) -> Option<Captures> {
        if self.last_end > self.text.len() {
            return None
        }
        // Re-run the regexp on the remaining suffix of the text.
        let caps = {
            let t = self.text.slice(self.last_end, self.text.len());
            self.re.captures(t)
        };
        match caps {
            None => None,
            Some(mut caps) => {
                // Positions were relative to the suffix; shift them back.
                caps.adjust_locations(self.last_end);
                // Don't accept empty matches immediately following a match.
                // i.e., no infinite loops please.
                if caps.at(0).len() == 0 && self.last_end == self.last_match {
                    self.last_end += 1;
                    return self.next()
                }
                // `max_match` spans from the suffix start to the match end.
                self.last_end += caps.max_match.len();
                self.last_match = self.last_end;
                Some(caps)
            }
        }
    }
}

/// An iterator over all non-overlapping matches for a particular string.
///
/// The iterator yields a tuple of integers corresponding to the start and end
/// of the match. The indices are byte offsets.
pub struct FindMatches<'r> {
    re: &'r Regexp,
    // Owned copy of the searched text.
    text: ~str,
    // End of the most recently yielded match.
    last_match: uint,
    // Offset at which the next search begins.
    last_end: uint,
}

impl<'r> Iterator<(uint, uint)> for FindMatches<'r> {
    fn next(&mut self) -> Option<(uint, uint)> {
        if self.last_end > self.text.len() {
            return None
        }
        // Re-run the regexp on the remaining suffix of the text.
        let find = {
            let t = self.text.slice(self.last_end, self.text.len());
            self.re.find(t)
        };
        match find {
            None => None,
            Some((mut s, mut e)) => {
                // Positions were relative to the suffix; shift them back.
                s += self.last_end;
                e += self.last_end;
                // Don't accept empty matches immediately following a match.
                // i.e., no infinite loops please.
                if self.last_end == e && self.last_end == self.last_match {
                    self.last_end += 1;
                    return self.next()
                }
                self.last_end = e;
                self.last_match = self.last_end;
                Some((s, e))
            }
        }
    }
}
// Converts capture locations expressed as Unicode character indices into
// byte indices with respect to `s`.
fn to_byte_indices(s: &str, ulocs: CaptureIndices) -> CaptureIndices {
    // FIXME: This seems incredibly slow and unfortunate and I think it can
    // be removed completely.
    // I wonder if there is a way to get the VM to return byte indices easily.
    // Preferably if it can be done without disrupting the fact that everything
    // works at the Unicode `char` granularity.
    // (Maybe keep track of byte index as we move through string?)
    let mut blocs = Vec::from_elem(ulocs.len(), (0u, 0u));
    // `val1()` is the second tuple field: the end of the overall match.
    let biggest = ulocs.get(0).val1(); // first capture is always biggest
    // `char_indices` yields (byte index, char); `enumerate` adds the
    // character index, so `s_uloc` is a char index and `bloc` a byte index.
    for (s_uloc, (bloc, _)) in s.char_indices().enumerate() {
        if s_uloc > biggest {
            // We can stop processing the string once we know we're done
            // mapping to byte indices.
            break
        }
        // `mut0`/`mut1` mutate the first/second tuple component in place.
        for (loci, &(suloc, euloc)) in ulocs.iter().enumerate() {
            if suloc == s_uloc {
                *blocs.get_mut(loci).mut0() = bloc;
            }
            if euloc == s_uloc {
                *blocs.get_mut(loci).mut1() = bloc;
            }
        }
    }
    // We also need to make sure that ending positions that correspond to
    // the character length of 's' are mapped to the byte length.
    let char_len = s.char_len();
    for (loci, &(suloc, euloc)) in ulocs.iter().enumerate() {
        if suloc == char_len {
            *blocs.get_mut(loci).mut0() = s.len();
        }
        if euloc == char_len {
            *blocs.get_mut(loci).mut1() = s.len();
        }
    }
    blocs
}
|
use std::collections::{BTreeMap, HashMap, VecDeque};
use std::fmt;
use std::rc::Rc;
use std::borrow::Borrow;
use serde::Serialize;
use serde_json::value::Value as Json;
use template::{BlockParam, Directive as DirectiveTemplate, HelperTemplate, Parameter, Template,
TemplateElement, TemplateMapping};
use template::TemplateElement::*;
use registry::Registry;
use context::{Context, JsonRender};
use helpers::HelperDef;
use error::RenderError;
use partial;
use output::{Output, StringOutput};
// Shared `Null` fallback value — presumably handed out by reference when a
// lookup has no result; confirm against the accessor that uses it.
static DEFAULT_VALUE: Json = Json::Null;
/// The context of a render call
///
/// this context stores information of a render and a writer where generated
/// content is written to.
///
pub struct RenderContext<'a> {
partials: HashMap<String, Rc<Template>>,
path: String,
local_path_root: VecDeque<String>,
local_variables: HashMap<String, Json>,
local_helpers: &'a mut HashMap<String, Rc<Box<HelperDef + 'static>>>,
default_var: Json,
block_context: VecDeque<Context>,
/// the context
context: Context,
/// current template name
pub current_template: Option<String>,
/// root template name
pub root_template: Option<String>,
pub disable_escape: bool,
}
impl<'a> RenderContext<'a> {
/// Create a render context from a `Write`
pub fn new(
ctx: Context,
local_helpers: &'a mut HashMap<String, Rc<Box<HelperDef + 'static>>>,
) -> RenderContext<'a> {
RenderContext {
partials: HashMap::new(),
path: ".".to_string(),
local_path_root: VecDeque::new(),
local_variables: HashMap::new(),
local_helpers: local_helpers,
default_var: Json::Null,
block_context: VecDeque::new(),
context: ctx,
current_template: None,
root_template: None,
disable_escape: false,
}
}
pub fn derive(&mut self) -> RenderContext {
RenderContext {
partials: self.partials.clone(),
path: self.path.clone(),
local_path_root: self.local_path_root.clone(),
local_variables: self.local_variables.clone(),
current_template: self.current_template.clone(),
root_template: self.root_template.clone(),
default_var: self.default_var.clone(),
block_context: self.block_context.clone(),
disable_escape: self.disable_escape,
local_helpers: self.local_helpers,
context: self.context.clone(),
}
}
pub fn with_context(&mut self, ctx: Context) -> RenderContext {
RenderContext {
partials: self.partials.clone(),
path: ".".to_owned(),
local_path_root: VecDeque::new(),
local_variables: self.local_variables.clone(),
current_template: self.current_template.clone(),
root_template: self.root_template.clone(),
default_var: self.default_var.clone(),
block_context: VecDeque::new(),
disable_escape: self.disable_escape,
local_helpers: self.local_helpers,
context: ctx,
}
}
pub fn get_partial(&self, name: &str) -> Option<Rc<Template>> {
self.partials.get(name).map(|t| t.clone())
}
pub fn set_partial(&mut self, name: String, result: Rc<Template>) {
self.partials.insert(name, result);
}
pub fn get_path(&self) -> &String {
&self.path
}
pub fn set_path(&mut self, path: String) {
self.path = path;
}
pub fn get_local_path_root(&self) -> &VecDeque<String> {
&self.local_path_root
}
pub fn push_local_path_root(&mut self, path: String) {
self.local_path_root.push_front(path)
}
pub fn pop_local_path_root(&mut self) {
self.local_path_root.pop_front();
}
pub fn set_local_var(&mut self, name: String, value: Json) {
self.local_variables.insert(name, value);
}
pub fn clear_local_vars(&mut self) {
self.local_variables.clear();
}
pub fn promote_local_vars(&mut self) {
let mut new_map: HashMap<String, Json> = HashMap::new();
for key in self.local_variables.keys() {
let mut new_key = String::new();
new_key.push_str("@../");
new_key.push_str(&key[1..]);
let v = self.local_variables.get(key).unwrap().clone();
new_map.insert(new_key, v);
}
self.local_variables = new_map;
}
pub fn demote_local_vars(&mut self) {
let mut new_map: HashMap<String, Json> = HashMap::new();
for key in self.local_variables.keys() {
if key.starts_with("@../") {
let mut new_key = String::new();
new_key.push('@');
new_key.push_str(&key[4..]);
let v = self.local_variables.get(key).unwrap().clone();
new_map.insert(new_key, v);
}
}
self.local_variables = new_map;
}
pub fn get_local_var(&self, name: &String) -> Option<&Json> {
self.local_variables.get(name)
}
pub fn push_block_context<T>(&mut self, ctx: &T) -> Result<(), RenderError>
where
T: Serialize,
{
let r = self.block_context.push_front(Context::wraps(ctx)?);
Ok(r)
}
pub fn pop_block_context(&mut self) {
self.block_context.pop_front();
}
pub fn evaluate_in_block_context(
&self,
local_path: &str,
) -> Result<Option<&Json>, RenderError> {
for bc in self.block_context.iter() {
let v = bc.navigate(".", &self.local_path_root, local_path)?;
if v.is_some() {
return Ok(v);
}
}
Ok(None)
}
pub fn is_current_template(&self, p: &str) -> bool {
self.current_template
.as_ref()
.map(|s| s == p)
.unwrap_or(false)
}
pub fn context(&self) -> &Context {
&self.context
}
pub fn context_mut(&mut self) -> &mut Context {
&mut self.context
}
pub fn register_local_helper(
&mut self,
name: &str,
def: Box<HelperDef + 'static>,
) -> Option<Rc<Box<HelperDef + 'static>>> {
self.local_helpers.insert(name.to_string(), Rc::new(def))
}
pub fn unregister_local_helper(&mut self, name: &str) {
self.local_helpers.remove(name);
}
pub fn get_local_helper(&self, name: &str) -> Option<Rc<Box<HelperDef + 'static>>> {
self.local_helpers.get(name).map(|r| r.clone())
}
pub fn evaluate(&self, path: &str, strict: bool) -> Result<&Json, RenderError> {
let value_container =
self.context
.navigate(self.get_path(), self.get_local_path_root(), path);
if strict {
value_container
.and_then(|v| v.ok_or(RenderError::new("Value not found in strict mode.")))
} else {
value_container.map(|v| v.unwrap_or(&DEFAULT_VALUE))
}
}
pub fn evaluate_absolute(&self, path: &str, strict: bool) -> Result<&Json, RenderError> {
let value_container = self.context.navigate(".", &VecDeque::new(), path);
if strict {
value_container
.and_then(|v| v.ok_or(RenderError::new("Value not found in strict mode.")))
} else {
value_container.map(|v| v.unwrap_or(&DEFAULT_VALUE))
}
}
}
impl<'a> fmt::Debug for RenderContext<'a> {
    /// Debug output of the render state; `local_helpers` is omitted because
    /// boxed helper trait objects are not `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        f.debug_struct("RenderContext")
            .field("context", &self.context)
            .field("path", &self.path)
            .field("partials", &self.partials)
            .field("local_variables", &self.local_variables)
            .field("root_template", &self.root_template)
            .field("current_template", &self.current_template)
            .field("block_context", &self.block_context)
            .field("local_path_root", &self.local_path_root)
            // fix: field name was misspelled "disable_eacape" in debug output
            .field("disable_escape", &self.disable_escape)
            .finish()
    }
}
/// Json wrapper that holds the Json value and reference path information
///
#[derive(Debug)]
pub struct ContextJson {
    // Relative path the value was resolved from; `None` for literals.
    path: Option<String>,
    // The resolved JSON value itself.
    value: Json,
}
impl ContextJson {
/// Returns relative path when the value is referenced
/// If the value is from a literal, the path is `None`
pub fn path(&self) -> Option<&String> {
self.path.as_ref()
}
/// Return root level of this path if any
pub fn path_root(&self) -> Option<&str> {
self.path
.as_ref()
.and_then(|p| p.split(|c| c == '.' || c == '/').nth(0))
}
/// Returns the value
pub fn value(&self) -> &Json {
&self.value
}
}
/// Render-time Helper data when using in a helper definition
#[derive(Debug)]
pub struct Helper<'a> {
    // Helper name as written in the template.
    name: &'a str,
    // Positional params, already resolved against the context.
    params: Vec<ContextJson>,
    // Named (hash) params, already resolved against the context.
    hash: BTreeMap<String, ContextJson>,
    // Block parameter declaration, e.g. `|val|` or `|key, val|`.
    block_param: &'a Option<BlockParam>,
    // Main inner template for block helpers.
    template: Option<&'a Template>,
    // `{{else}}` branch template, if present.
    inverse: Option<&'a Template>,
    // True for `{{#helper}}...{{/helper}}` form.
    block: bool,
}
impl<'a, 'b> Helper<'a> {
    /// Builds a render-time `Helper` from its parsed template form by
    /// expanding every param and hash value against the current context.
    fn from_template(
        ht: &'a HelperTemplate,
        registry: &Registry,
        rc: &'b mut RenderContext,
    ) -> Result<Helper<'a>, RenderError> {
        // Resolve positional params (may recurse through subexpressions).
        let mut evaluated_params = Vec::new();
        for p in ht.params.iter() {
            let r = try!(p.expand(registry, rc));
            evaluated_params.push(r);
        }
        // Resolve named (hash) params the same way.
        let mut evaluated_hash = BTreeMap::new();
        for (k, p) in ht.hash.iter() {
            let r = try!(p.expand(registry, rc));
            evaluated_hash.insert(k.clone(), r);
        }
        Ok(Helper {
            name: &ht.name,
            params: evaluated_params,
            hash: evaluated_hash,
            block_param: &ht.block_param,
            template: ht.template.as_ref(),
            inverse: ht.inverse.as_ref(),
            block: ht.block,
        })
    }
    /// Returns helper name
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Returns all helper params, resolved within the context
    pub fn params(&self) -> &Vec<ContextJson> {
        &self.params
    }
    /// Returns nth helper param, resolved within the context.
    ///
    /// ## Example
    ///
    /// To get the first param in `{{my_helper abc}}` or `{{my_helper 2}}`,
    /// use `h.param(0)` in helper definition.
    /// Variable `abc` is auto resolved in current context.
    ///
    /// ```
    /// use handlebars::*;
    ///
    /// fn my_helper(h: &Helper, rc: &mut RenderContext) -> Result<(), RenderError> {
    ///     let v = h.param(0).map(|v| v.value()).unwrap();
    ///     // ..
    ///     Ok(())
    /// }
    /// ```
    pub fn param(&self, idx: usize) -> Option<&ContextJson> {
        self.params.get(idx)
    }
    /// Returns hash, resolved within the context
    pub fn hash(&self) -> &BTreeMap<String, ContextJson> {
        &self.hash
    }
    /// Return hash value of a given key, resolved within the context
    ///
    /// ## Example
    ///
    /// To get the first param in `{{my_helper v=abc}}` or `{{my_helper v=2}}`,
    /// use `h.hash_get("v")` in helper definition.
    /// Variable `abc` is auto resolved in current context.
    ///
    /// ```
    /// use handlebars::*;
    ///
    /// fn my_helper(h: &Helper, rc: &mut RenderContext) -> Result<(), RenderError> {
    ///     let v = h.hash_get("v").map(|v| v.value()).unwrap();
    ///     // ..
    ///     Ok(())
    /// }
    /// ```
    pub fn hash_get(&self, key: &str) -> Option<&ContextJson> {
        self.hash.get(key)
    }
    /// Returns the default inner template if the helper is a block helper.
    ///
    /// Typically you will render the template via: `template.render(registry, render_context)`
    ///
    pub fn template(&self) -> Option<&Template> {
        self.template
    }
    /// Returns the template of `else` branch if any
    pub fn inverse(&self) -> Option<&Template> {
        self.inverse
    }
    /// Returns if the helper is a block one `{{#helper}}{{/helper}}` or not `{{helper 123}}`
    pub fn is_block(&self) -> bool {
        self.block
    }
    /// Returns block param if any
    pub fn block_param(&self) -> Option<&str> {
        // Only the single-name form `|x|` yields a value here.
        if let Some(BlockParam::Single(Parameter::Name(ref s))) = *self.block_param {
            Some(s)
        } else {
            None
        }
    }
    /// Return block param pair (for example |key, val|) if any
    pub fn block_param_pair(&self) -> Option<(&str, &str)> {
        if let Some(BlockParam::Pair((Parameter::Name(ref s1), Parameter::Name(ref s2)))) =
            *self.block_param
        {
            Some((s1, s2))
        } else {
            None
        }
    }
}
/// Render-time Decorator data when using in a decorator definition
#[derive(Debug)]
pub struct Directive<'a> {
    // Directive name; owned because it may come from a subexpression.
    name: String,
    // Positional params, resolved against the context.
    params: Vec<ContextJson>,
    // Named (hash) params, resolved against the context.
    hash: BTreeMap<String, ContextJson>,
    // Inner template for block directives / partial blocks.
    template: Option<&'a Template>,
}
impl<'a, 'b> Directive<'a> {
    /// Builds a render-time `Directive` from its parsed template form,
    /// expanding the name, params and hash against the current context.
    fn from_template(
        dt: &'a DirectiveTemplate,
        registry: &Registry,
        rc: &'b mut RenderContext,
    ) -> Result<Directive<'a>, RenderError> {
        // The directive name itself may be a subexpression.
        let name = try!(dt.name.expand_as_name(registry, rc));
        let mut evaluated_params = Vec::new();
        for p in dt.params.iter() {
            let r = try!(p.expand(registry, rc));
            evaluated_params.push(r);
        }
        let mut evaluated_hash = BTreeMap::new();
        for (k, p) in dt.hash.iter() {
            let r = try!(p.expand(registry, rc));
            evaluated_hash.insert(k.clone(), r);
        }
        Ok(Directive {
            name: name,
            params: evaluated_params,
            hash: evaluated_hash,
            template: dt.template.as_ref(),
        })
    }
    /// Returns helper name
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Returns all helper params, resolved within the context
    pub fn params(&self) -> &Vec<ContextJson> {
        &self.params
    }
    /// Returns nth helper param, resolved within the context
    pub fn param(&self, idx: usize) -> Option<&ContextJson> {
        self.params.get(idx)
    }
    /// Returns hash, resolved within the context
    pub fn hash(&self) -> &BTreeMap<String, ContextJson> {
        &self.hash
    }
    /// Return hash value of a given key, resolved within the context
    pub fn hash_get(&self, key: &str) -> Option<&ContextJson> {
        self.hash.get(key)
    }
    /// Returns the default inner template if any
    pub fn template(&self) -> Option<&Template> {
        self.template
    }
}
/// Render trait
pub trait Renderable {
    /// render into RenderContext's `writer`
    fn render(
        &self,
        registry: &Registry,
        rc: &mut RenderContext,
        out: &mut Output,
    ) -> Result<(), RenderError>;
    /// render into string
    ///
    /// Default implementation: renders into an in-memory `StringOutput`
    /// and returns the collected text.
    fn renders(&self, registry: &Registry, rc: &mut RenderContext) -> Result<String, RenderError> {
        let mut so = StringOutput::new();
        try!(self.render(registry, rc, &mut so));
        so.to_string().map_err(RenderError::from)
    }
}
/// Evaluate directive or decorator
///
/// Unlike `Renderable`, evaluation produces no output; it only mutates the
/// render context (e.g. registering inline partials).
pub trait Evaluable {
    fn eval(&self, registry: &Registry, rc: &mut RenderContext) -> Result<(), RenderError>;
}
/// Invokes a helper and captures its result as a JSON value (used when a
/// helper appears inside a subexpression).
///
/// Prefers the helper's structured result (`call_inner`); otherwise renders
/// the helper's textual output and wraps it as `Json::String`.
fn call_helper_for_value(
    hd: &Box<HelperDef>,
    ht: &Helper,
    registry: &Registry,
    rc: &mut RenderContext,
) -> Result<ContextJson, RenderError> {
    // test if helperDef has json result
    if let Some(inner_value) = hd.call_inner(ht, registry, rc)? {
        Ok(ContextJson {
            path: None,
            value: inner_value,
        })
    } else {
        // parse value from output
        let mut so = StringOutput::new();
        // Escaping must be off while capturing: the text becomes a value,
        // not final output.
        rc.disable_escape = true;
        let result = hd.call(ht, registry, rc, &mut so);
        // fix: restore the flag before propagating any error — previously a
        // failing `call` left `disable_escape = true` for the rest of the
        // render via the early `?` return.
        rc.disable_escape = false;
        result?;
        let string = so.to_string().map_err(RenderError::from)?;
        Ok(ContextJson {
            path: None,
            value: Json::String(string),
        })
    }
}
impl Parameter {
    /// Resolves this parameter to a name string (used for directive names):
    /// plain names pass through, subexpressions are evaluated and rendered,
    /// literals are rendered directly.
    pub fn expand_as_name(
        &self,
        registry: &Registry,
        rc: &mut RenderContext,
    ) -> Result<String, RenderError> {
        match self {
            &Parameter::Name(ref name) => Ok(name.to_owned()),
            &Parameter::Subexpression(_) => self.expand(registry, rc).map(|v| v.value.render()),
            &Parameter::Literal(ref j) => Ok(j.render()),
        }
    }
    /// Resolves this parameter to a `ContextJson` value.
    ///
    /// Lookup order for `Name`: local `@` variables, then block contexts
    /// (innermost first), then the main context.
    pub fn expand(
        &self,
        registry: &Registry,
        rc: &mut RenderContext,
    ) -> Result<ContextJson, RenderError> {
        match self {
            &Parameter::Name(ref name) => {
                // `@`-style locals win over everything else.
                let local_value = rc.get_local_var(&name);
                if let Some(value) = local_value {
                    Ok(ContextJson {
                        path: Some(name.to_owned()),
                        value: value.clone(),
                    })
                } else {
                    // Block contexts next; fall back to the main context
                    // (strictness handled inside `evaluate`).
                    let block_context_value = rc.evaluate_in_block_context(name)?;
                    let value = if block_context_value.is_none() {
                        rc.evaluate(name, registry.strict_mode())?
                    } else {
                        block_context_value.unwrap()
                    };
                    Ok(ContextJson {
                        path: Some(name.to_owned()),
                        value: value.clone(),
                    })
                }
            }
            &Parameter::Literal(ref j) => Ok(ContextJson {
                path: None,
                value: j.clone(),
            }),
            &Parameter::Subexpression(ref t) => match t.into_element() {
                Expression(ref expr) => expr.expand(registry, rc),
                HelperExpression(ref ht) => {
                    let helper = Helper::from_template(ht, registry, rc)?;
                    // Local helpers shadow registry helpers; missing helpers
                    // fall back to the `*Missing` hooks before erroring.
                    if let Some(ref d) = rc.get_local_helper(&ht.name) {
                        call_helper_for_value(d.borrow(), &helper, registry, rc)
                    } else {
                        registry
                            .get_helper(&ht.name)
                            .or(registry.get_helper(if ht.block {
                                "blockHelperMissing"
                            } else {
                                "helperMissing"
                            }))
                            .ok_or(RenderError::new(format!(
                                "Helper not defined: {:?}",
                                ht.name
                            )))
                            .and_then(|d| call_helper_for_value(d, &helper, registry, rc))
                    }
                }
                // NOTE(review): assumes the parser only produces expression or
                // helper-expression elements inside a subexpression.
                _ => unreachable!(),
            },
        }
    }
}
impl Renderable for Template {
    /// Renders every element in order, annotating any error with the
    /// element's line/column (from the parser's mapping) and this template's
    /// name when they are not already set.
    fn render(
        &self,
        registry: &Registry,
        rc: &mut RenderContext,
        out: &mut Output,
    ) -> Result<(), RenderError> {
        rc.current_template = self.name.clone();
        // idiom: `enumerate()` replaces the manual `idx = idx + 1` counter
        for (idx, t) in self.elements.iter().enumerate() {
            try!(t.render(registry, rc, out).map_err(|mut e| {
                // add line/col number if the template has mapping data
                if e.line_no.is_none() {
                    if let Some(ref mapping) = self.mapping {
                        if let Some(&TemplateMapping(line, col)) = mapping.get(idx) {
                            e.line_no = Some(line);
                            e.column_no = Some(col);
                        }
                    }
                }
                if e.template_name.is_none() {
                    e.template_name = self.name.clone();
                }
                e
            }));
        }
        Ok(())
    }
}
impl Evaluable for Template {
    /// Evaluates every element in order, annotating errors with position
    /// data like `render` does.
    fn eval(&self, registry: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> {
        // idiom: `enumerate()` replaces the manual `idx = idx + 1` counter
        for (idx, t) in self.elements.iter().enumerate() {
            try!(t.eval(registry, rc).map_err(|mut e| {
                if e.line_no.is_none() {
                    if let Some(ref mapping) = self.mapping {
                        if let Some(&TemplateMapping(line, col)) = mapping.get(idx) {
                            e.line_no = Some(line);
                            e.column_no = Some(col);
                        }
                    }
                }
                // NOTE(review): unlike `render`, this overwrites an existing
                // template_name unconditionally; behavior preserved as-is.
                e.template_name = self.name.clone();
                e
            }));
        }
        Ok(())
    }
}
impl Renderable for TemplateElement {
    /// Renders one template element according to its kind.
    fn render(
        &self,
        registry: &Registry,
        rc: &mut RenderContext,
        out: &mut Output,
    ) -> Result<(), RenderError> {
        match *self {
            RawString(ref v) => {
                try!(out.write(v.as_ref()));
                Ok(())
            }
            HtmlComment(ref v) => {
                out.write("<!--")?;
                out.write(v)?;
                out.write("-->")?;
                // fix: arm previously ended with a `;` and evaluated to `()`,
                // a type mismatch against the other `Result` arms
                Ok(())
            }
            Expression(ref v) => {
                // `{{expr}}`: escaped unless escaping is disabled.
                let context_json = try!(v.expand(registry, rc));
                let rendered = context_json.value.render();
                let output = if !rc.disable_escape {
                    registry.get_escape_fn()(&rendered)
                } else {
                    rendered
                };
                try!(out.write(output.as_ref()));
                Ok(())
            }
            HTMLExpression(ref v) => {
                // `{{{expr}}}`: raw, never escaped.
                let context_json = try!(v.expand(registry, rc));
                let rendered = context_json.value.render();
                try!(out.write(rendered.as_ref()));
                Ok(())
            }
            HelperExpression(ref ht) | HelperBlock(ref ht) => {
                let helper = try!(Helper::from_template(ht, registry, rc));
                // Local helpers shadow registry ones; missing helpers fall
                // back to the `*Missing` hooks before erroring.
                if let Some(ref d) = rc.get_local_helper(&ht.name) {
                    d.call(&helper, registry, rc, out)
                } else {
                    registry
                        .get_helper(&ht.name)
                        .or(registry.get_helper(if ht.block {
                            "blockHelperMissing"
                        } else {
                            "helperMissing"
                        }))
                        .ok_or(RenderError::new(format!(
                            "Helper not defined: {:?}",
                            ht.name
                        )))
                        .and_then(|d| d.call(&helper, registry, rc, out))
                }
            }
            // Directives produce no output; delegate to `eval`.
            DirectiveExpression(_) | DirectiveBlock(_) => self.eval(registry, rc),
            PartialExpression(ref dt) | PartialBlock(ref dt) => {
                Directive::from_template(dt, registry, rc)
                    .and_then(|di| partial::expand_partial(&di, registry, rc, out))
            }
            // Comments and whitespace-only elements render nothing.
            _ => Ok(()),
        }
    }
}
impl Evaluable for TemplateElement {
    /// Evaluates directive elements by dispatching to the registered
    /// decorator; all other element kinds are no-ops here.
    fn eval(&self, registry: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> {
        match *self {
            DirectiveExpression(ref dt) | DirectiveBlock(ref dt) => {
                Directive::from_template(dt, registry, rc).and_then(|di| {
                    match registry.get_decorator(&di.name) {
                        Some(d) => (**d).call(&di, registry, rc),
                        None => Err(RenderError::new(format!(
                            "Directive not defined: {:?}",
                            dt.name
                        ))),
                    }
                })
            }
            _ => Ok(()),
        }
    }
}
// Renders a bare RawString element and checks the text passes through verbatim.
#[test]
fn test_raw_string() {
    let r = Registry::new();
    let mut out = StringOutput::new();
    let ctx = Context::null();
    let mut hlps = HashMap::new();
    {
        let mut rc = RenderContext::new(ctx, &mut hlps);
        let raw_string = RawString("<h1>hello world</h1>".to_string());
        raw_string.render(&r, &mut rc, &mut out).ok().unwrap();
    }
    assert_eq!(out.to_string().unwrap(), "<h1>hello world</h1>".to_string());
}
// `{{hello}}` resolves from the context; note the value survives unescaped
// here because the default escape fn is applied to the already-plain text.
#[test]
fn test_expression() {
    let r = Registry::new();
    let mut out = StringOutput::new();
    let mut hlps = HashMap::new();
    let mut m: HashMap<String, String> = HashMap::new();
    let value = "<p></p>".to_string();
    m.insert("hello".to_string(), value);
    let ctx = Context::wraps(&m).unwrap();
    {
        let mut rc = RenderContext::new(ctx, &mut hlps);
        let element = Expression(Parameter::Name("hello".into()));
        element.render(&r, &mut rc, &mut out).ok().unwrap();
    }
    assert_eq!(out.to_string().unwrap(), "<p></p>".to_string());
}
// `{{{hello}}}` renders the raw, unescaped value.
#[test]
fn test_html_expression() {
    let r = Registry::new();
    let mut out = StringOutput::new();
    let mut hlps = HashMap::new();
    let mut m: HashMap<String, String> = HashMap::new();
    let value = "world";
    m.insert("hello".to_string(), value.to_string());
    let ctx = Context::wraps(&m).unwrap();
    {
        let mut rc = RenderContext::new(ctx, &mut hlps);
        let element = HTMLExpression(Parameter::Name("hello".into()));
        element.render(&r, &mut rc, &mut out).ok().unwrap();
    }
    assert_eq!(out.to_string().unwrap(), value.to_string());
}
// Renders a whole element list; the trailing Comment must produce no output.
#[test]
fn test_template() {
    let r = Registry::new();
    let mut out = StringOutput::new();
    let mut hlps = HashMap::new();
    let mut m: HashMap<String, String> = HashMap::new();
    let value = "world".to_string();
    m.insert("hello".to_string(), value);
    let ctx = Context::wraps(&m).unwrap();
    {
        let mut rc = RenderContext::new(ctx, &mut hlps);
        let mut elements: Vec<TemplateElement> = Vec::new();
        let e1 = RawString("<h1>".to_string());
        elements.push(e1);
        let e2 = Expression(Parameter::Name("hello".into()));
        elements.push(e2);
        let e3 = RawString("</h1>".to_string());
        elements.push(e3);
        let e4 = Comment("".to_string());
        elements.push(e4);
        let template = Template {
            elements: elements,
            name: None,
            mapping: None,
        };
        template.render(&r, &mut rc, &mut out).ok().unwrap();
    }
    assert_eq!(out.to_string().unwrap(), "<h1>world</h1>".to_string());
}
// promote: `@index` -> `@../index`; demote: back to `@index`.
#[test]
fn test_render_context_promotion_and_demotion() {
    use context::to_json;
    let ctx = Context::null();
    let mut hlps = HashMap::new();
    let mut render_context = RenderContext::new(ctx, &mut hlps);
    render_context.set_local_var("@index".to_string(), to_json(&0));
    render_context.promote_local_vars();
    assert_eq!(
        render_context
            .get_local_var(&"@../index".to_string())
            .unwrap(),
        &to_json(&0)
    );
    render_context.demote_local_vars();
    assert_eq!(
        render_context.get_local_var(&"@index".to_string()).unwrap(),
        &to_json(&0)
    );
}
// Subexpressions `(const)` / `(hello)` resolve through parameter expansion.
#[test]
fn test_render_subexpression() {
    use ::support::str::StringWriter;
    let r = Registry::new();
    let mut sw = StringWriter::new();
    let mut m: HashMap<String, String> = HashMap::new();
    m.insert("hello".to_string(), "world".to_string());
    m.insert("world".to_string(), "nice".to_string());
    m.insert("const".to_string(), "truthy".to_string());
    {
        if let Err(e) =
            r.render_template_to_write("<h1>{{#if (const)}}{{(hello)}}{{/if}}</h1>", &m, &mut sw)
        {
            panic!("{}", e);
        }
    }
    assert_eq!(sw.to_string(), "<h1>world</h1>".to_string());
}
// Regression test: nested helper subexpressions, see issue #115.
#[test]
fn test_render_subexpression_issue_115() {
    use ::support::str::StringWriter;
    let mut r = Registry::new();
    r.register_helper(
        "format",
        Box::new(
            |h: &Helper, _: &Registry, _: &mut RenderContext, out: &mut Output| -> Result<(), RenderError> {
                out
                    .write(
                        format!("{}", h.param(0).unwrap().value().render()).as_ref())
                    .map(|_| ())
                    .map_err(RenderError::from)
            },
        ),
    );
    let mut sw = StringWriter::new();
    let mut m: HashMap<String, String> = HashMap::new();
    m.insert("a".to_string(), "123".to_string());
    {
        if let Err(e) = r.render_template_to_write("{{format (format a)}}", &m, &mut sw) {
            panic!("{}", e);
        }
    }
    assert_eq!(sw.to_string(), "123".to_string());
}
// Errors from a nested element must carry line/column of the faulty element.
#[test]
fn test_render_error_line_no() {
    let mut r = Registry::new();
    let m: HashMap<String, String> = HashMap::new();
    let name = "invalid_template";
    assert!(
        r.register_template_string(name, "<h1>\n{{#if true}}\n {{#each}}{{/each}}\n{{/if}}")
            .is_ok()
    );
    if let Err(e) = r.render(name, &m) {
        assert_eq!(e.line_no.unwrap(), 3);
        assert_eq!(e.column_no.unwrap(), 3);
        assert_eq!(e.template_name, Some(name.to_owned()));
    } else {
        panic!("Error expected");
    }
}
// Inline partials (`{{#*inline}}`) override the named partial; the unused
// `seg` partial must not leak into the output.
#[test]
fn test_partial_failback_render() {
    let mut r = Registry::new();
    assert!(
        r.register_template_string("parent", "<html>{{> layout}}</html>")
            .is_ok()
    );
    assert!(r.register_template_string(
        "child",
        "{{#*inline \"layout\"}}content{{/inline}}{{#> parent}}{{> seg}}{{/parent}}"
    ).is_ok());
    assert!(r.register_template_string("seg", "1234").is_ok());
    let r = r.render("child", &true).expect("should work");
    assert_eq!(r, "<html>content</html>");
}
// Keys containing '/' must still be iterable via `{{@key}}`.
#[test]
fn test_key_with_slash() {
    let mut r = Registry::new();
    assert!(
        r.register_template_string("t", "{{#each .}}{{@key}}: {{this}}\n{{/each}}")
            .is_ok()
    );
    let r = r.render("t", &json!({"/foo": "bar"})).expect("should work");
    assert_eq!(r, "/foo: bar\n");
}
// `{{! ... }}` comments are emitted as HTML comments.
#[test]
fn test_html_comment() {
    let r = Registry::new();
    assert_eq!(
        r.render_template("Hello {{this}} {{! test me }}", &0)
            .unwrap(),
        "Hello 0 <!-- test me -->"
    );
}
(feat) WIP cow renderContext
use std::collections::{BTreeMap, HashMap, VecDeque};
use std::fmt;
use std::rc::Rc;
use std::borrow::{Borrow, Cow};
use serde::Serialize;
use serde_json::value::Value as Json;
use template::{BlockParam, Directive as DirectiveTemplate, HelperTemplate, Parameter, Template,
TemplateElement, TemplateMapping};
use template::TemplateElement::*;
use registry::Registry;
use context::{Context, JsonRender};
use helpers::HelperDef;
use error::RenderError;
use partial;
use output::{Output, StringOutput};
// Fallback value handed out by `evaluate` in non-strict mode.
static DEFAULT_VALUE: Json = Json::Null;
/// The context of a render call
///
/// this context stores information of a render and a writer where generated
/// content is written to.
///
// NOTE(review): WIP copy-on-write variant — fields are `Cow` so derived
// contexts can borrow the parent's state and clone only on mutation.
pub struct RenderContext<'a> {
    // Inline partials registered during this render (`{{#*inline}}`).
    partials: Cow<'a, HashMap<String, Rc<Template>>>,
    // Current navigation path within `context`.
    path: Cow<'a, String>,
    // Stack of path roots pushed by block helpers (front = innermost).
    local_path_root: Cow<'a, VecDeque<String>>,
    // `@`-prefixed variables such as `@index`, `@key`.
    local_variables: Cow<'a, HashMap<String, Json>>,
    // Helpers registered for this render only; shared mutably, never Cow'd.
    local_helpers: &'a mut HashMap<String, Rc<Box<HelperDef + 'static>>>,
    default_var: Json,
    // Contexts pushed by block helpers; looked up before the main context.
    block_context: Cow<'a, VecDeque<Context>>,
    /// the context
    context: Context,
    /// current template name
    pub current_template: Cow<'a, Option<String>>,
    /// root template name
    pub root_template: Cow<'a, Option<String>>,
    // When true, `Expression` output skips the registry's escape function.
    pub disable_escape: bool,
}
impl<'a> RenderContext<'a> {
/// Create a render context from a `Write`
pub fn new(
ctx: Context,
local_helpers: &'a mut HashMap<String, Rc<Box<HelperDef + 'static>>>,
) -> RenderContext<'a> {
RenderContext {
partials: Cow::from(HashMap::new()),
path: Cow::from(".".to_string()),
local_path_root: Cow::from(VecDeque::new()),
local_variables: Cow::from(HashMap::new()),
local_helpers: local_helpers,
default_var: Json::Null,
block_context: Cow::from(VecDeque::new()),
context: ctx,
current_template: Cow::from(None),
root_template: Cow::from(None),
disable_escape: false,
}
}
pub fn derive(&mut self) -> RenderContext {
RenderContext {
partials: Cow::Borrowed(&*self.partials),
path: Cow::Borrowed(&*self.path),
local_path_root: Cow::Borrowed(&*self.local_path_root),
local_variables: Cow::Borrowed(&*self.local_variables),
current_template: Cow::Borrowed(&*self.current_template),
root_template: Cow::Borrowed(&*self.root_template),
default_var: self.default_var.clone(),
block_context: Cow::Borrowed(&*self.block_context),
disable_escape: self.disable_escape,
local_helpers: self.local_helpers,
context: self.context.clone(),
}
}
pub fn with_context(&mut self, ctx: Context) -> RenderContext {
RenderContext {
partials: Cow::Borrowed(&*self.partials),
path: Cow::from(".".to_owned()),
local_path_root: Cow::from(VecDeque::new()),
local_variables: Cow::Borrowed(&*self.local_variables),
current_template: Cow::Borrowed(&*self.current_template),
root_template: Cow::Borrowed(&*self.root_template),
default_var: self.default_var.clone(),
block_context: Cow::from(VecDeque::new()),
disable_escape: self.disable_escape,
local_helpers: self.local_helpers,
context: ctx,
}
}
pub fn get_partial(&self, name: &str) -> Option<Rc<Template>> {
self.partials.get(name).map(|t| t.clone())
}
pub fn set_partial(&mut self, name: String, result: Rc<Template>) {
self.partials.to_mut().insert(name, result);
}
pub fn get_path(&self) -> &String {
&self.path
}
pub fn set_path(&mut self, path: String) {
self.path = Cow::from(path)
}
pub fn get_local_path_root(&self) -> &VecDeque<String> {
&self.local_path_root
}
pub fn push_local_path_root(&mut self, path: String) {
self.local_path_root.to_mut().push_front(path)
}
pub fn pop_local_path_root(&mut self) {
self.local_path_root.to_mut().pop_front();
}
pub fn set_local_var(&mut self, name: String, value: Json) {
self.local_variables.to_mut().insert(name, value);
}
pub fn clear_local_vars(&mut self) {
self.local_variables.to_mut().clear();
}
pub fn promote_local_vars(&mut self) {
let mut new_map: HashMap<String, Json> = HashMap::new();
for key in self.local_variables.keys() {
let mut new_key = String::new();
new_key.push_str("@../");
new_key.push_str(&key[1..]);
let v = self.local_variables.get(key).unwrap().clone();
new_map.insert(new_key, v);
}
self.local_variables = new_map;
}
pub fn demote_local_vars(&mut self) {
let mut new_map: HashMap<String, Json> = HashMap::new();
for key in self.local_variables.keys() {
if key.starts_with("@../") {
let mut new_key = String::new();
new_key.push('@');
new_key.push_str(&key[4..]);
let v = self.local_variables.get(key).unwrap().clone();
new_map.insert(new_key, v);
}
}
self.local_variables = new_map;
}
pub fn get_local_var(&self, name: &String) -> Option<&Json> {
self.local_variables.get(name)
}
pub fn push_block_context<T>(&mut self, ctx: &T) -> Result<(), RenderError>
where
T: Serialize,
{
let r = self.block_context.push_front(Context::wraps(ctx)?);
Ok(r)
}
pub fn pop_block_context(&mut self) {
self.block_context.pop_front();
}
pub fn evaluate_in_block_context(
&self,
local_path: &str,
) -> Result<Option<&Json>, RenderError> {
for bc in self.block_context.iter() {
let v = bc.navigate(".", &self.local_path_root, local_path)?;
if v.is_some() {
return Ok(v);
}
}
Ok(None)
}
pub fn is_current_template(&self, p: &str) -> bool {
self.current_template
.as_ref()
.map(|s| s == p)
.unwrap_or(false)
}
pub fn context(&self) -> &Context {
&self.context
}
pub fn context_mut(&mut self) -> &mut Context {
&mut self.context
}
pub fn register_local_helper(
&mut self,
name: &str,
def: Box<HelperDef + 'static>,
) -> Option<Rc<Box<HelperDef + 'static>>> {
self.local_helpers.insert(name.to_string(), Rc::new(def))
}
pub fn unregister_local_helper(&mut self, name: &str) {
self.local_helpers.remove(name);
}
pub fn get_local_helper(&self, name: &str) -> Option<Rc<Box<HelperDef + 'static>>> {
self.local_helpers.get(name).map(|r| r.clone())
}
pub fn evaluate(&self, path: &str, strict: bool) -> Result<&Json, RenderError> {
let value_container =
self.context
.navigate(self.get_path(), self.get_local_path_root(), path);
if strict {
value_container
.and_then(|v| v.ok_or(RenderError::new("Value not found in strict mode.")))
} else {
value_container.map(|v| v.unwrap_or(&DEFAULT_VALUE))
}
}
pub fn evaluate_absolute(&self, path: &str, strict: bool) -> Result<&Json, RenderError> {
let value_container = self.context.navigate(".", &VecDeque::new(), path);
if strict {
value_container
.and_then(|v| v.ok_or(RenderError::new("Value not found in strict mode.")))
} else {
value_container.map(|v| v.unwrap_or(&DEFAULT_VALUE))
}
}
}
impl<'a> fmt::Debug for RenderContext<'a> {
    /// Debug output of the render state; `local_helpers` is omitted because
    /// boxed helper trait objects are not `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        f.debug_struct("RenderContext")
            .field("context", &self.context)
            .field("path", &self.path)
            .field("partials", &self.partials)
            .field("local_variables", &self.local_variables)
            .field("root_template", &self.root_template)
            .field("current_template", &self.current_template)
            .field("block_context", &self.block_context)
            .field("local_path_root", &self.local_path_root)
            // fix: field name was misspelled "disable_eacape" in debug output
            .field("disable_escape", &self.disable_escape)
            .finish()
    }
}
/// Json wrapper that holds the Json value and reference path information
///
#[derive(Debug)]
pub struct ContextJson {
    // Relative path the value was resolved from; `None` for literals.
    path: Option<String>,
    // The resolved JSON value itself.
    value: Json,
}
impl ContextJson {
/// Returns relative path when the value is referenced
/// If the value is from a literal, the path is `None`
pub fn path(&self) -> Option<&String> {
self.path.as_ref()
}
/// Return root level of this path if any
pub fn path_root(&self) -> Option<&str> {
self.path
.as_ref()
.and_then(|p| p.split(|c| c == '.' || c == '/').nth(0))
}
/// Returns the value
pub fn value(&self) -> &Json {
&self.value
}
}
/// Render-time Helper data when using in a helper definition
#[derive(Debug)]
pub struct Helper<'a> {
    // Helper name as written in the template.
    name: &'a str,
    // Positional params, already resolved against the context.
    params: Vec<ContextJson>,
    // Named (hash) params, already resolved against the context.
    hash: BTreeMap<String, ContextJson>,
    // Block parameter declaration, e.g. `|val|` or `|key, val|`.
    block_param: &'a Option<BlockParam>,
    // Main inner template for block helpers.
    template: Option<&'a Template>,
    // `{{else}}` branch template, if present.
    inverse: Option<&'a Template>,
    // True for `{{#helper}}...{{/helper}}` form.
    block: bool,
}
impl<'a, 'b> Helper<'a> {
    /// Builds a render-time `Helper` from its parsed template form by
    /// expanding every param and hash value against the current context.
    fn from_template(
        ht: &'a HelperTemplate,
        registry: &Registry,
        rc: &'b mut RenderContext,
    ) -> Result<Helper<'a>, RenderError> {
        // Resolve positional params (may recurse through subexpressions).
        let mut evaluated_params = Vec::new();
        for p in ht.params.iter() {
            let r = try!(p.expand(registry, rc));
            evaluated_params.push(r);
        }
        // Resolve named (hash) params the same way.
        let mut evaluated_hash = BTreeMap::new();
        for (k, p) in ht.hash.iter() {
            let r = try!(p.expand(registry, rc));
            evaluated_hash.insert(k.clone(), r);
        }
        Ok(Helper {
            name: &ht.name,
            params: evaluated_params,
            hash: evaluated_hash,
            block_param: &ht.block_param,
            template: ht.template.as_ref(),
            inverse: ht.inverse.as_ref(),
            block: ht.block,
        })
    }
    /// Returns helper name
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Returns all helper params, resolved within the context
    pub fn params(&self) -> &Vec<ContextJson> {
        &self.params
    }
    /// Returns nth helper param, resolved within the context.
    ///
    /// ## Example
    ///
    /// To get the first param in `{{my_helper abc}}` or `{{my_helper 2}}`,
    /// use `h.param(0)` in helper definition.
    /// Variable `abc` is auto resolved in current context.
    ///
    /// ```
    /// use handlebars::*;
    ///
    /// fn my_helper(h: &Helper, rc: &mut RenderContext) -> Result<(), RenderError> {
    ///     let v = h.param(0).map(|v| v.value()).unwrap();
    ///     // ..
    ///     Ok(())
    /// }
    /// ```
    pub fn param(&self, idx: usize) -> Option<&ContextJson> {
        self.params.get(idx)
    }
    /// Returns hash, resolved within the context
    pub fn hash(&self) -> &BTreeMap<String, ContextJson> {
        &self.hash
    }
    /// Return hash value of a given key, resolved within the context
    ///
    /// ## Example
    ///
    /// To get the first param in `{{my_helper v=abc}}` or `{{my_helper v=2}}`,
    /// use `h.hash_get("v")` in helper definition.
    /// Variable `abc` is auto resolved in current context.
    ///
    /// ```
    /// use handlebars::*;
    ///
    /// fn my_helper(h: &Helper, rc: &mut RenderContext) -> Result<(), RenderError> {
    ///     let v = h.hash_get("v").map(|v| v.value()).unwrap();
    ///     // ..
    ///     Ok(())
    /// }
    /// ```
    pub fn hash_get(&self, key: &str) -> Option<&ContextJson> {
        self.hash.get(key)
    }
    /// Returns the default inner template if the helper is a block helper.
    ///
    /// Typically you will render the template via: `template.render(registry, render_context)`
    ///
    pub fn template(&self) -> Option<&Template> {
        self.template
    }
    /// Returns the template of `else` branch if any
    pub fn inverse(&self) -> Option<&Template> {
        self.inverse
    }
    /// Returns if the helper is a block one `{{#helper}}{{/helper}}` or not `{{helper 123}}`
    pub fn is_block(&self) -> bool {
        self.block
    }
    /// Returns block param if any
    pub fn block_param(&self) -> Option<&str> {
        // Only the single-name form `|x|` yields a value here.
        if let Some(BlockParam::Single(Parameter::Name(ref s))) = *self.block_param {
            Some(s)
        } else {
            None
        }
    }
    /// Return block param pair (for example |key, val|) if any
    pub fn block_param_pair(&self) -> Option<(&str, &str)> {
        if let Some(BlockParam::Pair((Parameter::Name(ref s1), Parameter::Name(ref s2)))) =
            *self.block_param
        {
            Some((s1, s2))
        } else {
            None
        }
    }
}
/// Render-time Decorator data when using in a decorator definition
#[derive(Debug)]
pub struct Directive<'a> {
    name: String,                         // directive name, expanded at render time
    params: Vec<ContextJson>,             // positional params, resolved within the context
    hash: BTreeMap<String, ContextJson>,  // hash params, resolved within the context
    template: Option<&'a Template>,       // inner template for block directives
}
impl<'a, 'b> Directive<'a> {
    /// Build a render-time `Directive` from its parsed template form,
    /// expanding the name, params and hash against the current context.
    fn from_template(
        dt: &'a DirectiveTemplate,
        registry: &Registry,
        rc: &'b mut RenderContext,
    ) -> Result<Directive<'a>, RenderError> {
        let name = try!(dt.name.expand_as_name(registry, rc));
        let mut evaluated_params = Vec::new();
        for p in dt.params.iter() {
            let r = try!(p.expand(registry, rc));
            evaluated_params.push(r);
        }
        let mut evaluated_hash = BTreeMap::new();
        for (k, p) in dt.hash.iter() {
            let r = try!(p.expand(registry, rc));
            evaluated_hash.insert(k.clone(), r);
        }
        Ok(Directive {
            name: name,
            params: evaluated_params,
            hash: evaluated_hash,
            template: dt.template.as_ref(),
        })
    }
    /// Returns directive name
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Returns all directive params, resolved within the context
    pub fn params(&self) -> &Vec<ContextJson> {
        &self.params
    }
    /// Returns nth directive param, resolved within the context
    pub fn param(&self, idx: usize) -> Option<&ContextJson> {
        self.params.get(idx)
    }
    /// Returns hash, resolved within the context
    pub fn hash(&self) -> &BTreeMap<String, ContextJson> {
        &self.hash
    }
    /// Return hash value of a given key, resolved within the context
    pub fn hash_get(&self, key: &str) -> Option<&ContextJson> {
        self.hash.get(key)
    }
    /// Returns the default inner template if any
    pub fn template(&self) -> Option<&Template> {
        self.template
    }
}
/// Render trait
pub trait Renderable {
    /// render into RenderContext's `writer`
    fn render(
        &self,
        registry: &Registry,
        rc: &mut RenderContext,
        out: &mut Output,
    ) -> Result<(), RenderError>;
    /// render into string
    ///
    /// Default implementation renders into an in-memory `StringOutput`
    /// and returns the accumulated text.
    fn renders(&self, registry: &Registry, rc: &mut RenderContext) -> Result<String, RenderError> {
        let mut so = StringOutput::new();
        try!(self.render(registry, rc, &mut so));
        so.to_string().map_err(RenderError::from)
    }
}
/// Evaluate directive or decorator
///
/// Implemented by template elements that only have side effects on the
/// `RenderContext` (decorators/directives) rather than producing output.
pub trait Evaluable {
    fn eval(&self, registry: &Registry, rc: &mut RenderContext) -> Result<(), RenderError>;
}
/// Invoke a helper in "value position" (e.g. inside a subexpression) and
/// capture its result as a `ContextJson`.
///
/// If the helper supports returning JSON directly (`call_inner`), that value
/// is used; otherwise the helper is rendered into a string buffer with
/// escaping disabled and the raw text is wrapped as a JSON string.
fn call_helper_for_value(
    hd: &Box<HelperDef>,
    ht: &Helper,
    registry: &Registry,
    rc: &mut RenderContext,
) -> Result<ContextJson, RenderError> {
    // test if helperDef has json result
    if let Some(inner_value) = hd.call_inner(ht, registry, rc)? {
        Ok(ContextJson {
            path: None,
            value: inner_value,
        })
    } else {
        // parse value from output
        let mut so = StringOutput::new();
        rc.disable_escape = true;
        let result = hd.call(ht, registry, rc, &mut so);
        // BUGFIX: restore the escape flag before propagating any error;
        // previously a failing helper returned early via `?` and left the
        // render context with escaping permanently disabled.
        rc.disable_escape = false;
        result?;
        let string = so.to_string().map_err(RenderError::from)?;
        Ok(ContextJson {
            path: None,
            value: Json::String(string),
        })
    }
}
impl Parameter {
    /// Expand this parameter to a plain name string.
    ///
    /// Plain names are returned as-is, subexpressions are evaluated and
    /// their value rendered, and literals are rendered directly.
    pub fn expand_as_name(
        &self,
        registry: &Registry,
        rc: &mut RenderContext,
    ) -> Result<String, RenderError> {
        match self {
            &Parameter::Name(ref name) => Ok(name.to_owned()),
            &Parameter::Subexpression(_) => self.expand(registry, rc).map(|v| v.value.render()),
            &Parameter::Literal(ref j) => Ok(j.render()),
        }
    }
    /// Resolve this parameter to a JSON value within the current
    /// render context.
    pub fn expand(
        &self,
        registry: &Registry,
        rc: &mut RenderContext,
    ) -> Result<ContextJson, RenderError> {
        match self {
            &Parameter::Name(ref name) => {
                // Lookup order: local (@-style) variables first, then the
                // block context, then the regular context path.
                let local_value = rc.get_local_var(&name);
                if let Some(value) = local_value {
                    Ok(ContextJson {
                        path: Some(name.to_owned()),
                        value: value.clone(),
                    })
                } else {
                    let block_context_value = rc.evaluate_in_block_context(name)?;
                    let value = if block_context_value.is_none() {
                        rc.evaluate(name, registry.strict_mode())?
                    } else {
                        block_context_value.unwrap()
                    };
                    Ok(ContextJson {
                        path: Some(name.to_owned()),
                        value: value.clone(),
                    })
                }
            }
            &Parameter::Literal(ref j) => Ok(ContextJson {
                path: None,
                value: j.clone(),
            }),
            // A subexpression is itself either a plain expression or a helper
            // call; local helpers take precedence over registered ones, with
            // `blockHelperMissing`/`helperMissing` as fallback.
            &Parameter::Subexpression(ref t) => match t.into_element() {
                Expression(ref expr) => expr.expand(registry, rc),
                HelperExpression(ref ht) => {
                    let helper = Helper::from_template(ht, registry, rc)?;
                    if let Some(ref d) = rc.get_local_helper(&ht.name) {
                        call_helper_for_value(d.borrow(), &helper, registry, rc)
                    } else {
                        registry
                            .get_helper(&ht.name)
                            .or(registry.get_helper(if ht.block {
                                "blockHelperMissing"
                            } else {
                                "helperMissing"
                            }))
                            .ok_or(RenderError::new(format!(
                                "Helper not defined: {:?}",
                                ht.name
                            )))
                            .and_then(|d| call_helper_for_value(d, &helper, registry, rc))
                    }
                }
                // A subexpression can only parse to the two element kinds
                // above.
                _ => unreachable!(),
            },
        }
    }
}
impl Renderable for Template {
    /// Render every element of the template in order, decorating any error
    /// with source position and template name when available.
    fn render(
        &self,
        registry: &Registry,
        rc: &mut RenderContext,
        out: &mut Output,
    ) -> Result<(), RenderError> {
        rc.current_template = self.name.clone();
        // Use `enumerate` instead of a manually maintained counter so the
        // element index used for error mapping cannot drift from the loop.
        for (idx, t) in self.elements.iter().enumerate() {
            try!(t.render(registry, rc, out).map_err(|mut e| {
                // add line/col number if the template has mapping data
                if e.line_no.is_none() {
                    if let Some(ref mapping) = self.mapping {
                        if let Some(&TemplateMapping(line, col)) = mapping.get(idx) {
                            e.line_no = Some(line);
                            e.column_no = Some(col);
                        }
                    }
                }
                if e.template_name.is_none() {
                    e.template_name = self.name.clone();
                }
                e
            }));
        }
        Ok(())
    }
}
impl Evaluable for Template {
    /// Evaluate every element of the template (directive side effects only),
    /// decorating any error with source position when mapping data exists.
    fn eval(&self, registry: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> {
        // `enumerate` replaces the previous manually incremented counter.
        for (idx, t) in self.elements.iter().enumerate() {
            try!(t.eval(registry, rc).map_err(|mut e| {
                if e.line_no.is_none() {
                    if let Some(ref mapping) = self.mapping {
                        if let Some(&TemplateMapping(line, col)) = mapping.get(idx) {
                            e.line_no = Some(line);
                            e.column_no = Some(col);
                        }
                    }
                }
                // NOTE(review): unlike `render`, this overwrites any
                // template_name already set on the error. Kept as-is to
                // preserve behavior — confirm whether an `is_none` guard
                // was intended here.
                e.template_name = self.name.clone();
                e
            }));
        }
        Ok(())
    }
}
impl Renderable for TemplateElement {
    /// Render a single template element into `out`.
    fn render(
        &self,
        registry: &Registry,
        rc: &mut RenderContext,
        out: &mut Output,
    ) -> Result<(), RenderError> {
        match *self {
            RawString(ref v) => {
                try!(out.write(v.as_ref()));
                Ok(())
            }
            HtmlComment(ref v) => {
                out.write("<!--")?;
                out.write(v)?;
                out.write("-->")?;
                // BUGFIX: this arm previously ended with a statement and so
                // evaluated to `()`, which does not type-check against the
                // `Result` returned by the other arms.
                Ok(())
            }
            Expression(ref v) => {
                let context_json = try!(v.expand(registry, rc));
                let rendered = context_json.value.render();
                // Apply the registry's escape function unless escaping has
                // been explicitly disabled on the render context.
                let output = if !rc.disable_escape {
                    registry.get_escape_fn()(&rendered)
                } else {
                    rendered
                };
                try!(out.write(output.as_ref()));
                Ok(())
            }
            HTMLExpression(ref v) => {
                // Triple-stash expression: emitted without escaping.
                let context_json = try!(v.expand(registry, rc));
                let rendered = context_json.value.render();
                try!(out.write(rendered.as_ref()));
                Ok(())
            }
            HelperExpression(ref ht) | HelperBlock(ref ht) => {
                let helper = try!(Helper::from_template(ht, registry, rc));
                // Local helpers take precedence; otherwise fall back to the
                // registered helper or the *Missing hooks.
                if let Some(ref d) = rc.get_local_helper(&ht.name) {
                    d.call(&helper, registry, rc, out)
                } else {
                    registry
                        .get_helper(&ht.name)
                        .or(registry.get_helper(if ht.block {
                            "blockHelperMissing"
                        } else {
                            "helperMissing"
                        }))
                        .ok_or(RenderError::new(format!(
                            "Helper not defined: {:?}",
                            ht.name
                        )))
                        .and_then(|d| d.call(&helper, registry, rc, out))
                }
            }
            // Directives produce no output; they only mutate the context.
            DirectiveExpression(_) | DirectiveBlock(_) => self.eval(registry, rc),
            PartialExpression(ref dt) | PartialBlock(ref dt) => {
                Directive::from_template(dt, registry, rc)
                    .and_then(|di| partial::expand_partial(&di, registry, rc, out))
            }
            _ => Ok(()),
        }
    }
}
impl Evaluable for TemplateElement {
    /// Evaluate a single template element; only directive elements have an
    /// effect here, every other element kind is a no-op.
    fn eval(&self, registry: &Registry, rc: &mut RenderContext) -> Result<(), RenderError> {
        match *self {
            DirectiveExpression(ref dt) | DirectiveBlock(ref dt) => {
                Directive::from_template(dt, registry, rc).and_then(|di| {
                    match registry.get_decorator(&di.name) {
                        Some(d) => (**d).call(&di, registry, rc),
                        None => Err(RenderError::new(format!(
                            "Directive not defined: {:?}",
                            dt.name
                        ))),
                    }
                })
            }
            _ => Ok(()),
        }
    }
}
#[test]
fn test_raw_string() {
    // A raw string element is copied to the output verbatim.
    let r = Registry::new();
    let mut out = StringOutput::new();
    let ctx = Context::null();
    let mut hlps = HashMap::new();
    {
        let mut rc = RenderContext::new(ctx, &mut hlps);
        let raw_string = RawString("<h1>hello world</h1>".to_string());
        raw_string.render(&r, &mut rc, &mut out).ok().unwrap();
    }
    assert_eq!(out.to_string().unwrap(), "<h1>hello world</h1>".to_string());
}
#[test]
fn test_expression() {
    // Render a single `{{hello}}`-style expression resolved from a map context.
    let r = Registry::new();
    let mut out = StringOutput::new();
    let mut hlps = HashMap::new();
    let mut m: HashMap<String, String> = HashMap::new();
    let value = "<p></p>".to_string();
    m.insert("hello".to_string(), value);
    let ctx = Context::wraps(&m).unwrap();
    {
        let mut rc = RenderContext::new(ctx, &mut hlps);
        let element = Expression(Parameter::Name("hello".into()));
        element.render(&r, &mut rc, &mut out).ok().unwrap();
    }
    assert_eq!(out.to_string().unwrap(), "<p></p>".to_string());
}
#[test]
fn test_html_expression() {
    // Triple-stash style expression: value is emitted without escaping.
    let r = Registry::new();
    let mut out = StringOutput::new();
    let mut hlps = HashMap::new();
    let mut m: HashMap<String, String> = HashMap::new();
    let value = "world";
    m.insert("hello".to_string(), value.to_string());
    let ctx = Context::wraps(&m).unwrap();
    {
        let mut rc = RenderContext::new(ctx, &mut hlps);
        let element = HTMLExpression(Parameter::Name("hello".into()));
        element.render(&r, &mut rc, &mut out).ok().unwrap();
    }
    assert_eq!(out.to_string().unwrap(), value.to_string());
}
#[test]
fn test_template() {
    // Render a template mixing raw strings, an expression and a comment.
    let r = Registry::new();
    let mut out = StringOutput::new();
    let mut hlps = HashMap::new();
    let mut m: HashMap<String, String> = HashMap::new();
    let value = "world".to_string();
    m.insert("hello".to_string(), value);
    let ctx = Context::wraps(&m).unwrap();
    {
        let mut rc = RenderContext::new(ctx, &mut hlps);
        let mut elements: Vec<TemplateElement> = Vec::new();
        let e1 = RawString("<h1>".to_string());
        elements.push(e1);
        let e2 = Expression(Parameter::Name("hello".into()));
        elements.push(e2);
        let e3 = RawString("</h1>".to_string());
        elements.push(e3);
        // Comments must produce no output.
        let e4 = Comment("".to_string());
        elements.push(e4);
        let template = Template {
            elements: elements,
            name: None,
            mapping: None,
        };
        template.render(&r, &mut rc, &mut out).ok().unwrap();
    }
    assert_eq!(out.to_string().unwrap(), "<h1>world</h1>".to_string());
}
#[test]
fn test_render_context_promotion_and_demotion() {
    // Local vars gain a `../` prefix on promotion and lose it on demotion.
    use context::to_json;
    let ctx = Context::null();
    let mut hlps = HashMap::new();
    let mut render_context = RenderContext::new(ctx, &mut hlps);
    render_context.set_local_var("@index".to_string(), to_json(&0));
    render_context.promote_local_vars();
    assert_eq!(
        render_context
            .get_local_var(&"@../index".to_string())
            .unwrap(),
        &to_json(&0)
    );
    render_context.demote_local_vars();
    assert_eq!(
        render_context.get_local_var(&"@index".to_string()).unwrap(),
        &to_json(&0)
    );
}
#[test]
fn test_render_subexpression() {
    // Subexpressions `(name)` are resolved against the context before the
    // surrounding helper/expression is evaluated.
    use ::support::str::StringWriter;
    let r = Registry::new();
    let mut sw = StringWriter::new();
    let mut m: HashMap<String, String> = HashMap::new();
    m.insert("hello".to_string(), "world".to_string());
    m.insert("world".to_string(), "nice".to_string());
    m.insert("const".to_string(), "truthy".to_string());
    {
        if let Err(e) =
            r.render_template_to_write("<h1>{{#if (const)}}{{(hello)}}{{/if}}</h1>", &m, &mut sw)
        {
            panic!("{}", e);
        }
    }
    assert_eq!(sw.to_string(), "<h1>world</h1>".to_string());
}
#[test]
fn test_render_subexpression_issue_115() {
    // Nested helper subexpressions: the inner call's output feeds the outer.
    use ::support::str::StringWriter;
    let mut r = Registry::new();
    r.register_helper(
        "format",
        Box::new(
            |h: &Helper, _: &Registry, _: &mut RenderContext, out: &mut Output| -> Result<(), RenderError> {
                out
                    .write(
                        format!("{}", h.param(0).unwrap().value().render()).as_ref())
                    .map(|_| ())
                    .map_err(RenderError::from)
            },
        ),
    );
    let mut sw = StringWriter::new();
    let mut m: HashMap<String, String> = HashMap::new();
    m.insert("a".to_string(), "123".to_string());
    {
        if let Err(e) = r.render_template_to_write("{{format (format a)}}", &m, &mut sw) {
            panic!("{}", e);
        }
    }
    assert_eq!(sw.to_string(), "123".to_string());
}
#[test]
fn test_render_error_line_no() {
    // Render errors should carry the line/column of the failing element
    // plus the registered template name.
    let mut r = Registry::new();
    let m: HashMap<String, String> = HashMap::new();
    let name = "invalid_template";
    assert!(
        r.register_template_string(name, "<h1>\n{{#if true}}\n    {{#each}}{{/each}}\n{{/if}}")
            .is_ok()
    );
    if let Err(e) = r.render(name, &m) {
        assert_eq!(e.line_no.unwrap(), 3);
        assert_eq!(e.column_no.unwrap(), 3);
        assert_eq!(e.template_name, Some(name.to_owned()));
    } else {
        panic!("Error expected");
    }
}
#[test]
fn test_partial_failback_render() {
    // An inline partial defined in the child overrides the missing `layout`
    // partial referenced by the parent.
    let mut r = Registry::new();
    assert!(
        r.register_template_string("parent", "<html>{{> layout}}</html>")
            .is_ok()
    );
    assert!(r.register_template_string(
        "child",
        "{{#*inline \"layout\"}}content{{/inline}}{{#> parent}}{{> seg}}{{/parent}}"
    ).is_ok());
    assert!(r.register_template_string("seg", "1234").is_ok());
    let r = r.render("child", &true).expect("should work");
    assert_eq!(r, "<html>content</html>");
}
#[test]
fn test_key_with_slash() {
    // Keys containing `/` must survive `each` iteration intact.
    let mut r = Registry::new();
    assert!(
        r.register_template_string("t", "{{#each .}}{{@key}}: {{this}}\n{{/each}}")
            .is_ok()
    );
    let r = r.render("t", &json!({"/foo": "bar"})).expect("should work");
    assert_eq!(r, "/foo: bar\n");
}
#[test]
fn test_html_comment() {
    // `{{! ... }}` comments are rendered as HTML comments.
    let r = Registry::new();
    assert_eq!(
        r.render_template("Hello {{this}} {{! test me }}", &0)
            .unwrap(),
        "Hello 0 <!-- test me -->"
    );
}
|
use std::{ mem, slice, string };
use handle::Owned;
use raw;
use raw::{ IV, UV, NV };
use raw::{ SVt_PVAV, SVt_PVHV, SVt_PVCV, SVt_PVGV };
use array::{ AV };
use convert::{ IntoSV, FromSV };
/// Owning handle to a Perl scalar value (`SV*`).
pub struct SV(Owned<raw::SV>);
impl SV {
    method! {
        /// Return true if SV is a real scalar value.
        simple fn is_scalar() -> bool = sv_type() < SVt_PVAV
    }
    method! {
        /// Return true if SV contains array.
        simple fn is_array() -> bool = sv_type() == SVt_PVAV
    }
    method! {
        /// Return true if SV contains hash.
        simple fn is_hash() -> bool = sv_type() == SVt_PVHV
    }
    method! {
        /// Return true if SV contains subroutine.
        simple fn is_code() -> bool = sv_type() == SVt_PVCV
    }
    method! {
        /// Return true if SV contains glob.
        simple fn is_glob() -> bool = sv_type() == SVt_PVGV
    }
    method! {
        /// Return true if SV contains a signed integer.
        ///
        /// Perl macro: [`SvIOK`](http://perldoc.perl.org/perlapi.html#SvIOK).
        simple fn iv_ok() -> bool = sv_iok() != 0
    }
    method! {
        /// Coerce the given SV to an integer and return it.
        ///
        /// Perl macro: [`SvIV`](http://perldoc.perl.org/perlapi.html#SvIV).
        simple fn iv() -> IV = sv_iv()
    }
    method! {
        /// Return true if SV contains an unsigned integer.
        ///
        /// Perl macro: [`SvUOK`](http://perldoc.perl.org/perlapi.html#SvUOK).
        simple fn uv_ok() -> bool = sv_uok() != 0
    }
    method! {
        /// Coerce the given SV to an unsigned integer and return it.
        ///
        /// Perl macro: [`SvUV`](http://perldoc.perl.org/perlapi.html#SvUV).
        simple fn uv() -> UV = sv_uv()
    }
    method! {
        /// Return true if SV contains a floating point value.
        ///
        /// Perl macro: [`SvNOK`](http://perldoc.perl.org/perlapi.html#SvNOK).
        simple fn nv_ok() -> bool = sv_nok() != 0
    }
    method! {
        /// Coerce the given SV to a floating point value and return it.
        ///
        /// Perl macro: [`SvNV`](http://perldoc.perl.org/perlapi.html#SvNV).
        simple fn nv() -> NV = sv_nv()
    }
    method! {
        /// Return true if SV contains a string.
        ///
        /// Perl macro: [`SvPOK`](http://perldoc.perl.org/perlapi.html#SvPOK).
        simple fn pv_ok() -> bool = sv_pok() != 0
    }
    method! {
        /// Return UTF8 flag on the SV.
        ///
        /// You should use this after a call to `pv()` or `str()`, in case any call to string
        /// overloading updates the internal flag.
        ///
        /// Perl macro: [`SvUTF8`](http://perldoc.perl.org/perlapi.html#SvUTF8).
        simple fn utf8() -> bool = sv_utf8() != 0
    }
    /// Return a copy of string in the SV as a vector of bytes.
    ///
    /// Perl macro: [`SvPV`](http://perldoc.perl.org/perlapi.html#SvPV).
    pub fn pv(&self) -> Vec<u8> {
        unsafe {
            let mut len = 0;
            let ptr = self.pthx().sv_pv(self.as_ptr(), &mut len);
            slice::from_raw_parts(ptr as *const u8, len as usize).to_owned()
        }
    }
    /// Return a copy of string in the SV.
    ///
    /// Fails when the bytes in the SV are not valid UTF-8.
    ///
    /// Perl macro: [`SvPV`](http://perldoc.perl.org/perlapi.html#SvPV).
    pub fn str(&self) -> Result<String, string::FromUtf8Error> {
        String::from_utf8(self.pv())
    }
    method! {
        /// Return true if SV contains a Perl reference.
        ///
        /// Perl macro: [`SvROK`](http://perldoc.perl.org/perlapi.html#SvROK).
        simple fn rv_ok() -> bool = sv_rok() != 0
    }
    // Raw dereference; caller must ensure the SV actually is an RV.
    unsafe fn deref_raw(&self) -> *mut raw::SV {
        self.pthx().sv_rv(self.as_ptr())
    }
    /// Dereference RV.
    ///
    /// Return `None` if `self` is not a valid Perl reference.
    pub fn deref(&self) -> Option<SV> {
        if self.rv_ok() {
            Some(unsafe { SV::from_raw_borrowed(self.pthx(), self.deref_raw()) })
        } else {
            None
        }
    }
    /// Cast SV into AV.
    ///
    /// Returns `None` (dropping `self`) if the SV does not contain an array.
    pub fn as_av(self) -> Option<AV> {
        if self.is_array() {
            Some(unsafe { AV::from_raw_owned(self.pthx(), self.into_raw() as *mut _) })
        } else {
            None
        }
    }
    /// Consume SV and convert into raw pointer.
    ///
    /// Does not decrement reference count. Returned pointer must be correctly disposed of to avoid
    /// memory leaks.
    pub fn into_raw(self) -> *mut raw::SV {
        let raw = self.0.as_ptr();
        mem::forget(self);
        raw
    }
    /// Construct new instance from a raw SV pointer without incrementing reference counter.
    ///
    /// Owned SV pointers are returned by assorted
    /// [`newSV`](http://perldoc.perl.org/perlapi.html#newSV) functions.
    pub unsafe fn from_raw_owned(pthx: raw::Interpreter, raw: *mut raw::SV) -> SV {
        SV(Owned::from_raw_owned(pthx, raw))
    }
    /// Construct new instance from a raw SV pointer and increment reference counter.
    ///
    /// Borrowed SV pointers exist on stack and are returned by functions like
    /// [`av_fetch`](http://perldoc.perl.org/perlapi.html#av_fetch) or
    /// [`hv_fetch`](http://perldoc.perl.org/perlapi.html#hv_fetch).
    pub unsafe fn from_raw_borrowed(pthx: raw::Interpreter, raw: *mut raw::SV) -> SV {
        SV(Owned::from_raw_borrowed(pthx, raw))
    }
    // Shorthand accessors for the interpreter handle and the raw pointer.
    fn pthx(&self) -> raw::Interpreter { self.0.pthx() }
    fn as_ptr(&self) -> *mut raw::SV { self.0.as_ptr() }
}
impl FromSV for IV {
    /// Coerce the scalar to a signed integer via `sv_iv`.
    unsafe fn from_sv(pthx: raw::Interpreter, raw: *mut raw::SV) -> IV {
        pthx.sv_iv(raw)
    }
}
impl FromSV for SV {
    /// Wrap the raw pointer as a borrowed handle (increments the refcount).
    unsafe fn from_sv(pthx: raw::Interpreter, raw: *mut raw::SV) -> SV {
        SV::from_raw_borrowed(pthx, raw)
    }
}
impl IntoSV for IV {
    /// Create a new signed-integer SV via `new_sv_iv`.
    fn into_sv(self, pthx: raw::Interpreter) -> SV {
        unsafe { SV::from_raw_owned(pthx, pthx.new_sv_iv(self)) }
    }
}
impl IntoSV for UV {
    /// Create a new unsigned-integer SV via `new_sv_uv`.
    fn into_sv(self, pthx: raw::Interpreter) -> SV {
        unsafe { SV::from_raw_owned(pthx, pthx.new_sv_uv(self)) }
    }
}
impl IntoSV for NV {
    /// Create a new floating-point SV via `new_sv_nv`.
    fn into_sv(self, pthx: raw::Interpreter) -> SV {
        unsafe { SV::from_raw_owned(pthx, pthx.new_sv_nv(self)) }
    }
}
impl IntoSV for bool {
    /// Use the interpreter-provided boolean scalars (`sv_yes`/`sv_no`).
    fn into_sv(self, pthx: raw::Interpreter) -> SV {
        unsafe {
            let raw = if self { pthx.sv_yes() } else { pthx.sv_no() };
            SV::from_raw_owned(pthx, raw)
        }
    }
}
impl<'a> IntoSV for &'a str {
    /// Create a new string SV with the UTF-8 flag set.
    fn into_sv(self, pthx: raw::Interpreter) -> SV {
        unsafe {
            let svp = pthx.new_sv_pvn(self.as_ptr() as *const i8,
                                      self.len() as raw::STRLEN,
                                      raw::SVf_UTF8 as raw::U32);
            SV::from_raw_owned(pthx, svp)
        }
    }
}
Fix docstring.
use std::{ mem, slice, string };
use handle::Owned;
use raw;
use raw::{ IV, UV, NV };
use raw::{ SVt_PVAV, SVt_PVHV, SVt_PVCV, SVt_PVGV };
use array::{ AV };
use convert::{ IntoSV, FromSV };
/// Owning handle to a Perl scalar value (`SV*`).
pub struct SV(Owned<raw::SV>);
impl SV {
    method! {
        /// Return true if SV is a real scalar value.
        simple fn is_scalar() -> bool = sv_type() < SVt_PVAV
    }
    method! {
        /// Return true if SV contains array.
        simple fn is_array() -> bool = sv_type() == SVt_PVAV
    }
    method! {
        /// Return true if SV contains hash.
        simple fn is_hash() -> bool = sv_type() == SVt_PVHV
    }
    method! {
        /// Return true if SV contains subroutine.
        simple fn is_code() -> bool = sv_type() == SVt_PVCV
    }
    method! {
        /// Return true if SV contains glob.
        simple fn is_glob() -> bool = sv_type() == SVt_PVGV
    }
    method! {
        /// Return true if SV contains a signed integer.
        ///
        /// Perl macro: [`SvIOK`](http://perldoc.perl.org/perlapi.html#SvIOK).
        simple fn iv_ok() -> bool = sv_iok() != 0
    }
    method! {
        /// Coerce the given SV to an integer and return it.
        ///
        /// Perl macro: [`SvIV`](http://perldoc.perl.org/perlapi.html#SvIV).
        simple fn iv() -> IV = sv_iv()
    }
    method! {
        /// Return true if SV contains an unsigned integer.
        ///
        /// Perl macro: [`SvUOK`](http://perldoc.perl.org/perlapi.html#SvUOK).
        simple fn uv_ok() -> bool = sv_uok() != 0
    }
    method! {
        /// Coerce the given SV to an unsigned integer and return it.
        ///
        /// Perl macro: [`SvUV`](http://perldoc.perl.org/perlapi.html#SvUV).
        simple fn uv() -> UV = sv_uv()
    }
    method! {
        /// Return true if SV contains a floating point value.
        ///
        /// Perl macro: [`SvNOK`](http://perldoc.perl.org/perlapi.html#SvNOK).
        simple fn nv_ok() -> bool = sv_nok() != 0
    }
    method! {
        /// Coerce the given SV to a floating point value and return it.
        ///
        /// Perl macro: [`SvNV`](http://perldoc.perl.org/perlapi.html#SvNV).
        simple fn nv() -> NV = sv_nv()
    }
    method! {
        /// Return true if SV contains a string.
        ///
        /// Perl macro: [`SvPOK`](http://perldoc.perl.org/perlapi.html#SvPOK).
        simple fn pv_ok() -> bool = sv_pok() != 0
    }
    method! {
        /// Return UTF8 flag on the SV.
        ///
        /// You should use this after a call to `pv()` or `str()`, in case any call to string
        /// overloading updates the internal flag.
        ///
        /// Perl macro: [`SvUTF8`](http://perldoc.perl.org/perlapi.html#SvUTF8).
        simple fn utf8() -> bool = sv_utf8() != 0
    }
    /// Return a copy of string in the SV as a vector of bytes.
    ///
    /// Perl macro: [`SvPV`](http://perldoc.perl.org/perlapi.html#SvPV).
    pub fn pv(&self) -> Vec<u8> {
        unsafe {
            let mut len = 0;
            let ptr = self.pthx().sv_pv(self.as_ptr(), &mut len);
            slice::from_raw_parts(ptr as *const u8, len as usize).to_owned()
        }
    }
    /// Return a copy of string in the SV.
    ///
    /// Fails when the bytes in the SV are not valid UTF-8.
    ///
    /// Perl macro: [`SvPV`](http://perldoc.perl.org/perlapi.html#SvPV).
    pub fn str(&self) -> Result<String, string::FromUtf8Error> {
        String::from_utf8(self.pv())
    }
    method! {
        /// Return true if SV contains a Perl reference.
        ///
        /// Perl macro: [`SvROK`](http://perldoc.perl.org/perlapi.html#SvROK).
        simple fn rv_ok() -> bool = sv_rok() != 0
    }
    // Raw dereference; caller must ensure the SV actually is an RV.
    unsafe fn deref_raw(&self) -> *mut raw::SV {
        self.pthx().sv_rv(self.as_ptr())
    }
    /// Dereference RV.
    ///
    /// Return `None` if `self` is not a valid Perl reference.
    pub fn deref(&self) -> Option<SV> {
        if self.rv_ok() {
            Some(unsafe { SV::from_raw_borrowed(self.pthx(), self.deref_raw()) })
        } else {
            None
        }
    }
    /// Cast SV into AV.
    ///
    /// Returns `None` (dropping `self`) if the SV does not contain an array.
    pub fn as_av(self) -> Option<AV> {
        if self.is_array() {
            Some(unsafe { AV::from_raw_owned(self.pthx(), self.into_raw() as *mut _) })
        } else {
            None
        }
    }
    /// Consume SV and convert into raw pointer.
    ///
    /// Does not decrement reference count. Returned pointer must be correctly disposed of to avoid
    /// memory leaks.
    pub fn into_raw(self) -> *mut raw::SV {
        let raw = self.0.as_ptr();
        mem::forget(self);
        raw
    }
    /// Construct new instance from a raw SV pointer without incrementing reference counter.
    ///
    /// Owned SV pointers are returned by assorted
    /// [`newSV`](http://perldoc.perl.org/perlapi.html#newSV) functions.
    pub unsafe fn from_raw_owned(pthx: raw::Interpreter, raw: *mut raw::SV) -> SV {
        SV(Owned::from_raw_owned(pthx, raw))
    }
    /// Construct new instance from a raw SV pointer and increment reference counter.
    ///
    /// Borrowed SV pointers exist on stack and are returned by functions like
    /// [`av_fetch`](http://perldoc.perl.org/perlapi.html#av_fetch) or
    /// [`hv_fetch`](http://perldoc.perl.org/perlapi.html#hv_fetch).
    pub unsafe fn from_raw_borrowed(pthx: raw::Interpreter, raw: *mut raw::SV) -> SV {
        SV(Owned::from_raw_borrowed(pthx, raw))
    }
    // Shorthand accessors for the interpreter handle and the raw pointer.
    fn pthx(&self) -> raw::Interpreter { self.0.pthx() }
    fn as_ptr(&self) -> *mut raw::SV { self.0.as_ptr() }
}
impl FromSV for IV {
    /// Coerce the scalar to a signed integer via `sv_iv`.
    unsafe fn from_sv(pthx: raw::Interpreter, raw: *mut raw::SV) -> IV {
        pthx.sv_iv(raw)
    }
}
impl FromSV for SV {
    /// Wrap the raw pointer as a borrowed handle (increments the refcount).
    unsafe fn from_sv(pthx: raw::Interpreter, raw: *mut raw::SV) -> SV {
        SV::from_raw_borrowed(pthx, raw)
    }
}
impl IntoSV for IV {
    /// Create a new signed-integer SV via `new_sv_iv`.
    fn into_sv(self, pthx: raw::Interpreter) -> SV {
        unsafe { SV::from_raw_owned(pthx, pthx.new_sv_iv(self)) }
    }
}
impl IntoSV for UV {
    /// Create a new unsigned-integer SV via `new_sv_uv`.
    fn into_sv(self, pthx: raw::Interpreter) -> SV {
        unsafe { SV::from_raw_owned(pthx, pthx.new_sv_uv(self)) }
    }
}
impl IntoSV for NV {
    /// Create a new floating-point SV via `new_sv_nv`.
    fn into_sv(self, pthx: raw::Interpreter) -> SV {
        unsafe { SV::from_raw_owned(pthx, pthx.new_sv_nv(self)) }
    }
}
impl IntoSV for bool {
    /// Use the interpreter-provided boolean scalars (`sv_yes`/`sv_no`).
    fn into_sv(self, pthx: raw::Interpreter) -> SV {
        unsafe {
            let raw = if self { pthx.sv_yes() } else { pthx.sv_no() };
            SV::from_raw_owned(pthx, raw)
        }
    }
}
impl<'a> IntoSV for &'a str {
    /// Create a new string SV with the UTF-8 flag set.
    fn into_sv(self, pthx: raw::Interpreter) -> SV {
        unsafe {
            let svp = pthx.new_sv_pvn(self.as_ptr() as *const i8,
                                      self.len() as raw::STRLEN,
                                      raw::SVf_UTF8 as raw::U32);
            SV::from_raw_owned(pthx, svp)
        }
    }
}
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* This module implements the Scrypt key derivation function as specified in [1].
*
* # References
* [1] - C. Percival. Stronger Key Derivation Via Sequential Memory-Hard Functions.
* http://www.tarsnap.com/scrypt/scrypt.pdf
*/
use std::iter::repeat;
use std::io;
use std::num::{Int, ToPrimitive};
use std::mem::size_of;
use std::slice::bytes::copy_memory;
use rand::{OsRng, Rng};
use serialize::base64;
use serialize::base64::{FromBase64, ToBase64};
use cryptoutil::{read_u32_le, read_u32v_le, write_u32_le};
use hmac::Hmac;
use pbkdf2::pbkdf2;
use sha2::Sha256;
use util::fixed_time_eq;
// The salsa20/8 core function.
// `input` and `output` must each be exactly 64 bytes.
fn salsa20_8(input: &[u8], output: &mut [u8]) {
    let mut x = [0u32; 16];
    read_u32v_le(&mut x, input);
    let rounds = 8;
    // BUGFIX: Salsa20 quarter-round addition is defined mod 2^32; the plain
    // `+` operator panics on overflow in debug builds, so use wrapping_add.
    macro_rules! run_round (
        ($($set_idx:expr, $idx_a:expr, $idx_b:expr, $rot:expr);*) => { {
            $( x[$set_idx] ^= (x[$idx_a].wrapping_add(x[$idx_b])).rotate_left($rot); )*
        } }
    );
    // Each pass performs one column round and one row round.
    for _ in (0..rounds / 2) {
        run_round!(
            0x4, 0x0, 0xc, 7;
            0x8, 0x4, 0x0, 9;
            0xc, 0x8, 0x4, 13;
            0x0, 0xc, 0x8, 18;
            0x9, 0x5, 0x1, 7;
            0xd, 0x9, 0x5, 9;
            0x1, 0xd, 0x9, 13;
            0x5, 0x1, 0xd, 18;
            0xe, 0xa, 0x6, 7;
            0x2, 0xe, 0xa, 9;
            0x6, 0x2, 0xe, 13;
            0xa, 0x6, 0x2, 18;
            0x3, 0xf, 0xb, 7;
            0x7, 0x3, 0xf, 9;
            0xb, 0x7, 0x3, 13;
            0xf, 0xb, 0x7, 18;
            0x1, 0x0, 0x3, 7;
            0x2, 0x1, 0x0, 9;
            0x3, 0x2, 0x1, 13;
            0x0, 0x3, 0x2, 18;
            0x6, 0x5, 0x4, 7;
            0x7, 0x6, 0x5, 9;
            0x4, 0x7, 0x6, 13;
            0x5, 0x4, 0x7, 18;
            0xb, 0xa, 0x9, 7;
            0x8, 0xb, 0xa, 9;
            0x9, 0x8, 0xb, 13;
            0xa, 0x9, 0x8, 18;
            0xc, 0xf, 0xe, 7;
            0xd, 0xc, 0xf, 9;
            0xe, 0xd, 0xc, 13;
            0xf, 0xe, 0xd, 18
        )
    }
    // Feed-forward: add the original input words (mod 2^32) to the state.
    for i in (0..16) {
        write_u32_le(
            &mut output[i * 4..(i + 1) * 4],
            x[i].wrapping_add(read_u32_le(&input[i * 4..(i + 1) * 4])));
    }
}
// XOR `x` and `y` element-wise into `output`, stopping at the
// shortest of the three slices.
fn xor(x: &[u8], y: &[u8], output: &mut [u8]) {
    let pairs = x.iter().zip(y.iter());
    for (out, (&a, &b)) in output.iter_mut().zip(pairs) {
        *out = a ^ b;
    }
}
// Execute the BlockMix operation
// input - the input vector. The length must be a multiple of 128.
// output - the output vector. Must be the same length as input.
fn scrypt_block_mix(input: &[u8], output: &mut [u8]) {
    // X starts as the final 64-byte block of the input.
    let mut x = [0u8; 64];
    copy_memory(&mut x, &input[input.len() - 64..]);
    let mut t = [0u8; 64];
    for (i, chunk) in input.chunks(64).enumerate() {
        // X = Salsa20/8(X xor B[i])
        xor(&x, chunk, &mut t);
        salsa20_8(&t, &mut x);
        // Even-indexed blocks fill the first half of the output, odd-indexed
        // blocks the second half (the BlockMix shuffle).
        let pos = if i % 2 == 0 { (i / 2) * 64 } else { (i / 2) * 64 + input.len() / 2 };
        copy_memory(&mut output[pos..pos + 64], &x);
    }
}
// Execute the ROMix operation in-place.
// b - the data to operate on
// v - a temporary variable to store the vector V
// t - a temporary variable to store the result of the xor
// n - the scrypt parameter N
fn scrypt_ro_mix(b: &mut [u8], v: &mut [u8], t: &mut [u8], n: usize) {
    // Interpret the first word of the last 64-byte block as a little-endian
    // integer, reduced mod n.
    fn integerify(x: &[u8], n: usize) -> usize {
        // n is a power of 2, so n - 1 gives us a bitmask that we can use to perform a calculation
        // mod n using a simple bitwise and.
        let mask = n - 1;
        // This cast is safe since we're going to get the value mod n (which is a power of 2), so we
        // don't have to care about truncating any of the high bits off
        let result = (read_u32_le(&x[x.len() - 64..x.len() - 60]) as usize) & mask;
        result
    }
    let len = b.len();
    // Phase 1: fill V with the successive BlockMix states of B.
    for chunk in v.chunks_mut(len) {
        copy_memory(chunk, b);
        scrypt_block_mix(chunk, b);
    }
    // Phase 2: n rounds of data-dependent mixing against V.
    for _ in (0..n) {
        let j = integerify(b, n);
        xor(b, &v[j * len..(j + 1) * len], t);
        scrypt_block_mix(t, b);
    }
}
/**
 * The Scrypt parameter values.
 */
#[derive(Clone, Copy)]
pub struct ScryptParams {
    log_n: u8, // log2 of the N (CPU/memory cost) parameter
    r: u32,    // block size parameter
    p: u32     // parallelization parameter
}
impl ScryptParams {
    /**
     * Create a new instance of ScryptParams.
     *
     * # Arguments
     *
     * * log_n - The log2 of the Scrypt parameter N
     * * r - The Scrypt parameter r
     * * p - The Scrypt parameter p
     *
     * # Panics
     *
     * Panics if any parameter is zero or the combination violates the
     * size limits required by the Scrypt specification.
     */
    pub fn new(log_n: u8, r: u32, p: u32) -> ScryptParams {
        assert!(r > 0);
        assert!(p > 0);
        assert!(log_n > 0);
        // n = 1 << log_n must fit in usize.
        assert!((log_n as usize) < size_of::<usize>() * 8);
        let r = r.to_usize().unwrap();
        let p = p.to_usize().unwrap();
        let n: usize = 1 << log_n;
        // check that r * 128 doesn't overflow
        let r128 = match r.checked_mul(128) {
            Some(x) => x,
            None => panic!("Invalid Scrypt parameters.")
        };
        // check that n * r * 128 doesn't overflow
        match r128.checked_mul(n) {
            Some(_) => { },
            None => panic!("Invalid Scrypt parameters.")
        };
        // check that p * r * 128 doesn't overflow
        match r128.checked_mul(p) {
            Some(_) => { },
            None => panic!("Invalid Scrypt parameters.")
        };
        // This check required by Scrypt:
        // check: n < 2^(128 * r / 8)
        // r * 16 won't overflow since r128 didn't
        assert!((log_n as usize) < r * 16);
        // This check required by Scrypt:
        // check: p <= ((2^32-1) * 32) / (128 * r)
        // It takes a bit of re-arranging to get the check above into this form, but, it is indeed
        // the same.
        assert!(r * p < 0x40000000);
        ScryptParams {
            log_n: log_n,
            r: r as u32,
            p: p as u32
        }
    }
}
/**
 * The scrypt key derivation function.
 *
 * # Arguments
 *
 * * password - The password to process as a byte vector
 * * salt - The salt value to use as a byte vector
 * * params - The ScryptParams to use
 * * output - The resulting derived key is returned in this byte vector.
 *
 * # Panics
 *
 * Panics if `output` is empty or exceeds the maximum derived-key length.
 */
pub fn scrypt(password: &[u8], salt: &[u8], params: &ScryptParams, output: &mut [u8]) {
    // This check required by Scrypt:
    // check output.len() > 0 && output.len() <= (2^32 - 1) * 32
    assert!(output.len() > 0);
    assert!(output.len() / 32 <= 0xffffffff);
    // The checks in the ScryptParams constructor guarantee that the following is safe:
    let n = 1 << params.log_n;
    let r128 = (params.r as usize) * 128;
    let pr128 = (params.p as usize) * r128;
    let nr128 = n * r128;
    // B = PBKDF2-HMAC-SHA256(password, salt, 1 iteration).
    let mut mac = Hmac::new(Sha256::new(), password);
    let mut b: Vec<u8> = repeat(0).take(pr128).collect();
    pbkdf2(&mut mac, salt, 1, b.as_mut_slice());
    // Scratch buffers for ROMix: V (n blocks) plus a one-block xor temporary.
    let mut v: Vec<u8> = repeat(0).take(nr128).collect();
    let mut t: Vec<u8> = repeat(0).take(r128).collect();
    // Mix each of the p blocks of B independently.
    for chunk in b.as_mut_slice().chunks_mut(r128) {
        scrypt_ro_mix(chunk, v.as_mut_slice(), t.as_mut_slice(), n);
    }
    // Final expansion: PBKDF2-HMAC-SHA256(password, B, 1 iteration).
    pbkdf2(&mut mac, &*b, 1, output.as_mut_slice());
}
/**
 * scrypt_simple is a helper function that should be sufficient for the majority of cases where
 * an application needs to use Scrypt to hash a password for storage. The result is a String that
 * contains the parameters used as part of its encoding. The scrypt_check function may be used on
 * a password to check if it is equal to a hashed value.
 *
 * # Format
 *
 * The format of the output is a modified version of the Modular Crypt Format that encodes algorithm
 * used and the parameter values. If all parameter values can each fit within a single byte, a
 * compact format is used (format 0). However, if any value cannot, an expanded format where the r
 * and p parameters are encoded using 4 bytes (format 1) is used. Both formats use a 128-bit salt
 * and a 256-bit hash. The format is indicated as "rscrypt" which is short for "Rust Scrypt format."
 *
 * $rscrypt$<format>$<base64(log_n,r,p)>$<base64(salt)>$<base64(hash)>$
 *
 * # Arguments
 *
 * * password - The password to process as a str
 * * params - The ScryptParams to use
 *
 */
pub fn scrypt_simple(password: &str, params: &ScryptParams) -> io::Result<String> {
    let mut rng = try!(OsRng::new());
    // 128-bit salt
    let salt: Vec<u8> = rng.gen_iter::<u8>().take(16).collect();
    // 256-bit derived key
    let mut dk = [0u8; 32];
    scrypt(password.as_bytes(), &*salt, params, &mut dk);
    let mut result = "$rscrypt$".to_string();
    if params.r < 256 && params.p < 256 {
        // Compact format: log_n, r and p each fit in one byte.
        result.push_str("0$");
        let mut tmp = [0u8; 3];
        tmp[0] = params.log_n;
        tmp[1] = params.r as u8;
        tmp[2] = params.p as u8;
        result.push_str(&*tmp.to_base64(base64::STANDARD));
    } else {
        // Expanded format: r and p are stored as 4-byte little-endian words.
        result.push_str("1$");
        let mut tmp = [0u8; 9];
        tmp[0] = params.log_n;
        write_u32_le(&mut tmp[1..5], params.r);
        write_u32_le(&mut tmp[5..9], params.p);
        result.push_str(&*tmp.to_base64(base64::STANDARD));
    }
    result.push('$');
    result.push_str(&*salt.to_base64(base64::STANDARD));
    result.push('$');
    result.push_str(&*dk.to_base64(base64::STANDARD));
    result.push('$');
    Ok(result)
}
/**
 * scrypt_check compares a password against the result of a previous call to scrypt_simple and
 * returns true if the passed in password hashes to the same value.
 *
 * # Arguments
 *
 * * password - The password to process as a str
 * * hashed_value - A string representing a hashed password returned by scrypt_simple()
 *
 */
pub fn scrypt_check(password: &str, hashed_value: &str) -> Result<bool, &'static str> {
    static ERR_STR: &'static str = "Hash is not in Rust Scrypt format.";

    let mut iter = hashed_value.split('$');

    // Check that there are no characters before the first "$"
    match iter.next() {
        Some(x) => if x != "" { return Err(ERR_STR); },
        None => return Err(ERR_STR)
    }

    // Check the name
    match iter.next() {
        Some(t) => if t != "rscrypt" { return Err(ERR_STR); },
        None => return Err(ERR_STR)
    }

    // Parse format - currently only version 0 (compact) and 1 (expanded) are supported
    let params: ScryptParams;
    match iter.next() {
        Some(fstr) => {
            // Parse the parameters - the size of them depends on whether we are using the compact
            // or expanded format
            let pvec = match iter.next() {
                Some(pstr) => match pstr.from_base64() {
                    Ok(x) => x,
                    Err(_) => return Err(ERR_STR)
                },
                None => return Err(ERR_STR)
            };
            match fstr {
                // Compact: 3 bytes - log_n, r, p
                "0" => {
                    if pvec.len() != 3 { return Err(ERR_STR); }
                    let log_n = pvec[0] as u8;
                    let r = pvec[1] as u32;
                    let p = pvec[2] as u32;
                    params = ScryptParams::new(log_n, r, p);
                }
                // Expanded: 9 bytes - log_n followed by little-endian u32 r and p
                "1" => {
                    if pvec.len() != 9 { return Err(ERR_STR); }
                    let log_n = pvec[0];
                    let mut pval = [0u32; 2];
                    read_u32v_le(&mut pval, &pvec[1..9]);
                    params = ScryptParams::new(log_n, pval[0], pval[1]);
                }
                _ => return Err(ERR_STR)
            }
        }
        None => return Err(ERR_STR)
    }

    // Salt
    let salt = match iter.next() {
        Some(sstr) => match sstr.from_base64() {
            Ok(salt) => salt,
            Err(_) => return Err(ERR_STR)
        },
        None => return Err(ERR_STR)
    };

    // Hashed value
    let hash = match iter.next() {
        Some(hstr) => match hstr.from_base64() {
            Ok(hash) => hash,
            Err(_) => return Err(ERR_STR)
        },
        None => return Err(ERR_STR)
    };

    // Make sure that the input ends with a "$"
    match iter.next() {
        Some(x) => if x != "" { return Err(ERR_STR); },
        None => return Err(ERR_STR)
    }

    // Make sure there is no trailing data after the final "$"
    match iter.next() {
        Some(_) => return Err(ERR_STR),
        None => { }
    }

    // Re-derive the key from the candidate password with the recovered parameters.
    let mut output: Vec<u8> = repeat(0).take(hash.len()).collect();
    scrypt(password.as_bytes(), &*salt, &params, output.as_mut_slice());

    // Be careful here - it's important that the comparison be done using a fixed time equality
    // check. Otherwise an adversary that can measure how long this step takes can learn about the
    // hashed value which would allow them to mount an offline brute force attack against the
    // hashed password.
    Ok(fixed_time_eq(&*output, &*hash))
}
#[cfg(test)]
mod test {
    use std::iter::repeat;

    use scrypt::{scrypt, scrypt_simple, scrypt_check, ScryptParams};

    // One known-answer test case for the raw scrypt function.
    struct Test {
        password: &'static str,
        salt: &'static str,
        log_n: u8,
        r: u32,
        p: u32,
        expected: Vec<u8>
    }

    // Test vectors from [1]. The last test vector is omitted because it takes too long to run.
    fn tests() -> Vec<Test> {
        vec![
            Test {
                password: "",
                salt: "",
                log_n: 4,
                r: 1,
                p: 1,
                expected: vec![
                    0x77, 0xd6, 0x57, 0x62, 0x38, 0x65, 0x7b, 0x20,
                    0x3b, 0x19, 0xca, 0x42, 0xc1, 0x8a, 0x04, 0x97,
                    0xf1, 0x6b, 0x48, 0x44, 0xe3, 0x07, 0x4a, 0xe8,
                    0xdf, 0xdf, 0xfa, 0x3f, 0xed, 0xe2, 0x14, 0x42,
                    0xfc, 0xd0, 0x06, 0x9d, 0xed, 0x09, 0x48, 0xf8,
                    0x32, 0x6a, 0x75, 0x3a, 0x0f, 0xc8, 0x1f, 0x17,
                    0xe8, 0xd3, 0xe0, 0xfb, 0x2e, 0x0d, 0x36, 0x28,
                    0xcf, 0x35, 0xe2, 0x0c, 0x38, 0xd1, 0x89, 0x06 ]
            },
            Test {
                password: "password",
                salt: "NaCl",
                log_n: 10,
                r: 8,
                p: 16,
                expected: vec![
                    0xfd, 0xba, 0xbe, 0x1c, 0x9d, 0x34, 0x72, 0x00,
                    0x78, 0x56, 0xe7, 0x19, 0x0d, 0x01, 0xe9, 0xfe,
                    0x7c, 0x6a, 0xd7, 0xcb, 0xc8, 0x23, 0x78, 0x30,
                    0xe7, 0x73, 0x76, 0x63, 0x4b, 0x37, 0x31, 0x62,
                    0x2e, 0xaf, 0x30, 0xd9, 0x2e, 0x22, 0xa3, 0x88,
                    0x6f, 0xf1, 0x09, 0x27, 0x9d, 0x98, 0x30, 0xda,
                    0xc7, 0x27, 0xaf, 0xb9, 0x4a, 0x83, 0xee, 0x6d,
                    0x83, 0x60, 0xcb, 0xdf, 0xa2, 0xcc, 0x06, 0x40 ]
            },
            Test {
                password: "pleaseletmein",
                salt: "SodiumChloride",
                log_n: 14,
                r: 8,
                p: 1,
                expected: vec![
                    0x70, 0x23, 0xbd, 0xcb, 0x3a, 0xfd, 0x73, 0x48,
                    0x46, 0x1c, 0x06, 0xcd, 0x81, 0xfd, 0x38, 0xeb,
                    0xfd, 0xa8, 0xfb, 0xba, 0x90, 0x4f, 0x8e, 0x3e,
                    0xa9, 0xb5, 0x43, 0xf6, 0x54, 0x5d, 0xa1, 0xf2,
                    0xd5, 0x43, 0x29, 0x55, 0x61, 0x3f, 0x0f, 0xcf,
                    0x62, 0xd4, 0x97, 0x05, 0x24, 0x2a, 0x9a, 0xf9,
                    0xe6, 0x1e, 0x85, 0xdc, 0x0d, 0x65, 0x1e, 0x40,
                    0xdf, 0xcf, 0x01, 0x7b, 0x45, 0x57, 0x58, 0x87 ]
            },
        ]
    }

    // Known-answer tests for the raw KDF against the published vectors.
    #[test]
    fn test_scrypt() {
        let tests = tests();
        for t in tests.iter() {
            let mut result: Vec<u8> = repeat(0).take(t.expected.len()).collect();
            let params = ScryptParams::new(t.log_n, t.r, t.p);
            scrypt(t.password.as_bytes(), t.salt.as_bytes(), &params, result.as_mut_slice());
            assert!(result == t.expected);
        }
    }

    // Round-trip test shared by both storage formats: hash twice (salts must
    // differ), then verify correct and incorrect passwords against each hash.
    fn test_scrypt_simple(log_n: u8, r: u32, p: u32) {
        let password = "password";

        let params = ScryptParams::new(log_n, r, p);
        let out1 = scrypt_simple(password, &params).unwrap();
        let out2 = scrypt_simple(password, &params).unwrap();

        // This just makes sure that a salt is being applied. It doesn't verify that that salt is
        // cryptographically strong, however.
        assert!(out1 != out2);

        match scrypt_check(password, &out1[..]) {
            Ok(r) => assert!(r),
            Err(_) => panic!()
        }
        match scrypt_check(password, &out2[..]) {
            Ok(r) => assert!(r),
            Err(_) => panic!()
        }

        match scrypt_check("wrong", &out1[..]) {
            Ok(r) => assert!(!r),
            Err(_) => panic!()
        }
        match scrypt_check("wrong", &out2[..]) {
            Ok(r) => assert!(!r),
            Err(_) => panic!()
        }
    }

    #[test]
    fn test_scrypt_simple_compact() {
        // These parameters are intentionally very weak - the goal is to make the test run quickly!
        test_scrypt_simple(7, 8, 1);
    }

    #[test]
    fn test_scrypt_simple_expanded() {
        // These parameters are intentionally very weak - the goal is to make the test run quickly!
        // p = 256 forces the expanded (format 1) encoding.
        test_scrypt_simple(3, 1, 256);
    }
}
Update scrypt to use wrapping arithmetic functions
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* This module implements the Scrypt key derivation function as specified in [1].
*
* # References
* [1] - C. Percival. Stronger Key Derivation Via Sequential Memory-Hard Functions.
* http://www.tarsnap.com/scrypt/scrypt.pdf
*/
use std::iter::repeat;
use std::io;
use std::num::{Int, ToPrimitive};
use std::mem::size_of;
use std::slice::bytes::copy_memory;
use rand::{OsRng, Rng};
use serialize::base64;
use serialize::base64::{FromBase64, ToBase64};
use cryptoutil::{read_u32_le, read_u32v_le, write_u32_le};
use hmac::Hmac;
use pbkdf2::pbkdf2;
use sha2::Sha256;
use util::fixed_time_eq;
// The salsa20/8 core function.
// input and output are both 64-byte (16 x u32 little-endian) blocks.
fn salsa20_8(input: &[u8], output: &mut [u8]) {
    let mut x = [0u32; 16];
    read_u32v_le(&mut x, input);

    let rounds = 8;

    // One quarter-round step: x[set] ^= (x[a] + x[b]) <<< rot.
    // wrapping_add is required: Salsa20 arithmetic is defined modulo 2^32.
    macro_rules! run_round (
        ($($set_idx:expr, $idx_a:expr, $idx_b:expr, $rot:expr);*) => { {
            $( x[$set_idx] ^= x[$idx_a].wrapping_add(x[$idx_b]).rotate_left($rot); )*
        } }
    );

    // Each iteration applies two rounds (the 32 steps below), so
    // rounds / 2 = 4 iterations give the full 8 rounds.
    for _ in (0..rounds / 2) {
        run_round!(
            0x4, 0x0, 0xc, 7;
            0x8, 0x4, 0x0, 9;
            0xc, 0x8, 0x4, 13;
            0x0, 0xc, 0x8, 18;
            0x9, 0x5, 0x1, 7;
            0xd, 0x9, 0x5, 9;
            0x1, 0xd, 0x9, 13;
            0x5, 0x1, 0xd, 18;
            0xe, 0xa, 0x6, 7;
            0x2, 0xe, 0xa, 9;
            0x6, 0x2, 0xe, 13;
            0xa, 0x6, 0x2, 18;
            0x3, 0xf, 0xb, 7;
            0x7, 0x3, 0xf, 9;
            0xb, 0x7, 0x3, 13;
            0xf, 0xb, 0x7, 18;
            0x1, 0x0, 0x3, 7;
            0x2, 0x1, 0x0, 9;
            0x3, 0x2, 0x1, 13;
            0x0, 0x3, 0x2, 18;
            0x6, 0x5, 0x4, 7;
            0x7, 0x6, 0x5, 9;
            0x4, 0x7, 0x6, 13;
            0x5, 0x4, 0x7, 18;
            0xb, 0xa, 0x9, 7;
            0x8, 0xb, 0xa, 9;
            0x9, 0x8, 0xb, 13;
            0xa, 0x9, 0x8, 18;
            0xc, 0xf, 0xe, 7;
            0xd, 0xc, 0xf, 9;
            0xe, 0xd, 0xc, 13;
            0xf, 0xe, 0xd, 18
        )
    }

    // Feed-forward: add the original input words to the permuted state (mod 2^32).
    for i in (0..16) {
        write_u32_le(
            &mut output[i * 4..(i + 1) * 4],
            x[i].wrapping_add(read_u32_le(&input[i * 4..(i + 1) * 4])));
    }
}
/// Byte-wise XOR of `x` and `y` into `output`.
/// Only as many bytes as the shortest of the three slices are written;
/// any remaining bytes of `output` are left untouched.
fn xor(x: &[u8], y: &[u8], output: &mut [u8]) {
    let pairs = x.iter().zip(y.iter());
    for (dst, (&a, &b)) in output.iter_mut().zip(pairs) {
        *dst = a ^ b;
    }
}
// Execute the BlockMix operation
// input - the input vector. The length must be a multiple of 128.
// output - the output vector. Must be the same length as input.
fn scrypt_block_mix(input: &[u8], output: &mut [u8]) {
    // X starts as the last 64-byte sub-block of the input.
    let mut x = [0u8; 64];
    copy_memory(&mut x, &input[input.len() - 64..]);

    let mut t = [0u8; 64];

    for (i, chunk) in input.chunks(64).enumerate() {
        // X = Salsa20/8(X xor B[i])
        xor(&x, chunk, &mut t);
        salsa20_8(&t, &mut x);
        // BlockMix interleaving: even-indexed results fill the first half of
        // the output, odd-indexed results fill the second half.
        let pos = if i % 2 == 0 { (i / 2) * 64 } else { (i / 2) * 64 + input.len() / 2 };
        copy_memory(&mut output[pos..pos + 64], &x);
    }
}
// Execute the ROMix operation in-place.
// b - the data to operate on
// v - a temporary variable to store the vector V
// t - a temporary variable to store the result of the xor
// n - the scrypt parameter N
fn scrypt_ro_mix(b: &mut [u8], v: &mut [u8], t: &mut [u8], n: usize) {
    // Interpret the first 4 bytes of the last 64-byte sub-block of x as a
    // little-endian integer, reduced mod n.
    fn integerify(x: &[u8], n: usize) -> usize {
        // n is a power of 2, so n - 1 gives us a bitmask that we can use to perform a calculation
        // mod n using a simple bitwise and.
        let mask = n - 1;
        // This cast is safe since we're going to get the value mod n (which is a power of 2), so we
        // don't have to care about truncating any of the high bits off
        let result = (read_u32_le(&x[x.len() - 64..x.len() - 60]) as usize) & mask;
        result
    }

    let len = b.len();

    // Phase 1: fill V with successive BlockMix iterates of B.
    // V[i] = B; B = BlockMix(B)
    for chunk in v.chunks_mut(len) {
        copy_memory(chunk, b);
        scrypt_block_mix(chunk, b);
    }

    // Phase 2: n rounds of data-dependent mixing.
    // j = Integerify(B) mod n; B = BlockMix(B xor V[j])
    for _ in (0..n) {
        let j = integerify(b, n);
        xor(b, &v[j * len..(j + 1) * len], t);
        scrypt_block_mix(t, b);
    }
}
/**
 * The Scrypt parameter values.
 */
#[derive(Clone, Copy)]
pub struct ScryptParams {
    log_n: u8, // log2 of the CPU/memory cost parameter N
    r: u32,    // block size parameter
    p: u32     // parallelization parameter
}
impl ScryptParams {
    /**
     * Create a new instance of ScryptParams.
     *
     * # Arguments
     *
     * * log_n - The log2 of the Scrypt parameter N
     * * r - The Scrypt parameter r
     * * p - The Scrypt parameter p
     *
     * # Panics
     *
     * Panics if any parameter is zero, if 2^log_n does not fit in a usize,
     * or if the parameters violate scrypt's size limits / would overflow
     * the internal buffer-size computations.
     */
    pub fn new(log_n: u8, r: u32, p: u32) -> ScryptParams {
        assert!(r > 0);
        assert!(p > 0);
        assert!(log_n > 0);
        // n = 2^log_n must be representable as a usize.
        assert!((log_n as usize) < size_of::<usize>() * 8);

        let r = r.to_usize().unwrap();
        let p = p.to_usize().unwrap();

        let n: usize = 1 << log_n;

        // check that r * 128 doesn't overflow
        let r128 = match r.checked_mul(128) {
            Some(x) => x,
            None => panic!("Invalid Scrypt parameters.")
        };

        // check that n * r * 128 doesn't overflow
        match r128.checked_mul(n) {
            Some(_) => { },
            None => panic!("Invalid Scrypt parameters.")
        };

        // check that p * r * 128 doesn't overflow
        match r128.checked_mul(p) {
            Some(_) => { },
            None => panic!("Invalid Scrypt parameters.")
        };

        // This check required by Scrypt:
        // check: n < 2^(128 * r / 8)
        // r * 16 won't overflow since r128 didn't
        assert!((log_n as usize) < r * 16);

        // This check required by Scrypt:
        // check: p <= ((2^32-1) * 32) / (128 * r)
        // It takes a bit of re-arranging to get the check above into this form, but, it is indeed
        // the same.
        assert!(r * p < 0x40000000);

        ScryptParams {
            log_n: log_n,
            r: r as u32,
            p: p as u32
        }
    }
}
/**
 * The scrypt key derivation function.
 *
 * # Arguments
 *
 * * password - The password to process as a byte vector
 * * salt - The salt value to use as a byte vector
 * * params - The ScryptParams to use
 * * output - The resulting derived key is returned in this byte vector.
 *
 * # Panics
 *
 * Panics if output is empty or longer than scrypt's (2^32 - 1) * 32 byte limit.
 */
pub fn scrypt(password: &[u8], salt: &[u8], params: &ScryptParams, output: &mut [u8]) {
    // This check required by Scrypt:
    // check output.len() > 0 && output.len() <= (2^32 - 1) * 32
    assert!(output.len() > 0);
    assert!(output.len() / 32 <= 0xffffffff);

    // The checks in the ScryptParams constructor guarantee that the following is safe:
    let n = 1 << params.log_n;              // N = 2^log_n (CPU/memory cost)
    let r128 = (params.r as usize) * 128;   // size in bytes of one BlockMix block
    let pr128 = (params.p as usize) * r128; // total size of the working buffer B (p blocks)
    let nr128 = n * r128;                   // size of the scratch vector V used by ROMix

    let mut mac = Hmac::new(Sha256::new(), password);

    // B = PBKDF2-HMAC-SHA256(password, salt, c = 1)
    let mut b: Vec<u8> = repeat(0).take(pr128).collect();
    pbkdf2(&mut mac, salt, 1, b.as_mut_slice());

    // Scratch buffers reused across all p ROMix invocations.
    let mut v: Vec<u8> = repeat(0).take(nr128).collect();
    let mut t: Vec<u8> = repeat(0).take(r128).collect();

    // Each of the p blocks of B is mixed independently.
    for chunk in b.as_mut_slice().chunks_mut(r128) {
        scrypt_ro_mix(chunk, v.as_mut_slice(), t.as_mut_slice(), n);
    }

    // Final expansion: output = PBKDF2-HMAC-SHA256(password, B, c = 1)
    pbkdf2(&mut mac, &*b, 1, output.as_mut_slice());
}
/**
 * scrypt_simple is a helper function that should be sufficient for the majority of cases where
 * an application needs to use Scrypt to hash a password for storage. The result is a String that
 * contains the parameters used as part of its encoding. The scrypt_check function may be used on
 * a password to check if it is equal to a hashed value.
 *
 * # Format
 *
 * The format of the output is a modified version of the Modular Crypt Format that encodes algorithm
 * used and the parameter values. If all parameter values can each fit within a single byte, a
 * compact format is used (format 0). However, if any value cannot, an expanded format where the r
 * and p parameters are encoded using 4 bytes (format 1) is used. Both formats use a 128-bit salt
 * and a 256-bit hash. The format is indicated as "rscrypt" which is short for "Rust Scrypt format."
 *
 * $rscrypt$<format>$<base64(log_n,r,p)>$<base64(salt)>$<base64(hash)>$
 *
 * # Arguments
 *
 * * password - The password to process as a str
 * * params - The ScryptParams to use
 *
 */
pub fn scrypt_simple(password: &str, params: &ScryptParams) -> io::Result<String> {
    let mut rng = try!(OsRng::new());

    // 128-bit salt
    let salt: Vec<u8> = rng.gen_iter::<u8>().take(16).collect();

    // 256-bit derived key
    let mut dk = [0u8; 32];

    scrypt(password.as_bytes(), &*salt, params, &mut dk);

    let mut result = "$rscrypt$".to_string();
    if params.r < 256 && params.p < 256 {
        // Compact format (0): log_n, r and p each fit in a single byte.
        result.push_str("0$");
        let mut tmp = [0u8; 3];
        tmp[0] = params.log_n;
        tmp[1] = params.r as u8;
        tmp[2] = params.p as u8;
        result.push_str(&*tmp.to_base64(base64::STANDARD));
    } else {
        // Expanded format (1): r and p are stored as 4-byte little-endian values.
        result.push_str("1$");
        let mut tmp = [0u8; 9];
        tmp[0] = params.log_n;
        write_u32_le(&mut tmp[1..5], params.r);
        write_u32_le(&mut tmp[5..9], params.p);
        result.push_str(&*tmp.to_base64(base64::STANDARD));
    }
    result.push('$');
    result.push_str(&*salt.to_base64(base64::STANDARD));
    result.push('$');
    result.push_str(&*dk.to_base64(base64::STANDARD));
    result.push('$');
    Ok(result)
}
/**
 * scrypt_check compares a password against the result of a previous call to scrypt_simple and
 * returns true if the passed in password hashes to the same value.
 *
 * # Arguments
 *
 * * password - The password to process as a str
 * * hashed_value - A string representing a hashed password returned by scrypt_simple()
 *
 */
pub fn scrypt_check(password: &str, hashed_value: &str) -> Result<bool, &'static str> {
    static ERR_STR: &'static str = "Hash is not in Rust Scrypt format.";

    let mut iter = hashed_value.split('$');

    // Check that there are no characters before the first "$"
    match iter.next() {
        Some(x) => if x != "" { return Err(ERR_STR); },
        None => return Err(ERR_STR)
    }

    // Check the name
    match iter.next() {
        Some(t) => if t != "rscrypt" { return Err(ERR_STR); },
        None => return Err(ERR_STR)
    }

    // Parse format - currently only version 0 (compact) and 1 (expanded) are supported
    let params: ScryptParams;
    match iter.next() {
        Some(fstr) => {
            // Parse the parameters - the size of them depends on whether we are using the compact
            // or expanded format
            let pvec = match iter.next() {
                Some(pstr) => match pstr.from_base64() {
                    Ok(x) => x,
                    Err(_) => return Err(ERR_STR)
                },
                None => return Err(ERR_STR)
            };
            match fstr {
                // Compact: 3 bytes - log_n, r, p
                "0" => {
                    if pvec.len() != 3 { return Err(ERR_STR); }
                    let log_n = pvec[0] as u8;
                    let r = pvec[1] as u32;
                    let p = pvec[2] as u32;
                    params = ScryptParams::new(log_n, r, p);
                }
                // Expanded: 9 bytes - log_n followed by little-endian u32 r and p
                "1" => {
                    if pvec.len() != 9 { return Err(ERR_STR); }
                    let log_n = pvec[0];
                    let mut pval = [0u32; 2];
                    read_u32v_le(&mut pval, &pvec[1..9]);
                    params = ScryptParams::new(log_n, pval[0], pval[1]);
                }
                _ => return Err(ERR_STR)
            }
        }
        None => return Err(ERR_STR)
    }

    // Salt
    let salt = match iter.next() {
        Some(sstr) => match sstr.from_base64() {
            Ok(salt) => salt,
            Err(_) => return Err(ERR_STR)
        },
        None => return Err(ERR_STR)
    };

    // Hashed value
    let hash = match iter.next() {
        Some(hstr) => match hstr.from_base64() {
            Ok(hash) => hash,
            Err(_) => return Err(ERR_STR)
        },
        None => return Err(ERR_STR)
    };

    // Make sure that the input ends with a "$"
    match iter.next() {
        Some(x) => if x != "" { return Err(ERR_STR); },
        None => return Err(ERR_STR)
    }

    // Make sure there is no trailing data after the final "$"
    match iter.next() {
        Some(_) => return Err(ERR_STR),
        None => { }
    }

    // Re-derive the key from the candidate password with the recovered parameters.
    let mut output: Vec<u8> = repeat(0).take(hash.len()).collect();
    scrypt(password.as_bytes(), &*salt, &params, output.as_mut_slice());

    // Be careful here - it's important that the comparison be done using a fixed time equality
    // check. Otherwise an adversary that can measure how long this step takes can learn about the
    // hashed value which would allow them to mount an offline brute force attack against the
    // hashed password.
    Ok(fixed_time_eq(&*output, &*hash))
}
#[cfg(test)]
mod test {
    use std::iter::repeat;

    use scrypt::{scrypt, scrypt_simple, scrypt_check, ScryptParams};

    // One known-answer test case for the raw scrypt function.
    struct Test {
        password: &'static str,
        salt: &'static str,
        log_n: u8,
        r: u32,
        p: u32,
        expected: Vec<u8>
    }

    // Test vectors from [1]. The last test vector is omitted because it takes too long to run.
    fn tests() -> Vec<Test> {
        vec![
            Test {
                password: "",
                salt: "",
                log_n: 4,
                r: 1,
                p: 1,
                expected: vec![
                    0x77, 0xd6, 0x57, 0x62, 0x38, 0x65, 0x7b, 0x20,
                    0x3b, 0x19, 0xca, 0x42, 0xc1, 0x8a, 0x04, 0x97,
                    0xf1, 0x6b, 0x48, 0x44, 0xe3, 0x07, 0x4a, 0xe8,
                    0xdf, 0xdf, 0xfa, 0x3f, 0xed, 0xe2, 0x14, 0x42,
                    0xfc, 0xd0, 0x06, 0x9d, 0xed, 0x09, 0x48, 0xf8,
                    0x32, 0x6a, 0x75, 0x3a, 0x0f, 0xc8, 0x1f, 0x17,
                    0xe8, 0xd3, 0xe0, 0xfb, 0x2e, 0x0d, 0x36, 0x28,
                    0xcf, 0x35, 0xe2, 0x0c, 0x38, 0xd1, 0x89, 0x06 ]
            },
            Test {
                password: "password",
                salt: "NaCl",
                log_n: 10,
                r: 8,
                p: 16,
                expected: vec![
                    0xfd, 0xba, 0xbe, 0x1c, 0x9d, 0x34, 0x72, 0x00,
                    0x78, 0x56, 0xe7, 0x19, 0x0d, 0x01, 0xe9, 0xfe,
                    0x7c, 0x6a, 0xd7, 0xcb, 0xc8, 0x23, 0x78, 0x30,
                    0xe7, 0x73, 0x76, 0x63, 0x4b, 0x37, 0x31, 0x62,
                    0x2e, 0xaf, 0x30, 0xd9, 0x2e, 0x22, 0xa3, 0x88,
                    0x6f, 0xf1, 0x09, 0x27, 0x9d, 0x98, 0x30, 0xda,
                    0xc7, 0x27, 0xaf, 0xb9, 0x4a, 0x83, 0xee, 0x6d,
                    0x83, 0x60, 0xcb, 0xdf, 0xa2, 0xcc, 0x06, 0x40 ]
            },
            Test {
                password: "pleaseletmein",
                salt: "SodiumChloride",
                log_n: 14,
                r: 8,
                p: 1,
                expected: vec![
                    0x70, 0x23, 0xbd, 0xcb, 0x3a, 0xfd, 0x73, 0x48,
                    0x46, 0x1c, 0x06, 0xcd, 0x81, 0xfd, 0x38, 0xeb,
                    0xfd, 0xa8, 0xfb, 0xba, 0x90, 0x4f, 0x8e, 0x3e,
                    0xa9, 0xb5, 0x43, 0xf6, 0x54, 0x5d, 0xa1, 0xf2,
                    0xd5, 0x43, 0x29, 0x55, 0x61, 0x3f, 0x0f, 0xcf,
                    0x62, 0xd4, 0x97, 0x05, 0x24, 0x2a, 0x9a, 0xf9,
                    0xe6, 0x1e, 0x85, 0xdc, 0x0d, 0x65, 0x1e, 0x40,
                    0xdf, 0xcf, 0x01, 0x7b, 0x45, 0x57, 0x58, 0x87 ]
            },
        ]
    }

    // Known-answer tests for the raw KDF against the published vectors.
    #[test]
    fn test_scrypt() {
        let tests = tests();
        for t in tests.iter() {
            let mut result: Vec<u8> = repeat(0).take(t.expected.len()).collect();
            let params = ScryptParams::new(t.log_n, t.r, t.p);
            scrypt(t.password.as_bytes(), t.salt.as_bytes(), &params, result.as_mut_slice());
            assert!(result == t.expected);
        }
    }

    // Round-trip test shared by both storage formats: hash twice (salts must
    // differ), then verify correct and incorrect passwords against each hash.
    fn test_scrypt_simple(log_n: u8, r: u32, p: u32) {
        let password = "password";

        let params = ScryptParams::new(log_n, r, p);
        let out1 = scrypt_simple(password, &params).unwrap();
        let out2 = scrypt_simple(password, &params).unwrap();

        // This just makes sure that a salt is being applied. It doesn't verify that that salt is
        // cryptographically strong, however.
        assert!(out1 != out2);

        match scrypt_check(password, &out1[..]) {
            Ok(r) => assert!(r),
            Err(_) => panic!()
        }
        match scrypt_check(password, &out2[..]) {
            Ok(r) => assert!(r),
            Err(_) => panic!()
        }

        match scrypt_check("wrong", &out1[..]) {
            Ok(r) => assert!(!r),
            Err(_) => panic!()
        }
        match scrypt_check("wrong", &out2[..]) {
            Ok(r) => assert!(!r),
            Err(_) => panic!()
        }
    }

    #[test]
    fn test_scrypt_simple_compact() {
        // These parameters are intentionally very weak - the goal is to make the test run quickly!
        test_scrypt_simple(7, 8, 1);
    }

    #[test]
    fn test_scrypt_simple_expanded() {
        // These parameters are intentionally very weak - the goal is to make the test run quickly!
        // p = 256 forces the expanded (format 1) encoding.
        test_scrypt_simple(3, 1, 256);
    }
}
|
use configuration::Configuration;
use std::slice::SliceExt;
use std::cmp::Ordering;
use score::Score;
// Immutable snapshot of one step of an interactive fuzzy search.
// Each user action produces a new Search value.
struct Search {
    config: Configuration,       // choices, initial query, visible_limit
    current: uint,               // index of the highlighted entry in `result`
    query: String,               // the text typed so far
    selection: Option<String>,   // the highlighted choice, None when nothing matches
    result: Vec<String>,         // choices matching `query`, best match first
}
impl Search {
    // Build the initial state: query comes from the configuration and every
    // configured choice is in the result list.
    fn blank(config: Configuration) -> Search {
        let query = config.initial_search.clone();
        let previous_result = config.choices.clone();
        Search::new(config, query, previous_result, 0)
    }

    // Construct a state, deriving `selection` from `result` and `index`.
    fn new(config: Configuration, query: String, result: Vec<String>, index: uint) -> Search {
        let selection = Search::select(&result, index);
        Search { config: config,
                 current: index,
                 query: query,
                 selection: selection,
                 result: result }
    }

    // Same state with the cursor moved to `index`.
    fn new_for_index(self, index: uint) -> Search {
        Search::new(self.config, self.query, self.result, index)
    }

    // Score every choice against `query`, drop non-matches (score <= 0),
    // and return the matches ordered best-first.
    fn filter(query: &str, choices: &Vec<String>) -> Vec<String> {
        let mut filtered = choices.iter().filter_map( |choice| {
            let quality = Score::score(choice.as_slice(), query);
            if quality > 0.0 {
                Some((quality, choice.to_string()))
            } else {
                None
            }
        }).collect::<Vec<(f32, String)>>();

        // Descending by score: ascending partial_cmp, then reversed.
        filtered.sort_by( |&(quality_a, _), &(quality_b, _)| {
            quality_a.partial_cmp(&quality_b).unwrap_or(Ordering::Equal).reverse()
        });

        filtered.iter().map( |&(_, ref choice)| choice.to_string() ).collect::<Vec<String>>()
    }

    // NOTE(review): if `result` is non-empty but `index >= result.len()` (a
    // stale cursor after re-filtering shrank the list), the indexing below
    // panics — consider bounds-checking `index` as well.
    fn select(result: &Vec<String>, index: uint) -> Option<String> {
        if result.len() > 0 {
            Some(result[index].to_string())
        } else {
            None
        }
    }

    fn down(self) -> Search {
        let next_index = self.next_index();
        self.new_for_index(next_index)
    }

    fn up(self) -> Search {
        let next_index = self.prev_index();
        self.new_for_index(next_index)
    }

    // Append typed input to the query and narrow the *previous* result set
    // (not the full choice list), so the match set only ever shrinks.
    fn append_to_search(self, input: &str) -> Search {
        let mut new_query = self.query;
        new_query.push_str(input.as_slice());
        let new_result = Search::filter(new_query.as_slice(), &self.result);
        Search::new(self.config, new_query, new_result, self.current)
    }

    // Cursor movement wraps around at the configured visible limit.
    fn next_index(&self) -> uint {
        let next_index = self.current + 1;
        if next_index >= self.config.visible_limit { 0 } else { next_index }
    }

    fn prev_index(&self) -> uint {
        if self.current == 0 { self.config.visible_limit - 1 } else { self.current - 1 }
    }
}
#[cfg(test)]
fn one_two_three() -> Vec<String> {
    // Fixture: the three canonical choices used by the tests below.
    ["one", "two", "three"].iter().map(|s| s.to_string()).collect()
}
// The blank state highlights the best (first) choice.
#[test]
fn it_selects_the_first_choice_by_default() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.selection, Some("one".to_string()));
}

#[test]
fn it_selets_the_second_when_down_is_called() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.down().selection, Some("two".to_string()));
}

// Moving past the last entry wraps back to the top.
#[test]
fn it_loop_around_when_reaching_end_of_list() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.down().down().down().down().selection, Some("two".to_string()));
}

// Moving above the first entry wraps to the bottom.
#[test]
fn it_loop_around_when_reaching_top_of_list() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.up().up().selection, Some("two".to_string()));
}

// Wrap-around honors the visible_limit (2 here), not the full list length.
#[test]
fn it_loop_around_when_reaching_visible_limit() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, Some(2));
    let search = Search::blank(config);
    assert_eq!(search.down().down().down().selection, Some("two".to_string()));
}

#[test]
fn it_moves_down_the_filtered_search_results() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.append_to_search("t").down().selection, Some("three".to_string()));
}

#[test]
fn it_moves_down_the_filtered_search_results_twice() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.append_to_search("t").append_to_search("w").selection, Some("two".to_string()));
}

// A query that matches nothing clears the selection.
#[test]
fn it_handles_not_matching_anything() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.append_to_search("x").selection, None);
}

#[test]
fn up_match_nothing_after_filtering_all_out() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config).append_to_search("x");
    assert_eq!(search.up().selection, None);
}

#[test]
fn down_match_nothing_after_filtering_all_out() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config).append_to_search("x");
    assert_eq!(search.down().selection, None);
}
Implements backspace
use configuration::Configuration;
use std::slice::SliceExt;
use std::cmp::Ordering;
use score::Score;
// Immutable snapshot of one step of an interactive fuzzy search.
// Each user action (typing, backspace, cursor moves) produces a new Search.
struct Search {
    config: Configuration,       // choices, initial query, visible_limit
    current: uint,               // index of the highlighted entry in `result`
    query: String,               // the text typed so far
    selection: Option<String>,   // the highlighted choice, None when nothing matches
    result: Vec<String>,         // choices matching `query`, best match first
}
impl Search {
    // Build the initial state: query comes from the configuration and every
    // configured choice is in the result list.
    fn blank(config: Configuration) -> Search {
        let query = config.initial_search.clone();
        let previous_result = config.choices.clone();
        Search::new(config, query, previous_result, 0)
    }

    // Construct a state, deriving `selection` from `result` and `index`.
    fn new(config: Configuration, query: String, result: Vec<String>, index: uint) -> Search {
        let selection = Search::select(&result, index);
        Search { config: config,
                 current: index,
                 query: query,
                 selection: selection,
                 result: result }
    }

    // Same state with the cursor moved to `index`.
    fn new_for_index(self, index: uint) -> Search {
        Search::new(self.config, self.query, self.result, index)
    }

    // Re-filter the full choice list (not the previous result) against the
    // new query; this is what lets backspace widen the result set again.
    fn new_for_query(self, new_query: String) -> Search {
        let new_result = Search::filter(new_query.as_slice(), &self.config.choices);
        Search::new(self.config, new_query, new_result, self.current)
    }

    // Score every choice against `query`, drop non-matches (score <= 0),
    // and return the matches ordered best-first.
    fn filter(query: &str, choices: &Vec<String>) -> Vec<String> {
        let mut filtered = choices.iter().filter_map( |choice| {
            let quality = Score::score(choice.as_slice(), query);
            if quality > 0.0 {
                Some((quality, choice.to_string()))
            } else {
                None
            }
        }).collect::<Vec<(f32, String)>>();

        // Descending by score: ascending partial_cmp, then reversed.
        filtered.sort_by( |&(quality_a, _), &(quality_b, _)| {
            quality_a.partial_cmp(&quality_b).unwrap_or(Ordering::Equal).reverse()
        });

        filtered.iter().map( |&(_, ref choice)| choice.to_string() ).collect::<Vec<String>>()
    }

    // Return the choice at `index`, or None when the list is empty OR the
    // index is out of bounds. The bounds check matters: re-filtering can
    // shrink `result` while `current` still points past its new end (e.g.
    // down() followed by backspace()/append_to_search()); the previous
    // `result[index]` would panic in that case.
    fn select(result: &Vec<String>, index: uint) -> Option<String> {
        if index < result.len() {
            Some(result[index].to_string())
        } else {
            None
        }
    }

    // Move the highlight to the next visible entry (wraps at visible_limit).
    fn down(self) -> Search {
        let next_index = self.next_index();
        self.new_for_index(next_index)
    }

    // Move the highlight to the previous visible entry (wraps at the top).
    fn up(self) -> Search {
        let next_index = self.prev_index();
        self.new_for_index(next_index)
    }

    // Append typed input to the query and re-filter.
    fn append_to_search(self, input: &str) -> Search {
        let mut new_query = self.query.clone();
        new_query.push_str(input.as_slice());
        self.new_for_query(new_query)
    }

    // Remove the last character of the query, re-filter, and reset the
    // cursor to the top of the (possibly wider) result list.
    fn backspace(self) -> Search {
        let mut new_query = self.query.clone();
        new_query.pop();
        self.new_for_query(new_query).new_for_index(0)
    }

    fn next_index(&self) -> uint {
        let next_index = self.current + 1;
        if next_index >= self.config.visible_limit { 0 } else { next_index }
    }

    fn prev_index(&self) -> uint {
        if self.current == 0 { self.config.visible_limit - 1 } else { self.current - 1 }
    }
}
#[cfg(test)]
fn one_two_three() -> Vec<String> {
    // Fixture: the three canonical choices used by the tests below.
    ["one", "two", "three"].iter().map(|s| s.to_string()).collect()
}
// The blank state highlights the best (first) choice.
#[test]
fn it_selects_the_first_choice_by_default() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.selection, Some("one".to_string()));
}

#[test]
fn it_selets_the_second_when_down_is_called() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.down().selection, Some("two".to_string()));
}

// Moving past the last entry wraps back to the top.
#[test]
fn it_loop_around_when_reaching_end_of_list() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.down().down().down().down().selection, Some("two".to_string()));
}

// Moving above the first entry wraps to the bottom.
#[test]
fn it_loop_around_when_reaching_top_of_list() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.up().up().selection, Some("two".to_string()));
}

// Wrap-around honors the visible_limit (2 here), not the full list length.
#[test]
fn it_loop_around_when_reaching_visible_limit() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, Some(2));
    let search = Search::blank(config);
    assert_eq!(search.down().down().down().selection, Some("two".to_string()));
}

#[test]
fn it_moves_down_the_filtered_search_results() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.append_to_search("t").down().selection, Some("three".to_string()));
}

#[test]
fn it_moves_down_the_filtered_search_results_twice() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.append_to_search("t").append_to_search("w").selection, Some("two".to_string()));
}

// A query that matches nothing clears the selection.
#[test]
fn it_handles_not_matching_anything() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config);
    assert_eq!(search.append_to_search("x").selection, None);
}

#[test]
fn up_match_nothing_after_filtering_all_out() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config).append_to_search("x");
    assert_eq!(search.up().selection, None);
}

#[test]
fn down_match_nothing_after_filtering_all_out() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config).append_to_search("x");
    assert_eq!(search.down().selection, None);
}

// backspace() removes the last typed character from the query.
#[test]
fn backspaces_over_characters() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config).append_to_search("e");
    assert_eq!(search.query, "e");
    assert_eq!(search.backspace().query, "");
}

// backspace() also resets the cursor back to the top of the list.
#[test]
fn resets_the_index() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config).append_to_search("e");
    assert_eq!(search.down().backspace().current, 0);
}

// Because backspace re-filters against the full choice list, previously
// filtered-out choices reappear.
#[test]
fn previous_results_appear_after_backspace() {
    let input = one_two_three();
    let config = Configuration::from_inputs(input, None, None);
    let search = Search::blank(config).append_to_search("t");
    assert_eq!(search.backspace().result.len(), 3);
}
|
// Copyright (c) 2014-2016 Sandstorm Development Group, Inc.
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use gj::{Promise, EventLoop};
use capnp::Error;
use capnp_rpc::{RpcSystem, twoparty, rpc_twoparty_capnp};
use rustc_serialize::{base64, hex, json};
use std::collections::hash_map::HashMap;
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use collections_capnp::ui_view_metadata;
use sandstorm::powerbox_capnp::powerbox_descriptor;
use sandstorm::identity_capnp::{user_info};
use sandstorm::grain_capnp::{session_context, ui_view, ui_session, sandstorm_api};
use sandstorm::grain_capnp::{static_asset};
use sandstorm::web_session_capnp::{web_session};
use sandstorm::web_session_capnp::web_session::web_socket_stream;
// Activity-event type indices, matching the event types registered via
// `init_event_types` in `UiView::get_view_info`.
const ADD_GRAIN_ACTIVITY_INDEX: u16 = 0;
const REMOVE_GRAIN_ACTIVITY_INDEX: u16 = 1;
/// Server end of one client websocket connection. Dropping it unregisters
/// the subscriber from the shared `SavedUiViewSet`.
pub struct WebSocketStream {
    // Key into `SavedUiViewSet::subscribers` for this connection.
    id: u64,
    // Set when we send a PING; cleared by `send_bytes` when the PONG arrives.
    awaiting_pong: Rc<Cell<bool>>,
    // Held only to keep the keepalive ping/pong loop alive for our lifetime.
    _ping_pong_promise: Promise<(), Error>,
    saved_ui_views: Rc<RefCell<SavedUiViewSet>>,
}
impl Drop for WebSocketStream {
    fn drop(&mut self) {
        // Unregister so the set stops broadcasting to a dead connection.
        self.saved_ui_views.borrow_mut().subscribers.remove(&self.id);
    }
}
/// Keepalive loop: sends a websocket PING frame, waits 10 seconds, and checks
/// that a PONG came back (`awaiting_pong` is cleared by `send_bytes`). If it
/// did, recurses to ping again; otherwise the returned promise fails.
fn do_ping_pong(client_stream: web_socket_stream::Client,
                timer: ::gjio::Timer,
                awaiting_pong: Rc<Cell<bool>>) -> Promise<(), Error>
{
    let mut req = client_stream.send_bytes_request();
    req.get().set_message(&[0x89, 0]); // PING: FIN + opcode 0x9, zero-length payload
    let promise = req.send().promise;
    awaiting_pong.set(true);
    promise.then(move |_| {
        timer.after_delay(::std::time::Duration::new(10, 0)).lift().then(move |_| {
            if awaiting_pong.get() {
                Promise::err(Error::failed("pong not received within 10 seconds".into()))
            } else {
                do_ping_pong(client_stream, timer, awaiting_pong)
            }
        })
    })
}
impl WebSocketStream {
    /// Creates the stream wrapper and starts the keepalive ping/pong loop.
    /// Keepalive failures are logged and swallowed rather than propagated.
    fn new(id: u64,
           client_stream: web_socket_stream::Client,
           timer: ::gjio::Timer,
           saved_ui_views: Rc<RefCell<SavedUiViewSet>>)
           -> WebSocketStream
    {
        let awaiting = Rc::new(Cell::new(false));
        // eagerly_evaluate() makes the loop run even though nothing awaits it.
        let ping_pong_promise = do_ping_pong(client_stream,
                                             timer,
                                             awaiting.clone()).map_else(|r| match r {
            Ok(_) => Ok(()),
            Err(e) => { println!("ERROR {}", e); Ok(()) }
        }).eagerly_evaluate();
        WebSocketStream {
            id: id,
            awaiting_pong: awaiting,
            _ping_pong_promise: ping_pong_promise,
            saved_ui_views: saved_ui_views,
        }
    }
}
impl web_socket_stream::Server for WebSocketStream {
    /// Handles a raw websocket frame from the client. Only PONG affects our
    /// state (it feeds the keepalive loop); all other opcodes are ignored.
    fn send_bytes(&mut self,
                  params: web_socket_stream::SendBytesParams,
                  _results: web_socket_stream::SendBytesResults)
                  -> Promise<(), Error>
    {
        let message = pry!(pry!(params.get()).get_message());
        let opcode = message[0] & 0xf; // low nibble of byte 0 is the opcode (RFC 6455 §5.2)
        // Mask bit and 7-bit length of byte 1; parsed but unused — payloads
        // are not interpreted here.
        let _masked = (message[1] & 0x80) != 0;
        let _length = message[1] & 0x7f;
        match opcode {
            0x0 => { // CONTINUE
            }
            0x1 => { // UTF-8 PAYLOAD
            }
            0x2 => { // BINARY PAYLOAD
            }
            0x8 => { // TERMINATE
                // TODO: drop things to get them to close.
            }
            0x9 => { // PING
                //TODO
                println!("the client sent us a ping!");
            }
            0xa => { // PONG
                self.awaiting_pong.set(false);
            }
            _ => { // OTHER
                println!("unrecognized websocket opcode {}", opcode);
            }
        }
        Promise::ok(())
    }
}
/// Wraps `message` in a single websocket text frame (FIN set, opcode 0x1,
/// unmasked — server-to-client frames are not masked) and stores the bytes
/// into `params`.
fn encode_websocket_message(mut params: web_socket_stream::send_bytes_params::Builder,
                            message: &str)
{
    // TODO(perf) avoid this allocation
    // Widen to u64 so the 64-bit shifts below are valid on 32-bit targets.
    let len = message.len() as u64;
    let mut bytes: Vec<u8> = Vec::with_capacity(message.len() + 10);
    bytes.push(0x81); // FIN + text opcode
    if len < 126 {
        // 7-bit payload length, stored directly.
        bytes.push(len as u8);
    } else if len < 1 << 16 {
        // 16-bit extended length, big-endian.
        bytes.push(0x7e);
        bytes.push((len >> 8) as u8);
        bytes.push(len as u8);
    } else {
        // 64-bit extended length, big-endian.
        // Bug fix: the >>24 and >>16 bytes were previously emitted in the
        // wrong order, corrupting the length field for payloads >= 64 KiB.
        bytes.push(0x7f);
        bytes.push((len >> 56) as u8);
        bytes.push((len >> 48) as u8);
        bytes.push((len >> 40) as u8);
        bytes.push((len >> 32) as u8);
        bytes.push((len >> 24) as u8);
        bytes.push((len >> 16) as u8);
        bytes.push((len >> 8) as u8);
        bytes.push(len as u8);
    }
    bytes.extend_from_slice(message.as_bytes());
    params.set_message(&bytes[..]);
}
/// Metadata for one saved grain, persisted on disk as a `UiViewMetadata`
/// capnp message in a file named after the grain's base64 sturdyref token.
#[derive(Clone)]
struct SavedUiViewData {
    title: String,
    // Milliseconds since the Unix epoch.
    date_added: u64,
    // Hex-encoded identity ID of the user who added the grain.
    added_by: String,
}
impl SavedUiViewData {
    /// Hand-rolled JSON encoding. `title` is escaped via `ToJson`, but
    /// `added_by` is interpolated raw — fine for hex identity IDs, but
    /// NOTE(review): would break if a caller ever passed arbitrary text.
    fn to_json(&self) -> String {
        format!("{{\"title\":{},\"dateAdded\": \"{}\",\"addedBy\":\"{}\"}}",
                json::ToJson::to_json(&self.title),
                self.date_added,
                self.added_by)
    }
}
/// App title and grain icon URL fetched from a saved grain's `ViewInfo`.
#[derive(Clone)]
struct ViewInfoData {
    app_title: String,
    grain_icon_url: String,
}
impl ViewInfoData {
    /// Hand-rolled JSON encoding. `app_title` is escaped via `ToJson`;
    /// `grain_icon_url` is interpolated raw (built from protocol + host path
    /// in `retrieve_view_info`).
    fn to_json(&self) -> String {
        format!("{{\"appTitle\":{},\"grainIconUrl\":\"{}\"}}",
                json::ToJson::to_json(&self.app_title),
                self.grain_icon_url)
    }
}
/// One incremental update pushed to websocket subscribers as a JSON message.
#[derive(Clone)]
enum Action {
    // A grain was saved (also replayed to each new subscriber).
    Insert { token: String, data: SavedUiViewData },
    // A saved grain was deleted.
    Remove { token: String },
    // View info (app title / icon URL) finished loading for a saved grain.
    ViewInfo { token: String, data: ViewInfoData },
    // Whether the current user may modify the collection.
    CanWrite(bool),
    // The collection's description text changed.
    Description(String),
}
impl Action {
    /// Serializes the action as the single-key JSON object the client expects.
    fn to_json(&self) -> String {
        match *self {
            Action::Insert { ref token, ref data } =>
                format!("{{\"insert\":{{\"token\":\"{}\",\"data\":{} }} }}",
                        token, data.to_json()),
            Action::Remove { ref token } =>
                format!("{{\"remove\":{{\"token\":\"{}\"}}}}", token),
            Action::ViewInfo { ref token, ref data } =>
                format!("{{\"viewInfo\":{{\"token\":\"{}\",\"data\":{} }} }}",
                        token, data.to_json()),
            Action::CanWrite(b) =>
                format!("{{\"canWrite\":{}}}", b),
            Action::Description(ref s) =>
                format!("{{\"description\":{}}}", json::ToJson::to_json(s)),
        }
    }
}
/// Reaper for the fire-and-forget task set: just logs failed tasks.
struct Reaper;
impl ::gj::TaskReaper<(), Error> for Reaper {
    fn task_failed(&mut self, error: Error) {
        // TODO better message.
        println!("task failed: {}", error);
    }
}
/// Shared collection state: the saved grains (persisted one file per token
/// under `base_path`), their lazily fetched view infos, the description,
/// and the live websocket subscribers to broadcast changes to.
pub struct SavedUiViewSet {
    base_path: ::std::path::PathBuf,
    views: HashMap<String, SavedUiViewData>,
    view_infos: HashMap<String, ViewInfoData>,
    // Next subscriber ID to hand out.
    next_id: u64,
    subscribers: HashMap<u64, web_socket_stream::Client>,
    // Fire-and-forget promises (broadcasts, view-info fetches).
    tasks: ::gj::TaskSet<(), Error>,
    description: String,
    sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
}
impl SavedUiViewSet {
    /// Loads the persisted collection state: reads `/var/description`
    /// (creating it empty on first run), then loads one `UiViewMetadata`
    /// message per token file in `token_directory`, discarding leftover
    /// `.uploading` temp files, and kicks off a view-info fetch for each
    /// saved grain.
    pub fn new<P>(token_directory: P,
                  sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>)
                  -> ::capnp::Result<Rc<RefCell<SavedUiViewSet>>>
        where P: AsRef<::std::path::Path>
    {
        let description = match ::std::fs::File::open("/var/description") {
            Ok(mut f) => {
                use std::io::Read;
                let mut result = String::new();
                try!(f.read_to_string(&mut result));
                result
            }
            Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => {
                // First run: create an empty description file.
                use std::io::Write;
                let mut f = try!(::std::fs::File::create("/var/description"));
                let result = "";
                try!(f.write_all(result.as_bytes()));
                result.into()
            }
            Err(e) => {
                return Err(e.into());
            }
        };
        let result = Rc::new(RefCell::new(SavedUiViewSet {
            base_path: token_directory.as_ref().to_path_buf(),
            views: HashMap::new(),
            view_infos: HashMap::new(),
            next_id: 0,
            subscribers: HashMap::new(),
            tasks: ::gj::TaskSet::new(Box::new(Reaper)),
            description: description,
            sandstorm_api: sandstorm_api,
        }));
        // create token directory if it does not yet exist
        try!(::std::fs::create_dir_all(&token_directory));
        for token_file in try!(::std::fs::read_dir(&token_directory)) {
            let dir_entry = try!(token_file);
            let token: String = match dir_entry.file_name().to_str() {
                None => {
                    println!("malformed token: {:?}", dir_entry.file_name());
                    continue
                }
                Some(s) => s.into(),
            };
            if token.ends_with(".uploading") {
                // Leftover from an interrupted write; the real file never landed.
                try!(::std::fs::remove_file(dir_entry.path()));
            } else {
                let mut reader = try!(::std::fs::File::open(dir_entry.path()));
                let message = try!(::capnp::serialize::read_message(&mut reader,
                                                                    Default::default()));
                let metadata: ui_view_metadata::Reader = try!(message.get_root());
                let entry = SavedUiViewData {
                    title: try!(metadata.get_title()).into(),
                    date_added: metadata.get_date_added(),
                    added_by: try!(metadata.get_added_by()).into(),
                };
                result.borrow_mut().views.insert(token.clone(), entry);
                try!(SavedUiViewSet::retrieve_view_info(&result, token));
            }
        }
        Ok(result)
    }
    /// Restores the saved grain from its sturdyref `token`, calls
    /// `getViewInfo`, resolves the grain icon URL, then caches the result in
    /// `view_infos` and broadcasts a `viewInfo` action. Runs asynchronously
    /// on the set's task list; failures are reported by the `Reaper`.
    fn retrieve_view_info(set_ref: &Rc<RefCell<SavedUiViewSet>>,
                          token: String) -> ::capnp::Result<()> {
        // SandstormApi.restore, then call getViewInfo,
        // then call get_url() on the grain static asset.
        let set = set_ref.clone();
        let binary_token = match base64::FromBase64::from_base64(&token[..]) {
            Ok(b) => b,
            Err(e) => return Err(Error::failed(format!("{}", e))),
        };
        let mut req = set.borrow().sandstorm_api.restore_request();
        req.get().set_token(&binary_token);
        let task = req.send().promise.then(move |response| {
            let view: ui_view::Client =
                pry!(pry!(response.get()).get_cap().get_as_capability());
            view.get_view_info_request().send().promise.then(move |response| {
                let view_info = pry!(response.get());
                let app_title = pry!(pry!(view_info.get_app_title()).get_default_text()).to_string();
                let asset = pry!(view_info.get_grain_icon());
                asset.get_url_request().send().promise.then(move |response| {
                    let result = pry!(response.get());
                    let protocol = match pry!(result.get_protocol()) {
                        static_asset::Protocol::Https => "https".to_string(),
                        static_asset::Protocol::Http => "http".to_string(),
                    };
                    let info = ViewInfoData {
                        app_title: app_title,
                        grain_icon_url: format!("{}://{}", protocol, pry!(result.get_host_path())),
                    };
                    set.borrow_mut().view_infos.insert(token.clone(), info.clone());
                    let json_string = Action::ViewInfo { token: token, data: info }.to_json();
                    set.borrow_mut().send_message_to_subscribers(&json_string);
                    Promise::ok(())
                })
            })
        });
        set_ref.borrow_mut().tasks.add(task);
        Ok(())
    }
fn update_description(&mut self, description: &[u8]) -> ::capnp::Result<()> {
use std::io::Write;
let desc_string: String = match ::std::str::from_utf8(description) {
Err(e) => return Err(::capnp::Error::failed(format!("{}", e))),
Ok(d) => d.into(),
};
let temp_path = format!("/var/description.uploading");
try!(try!(::std::fs::File::create(&temp_path)).write_all(description));
try!(::std::fs::rename(temp_path, "/var/description"));
self.description = desc_string;
let json_string = Action::Description(self.description.clone()).to_json();
self.send_message_to_subscribers(&json_string);
Ok(())
}
fn insert(&mut self,
token: String,
title: String,
added_by: String) -> ::capnp::Result<()> {
let dur = ::std::time::SystemTime::now().duration_since(::std::time::UNIX_EPOCH).expect("TODO");
let date_added = dur.as_secs() * 1000 + (dur.subsec_nanos() / 1000000) as u64;
let mut token_path = ::std::path::PathBuf::new();
token_path.push(self.base_path.clone());
token_path.push(token.clone());
let mut temp_path = ::std::path::PathBuf::new();
temp_path.push(self.base_path.clone());
temp_path.push(format!("{}.uploading", token));
let mut writer = try!(::std::fs::File::create(&temp_path));
let mut message = ::capnp::message::Builder::new_default();
{
let mut metadata: ui_view_metadata::Builder = message.init_root();
metadata.set_title(&title);
metadata.set_date_added(date_added);
metadata.set_added_by(&added_by);
}
try!(::capnp::serialize::write_message(&mut writer, &message));
try!(::std::fs::rename(temp_path, token_path));
try!(writer.sync_all());
let entry = SavedUiViewData {
title: title,
date_added: date_added,
added_by: added_by,
};
let json_string = Action::Insert { token: token.clone(), data: entry.clone() }.to_json();
self.send_message_to_subscribers(&json_string);
self.views.insert(token, entry);
Ok(())
}
fn send_message_to_subscribers(&mut self, message: &str) {
for (_, sub) in &self.subscribers {
let mut req = sub.send_bytes_request();
encode_websocket_message(req.get(), message);
self.tasks.add(req.send().promise.map(|_| Ok(())));
}
}
    /// Deletes the saved grain's token file (tolerating it already being
    /// gone), broadcasts a `remove` action, and drops the in-memory entry.
    fn remove(&mut self, token: &str) -> Result<(), Error> {
        let mut path = self.base_path.clone();
        path.push(token);
        if let Err(e) = ::std::fs::remove_file(path) {
            // NotFound is fine: the entry may never have been persisted.
            if e.kind() != ::std::io::ErrorKind::NotFound {
                return Err(e.into())
            }
        }
        let json_string = Action::Remove { token: token.into() }.to_json();
        self.send_message_to_subscribers(&json_string);
        self.views.remove(token);
        Ok(())
    }
    /// Registers a new websocket subscriber and queues an initial state
    /// replay — the can-write flag, the description, every saved view, and
    /// every known view info, in that order — then returns the server-side
    /// stream object whose Drop unregisters the subscriber.
    fn new_subscribed_websocket(set: &Rc<RefCell<SavedUiViewSet>>,
                                client_stream: web_socket_stream::Client,
                                can_write: bool,
                                timer: &::gjio::Timer)
                                -> WebSocketStream
    {
        let id = set.borrow().next_id;
        set.borrow_mut().next_id = id + 1;
        set.borrow_mut().subscribers.insert(id, client_stream.clone());
        // Chain the replay messages so they are delivered sequentially.
        let mut task = Promise::ok(());
        {
            let json_string = Action::CanWrite(can_write).to_json();
            let mut req = client_stream.send_bytes_request();
            encode_websocket_message(req.get(), &json_string);
            let promise = req.send().promise.map(|_| Ok(()));
            task = task.then(|_| promise);
        }
        {
            let json_string = Action::Description(set.borrow().description.clone()).to_json();
            let mut req = client_stream.send_bytes_request();
            encode_websocket_message(req.get(), &json_string);
            let promise = req.send().promise.map(|_| Ok(()));
            task = task.then(|_| promise);
        }
        for (t, v) in &set.borrow().views {
            let action = Action::Insert {
                token: t.clone(),
                data: v.clone()
            };
            let json_string = action.to_json();
            let mut req = client_stream.send_bytes_request();
            encode_websocket_message(req.get(), &json_string);
            let promise = req.send().promise.map(|_| Ok(()));
            task = task.then(|_| promise);
        }
        for (t, vi) in &set.borrow().view_infos {
            let action = Action::ViewInfo {
                token: t.clone(),
                data: vi.clone()
            };
            let json_string = action.to_json();
            let mut req = client_stream.send_bytes_request();
            encode_websocket_message(req.get(), &json_string);
            let promise = req.send().promise.map(|_| Ok(()));
            task = task.then(|_| promise);
        }
        set.borrow_mut().tasks.add(task);
        WebSocketStream::new(id, client_stream, timer.clone(), set.clone())
    }
}
/// One authenticated HTTP/websocket session for a single user.
pub struct WebSession {
    timer: ::gjio::Timer,
    // True iff the user holds permission bit 0 ("write").
    can_write: bool,
    sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
    context: session_context::Client,
    saved_ui_views: Rc<RefCell<SavedUiViewSet>>,
    // Hex-encoded stable identity ID of this session's user.
    identity_id: String,
}
impl WebSession {
    /// Builds a session for one user, recording whether they hold the
    /// "write" permission (bit 0) and their hex identity ID.
    pub fn new(timer: ::gjio::Timer,
               user_info: user_info::Reader,
               context: session_context::Client,
               _params: web_session::params::Reader,
               sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
               saved_ui_views: Rc<RefCell<SavedUiViewSet>>)
               -> ::capnp::Result<WebSession>
    {
        // Permission #0 is "write". Check if bit 0 in the PermissionSet is set.
        let permissions = try!(user_info.get_permissions());
        let can_write = permissions.len() > 0 && permissions.get(0);
        Ok(WebSession {
            timer: timer,
            can_write: can_write,
            sandstorm_api: sandstorm_api,
            context: context,
            saved_ui_views: saved_ui_views,
            identity_id: hex::ToHex::to_hex(try!(user_info.get_identity_id())),
        })
        // `UserInfo` is defined in `sandstorm/grain.capnp` and contains info like:
        // - A stable ID for the user, so you can correlate sessions from the same user.
        // - The user's display name, e.g. "Mark Miller", useful for identifying the user to other
        //   users.
        // - The user's permissions (seen above).
        // `WebSession::Params` is defined in `sandstorm/web-session.capnp` and contains info like:
        // - The hostname where the grain was mapped for this user. Every time a user opens a grain,
        //   it is mapped at a new random hostname for security reasons.
        // - The user's User-Agent and Accept-Languages headers.
        // `SessionContext` is defined in `sandstorm/grain.capnp` and implements callbacks for
        // sharing/access control and service publishing/discovery.
    }
}
impl ui_session::Server for WebSession {}
impl web_session::Server for WebSession {
    /// HTTP GET: serves the single-page UI at "", the gzipped static assets
    /// at "script.js" / "style.css", and 404 for anything else.
    fn get(&mut self,
           params: web_session::GetParams,
           mut results: web_session::GetResults)
           -> Promise<(), Error>
    {
        // HTTP GET request.
        let path = pry!(pry!(params.get()).get_path());
        pry!(self.require_canonical_path(path));
        if path == "" {
            // NOTE: the literal below is reproduced exactly — one line lacks a
            // trailing backslash, so the served HTML contains that newline.
            let text = "<!DOCTYPE html>\
<html><head>\
<link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\">\
<script type=\"text/javascript\" src=\"script.js\" async></script>
</head><body><div id=\"main\"></div></body></html>";
            let mut content = results.get().init_content();
            content.set_mime_type("text/html; charset=UTF-8");
            content.init_body().set_bytes(text.as_bytes());
            Promise::ok(())
        } else if path == "script.js" {
            self.read_file("/script.js.gz", results, "text/javascript; charset=UTF-8", Some("gzip"))
        } else if path == "style.css" {
            self.read_file("/style.css.gz", results, "text/css; charset=UTF-8", Some("gzip"))
        } else {
            let mut error = results.get().init_client_error();
            error.set_status_code(web_session::response::ClientErrorCode::NotFound);
            Promise::ok(())
        }
    }
    /// HTTP POST: dispatches "token/<t>" to powerbox-request completion and
    /// "offer/<t>" to offering a saved grain back to the user; 404 otherwise.
    fn post(&mut self,
            params: web_session::PostParams,
            mut results: web_session::PostResults)
            -> Promise<(), Error>
    {
        let path = {
            let path = pry!(pry!(params.get()).get_path());
            pry!(self.require_canonical_path(path));
            path.to_string()
        };
        if path.starts_with("token/") {
            self.receive_request_token(path[6..].to_string(), params, results)
        } else if path.starts_with("offer/") {
            self.offer_ui_view(path[6..].to_string(), params, results)
        } else {
            let mut error = results.get().init_client_error();
            error.set_status_code(web_session::response::ClientErrorCode::NotFound);
            Promise::ok(())
        }
    }
    /// HTTP PUT: only "description" is writable, and only with the "write"
    /// permission. NOTE(review): a writable PUT to any other path falls
    /// through and resolves without setting a response — confirm intended.
    fn put(&mut self,
           params: web_session::PutParams,
           mut results: web_session::PutResults)
           -> Promise<(), Error>
    {
        // HTTP PUT request.
        let params = pry!(params.get());
        let path = pry!(params.get_path());
        pry!(self.require_canonical_path(path));
        if !self.can_write {
            results.get().init_client_error()
                .set_status_code(web_session::response::ClientErrorCode::Forbidden);
        } else if path == "description" {
            let content = pry!(pry!(params.get_content()).get_content());
            pry!(self.saved_ui_views.borrow_mut().update_description(content));
            results.get().init_no_content();
        }
        Promise::ok(())
    }
    /// HTTP DELETE of "sturdyref/<base64-token>": requires write permission,
    /// removes the saved entry locally, asks Sandstorm to drop the sturdyref,
    /// and logs a "remove" activity event.
    fn delete(&mut self,
              params: web_session::DeleteParams,
              mut results: web_session::DeleteResults)
              -> Promise<(), Error>
    {
        // HTTP DELETE request.
        let path = pry!(pry!(params.get()).get_path());
        pry!(self.require_canonical_path(path));
        if !path.starts_with("sturdyref/") {
            return Promise::err(Error::failed("DELETE only supported under sturdyref/".to_string()));
        }
        if !self.can_write {
            results.get().init_client_error()
                .set_status_code(web_session::response::ClientErrorCode::Forbidden);
            Promise::ok(())
        } else {
            let token_str = &path[10..];
            let binary_token = match base64::FromBase64::from_base64(token_str) {
                Ok(b) => b,
                Err(e) => {
                    results.get().init_client_error().set_description_html(&format!("{}", e)[..]);
                    return Promise::ok(())
                }
            };
            pry!(self.saved_ui_views.borrow_mut().remove(token_str));
            let context = self.context.clone();
            let mut req = self.sandstorm_api.drop_request();
            req.get().set_token(&binary_token);
            // Drop errors are deliberately swallowed (see comment below).
            req.send().promise.then_else(move |_| {
                // then_else() because drop() is currently broken. :(
                let mut req = context.activity_request();
                req.get().init_event().set_type(REMOVE_GRAIN_ACTIVITY_INDEX);
                req.send().promise.then(move |_| {
                    results.get().init_no_content();
                    Promise::ok(())
                })
            })
        }
    }
    /// Websocket upgrade: registers the client as a subscriber (with initial
    /// state replay) and hands back our server-side stream.
    fn open_web_socket(&mut self,
                       params: web_session::OpenWebSocketParams,
                       mut results: web_session::OpenWebSocketResults)
                       -> Promise<(), Error>
    {
        let client_stream = pry!(pry!(params.get()).get_client_stream());
        results.get().set_server_stream(
            web_socket_stream::ToClient::new(
                SavedUiViewSet::new_subscribed_websocket(
                    &self.saved_ui_views,
                    client_stream,
                    self.can_write,
                    &self.timer)).from_server::<::capnp_rpc::Server>());
        Promise::ok(())
    }
}
/// Reports `e` to the HTTP client as an HTML error description.
fn fill_in_client_error(mut results: web_session::PostResults, e: Error)
{
    results.get().init_client_error().set_description_html(&format!("{}", e)[..]);
}
impl WebSession {
    /// POST "offer/<base64-token>": restores the saved UiView sturdyref and
    /// offers it to the current session via SessionContext.offer, tagged as a
    /// UiView, so Sandstorm can open the grain for the user. Failures are
    /// reported back to the client as an HTML error description.
    fn offer_ui_view(&mut self,
                     text_token: String,
                     _params: web_session::PostParams,
                     mut results: web_session::PostResults)
                     -> Promise<(), Error>
    {
        let token = match base64::FromBase64::from_base64(&text_token[..]) {
            Ok(b) => b,
            Err(e) => return Promise::err(Error::failed(format!("{}", e))),
        };
        let session_context = self.context.clone();
        let mut req = self.sandstorm_api.restore_request();
        req.get().set_token(&token);
        req.send().promise.then(move |response| {
            let sealed_ui_view: ui_view::Client =
                pry!(pry!(response.get()).get_cap().get_as_capability());
            let mut req = session_context.offer_request();
            req.get().get_cap().set_as_capability(sealed_ui_view.client.hook);
            {
                use capnp::traits::HasTypeId;
                // Tag the offer with the UiView type ID.
                let tags = req.get().init_descriptor().init_tags(1);
                tags.get(0).set_id(ui_view::Client::type_id());
            }
            req.send().promise
        }).then_else(move |r| match r {
            Ok(_) => {
                results.get().init_no_content();
                Promise::ok(())
            }
            Err(e) => {
                fill_in_client_error(results, e);
                Promise::ok(())
            }
        })
    }
fn read_powerbox_tag(&mut self, decoded_content: Vec<u8>) -> ::capnp::Result<String>
{
let mut cursor = ::std::io::Cursor::new(decoded_content);
let message = try!(::capnp::serialize_packed::read_message(&mut cursor,
Default::default()));
let desc: powerbox_descriptor::Reader = try!(message.get_root());
let tags = try!(desc.get_tags());
if tags.len() == 0 {
Err(Error::failed("no powerbox tag".into()))
} else {
let value: ui_view::powerbox_tag::Reader = try!(tags.get(0).get_value().get_as());
Ok(try!(value.get_title()).into())
}
}
    /// POST "token/<requestToken>": completes a powerbox request. Decodes the
    /// base64 powerbox descriptor from the body to recover the grain title,
    /// claims the request token for a UiView capability, saves that as a
    /// sturdyref, records it in the collection, kicks off a view-info fetch,
    /// and logs an "add" activity event.
    fn receive_request_token(&mut self,
                             token: String,
                             params: web_session::PostParams,
                             mut results: web_session::PostResults)
                             -> Promise<(), Error>
    {
        let content = pry!(pry!(pry!(params.get()).get_content()).get_content());
        let decoded_content = match base64::FromBase64::from_base64(content) {
            Ok(c) => c,
            Err(_) => {
                fill_in_client_error(results, Error::failed("failed to convert from base64".into()));
                return Promise::ok(())
            }
        };
        let grain_title: String = match self.read_powerbox_tag(decoded_content) {
            Ok(t) => t,
            Err(e) => {
                fill_in_client_error(results, e);
                return Promise::ok(());
            }
        };
        // now let's save this thing into an actual uiview sturdyref
        let mut req = self.context.claim_request_request();
        let sandstorm_api = self.sandstorm_api.clone();
        req.get().set_request_token(&token[..]);
        let saved_ui_views = self.saved_ui_views.clone();
        let identity_id = self.identity_id.clone();
        let do_stuff = req.send().promise.then(move |response| {
            let sealed_ui_view: ui_view::Client =
                pry!(pry!(response.get()).get_cap().get_as_capability());
            let mut req = sandstorm_api.save_request();
            req.get().get_cap().set_as_capability(sealed_ui_view.client.hook);
            {
                let mut save_label = req.get().init_label();
                save_label.set_default_text(&format!("grain with title: {}", grain_title)[..]);
            }
            req.send().promise.map(move |response| {
                let binary_token = try!(try!(response.get()).get_token());
                let token = base64::ToBase64::to_base64(binary_token, base64::URL_SAFE);
                try!(saved_ui_views.borrow_mut().insert(token.clone(), grain_title, identity_id));
                try!(SavedUiViewSet::retrieve_view_info(&saved_ui_views, token));
                Ok(())
            })
        });
        let context = self.context.clone();
        do_stuff.then_else(move |r| match r {
            Ok(()) => {
                let mut req = context.activity_request();
                req.get().init_event().set_type(ADD_GRAIN_ACTIVITY_INDEX);
                req.send().promise.then(move |_| {
                    let mut _content = results.get().init_content();
                    Promise::ok(())
                })
            }
            Err(e) => {
                let mut error = results.get().init_client_error();
                error.set_description_html(&format!("error: {:?}", e));
                Promise::ok(())
            }
        })
    }
fn require_canonical_path(&self, path: &str) -> Result<(), Error> {
// Require that the path doesn't contain "." or ".." or consecutive slashes, to prevent path
// injection attacks.
//
// Note that such attacks wouldn't actually accomplish much since everything outside /var
// is a read-only filesystem anyway, containing the app package contents which are non-secret.
for (idx, component) in path.split_terminator("/").enumerate() {
if component == "." || component == ".." || (component == "" && idx > 0) {
return Err(Error::failed(format!("non-canonical path: {:?}", path)));
}
}
Ok(())
}
    /// Streams a static file into an HTTP 200 response with the given MIME
    /// type and optional Content-Encoding; maps NotFound to a 404 client
    /// error and any other I/O error to a failed promise.
    fn read_file(&self,
                 filename: &str,
                 mut results: web_session::GetResults,
                 content_type: &str,
                 encoding: Option<&str>)
                 -> Promise<(), Error>
    {
        match ::std::fs::File::open(filename) {
            Ok(mut f) => {
                let size = pry!(f.metadata()).len();
                let mut content = results.get().init_content();
                content.set_status_code(web_session::response::SuccessCode::Ok);
                content.set_mime_type(content_type);
                encoding.map(|enc| content.set_encoding(enc));
                // Copy the file straight into the capnp byte buffer.
                let mut body = content.init_body().init_bytes(size as u32);
                pry!(::std::io::copy(&mut f, &mut body));
                Promise::ok(())
            }
            Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => {
                let mut error = results.get().init_client_error();
                error.set_status_code(web_session::response::ClientErrorCode::NotFound);
                Promise::ok(())
            }
            Err(e) => {
                Promise::err(e.into())
            }
        }
    }
}
/// The grain's main capability: hands out `WebSession`s and describes the
/// app's permissions, roles, and activity event types.
pub struct UiView {
    timer: ::gjio::Timer,
    sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
    saved_ui_views: Rc<RefCell<SavedUiViewSet>>,
}
impl UiView {
    /// Bundles the event timer, Sandstorm API client, and shared collection
    /// state into the grain's root capability.
    fn new(timer: ::gjio::Timer,
           client: sandstorm_api::Client<::capnp::any_pointer::Owned>,
           saved_ui_views: Rc<RefCell<SavedUiViewSet>>) -> UiView
    {
        UiView {
            timer: timer,
            sandstorm_api: client,
            saved_ui_views: saved_ui_views,
        }
    }
}
impl ui_view::Server for UiView {
    /// Describes this app to Sandstorm: one "write" permission, "editor" and
    /// "viewer" roles, and the "add"/"remove" activity event types whose
    /// positions match ADD_GRAIN_ACTIVITY_INDEX / REMOVE_GRAIN_ACTIVITY_INDEX.
    fn get_view_info(&mut self,
                     _params: ui_view::GetViewInfoParams,
                     mut results: ui_view::GetViewInfoResults)
                     -> Promise<(), Error>
    {
        let mut view_info = results.get();
        // Define a "write" permission, and then define roles "editor" and "viewer" where only "editor"
        // has the "write" permission. This will allow people to share read-only.
        {
            let perms = view_info.borrow().init_permissions(1);
            let mut write = perms.get(0);
            write.set_name("write");
            write.init_title().set_default_text("write");
        }
        {
            let mut roles = view_info.borrow().init_roles(2);
            {
                let mut editor = roles.borrow().get(0);
                editor.borrow().init_title().set_default_text("editor");
                editor.borrow().init_verb_phrase().set_default_text("can edit");
                editor.init_permissions(1).set(0, true); // has "write" permission
            }
            {
                let mut viewer = roles.get(1);
                viewer.borrow().init_title().set_default_text("viewer");
                viewer.borrow().init_verb_phrase().set_default_text("can view");
                viewer.init_permissions(1).set(0, false); // does not have "write" permission
            }
        }
        {
            let mut event_types = view_info.init_event_types(2);
            {
                let mut added = event_types.borrow().get(ADD_GRAIN_ACTIVITY_INDEX as u32);
                added.set_name("add");
                added.borrow().init_verb_phrase().set_default_text("grain added");
            }
            {
                let mut removed = event_types.borrow().get(REMOVE_GRAIN_ACTIVITY_INDEX as u32);
                removed.set_name("remove");
                removed.borrow().init_verb_phrase().set_default_text("grain removed");
            }
        }
        Promise::ok(())
    }
    /// Creates a `WebSession` for a connecting user. Only the web-session
    /// type is supported; other session type IDs are rejected.
    fn new_session(&mut self,
                   params: ui_view::NewSessionParams,
                   mut results: ui_view::NewSessionResults)
                   -> Promise<(), Error>
    {
        use ::capnp::traits::HasTypeId;
        let params = pry!(params.get());
        if params.get_session_type() != web_session::Client::type_id() {
            return Promise::err(Error::failed("unsupported session type".to_string()));
        }
        let session = pry!(WebSession::new(
            self.timer.clone(),
            pry!(params.get_user_info()),
            pry!(params.get_context()),
            pry!(params.get_session_params().get_as()),
            self.sandstorm_api.clone(),
            self.saved_ui_views.clone()));
        let client: web_session::Client =
            web_session::ToClient::new(session).from_server::<::capnp_rpc::Server>();
        // we need to do this dance to upcast.
        results.get().set_session(ui_session::Client { client : client.client});
        Promise::ok(())
    }
}
/// Entry point: starts the gj event loop, wraps the Sandstorm RPC connection
/// handed to us on file descriptor 3, and serves our UiView as the bootstrap
/// capability. The SandstormApi client is created as a promise client and
/// fulfilled from the RPC system's bootstrap, breaking the construction cycle
/// between the API client and the UiView that needs it.
pub fn main() -> Result<(), Box<::std::error::Error>> {
    EventLoop::top_level(move |wait_scope| {
        let mut event_port = try!(::gjio::EventPort::new());
        let network = event_port.get_network();
        // Sandstorm launches us with a connection on file descriptor 3.
        let stream = try!(unsafe { network.wrap_raw_socket_descriptor(3) });
        let (p, f) = Promise::and_fulfiller();
        let sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned> =
            ::capnp_rpc::new_promise_client(p);
        let saved_uiviews = try!(SavedUiViewSet::new("/var/sturdyrefs", sandstorm_api.clone()));
        let uiview = UiView::new(
            event_port.get_timer(),
            sandstorm_api,
            saved_uiviews);
        let client = ui_view::ToClient::new(uiview).from_server::<::capnp_rpc::Server>();
        let network =
            twoparty::VatNetwork::new(stream.clone(), stream,
                                      rpc_twoparty_capnp::Side::Client, Default::default());
        let mut rpc_system = RpcSystem::new(Box::new(network), Some(client.client));
        let cap = rpc_system.bootstrap::<sandstorm_api::Client<::capnp::any_pointer::Owned>>(
            ::capnp_rpc::rpc_twoparty_capnp::Side::Server);
        f.fulfill(cap.client);
        // Serve forever; the loop only exits if the event port fails.
        Promise::never_done().wait(wait_scope, &mut event_port)
    })
}
'edit description' event
// Copyright (c) 2014-2016 Sandstorm Development Group, Inc.
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use gj::{Promise, EventLoop};
use capnp::Error;
use capnp_rpc::{RpcSystem, twoparty, rpc_twoparty_capnp};
use rustc_serialize::{base64, hex, json};
use std::collections::hash_map::HashMap;
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use collections_capnp::ui_view_metadata;
use sandstorm::powerbox_capnp::powerbox_descriptor;
use sandstorm::identity_capnp::{user_info};
use sandstorm::grain_capnp::{session_context, ui_view, ui_session, sandstorm_api};
use sandstorm::grain_capnp::{static_asset};
use sandstorm::web_session_capnp::{web_session};
use sandstorm::web_session_capnp::web_session::web_socket_stream;
// Activity-event type indices. "add" and "remove" match the event types
// registered in `UiView::get_view_info`; EDIT_DESCRIPTION is index 2 —
// NOTE(review): confirm it is registered there as well.
const ADD_GRAIN_ACTIVITY_INDEX: u16 = 0;
const REMOVE_GRAIN_ACTIVITY_INDEX: u16 = 1;
const EDIT_DESCRIPTION_ACTIVITY_INDEX: u16 = 2;
/// Server end of one client websocket connection. Dropping it unregisters
/// the subscriber from the shared `SavedUiViewSet`.
pub struct WebSocketStream {
    // Key into `SavedUiViewSet::subscribers` for this connection.
    id: u64,
    // Set when we send a PING; cleared by `send_bytes` when the PONG arrives.
    awaiting_pong: Rc<Cell<bool>>,
    // Held only to keep the keepalive ping/pong loop alive for our lifetime.
    _ping_pong_promise: Promise<(), Error>,
    saved_ui_views: Rc<RefCell<SavedUiViewSet>>,
}
impl Drop for WebSocketStream {
    fn drop(&mut self) {
        // Unregister so the set stops broadcasting to a dead connection.
        self.saved_ui_views.borrow_mut().subscribers.remove(&self.id);
    }
}
/// Keepalive loop: sends a websocket PING frame, waits 10 seconds, and checks
/// that a PONG came back (`awaiting_pong` is cleared by `send_bytes`). If it
/// did, recurses to ping again; otherwise the returned promise fails.
fn do_ping_pong(client_stream: web_socket_stream::Client,
                timer: ::gjio::Timer,
                awaiting_pong: Rc<Cell<bool>>) -> Promise<(), Error>
{
    let mut req = client_stream.send_bytes_request();
    req.get().set_message(&[0x89, 0]); // PING: FIN + opcode 0x9, zero-length payload
    let promise = req.send().promise;
    awaiting_pong.set(true);
    promise.then(move |_| {
        timer.after_delay(::std::time::Duration::new(10, 0)).lift().then(move |_| {
            if awaiting_pong.get() {
                Promise::err(Error::failed("pong not received within 10 seconds".into()))
            } else {
                do_ping_pong(client_stream, timer, awaiting_pong)
            }
        })
    })
}
impl WebSocketStream {
    /// Creates the stream wrapper and starts the keepalive ping/pong loop.
    /// Keepalive failures are logged and swallowed rather than propagated.
    fn new(id: u64,
           client_stream: web_socket_stream::Client,
           timer: ::gjio::Timer,
           saved_ui_views: Rc<RefCell<SavedUiViewSet>>)
           -> WebSocketStream
    {
        let awaiting = Rc::new(Cell::new(false));
        // eagerly_evaluate() makes the loop run even though nothing awaits it.
        let ping_pong_promise = do_ping_pong(client_stream,
                                             timer,
                                             awaiting.clone()).map_else(|r| match r {
            Ok(_) => Ok(()),
            Err(e) => { println!("ERROR {}", e); Ok(()) }
        }).eagerly_evaluate();
        WebSocketStream {
            id: id,
            awaiting_pong: awaiting,
            _ping_pong_promise: ping_pong_promise,
            saved_ui_views: saved_ui_views,
        }
    }
}
impl web_socket_stream::Server for WebSocketStream {
    /// Handles a raw websocket frame from the client. Only PONG affects our
    /// state (it feeds the keepalive loop); all other opcodes are ignored.
    fn send_bytes(&mut self,
                  params: web_socket_stream::SendBytesParams,
                  _results: web_socket_stream::SendBytesResults)
                  -> Promise<(), Error>
    {
        let message = pry!(pry!(params.get()).get_message());
        let opcode = message[0] & 0xf; // low nibble of byte 0 is the opcode (RFC 6455 §5.2)
        // Mask bit and 7-bit length of byte 1; parsed but unused — payloads
        // are not interpreted here.
        let _masked = (message[1] & 0x80) != 0;
        let _length = message[1] & 0x7f;
        match opcode {
            0x0 => { // CONTINUE
            }
            0x1 => { // UTF-8 PAYLOAD
            }
            0x2 => { // BINARY PAYLOAD
            }
            0x8 => { // TERMINATE
                // TODO: drop things to get them to close.
            }
            0x9 => { // PING
                //TODO
                println!("the client sent us a ping!");
            }
            0xa => { // PONG
                self.awaiting_pong.set(false);
            }
            _ => { // OTHER
                println!("unrecognized websocket opcode {}", opcode);
            }
        }
        Promise::ok(())
    }
}
/// Wraps `message` in a single websocket text frame (FIN set, opcode 0x1,
/// unmasked — server-to-client frames are not masked) and stores the bytes
/// into `params`.
fn encode_websocket_message(mut params: web_socket_stream::send_bytes_params::Builder,
                            message: &str)
{
    // TODO(perf) avoid this allocation
    // Widen to u64 so the 64-bit shifts below are valid on 32-bit targets.
    let len = message.len() as u64;
    let mut bytes: Vec<u8> = Vec::with_capacity(message.len() + 10);
    bytes.push(0x81); // FIN + text opcode
    if len < 126 {
        // 7-bit payload length, stored directly.
        bytes.push(len as u8);
    } else if len < 1 << 16 {
        // 16-bit extended length, big-endian.
        bytes.push(0x7e);
        bytes.push((len >> 8) as u8);
        bytes.push(len as u8);
    } else {
        // 64-bit extended length, big-endian.
        // Bug fix: the >>24 and >>16 bytes were previously emitted in the
        // wrong order, corrupting the length field for payloads >= 64 KiB.
        bytes.push(0x7f);
        bytes.push((len >> 56) as u8);
        bytes.push((len >> 48) as u8);
        bytes.push((len >> 40) as u8);
        bytes.push((len >> 32) as u8);
        bytes.push((len >> 24) as u8);
        bytes.push((len >> 16) as u8);
        bytes.push((len >> 8) as u8);
        bytes.push(len as u8);
    }
    bytes.extend_from_slice(message.as_bytes());
    params.set_message(&bytes[..]);
}
/// Metadata for one saved grain, persisted on disk as `UiViewMetadata`.
#[derive(Clone)]
struct SavedUiViewData {
    // Human-readable grain title captured at save time.
    title: String,
    // Milliseconds since the UNIX epoch when the grain was saved.
    date_added: u64,
    // Hex-encoded identity ID of the user who saved the grain.
    added_by: String,
}
impl SavedUiViewData {
    /// Renders this entry as a JSON object with `title`, `dateAdded`, and
    /// `addedBy` fields, as expected by the client-side script. The title
    /// is JSON-encoded since it is user-controlled text.
    fn to_json(&self) -> String {
        let encoded_title = json::ToJson::to_json(&self.title);
        format!("{{\"title\":{},\"dateAdded\": \"{}\",\"addedBy\":\"{}\"}}",
                encoded_title, self.date_added, self.added_by)
    }
}
/// Presentation info obtained from a saved grain's `getViewInfo()`.
#[derive(Clone)]
struct ViewInfoData {
    // Default-text app title reported by the grain.
    app_title: String,
    // Fully-qualified URL of the grain's icon asset.
    grain_icon_url: String,
}
impl ViewInfoData {
    /// Renders this view info as a JSON object with `appTitle` and
    /// `grainIconUrl` fields.
    ///
    /// Both fields are JSON-encoded; previously `grainIconUrl` was
    /// interpolated raw inside a quoted string, so a quote or backslash in
    /// the URL would corrupt the output object. For ordinary URLs the
    /// serialized form is unchanged.
    fn to_json(&self) -> String {
        format!("{{\"appTitle\":{},\"grainIconUrl\":{}}}",
                json::ToJson::to_json(&self.app_title),
                json::ToJson::to_json(&self.grain_icon_url))
    }
}
/// A state-change notification pushed to WebSocket subscribers as JSON.
#[derive(Clone)]
enum Action {
    // A grain was added to the collection.
    Insert { token: String, data: SavedUiViewData },
    // A grain was removed from the collection.
    Remove { token: String },
    // View info (app title / icon URL) became available for a saved grain.
    ViewInfo { token: String, data: ViewInfoData },
    // Tells the client whether the current user may modify the collection.
    CanWrite(bool),
    // The collection's description text changed.
    Description(String),
}
impl Action {
    /// Serializes this action into the single-key JSON envelope understood
    /// by the client-side script.
    fn to_json(&self) -> String {
        match *self {
            Action::Insert { ref token, ref data } =>
                format!("{{\"insert\":{{\"token\":\"{}\",\"data\":{} }} }}",
                        token, data.to_json()),
            Action::Remove { ref token } =>
                format!("{{\"remove\":{{\"token\":\"{}\"}}}}", token),
            Action::ViewInfo { ref token, ref data } =>
                format!("{{\"viewInfo\":{{\"token\":\"{}\",\"data\":{} }} }}",
                        token, data.to_json()),
            Action::CanWrite(b) =>
                format!("{{\"canWrite\":{}}}", b),
            Action::Description(ref s) =>
                format!("{{\"description\":{}}}", json::ToJson::to_json(s)),
        }
    }
}
/// Task reaper for the fire-and-forget task set: logs failed background
/// tasks instead of aborting the process.
struct Reaper;
impl ::gj::TaskReaper<(), Error> for Reaper {
    fn task_failed(&mut self, error: Error) {
        // TODO better message.
        println!("task failed: {}", error);
    }
}
/// Shared collection state: saved grains mirrored on disk, plus the live
/// WebSocket subscribers that receive updates.
pub struct SavedUiViewSet {
    // Directory where per-grain token files are stored.
    base_path: ::std::path::PathBuf,
    // Saved grains keyed by URL-safe base64 sturdyref token.
    views: HashMap<String, SavedUiViewData>,
    // Lazily-fetched view info, keyed by the same tokens as `views`.
    view_infos: HashMap<String, ViewInfoData>,
    // Next subscriber ID to hand out.
    next_id: u64,
    // Live WebSocket subscribers keyed by subscriber ID.
    subscribers: HashMap<u64, web_socket_stream::Client>,
    // Fire-and-forget background tasks (frame sends, view-info fetches).
    tasks: ::gj::TaskSet<(), Error>,
    // Current collection description, mirrored in /var/description.
    description: String,
    // Capability for talking to the Sandstorm platform.
    sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
}
impl SavedUiViewSet {
    /// Loads the collection from `token_directory` (creating it if needed)
    /// and reads the grain description from /var/description (created empty
    /// if absent).
    ///
    /// Leftover `*.uploading` temp files from interrupted writes are
    /// deleted. For each valid token file, the stored `UiViewMetadata` is
    /// decoded into `views` and a background task is started to fetch the
    /// grain's view info.
    pub fn new<P>(token_directory: P,
                  sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>)
                  -> ::capnp::Result<Rc<RefCell<SavedUiViewSet>>>
        where P: AsRef<::std::path::Path>
    {
        let description = match ::std::fs::File::open("/var/description") {
            Ok(mut f) => {
                use std::io::Read;
                let mut result = String::new();
                try!(f.read_to_string(&mut result));
                result
            }
            Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => {
                // First run: create an empty description file.
                use std::io::Write;
                let mut f = try!(::std::fs::File::create("/var/description"));
                let result = "";
                try!(f.write_all(result.as_bytes()));
                result.into()
            }
            Err(e) => {
                return Err(e.into());
            }
        };
        let result = Rc::new(RefCell::new(SavedUiViewSet {
            base_path: token_directory.as_ref().to_path_buf(),
            views: HashMap::new(),
            view_infos: HashMap::new(),
            next_id: 0,
            subscribers: HashMap::new(),
            tasks: ::gj::TaskSet::new(Box::new(Reaper)),
            description: description,
            sandstorm_api: sandstorm_api,
        }));
        // create token directory if it does not yet exist
        try!(::std::fs::create_dir_all(&token_directory));
        for token_file in try!(::std::fs::read_dir(&token_directory)) {
            let dir_entry = try!(token_file);
            // Token files are named by their URL-safe base64 token.
            let token: String = match dir_entry.file_name().to_str() {
                None => {
                    println!("malformed token: {:?}", dir_entry.file_name());
                    continue
                }
                Some(s) => s.into(),
            };
            if token.ends_with(".uploading") {
                // Incomplete write left over from a crash; discard it.
                try!(::std::fs::remove_file(dir_entry.path()));
            } else {
                let mut reader = try!(::std::fs::File::open(dir_entry.path()));
                let message = try!(::capnp::serialize::read_message(&mut reader,
                                                                    Default::default()));
                let metadata: ui_view_metadata::Reader = try!(message.get_root());
                let entry = SavedUiViewData {
                    title: try!(metadata.get_title()).into(),
                    date_added: metadata.get_date_added(),
                    added_by: try!(metadata.get_added_by()).into(),
                };
                result.borrow_mut().views.insert(token.clone(), entry);
                try!(SavedUiViewSet::retrieve_view_info(&result, token));
            }
        }
        Ok(result)
    }

    /// Restores the sturdyref behind `token`, asks the grain for its
    /// `ViewInfo`, resolves its icon's URL, then caches the result in
    /// `view_infos` and broadcasts a `viewInfo` action to subscribers.
    /// Runs asynchronously as a task on `tasks`; this function only fails
    /// synchronously on a malformed token.
    fn retrieve_view_info(set_ref: &Rc<RefCell<SavedUiViewSet>>,
                          token: String) -> ::capnp::Result<()> {
        // SandstormApi.restore, then call getViewInfo,
        // then call get_url() on the grain static asset.
        let set = set_ref.clone();
        let binary_token = match base64::FromBase64::from_base64(&token[..]) {
            Ok(b) => b,
            Err(e) => return Err(Error::failed(format!("{}", e))),
        };
        let mut req = set.borrow().sandstorm_api.restore_request();
        req.get().set_token(&binary_token);
        let task = req.send().promise.then(move |response| {
            let view: ui_view::Client =
                pry!(pry!(response.get()).get_cap().get_as_capability());
            view.get_view_info_request().send().promise.then(move |response| {
                let view_info = pry!(response.get());
                let app_title = pry!(pry!(view_info.get_app_title()).get_default_text()).to_string();
                let asset = pry!(view_info.get_grain_icon());
                asset.get_url_request().send().promise.then(move |response| {
                    let result = pry!(response.get());
                    let protocol = match pry!(result.get_protocol()) {
                        static_asset::Protocol::Https => "https".to_string(),
                        static_asset::Protocol::Http => "http".to_string(),
                    };
                    let info = ViewInfoData {
                        app_title: app_title,
                        grain_icon_url: format!("{}://{}", protocol, pry!(result.get_host_path())),
                    };
                    set.borrow_mut().view_infos.insert(token.clone(), info.clone());
                    let json_string = Action::ViewInfo { token: token, data: info }.to_json();
                    set.borrow_mut().send_message_to_subscribers(&json_string);
                    Promise::ok(())
                })
            })
        });
        set_ref.borrow_mut().tasks.add(task);
        Ok(())
    }

    /// Validates `description` as UTF-8, persists it to /var/description via
    /// a temp file + rename, updates the in-memory copy, and broadcasts a
    /// `description` action to subscribers.
    fn update_description(&mut self, description: &[u8]) -> ::capnp::Result<()> {
        use std::io::Write;
        let desc_string: String = match ::std::str::from_utf8(description) {
            Err(e) => return Err(::capnp::Error::failed(format!("{}", e))),
            Ok(d) => d.into(),
        };
        let temp_path = format!("/var/description.uploading");
        try!(try!(::std::fs::File::create(&temp_path)).write_all(description));
        try!(::std::fs::rename(temp_path, "/var/description"));
        self.description = desc_string;
        let json_string = Action::Description(self.description.clone()).to_json();
        self.send_message_to_subscribers(&json_string);
        Ok(())
    }

    /// Persists a newly-saved grain: writes its metadata to
    /// `<base_path>/<token>` via temp file + rename, records it in `views`,
    /// and broadcasts an `insert` action to subscribers.
    fn insert(&mut self,
              token: String,
              title: String,
              added_by: String) -> ::capnp::Result<()> {
        // Milliseconds since the UNIX epoch.
        let dur = ::std::time::SystemTime::now().duration_since(::std::time::UNIX_EPOCH).expect("TODO");
        let date_added = dur.as_secs() * 1000 + (dur.subsec_nanos() / 1000000) as u64;
        let mut token_path = ::std::path::PathBuf::new();
        token_path.push(self.base_path.clone());
        token_path.push(token.clone());
        let mut temp_path = ::std::path::PathBuf::new();
        temp_path.push(self.base_path.clone());
        temp_path.push(format!("{}.uploading", token));
        let mut writer = try!(::std::fs::File::create(&temp_path));
        let mut message = ::capnp::message::Builder::new_default();
        {
            let mut metadata: ui_view_metadata::Builder = message.init_root();
            metadata.set_title(&title);
            metadata.set_date_added(date_added);
            metadata.set_added_by(&added_by);
        }
        try!(::capnp::serialize::write_message(&mut writer, &message));
        // NOTE(review): the rename happens before sync_all(), so the file can
        // appear under its final name before its contents are durable —
        // confirm whether rename-then-sync is the intended ordering.
        try!(::std::fs::rename(temp_path, token_path));
        try!(writer.sync_all());
        let entry = SavedUiViewData {
            title: title,
            date_added: date_added,
            added_by: added_by,
        };
        let json_string = Action::Insert { token: token.clone(), data: entry.clone() }.to_json();
        self.send_message_to_subscribers(&json_string);
        self.views.insert(token, entry);
        Ok(())
    }

    /// Broadcasts `message` as a WebSocket text frame to every subscriber.
    /// Sends run as fire-and-forget tasks; failures are handled by `Reaper`.
    fn send_message_to_subscribers(&mut self, message: &str) {
        for (_, sub) in &self.subscribers {
            let mut req = sub.send_bytes_request();
            encode_websocket_message(req.get(), message);
            self.tasks.add(req.send().promise.map(|_| Ok(())));
        }
    }

    /// Deletes the token file for `token` (ignoring not-found), removes the
    /// entry from `views`, and broadcasts a `remove` action.
    fn remove(&mut self, token: &str) -> Result<(), Error> {
        let mut path = self.base_path.clone();
        path.push(token);
        if let Err(e) = ::std::fs::remove_file(path) {
            if e.kind() != ::std::io::ErrorKind::NotFound {
                return Err(e.into())
            }
        }
        let json_string = Action::Remove { token: token.into() }.to_json();
        self.send_message_to_subscribers(&json_string);
        self.views.remove(token);
        Ok(())
    }

    /// Registers `client_stream` as a subscriber and replays current state
    /// to it as a sequential chain of frames: the caller's write permission,
    /// the description, every saved view, and every known view info.
    /// Returns the `WebSocketStream` that owns the subscription.
    fn new_subscribed_websocket(set: &Rc<RefCell<SavedUiViewSet>>,
                                client_stream: web_socket_stream::Client,
                                can_write: bool,
                                timer: &::gjio::Timer)
                                -> WebSocketStream
    {
        let id = set.borrow().next_id;
        set.borrow_mut().next_id = id + 1;
        set.borrow_mut().subscribers.insert(id, client_stream.clone());
        // Chain each send onto `task` so frames arrive in order.
        let mut task = Promise::ok(());
        {
            let json_string = Action::CanWrite(can_write).to_json();
            let mut req = client_stream.send_bytes_request();
            encode_websocket_message(req.get(), &json_string);
            let promise = req.send().promise.map(|_| Ok(()));
            task = task.then(|_| promise);
        }
        {
            let json_string = Action::Description(set.borrow().description.clone()).to_json();
            let mut req = client_stream.send_bytes_request();
            encode_websocket_message(req.get(), &json_string);
            let promise = req.send().promise.map(|_| Ok(()));
            task = task.then(|_| promise);
        }
        for (t, v) in &set.borrow().views {
            let action = Action::Insert {
                token: t.clone(),
                data: v.clone()
            };
            let json_string = action.to_json();
            let mut req = client_stream.send_bytes_request();
            encode_websocket_message(req.get(), &json_string);
            let promise = req.send().promise.map(|_| Ok(()));
            task = task.then(|_| promise);
        }
        for (t, vi) in &set.borrow().view_infos {
            let action = Action::ViewInfo {
                token: t.clone(),
                data: vi.clone()
            };
            let json_string = action.to_json();
            let mut req = client_stream.send_bytes_request();
            encode_websocket_message(req.get(), &json_string);
            let promise = req.send().promise.map(|_| Ok(()));
            task = task.then(|_| promise);
        }
        set.borrow_mut().tasks.add(task);
        WebSocketStream::new(id, client_stream, timer.clone(), set.clone())
    }
}
/// Per-user HTTP session state.
pub struct WebSession {
    timer: ::gjio::Timer,
    // True iff the user holds permission #0 ("write").
    can_write: bool,
    sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
    context: session_context::Client,
    saved_ui_views: Rc<RefCell<SavedUiViewSet>>,
    // Hex-encoded identity ID of the session's user.
    identity_id: String,
}
impl WebSession {
    /// Builds a session for one user, capturing their write permission and
    /// hex-encoded identity ID from `user_info`.
    pub fn new(timer: ::gjio::Timer,
               user_info: user_info::Reader,
               context: session_context::Client,
               _params: web_session::params::Reader,
               sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
               saved_ui_views: Rc<RefCell<SavedUiViewSet>>)
               -> ::capnp::Result<WebSession>
    {
        // Permission #0 is "write". Check if bit 0 in the PermissionSet is set.
        let permissions = try!(user_info.get_permissions());
        let can_write = permissions.len() > 0 && permissions.get(0);
        Ok(WebSession {
            timer: timer,
            can_write: can_write,
            sandstorm_api: sandstorm_api,
            context: context,
            saved_ui_views: saved_ui_views,
            identity_id: hex::ToHex::to_hex(try!(user_info.get_identity_id())),
        })
        // `UserInfo` is defined in `sandstorm/grain.capnp` and contains info like:
        // - A stable ID for the user, so you can correlate sessions from the same user.
        // - The user's display name, e.g. "Mark Miller", useful for identifying the user to other
        //   users.
        // - The user's permissions (seen above).
        //
        // `WebSession::Params` is defined in `sandstorm/web-session.capnp` and contains info like:
        // - The hostname where the grain was mapped for this user. Every time a user opens a grain,
        //   it is mapped at a new random hostname for security reasons.
        // - The user's User-Agent and Accept-Languages headers.
        //
        // `SessionContext` is defined in `sandstorm/grain.capnp` and implements callbacks for
        // sharing/access control and service publishing/discovery.
    }
}
impl ui_session::Server for WebSession {}
impl web_session::Server for WebSession {
    /// HTTP GET: serves the single-page app shell at "", plus the gzipped
    /// script and stylesheet; anything else is a 404.
    fn get(&mut self,
           params: web_session::GetParams,
           mut results: web_session::GetResults)
           -> Promise<(), Error>
    {
        // HTTP GET request.
        let path = pry!(pry!(params.get()).get_path());
        pry!(self.require_canonical_path(path));
        if path == "" {
            let text = "<!DOCTYPE html>\
                        <html><head>\
                        <link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\">\
                        <script type=\"text/javascript\" src=\"script.js\" async></script>
</head><body><div id=\"main\"></div></body></html>";
            let mut content = results.get().init_content();
            content.set_mime_type("text/html; charset=UTF-8");
            content.init_body().set_bytes(text.as_bytes());
            Promise::ok(())
        } else if path == "script.js" {
            self.read_file("/script.js.gz", results, "text/javascript; charset=UTF-8", Some("gzip"))
        } else if path == "style.css" {
            self.read_file("/style.css.gz", results, "text/css; charset=UTF-8", Some("gzip"))
        } else {
            let mut error = results.get().init_client_error();
            error.set_status_code(web_session::response::ClientErrorCode::NotFound);
            Promise::ok(())
        }
    }

    /// HTTP POST: routes "token/<request-token>" to powerbox completion and
    /// "offer/<token>" to grain re-offering; anything else is a 404.
    fn post(&mut self,
            params: web_session::PostParams,
            mut results: web_session::PostResults)
            -> Promise<(), Error>
    {
        let path = {
            let path = pry!(pry!(params.get()).get_path());
            pry!(self.require_canonical_path(path));
            path.to_string()
        };
        if path.starts_with("token/") {
            self.receive_request_token(path[6..].to_string(), params, results)
        } else if path.starts_with("offer/") {
            self.offer_ui_view(path[6..].to_string(), params, results)
        } else {
            let mut error = results.get().init_client_error();
            error.set_status_code(web_session::response::ClientErrorCode::NotFound);
            Promise::ok(())
        }
    }

    /// HTTP PUT: only "description" is writable, and only by users with the
    /// write permission. Persists the new description and logs an
    /// edit-description activity event.
    fn put(&mut self,
           params: web_session::PutParams,
           mut results: web_session::PutResults)
           -> Promise<(), Error>
    {
        // HTTP PUT request.
        let params = pry!(params.get());
        let path = pry!(params.get_path());
        pry!(self.require_canonical_path(path));
        if !self.can_write {
            results.get().init_client_error()
                .set_status_code(web_session::response::ClientErrorCode::Forbidden);
            Promise::ok(())
        } else if path == "description" {
            let content = pry!(pry!(params.get_content()).get_content());
            pry!(self.saved_ui_views.borrow_mut().update_description(content));
            let mut req = self.context.activity_request();
            req.get().init_event().set_type(EDIT_DESCRIPTION_ACTIVITY_INDEX);
            req.send().promise.then(move |_| {
                results.get().init_no_content();
                Promise::ok(())
            })
        } else {
            results.get().init_client_error()
                .set_status_code(web_session::response::ClientErrorCode::Forbidden);
            Promise::ok(())
        }
    }

    /// HTTP DELETE "sturdyref/<token>": requires write permission; removes
    /// the saved grain from the collection, asks the platform to drop the
    /// sturdyref, and logs a "remove" activity event.
    fn delete(&mut self,
              params: web_session::DeleteParams,
              mut results: web_session::DeleteResults)
              -> Promise<(), Error>
    {
        // HTTP DELETE request.
        let path = pry!(pry!(params.get()).get_path());
        pry!(self.require_canonical_path(path));
        if !path.starts_with("sturdyref/") {
            return Promise::err(Error::failed("DELETE only supported under sturdyref/".to_string()));
        }
        if !self.can_write {
            results.get().init_client_error()
                .set_status_code(web_session::response::ClientErrorCode::Forbidden);
            Promise::ok(())
        } else {
            let token_str = &path[10..];
            let binary_token = match base64::FromBase64::from_base64(token_str) {
                Ok(b) => b,
                Err(e) => {
                    results.get().init_client_error().set_description_html(&format!("{}", e)[..]);
                    return Promise::ok(())
                }
            };
            pry!(self.saved_ui_views.borrow_mut().remove(token_str));
            let context = self.context.clone();
            let mut req = self.sandstorm_api.drop_request();
            req.get().set_token(&binary_token);
            req.send().promise.then_else(move |_| {
                // then_else() because drop() is currently broken. :(
                let mut req = context.activity_request();
                req.get().init_event().set_type(REMOVE_GRAIN_ACTIVITY_INDEX);
                req.send().promise.then(move |_| {
                    results.get().init_no_content();
                    Promise::ok(())
                })
            })
        }
    }

    /// WebSocket upgrade: wraps the client stream in a subscribed
    /// `WebSocketStream` so this connection receives collection updates.
    fn open_web_socket(&mut self,
                       params: web_session::OpenWebSocketParams,
                       mut results: web_session::OpenWebSocketResults)
                       -> Promise<(), Error>
    {
        let client_stream = pry!(pry!(params.get()).get_client_stream());
        results.get().set_server_stream(
            web_socket_stream::ToClient::new(
                SavedUiViewSet::new_subscribed_websocket(
                    &self.saved_ui_views,
                    client_stream,
                    self.can_write,
                    &self.timer)).from_server::<::capnp_rpc::Server>());
        Promise::ok(())
    }
}
/// Reports `e` to the HTTP client as a clientError response, using the
/// error's display text as the descriptionHtml.
fn fill_in_client_error(mut results: web_session::PostResults, e: Error)
{
    let text = format!("{}", e);
    results.get().init_client_error().set_description_html(&text[..]);
}
impl WebSession {
    /// POST "offer/<token>": restores the saved UiView behind `text_token`
    /// and offers it to the user's session context (Sandstorm surfaces this
    /// to the user). Errors are reported as a clientError response rather
    /// than an RPC failure.
    fn offer_ui_view(&mut self,
                     text_token: String,
                     _params: web_session::PostParams,
                     mut results: web_session::PostResults)
                     -> Promise<(), Error>
    {
        let token = match base64::FromBase64::from_base64(&text_token[..]) {
            Ok(b) => b,
            Err(e) => return Promise::err(Error::failed(format!("{}", e))),
        };
        let session_context = self.context.clone();
        let mut req = self.sandstorm_api.restore_request();
        req.get().set_token(&token);
        req.send().promise.then(move |response| {
            let sealed_ui_view: ui_view::Client =
                pry!(pry!(response.get()).get_cap().get_as_capability());
            let mut req = session_context.offer_request();
            req.get().get_cap().set_as_capability(sealed_ui_view.client.hook);
            {
                use capnp::traits::HasTypeId;
                // Tag the offered capability as a UiView so the shell knows
                // what it is being handed.
                let tags = req.get().init_descriptor().init_tags(1);
                tags.get(0).set_id(ui_view::Client::type_id());
            }
            req.send().promise
        }).then_else(move |r| match r {
            Ok(_) => {
                results.get().init_no_content();
                Promise::ok(())
            }
            Err(e) => {
                fill_in_client_error(results, e);
                Promise::ok(())
            }
        })
    }

    /// Decodes a packed `PowerboxDescriptor` message and returns the grain
    /// title carried in its first tag's `UiView.PowerboxTag` value.
    fn read_powerbox_tag(&mut self, decoded_content: Vec<u8>) -> ::capnp::Result<String>
    {
        let mut cursor = ::std::io::Cursor::new(decoded_content);
        let message = try!(::capnp::serialize_packed::read_message(&mut cursor,
                                                                   Default::default()));
        let desc: powerbox_descriptor::Reader = try!(message.get_root());
        let tags = try!(desc.get_tags());
        if tags.len() == 0 {
            Err(Error::failed("no powerbox tag".into()))
        } else {
            let value: ui_view::powerbox_tag::Reader = try!(tags.get(0).get_value().get_as());
            Ok(try!(value.get_title()).into())
        }
    }

    /// POST "token/<request-token>": completes a powerbox request. Claims
    /// the request token, saves the resulting UiView as a sturdyref,
    /// records it in the collection (broadcasting to subscribers), starts a
    /// view-info fetch, and logs an "add" activity event.
    fn receive_request_token(&mut self,
                             token: String,
                             params: web_session::PostParams,
                             mut results: web_session::PostResults)
                             -> Promise<(), Error>
    {
        // The POST body is a base64-encoded, packed PowerboxDescriptor.
        let content = pry!(pry!(pry!(params.get()).get_content()).get_content());
        let decoded_content = match base64::FromBase64::from_base64(content) {
            Ok(c) => c,
            Err(_) => {
                fill_in_client_error(results, Error::failed("failed to convert from base64".into()));
                return Promise::ok(())
            }
        };
        let grain_title: String = match self.read_powerbox_tag(decoded_content) {
            Ok(t) => t,
            Err(e) => {
                fill_in_client_error(results, e);
                return Promise::ok(());
            }
        };
        // now let's save this thing into an actual uiview sturdyref
        let mut req = self.context.claim_request_request();
        let sandstorm_api = self.sandstorm_api.clone();
        req.get().set_request_token(&token[..]);
        let saved_ui_views = self.saved_ui_views.clone();
        let identity_id = self.identity_id.clone();
        let do_stuff = req.send().promise.then(move |response| {
            let sealed_ui_view: ui_view::Client =
                pry!(pry!(response.get()).get_cap().get_as_capability());
            let mut req = sandstorm_api.save_request();
            req.get().get_cap().set_as_capability(sealed_ui_view.client.hook);
            {
                let mut save_label = req.get().init_label();
                save_label.set_default_text(&format!("grain with title: {}", grain_title)[..]);
            }
            req.send().promise.map(move |response| {
                let binary_token = try!(try!(response.get()).get_token());
                let token = base64::ToBase64::to_base64(binary_token, base64::URL_SAFE);
                try!(saved_ui_views.borrow_mut().insert(token.clone(), grain_title, identity_id));
                try!(SavedUiViewSet::retrieve_view_info(&saved_ui_views, token));
                Ok(())
            })
        });
        let context = self.context.clone();
        do_stuff.then_else(move |r| match r {
            Ok(()) => {
                let mut req = context.activity_request();
                req.get().init_event().set_type(ADD_GRAIN_ACTIVITY_INDEX);
                req.send().promise.then(move |_| {
                    let mut _content = results.get().init_content();
                    Promise::ok(())
                })
            }
            Err(e) => {
                let mut error = results.get().init_client_error();
                error.set_description_html(&format!("error: {:?}", e));
                Promise::ok(())
            }
        })
    }

    /// Rejects paths containing ".", "..", or empty components (consecutive
    /// slashes) before they are used to build filesystem paths.
    fn require_canonical_path(&self, path: &str) -> Result<(), Error> {
        // Require that the path doesn't contain "." or ".." or consecutive slashes, to prevent path
        // injection attacks.
        //
        // Note that such attacks wouldn't actually accomplish much since everything outside /var
        // is a read-only filesystem anyway, containing the app package contents which are non-secret.
        for (idx, component) in path.split_terminator("/").enumerate() {
            if component == "." || component == ".." || (component == "" && idx > 0) {
                return Err(Error::failed(format!("non-canonical path: {:?}", path)));
            }
        }
        Ok(())
    }

    /// Streams a static file into an HTTP 200 response with the given MIME
    /// type and optional Content-Encoding; a missing file becomes a 404 and
    /// any other I/O error fails the RPC.
    fn read_file(&self,
                 filename: &str,
                 mut results: web_session::GetResults,
                 content_type: &str,
                 encoding: Option<&str>)
                 -> Promise<(), Error>
    {
        match ::std::fs::File::open(filename) {
            Ok(mut f) => {
                let size = pry!(f.metadata()).len();
                let mut content = results.get().init_content();
                content.set_status_code(web_session::response::SuccessCode::Ok);
                content.set_mime_type(content_type);
                encoding.map(|enc| content.set_encoding(enc));
                // NOTE(review): size is truncated to u32 here — assumes
                // static assets are smaller than 4 GiB.
                let mut body = content.init_body().init_bytes(size as u32);
                pry!(::std::io::copy(&mut f, &mut body));
                Promise::ok(())
            }
            Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => {
                let mut error = results.get().init_client_error();
                error.set_status_code(web_session::response::ClientErrorCode::NotFound);
                Promise::ok(())
            }
            Err(e) => {
                Promise::err(e.into())
            }
        }
    }
}
/// The app's main UiView capability; hands out a `WebSession` per user
/// session.
pub struct UiView {
    timer: ::gjio::Timer,
    sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
    saved_ui_views: Rc<RefCell<SavedUiViewSet>>,
}
impl UiView {
    /// Constructs a `UiView` backed by the given timer, Sandstorm API
    /// client, and shared saved-view collection.
    fn new(timer: ::gjio::Timer,
           client: sandstorm_api::Client<::capnp::any_pointer::Owned>,
           saved_ui_views: Rc<RefCell<SavedUiViewSet>>) -> UiView
    {
        UiView { timer: timer, sandstorm_api: client, saved_ui_views: saved_ui_views }
    }
}
impl ui_view::Server for UiView {
    /// Describes the app to Sandstorm: one "write" permission, "editor" and
    /// "viewer" roles, and three activity event types whose slot indices
    /// line up with the *_ACTIVITY_INDEX constants used elsewhere.
    fn get_view_info(&mut self,
                     _params: ui_view::GetViewInfoParams,
                     mut results: ui_view::GetViewInfoResults)
                     -> Promise<(), Error>
    {
        let mut view_info = results.get();
        // Define a "write" permission, and then define roles "editor" and "viewer" where only "editor"
        // has the "write" permission. This will allow people to share read-only.
        {
            let perms = view_info.borrow().init_permissions(1);
            let mut write = perms.get(0);
            write.set_name("write");
            write.init_title().set_default_text("write");
        }
        {
            let mut roles = view_info.borrow().init_roles(2);
            {
                let mut editor = roles.borrow().get(0);
                editor.borrow().init_title().set_default_text("editor");
                editor.borrow().init_verb_phrase().set_default_text("can edit");
                editor.init_permissions(1).set(0, true); // has "write" permission
            }
            {
                let mut viewer = roles.get(1);
                viewer.borrow().init_title().set_default_text("viewer");
                viewer.borrow().init_verb_phrase().set_default_text("can view");
                viewer.init_permissions(1).set(0, false); // does not have "write" permission
            }
        }
        {
            // Event type slots must match ADD_GRAIN_ACTIVITY_INDEX,
            // REMOVE_GRAIN_ACTIVITY_INDEX, and EDIT_DESCRIPTION_ACTIVITY_INDEX.
            let mut event_types = view_info.init_event_types(3);
            {
                let mut added = event_types.borrow().get(ADD_GRAIN_ACTIVITY_INDEX as u32);
                added.set_name("add");
                added.borrow().init_verb_phrase().set_default_text("grain added");
            }
            {
                let mut removed = event_types.borrow().get(REMOVE_GRAIN_ACTIVITY_INDEX as u32);
                removed.set_name("remove");
                removed.borrow().init_verb_phrase().set_default_text("grain removed");
            }
            {
                let mut removed = event_types.borrow().get(EDIT_DESCRIPTION_ACTIVITY_INDEX as u32);
                removed.set_name("description");
                removed.borrow().init_verb_phrase().set_default_text("description edited");
            }
        }
        Promise::ok(())
    }

    /// Creates a `WebSession` for an incoming user session; only the
    /// web-session type is supported.
    fn new_session(&mut self,
                   params: ui_view::NewSessionParams,
                   mut results: ui_view::NewSessionResults)
                   -> Promise<(), Error>
    {
        use ::capnp::traits::HasTypeId;
        let params = pry!(params.get());
        if params.get_session_type() != web_session::Client::type_id() {
            return Promise::err(Error::failed("unsupported session type".to_string()));
        }
        let session = pry!(WebSession::new(
            self.timer.clone(),
            pry!(params.get_user_info()),
            pry!(params.get_context()),
            pry!(params.get_session_params().get_as()),
            self.sandstorm_api.clone(),
            self.saved_ui_views.clone()));
        let client: web_session::Client =
            web_session::ToClient::new(session).from_server::<::capnp_rpc::Server>();
        // we need to do this dance to upcast.
        results.get().set_session(ui_session::Client { client : client.client});
        Promise::ok(())
    }
}
/// Entry point: wires up the capnp RPC system over the socket Sandstorm
/// passes on file descriptor 3, exporting our `UiView` as the bootstrap
/// capability and bootstrapping the `SandstormApi` from the other side.
pub fn main() -> Result<(), Box<::std::error::Error>> {
    EventLoop::top_level(move |wait_scope| {
        let mut event_port = try!(::gjio::EventPort::new());
        let network = event_port.get_network();
        // Sandstorm launches us with a connection on file descriptor 3.
        // SAFETY(review): assumes fd 3 is a valid socket we exclusively own,
        // per the Sandstorm launch contract.
        let stream = try!(unsafe { network.wrap_raw_socket_descriptor(3) });
        // Use a promise client so SavedUiViewSet can hold a SandstormApi
        // handle before the RPC system that resolves it exists; fulfilled
        // below once the bootstrap capability is available.
        let (p, f) = Promise::and_fulfiller();
        let sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned> =
            ::capnp_rpc::new_promise_client(p);
        let saved_uiviews = try!(SavedUiViewSet::new("/var/sturdyrefs", sandstorm_api.clone()));
        let uiview = UiView::new(
            event_port.get_timer(),
            sandstorm_api,
            saved_uiviews);
        let client = ui_view::ToClient::new(uiview).from_server::<::capnp_rpc::Server>();
        let network =
            twoparty::VatNetwork::new(stream.clone(), stream,
                                      rpc_twoparty_capnp::Side::Client, Default::default());
        let mut rpc_system = RpcSystem::new(Box::new(network), Some(client.client));
        let cap = rpc_system.bootstrap::<sandstorm_api::Client<::capnp::any_pointer::Owned>>(
            ::capnp_rpc::rpc_twoparty_capnp::Side::Server);
        f.fulfill(cap.client);
        // Run the event loop forever.
        Promise::never_done().wait(wait_scope, &mut event_port)
    })
}
|
// Copyright (c) 2014-2016 Sandstorm Development Group, Inc.
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use gj::{Promise, EventLoop};
use capnp::Error;
use capnp_rpc::{RpcSystem, twoparty, rpc_twoparty_capnp};
use rustc_serialize::{base64, hex};
use std::collections::hash_map::HashMap;
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use collections_capnp::ui_view_metadata;
use sandstorm::powerbox_capnp::powerbox_descriptor;
use sandstorm::grain_capnp::{session_context, user_info, ui_view, ui_session, sandstorm_api};
use sandstorm::grain_capnp::denormalized_grain_metadata;
use sandstorm::web_session_capnp::{web_session};
use sandstorm::web_session_capnp::web_session::web_socket_stream;
/// Server side of one browser WebSocket connection.
pub struct WebSocketStream {
    // Subscriber ID within SavedUiViewSet::subscribers.
    id: u64,
    client_stream: web_socket_stream::Client,
    // Set when a PING has been sent and the matching PONG has not arrived.
    awaiting_pong: Rc<Cell<bool>>,
    // Retains the keepalive loop for the lifetime of this stream.
    _ping_pong_promise: Promise<(), Error>,
    saved_ui_views: Rc<RefCell<SavedUiViewSet>>,
}
impl Drop for WebSocketStream {
    /// Unsubscribes this stream when it is dropped so broadcasts stop going
    /// to a closed connection.
    fn drop(&mut self) {
        self.saved_ui_views.borrow_mut().subscribers.remove(&self.id);
    }
}
/// Keepalive loop: sends a WebSocket PING frame to `client_stream`, waits
/// ten seconds, and fails if no PONG arrived in the meantime (the
/// send_bytes() handler clears `awaiting_pong` when it sees a PONG).
/// Otherwise recurses to ping again.
fn do_ping_pong(client_stream: web_socket_stream::Client,
                timer: ::gjio::Timer,
                awaiting_pong: Rc<Cell<bool>>) -> Promise<(), Error>
{
    // NOTE(review): debug logging on every ping; consider removing.
    println!("pinging");
    let mut req = client_stream.send_bytes_request();
    req.get().set_message(&[0x89, 0]); // PING
    let promise = req.send().promise;
    awaiting_pong.set(true);
    promise.then(move|_| {
        timer.after_delay(::std::time::Duration::new(10, 0)).lift().then(move |_| {
            if awaiting_pong.get() {
                Promise::err(Error::failed("pong not received within 10 seconds".into()))
            } else {
                do_ping_pong(client_stream, timer, awaiting_pong)
            }
        })
    })
}
impl WebSocketStream {
    /// Builds a `WebSocketStream`, immediately starting the keepalive
    /// ping/pong loop on `client_stream`. The loop's promise is eagerly
    /// evaluated and retained so it runs for the stream's lifetime; errors
    /// are logged and swallowed.
    fn new(id: u64,
           client_stream: web_socket_stream::Client,
           timer: ::gjio::Timer,
           saved_ui_views: Rc<RefCell<SavedUiViewSet>>)
           -> WebSocketStream
    {
        // Set when a PING is outstanding; cleared by send_bytes() on PONG.
        let awaiting = Rc::new(Cell::new(false));
        let ping_pong_promise = do_ping_pong(client_stream.clone(),
                                             timer,
                                             awaiting.clone()).map_else(|r| match r {
            Ok(_) => Ok(()),
            Err(e) => {println!("ERROR {}", e); Ok(()) }
        }).eagerly_evaluate();
        WebSocketStream {
            id: id,
            client_stream: client_stream,
            awaiting_pong: awaiting,
            _ping_pong_promise: ping_pong_promise,
            saved_ui_views: saved_ui_views,
        }
    }
}
impl web_socket_stream::Server for WebSocketStream {
    /// Handles a frame received from the browser. Only the first two header
    /// bytes are examined; payloads are ignored. A PONG frame clears
    /// `awaiting_pong`, keeping the do_ping_pong() keepalive loop going.
    fn send_bytes(&mut self,
                  params: web_socket_stream::SendBytesParams,
                  _results: web_socket_stream::SendBytesResults)
                  -> Promise<(), Error>
    {
        let message = pry!(pry!(params.get()).get_message());
        // Low nibble of the first byte is the opcode (RFC 6455 base framing);
        // 0xf is correct — 0xf0 would select the FIN/RSV bits instead.
        let opcode = message[0] & 0xf; // or is it 0xf0?
        let masked = (message[1] & 0x80) != 0;
        let length = message[1] & 0x7f;
        match opcode {
            0x0 => { // CONTINUE
            }
            0x1 => { // UTF-8 PAYLOAD
            }
            0x2 => { // BINARY PAYLOAD
            }
            0x8 => { // TERMINATE
                // TODO: drop things to get them to close.
            }
            0x9 => { // PING
                //TODO
                println!("the client sent us a ping!");
            }
            0xa => { // PONG
                // Keepalive reply arrived; reset the watchdog flag.
                self.awaiting_pong.set(false);
            }
            _ => { // OTHER
                println!("unrecognized websocket opcode {}", opcode);
            }
        }
        // NOTE(review): per-frame debug logging; consider removing in
        // production builds.
        println!("opcode {}, masked {}, length {}", opcode, masked, length);
        println!("websocket message {:?}", message);
        Promise::ok(())
    }
}
/// Wraps `message` in a WebSocket text frame (FIN + opcode 0x1) and stores
/// the framed bytes into `params`.
///
/// Payload length is encoded per RFC 6455: one byte for lengths < 126, a
/// 16-bit big-endian extended length for < 2^16, otherwise a 64-bit
/// big-endian extended length. Frames are server-to-client, so no masking
/// key is included.
fn encode_websocket_message(mut params: web_socket_stream::send_bytes_params::Builder,
                            message: &str)
{
    // TODO(perf) avoid this allocation
    let mut bytes: Vec<u8> = Vec::new();
    bytes.push(0x81); // FIN bit set, opcode 0x1 (text frame).
    if message.len() < 126 {
        bytes.push(message.len() as u8);
    } else if message.len() < 1 << 16 {
        // 16 bits, big-endian
        bytes.push(0x7e);
        bytes.push((message.len() >> 8) as u8);
        bytes.push(message.len() as u8);
    } else {
        // 64 bits, big-endian. Bug fix: the original pushed the >>16 byte
        // before the >>24 byte, scrambling the length field for any message
        // of 2^16 bytes or more.
        bytes.push(0x7f);
        bytes.push((message.len() >> 56) as u8);
        bytes.push((message.len() >> 48) as u8);
        bytes.push((message.len() >> 40) as u8);
        bytes.push((message.len() >> 32) as u8);
        bytes.push((message.len() >> 24) as u8);
        bytes.push((message.len() >> 16) as u8);
        bytes.push((message.len() >> 8) as u8);
        bytes.push(message.len() as u8);
    }
    bytes.extend_from_slice(message.as_bytes());
    params.set_message(&bytes[..]);
}
/// Metadata for one saved grain, persisted on disk as `UiViewMetadata`.
#[derive(Clone)]
struct SavedUiViewData {
    // Human-readable grain title captured at save time.
    title: String,
    // Milliseconds since the UNIX epoch when the grain was saved.
    date_added: u64,
    // Hex-encoded identity ID of the user who saved the grain.
    added_by: String,
}
impl SavedUiViewData {
    /// Renders this entry as a JSON object with `title`, `date_added`, and
    /// `added_by` fields, as expected by the client-side script.
    ///
    /// Bug fix: `title` and `added_by` were previously interpolated raw
    /// inside quoted strings, so a user-chosen title containing `"` or `\`
    /// produced invalid JSON. They are now escaped; output is unchanged for
    /// values without special characters.
    fn to_json(&self) -> String {
        // Minimal JSON string escaping for characters that would otherwise
        // break or corrupt the surrounding string literal.
        fn escape(s: &str) -> String {
            let mut out = String::with_capacity(s.len());
            for c in s.chars() {
                match c {
                    '"' => out.push_str("\\\""),
                    '\\' => out.push_str("\\\\"),
                    '\n' => out.push_str("\\n"),
                    '\r' => out.push_str("\\r"),
                    '\t' => out.push_str("\\t"),
                    _ => out.push(c),
                }
            }
            out
        }
        format!("{{\"title\":\"{}\",\"date_added\": \"{}\",\"added_by\":\"{}\"}}",
                escape(&self.title),
                self.date_added,
                escape(&self.added_by))
    }
}
/// A state-change notification pushed to WebSocket subscribers as JSON.
#[derive(Clone)]
enum Action {
    // A grain was added to the collection.
    Insert { token: String, data: SavedUiViewData },
    // A grain was removed from the collection.
    Remove { token: String },
}
impl Action {
    /// Serializes this action into the single-key JSON envelope understood
    /// by the client-side script.
    fn to_json(&self) -> String {
        match *self {
            Action::Insert { ref token, ref data } =>
                format!("{{\"insert\":{{\"token\":\"{}\",\"data\":{} }} }}",
                        token, data.to_json()),
            Action::Remove { ref token } =>
                format!("{{\"remove\":{{\"token\":\"{}\"}}}}", token),
        }
    }
}
/// Task reaper for the fire-and-forget task set: logs failed background
/// tasks instead of aborting the process.
struct Reaper;
impl ::gj::TaskReaper<(), Error> for Reaper {
    fn task_failed(&mut self, error: Error) {
        // TODO better message.
        println!("task failed: {}", error);
    }
}
/// Shared collection state: saved grains mirrored on disk, plus the live
/// WebSocket subscribers that receive updates.
pub struct SavedUiViewSet {
    // Directory where per-grain token files are stored.
    base_path: ::std::path::PathBuf,
    // Saved grains keyed by URL-safe base64 sturdyref token.
    views: HashMap<String, SavedUiViewData>,
    // Next subscriber ID to hand out.
    next_id: u64,
    // Live WebSocket subscribers keyed by subscriber ID.
    subscribers: HashMap<u64, web_socket_stream::Client>,
    // Fire-and-forget background tasks (frame sends).
    tasks: ::gj::TaskSet<(), Error>,
}
impl SavedUiViewSet {
    /// Loads the saved-grain map from `token_directory` (creating the
    /// directory if needed) by decoding each token file's stored
    /// `UiViewMetadata`.
    pub fn new<P>(token_directory: P) -> ::capnp::Result<SavedUiViewSet>
        where P: AsRef<::std::path::Path>
    {
        // create token directory if it does not yet exist
        try!(::std::fs::create_dir_all(&token_directory));
        let mut map = HashMap::new();
        for token_file in try!(::std::fs::read_dir(&token_directory)) {
            let dir_entry = try!(token_file);
            // Token files are named by their URL-safe base64 token.
            let token: String = match dir_entry.file_name().to_str() {
                None => {
                    println!("malformed token: {:?}", dir_entry.file_name());
                    continue
                }
                Some(s) => s.into(),
            };
            let mut reader = try!(::std::fs::File::open(dir_entry.path()));
            let message = try!(::capnp::serialize::read_message(&mut reader, Default::default()));
            let metadata: ui_view_metadata::Reader = try!(message.get_root());
            let entry = SavedUiViewData {
                title: try!(metadata.get_title()).into(),
                date_added: metadata.get_date_added(),
                added_by: try!(metadata.get_added_by()).into(),
            };
            map.insert(token, entry);
        }
        Ok(SavedUiViewSet {
            base_path: token_directory.as_ref().to_path_buf(),
            views: map,
            next_id: 0,
            subscribers: HashMap::new(),
            tasks: ::gj::TaskSet::new(Box::new(Reaper)),
        })
    }
    /// Persists a newly-saved grain under `<base_path>/<base64 token>`,
    /// records it in `views`, and broadcasts an `insert` action to
    /// subscribers.
    fn insert(&mut self, binary_token: &[u8], title: String,
              added_by: String) -> ::capnp::Result<()> {
        let token = base64::ToBase64::to_base64(binary_token, base64::URL_SAFE);
        // Milliseconds since the UNIX epoch.
        let dur = ::std::time::SystemTime::now().duration_since(::std::time::UNIX_EPOCH).expect("TODO");
        let date_added = dur.as_secs() * 1000 + (dur.subsec_nanos() / 1000000) as u64;
        let mut token_path = ::std::path::PathBuf::new();
        token_path.push(self.base_path.clone());
        token_path.push(token.clone());
        // NOTE(review): writes directly to the final path with no temp-file
        // + rename and no fsync — a crash mid-write can leave a truncated
        // token file behind.
        let mut writer = try!(::std::fs::File::create(token_path));
        let mut message = ::capnp::message::Builder::new_default();
        {
            let mut metadata: ui_view_metadata::Builder = message.init_root();
            metadata.set_title(&title);
            metadata.set_date_added(date_added);
            metadata.set_added_by(&added_by);
        }
        try!(::capnp::serialize::write_message(&mut writer, &message));
        let entry = SavedUiViewData {
            title: title,
            date_added: date_added,
            added_by: added_by,
        };
        let json_string = Action::Insert { token: token.clone(), data: entry.clone() }.to_json();
        self.send_message_to_subscribers(&json_string);
        self.views.insert(token, entry);
        Ok(())
    }
fn send_message_to_subscribers(&mut self, message: &str) {
for (_, sub) in &self.subscribers {
let mut req = sub.send_bytes_request();
encode_websocket_message(req.get(), message);
self.tasks.add(req.send().promise.map(|_| Ok(())));
}
}
fn remove(&mut self, token: &str) -> Result<(), Error> {
if let Err(e) = ::std::fs::remove_file(format!("/var/sturdyrefs/{}", token)) {
if e.kind() != ::std::io::ErrorKind::NotFound {
return Err(e.into())
}
}
let json_string = Action::Remove { token: token.into() }.to_json();
self.send_message_to_subscribers(&json_string);
Ok(())
}
fn new_subscribed_websocket(set: &Rc<RefCell<SavedUiViewSet>>,
client_stream: web_socket_stream::Client,
timer: &::gjio::Timer)
-> WebSocketStream
{
let id = set.borrow().next_id;
set.borrow_mut().next_id = id + 1;
set.borrow_mut().subscribers.insert(id, client_stream.clone());
let mut task = Promise::ok(());
for (t, v) in &set.borrow().views {
let action = Action::Insert {
token: t.clone(),
data: v.clone()
};
let json_string = action.to_json();
let mut req = client_stream.send_bytes_request();
encode_websocket_message(req.get(), &json_string);
let promise = req.send().promise.map(|_| Ok(()));
task = task.then(|_| promise);
}
set.borrow_mut().tasks.add(task);
WebSocketStream::new(id, client_stream, timer.clone(), set.clone())
}
}
// Per-tab HTTP session state for one user.
pub struct WebSession {
timer: ::gjio::Timer,
// True when permission bit 0 ("write") was granted to this user.
can_write: bool,
sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
// Shared with all other sessions and with the websocket streams.
saved_ui_views: Rc<RefCell<SavedUiViewSet>>,
// Hex-encoded stable identity of the user, recorded as "added_by".
identity_id: String,
// URL prefix for fetching static assets (e.g. app icons).
static_asset_path: String,
}
impl WebSession {
    /// Builds a session for one user tab. Write access is granted when
    /// permission bit 0 ("write") is present in the user's PermissionSet.
    pub fn new(timer: ::gjio::Timer,
               user_info: user_info::Reader,
               _context: session_context::Client,
               params: web_session::params::Reader,
               sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
               saved_ui_views: Rc<RefCell<SavedUiViewSet>>)
               -> ::capnp::Result<WebSession>
    {
        // Permission #0 is "write". Check if bit 0 in the PermissionSet is set.
        let permissions = try!(user_info.get_permissions());
        let can_write = if permissions.len() == 0 {
            false
        } else {
            permissions.get(0)
        };
        let identity_id = hex::ToHex::to_hex(try!(user_info.get_identity_id()));
        let static_asset_path: String = try!(params.get_static_asset_path()).into();
        Ok(WebSession {
            timer: timer,
            can_write: can_write,
            sandstorm_api: sandstorm_api,
            saved_ui_views: saved_ui_views,
            identity_id: identity_id,
            static_asset_path: static_asset_path,
        })
        // `UserInfo` (sandstorm/grain.capnp) carries a stable user ID, the
        // user's display name, and the permission set checked above.
        // `WebSession::Params` (sandstorm/web-session.capnp) carries the
        // per-session random hostname plus User-Agent / Accept-Languages.
        // `SessionContext` (sandstorm/grain.capnp) exposes callbacks for
        // sharing/access control and service publishing/discovery.
    }
}
// WebSession relies entirely on the default UiSession implementation.
impl ui_session::Server for WebSession {}
impl web_session::Server for WebSession {
/// HTTP GET. Serves the app shell at "", gzipped static assets, a plain
/// directory listing under "var", and files from "client/" otherwise.
fn get(&mut self,
params: web_session::GetParams,
mut results: web_session::GetResults)
-> Promise<(), Error>
{
// HTTP GET request.
let path = pry!(pry!(params.get()).get_path());
pry!(self.require_canonical_path(path));
println!("PATH {}", path);
if path == "" {
let text = "<!DOCTYPE html>\
<html><head>\
<link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\">\
<script type=\"text/javascript\" src=\"script.js\" async></script>
</head><body><div id=\"main\"></div></body></html>";
let mut content = results.get().init_content();
content.set_mime_type("text/html; charset=UTF-8");
content.init_body().set_bytes(text.as_bytes());
Promise::ok(())
} else if path == "script.js" {
self.read_file("/script.js.gz", results, "text/javascript; charset=UTF-8", Some("gzip"))
} else if path == "style.css" {
self.read_file("/style.css.gz", results, "text/css; charset=UTF-8", Some("gzip"))
} else if path == "var" || path == "var/" {
// Return a listing of the directory contents, one per line.
let mut entries = Vec::new();
for entry in pry!(::std::fs::read_dir(path)) {
let entry = pry!(entry);
let name = entry.file_name().into_string().expect("bad file name");
if (&name != ".") && (&name != "..") {
entries.push(name);
}
}
let text = entries.join("\n");
let mut response = results.get().init_content();
response.set_mime_type("text/plain");
response.init_body().set_bytes(text.as_bytes());
Promise::ok(())
} else if path.starts_with("var/") {
// Serve all files under /var with type application/octet-stream since it comes from the
// user. E.g. serving as "text/html" here would allow someone to trivially XSS other users
// of the grain by PUTing malicious HTML content. (Such an attack wouldn't be a huge deal:
// it would only allow the attacker to hijack another user's access to this grain, not to
// Sandstorm in general, and if they attacker already has write access to upload the
// malicious content, they have little to gain from hijacking another session.)
self.read_file(path, results, "application/octet-stream", None)
// NOTE(review): the `path == ""` half of the next condition is unreachable;
// the empty path was already handled by the first branch above.
} else if path == "" || path.ends_with("/") {
// A directory. Serve "index.html".
self.read_file(&format!("client/{}index.html", path), results, "text/html; charset=UTF-8",
None)
} else {
// Request for a static file. Look for it under "client/".
let filename = format!("client/{}", path);
// Check if it's a directory.
if let Ok(true) = ::std::fs::metadata(&filename).map(|md| md.is_dir()) {
// It is. Return redirect to add '/'.
let mut redirect = results.get().init_redirect();
redirect.set_is_permanent(true);
redirect.set_switch_to_get(true);
redirect.set_location(&format!("{}/", path));
Promise::ok(())
} else {
// Regular file (or non-existent).
self.read_file(&filename, results, self.infer_content_type(path), None)
}
}
}
/// HTTP POST to "token/<requestToken>": claims a powerbox request token,
/// saves the resulting UiView sturdyref, and records it in the set.
fn post(&mut self,
params: web_session::PostParams,
mut results: web_session::PostResults)
-> Promise<(), Error>
{
let path = pry!(pry!(params.get()).get_path());
pry!(self.require_canonical_path(path));
let token = if path.starts_with("token/") {
&path[6..]
} else {
let mut error = results.get().init_client_error();
error.set_status_code(web_session::response::ClientErrorCode::NotFound);
return Promise::ok(())
};
println!("token: {}", token);
// The request body is a base64-ed, packed PowerboxDescriptor.
let content = pry!(pry!(pry!(params.get()).get_content()).get_content());
let decoded_content = match base64::FromBase64::from_base64(content) {
Ok(c) => c,
Err(_) => {
// XXX should return a 400 error
return Promise::err(Error::failed("failed to convert from base64".into()));
}
};
let mut grain_title: String = String::new();
{
let mut cursor = ::std::io::Cursor::new(decoded_content);
let message = pry!(::capnp::serialize_packed::read_message(&mut cursor,
Default::default()));
let desc: powerbox_descriptor::Reader = pry!(message.get_root());
// Pull the grain title out of the descriptor's UiView powerbox tag.
for tag in pry!(desc.get_tags()).iter() {
println!("tag {}", tag.get_id());
let value: ui_view::powerbox_tag::Reader = pry!(tag.get_value().get_as());
grain_title = pry!(value.get_title()).into();
println!("title: {}", grain_title);
}
}
// now let's save this thing into an actual uiview sturdyref
let mut req = self.sandstorm_api.claim_request_request();
let sandstorm_api = self.sandstorm_api.clone();
req.get().set_request_token(token);
// Clone state the async chain below needs to own.
let static_asset_path = self.static_asset_path.clone();
let saved_ui_views = self.saved_ui_views.clone();
let identity_id = self.identity_id.clone();
let do_stuff = req.send().promise.then(move |response| {
println!("restored!");
let sealed_ui_view: ui_view::Client =
pry!(pry!(response.get()).get_cap().get_as_capability());
println!("got the cap!");
sealed_ui_view.get_view_info_request().send().promise.then(move |response| {
println!("got viewinfo");
let view_info = pry!(response.get());
let metadata = pry!(view_info.get_metadata());
let title = pry!(metadata.get_app_title());
println!("title: {}", pry!(title.get_default_text()));
// Log the app's icon assets or app id, whichever is present.
match pry!(metadata.which()) {
denormalized_grain_metadata::Icon(icon) => {
println!("asset URL 1 {}{}", static_asset_path, pry!(icon.get_asset_id()));
println!("asset URL 2 {}{}",
static_asset_path, pry!(icon.get_asset_id2x_dpi()));
}
denormalized_grain_metadata::AppId(app_id) => {
println!("app id {}", pry!(app_id));
}
}
// Persist the capability as a sturdyref, then record its token.
let mut req = sandstorm_api.save_request();
req.get().get_cap().set_as_capability(sealed_ui_view.client.hook);
{
let mut save_label = req.get().init_label();
save_label.set_default_text("[save label chosen by collections app]");
}
req.send().promise.map(move |response| {
let token = try!(try!(response.get()).get_token());
try!(saved_ui_views.borrow_mut().insert(token, grain_title, identity_id));
Ok(())
})
})
});
// Map overall success/failure onto an HTTP response.
do_stuff.then_else(move |r| match r {
Ok(()) => {
let mut _content = results.get().init_content();
Promise::ok(())
}
Err(e) => {
let mut error = results.get().init_client_error();
error.set_description_html(&format!("error: {:?}", e));
Promise::ok(())
}
})
}
/// HTTP PUT under "var/": stores the body via a write-then-rename so a
/// partially-written upload is never visible. Requires write permission.
fn put(&mut self,
params: web_session::PutParams,
mut results: web_session::PutResults)
-> Promise<(), Error>
{
// HTTP PUT request.
let params = pry!(params.get());
let path = pry!(params.get_path());
pry!(self.require_canonical_path(path));
if !path.starts_with("var/") {
return Promise::err(Error::failed("PUT only supported under /var.".to_string()));
}
if !self.can_write {
results.get().init_client_error()
.set_status_code(web_session::response::ClientErrorCode::Forbidden);
} else {
use std::io::Write;
let temp_path = format!("{}.uploading", path);
let data = pry!(pry!(params.get_content()).get_content());
pry!(pry!(::std::fs::File::create(&temp_path)).write_all(data));
pry!(::std::fs::rename(temp_path, path));
results.get().init_no_content();
}
Promise::ok(())
}
/// HTTP DELETE under "sturdyref/": removes a saved view from the set.
/// Requires write permission.
fn delete(&mut self,
params: web_session::DeleteParams,
mut results: web_session::DeleteResults)
-> Promise<(), Error>
{
// HTTP DELETE request.
let path = pry!(pry!(params.get()).get_path());
pry!(self.require_canonical_path(path));
if !path.starts_with("sturdyref/") {
return Promise::err(Error::failed("DELETE only supported under sturdyref/".to_string()));
}
if !self.can_write {
results.get().init_client_error()
.set_status_code(web_session::response::ClientErrorCode::Forbidden);
Promise::ok(())
} else {
// "sturdyref/" is 10 bytes; the rest of the path is the token.
pry!(self.saved_ui_views.borrow_mut().remove(&path[10..]));
results.get().init_no_content();
Promise::ok(())
}
}
/// Accepts a websocket connection and subscribes it to live updates of
/// the saved-view collection.
fn open_web_socket(&mut self,
params: web_session::OpenWebSocketParams,
mut results: web_session::OpenWebSocketResults)
-> Promise<(), Error>
{
println!("open web socket!");
let client_stream = pry!(pry!(params.get()).get_client_stream());
results.get().set_server_stream(
web_socket_stream::ToClient::new(
SavedUiViewSet::new_subscribed_websocket(
&self.saved_ui_views,
client_stream,
&self.timer)).from_server::<::capnp_rpc::Server>());
Promise::ok(())
}
}
impl WebSession {
fn require_canonical_path(&self, path: &str) -> Result<(), Error> {
// Require that the path doesn't contain "." or ".." or consecutive slashes, to prevent path
// injection attacks.
//
// Note that such attacks wouldn't actually accomplish much since everything outside /var
// is a read-only filesystem anyway, containing the app package contents which are non-secret.
for (idx, component) in path.split_terminator("/").enumerate() {
if component == "." || component == ".." || (component == "" && idx > 0) {
return Err(Error::failed(format!("non-canonical path: {:?}", path)));
}
}
Ok(())
}
fn infer_content_type(&self, filename: &str) -> &'static str {
if filename.ends_with(".html") {
"text/html; charset=UTF-8"
} else if filename.ends_with(".js") {
"text/javascript; charset=UTF-8"
} else if filename.ends_with(".css") {
"text/css; charset=UTF-8"
} else if filename.ends_with(".png") {
"image/png"
} else if filename.ends_with(".gif") {
"image/gif"
} else if filename.ends_with(".jpg") || filename.ends_with(".jpeg") {
"image/jpeg"
} else if filename.ends_with(".svg") {
"image/svg+xml; charset=UTF-8"
} else if filename.ends_with(".txt") {
"text/plain; charset=UTF-8"
} else {
"application/octet-stream"
}
}
fn read_file(&self,
filename: &str,
mut results: web_session::GetResults,
content_type: &str,
encoding: Option<&str>)
-> Promise<(), Error>
{
match ::std::fs::File::open(filename) {
Ok(mut f) => {
let size = pry!(f.metadata()).len();
let mut content = results.get().init_content();
content.set_status_code(web_session::response::SuccessCode::Ok);
content.set_mime_type(content_type);
encoding.map(|enc| content.set_encoding(enc));
let mut body = content.init_body().init_bytes(size as u32);
pry!(::std::io::copy(&mut f, &mut body));
Promise::ok(())
}
Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => {
let mut error = results.get().init_client_error();
error.set_status_code(web_session::response::ClientErrorCode::NotFound);
Promise::ok(())
}
Err(e) => {
Promise::err(e.into())
}
}
}
}
// The grain's main entry point: hands out WebSessions to Sandstorm.
pub struct UiView {
timer: ::gjio::Timer,
sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
// Shared with every session this view creates.
saved_ui_views: Rc<RefCell<SavedUiViewSet>>,
}
impl UiView {
    /// Wraps the saved-view set in shared ownership so every session created
    /// by this view can observe and mutate the same collection.
    fn new(timer: ::gjio::Timer,
           client: sandstorm_api::Client<::capnp::any_pointer::Owned>,
           saved_ui_views: SavedUiViewSet) -> UiView
    {
        let shared_views = Rc::new(RefCell::new(saved_ui_views));
        UiView {
            timer: timer,
            sandstorm_api: client,
            saved_ui_views: shared_views,
        }
    }
}
impl ui_view::Server for UiView {
/// Declares the grain's permissions and roles for the sharing UI.
fn get_view_info(&mut self,
_params: ui_view::GetViewInfoParams,
mut results: ui_view::GetViewInfoResults)
-> Promise<(), Error>
{
let mut view_info = results.get();
// Define a "write" permission, and then define roles "editor" and "viewer" where only "editor"
// has the "write" permission. This will allow people to share read-only.
{
let perms = view_info.borrow().init_permissions(1);
let mut write = perms.get(0);
write.set_name("write");
write.init_title().set_default_text("write");
}
let mut roles = view_info.init_roles(2);
{
let mut editor = roles.borrow().get(0);
editor.borrow().init_title().set_default_text("editor");
editor.borrow().init_verb_phrase().set_default_text("can edit");
editor.init_permissions(1).set(0, true); // has "write" permission
}
{
let mut viewer = roles.get(1);
viewer.borrow().init_title().set_default_text("viewer");
viewer.borrow().init_verb_phrase().set_default_text("can view");
viewer.init_permissions(1).set(0, false); // does not have "write" permission
}
Promise::ok(())
}
/// Creates a WebSession for an incoming user; only web sessions are
/// supported.
fn new_session(&mut self,
params: ui_view::NewSessionParams,
mut results: ui_view::NewSessionResults)
-> Promise<(), Error>
{
use ::capnp::traits::HasTypeId;
let params = pry!(params.get());
if params.get_session_type() != web_session::Client::type_id() {
return Promise::err(Error::failed("unsupported session type".to_string()));
}
let session = pry!(WebSession::new(
self.timer.clone(),
pry!(params.get_user_info()),
pry!(params.get_context()),
pry!(params.get_session_params().get_as()),
self.sandstorm_api.clone(),
self.saved_ui_views.clone()));
let client: web_session::Client =
web_session::ToClient::new(session).from_server::<::capnp_rpc::Server>();
// we need to do this dance to upcast.
results.get().set_session(ui_session::Client { client : client.client});
Promise::ok(())
}
}
// Entry point: sets up the event loop and the capnp RPC connection to
// Sandstorm, exporting our UiView and bootstrapping the SandstormApi.
pub fn main() -> Result<(), Box<::std::error::Error>> {
EventLoop::top_level(move |wait_scope| {
let mut event_port = try!(::gjio::EventPort::new());
let network = event_port.get_network();
// sandstorm launches us with a connection on file descriptor 3
let stream = try!(unsafe { network.wrap_raw_socket_descriptor(3) });
let saved_uiviews = try!(SavedUiViewSet::new("/var/sturdyrefs"));
// UiView needs the SandstormApi client before the RPC system that
// provides it exists, so hand it a promise and fulfill it below.
let (p, f) = Promise::and_fulfiller();
let uiview = UiView::new(
event_port.get_timer(),
::capnp_rpc::new_promise_client(p),
saved_uiviews);
let client = ui_view::ToClient::new(uiview).from_server::<::capnp_rpc::Server>();
let network =
twoparty::VatNetwork::new(stream.clone(), stream,
rpc_twoparty_capnp::Side::Client, Default::default());
let mut rpc_system = RpcSystem::new(Box::new(network), Some(client.client));
let cap = rpc_system.bootstrap::<sandstorm_api::Client<::capnp::any_pointer::Owned>>(
::capnp_rpc::rpc_twoparty_capnp::Side::Server);
f.fulfill(cap.client);
// Serve RPCs forever.
Promise::never_done().wait(wait_scope, &mut event_port)
})
}
// update for new grain.capnp
// Copyright (c) 2014-2016 Sandstorm Development Group, Inc.
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use gj::{Promise, EventLoop};
use capnp::Error;
use capnp_rpc::{RpcSystem, twoparty, rpc_twoparty_capnp};
use rustc_serialize::{base64, hex};
use std::collections::hash_map::HashMap;
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use collections_capnp::ui_view_metadata;
use sandstorm::powerbox_capnp::powerbox_descriptor;
use sandstorm::grain_capnp::{session_context, user_info, ui_view, ui_session, sandstorm_api};
use sandstorm::web_session_capnp::{web_session};
use sandstorm::web_session_capnp::web_session::web_socket_stream;
// Server side of one websocket subscription, with a keepalive ping loop.
pub struct WebSocketStream {
// Key into `SavedUiViewSet::subscribers`; removed on drop.
id: u64,
client_stream: web_socket_stream::Client,
// Set when a PING has been sent and the PONG has not yet arrived.
awaiting_pong: Rc<Cell<bool>>,
// Keeps the keepalive loop alive for the lifetime of the stream.
_ping_pong_promise: Promise<(), Error>,
saved_ui_views: Rc<RefCell<SavedUiViewSet>>,
}
impl Drop for WebSocketStream {
// Unsubscribe when the stream goes away so broadcasts stop targeting it.
fn drop(&mut self) {
self.saved_ui_views.borrow_mut().subscribers.remove(&self.id);
}
}
// Sends a websocket PING frame, waits 10 seconds, and fails the returned
// promise if the peer has not answered with a PONG by then (the frame
// handler clears `awaiting_pong` when a PONG arrives). On success it
// recurses to schedule the next ping.
fn do_ping_pong(client_stream: web_socket_stream::Client,
timer: ::gjio::Timer,
awaiting_pong: Rc<Cell<bool>>) -> Promise<(), Error>
{
println!("pinging");
let mut req = client_stream.send_bytes_request();
req.get().set_message(&[0x89, 0]); // PING
let promise = req.send().promise;
awaiting_pong.set(true);
promise.then(move|_| {
timer.after_delay(::std::time::Duration::new(10, 0)).lift().then(move |_| {
if awaiting_pong.get() {
Promise::err(Error::failed("pong not received within 10 seconds".into()))
} else {
do_ping_pong(client_stream, timer, awaiting_pong)
}
})
})
}
impl WebSocketStream {
    /// Wraps a subscribed websocket client and starts its keepalive ping
    /// loop. Keepalive failures are logged and swallowed so they never tear
    /// down anything else.
    fn new(id: u64,
           client_stream: web_socket_stream::Client,
           timer: ::gjio::Timer,
           saved_ui_views: Rc<RefCell<SavedUiViewSet>>)
           -> WebSocketStream
    {
        let awaiting_pong = Rc::new(Cell::new(false));
        let pinger = do_ping_pong(client_stream.clone(), timer, awaiting_pong.clone());
        let ping_pong_promise = pinger.map_else(|result| {
            if let Err(e) = result {
                println!("ERROR {}", e);
            }
            Ok(())
        }).eagerly_evaluate();
        WebSocketStream {
            id: id,
            client_stream: client_stream,
            awaiting_pong: awaiting_pong,
            _ping_pong_promise: ping_pong_promise,
            saved_ui_views: saved_ui_views,
        }
    }
}
impl web_socket_stream::Server for WebSocketStream {
fn send_bytes(&mut self,
params: web_socket_stream::SendBytesParams,
_results: web_socket_stream::SendBytesResults)
-> Promise<(), Error>
{
let message = pry!(pry!(params.get()).get_message());
let opcode = message[0] & 0xf; // or is it 0xf0?
let masked = (message[1] & 0x80) != 0;
let length = message[1] & 0x7f;
match opcode {
0x0 => { // CONTINUE
}
0x1 => { // UTF-8 PAYLOAD
}
0x2 => { // BINARY PAYLOAD
}
0x8 => { // TERMINATE
// TODO: drop things to get them to close.
}
0x9 => { // PING
//TODO
println!("the client sent us a ping!");
}
0xa => { // PONG
self.awaiting_pong.set(false);
}
_ => { // OTHER
println!("unrecognized websocket opcode {}", opcode);
}
}
println!("opcode {}, masked {}, length {}", opcode, masked, length);
println!("websocket message {:?}", message);
Promise::ok(())
}
}
/// Builds an unmasked websocket text frame (RFC 6455 section 5.2) containing
/// `message` and stores it in `params`.
fn encode_websocket_message(mut params: web_socket_stream::send_bytes_params::Builder,
                            message: &str)
{
    // TODO(perf) avoid this allocation
    let mut bytes: Vec<u8> = Vec::new();

    // FIN bit set + opcode 0x1 (text frame).
    bytes.push(0x81);

    let len = message.len();
    if len < 126 {
        // 7-bit payload length.
        bytes.push(len as u8);
    } else if len < 1 << 16 {
        // 16-bit extended payload length, big-endian.
        bytes.push(0x7e);
        bytes.push((len >> 8) as u8);
        bytes.push(len as u8);
    } else {
        // 64-bit extended payload length, big-endian. (Bug fix: the previous
        // version pushed the >>16 byte before the >>24 byte, corrupting the
        // length field for messages of 64 KiB or more.)
        bytes.push(0x7f);
        bytes.push((len >> 56) as u8);
        bytes.push((len >> 48) as u8);
        bytes.push((len >> 40) as u8);
        bytes.push((len >> 32) as u8);
        bytes.push((len >> 24) as u8);
        bytes.push((len >> 16) as u8);
        bytes.push((len >> 8) as u8);
        bytes.push(len as u8);
    }
    bytes.extend_from_slice(message.as_bytes());
    params.set_message(&bytes[..]);
}
// Metadata for one saved UI view, as stored on disk and sent to the client.
#[derive(Clone)]
struct SavedUiViewData {
title: String,
// Milliseconds since the Unix epoch (see SavedUiViewSet::insert).
date_added: u64,
// Hex-encoded identity of the user who added the view.
added_by: String,
}
impl SavedUiViewData {
// Serializes to the JSON object consumed by the client-side script.
// NOTE(review): `title` and `added_by` are interpolated without JSON string
// escaping -- a title containing `"` would yield invalid JSON; confirm
// whether titles are sanitized upstream.
fn to_json(&self) -> String {
format!("{{\"title\":\"{}\",\"date_added\": \"{}\",\"added_by\":\"{}\"}}",
self.title,
self.date_added,
self.added_by)
}
}
// A mutation of the saved-view collection, broadcast to websocket
// subscribers as JSON so the client UI can update incrementally.
#[derive(Clone)]
enum Action {
// Add (or replace) the view stored under `token`.
Insert { token: String, data: SavedUiViewData },
// Delete the view stored under `token`.
Remove { token: String },
}
impl Action {
    /// Renders this action as the JSON message sent to websocket subscribers.
    fn to_json(&self) -> String {
        let rendered = match *self {
            Action::Insert { ref token, ref data } => {
                format!("{{\"insert\":{{\"token\":\"{}\",\"data\":{} }} }}",
                        token, data.to_json())
            }
            Action::Remove { ref token } => {
                format!("{{\"remove\":{{\"token\":\"{}\"}}}}", token)
            }
        };
        rendered
    }
}
// Receives failures from fire-and-forget tasks (e.g. websocket sends).
struct Reaper;
impl ::gj::TaskReaper<(), Error> for Reaper {
// Failed tasks are logged and otherwise dropped on the floor.
fn task_failed(&mut self, error: Error) {
// TODO better message.
println!("task failed: {}", error);
}
}
// The collection of saved UI views, persisted under `base_path` and
// mirrored live to connected websocket clients.
pub struct SavedUiViewSet {
// Directory holding one capnp-serialized metadata file per token.
base_path: ::std::path::PathBuf,
// In-memory mirror of the on-disk metadata, keyed by base64 token.
views: HashMap<String, SavedUiViewData>,
// Next subscriber id to hand out.
next_id: u64,
// Currently-connected websocket clients, keyed by subscriber id.
subscribers: HashMap<u64, web_socket_stream::Client>,
// Holds in-flight notification sends; failures go to `Reaper`.
tasks: ::gj::TaskSet<(), Error>,
}
impl SavedUiViewSet {
    /// Loads the set of saved UI views from `token_directory`, creating the
    /// directory if it does not yet exist. Each file in the directory is
    /// named by a base64 token and holds capnp-serialized `UiViewMetadata`.
    pub fn new<P>(token_directory: P) -> ::capnp::Result<SavedUiViewSet>
        where P: AsRef<::std::path::Path>
    {
        // create token directory if it does not yet exist
        try!(::std::fs::create_dir_all(&token_directory));
        let mut map = HashMap::new();
        for token_file in try!(::std::fs::read_dir(&token_directory)) {
            let dir_entry = try!(token_file);
            let token: String = match dir_entry.file_name().to_str() {
                None => {
                    // Skip (rather than fail on) filenames that are not valid UTF-8.
                    println!("malformed token: {:?}", dir_entry.file_name());
                    continue
                }
                Some(s) => s.into(),
            };
            let mut reader = try!(::std::fs::File::open(dir_entry.path()));
            let message = try!(::capnp::serialize::read_message(&mut reader, Default::default()));
            let metadata: ui_view_metadata::Reader = try!(message.get_root());
            let entry = SavedUiViewData {
                title: try!(metadata.get_title()).into(),
                date_added: metadata.get_date_added(),
                added_by: try!(metadata.get_added_by()).into(),
            };
            map.insert(token, entry);
        }
        Ok(SavedUiViewSet {
            base_path: token_directory.as_ref().to_path_buf(),
            views: map,
            next_id: 0,
            subscribers: HashMap::new(),
            tasks: ::gj::TaskSet::new(Box::new(Reaper)),
        })
    }

    /// Persists a newly-claimed sturdyref token to disk, records it in the
    /// in-memory map, and notifies all websocket subscribers of the insert.
    fn insert(&mut self, binary_token: &[u8], title: String,
              added_by: String) -> ::capnp::Result<()> {
        let token = base64::ToBase64::to_base64(binary_token, base64::URL_SAFE);
        // Milliseconds since the Unix epoch.
        let dur = ::std::time::SystemTime::now().duration_since(::std::time::UNIX_EPOCH).expect("TODO");
        let date_added = dur.as_secs() * 1000 + (dur.subsec_nanos() / 1000000) as u64;
        let token_path = self.base_path.join(&token);
        let mut writer = try!(::std::fs::File::create(token_path));
        let mut message = ::capnp::message::Builder::new_default();
        {
            let mut metadata: ui_view_metadata::Builder = message.init_root();
            metadata.set_title(&title);
            metadata.set_date_added(date_added);
            metadata.set_added_by(&added_by);
        }
        try!(::capnp::serialize::write_message(&mut writer, &message));
        let entry = SavedUiViewData {
            title: title,
            date_added: date_added,
            added_by: added_by,
        };
        let json_string = Action::Insert { token: token.clone(), data: entry.clone() }.to_json();
        self.send_message_to_subscribers(&json_string);
        self.views.insert(token, entry);
        Ok(())
    }

    /// Broadcasts `message` to every connected websocket subscriber. Send
    /// failures are reported through the task reaper, not to the caller.
    fn send_message_to_subscribers(&mut self, message: &str) {
        for (_, sub) in &self.subscribers {
            let mut req = sub.send_bytes_request();
            encode_websocket_message(req.get(), message);
            self.tasks.add(req.send().promise.map(|_| Ok(())));
        }
    }

    /// Removes the saved view named by `token` from disk and from memory,
    /// then notifies subscribers. Removing an absent token succeeds.
    fn remove(&mut self, token: &str) -> Result<(), Error> {
        // Use the configured base path (previously this hard-coded
        // "/var/sturdyrefs", which could diverge from `new`/`insert`).
        let token_path = self.base_path.join(token);
        if let Err(e) = ::std::fs::remove_file(token_path) {
            if e.kind() != ::std::io::ErrorKind::NotFound {
                return Err(e.into())
            }
        }
        // Also drop the in-memory entry; otherwise newly-connected
        // subscribers would be replayed a stale insert for this token.
        self.views.remove(token);
        let json_string = Action::Remove { token: token.into() }.to_json();
        self.send_message_to_subscribers(&json_string);
        Ok(())
    }

    /// Registers `client_stream` as a subscriber, replays the current set of
    /// views to it as a sequential chain of insert messages, and wraps it in
    /// a `WebSocketStream` that unregisters itself on drop.
    fn new_subscribed_websocket(set: &Rc<RefCell<SavedUiViewSet>>,
                                client_stream: web_socket_stream::Client,
                                timer: &::gjio::Timer)
                                -> WebSocketStream
    {
        let id = set.borrow().next_id;
        set.borrow_mut().next_id = id + 1;
        set.borrow_mut().subscribers.insert(id, client_stream.clone());
        // Send the existing views one at a time, in order.
        let mut task = Promise::ok(());
        for (t, v) in &set.borrow().views {
            let action = Action::Insert {
                token: t.clone(),
                data: v.clone()
            };
            let json_string = action.to_json();
            let mut req = client_stream.send_bytes_request();
            encode_websocket_message(req.get(), &json_string);
            let promise = req.send().promise.map(|_| Ok(()));
            task = task.then(|_| promise);
        }
        set.borrow_mut().tasks.add(task);
        WebSocketStream::new(id, client_stream, timer.clone(), set.clone())
    }
}
// Per-tab HTTP session state for one user.
pub struct WebSession {
timer: ::gjio::Timer,
// True when permission bit 0 ("write") was granted to this user.
can_write: bool,
sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
// Shared with all other sessions and with the websocket streams.
saved_ui_views: Rc<RefCell<SavedUiViewSet>>,
// Hex-encoded stable identity of the user, recorded as "added_by".
identity_id: String,
}
impl WebSession {
    /// Builds a session for one user tab. Write access is granted when
    /// permission bit 0 ("write") is present in the user's PermissionSet.
    pub fn new(timer: ::gjio::Timer,
               user_info: user_info::Reader,
               _context: session_context::Client,
               params: web_session::params::Reader,
               sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
               saved_ui_views: Rc<RefCell<SavedUiViewSet>>)
               -> ::capnp::Result<WebSession>
    {
        // Permission #0 is "write". Check if bit 0 in the PermissionSet is set.
        let permissions = try!(user_info.get_permissions());
        let can_write = if permissions.len() == 0 {
            false
        } else {
            permissions.get(0)
        };
        let identity_id = hex::ToHex::to_hex(try!(user_info.get_identity_id()));
        Ok(WebSession {
            timer: timer,
            can_write: can_write,
            sandstorm_api: sandstorm_api,
            saved_ui_views: saved_ui_views,
            identity_id: identity_id,
        })
        // `UserInfo` (sandstorm/grain.capnp) carries a stable user ID, the
        // user's display name, and the permission set checked above.
        // `WebSession::Params` (sandstorm/web-session.capnp) carries the
        // per-session random hostname plus User-Agent / Accept-Languages.
        // `SessionContext` (sandstorm/grain.capnp) exposes callbacks for
        // sharing/access control and service publishing/discovery.
    }
}
// WebSession relies entirely on the default UiSession implementation.
impl ui_session::Server for WebSession {}
impl web_session::Server for WebSession {
/// HTTP GET. Serves the app shell at "", gzipped static assets, a plain
/// directory listing under "var", and files from "client/" otherwise.
fn get(&mut self,
params: web_session::GetParams,
mut results: web_session::GetResults)
-> Promise<(), Error>
{
// HTTP GET request.
let path = pry!(pry!(params.get()).get_path());
pry!(self.require_canonical_path(path));
println!("PATH {}", path);
if path == "" {
let text = "<!DOCTYPE html>\
<html><head>\
<link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\">\
<script type=\"text/javascript\" src=\"script.js\" async></script>
</head><body><div id=\"main\"></div></body></html>";
let mut content = results.get().init_content();
content.set_mime_type("text/html; charset=UTF-8");
content.init_body().set_bytes(text.as_bytes());
Promise::ok(())
} else if path == "script.js" {
self.read_file("/script.js.gz", results, "text/javascript; charset=UTF-8", Some("gzip"))
} else if path == "style.css" {
self.read_file("/style.css.gz", results, "text/css; charset=UTF-8", Some("gzip"))
} else if path == "var" || path == "var/" {
// Return a listing of the directory contents, one per line.
let mut entries = Vec::new();
for entry in pry!(::std::fs::read_dir(path)) {
let entry = pry!(entry);
let name = entry.file_name().into_string().expect("bad file name");
if (&name != ".") && (&name != "..") {
entries.push(name);
}
}
let text = entries.join("\n");
let mut response = results.get().init_content();
response.set_mime_type("text/plain");
response.init_body().set_bytes(text.as_bytes());
Promise::ok(())
} else if path.starts_with("var/") {
// Serve all files under /var with type application/octet-stream since it comes from the
// user. E.g. serving as "text/html" here would allow someone to trivially XSS other users
// of the grain by PUTing malicious HTML content. (Such an attack wouldn't be a huge deal:
// it would only allow the attacker to hijack another user's access to this grain, not to
// Sandstorm in general, and if they attacker already has write access to upload the
// malicious content, they have little to gain from hijacking another session.)
self.read_file(path, results, "application/octet-stream", None)
// NOTE(review): the `path == ""` half of the next condition is unreachable;
// the empty path was already handled by the first branch above.
} else if path == "" || path.ends_with("/") {
// A directory. Serve "index.html".
self.read_file(&format!("client/{}index.html", path), results, "text/html; charset=UTF-8",
None)
} else {
// Request for a static file. Look for it under "client/".
let filename = format!("client/{}", path);
// Check if it's a directory.
if let Ok(true) = ::std::fs::metadata(&filename).map(|md| md.is_dir()) {
// It is. Return redirect to add '/'.
let mut redirect = results.get().init_redirect();
redirect.set_is_permanent(true);
redirect.set_switch_to_get(true);
redirect.set_location(&format!("{}/", path));
Promise::ok(())
} else {
// Regular file (or non-existent).
self.read_file(&filename, results, self.infer_content_type(path), None)
}
}
}
/// HTTP POST to "token/<requestToken>": claims a powerbox request token,
/// saves the resulting UiView as a sturdyref, and records it in the set.
fn post(&mut self,
params: web_session::PostParams,
mut results: web_session::PostResults)
-> Promise<(), Error>
{
let path = pry!(pry!(params.get()).get_path());
pry!(self.require_canonical_path(path));
let token = if path.starts_with("token/") {
&path[6..]
} else {
let mut error = results.get().init_client_error();
error.set_status_code(web_session::response::ClientErrorCode::NotFound);
return Promise::ok(())
};
println!("token: {}", token);
// The request body is a base64-ed, packed PowerboxDescriptor.
let content = pry!(pry!(pry!(params.get()).get_content()).get_content());
let decoded_content = match base64::FromBase64::from_base64(content) {
Ok(c) => c,
Err(_) => {
// XXX should return a 400 error
return Promise::err(Error::failed("failed to convert from base64".into()));
}
};
let mut grain_title: String = String::new();
{
let mut cursor = ::std::io::Cursor::new(decoded_content);
let message = pry!(::capnp::serialize_packed::read_message(&mut cursor,
Default::default()));
let desc: powerbox_descriptor::Reader = pry!(message.get_root());
// Pull the grain title out of the descriptor's UiView powerbox tag.
for tag in pry!(desc.get_tags()).iter() {
println!("tag {}", tag.get_id());
let value: ui_view::powerbox_tag::Reader = pry!(tag.get_value().get_as());
grain_title = pry!(value.get_title()).into();
println!("grain title: {}", grain_title);
}
}
// now let's save this thing into an actual uiview sturdyref
let mut req = self.sandstorm_api.claim_request_request();
let sandstorm_api = self.sandstorm_api.clone();
req.get().set_request_token(token);
// Clone state the async chain below needs to own.
let saved_ui_views = self.saved_ui_views.clone();
let identity_id = self.identity_id.clone();
let do_stuff = req.send().promise.then(move |response| {
println!("restored!");
let sealed_ui_view: ui_view::Client =
pry!(pry!(response.get()).get_cap().get_as_capability());
println!("got the cap!");
sealed_ui_view.get_view_info_request().send().promise.then(move |response| {
println!("got viewinfo");
let view_info = pry!(response.get());
let title = pry!(view_info.get_app_title());
println!("app title: {}", pry!(title.get_default_text()));
let icon = pry!(view_info.get_grain_icon());
icon.get_url_request().send().promise.then(move |response| {
let url = pry!(pry!(response.get()).get_url());
println!("grain icon url: {}", url);
// Persist the capability as a sturdyref, then record its token.
let mut req = sandstorm_api.save_request();
req.get().get_cap().set_as_capability(sealed_ui_view.client.hook);
{
let mut save_label = req.get().init_label();
save_label.set_default_text("[save label chosen by collections app]");
}
req.send().promise.map(move |response| {
let token = try!(try!(response.get()).get_token());
try!(saved_ui_views.borrow_mut().insert(token, grain_title, identity_id));
Ok(())
})
})
})
});
// Map overall success/failure onto an HTTP response.
do_stuff.then_else(move |r| match r {
Ok(()) => {
let mut _content = results.get().init_content();
Promise::ok(())
}
Err(e) => {
let mut error = results.get().init_client_error();
error.set_description_html(&format!("error: {:?}", e));
Promise::ok(())
}
})
}
/// Handles HTTP PUT: stores the request body in a file under "var/".
/// The body is first written to "<path>.uploading" and then renamed into place,
/// so a partially-written upload never replaces an existing file.
fn put(&mut self,
       params: web_session::PutParams,
       mut results: web_session::PutResults)
       -> Promise<(), Error>
{
    // HTTP PUT request.
    let params = pry!(params.get());
    let path = pry!(params.get_path());
    pry!(self.require_canonical_path(path));
    if !path.starts_with("var/") {
        return Promise::err(Error::failed("PUT only supported under /var.".to_string()));
    }
    if !self.can_write {
        // Read-only sessions get 403 Forbidden.
        results.get().init_client_error()
            .set_status_code(web_session::response::ClientErrorCode::Forbidden);
    } else {
        use std::io::Write;
        let temp_path = format!("{}.uploading", path);
        let data = pry!(pry!(params.get_content()).get_content());
        pry!(pry!(::std::fs::File::create(&temp_path)).write_all(data));
        // Move the completed upload into place.
        pry!(::std::fs::rename(temp_path, path));
        results.get().init_no_content();
    }
    Promise::ok(())
}
/// Handles HTTP DELETE: removes a saved sturdyref. Only paths under
/// "sturdyref/" are accepted, and the session must have write permission.
fn delete(&mut self,
          params: web_session::DeleteParams,
          mut results: web_session::DeleteResults)
          -> Promise<(), Error>
{
    // HTTP DELETE request.
    let path = pry!(pry!(params.get()).get_path());
    pry!(self.require_canonical_path(path));
    if !path.starts_with("sturdyref/") {
        return Promise::err(Error::failed("DELETE only supported under sturdyref/".to_string()));
    }
    if !self.can_write {
        results.get().init_client_error()
            .set_status_code(web_session::response::ClientErrorCode::Forbidden);
        Promise::ok(())
    } else {
        // Strip the 10-byte "sturdyref/" prefix to get the token.
        pry!(self.saved_ui_views.borrow_mut().remove(&path[10..]));
        results.get().init_no_content();
        Promise::ok(())
    }
}
/// Accepts a WebSocket connection, returning a server stream that is subscribed
/// to the saved-UiView set so the client receives updates as views change.
fn open_web_socket(&mut self,
                   params: web_session::OpenWebSocketParams,
                   mut results: web_session::OpenWebSocketResults)
                   -> Promise<(), Error>
{
    println!("open web socket!");
    let client_stream = pry!(pry!(params.get()).get_client_stream());
    results.get().set_server_stream(
        web_socket_stream::ToClient::new(
            SavedUiViewSet::new_subscribed_websocket(
                &self.saved_ui_views,
                client_stream,
                &self.timer)).from_server::<::capnp_rpc::Server>());
    Promise::ok(())
}
}
impl WebSession {
    /// Returns an error unless `path` is canonical: no "." or ".." components and
    /// no empty components after the first (i.e. no consecutive slashes).
    fn require_canonical_path(&self, path: &str) -> Result<(), Error> {
        // Require that the path doesn't contain "." or ".." or consecutive slashes, to prevent path
        // injection attacks.
        //
        // Note that such attacks wouldn't actually accomplish much since everything outside /var
        // is a read-only filesystem anyway, containing the app package contents which are non-secret.
        for (idx, component) in path.split_terminator("/").enumerate() {
            // `idx > 0` tolerates an empty first component (a leading "/") while
            // still rejecting "//" anywhere else.
            if component == "." || component == ".." || (component == "" && idx > 0) {
                return Err(Error::failed(format!("non-canonical path: {:?}", path)));
            }
        }
        Ok(())
    }

    /// Guesses a Content-Type from the filename extension; unknown extensions
    /// fall back to "application/octet-stream".
    fn infer_content_type(&self, filename: &str) -> &'static str {
        if filename.ends_with(".html") {
            "text/html; charset=UTF-8"
        } else if filename.ends_with(".js") {
            "text/javascript; charset=UTF-8"
        } else if filename.ends_with(".css") {
            "text/css; charset=UTF-8"
        } else if filename.ends_with(".png") {
            "image/png"
        } else if filename.ends_with(".gif") {
            "image/gif"
        } else if filename.ends_with(".jpg") || filename.ends_with(".jpeg") {
            "image/jpeg"
        } else if filename.ends_with(".svg") {
            "image/svg+xml; charset=UTF-8"
        } else if filename.ends_with(".txt") {
            "text/plain; charset=UTF-8"
        } else {
            "application/octet-stream"
        }
    }

    /// Copies `filename` into a 200 response with the given content type and
    /// optional Content-Encoding. A missing file becomes a 404 client error;
    /// any other I/O failure is propagated as an RPC error.
    fn read_file(&self,
                 filename: &str,
                 mut results: web_session::GetResults,
                 content_type: &str,
                 encoding: Option<&str>)
                 -> Promise<(), Error>
    {
        match ::std::fs::File::open(filename) {
            Ok(mut f) => {
                let size = pry!(f.metadata()).len();
                let mut content = results.get().init_content();
                content.set_status_code(web_session::response::SuccessCode::Ok);
                content.set_mime_type(content_type);
                encoding.map(|enc| content.set_encoding(enc));
                // Size the response body from the file metadata, then stream the
                // file contents straight into it.
                let mut body = content.init_body().init_bytes(size as u32);
                pry!(::std::io::copy(&mut f, &mut body));
                Promise::ok(())
            }
            Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => {
                let mut error = results.get().init_client_error();
                error.set_status_code(web_session::response::ClientErrorCode::NotFound);
                Promise::ok(())
            }
            Err(e) => {
                Promise::err(e.into())
            }
        }
    }
}
/// The grain's main UiView: the capability through which Sandstorm starts
/// user sessions on this grain.
pub struct UiView {
    timer: ::gjio::Timer, // passed to sessions and websocket subscriptions
    sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>,
    saved_ui_views: Rc<RefCell<SavedUiViewSet>>, // shared with every session
}
impl UiView {
    /// Constructs a `UiView`, taking ownership of the saved-view set and making
    /// it shareable across the sessions this view will spawn.
    fn new(timer: ::gjio::Timer,
           client: sandstorm_api::Client<::capnp::any_pointer::Owned>,
           saved_ui_views: SavedUiViewSet) -> UiView
    {
        let shared_views = Rc::new(RefCell::new(saved_ui_views));
        UiView {
            sandstorm_api: client,
            saved_ui_views: shared_views,
            timer,
        }
    }
}
impl ui_view::Server for UiView {
    /// Describes this grain's permissions and roles to Sandstorm's sharing UI.
    fn get_view_info(&mut self,
                     _params: ui_view::GetViewInfoParams,
                     mut results: ui_view::GetViewInfoResults)
                     -> Promise<(), Error>
    {
        let mut view_info = results.get();
        // Define a "write" permission, and then define roles "editor" and "viewer" where only "editor"
        // has the "write" permission. This will allow people to share read-only.
        {
            let perms = view_info.borrow().init_permissions(1);
            let mut write = perms.get(0);
            write.set_name("write");
            write.init_title().set_default_text("write");
        }
        let mut roles = view_info.init_roles(2);
        {
            let mut editor = roles.borrow().get(0);
            editor.borrow().init_title().set_default_text("editor");
            editor.borrow().init_verb_phrase().set_default_text("can edit");
            editor.init_permissions(1).set(0, true); // has "write" permission
        }
        {
            let mut viewer = roles.get(1);
            viewer.borrow().init_title().set_default_text("viewer");
            viewer.borrow().init_verb_phrase().set_default_text("can view");
            viewer.init_permissions(1).set(0, false); // does not have "write" permission
        }
        Promise::ok(())
    }

    /// Creates a new `WebSession` for an incoming user. Only web sessions are
    /// supported; any other session type is rejected.
    fn new_session(&mut self,
                   params: ui_view::NewSessionParams,
                   mut results: ui_view::NewSessionResults)
                   -> Promise<(), Error>
    {
        use ::capnp::traits::HasTypeId;
        let params = pry!(params.get());
        if params.get_session_type() != web_session::Client::type_id() {
            return Promise::err(Error::failed("unsupported session type".to_string()));
        }
        let session = pry!(WebSession::new(
            self.timer.clone(),
            pry!(params.get_user_info()),
            pry!(params.get_context()),
            pry!(params.get_session_params().get_as()),
            self.sandstorm_api.clone(),
            self.saved_ui_views.clone()));
        let client: web_session::Client =
            web_session::ToClient::new(session).from_server::<::capnp_rpc::Server>();
        // we need to do this dance to upcast.
        results.get().set_session(ui_session::Client { client : client.client});
        Promise::ok(())
    }
}
/// Entry point: connects to Sandstorm over the socket on file descriptor 3,
/// restores the saved-UiView set from disk, exports our UiView, and
/// bootstraps the Sandstorm API capability from the other side.
pub fn main() -> Result<(), Box<::std::error::Error>> {
    EventLoop::top_level(move |wait_scope| {
        let mut event_port = try!(::gjio::EventPort::new());
        let network = event_port.get_network();
        // sandstorm launches us with a connection on file descriptor 3
        let stream = try!(unsafe { network.wrap_raw_socket_descriptor(3) });
        let saved_uiviews = try!(SavedUiViewSet::new("/var/sturdyrefs"));
        // The UiView needs a Sandstorm API client before the RPC system that
        // supplies it exists, so hand it a promise-client now and fulfill the
        // promise once the bootstrap capability arrives below.
        let (p, f) = Promise::and_fulfiller();
        let uiview = UiView::new(
            event_port.get_timer(),
            ::capnp_rpc::new_promise_client(p),
            saved_uiviews);
        let client = ui_view::ToClient::new(uiview).from_server::<::capnp_rpc::Server>();
        let network =
            twoparty::VatNetwork::new(stream.clone(), stream,
                                      rpc_twoparty_capnp::Side::Client, Default::default());
        let mut rpc_system = RpcSystem::new(Box::new(network), Some(client.client));
        let cap = rpc_system.bootstrap::<sandstorm_api::Client<::capnp::any_pointer::Owned>>(
            ::capnp_rpc::rpc_twoparty_capnp::Side::Server);
        f.fulfill(cap.client);
        // Serve forever; Sandstorm terminates the process when the grain shuts down.
        Promise::never_done().wait(wait_scope, &mut event_port)
    })
}
|
use std::net::{TcpListener, TcpStream};
use std::collections::HashMap;
use std::thread;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Sender};
use std::io::prelude::*;
use std::io::BufReader;
extern crate hmacsha1;
extern crate uuid;
use self::uuid::Uuid;
use client::Client;
use distributor::Distributor;
use distributor::Kind as DistKind;
/// Per-login player record stored in `Server::players`, keyed by client id.
pub struct Player {
    // Index into `Server::clients`; dynamic in the sense that it may differ
    // between runs (assigned at login time, not at registration).
    client_idx: usize,
}
/// Shared server state, wrapped in `Arc<Mutex<_>>` and shared across one
/// handler thread per connection.
pub struct Server {
    clients: Vec<Client>, // registered clients; this always grows
    players: HashMap<Uuid, Player>, // clients that have successfully logged in
    pub dist_tx: Sender<DistKind<TcpStream>>, // channel to the distributor thread
}
impl Server {
pub fn new(ip: &str) {
let listener = TcpListener::bind(ip).unwrap();
let (dist_tx,mut dist) = Distributor::new();
thread::spawn(move || dist.run());
let server = Server {
clients: vec!(),
players: HashMap::new(),
dist_tx: dist_tx,
};
let server = Arc::new(Mutex::new(server));
for s in listener.incoming() {
match s {
Ok(s) => {
let server = server.clone();
thread::spawn(|| Server::handler(server,s));
},
_ => {},
}
}
}
fn handler (mut server: Arc<Mutex<Server>>, mut s: TcpStream) {
let mut cmd = [0;1];
//new conn needs auth code
let m = uuid::Uuid::new_v4();
s.write_all(m.as_bytes());
let mut client_idx = None;
loop {
if let Ok(_) = s.read_exact(&mut cmd) {
match cmd[0] {
0 => { //login
if let Some(c) = Client::load(&mut s) {
let mut server = server.lock().unwrap();
let mut reg_key = None;
for (i,n) in server.clients.iter_mut().enumerate() {
if n.id == c.id {
reg_key = Some(n.key);
client_idx = Some(i);
break
}
}
if let Some(key) = reg_key {
let hm = hmacsha1::hmac_sha1(&key, m.as_bytes());
if c.key == hm {
server.players.insert(c.id,
Player {client_idx:client_idx.unwrap()});
if let Ok(stmp) = s.try_clone() {
//n.stream = Some(Arc::new(Mutex::new(stmp)));
server.dist_tx.send(DistKind::Add(c.id,stmp));
}
println!("login:{:?}",c.id);
println!("total clients:{:?}",server.clients.len());
}
else {
panic!("client invalid login {:?}", c)
}
}
else { panic!("client unregistered {:?}", c) }
}
},
1 => { //register
if let Some(c) = Client::load(&mut s) {
let mut server = server.lock().unwrap();
for n in server.clients.iter() {
if n.id == c.id { continue }
}
println!("registered:{:?}",c.id);
server.clients.push(c);
}
},
2 => { //chat
let mut text = String::new();
{
let mut bs = BufReader::new(&s);
bs.read_line(&mut text);
}
if text.chars().count() > 0 {
println!("chat-client:{:?}",text.trim());
//broadcast
let mut data = Vec::new();
data.push(2u8);
data.append(&mut text.into_bytes());
let mut server = server.lock().unwrap();
server.dist_tx.send(DistKind::Broadcast(data));
}
},
_ => panic!("cmd:{:?}",cmd)
}
}
}
println!("client dropped");
}
}
Refactor: extract the authentication (login) and chat-routing logic into dedicated methods, and gate non-login commands behind a successful login.
use std::net::{TcpListener, TcpStream};
use std::collections::HashMap;
use std::thread;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Sender};
use std::io::prelude::*;
use std::io::BufReader;
extern crate hmacsha1;
extern crate uuid;
use self::uuid::Uuid;
use client::Client;
use distributor::Distributor;
use distributor::Kind as DistKind;
/// Per-login player record stored in `Server::players`, keyed by client id.
pub struct Player {
    // Index into `Server::clients`; dynamic in the sense that it may differ
    // between runs (assigned at login time, not at registration).
    client_idx: usize,
}
/// Shared server state, wrapped in `Arc<Mutex<_>>` and shared across one
/// handler thread per connection.
pub struct Server {
    clients: Vec<Client>, // registered clients; this always grows
    players: HashMap<Uuid, Player>, // clients that have successfully logged in
    pub dist_tx: Sender<DistKind<TcpStream>>, // channel to the distributor thread
}
impl Server {
    /// Binds `ip`, spawns the distributor thread, and accepts connections
    /// forever, handing each client to its own handler thread.
    pub fn new(ip: &str) {
        let listener = TcpListener::bind(ip).unwrap();
        let (dist_tx, mut dist) = Distributor::new();
        thread::spawn(move || dist.run());
        let server = Server {
            clients: vec!(),
            players: HashMap::new(),
            dist_tx: dist_tx,
        };
        let server = Arc::new(Mutex::new(server));
        for s in listener.incoming() {
            if let Ok(s) = s {
                let server = server.clone();
                thread::spawn(|| Server::handler(server, s));
            }
        }
    }

    /// Per-connection command loop: sends the auth nonce, then dispatches
    /// one-byte commands. Login (0) and register (1) are always allowed;
    /// everything else requires a successful login first.
    fn handler(mut server: Arc<Mutex<Server>>, mut s: TcpStream) {
        let mut cmd = [0; 1];
        //new conn needs auth code
        let m = uuid::Uuid::new_v4();
        if s.write_all(m.as_bytes()).is_err() { return }
        let mut client_idx = None;
        loop {
            if let Ok(_) = s.read_exact(&mut cmd) {
                match cmd[0] {
                    0 => { //login
                        client_idx = Server::login(&mut server, &mut s, m);
                    },
                    1 => { //register
                        if let Some(c) = Client::load(&mut s) {
                            let mut server = server.lock().unwrap();
                            // The previous dedup loop was a no-op (its `continue`
                            // only skipped an inner iteration); actually skip
                            // clients that are already registered.
                            if server.clients.iter().all(|n| n.id != c.id) {
                                println!("registered:{:?}", c.id);
                                server.clients.push(c);
                            }
                        }
                    },
                    _ => {
                        if client_idx.is_some() {
                            match cmd[0] {
                                2 => { //chat
                                    Server::chat(&mut server, &mut s);
                                },
                                _ => {
                                    // Unknown command from a logged-in client: drop
                                    // the connection rather than panic the thread.
                                    println!("unknown cmd:{:?}", cmd);
                                    break
                                }
                            }
                        }
                    }
                }
            }
            else { break } //drop dead client
        }
        println!("client dropped");
    }

    /// Reads one newline-terminated chat line from `s` and broadcasts it
    /// (prefixed with the chat command byte) via the distributor.
    fn chat(server: &mut Arc<Mutex<Server>>,
            s: &mut TcpStream) {
        let mut text = String::new();
        {
            let mut bs = BufReader::new(s);
            let _ = bs.read_line(&mut text);
        }
        if text.chars().count() > 0 {
            println!("chat-client:{:?}", text.trim());
            //broadcast
            let mut data = Vec::new();
            data.push(2u8);
            data.append(&mut text.into_bytes());
            let server = server.lock().unwrap();
            let _ = server.dist_tx.send(DistKind::Broadcast(data));
        }
    }

    /// Authenticates a client: it must already be registered, and its key must
    /// equal HMAC-SHA1(registered key, nonce `m`). On success the player is
    /// recorded, the stream is handed to the distributor, and the client's index
    /// into `clients` is returned. On any failure `None` is returned (previously
    /// this panicked while holding the server mutex, poisoning it for every
    /// other handler thread; it also leaked a `Some` index on a bad HMAC).
    fn login(server: &mut Arc<Mutex<Server>>,
             mut s: &mut TcpStream,
             m: Uuid) -> Option<usize> {
        let mut client_idx = None;
        if let Some(c) = Client::load(&mut s) {
            let mut server = server.lock().unwrap();
            let mut reg_key = None;
            for (i, n) in server.clients.iter_mut().enumerate() {
                if n.id == c.id {
                    reg_key = Some(n.key);
                    client_idx = Some(i);
                    break
                }
            }
            if let Some(key) = reg_key {
                let hm = hmacsha1::hmac_sha1(&key, m.as_bytes());
                if c.key == hm {
                    server.players.insert(c.id,
                                          Player { client_idx: client_idx.unwrap() });
                    if let Ok(stmp) = s.try_clone() {
                        //n.stream = Some(Arc::new(Mutex::new(stmp)));
                        let _ = server.dist_tx.send(DistKind::Add(c.id, stmp));
                    }
                    println!("login:{:?}", c.id);
                    println!("total clients:{:?}", server.clients.len());
                } else {
                    println!("client invalid login {:?}", c);
                    // Do not report a logged-in index for a failed HMAC check.
                    client_idx = None;
                }
            } else {
                println!("client unregistered {:?}", c);
            }
        }
        client_idx
    }
}
|
// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::cache::{storage_from_config, Storage};
use crate::compiler::{
get_compiler_info, CacheControl, CompileResult, Compiler, CompilerArguments, CompilerHasher,
CompilerKind, CompilerProxy, DistType, MissType,
};
#[cfg(feature = "dist-client")]
use crate::config;
use crate::config::Config;
use crate::dist;
use crate::jobserver::Client;
use crate::mock_command::{CommandCreatorSync, ProcessCommandCreator};
use crate::protocol::{Compile, CompileFinished, CompileResponse, Request, Response};
use crate::util;
#[cfg(feature = "dist-client")]
use anyhow::Context as _;
use bytes::{buf::BufMut, Bytes, BytesMut};
use filetime::FileTime;
use futures::channel::mpsc;
use futures::future::FutureExt;
use futures::{future, stream, Sink, SinkExt, Stream, StreamExt, TryFutureExt};
use futures_locks::RwLock;
use number_prefix::NumberPrefix;
use std::collections::HashMap;
use std::env;
use std::ffi::{OsStr, OsString};
use std::fs::metadata;
use std::future::Future;
use std::io::{self, Write};
use std::marker::Unpin;
#[cfg(feature = "dist-client")]
use std::mem;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::path::PathBuf;
use std::pin::Pin;
use std::process::{ExitStatus, Output};
use std::sync::Arc;
use std::sync::Mutex;
use std::task::{Context, Poll, Waker};
use std::time::Duration;
#[cfg(feature = "dist-client")]
use std::time::Instant;
use std::u64;
use tokio::{
io::{AsyncRead, AsyncWrite},
net::TcpListener,
runtime::Runtime,
time::{self, sleep, Sleep},
};
use tokio_serde::Framed;
use tokio_util::codec::{length_delimited, LengthDelimitedCodec};
use tower::Service;
use crate::errors::*;
/// If the server is idle for this many seconds, shut down.
/// (Overridable via `SCCACHE_IDLE_TIMEOUT`; 0 disables idle shutdown.)
const DEFAULT_IDLE_TIMEOUT: u64 = 600;
/// If the dist client couldn't be created, retry creation at this number
/// of seconds from now (or later)
#[cfg(feature = "dist-client")]
const DIST_CLIENT_RECREATE_TIMEOUT: Duration = Duration::from_secs(30);
/// Result of background server startup.
#[derive(Debug, Serialize, Deserialize)]
pub enum ServerStartup {
    /// Server started successfully on `port`.
    Ok { port: u16 },
    /// Server address was already in use.
    AddrInUse,
    /// Timed out waiting for server startup.
    TimedOut,
    /// Server encountered an error.
    Err { reason: String },
}
/// Get the time the server should idle for before shutting down.
fn get_idle_timeout() -> u64 {
// A value of 0 disables idle shutdown entirely.
env::var("SCCACHE_IDLE_TIMEOUT")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(DEFAULT_IDLE_TIMEOUT)
}
/// Serializes `status` as length-prefixed bincode onto `w`; shared by the
/// unix (socket) and windows (pipe) `notify_server_startup` implementations.
fn notify_server_startup_internal<W: Write>(mut w: W, status: ServerStartup) -> Result<()> {
    util::write_length_prefixed_bincode(&mut w, status)
}
#[cfg(unix)]
/// Reports the startup outcome to the Unix socket named in `name`, if any.
/// With no name configured this is a no-op.
fn notify_server_startup(name: &Option<OsString>, status: ServerStartup) -> Result<()> {
    use std::os::unix::net::UnixStream;
    let name = match name {
        Some(s) => s,
        None => return Ok(()),
    };
    debug!("notify_server_startup({:?})", status);
    notify_server_startup_internal(UnixStream::connect(name)?, status)
}
#[cfg(windows)]
/// Reports the startup outcome to the named pipe in `name`, if any.
/// With no name configured this is a no-op.
fn notify_server_startup(name: &Option<OsString>, status: ServerStartup) -> Result<()> {
    use std::fs::OpenOptions;
    let name = match *name {
        Some(ref s) => s,
        None => return Ok(()),
    };
    // Named pipes are opened like files on Windows.
    let pipe = OpenOptions::new().write(true).read(true).open(name)?;
    notify_server_startup_internal(pipe, status)
}
#[cfg(unix)]
/// Extracts the terminating signal from a child's exit status.
/// Panics if the process was not killed by a signal.
fn get_signal(status: ExitStatus) -> i32 {
    use std::os::unix::prelude::*;
    status.signal().expect("must have signal")
}
#[cfg(windows)]
/// Windows has no signals; callers must not reach this on this platform.
fn get_signal(_status: ExitStatus) -> i32 {
    panic!("no signals on windows")
}
/// Holder for the lazily-created, retryable distributed-compilation client.
pub struct DistClientContainer {
    // The actual dist client state
    #[cfg(feature = "dist-client")]
    state: Mutex<DistClientState>,
}
#[cfg(feature = "dist-client")]
/// Everything needed to (re)create a dist client; kept alongside the state so
/// the client can be rebuilt after failures (see `DistClientState::RetryCreateAt`).
struct DistClientConfig {
    // Reusable items tied to an SccacheServer instance
    pool: tokio::runtime::Handle,
    // From the static dist configuration
    scheduler_url: Option<config::HTTPUrl>,
    auth: config::DistAuth,
    cache_dir: PathBuf,
    toolchain_cache_size: u64,
    toolchains: Vec<config::DistToolchainConfig>,
    rewrite_includes_only: bool,
}
#[cfg(feature = "dist-client")]
/// State machine for the dist client. The per-variant `#[cfg(feature = "dist-client")]`
/// attributes were removed: the entire enum is already gated by that feature above,
/// so they were redundant.
enum DistClientState {
    /// A client was created successfully and is ready for use.
    Some(Box<DistClientConfig>, Arc<dyn dist::Client>),
    /// Creation failed in a way that should be reported to the user.
    FailWithMessage(Box<DistClientConfig>, String),
    /// Creation failed; retry no earlier than the given `Instant`.
    RetryCreateAt(Box<DistClientConfig>, Instant),
    /// Distributed compilation is disabled.
    Disabled,
}
#[cfg(not(feature = "dist-client"))]
impl DistClientContainer {
    /// Stub constructor used when the `dist-client` feature is compiled out.
    /// Warns if a scheduler is configured anyway, since it cannot be used.
    /// (The redundant `#[cfg(not(feature = "dist-client"))]` on this method was
    /// removed; the whole impl is already gated by the same cfg.)
    fn new(config: &Config, _: &tokio::runtime::Handle) -> Self {
        if config.dist.scheduler_url.is_some() {
            warn!("Scheduler address configured but dist feature disabled, disabling distributed sccache")
        }
        Self {}
    }

    /// Same as `new`, but never warns.
    pub fn new_disabled() -> Self {
        Self {}
    }

    /// The stub has no state to reset.
    pub fn reset_state(&self) {}

    /// Always reports distributed compilation as unavailable.
    pub async fn get_status(&self) -> DistInfo {
        DistInfo::Disabled("dist-client feature not selected".to_string())
    }

    /// The stub never produces a client.
    fn get_client(&self) -> Result<Option<Arc<dyn dist::Client>>> {
        Ok(None)
    }
}
#[cfg(feature = "dist-client")]
impl DistClientContainer {
    /// Builds a container from the dist section of `config` and immediately
    /// attempts to create the client (see `create_state`).
    fn new(config: &Config, pool: &tokio::runtime::Handle) -> Self {
        let config = DistClientConfig {
            pool: pool.clone(),
            scheduler_url: config.dist.scheduler_url.clone(),
            auth: config.dist.auth.clone(),
            cache_dir: config.dist.cache_dir.clone(),
            toolchain_cache_size: config.dist.toolchain_cache_size,
            toolchains: config.dist.toolchains.clone(),
            rewrite_includes_only: config.dist.rewrite_includes_only,
        };
        let state = Self::create_state(config);
        Self {
            state: Mutex::new(state),
        }
    }

    /// Creates a container whose state is permanently `Disabled`.
    pub fn new_disabled() -> Self {
        Self {
            state: Mutex::new(DistClientState::Disabled),
        }
    }

    /// Forces the client to be recreated on the next `get_client` call by
    /// moving any non-disabled state to `RetryCreateAt` with a time in the past.
    pub fn reset_state(&self) {
        let mut guard = self.state.lock();
        let state = guard.as_mut().unwrap();
        let state: &mut DistClientState = &mut **state;
        match mem::replace(state, DistClientState::Disabled) {
            DistClientState::Some(cfg, _)
            | DistClientState::FailWithMessage(cfg, _)
            | DistClientState::RetryCreateAt(cfg, _) => {
                warn!("State reset. Will recreate");
                // One second in the past guarantees the retry fires immediately.
                *state =
                    DistClientState::RetryCreateAt(cfg, Instant::now() - Duration::from_secs(1));
            }
            DistClientState::Disabled => (),
        }
    }

    /// Reports the scheduler connection status (used for status display).
    pub fn get_status(&self) -> impl Future<Output = DistInfo> {
        // This function can't be wholly async because we can't hold mutex guard
        // across the yield point - instead, either return an immediately ready
        // future or perform async query with the client cloned beforehand.
        let mut guard = self.state.lock();
        let state = guard.as_mut().unwrap();
        let state: &mut DistClientState = &mut **state;
        let (client, scheduler_url) = match state {
            DistClientState::Disabled => {
                return Either::Left(future::ready(DistInfo::Disabled("disabled".to_string())))
            }
            DistClientState::FailWithMessage(cfg, _) => {
                return Either::Left(future::ready(DistInfo::NotConnected(
                    cfg.scheduler_url.clone(),
                    "enabled, auth not configured".to_string(),
                )))
            }
            DistClientState::RetryCreateAt(cfg, _) => {
                return Either::Left(future::ready(DistInfo::NotConnected(
                    cfg.scheduler_url.clone(),
                    "enabled, not connected, will retry".to_string(),
                )))
            }
            DistClientState::Some(cfg, client) => (Arc::clone(client), cfg.scheduler_url.clone()),
        };
        // Guard is dropped here; safe to query the scheduler asynchronously.
        Either::Right(Box::pin(async move {
            match client.do_get_status().await {
                Ok(res) => DistInfo::SchedulerStatus(scheduler_url.clone(), res),
                Err(_) => DistInfo::NotConnected(
                    scheduler_url.clone(),
                    "could not communicate with scheduler".to_string(),
                ),
            }
        }))
    }

    /// Returns the dist client if one exists or can be (re)created.
    /// `Ok(None)` means dist compilation is unavailable right now; `Err` means
    /// it is misconfigured, in which case the state is downgraded to
    /// `RetryCreateAt` so the next call attempts creation again.
    fn get_client(&self) -> Result<Option<Arc<dyn dist::Client>>> {
        let mut guard = self.state.lock();
        let state = guard.as_mut().unwrap();
        let state: &mut DistClientState = &mut **state;
        Self::maybe_recreate_state(state);
        let res = match state {
            DistClientState::Some(_, dc) => Ok(Some(dc.clone())),
            DistClientState::Disabled | DistClientState::RetryCreateAt(_, _) => Ok(None),
            DistClientState::FailWithMessage(_, msg) => Err(anyhow!(msg.clone())),
        };
        if res.is_err() {
            let config = match mem::replace(state, DistClientState::Disabled) {
                DistClientState::FailWithMessage(config, _) => config,
                _ => unreachable!(),
            };
            // The client is most likely mis-configured, make sure we
            // re-create on our next attempt.
            *state =
                DistClientState::RetryCreateAt(config, Instant::now() - Duration::from_secs(1));
        }
        res
    }

    /// If the state is `RetryCreateAt` and the retry time has passed, attempts
    /// to recreate the client in place; otherwise leaves the state untouched.
    fn maybe_recreate_state(state: &mut DistClientState) {
        if let DistClientState::RetryCreateAt(_, instant) = *state {
            if instant > Instant::now() {
                return;
            }
            let config = match mem::replace(state, DistClientState::Disabled) {
                DistClientState::RetryCreateAt(config, _) => config,
                _ => unreachable!(),
            };
            info!("Attempting to recreate the dist client");
            *state = Self::create_state(*config)
        }
    }

    // Attempt to recreate the dist client
    fn create_state(config: DistClientConfig) -> DistClientState {
        // Transient failures map to `RetryCreateAt` (try again later).
        macro_rules! try_or_retry_later {
            ($v:expr) => {{
                match $v {
                    Ok(v) => v,
                    Err(e) => {
                        // `{:?}` prints the full cause chain and backtrace.
                        error!("{:?}", e);
                        return DistClientState::RetryCreateAt(
                            Box::new(config),
                            Instant::now() + DIST_CLIENT_RECREATE_TIMEOUT,
                        );
                    }
                }
            }};
        }
        // Non-retryable failures map to `FailWithMessage` (surface to the user).
        macro_rules! try_or_fail_with_message {
            ($v:expr) => {{
                match $v {
                    Ok(v) => v,
                    Err(e) => {
                        // `{:?}` prints the full cause chain and backtrace.
                        let errmsg = format!("{:?}", e);
                        error!("{}", errmsg);
                        return DistClientState::FailWithMessage(
                            Box::new(config),
                            errmsg.to_string(),
                        );
                    }
                }
            }};
        }
        match config.scheduler_url {
            Some(ref addr) => {
                let url = addr.to_url();
                info!("Enabling distributed sccache to {}", url);
                let auth_token = match &config.auth {
                    config::DistAuth::Token { token } => Ok(token.to_owned()),
                    config::DistAuth::Oauth2CodeGrantPKCE { auth_url, .. }
                    | config::DistAuth::Oauth2Implicit { auth_url, .. } => {
                        Self::get_cached_config_auth_token(auth_url)
                    }
                };
                let auth_token = try_or_fail_with_message!(auth_token
                    .context("could not load client auth token, run |sccache --dist-auth|"));
                let dist_client = dist::http::Client::new(
                    &config.pool,
                    url,
                    &config.cache_dir.join("client"),
                    config.toolchain_cache_size,
                    &config.toolchains,
                    auth_token,
                    config.rewrite_includes_only,
                );
                let dist_client =
                    try_or_retry_later!(dist_client.context("failure during dist client creation"));
                use crate::dist::Client;
                // Probe the scheduler once so a bad address is treated as retryable.
                match config.pool.block_on(dist_client.do_get_status()) {
                    Ok(res) => {
                        info!(
                            "Successfully created dist client with {:?} cores across {:?} servers",
                            res.num_cpus, res.num_servers
                        );
                        DistClientState::Some(Box::new(config), Arc::new(dist_client))
                    }
                    Err(_) => {
                        warn!("Scheduler address configured, but could not communicate with scheduler");
                        DistClientState::RetryCreateAt(
                            Box::new(config),
                            Instant::now() + DIST_CLIENT_RECREATE_TIMEOUT,
                        )
                    }
                }
            }
            None => {
                info!("No scheduler address configured, disabling distributed sccache");
                DistClientState::Disabled
            }
        }
    }

    /// Looks up the cached auth token for `auth_url` saved by `sccache --dist-auth`.
    fn get_cached_config_auth_token(auth_url: &str) -> Result<String> {
        let cached_config = config::CachedConfig::reload()?;
        cached_config
            .with(|c| c.dist.auth_tokens.get(auth_url).map(String::to_owned))
            .with_context(|| format!("token for url {} not present in cached config", auth_url))
    }
}
/// Start an sccache server, listening on `port`.
///
/// Spins an event loop handling client connections until a client
/// requests a shutdown.
pub fn start_server(config: &Config, port: u16) -> Result<()> {
    info!("start_server: port: {}", port);
    // NOTE(review): jobserver `Client::new` is `unsafe`; confirm its safety
    // contract (fd/env inheritance) against the jobserver crate docs.
    let client = unsafe { Client::new() };
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .worker_threads(std::cmp::max(20, 2 * num_cpus::get()))
        .build()?;
    let pool = runtime.handle().clone();
    let dist_client = DistClientContainer::new(config, &pool);
    let storage = storage_from_config(config, &pool);
    let res =
        SccacheServer::<ProcessCommandCreator>::new(port, runtime, client, dist_client, storage);
    let notify = env::var_os("SCCACHE_STARTUP_NOTIFY");
    match res {
        Ok(srv) => {
            let port = srv.port();
            info!("server started, listening on port {}", port);
            // Tell the spawning process (if any) that startup succeeded.
            notify_server_startup(&notify, ServerStartup::Ok { port })?;
            srv.run(future::pending::<()>())?;
            Ok(())
        }
        Err(e) => {
            error!("failed to start server: {}", e);
            // Distinguish "port taken" from other startup errors for the notifier.
            match e.downcast_ref::<io::Error>() {
                Some(io_err) if io::ErrorKind::AddrInUse == io_err.kind() => {
                    notify_server_startup(&notify, ServerStartup::AddrInUse)?;
                }
                _ => {
                    let reason = e.to_string();
                    notify_server_startup(&notify, ServerStartup::Err { reason })?;
                }
            };
            Err(e)
        }
    }
}
/// An sccache server: accepts client connections until explicitly shut down
/// or idle for longer than `timeout`.
pub struct SccacheServer<C: CommandCreatorSync> {
    runtime: Runtime,                  // tokio runtime everything executes on
    listener: TcpListener,
    rx: mpsc::Receiver<ServerMessage>, // activity/shutdown messages from services
    timeout: Duration,                 // idle period before self-shutdown
    service: SccacheService<C>,
    wait: WaitUntilZero,               // resolves when all service clones are dropped
}
impl<C: CommandCreatorSync> SccacheServer<C> {
    /// Binds 127.0.0.1:`port` and assembles the service that will handle every
    /// incoming connection. Serving does not begin until `run` is called.
    pub fn new(
        port: u16,
        runtime: Runtime,
        client: Client,
        dist_client: DistClientContainer,
        storage: Arc<dyn Storage>,
    ) -> Result<SccacheServer<C>> {
        let addr = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), port);
        let listener = runtime.block_on(TcpListener::bind(&SocketAddr::V4(addr)))?;
        // Prepare the service which we'll use to service all incoming TCP
        // connections.
        let (tx, rx) = mpsc::channel(1);
        let (wait, info) = WaitUntilZero::new();
        let pool = runtime.handle().clone();
        let service = SccacheService::new(dist_client, storage, &client, pool, tx, info);
        Ok(SccacheServer {
            runtime,
            listener,
            rx,
            service,
            timeout: Duration::from_secs(get_idle_timeout()),
            wait,
        })
    }

    /// Configures how long this server will be idle before shutting down.
    #[allow(dead_code)]
    pub fn set_idle_timeout(&mut self, timeout: Duration) {
        self.timeout = timeout;
    }

    /// Set the storage this server will use.
    #[allow(dead_code)]
    pub fn set_storage(&mut self, storage: Arc<dyn Storage>) {
        self.service.storage = storage;
    }

    /// Returns a reference to a thread pool to run work on
    #[allow(dead_code)]
    pub fn pool(&self) -> &tokio::runtime::Handle {
        &self.service.rt
    }

    /// Returns a reference to the command creator this server will use
    #[allow(dead_code)]
    pub fn command_creator(&self) -> &C {
        &self.service.creator
    }

    /// Returns the port that this server is bound to
    #[allow(dead_code)]
    pub fn port(&self) -> u16 {
        self.listener.local_addr().unwrap().port()
    }

    /// Runs this server to completion.
    ///
    /// If the `shutdown` future resolves then the server will be shut down,
    /// otherwise the server may naturally shut down if it becomes idle for too
    /// long anyway.
    pub fn run<F>(self, shutdown: F) -> io::Result<()>
    where
        F: Future,
        C: Send,
    {
        let SccacheServer {
            runtime,
            listener,
            rx,
            service,
            timeout,
            wait,
        } = self;
        // Create our "server future" which will simply handle all incoming
        // connections in separate tasks.
        let server = async move {
            loop {
                let (socket, _) = listener.accept().await?;
                trace!("incoming connection");
                let conn = service.clone().bind(socket).map_err(|res| {
                    error!("Failed to bind socket: {}", res);
                });
                // We're not interested if the task panicked; immediately process
                // another connection
                let _ = tokio::spawn(conn);
            }
        };
        // Right now there's a whole bunch of ways to shut down this server for
        // various purposes. These include:
        //
        // 1. The `shutdown` future above.
        // 2. An RPC indicating the server should shut down
        // 3. A period of inactivity (no requests serviced)
        //
        // These are all encapsulated with the future that we're creating below.
        // The `ShutdownOrInactive` indicates the RPC or the period of
        // inactivity, and this is then select'd with the `shutdown` future
        // passed to this function.
        let shutdown = shutdown.map(|_| {
            info!("shutting down due to explicit signal");
        });
        let shutdown_idle = async {
            ShutdownOrInactive {
                rx,
                timeout: if timeout != Duration::new(0, 0) {
                    Some(Box::pin(sleep(timeout)))
                } else {
                    // A zero timeout disables idle shutdown entirely.
                    None
                },
                timeout_dur: timeout,
            }
            .await;
            info!("shutting down due to being idle or request");
        };
        runtime.block_on(async {
            futures::select! {
                server = server.fuse() => server,
                _res = shutdown.fuse() => Ok(()),
                _res = shutdown_idle.fuse() => Ok::<_, io::Error>(()),
            }
        })?;
        const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10);
        info!(
            "moving into the shutdown phase now, waiting at most {} seconds \
             for all client requests to complete",
            SHUTDOWN_TIMEOUT.as_secs()
        );
        // Once our server has shut down either due to inactivity or a manual
        // request we still need to give a bit of time for all active
        // connections to finish. This `wait` future will resolve once all
        // instances of `SccacheService` have been dropped.
        //
        // Note that we cap the amount of time this can take, however, as we
        // don't want to wait *too* long.
        runtime.block_on(async { time::timeout(SHUTDOWN_TIMEOUT, wait).await })?;
        info!("ok, fully shutting down now");
        Ok(())
    }
}
/// Maps a compiler proxy path to a compiler proxy and its last modification time
type CompilerProxyMap<C> = HashMap<PathBuf, (Box<dyn CompilerProxy<C>>, FileTime)>;
/// Maps a compiler executable path to its cache entry, if any.
/// (Presumably `None` caches a failed compiler lookup — TODO confirm at the use sites.)
type CompilerMap<C> = HashMap<PathBuf, Option<CompilerCacheEntry<C>>>;
/// entry of the compiler cache
struct CompilerCacheEntry<C> {
    /// compiler argument trait obj
    pub compiler: Box<dyn Compiler<C>>,
    /// modification time of the compilers executable file
    pub mtime: FileTime,
    /// distributed compilation extra info
    pub dist_info: Option<(PathBuf, FileTime)>,
}
impl<C> CompilerCacheEntry<C> {
fn new(
compiler: Box<dyn Compiler<C>>,
mtime: FileTime,
dist_info: Option<(PathBuf, FileTime)>,
) -> Self {
Self {
compiler,
mtime,
dist_info,
}
}
}
/// Service implementation for sccache
#[derive(Clone)]
struct SccacheService<C>
where
    C: Send,
{
    /// Server statistics.
    stats: Arc<RwLock<ServerStats>>,
    /// Distributed sccache client
    dist_client: Arc<DistClientContainer>,
    /// Cache storage.
    storage: Arc<dyn Storage>,
    /// A cache of known compiler info.
    compilers: Arc<RwLock<CompilerMap<C>>>,
    /// map the cwd with compiler proxy path to a proxy resolver, which
    /// will dynamically resolve the input compiler for the current context
    /// (usually file or current working directory)
    /// the associated `FileTime` is the modification time of
    /// the compiler proxy, in order to track updates of the proxy itself
    compiler_proxies: Arc<RwLock<CompilerProxyMap<C>>>,
    /// Task pool for blocking (used mostly for disk I/O-bound tasks) and
    /// non-blocking tasks
    rt: tokio::runtime::Handle,
    /// An object for creating commands.
    ///
    /// This is mostly useful for unit testing, where we
    /// can mock this out.
    creator: C,
    /// Message channel used to learn about requests received by this server.
    ///
    /// Note that messages sent along this channel will keep the server alive
    /// (reset the idle timer) and this channel can also be used to shut down
    /// the entire server immediately via a message.
    tx: mpsc::Sender<ServerMessage>,
    /// Information tracking how many services (connected clients) are active.
    info: ActiveInfo,
}
/// A request as seen by the service; requests never carry a streamed body.
type SccacheRequest = Message<Request, Body<()>>;
/// A response, optionally carrying a streamed body of further `Response`
/// values (e.g. `CompileFinished` following a `CompileStarted` header).
type SccacheResponse = Message<Response, Body<Response>>;
/// Messages sent from all services to the main event loop indicating activity.
///
/// Whenever a request is received a `Request` message is sent which will reset
/// the idle shutdown timer, and otherwise a `Shutdown` message indicates that
/// a server shutdown was requested via an RPC.
pub enum ServerMessage {
    /// A message sent whenever a request is received.
    Request,
    /// Message sent whenever a shutdown request is received.
    Shutdown,
}
impl<C> Service<SccacheRequest> for Arc<SccacheService<C>>
where
    C: CommandCreatorSync + Send + Sync + 'static,
{
    type Response = SccacheResponse;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response>> + Send + 'static>>;
    /// Dispatch one decoded client request to the matching handler and
    /// return a boxed future producing the response.
    fn call(&mut self, req: SccacheRequest) -> Self::Future {
        trace!("handle_client");
        // Opportunistically let channel know that we've received a request. We
        // ignore failures here as well as backpressure as it's not imperative
        // that every message is received.
        drop(self.tx.clone().start_send(ServerMessage::Request));
        // Cheap Arc clone, moved into the response future below.
        let me = self.clone();
        Box::pin(async move {
            match req.into_inner() {
                Request::Compile(compile) => {
                    debug!("handle_client: compile");
                    me.stats.write().await.compile_requests += 1;
                    me.handle_compile(compile).await
                }
                Request::GetStats => {
                    debug!("handle_client: get_stats");
                    me.get_info()
                        .await
                        .map(|i| Response::Stats(Box::new(i)))
                        .map(Message::WithoutBody)
                }
                Request::DistStatus => {
                    debug!("handle_client: dist_status");
                    me.get_dist_status()
                        .await
                        .map(Response::DistStatus)
                        .map(Message::WithoutBody)
                }
                Request::ZeroStats => {
                    debug!("handle_client: zero_stats");
                    me.zero_stats().await;
                    me.get_info()
                        .await
                        .map(|i| Response::Stats(Box::new(i)))
                        .map(Message::WithoutBody)
                }
                Request::Shutdown => {
                    debug!("handle_client: shutdown");
                    let mut tx = me.tx.clone();
                    // Signal the event loop to shut down while concurrently
                    // gathering the final stats to send back to the client.
                    future::try_join(
                        async {
                            let _ = tx.send(ServerMessage::Shutdown).await;
                            Ok(())
                        },
                        me.get_info(),
                    )
                    .await
                    .map(move |(_, info)| {
                        Message::WithoutBody(Response::ShuttingDown(Box::new(info)))
                    })
                }
            }
        })
    }
    /// The service itself applies no backpressure; always ready.
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<()>> {
        Poll::Ready(Ok(()))
    }
}
use futures::future::Either;
use futures::TryStreamExt;
impl<C> SccacheService<C>
where
C: CommandCreatorSync + Clone + Send + Sync + 'static,
{
    /// Construct a new `SccacheService`.
    ///
    /// `client` is only used to build the command creator; shared state
    /// (stats, compiler caches) starts empty behind `Arc<RwLock<_>>`.
    pub fn new(
        dist_client: DistClientContainer,
        storage: Arc<dyn Storage>,
        client: &Client,
        rt: tokio::runtime::Handle,
        tx: mpsc::Sender<ServerMessage>,
        info: ActiveInfo,
    ) -> SccacheService<C> {
        SccacheService {
            stats: Arc::new(RwLock::new(ServerStats::default())),
            dist_client: Arc::new(dist_client),
            storage,
            compilers: Arc::new(RwLock::new(HashMap::new())),
            compiler_proxies: Arc::new(RwLock::new(HashMap::new())),
            rt,
            creator: C::new(client),
            tx,
            info,
        }
    }
    /// Serve one client connection on `socket` until the stream ends.
    ///
    /// Frames the socket (length-delimited + bincode), feeds each decoded
    /// request through `Service::call`, and writes responses — including any
    /// streamed body chunks — back to the same socket.
    fn bind<T>(self, socket: T) -> impl Future<Output = Result<()>> + Send + Sized + 'static
    where
        T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
    {
        let mut builder = length_delimited::Builder::new();
        // Frame size is configurable for clients sending very large requests.
        if let Ok(max_frame_length_str) = env::var("SCCACHE_MAX_FRAME_LENGTH") {
            if let Ok(max_frame_length) = max_frame_length_str.parse::<usize>() {
                builder.max_frame_length(max_frame_length);
            } else {
                warn!("Content of SCCACHE_MAX_FRAME_LENGTH is not a valid number, using default");
            }
        }
        let io = builder.new_framed(socket);
        let (sink, stream) = SccacheTransport {
            inner: Framed::new(io.sink_err_into().err_into(), BincodeCodec),
        }
        .split();
        let sink = sink.sink_err_into::<Error>();
        let me = Arc::new(self);
        stream
            .err_into::<Error>()
            .and_then(move |input| me.clone().call(input))
            .and_then(move |message| async move {
                // Flatten each response into a stream of wire frames:
                // header only, or header + body chunks + a terminating
                // `chunk: None` marker.
                let fut = match message {
                    Message::WithoutBody(message) => {
                        let stream = stream::once(async move { Ok(Frame::Message { message }) });
                        Either::Left(stream)
                    }
                    Message::WithBody(message, body) => {
                        let stream = stream::once(async move { Ok(Frame::Message { message }) })
                            .chain(body.map_ok(|chunk| Frame::Body { chunk: Some(chunk) }))
                            .chain(stream::once(async move { Ok(Frame::Body { chunk: None }) }));
                        Either::Right(stream)
                    }
                };
                Ok(Box::pin(fut))
            })
            .try_flatten()
            .forward(sink)
    }
/// Get dist status.
async fn get_dist_status(&self) -> Result<DistInfo> {
Ok(self.dist_client.get_status().await)
}
/// Get info and stats about the cache.
async fn get_info(&self) -> Result<ServerInfo> {
let stats = self.stats.read().await.clone();
let cache_location = self.storage.location();
futures::try_join!(self.storage.current_size(), self.storage.max_size()).map(
move |(cache_size, max_cache_size)| ServerInfo {
stats,
cache_location,
cache_size,
max_cache_size,
},
)
}
/// Zero stats about the cache.
async fn zero_stats(&self) {
*self.stats.write().await = ServerStats::default();
}
/// Handle a compile request from a client.
///
/// This will handle a compile request entirely, generating a response with
/// the inital information and an optional body which will eventually
/// contain the results of the compilation.
async fn handle_compile(&self, compile: Compile) -> Result<SccacheResponse> {
let exe = compile.exe;
let cmd = compile.args;
let cwd: PathBuf = compile.cwd.into();
let env_vars = compile.env_vars;
let me = self.clone();
let info = self.compiler_info(exe.into(), cwd.clone(), &env_vars).await;
Ok(me.check_compiler(info, cmd, cwd, env_vars).await)
}
    /// Look up compiler info from the cache for the compiler `path`.
    /// If not cached, determine the compiler type and cache the result.
    ///
    /// `path` may name a compiler proxy; in that case the proxy is resolved
    /// first and the cache entry is keyed by the *resolved* compiler path.
    /// A failed detection is cached as `None` under the original `path`.
    async fn compiler_info(
        &self,
        path: PathBuf,
        cwd: PathBuf,
        env: &[(OsString, OsString)],
    ) -> Result<Box<dyn Compiler<C>>> {
        trace!("compiler_info");
        let me = self.clone();
        let me1 = self.clone();
        // lookup if compiler proxy exists for the current compiler path
        let path2 = path.clone();
        let path1 = path.clone();
        let env = env.to_vec();
        let resolved_with_proxy = {
            let compiler_proxies_borrow = self.compiler_proxies.read().await;
            // Create an owned future - compiler proxy is not Send so we can't
            // really await while borrowing the proxy since rustc is too conservative
            let resolve_proxied_executable =
                compiler_proxies_borrow
                    .get(&path)
                    .map(|(compiler_proxy, _filetime)| {
                        compiler_proxy.resolve_proxied_executable(
                            self.creator.clone(),
                            cwd.clone(),
                            env.as_slice(),
                        )
                    });
            // Proxy resolution failures fall through to using `path` directly.
            match resolve_proxied_executable {
                Some(fut) => fut.await.ok(),
                None => None,
            }
        };
        // use the supplied compiler path as fallback, lookup its modification time too
        let (resolved_compiler_path, mtime) = match resolved_with_proxy {
            Some(x) => x, // TODO resolve the path right away
            _ => {
                // fallback to using the path directly
                metadata(&path2)
                    .map(|attr| FileTime::from_last_modification_time(&attr))
                    .ok()
                    .map(move |filetime| (path2, filetime))
                    .expect("Must contain sane data, otherwise mtime is not avail")
            }
        };
        // Look up any custom dist toolchain archive (and its mtime) for this
        // compiler; it participates in cache-entry validity below.
        let dist_info = match me1.dist_client.get_client() {
            Ok(Some(ref client)) => {
                if let Some(archive) = client.get_custom_toolchain(&resolved_compiler_path) {
                    match metadata(&archive)
                        .map(|attr| FileTime::from_last_modification_time(&attr))
                    {
                        Ok(mtime) => Some((archive, mtime)),
                        _ => None,
                    }
                } else {
                    None
                }
            }
            _ => None,
        };
        let opt = match me1.compilers.read().await.get(&resolved_compiler_path) {
            // It's a hit only if the mtime and dist archive data matches.
            Some(&Some(ref entry)) => {
                if entry.mtime == mtime && entry.dist_info == dist_info {
                    Some(entry.compiler.box_clone())
                } else {
                    None
                }
            }
            _ => None,
        };
        match opt {
            Some(info) => {
                trace!("compiler_info cache hit");
                Ok(info)
            }
            None => {
                trace!("compiler_info cache miss");
                // Check the compiler type and return the result when
                // finished. This generally involves invoking the compiler,
                // so do it asynchronously.
                // the compiler path might be compiler proxy, so it is important to use
                // `path` (or its clone `path1`) to resolve using that one, not using `resolved_compiler_path`
                let info = get_compiler_info::<C>(
                    me.creator.clone(),
                    &path1,
                    &cwd,
                    env.as_slice(),
                    &me.rt,
                    dist_info.clone().map(|(p, _)| p),
                )
                .await;
                let (c, proxy) = match info {
                    Ok((c, proxy)) => (c.clone(), proxy.clone()),
                    Err(err) => {
                        // Cache the failure under the original path so the
                        // same unusable compiler is not re-probed.
                        trace!("Inserting PLAIN cache map info for {:?}", &path);
                        me.compilers.write().await.insert(path, None);
                        return Err(err);
                    }
                };
                // register the proxy for this compiler, so it will be used directly from now on
                // and the true/resolved compiler will create table hits in the hash map
                // based on the resolved path
                if let Some(proxy) = proxy {
                    trace!(
                        "Inserting new path proxy {:?} @ {:?} -> {:?}",
                        &path,
                        &cwd,
                        resolved_compiler_path
                    );
                    me.compiler_proxies
                        .write()
                        .await
                        .insert(path, (proxy, mtime));
                }
                // TODO add some safety checks in case a proxy exists, that the initial `path` is not
                // TODO the same as the resolved compiler binary
                // cache
                let map_info = CompilerCacheEntry::new(c.clone(), mtime, dist_info);
                trace!(
                    "Inserting POSSIBLY PROXIED cache map info for {:?}",
                    &resolved_compiler_path
                );
                me.compilers
                    .write()
                    .await
                    .insert(resolved_compiler_path, Some(map_info));
                // drop the proxy information, response is compiler only
                Ok(c)
            }
        }
    }
    /// Check that we can handle and cache `cmd` when run with `compiler`.
    /// If so, run `start_compile_task` to execute it.
    ///
    /// Also updates the request counters; note the stats write lock is held
    /// across the whole match, including the early returns.
    async fn check_compiler(
        &self,
        compiler: Result<Box<dyn Compiler<C>>>,
        cmd: Vec<OsString>,
        cwd: PathBuf,
        env_vars: Vec<(OsString, OsString)>,
    ) -> SccacheResponse {
        let mut stats = self.stats.write().await;
        match compiler {
            Err(e) => {
                debug!("check_compiler: Unsupported compiler: {}", e.to_string());
                stats.requests_unsupported_compiler += 1;
                return Message::WithoutBody(Response::Compile(
                    CompileResponse::UnsupportedCompiler(OsString::from(e.to_string())),
                ));
            }
            Ok(c) => {
                debug!("check_compiler: Supported compiler");
                // Now check that we can handle this compiler with
                // the provided commandline.
                match c.parse_arguments(&cmd, &cwd) {
                    CompilerArguments::Ok(hasher) => {
                        debug!("parse_arguments: Ok: {:?}", cmd);
                        stats.requests_executed += 1;
                        // The compile runs in the background; the client gets
                        // a `CompileStarted` header now and the result later
                        // via the body channel `rx`.
                        let (tx, rx) = Body::pair();
                        self.start_compile_task(c, hasher, cmd, cwd, env_vars, tx);
                        let res = CompileResponse::CompileStarted;
                        return Message::WithBody(Response::Compile(res), rx);
                    }
                    CompilerArguments::CannotCache(why, extra_info) => {
                        if let Some(extra_info) = extra_info {
                            debug!(
                                "parse_arguments: CannotCache({}, {}): {:?}",
                                why, extra_info, cmd
                            )
                        } else {
                            debug!("parse_arguments: CannotCache({}): {:?}", why, cmd)
                        }
                        stats.requests_not_cacheable += 1;
                        *stats.not_cached.entry(why.to_string()).or_insert(0) += 1;
                    }
                    CompilerArguments::NotCompilation => {
                        debug!("parse_arguments: NotCompilation: {:?}", cmd);
                        stats.requests_not_compile += 1;
                    }
                }
            }
        }
        // Fall-through for CannotCache / NotCompilation.
        let res = CompileResponse::UnhandledCompile;
        Message::WithoutBody(Response::Compile(res))
    }
    /// Given compiler arguments `arguments`, look up
    /// a compile result in the cache or execute the compilation and store
    /// the result in the cache.
    ///
    /// The work is spawned onto the runtime; the eventual `CompileFinished`
    /// response is delivered through `tx` (the body channel created by
    /// `check_compiler`).
    fn start_compile_task(
        &self,
        compiler: Box<dyn Compiler<C>>,
        hasher: Box<dyn CompilerHasher<C>>,
        arguments: Vec<OsString>,
        cwd: PathBuf,
        env_vars: Vec<(OsString, OsString)>,
        mut tx: mpsc::Sender<Result<Response>>,
    ) {
        // SCCACHE_RECACHE forces a cache write even on what would be a hit.
        let force_recache = env_vars
            .iter()
            .any(|&(ref k, ref _v)| k.as_os_str() == OsStr::new("SCCACHE_RECACHE"));
        let cache_control = if force_recache {
            CacheControl::ForceRecache
        } else {
            CacheControl::Default
        };
        let out_pretty = hasher.output_pretty().into_owned();
        let color_mode = hasher.color_mode();
        let me = self.clone();
        let kind = compiler.kind();
        let dist_client = self.dist_client.get_client();
        let creator = self.creator.clone();
        let storage = self.storage.clone();
        let pool = self.rt.clone();
        let task = async move {
            let result = match dist_client {
                Ok(client) => {
                    hasher
                        .get_cached_or_compile(
                            client,
                            creator,
                            storage,
                            arguments,
                            cwd,
                            env_vars,
                            cache_control,
                            pool,
                        )
                        .await
                }
                Err(e) => Err(e),
            };
            let mut cache_write = None;
            let mut res = CompileFinished {
                color_mode,
                ..Default::default()
            };
            // Update per-outcome statistics and fill in the response.
            match result {
                Ok((compiled, out)) => {
                    let mut stats = me.stats.write().await;
                    match compiled {
                        CompileResult::Error => {
                            stats.cache_errors.increment(&kind);
                        }
                        CompileResult::CacheHit(duration) => {
                            stats.cache_hits.increment(&kind);
                            stats.cache_read_hit_duration += duration;
                        }
                        CompileResult::CacheMiss(miss_type, dist_type, duration, future) => {
                            match dist_type {
                                DistType::NoDist => {}
                                DistType::Ok(id) => {
                                    let server = id.addr().to_string();
                                    let server_count =
                                        stats.dist_compiles.entry(server).or_insert(0);
                                    *server_count += 1;
                                }
                                DistType::Error => stats.dist_errors += 1,
                            }
                            match miss_type {
                                MissType::Normal => {}
                                MissType::ForcedRecache => {
                                    stats.forced_recaches += 1;
                                }
                                MissType::TimedOut => {
                                    stats.cache_timeouts += 1;
                                }
                                MissType::CacheReadError => {
                                    stats.cache_errors.increment(&kind);
                                }
                            }
                            stats.cache_misses.increment(&kind);
                            stats.cache_read_miss_duration += duration;
                            // The storage write completes later, concurrently
                            // with sending the response (see below).
                            cache_write = Some(future);
                        }
                        CompileResult::NotCacheable => {
                            stats.cache_misses.increment(&kind);
                            stats.non_cacheable_compilations += 1;
                        }
                        CompileResult::CompileFailed => {
                            stats.compile_fails += 1;
                        }
                    };
                    let Output {
                        status,
                        stdout,
                        stderr,
                    } = out;
                    trace!("CompileFinished retcode: {}", status);
                    match status.code() {
                        Some(code) => res.retcode = Some(code),
                        None => res.signal = Some(get_signal(status)),
                    };
                    res.stdout = stdout;
                    res.stderr = stderr;
                }
                Err(err) => {
                    let mut stats = me.stats.write().await;
                    // Distinguish compiler failures, dist-client HTTP errors,
                    // and everything else (reported as a fatal error).
                    match err.downcast::<ProcessError>() {
                        Ok(ProcessError(output)) => {
                            debug!("Compilation failed: {:?}", output);
                            stats.compile_fails += 1;
                            match output.status.code() {
                                Some(code) => res.retcode = Some(code),
                                None => res.signal = Some(get_signal(output.status)),
                            };
                            res.stdout = output.stdout;
                            res.stderr = output.stderr;
                        }
                        Err(err) => match err.downcast::<HttpClientError>() {
                            Ok(HttpClientError(msg)) => {
                                me.dist_client.reset_state();
                                let errmsg =
                                    format!("[{:?}] http error status: {}", out_pretty, msg);
                                error!("{}", errmsg);
                                res.retcode = Some(1);
                                res.stderr = errmsg.as_bytes().to_vec();
                            }
                            Err(err) => {
                                use std::fmt::Write;
                                error!("[{:?}] fatal error: {}", out_pretty, err);
                                let mut error = "sccache: encountered fatal error\n".to_string();
                                let _ = writeln!(error, "sccache: error: {}", err);
                                for e in err.chain() {
                                    error!("[{:?}] \t{}", out_pretty, e);
                                    let _ = writeln!(error, "sccache: caused by: {}", e);
                                }
                                stats.cache_errors.increment(&kind);
                                //TODO: figure out a better way to communicate this?
                                res.retcode = Some(-2);
                                res.stderr = error.into_bytes();
                            }
                        },
                    }
                }
            };
            let send = tx
                .send(Ok(Response::CompileFinished(res)))
                .map_err(|e| anyhow!("send on finish failed").context(e));
            let me = me.clone();
            let cache_write = async move {
                if let Some(cache_write) = cache_write {
                    match cache_write.await {
                        Err(e) => {
                            debug!("Error executing cache write: {}", e);
                            me.stats.write().await.cache_write_errors += 1;
                        }
                        //TODO: save cache stats!
                        Ok(info) => {
                            debug!(
                                "[{}]: Cache write finished in {}",
                                info.object_file_pretty,
                                util::fmt_duration_as_secs(&info.duration)
                            );
                            let mut stats = me.stats.write().await;
                            stats.cache_writes += 1;
                            stats.cache_write_duration += info.duration;
                        }
                    }
                }
                Ok(())
            };
            // Send the response and finish the cache write concurrently.
            futures::future::try_join(send, cache_write).await?;
            Ok::<_, Error>(())
        };
        self.rt.spawn(async move {
            task.await
                .unwrap_or_else(|e| warn!("Failed to execute task: {:?}", e));
        });
    }
}
/// A per-language counter keyed by the compiler's language kind string.
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct PerLanguageCount {
    counts: HashMap<String, u64>,
}
impl PerLanguageCount {
    /// Bump the counter for the language associated with `kind`.
    fn increment(&mut self, kind: &CompilerKind) {
        *self.counts.entry(kind.lang_kind()).or_default() += 1;
    }
    /// Total count across all languages.
    pub fn all(&self) -> u64 {
        self.counts.values().sum()
    }
    /// Count for a single language key, if any was recorded.
    pub fn get(&self, key: &str) -> Option<&u64> {
        self.counts.get(key)
    }
    /// An empty counter; equivalent to `Default::default()`.
    pub fn new() -> PerLanguageCount {
        PerLanguageCount::default()
    }
}
/// Statistics about the server.
///
/// All counters are cumulative since server start (or the last `zero_stats`).
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ServerStats {
    /// The count of client compile requests.
    pub compile_requests: u64,
    /// The count of client requests that used an unsupported compiler.
    pub requests_unsupported_compiler: u64,
    /// The count of client requests that were not compilation.
    pub requests_not_compile: u64,
    /// The count of client requests that were not cacheable.
    pub requests_not_cacheable: u64,
    /// The count of client requests that were executed.
    pub requests_executed: u64,
    /// The count of errors handling compile requests (per language).
    pub cache_errors: PerLanguageCount,
    /// The count of cache hits for handled compile requests (per language).
    pub cache_hits: PerLanguageCount,
    /// The count of cache misses for handled compile requests (per language).
    pub cache_misses: PerLanguageCount,
    /// The count of cache misses because the cache took too long to respond.
    pub cache_timeouts: u64,
    /// The count of errors reading cache entries.
    pub cache_read_errors: u64,
    /// The count of compilations which were successful but couldn't be cached.
    pub non_cacheable_compilations: u64,
    /// The count of compilations which forcibly ignored the cache.
    pub forced_recaches: u64,
    /// The count of errors writing to cache.
    pub cache_write_errors: u64,
    /// The number of successful cache writes.
    pub cache_writes: u64,
    /// The total time spent writing cache entries.
    /// (Averages are derived from these totals in `print`.)
    pub cache_write_duration: Duration,
    /// The total time spent reading cache hits.
    pub cache_read_hit_duration: Duration,
    /// The total time spent reading cache misses.
    pub cache_read_miss_duration: Duration,
    /// The count of compilation failures.
    pub compile_fails: u64,
    /// Counts of reasons why compiles were not cached.
    pub not_cached: HashMap<String, usize>,
    /// The count of compilations that were successfully distributed indexed
    /// by the server that ran those compilations.
    pub dist_compiles: HashMap<String, usize>,
    /// The count of compilations that were distributed but failed and had to be re-run locally
    pub dist_errors: u64,
}
/// Info and stats about the server.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ServerInfo {
    pub stats: ServerStats,
    pub cache_location: String,
    /// Current cache size in bytes, if the storage backend reports it.
    pub cache_size: Option<u64>,
    /// Maximum cache size in bytes, if the storage backend reports it.
    pub max_cache_size: Option<u64>,
}
/// Status of the dist client.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum DistInfo {
    Disabled(String),
    #[cfg(feature = "dist-client")]
    NotConnected(Option<config::HTTPUrl>, String),
    #[cfg(feature = "dist-client")]
    SchedulerStatus(Option<config::HTTPUrl>, dist::SchedulerStatusResult),
}
impl Default for ServerStats {
    /// All counters start at zero, all durations at zero length, and all
    /// maps empty.
    fn default() -> ServerStats {
        ServerStats {
            compile_requests: 0,
            requests_unsupported_compiler: 0,
            requests_not_compile: 0,
            requests_not_cacheable: 0,
            requests_executed: 0,
            cache_errors: PerLanguageCount::default(),
            cache_hits: PerLanguageCount::default(),
            cache_misses: PerLanguageCount::default(),
            cache_timeouts: 0,
            cache_read_errors: 0,
            non_cacheable_compilations: 0,
            forced_recaches: 0,
            cache_write_errors: 0,
            cache_writes: 0,
            cache_write_duration: Duration::default(),
            cache_read_hit_duration: Duration::default(),
            cache_read_miss_duration: Duration::default(),
            compile_fails: 0,
            not_cached: HashMap::default(),
            dist_compiles: HashMap::default(),
            dist_errors: 0,
        }
    }
}
impl ServerStats {
    /// Print stats to stdout in a human-readable format.
    ///
    /// Return the formatted width of each of the (name, value) columns.
    fn print(&self) -> (usize, usize) {
        // Push a plain (name, value) row.
        macro_rules! set_stat {
            ($vec:ident, $var:expr, $name:expr) => {{
                // name, value, suffix length
                $vec.push(($name.to_string(), $var.to_string(), 0));
            }};
        }
        // Push the total for a PerLanguageCount, followed by one row per
        // language, sorted by language name for stable output.
        macro_rules! set_lang_stat {
            ($vec:ident, $var:expr, $name:expr) => {{
                $vec.push(($name.to_string(), $var.all().to_string(), 0));
                let mut sorted_stats: Vec<_> = $var.counts.iter().collect();
                sorted_stats.sort_by_key(|v| v.0);
                for (lang, count) in sorted_stats.iter() {
                    $vec.push((format!("{} ({})", $name, lang), count.to_string(), 0));
                }
            }};
        }
        // Push an average-duration row ($dur / $num), guarding against a
        // zero denominator.
        macro_rules! set_duration_stat {
            ($vec:ident, $dur:expr, $num:expr, $name:expr) => {{
                let s = if $num > 0 {
                    $dur / $num as u32
                } else {
                    Default::default()
                };
                // name, value, suffix length
                $vec.push(($name.to_string(), util::fmt_duration_as_secs(&s), 2));
            }};
        }
        let mut stats_vec = vec![];
        //TODO: this would be nice to replace with a custom derive implementation.
        set_stat!(stats_vec, self.compile_requests, "Compile requests");
        set_stat!(
            stats_vec,
            self.requests_executed,
            "Compile requests executed"
        );
        set_lang_stat!(stats_vec, self.cache_hits, "Cache hits");
        set_lang_stat!(stats_vec, self.cache_misses, "Cache misses");
        set_stat!(stats_vec, self.cache_timeouts, "Cache timeouts");
        set_stat!(stats_vec, self.cache_read_errors, "Cache read errors");
        set_stat!(stats_vec, self.forced_recaches, "Forced recaches");
        set_stat!(stats_vec, self.cache_write_errors, "Cache write errors");
        set_stat!(stats_vec, self.compile_fails, "Compilation failures");
        set_lang_stat!(stats_vec, self.cache_errors, "Cache errors");
        set_stat!(
            stats_vec,
            self.non_cacheable_compilations,
            "Non-cacheable compilations"
        );
        set_stat!(
            stats_vec,
            self.requests_not_cacheable,
            "Non-cacheable calls"
        );
        set_stat!(
            stats_vec,
            self.requests_not_compile,
            "Non-compilation calls"
        );
        set_stat!(
            stats_vec,
            self.requests_unsupported_compiler,
            "Unsupported compiler calls"
        );
        set_duration_stat!(
            stats_vec,
            self.cache_write_duration,
            self.cache_writes,
            "Average cache write"
        );
        set_duration_stat!(
            stats_vec,
            self.cache_read_miss_duration,
            self.cache_misses.all(),
            "Average cache read miss"
        );
        set_duration_stat!(
            stats_vec,
            self.cache_read_hit_duration,
            self.cache_hits.all(),
            "Average cache read hit"
        );
        set_stat!(
            stats_vec,
            self.dist_errors,
            "Failed distributed compilations"
        );
        // Compute column widths from the longest name and value.
        let name_width = stats_vec
            .iter()
            .map(|&(ref n, _, _)| n.len())
            .max()
            .unwrap();
        let stat_width = stats_vec
            .iter()
            .map(|&(_, ref s, _)| s.len())
            .max()
            .unwrap();
        for (name, stat, suffix_len) in stats_vec {
            println!(
                "{:<name_width$} {:>stat_width$}",
                name,
                stat,
                name_width = name_width,
                stat_width = stat_width + suffix_len
            );
        }
        if !self.dist_compiles.is_empty() {
            println!("\nSuccessful distributed compiles");
            // Most-used dist servers first.
            let mut counts: Vec<_> = self.dist_compiles.iter().collect();
            counts.sort_by(|(_, c1), (_, c2)| c1.cmp(c2).reverse());
            for (reason, count) in counts {
                println!(
                    "  {:<name_width$} {:>stat_width$}",
                    reason,
                    count,
                    name_width = name_width - 2,
                    stat_width = stat_width
                );
            }
        }
        if !self.not_cached.is_empty() {
            println!("\nNon-cacheable reasons:");
            // Most frequent reasons first.
            let mut counts: Vec<_> = self.not_cached.iter().collect();
            counts.sort_by(|(_, c1), (_, c2)| c1.cmp(c2).reverse());
            for (reason, count) in counts {
                println!(
                    "{:<name_width$} {:>stat_width$}",
                    reason,
                    count,
                    name_width = name_width,
                    stat_width = stat_width
                );
            }
            println!();
        }
        (name_width, stat_width)
    }
}
impl ServerInfo {
    /// Print info to stdout in a human-readable format.
    ///
    /// Reuses the column widths computed by `ServerStats::print` so the
    /// cache rows line up with the stats above them.
    pub fn print(&self) {
        let (name_width, stat_width) = self.stats.print();
        println!(
            "{:<name_width$} {}",
            "Cache location",
            self.cache_location,
            name_width = name_width
        );
        for &(name, val) in &[
            ("Cache size", &self.cache_size),
            ("Max cache size", &self.max_cache_size),
        ] {
            if let Some(val) = *val {
                // Scale the byte count to a binary prefix (KiB, MiB, ...).
                let (val, suffix) = match NumberPrefix::binary(val as f64) {
                    NumberPrefix::Standalone(bytes) => (bytes.to_string(), "bytes".to_string()),
                    NumberPrefix::Prefixed(prefix, n) => {
                        (format!("{:.0}", n), format!("{}B", prefix))
                    }
                };
                println!(
                    "{:<name_width$} {:>stat_width$} {}",
                    name,
                    val,
                    suffix,
                    name_width = name_width,
                    stat_width = stat_width
                );
            }
        }
    }
}
/// One wire frame: either a response header or a body chunk.
/// A `Body` frame with `chunk: None` marks the end of a body stream
/// (see the `Sink` impl for `SccacheTransport`, which drops it).
enum Frame<R, R1> {
    Body { chunk: Option<R1> },
    Message { message: R },
}
/// Receiving half of the channel that streams body items for a
/// `Message::WithBody` response.
struct Body<R> {
    receiver: mpsc::Receiver<Result<R>>,
}
impl<R> Body<R> {
    /// Create a bounded channel (capacity 0) and wrap the receiving half
    /// as a `Body`.
    fn pair() -> (mpsc::Sender<Result<R>>, Self) {
        let (tx, rx) = mpsc::channel(0);
        (tx, Body { receiver: rx })
    }
}
impl<R> futures::Stream for Body<R> {
    type Item = Result<R>;
    /// Forward polling straight to the inner channel receiver.
    fn poll_next(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let receiver = &mut self.get_mut().receiver;
        Pin::new(receiver).poll_next(cx)
    }
}
/// A response header `R`, optionally paired with a streamed body `B`.
enum Message<R, B> {
    /// Header plus a streamed body.
    WithBody(R, B),
    /// A complete, body-less message.
    WithoutBody(R),
}
impl<R, B> Message<R, B> {
    /// Extract the header, discarding any body.
    fn into_inner(self) -> R {
        match self {
            Message::WithBody(message, _) | Message::WithoutBody(message) => message,
        }
    }
}
/// Zero-sized codec implementing tokio-serde's serializer/deserializer
/// traits on top of bincode.
struct BincodeCodec;
impl<T> tokio_serde::Serializer<T> for BincodeCodec
where
    T: serde::Serialize,
{
    type Error = Error;
    /// Encode `item` with bincode into a freshly allocated, frozen buffer.
    fn serialize(self: Pin<&mut Self>, item: &T) -> std::result::Result<Bytes, Self::Error> {
        let mut bytes = BytesMut::new();
        bincode::serialize_into((&mut bytes).writer(), item)?;
        Ok(bytes.freeze())
    }
}
impl<T> tokio_serde::Deserializer<T> for BincodeCodec
where
    T: serde::de::DeserializeOwned,
{
    type Error = Error;
    /// Decode one complete bincode value from `buf`.
    fn deserialize(self: Pin<&mut Self>, buf: &BytesMut) -> std::result::Result<T, Self::Error> {
        let ret = bincode::deserialize(buf)?;
        Ok(ret)
    }
}
/// Implementation of `Stream + Sink` that tokio-proto is expecting
///
/// This type is composed of a few layers:
///
/// * First there's `I`, the I/O object implementing `AsyncRead` and
///   `AsyncWrite`
/// * Next that's framed using the `length_delimited` module in tokio-io giving
///   us a `Sink` and `Stream` of `BytesMut`.
/// * Next that sink/stream is wrapped in `ReadBincode` which will cause the
///   `Stream` implementation to switch from `BytesMut` to `Request` by parsing
///   the bytes bincode.
/// * Finally that sink/stream is wrapped in `WriteBincode` which will cause the
///   `Sink` implementation to switch from `BytesMut` to `Response` meaning that
///   all `Response` types pushed in will be converted to `BytesMut` and pushed
///   below.
///
/// The error-conversion wrappers normalize both directions to this crate's
/// `Error` type (set up in `SccacheService::bind`).
struct SccacheTransport<I: AsyncRead + AsyncWrite + Unpin> {
    inner: Framed<
        futures::stream::ErrInto<
            futures::sink::SinkErrInto<
                tokio_util::codec::Framed<I, LengthDelimitedCodec>,
                Bytes,
                Error,
            >,
            Error,
        >,
        Request,
        Response,
        BincodeCodec,
    >,
}
impl<I: AsyncRead + AsyncWrite + Unpin> Stream for SccacheTransport<I> {
    type Item = Result<Message<Request, Body<()>>>;
    /// Each decoded `Request` arrives as a complete frame, so surface it as
    /// a body-less message.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match Pin::new(&mut self.inner).poll_next(cx) {
            Poll::Ready(Some(request)) => Poll::Ready(Some(request.map(Message::WithoutBody))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}
impl<I: AsyncRead + AsyncWrite + Unpin> Sink<Frame<Response, Response>> for SccacheTransport<I> {
    type Error = Error;
    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        Pin::new(&mut self.inner).poll_ready(cx)
    }
    /// Write one frame: headers and body chunks are encoded identically as
    /// `Response` values; the end-of-body marker (`chunk: None`) is consumed
    /// here and nothing is written to the wire for it.
    fn start_send(mut self: Pin<&mut Self>, item: Frame<Response, Response>) -> Result<()> {
        match item {
            Frame::Message { message } => Pin::new(&mut self.inner).start_send(message),
            Frame::Body { chunk: Some(chunk) } => Pin::new(&mut self.inner).start_send(chunk),
            Frame::Body { chunk: None } => Ok(()),
        }
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }
    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        Pin::new(&mut self.inner).poll_close(cx)
    }
}
/// Future that resolves when a `Shutdown` message arrives or when no request
/// has been received for `timeout_dur` (the idle timeout).
struct ShutdownOrInactive {
    rx: mpsc::Receiver<ServerMessage>,
    /// `None` disables the idle timeout entirely (poll then only completes
    /// on shutdown or channel closure).
    timeout: Option<Pin<Box<Sleep>>>,
    timeout_dur: Duration,
}
impl Future for ShutdownOrInactive {
    type Output = ();
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        // Drain all pending messages before consulting the timer.
        loop {
            match Pin::new(&mut self.rx).poll_next(cx) {
                Poll::Pending => break,
                // Shutdown received!
                Poll::Ready(Some(ServerMessage::Shutdown)) => return Poll::Ready(()),
                Poll::Ready(Some(ServerMessage::Request)) => {
                    // Any request resets the idle timer with a fresh sleep.
                    if self.timeout_dur != Duration::new(0, 0) {
                        self.timeout = Some(Box::pin(sleep(self.timeout_dur)));
                    }
                }
                // All services have shut down, in theory this isn't possible...
                Poll::Ready(None) => return Poll::Ready(()),
            }
        }
        match self.timeout {
            None => Poll::Pending,
            Some(ref mut timeout) => timeout.as_mut().poll(cx),
        }
    }
}
/// Helper future which tracks the `ActiveInfo` below. This future will resolve
/// once all instances of `ActiveInfo` have been dropped.
struct WaitUntilZero {
    // Weak: this future must not itself keep the `Info` alive.
    info: std::sync::Weak<Mutex<Info>>,
}
/// Handle held (and cloned) by each live service; the shared `Arc` keeps the
/// `Info` alive until the last clone is dropped.
#[derive(Clone)]
struct ActiveInfo {
    info: Arc<Mutex<Info>>,
}
/// Shared state: the waker of the `WaitUntilZero` future, if it has been
/// polled at least once.
struct Info {
    waker: Option<Waker>,
}
impl Drop for Info {
    /// Rouse the registered `WaitUntilZero` future (if any) when the shared
    /// state is torn down, i.e. when the last `ActiveInfo` clone is gone.
    fn drop(&mut self) {
        if let Some(waker) = self.waker.take() {
            waker.wake();
        }
    }
}
impl WaitUntilZero {
    /// Create the paired future and handle. The future only holds a weak
    /// reference, so it resolves once every `ActiveInfo` clone is dropped.
    fn new() -> (WaitUntilZero, ActiveInfo) {
        let info = Arc::new(Mutex::new(Info { waker: None }));
        let wait = WaitUntilZero {
            info: Arc::downgrade(&info),
        };
        (wait, ActiveInfo { info })
    }
}
impl std::future::Future for WaitUntilZero {
    type Output = ();
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
        if let Some(strong) = self.info.upgrade() {
            // Still at least one ActiveInfo alive: (re)register our waker so
            // the Drop impl of `Info` can wake us later.
            let mut info = strong.lock().expect("we can't panic when holding lock");
            info.waker = Some(cx.waker().clone());
            std::task::Poll::Pending
        } else {
            // Upgrade failed: every ActiveInfo has been dropped.
            std::task::Poll::Ready(())
        }
    }
}
#[test]
fn waits_until_zero() {
    // One live handle: future must stay pending.
    let (wait, _active) = WaitUntilZero::new();
    assert_eq!(wait.now_or_never(), None);
    // A clone keeps it alive even after the original is dropped.
    let (wait, active) = WaitUntilZero::new();
    let _active2 = active.clone();
    drop(active);
    assert_eq!(wait.now_or_never(), None);
    // Handle dropped immediately: future resolves.
    let (wait, _) = WaitUntilZero::new();
    assert_eq!(wait.now_or_never(), Some(()));
    // All clones dropped: future resolves.
    let (wait, active) = WaitUntilZero::new();
    let active2 = active.clone();
    drop(active);
    drop(active2);
    assert_eq!(wait.now_or_never(), Some(()));
}
Add SccacheServer::with_listener (#1072)
Currently a SccacheServer can only be constructed via SccacheServer::new
with a port number. That prevents third-party crates from creating a server
instance that is, for example, not bound to 127.0.0.1, or one that wraps an
existing socket (e.g. obtained via systemd socket activation).
This adds SccacheServer::with_listener to allow creating a server with
an externally provided TcpListener.
// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::cache::{storage_from_config, Storage};
use crate::compiler::{
get_compiler_info, CacheControl, CompileResult, Compiler, CompilerArguments, CompilerHasher,
CompilerKind, CompilerProxy, DistType, MissType,
};
#[cfg(feature = "dist-client")]
use crate::config;
use crate::config::Config;
use crate::dist;
use crate::jobserver::Client;
use crate::mock_command::{CommandCreatorSync, ProcessCommandCreator};
use crate::protocol::{Compile, CompileFinished, CompileResponse, Request, Response};
use crate::util;
#[cfg(feature = "dist-client")]
use anyhow::Context as _;
use bytes::{buf::BufMut, Bytes, BytesMut};
use filetime::FileTime;
use futures::channel::mpsc;
use futures::future::FutureExt;
use futures::{future, stream, Sink, SinkExt, Stream, StreamExt, TryFutureExt};
use futures_locks::RwLock;
use number_prefix::NumberPrefix;
use std::collections::HashMap;
use std::env;
use std::ffi::{OsStr, OsString};
use std::fs::metadata;
use std::future::Future;
use std::io::{self, Write};
use std::marker::Unpin;
#[cfg(feature = "dist-client")]
use std::mem;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::path::PathBuf;
use std::pin::Pin;
use std::process::{ExitStatus, Output};
use std::sync::Arc;
use std::sync::Mutex;
use std::task::{Context, Poll, Waker};
use std::time::Duration;
#[cfg(feature = "dist-client")]
use std::time::Instant;
use std::u64;
use tokio::{
io::{AsyncRead, AsyncWrite},
net::TcpListener,
runtime::Runtime,
time::{self, sleep, Sleep},
};
use tokio_serde::Framed;
use tokio_util::codec::{length_delimited, LengthDelimitedCodec};
use tower::Service;
use crate::errors::*;
/// If the server is idle for this many seconds, shut down.
const DEFAULT_IDLE_TIMEOUT: u64 = 600;
/// If the dist client couldn't be created, retry creation at this number
/// of seconds from now (or later).
#[cfg(feature = "dist-client")]
const DIST_CLIENT_RECREATE_TIMEOUT: Duration = Duration::from_secs(30);
/// Result of background server startup.
///
/// Sent back over the startup-notification channel (see
/// `notify_server_startup`) to the process that spawned the server.
#[derive(Debug, Serialize, Deserialize)]
pub enum ServerStartup {
    /// Server started successfully on `port`.
    Ok { port: u16 },
    /// Server address was already in use.
    AddrInUse,
    /// Timed out waiting for server startup.
    TimedOut,
    /// Server encountered an error.
    Err { reason: String },
}
/// Get the time the server should idle for before shutting down.
fn get_idle_timeout() -> u64 {
// A value of 0 disables idle shutdown entirely.
env::var("SCCACHE_IDLE_TIMEOUT")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(DEFAULT_IDLE_TIMEOUT)
}
/// Serialize `status` as length-prefixed bincode onto the writer `w`.
fn notify_server_startup_internal<W: Write>(mut w: W, status: ServerStartup) -> Result<()> {
    util::write_length_prefixed_bincode(&mut w, status)
}
/// Report startup `status` over the Unix socket named by `name`.
///
/// When `name` is `None` no notification channel was requested and this
/// is a no-op.
#[cfg(unix)]
fn notify_server_startup(name: &Option<OsString>, status: ServerStartup) -> Result<()> {
    use std::os::unix::net::UnixStream;
    let name = match name {
        Some(s) => s,
        None => return Ok(()),
    };
    debug!("notify_server_startup({:?})", status);
    let stream = UnixStream::connect(name)?;
    notify_server_startup_internal(stream, status)
}
/// Report startup `status` over the (pipe-like) path named by `name`.
///
/// When `name` is `None` no notification channel was requested and this
/// is a no-op.
#[cfg(windows)]
fn notify_server_startup(name: &Option<OsString>, status: ServerStartup) -> Result<()> {
    use std::fs::OpenOptions;
    let name = match name {
        Some(s) => s,
        None => return Ok(()),
    };
    // Open the path for reading and writing and send the status through it.
    let pipe = OpenOptions::new().write(true).read(true).open(name)?;
    notify_server_startup_internal(pipe, status)
}
/// Extract the signal number that terminated the process.
///
/// # Panics
/// Panics if the process was not terminated by a signal.
#[cfg(unix)]
fn get_signal(status: ExitStatus) -> i32 {
    use std::os::unix::prelude::*;
    status.signal().expect("must have signal")
}
/// Windows stub for [`get_signal`].
///
/// # Panics
/// Always panics: there are no signals on Windows.
#[cfg(windows)]
fn get_signal(_status: ExitStatus) -> i32 {
    panic!("no signals on windows")
}
/// Holder for the distributed-compilation client state.
///
/// When built without the `dist-client` feature this is a zero-sized type
/// whose methods report dist as disabled.
pub struct DistClientContainer {
    // The actual dist client state
    #[cfg(feature = "dist-client")]
    state: Mutex<DistClientState>,
}
/// Everything needed to (re)create a dist client.
#[cfg(feature = "dist-client")]
struct DistClientConfig {
    // Reusable items tied to an SccacheServer instance
    pool: tokio::runtime::Handle,
    // From the static dist configuration
    scheduler_url: Option<config::HTTPUrl>,
    auth: config::DistAuth,
    cache_dir: PathBuf,
    toolchain_cache_size: u64,
    toolchains: Vec<config::DistToolchainConfig>,
    rewrite_includes_only: bool,
}
/// State machine for dist client creation (see `DistClientContainer`).
#[cfg(feature = "dist-client")]
enum DistClientState {
    /// A successfully created client, ready for use.
    #[cfg(feature = "dist-client")]
    Some(Box<DistClientConfig>, Arc<dyn dist::Client>),
    /// Creation failed with a message that `get_client` reports as an error
    /// to its caller (after which the state moves to `RetryCreateAt`).
    #[cfg(feature = "dist-client")]
    FailWithMessage(Box<DistClientConfig>, String),
    /// Creation failed; retry no earlier than the stored `Instant`.
    #[cfg(feature = "dist-client")]
    RetryCreateAt(Box<DistClientConfig>, Instant),
    /// Distributed compilation is not configured.
    Disabled,
}
#[cfg(not(feature = "dist-client"))]
impl DistClientContainer {
    /// Build a container for a binary compiled without the `dist-client`
    /// feature. Distributed compilation is unconditionally unavailable; this
    /// only warns when the config asks for a scheduler we cannot use.
    ///
    /// Note: the enclosing `impl` is already gated on
    /// `#[cfg(not(feature = "dist-client"))]`, so no per-method attribute is
    /// needed here.
    fn new(config: &Config, _: &tokio::runtime::Handle) -> Self {
        if config.dist.scheduler_url.is_some() {
            warn!("Scheduler address configured but dist feature disabled, disabling distributed sccache")
        }
        Self {}
    }
    /// Same as `new`, but never inspects any configuration.
    pub fn new_disabled() -> Self {
        Self {}
    }
    /// No state to reset in the disabled build.
    pub fn reset_state(&self) {}
    /// Always reports dist as disabled.
    pub async fn get_status(&self) -> DistInfo {
        DistInfo::Disabled("dist-client feature not selected".to_string())
    }
    /// There is never a client to hand out.
    fn get_client(&self) -> Result<Option<Arc<dyn dist::Client>>> {
        Ok(None)
    }
}
#[cfg(feature = "dist-client")]
impl DistClientContainer {
    /// Build a container whose initial state is derived from the static
    /// `dist` section of `config`; client creation is attempted eagerly
    /// via `create_state`.
    fn new(config: &Config, pool: &tokio::runtime::Handle) -> Self {
        let config = DistClientConfig {
            pool: pool.clone(),
            scheduler_url: config.dist.scheduler_url.clone(),
            auth: config.dist.auth.clone(),
            cache_dir: config.dist.cache_dir.clone(),
            toolchain_cache_size: config.dist.toolchain_cache_size,
            toolchains: config.dist.toolchains.clone(),
            rewrite_includes_only: config.dist.rewrite_includes_only,
        };
        let state = Self::create_state(config);
        Self {
            state: Mutex::new(state),
        }
    }
    /// Build a container that never attempts client creation.
    pub fn new_disabled() -> Self {
        Self {
            state: Mutex::new(DistClientState::Disabled),
        }
    }
    /// Drop any existing client and schedule recreation in the past, so the
    /// next `get_client` call recreates it immediately.
    pub fn reset_state(&self) {
        let mut guard = self.state.lock();
        let state = guard.as_mut().unwrap();
        let state: &mut DistClientState = &mut **state;
        match mem::replace(state, DistClientState::Disabled) {
            DistClientState::Some(cfg, _)
            | DistClientState::FailWithMessage(cfg, _)
            | DistClientState::RetryCreateAt(cfg, _) => {
                warn!("State reset. Will recreate");
                *state =
                    DistClientState::RetryCreateAt(cfg, Instant::now() - Duration::from_secs(1));
            }
            DistClientState::Disabled => (),
        }
    }
    /// Describe the current dist status, querying the scheduler when a
    /// client exists.
    pub fn get_status(&self) -> impl Future<Output = DistInfo> {
        // This function can't be wholly async because we can't hold mutex guard
        // across the yield point - instead, either return an immediately ready
        // future or perform async query with the client cloned beforehand.
        let mut guard = self.state.lock();
        let state = guard.as_mut().unwrap();
        let state: &mut DistClientState = &mut **state;
        let (client, scheduler_url) = match state {
            DistClientState::Disabled => {
                return Either::Left(future::ready(DistInfo::Disabled("disabled".to_string())))
            }
            DistClientState::FailWithMessage(cfg, _) => {
                return Either::Left(future::ready(DistInfo::NotConnected(
                    cfg.scheduler_url.clone(),
                    "enabled, auth not configured".to_string(),
                )))
            }
            DistClientState::RetryCreateAt(cfg, _) => {
                return Either::Left(future::ready(DistInfo::NotConnected(
                    cfg.scheduler_url.clone(),
                    "enabled, not connected, will retry".to_string(),
                )))
            }
            DistClientState::Some(cfg, client) => (Arc::clone(client), cfg.scheduler_url.clone()),
        };
        // The guard is released here; only the cloned Arc crosses the await.
        Either::Right(Box::pin(async move {
            match client.do_get_status().await {
                Ok(res) => DistInfo::SchedulerStatus(scheduler_url.clone(), res),
                Err(_) => DistInfo::NotConnected(
                    scheduler_url.clone(),
                    "could not communicate with scheduler".to_string(),
                ),
            }
        }))
    }
    /// Fetch the dist client, recreating it first if a scheduled retry is
    /// due. A `FailWithMessage` state is reported as `Err` exactly once and
    /// then converted into a retry.
    fn get_client(&self) -> Result<Option<Arc<dyn dist::Client>>> {
        let mut guard = self.state.lock();
        let state = guard.as_mut().unwrap();
        let state: &mut DistClientState = &mut **state;
        Self::maybe_recreate_state(state);
        let res = match state {
            DistClientState::Some(_, dc) => Ok(Some(dc.clone())),
            DistClientState::Disabled | DistClientState::RetryCreateAt(_, _) => Ok(None),
            DistClientState::FailWithMessage(_, msg) => Err(anyhow!(msg.clone())),
        };
        if res.is_err() {
            let config = match mem::replace(state, DistClientState::Disabled) {
                DistClientState::FailWithMessage(config, _) => config,
                _ => unreachable!(),
            };
            // The client is most likely mis-configured, make sure we
            // re-create on our next attempt.
            *state =
                DistClientState::RetryCreateAt(config, Instant::now() - Duration::from_secs(1));
        }
        res
    }
    /// If a retry is scheduled and its deadline has passed, attempt to
    /// recreate the client in place.
    fn maybe_recreate_state(state: &mut DistClientState) {
        if let DistClientState::RetryCreateAt(_, instant) = *state {
            if instant > Instant::now() {
                return;
            }
            let config = match mem::replace(state, DistClientState::Disabled) {
                DistClientState::RetryCreateAt(config, _) => config,
                _ => unreachable!(),
            };
            info!("Attempting to recreate the dist client");
            *state = Self::create_state(*config)
        }
    }
    // Attempt to recreate the dist client
    fn create_state(config: DistClientConfig) -> DistClientState {
        // Transient failures schedule a retry…
        macro_rules! try_or_retry_later {
            ($v:expr) => {{
                match $v {
                    Ok(v) => v,
                    Err(e) => {
                        // `{:?}` prints the full cause chain and backtrace.
                        error!("{:?}", e);
                        return DistClientState::RetryCreateAt(
                            Box::new(config),
                            Instant::now() + DIST_CLIENT_RECREATE_TIMEOUT,
                        );
                    }
                }
            }};
        }
        // …while configuration problems are surfaced to the next caller.
        macro_rules! try_or_fail_with_message {
            ($v:expr) => {{
                match $v {
                    Ok(v) => v,
                    Err(e) => {
                        // `{:?}` prints the full cause chain and backtrace.
                        let errmsg = format!("{:?}", e);
                        error!("{}", errmsg);
                        return DistClientState::FailWithMessage(
                            Box::new(config),
                            errmsg.to_string(),
                        );
                    }
                }
            }};
        }
        match config.scheduler_url {
            Some(ref addr) => {
                let url = addr.to_url();
                info!("Enabling distributed sccache to {}", url);
                let auth_token = match &config.auth {
                    config::DistAuth::Token { token } => Ok(token.to_owned()),
                    config::DistAuth::Oauth2CodeGrantPKCE { auth_url, .. }
                    | config::DistAuth::Oauth2Implicit { auth_url, .. } => {
                        Self::get_cached_config_auth_token(auth_url)
                    }
                };
                let auth_token = try_or_fail_with_message!(auth_token
                    .context("could not load client auth token, run |sccache --dist-auth|"));
                let dist_client = dist::http::Client::new(
                    &config.pool,
                    url,
                    &config.cache_dir.join("client"),
                    config.toolchain_cache_size,
                    &config.toolchains,
                    auth_token,
                    config.rewrite_includes_only,
                );
                let dist_client =
                    try_or_retry_later!(dist_client.context("failure during dist client creation"));
                use crate::dist::Client;
                // Probe the scheduler synchronously before accepting the client.
                match config.pool.block_on(dist_client.do_get_status()) {
                    Ok(res) => {
                        info!(
                            "Successfully created dist client with {:?} cores across {:?} servers",
                            res.num_cpus, res.num_servers
                        );
                        DistClientState::Some(Box::new(config), Arc::new(dist_client))
                    }
                    Err(_) => {
                        warn!("Scheduler address configured, but could not communicate with scheduler");
                        DistClientState::RetryCreateAt(
                            Box::new(config),
                            Instant::now() + DIST_CLIENT_RECREATE_TIMEOUT,
                        )
                    }
                }
            }
            None => {
                info!("No scheduler address configured, disabling distributed sccache");
                DistClientState::Disabled
            }
        }
    }
    /// Look up the auth token for `auth_url` in the on-disk cached config.
    fn get_cached_config_auth_token(auth_url: &str) -> Result<String> {
        let cached_config = config::CachedConfig::reload()?;
        cached_config
            .with(|c| c.dist.auth_tokens.get(auth_url).map(String::to_owned))
            .with_context(|| format!("token for url {} not present in cached config", auth_url))
    }
}
/// Start an sccache server, listening on `port`.
///
/// Spins an event loop handling client connections until a client
/// requests a shutdown.
///
/// If `SCCACHE_STARTUP_NOTIFY` names a notification channel, the outcome
/// of startup (`Ok`/`AddrInUse`/`Err`) is reported through it.
pub fn start_server(config: &Config, port: u16) -> Result<()> {
    info!("start_server: port: {}", port);
    // SAFETY: relies on `jobserver::Client::new`'s contract — presumably
    // related to inherited file descriptors; confirm against its docs.
    let client = unsafe { Client::new() };
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        // Enough workers to keep many concurrent compiles busy.
        .worker_threads(std::cmp::max(20, 2 * num_cpus::get()))
        .build()?;
    let pool = runtime.handle().clone();
    let dist_client = DistClientContainer::new(config, &pool);
    let storage = storage_from_config(config, &pool);
    let res =
        SccacheServer::<ProcessCommandCreator>::new(port, runtime, client, dist_client, storage);
    let notify = env::var_os("SCCACHE_STARTUP_NOTIFY");
    match res {
        Ok(srv) => {
            let port = srv.port();
            info!("server started, listening on port {}", port);
            notify_server_startup(&notify, ServerStartup::Ok { port })?;
            // Run forever unless shut down internally (RPC or idle timeout).
            srv.run(future::pending::<()>())?;
            Ok(())
        }
        Err(e) => {
            error!("failed to start server: {}", e);
            // Distinguish "address already in use" so the client can report
            // a more helpful message.
            match e.downcast_ref::<io::Error>() {
                Some(io_err) if io::ErrorKind::AddrInUse == io_err.kind() => {
                    notify_server_startup(&notify, ServerStartup::AddrInUse)?;
                }
                _ => {
                    let reason = e.to_string();
                    notify_server_startup(&notify, ServerStartup::Err { reason })?;
                }
            };
            Err(e)
        }
    }
}
/// The sccache server: a TCP listener plus the service that handles each
/// accepted connection.
pub struct SccacheServer<C: CommandCreatorSync> {
    /// Runtime the server event loop and all tasks run on.
    runtime: Runtime,
    /// Listener accepting client connections.
    listener: TcpListener,
    /// Receives activity/shutdown messages from connection services.
    rx: mpsc::Receiver<ServerMessage>,
    /// Idle period after which the server shuts down (0 disables this).
    timeout: Duration,
    /// Prototype service, cloned for every accepted connection.
    service: SccacheService<C>,
    /// Resolves once all service instances have been dropped.
    wait: WaitUntilZero,
}
impl<C: CommandCreatorSync> SccacheServer<C> {
    /// Bind a listener on `127.0.0.1:port` and construct a server from it.
    pub fn new(
        port: u16,
        runtime: Runtime,
        client: Client,
        dist_client: DistClientContainer,
        storage: Arc<dyn Storage>,
    ) -> Result<SccacheServer<C>> {
        let addr = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), port);
        let listener = runtime.block_on(TcpListener::bind(&SocketAddr::V4(addr)))?;
        Ok(Self::with_listener(
            listener,
            runtime,
            client,
            dist_client,
            storage,
        ))
    }
    /// Construct a server from an externally provided `listener`, e.g. one
    /// not bound to 127.0.0.1 or obtained via socket activation.
    pub fn with_listener(
        listener: TcpListener,
        runtime: Runtime,
        client: Client,
        dist_client: DistClientContainer,
        storage: Arc<dyn Storage>,
    ) -> SccacheServer<C> {
        // Prepare the service which we'll use to service all incoming TCP
        // connections.
        let (tx, rx) = mpsc::channel(1);
        let (wait, info) = WaitUntilZero::new();
        let pool = runtime.handle().clone();
        let service = SccacheService::new(dist_client, storage, &client, pool, tx, info);
        SccacheServer {
            runtime,
            listener,
            rx,
            service,
            timeout: Duration::from_secs(get_idle_timeout()),
            wait,
        }
    }
    /// Configures how long this server will be idle before shutting down.
    #[allow(dead_code)]
    pub fn set_idle_timeout(&mut self, timeout: Duration) {
        self.timeout = timeout;
    }
    /// Set the storage this server will use.
    #[allow(dead_code)]
    pub fn set_storage(&mut self, storage: Arc<dyn Storage>) {
        self.service.storage = storage;
    }
    /// Returns a reference to a thread pool to run work on
    #[allow(dead_code)]
    pub fn pool(&self) -> &tokio::runtime::Handle {
        &self.service.rt
    }
    /// Returns a reference to the command creator this server will use
    #[allow(dead_code)]
    pub fn command_creator(&self) -> &C {
        &self.service.creator
    }
    /// Returns the port that this server is bound to
    #[allow(dead_code)]
    pub fn port(&self) -> u16 {
        self.listener.local_addr().unwrap().port()
    }
    /// Runs this server to completion.
    ///
    /// If the `shutdown` future resolves then the server will be shut down,
    /// otherwise the server may naturally shut down if it becomes idle for too
    /// long anyway.
    pub fn run<F>(self, shutdown: F) -> io::Result<()>
    where
        F: Future,
        C: Send,
    {
        let SccacheServer {
            runtime,
            listener,
            rx,
            service,
            timeout,
            wait,
        } = self;
        // Create our "server future" which will simply handle all incoming
        // connections in separate tasks.
        let server = async move {
            loop {
                let (socket, _) = listener.accept().await?;
                trace!("incoming connection");
                let conn = service.clone().bind(socket).map_err(|res| {
                    error!("Failed to bind socket: {}", res);
                });
                // We're not interested if the task panicked; immediately process
                // another connection
                let _ = tokio::spawn(conn);
            }
        };
        // Right now there's a whole bunch of ways to shut down this server for
        // various purposes. These include:
        //
        // 1. The `shutdown` future above.
        // 2. An RPC indicating the server should shut down
        // 3. A period of inactivity (no requests serviced)
        //
        // These are all encapsulated with the future that we're creating below.
        // The `ShutdownOrInactive` indicates the RPC or the period of
        // inactivity, and this is then select'd with the `shutdown` future
        // passed to this function.
        let shutdown = shutdown.map(|_| {
            info!("shutting down due to explicit signal");
        });
        let shutdown_idle = async {
            ShutdownOrInactive {
                rx,
                // A zero timeout means idle shutdown is disabled.
                timeout: if timeout != Duration::new(0, 0) {
                    Some(Box::pin(sleep(timeout)))
                } else {
                    None
                },
                timeout_dur: timeout,
            }
            .await;
            info!("shutting down due to being idle or request");
        };
        runtime.block_on(async {
            futures::select! {
                server = server.fuse() => server,
                _res = shutdown.fuse() => Ok(()),
                _res = shutdown_idle.fuse() => Ok::<_, io::Error>(()),
            }
        })?;
        const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10);
        info!(
            "moving into the shutdown phase now, waiting at most {} seconds \
             for all client requests to complete",
            SHUTDOWN_TIMEOUT.as_secs()
        );
        // Once our server has shut down either due to inactivity or a manual
        // request we still need to give a bit of time for all active
        // connections to finish. This `wait` future will resolve once all
        // instances of `SccacheService` have been dropped.
        //
        // Note that we cap the amount of time this can take, however, as we
        // don't want to wait *too* long.
        runtime.block_on(async { time::timeout(SHUTDOWN_TIMEOUT, wait).await })?;
        info!("ok, fully shutting down now");
        Ok(())
    }
}
/// Maps a compiler proxy path to a compiler proxy and its last modification time
type CompilerProxyMap<C> = HashMap<PathBuf, (Box<dyn CompilerProxy<C>>, FileTime)>;
/// Maps a compiler path to its cached entry; `None` marks a path that was
/// probed and found unsupported.
type CompilerMap<C> = HashMap<PathBuf, Option<CompilerCacheEntry<C>>>;
/// Entry of the compiler cache.
struct CompilerCacheEntry<C> {
    /// compiler argument trait obj
    pub compiler: Box<dyn Compiler<C>>,
    /// modification time of the compilers executable file
    pub mtime: FileTime,
    /// distributed compilation extra info
    pub dist_info: Option<(PathBuf, FileTime)>,
}
impl<C> CompilerCacheEntry<C> {
fn new(
compiler: Box<dyn Compiler<C>>,
mtime: FileTime,
dist_info: Option<(PathBuf, FileTime)>,
) -> Self {
Self {
compiler,
mtime,
dist_info,
}
}
}
/// Service implementation for sccache
///
/// Cloning is cheap: all heavyweight state is behind `Arc`s, so one clone
/// is made per connection and per request without copying caches or stats.
#[derive(Clone)]
struct SccacheService<C>
where
    C: Send,
{
    /// Server statistics.
    stats: Arc<RwLock<ServerStats>>,
    /// Distributed sccache client
    dist_client: Arc<DistClientContainer>,
    /// Cache storage.
    storage: Arc<dyn Storage>,
    /// A cache of known compiler info.
    compilers: Arc<RwLock<CompilerMap<C>>>,
    /// map the cwd with compiler proxy path to a proxy resolver, which
    /// will dynamically resolve the input compiler for the current context
    /// (usually file or current working directory)
    /// the associated `FileTime` is the modification time of
    /// the compiler proxy, in order to track updates of the proxy itself
    compiler_proxies: Arc<RwLock<CompilerProxyMap<C>>>,
    /// Task pool for blocking (used mostly for disk I/O-bound tasks) and
    // non-blocking tasks
    rt: tokio::runtime::Handle,
    /// An object for creating commands.
    ///
    /// This is mostly useful for unit testing, where we
    /// can mock this out.
    creator: C,
    /// Message channel used to learn about requests received by this server.
    ///
    /// Note that messages sent along this channel will keep the server alive
    /// (reset the idle timer) and this channel can also be used to shut down
    /// the entire server immediately via a message.
    tx: mpsc::Sender<ServerMessage>,
    /// Information tracking how many services (connected clients) are active.
    info: ActiveInfo,
}
/// A protocol request with no streaming body.
type SccacheRequest = Message<Request, Body<()>>;
/// A protocol response whose body may stream further `Response`s.
type SccacheResponse = Message<Response, Body<Response>>;
/// Messages sent from all services to the main event loop indicating activity.
///
/// Whenever a request is received a `Request` message is sent which will reset
/// the idle shutdown timer, and otherwise a `Shutdown` message indicates that
/// a server shutdown was requested via an RPC.
pub enum ServerMessage {
    /// A message sent whenever a request is received.
    Request,
    /// Message sent whenever a shutdown request is received.
    Shutdown,
}
impl<C> Service<SccacheRequest> for Arc<SccacheService<C>>
where
    C: CommandCreatorSync + Send + Sync + 'static,
{
    type Response = SccacheResponse;
    type Error = Error;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response>> + Send + 'static>>;
    /// Dispatch one protocol request to the matching handler method.
    fn call(&mut self, req: SccacheRequest) -> Self::Future {
        trace!("handle_client");
        // Opportunistically let channel know that we've received a request. We
        // ignore failures here as well as backpressure as it's not imperative
        // that every message is received.
        drop(self.tx.clone().start_send(ServerMessage::Request));
        let me = self.clone();
        Box::pin(async move {
            match req.into_inner() {
                Request::Compile(compile) => {
                    debug!("handle_client: compile");
                    me.stats.write().await.compile_requests += 1;
                    me.handle_compile(compile).await
                }
                Request::GetStats => {
                    debug!("handle_client: get_stats");
                    me.get_info()
                        .await
                        .map(|i| Response::Stats(Box::new(i)))
                        .map(Message::WithoutBody)
                }
                Request::DistStatus => {
                    debug!("handle_client: dist_status");
                    me.get_dist_status()
                        .await
                        .map(Response::DistStatus)
                        .map(Message::WithoutBody)
                }
                Request::ZeroStats => {
                    debug!("handle_client: zero_stats");
                    me.zero_stats().await;
                    // Return the (freshly zeroed) stats as confirmation.
                    me.get_info()
                        .await
                        .map(|i| Response::Stats(Box::new(i)))
                        .map(Message::WithoutBody)
                }
                Request::Shutdown => {
                    debug!("handle_client: shutdown");
                    let mut tx = me.tx.clone();
                    // Signal the event loop and gather final stats
                    // concurrently; the send result is deliberately ignored.
                    future::try_join(
                        async {
                            let _ = tx.send(ServerMessage::Shutdown).await;
                            Ok(())
                        },
                        me.get_info(),
                    )
                    .await
                    .map(move |(_, info)| {
                        Message::WithoutBody(Response::ShuttingDown(Box::new(info)))
                    })
                }
            }
        })
    }
    /// This service is always ready for the next request.
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<()>> {
        Poll::Ready(Ok(()))
    }
}
use futures::future::Either;
use futures::TryStreamExt;
impl<C> SccacheService<C>
where
    C: CommandCreatorSync + Clone + Send + Sync + 'static,
{
    /// Create the shared service state; cloned per connection thereafter.
    pub fn new(
        dist_client: DistClientContainer,
        storage: Arc<dyn Storage>,
        client: &Client,
        rt: tokio::runtime::Handle,
        tx: mpsc::Sender<ServerMessage>,
        info: ActiveInfo,
    ) -> SccacheService<C> {
        SccacheService {
            stats: Arc::new(RwLock::new(ServerStats::default())),
            dist_client: Arc::new(dist_client),
            storage,
            compilers: Arc::new(RwLock::new(HashMap::new())),
            compiler_proxies: Arc::new(RwLock::new(HashMap::new())),
            rt,
            creator: C::new(client),
            tx,
            info,
        }
    }
    /// Wire `socket` to this service: frame the byte stream, decode requests,
    /// run them through `call`, and stream responses (with optional bodies)
    /// back out. The returned future drives the whole connection.
    fn bind<T>(self, socket: T) -> impl Future<Output = Result<()>> + Send + Sized + 'static
    where
        T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
    {
        let mut builder = length_delimited::Builder::new();
        // Allow overriding the maximum frame size for very large requests.
        if let Ok(max_frame_length_str) = env::var("SCCACHE_MAX_FRAME_LENGTH") {
            if let Ok(max_frame_length) = max_frame_length_str.parse::<usize>() {
                builder.max_frame_length(max_frame_length);
            } else {
                warn!("Content of SCCACHE_MAX_FRAME_LENGTH is not a valid number, using default");
            }
        }
        let io = builder.new_framed(socket);
        let (sink, stream) = SccacheTransport {
            inner: Framed::new(io.sink_err_into().err_into(), BincodeCodec),
        }
        .split();
        let sink = sink.sink_err_into::<Error>();
        let me = Arc::new(self);
        stream
            .err_into::<Error>()
            .and_then(move |input| me.clone().call(input))
            .and_then(move |message| async move {
                // Flatten each response into frames: a lone message, or a
                // message followed by body chunks and a terminating None.
                let fut = match message {
                    Message::WithoutBody(message) => {
                        let stream = stream::once(async move { Ok(Frame::Message { message }) });
                        Either::Left(stream)
                    }
                    Message::WithBody(message, body) => {
                        let stream = stream::once(async move { Ok(Frame::Message { message }) })
                            .chain(body.map_ok(|chunk| Frame::Body { chunk: Some(chunk) }))
                            .chain(stream::once(async move { Ok(Frame::Body { chunk: None }) }));
                        Either::Right(stream)
                    }
                };
                Ok(Box::pin(fut))
            })
            .try_flatten()
            .forward(sink)
    }
    /// Get dist status.
    async fn get_dist_status(&self) -> Result<DistInfo> {
        Ok(self.dist_client.get_status().await)
    }
    /// Get info and stats about the cache.
    async fn get_info(&self) -> Result<ServerInfo> {
        let stats = self.stats.read().await.clone();
        let cache_location = self.storage.location();
        futures::try_join!(self.storage.current_size(), self.storage.max_size()).map(
            move |(cache_size, max_cache_size)| ServerInfo {
                stats,
                cache_location,
                cache_size,
                max_cache_size,
            },
        )
    }
    /// Zero stats about the cache.
    async fn zero_stats(&self) {
        *self.stats.write().await = ServerStats::default();
    }
    /// Handle a compile request from a client.
    ///
    /// This will handle a compile request entirely, generating a response with
    /// the inital information and an optional body which will eventually
    /// contain the results of the compilation.
    async fn handle_compile(&self, compile: Compile) -> Result<SccacheResponse> {
        let exe = compile.exe;
        let cmd = compile.args;
        let cwd: PathBuf = compile.cwd.into();
        let env_vars = compile.env_vars;
        let me = self.clone();
        let info = self.compiler_info(exe.into(), cwd.clone(), &env_vars).await;
        Ok(me.check_compiler(info, cmd, cwd, env_vars).await)
    }
    /// Look up compiler info from the cache for the compiler `path`.
    /// If not cached, determine the compiler type and cache the result.
    async fn compiler_info(
        &self,
        path: PathBuf,
        cwd: PathBuf,
        env: &[(OsString, OsString)],
    ) -> Result<Box<dyn Compiler<C>>> {
        trace!("compiler_info");
        let me = self.clone();
        let me1 = self.clone();
        // lookup if compiler proxy exists for the current compiler path
        let path2 = path.clone();
        let path1 = path.clone();
        let env = env.to_vec();
        let resolved_with_proxy = {
            let compiler_proxies_borrow = self.compiler_proxies.read().await;
            // Create an owned future - compiler proxy is not Send so we can't
            // really await while borrowing the proxy since rustc is too conservative
            let resolve_proxied_executable =
                compiler_proxies_borrow
                    .get(&path)
                    .map(|(compiler_proxy, _filetime)| {
                        compiler_proxy.resolve_proxied_executable(
                            self.creator.clone(),
                            cwd.clone(),
                            env.as_slice(),
                        )
                    });
            match resolve_proxied_executable {
                Some(fut) => fut.await.ok(),
                None => None,
            }
        };
        // use the supplied compiler path as fallback, lookup its modification time too
        let (resolved_compiler_path, mtime) = match resolved_with_proxy {
            Some(x) => x, // TODO resolve the path right away
            _ => {
                // fallback to using the path directly
                metadata(&path2)
                    .map(|attr| FileTime::from_last_modification_time(&attr))
                    .ok()
                    .map(move |filetime| (path2, filetime))
                    .expect("Must contain sane data, otherwise mtime is not avail")
            }
        };
        // Custom dist toolchains participate in cache invalidation via the
        // archive's mtime, alongside the compiler's own mtime.
        let dist_info = match me1.dist_client.get_client() {
            Ok(Some(ref client)) => {
                if let Some(archive) = client.get_custom_toolchain(&resolved_compiler_path) {
                    match metadata(&archive)
                        .map(|attr| FileTime::from_last_modification_time(&attr))
                    {
                        Ok(mtime) => Some((archive, mtime)),
                        _ => None,
                    }
                } else {
                    None
                }
            }
            _ => None,
        };
        let opt = match me1.compilers.read().await.get(&resolved_compiler_path) {
            // It's a hit only if the mtime and dist archive data matches.
            Some(&Some(ref entry)) => {
                if entry.mtime == mtime && entry.dist_info == dist_info {
                    Some(entry.compiler.box_clone())
                } else {
                    None
                }
            }
            _ => None,
        };
        match opt {
            Some(info) => {
                trace!("compiler_info cache hit");
                Ok(info)
            }
            None => {
                trace!("compiler_info cache miss");
                // Check the compiler type and return the result when
                // finished. This generally involves invoking the compiler,
                // so do it asynchronously.
                // the compiler path might be compiler proxy, so it is important to use
                // `path` (or its clone `path1`) to resolve using that one, not using `resolved_compiler_path`
                let info = get_compiler_info::<C>(
                    me.creator.clone(),
                    &path1,
                    &cwd,
                    env.as_slice(),
                    &me.rt,
                    dist_info.clone().map(|(p, _)| p),
                )
                .await;
                let (c, proxy) = match info {
                    Ok((c, proxy)) => (c.clone(), proxy.clone()),
                    Err(err) => {
                        // Cache the failure as `None` so we don't re-probe
                        // an unsupported path on every request.
                        trace!("Inserting PLAIN cache map info for {:?}", &path);
                        me.compilers.write().await.insert(path, None);
                        return Err(err);
                    }
                };
                // register the proxy for this compiler, so it will be used directly from now on
                // and the true/resolved compiler will create table hits in the hash map
                // based on the resolved path
                if let Some(proxy) = proxy {
                    trace!(
                        "Inserting new path proxy {:?} @ {:?} -> {:?}",
                        &path,
                        &cwd,
                        resolved_compiler_path
                    );
                    me.compiler_proxies
                        .write()
                        .await
                        .insert(path, (proxy, mtime));
                }
                // TODO add some safety checks in case a proxy exists, that the initial `path` is not
                // TODO the same as the resolved compiler binary
                // cache
                let map_info = CompilerCacheEntry::new(c.clone(), mtime, dist_info);
                trace!(
                    "Inserting POSSIBLY PROXIED cache map info for {:?}",
                    &resolved_compiler_path
                );
                me.compilers
                    .write()
                    .await
                    .insert(resolved_compiler_path, Some(map_info));
                // drop the proxy information, response is compiler only
                Ok(c)
            }
        }
    }
    /// Check that we can handle and cache `cmd` when run with `compiler`.
    /// If so, run `start_compile_task` to execute it.
    async fn check_compiler(
        &self,
        compiler: Result<Box<dyn Compiler<C>>>,
        cmd: Vec<OsString>,
        cwd: PathBuf,
        env_vars: Vec<(OsString, OsString)>,
    ) -> SccacheResponse {
        let mut stats = self.stats.write().await;
        match compiler {
            Err(e) => {
                debug!("check_compiler: Unsupported compiler: {}", e.to_string());
                stats.requests_unsupported_compiler += 1;
                return Message::WithoutBody(Response::Compile(
                    CompileResponse::UnsupportedCompiler(OsString::from(e.to_string())),
                ));
            }
            Ok(c) => {
                debug!("check_compiler: Supported compiler");
                // Now check that we can handle this compiler with
                // the provided commandline.
                match c.parse_arguments(&cmd, &cwd) {
                    CompilerArguments::Ok(hasher) => {
                        debug!("parse_arguments: Ok: {:?}", cmd);
                        stats.requests_executed += 1;
                        // The compile runs in a background task; its result
                        // will stream through the response body `rx`.
                        let (tx, rx) = Body::pair();
                        self.start_compile_task(c, hasher, cmd, cwd, env_vars, tx);
                        let res = CompileResponse::CompileStarted;
                        return Message::WithBody(Response::Compile(res), rx);
                    }
                    CompilerArguments::CannotCache(why, extra_info) => {
                        if let Some(extra_info) = extra_info {
                            debug!(
                                "parse_arguments: CannotCache({}, {}): {:?}",
                                why, extra_info, cmd
                            )
                        } else {
                            debug!("parse_arguments: CannotCache({}): {:?}", why, cmd)
                        }
                        stats.requests_not_cacheable += 1;
                        *stats.not_cached.entry(why.to_string()).or_insert(0) += 1;
                    }
                    CompilerArguments::NotCompilation => {
                        debug!("parse_arguments: NotCompilation: {:?}", cmd);
                        stats.requests_not_compile += 1;
                    }
                }
            }
        }
        // Fallthrough for the CannotCache / NotCompilation arms above.
        let res = CompileResponse::UnhandledCompile;
        Message::WithoutBody(Response::Compile(res))
    }
    /// Given compiler arguments `arguments`, look up
    /// a compile result in the cache or execute the compilation and store
    /// the result in the cache.
    fn start_compile_task(
        &self,
        compiler: Box<dyn Compiler<C>>,
        hasher: Box<dyn CompilerHasher<C>>,
        arguments: Vec<OsString>,
        cwd: PathBuf,
        env_vars: Vec<(OsString, OsString)>,
        mut tx: mpsc::Sender<Result<Response>>,
    ) {
        // SCCACHE_RECACHE forces a cache miss and re-stores the result.
        let force_recache = env_vars
            .iter()
            .any(|&(ref k, ref _v)| k.as_os_str() == OsStr::new("SCCACHE_RECACHE"));
        let cache_control = if force_recache {
            CacheControl::ForceRecache
        } else {
            CacheControl::Default
        };
        let out_pretty = hasher.output_pretty().into_owned();
        let color_mode = hasher.color_mode();
        let me = self.clone();
        let kind = compiler.kind();
        let dist_client = self.dist_client.get_client();
        let creator = self.creator.clone();
        let storage = self.storage.clone();
        let pool = self.rt.clone();
        let task = async move {
            let result = match dist_client {
                Ok(client) => {
                    hasher
                        .get_cached_or_compile(
                            client,
                            creator,
                            storage,
                            arguments,
                            cwd,
                            env_vars,
                            cache_control,
                            pool,
                        )
                        .await
                }
                Err(e) => Err(e),
            };
            let mut cache_write = None;
            let mut res = CompileFinished {
                color_mode,
                ..Default::default()
            };
            match result {
                Ok((compiled, out)) => {
                    // Record per-outcome statistics under the write lock.
                    let mut stats = me.stats.write().await;
                    match compiled {
                        CompileResult::Error => {
                            stats.cache_errors.increment(&kind);
                        }
                        CompileResult::CacheHit(duration) => {
                            stats.cache_hits.increment(&kind);
                            stats.cache_read_hit_duration += duration;
                        }
                        CompileResult::CacheMiss(miss_type, dist_type, duration, future) => {
                            match dist_type {
                                DistType::NoDist => {}
                                DistType::Ok(id) => {
                                    let server = id.addr().to_string();
                                    let server_count =
                                        stats.dist_compiles.entry(server).or_insert(0);
                                    *server_count += 1;
                                }
                                DistType::Error => stats.dist_errors += 1,
                            }
                            match miss_type {
                                MissType::Normal => {}
                                MissType::ForcedRecache => {
                                    stats.forced_recaches += 1;
                                }
                                MissType::TimedOut => {
                                    stats.cache_timeouts += 1;
                                }
                                MissType::CacheReadError => {
                                    stats.cache_errors.increment(&kind);
                                }
                            }
                            stats.cache_misses.increment(&kind);
                            stats.cache_read_miss_duration += duration;
                            // Defer the cache write so the client gets its
                            // result without waiting for storage.
                            cache_write = Some(future);
                        }
                        CompileResult::NotCacheable => {
                            stats.cache_misses.increment(&kind);
                            stats.non_cacheable_compilations += 1;
                        }
                        CompileResult::CompileFailed => {
                            stats.compile_fails += 1;
                        }
                    };
                    let Output {
                        status,
                        stdout,
                        stderr,
                    } = out;
                    trace!("CompileFinished retcode: {}", status);
                    match status.code() {
                        Some(code) => res.retcode = Some(code),
                        None => res.signal = Some(get_signal(status)),
                    };
                    res.stdout = stdout;
                    res.stderr = stderr;
                }
                Err(err) => {
                    let mut stats = me.stats.write().await;
                    match err.downcast::<ProcessError>() {
                        Ok(ProcessError(output)) => {
                            // Compiler itself failed: forward its output.
                            debug!("Compilation failed: {:?}", output);
                            stats.compile_fails += 1;
                            match output.status.code() {
                                Some(code) => res.retcode = Some(code),
                                None => res.signal = Some(get_signal(output.status)),
                            };
                            res.stdout = output.stdout;
                            res.stderr = output.stderr;
                        }
                        Err(err) => match err.downcast::<HttpClientError>() {
                            Ok(HttpClientError(msg)) => {
                                // Dist client problem: reset so it gets
                                // recreated on the next request.
                                me.dist_client.reset_state();
                                let errmsg =
                                    format!("[{:?}] http error status: {}", out_pretty, msg);
                                error!("{}", errmsg);
                                res.retcode = Some(1);
                                res.stderr = errmsg.as_bytes().to_vec();
                            }
                            Err(err) => {
                                use std::fmt::Write;
                                error!("[{:?}] fatal error: {}", out_pretty, err);
                                let mut error = "sccache: encountered fatal error\n".to_string();
                                let _ = writeln!(error, "sccache: error: {}", err);
                                for e in err.chain() {
                                    error!("[{:?}] \t{}", out_pretty, e);
                                    let _ = writeln!(error, "sccache: caused by: {}", e);
                                }
                                stats.cache_errors.increment(&kind);
                                //TODO: figure out a better way to communicate this?
                                res.retcode = Some(-2);
                                res.stderr = error.into_bytes();
                            }
                        },
                    }
                }
            };
            let send = tx
                .send(Ok(Response::CompileFinished(res)))
                .map_err(|e| anyhow!("send on finish failed").context(e));
            let me = me.clone();
            // Run the deferred cache write (if any) alongside sending the
            // result back to the client.
            let cache_write = async move {
                if let Some(cache_write) = cache_write {
                    match cache_write.await {
                        Err(e) => {
                            debug!("Error executing cache write: {}", e);
                            me.stats.write().await.cache_write_errors += 1;
                        }
                        //TODO: save cache stats!
                        Ok(info) => {
                            debug!(
                                "[{}]: Cache write finished in {}",
                                info.object_file_pretty,
                                util::fmt_duration_as_secs(&info.duration)
                            );
                            let mut stats = me.stats.write().await;
                            stats.cache_writes += 1;
                            stats.cache_write_duration += info.duration;
                        }
                    }
                }
                Ok(())
            };
            futures::future::try_join(send, cache_write).await?;
            Ok::<_, Error>(())
        };
        self.rt.spawn(async move {
            task.await
                .unwrap_or_else(|e| warn!("Failed to execute task: {:?}", e));
        });
    }
}
/// Counter bucketed by source language, as reported by the compiler kind.
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct PerLanguageCount {
    counts: HashMap<String, u64>,
}

impl PerLanguageCount {
    /// Bump the counter for the language of `kind`.
    fn increment(&mut self, kind: &CompilerKind) {
        *self.counts.entry(kind.lang_kind()).or_default() += 1;
    }

    /// Total count across every language.
    pub fn all(&self) -> u64 {
        self.counts.values().copied().sum()
    }

    /// Count recorded for a single language key, if any.
    pub fn get(&self, key: &str) -> Option<&u64> {
        self.counts.get(key)
    }

    /// Construct an empty counter set.
    pub fn new() -> PerLanguageCount {
        PerLanguageCount::default()
    }
}
/// Statistics about the server.
///
/// All fields are cumulative totals gathered while the server runs.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ServerStats {
    /// The count of client compile requests.
    pub compile_requests: u64,
    /// The count of client requests that used an unsupported compiler.
    pub requests_unsupported_compiler: u64,
    /// The count of client requests that were not compilation.
    pub requests_not_compile: u64,
    /// The count of client requests that were not cacheable.
    pub requests_not_cacheable: u64,
    /// The count of client requests that were executed.
    pub requests_executed: u64,
    /// The count of errors handling compile requests (per language).
    pub cache_errors: PerLanguageCount,
    /// The count of cache hits for handled compile requests (per language).
    pub cache_hits: PerLanguageCount,
    /// The count of cache misses for handled compile requests (per language).
    pub cache_misses: PerLanguageCount,
    /// The count of cache misses because the cache took too long to respond.
    pub cache_timeouts: u64,
    /// The count of errors reading cache entries.
    pub cache_read_errors: u64,
    /// The count of compilations which were successful but couldn't be cached.
    pub non_cacheable_compilations: u64,
    /// The count of compilations which forcibly ignored the cache.
    pub forced_recaches: u64,
    /// The count of errors writing to cache.
    pub cache_write_errors: u64,
    /// The number of successful cache writes.
    pub cache_writes: u64,
    /// The total time spent writing cache entries.
    pub cache_write_duration: Duration,
    /// The total time spent reading cache hits.
    pub cache_read_hit_duration: Duration,
    /// The total time spent reading cache misses.
    pub cache_read_miss_duration: Duration,
    /// The count of compilation failures.
    pub compile_fails: u64,
    /// Counts of reasons why compiles were not cached.
    // NOTE(review): this and `dist_compiles` use `usize` while every other
    // counter is `u64`, so serialized width differs across platforms — confirm
    // whether that is intentional.
    pub not_cached: HashMap<String, usize>,
    /// The count of compilations that were successfully distributed indexed
    /// by the server that ran those compilations.
    pub dist_compiles: HashMap<String, usize>,
    /// The count of compilations that were distributed but failed and had to be re-run locally
    pub dist_errors: u64,
}
/// Info and stats about the server.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ServerInfo {
    /// Cumulative counters for this server instance.
    pub stats: ServerStats,
    /// Human-readable description of where the cache lives.
    pub cache_location: String,
    /// Current cache size in bytes, if known.
    pub cache_size: Option<u64>,
    /// Configured maximum cache size in bytes, if known.
    pub max_cache_size: Option<u64>,
}
/// Status of the dist client.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum DistInfo {
    /// Distributed compilation is disabled; the string describes why.
    Disabled(String),
    /// Not connected to a scheduler; the string describes the reason.
    #[cfg(feature = "dist-client")]
    NotConnected(Option<config::HTTPUrl>, String),
    /// Connected; carries the scheduler's reported status.
    #[cfg(feature = "dist-client")]
    SchedulerStatus(Option<config::HTTPUrl>, dist::SchedulerStatusResult),
}
impl Default for ServerStats {
    /// All-zero statistics: every counter starts at 0, every duration at
    /// zero length, and every map empty.
    fn default() -> ServerStats {
        ServerStats {
            compile_requests: 0,
            requests_unsupported_compiler: 0,
            requests_not_compile: 0,
            requests_not_cacheable: 0,
            requests_executed: 0,
            cache_errors: PerLanguageCount::default(),
            cache_hits: PerLanguageCount::default(),
            cache_misses: PerLanguageCount::default(),
            cache_timeouts: 0,
            cache_read_errors: 0,
            non_cacheable_compilations: 0,
            forced_recaches: 0,
            cache_write_errors: 0,
            cache_writes: 0,
            cache_write_duration: Duration::default(),
            cache_read_hit_duration: Duration::default(),
            cache_read_miss_duration: Duration::default(),
            compile_fails: 0,
            not_cached: HashMap::default(),
            dist_compiles: HashMap::default(),
            dist_errors: 0,
        }
    }
}
impl ServerStats {
    /// Print stats to stdout in a human-readable format.
    ///
    /// Return the formatted width of each of the (name, value) columns.
    fn print(&self) -> (usize, usize) {
        // Push a plain counter row onto $vec as (name, value, suffix length).
        macro_rules! set_stat {
            ($vec:ident, $var:expr, $name:expr) => {{
                // name, value, suffix length
                $vec.push(($name.to_string(), $var.to_string(), 0));
            }};
        }
        // Push a per-language counter: the total first, then one row per
        // language, sorted by language name for stable output.
        macro_rules! set_lang_stat {
            ($vec:ident, $var:expr, $name:expr) => {{
                $vec.push(($name.to_string(), $var.all().to_string(), 0));
                let mut sorted_stats: Vec<_> = $var.counts.iter().collect();
                sorted_stats.sort_by_key(|v| v.0);
                for (lang, count) in sorted_stats.iter() {
                    $vec.push((format!("{} ({})", $name, lang), count.to_string(), 0));
                }
            }};
        }
        // Push an average duration ($dur / $num), or the zero duration when
        // $num is 0 (avoids divide-by-zero). The suffix length of 2 widens
        // the value column for the unit suffix appended by
        // fmt_duration_as_secs — presumably " s"; confirm against util.
        macro_rules! set_duration_stat {
            ($vec:ident, $dur:expr, $num:expr, $name:expr) => {{
                let s = if $num > 0 {
                    $dur / $num as u32
                } else {
                    Default::default()
                };
                // name, value, suffix length
                $vec.push(($name.to_string(), util::fmt_duration_as_secs(&s), 2));
            }};
        }
        let mut stats_vec = vec![];
        //TODO: this would be nice to replace with a custom derive implementation.
        set_stat!(stats_vec, self.compile_requests, "Compile requests");
        set_stat!(
            stats_vec,
            self.requests_executed,
            "Compile requests executed"
        );
        set_lang_stat!(stats_vec, self.cache_hits, "Cache hits");
        set_lang_stat!(stats_vec, self.cache_misses, "Cache misses");
        set_stat!(stats_vec, self.cache_timeouts, "Cache timeouts");
        set_stat!(stats_vec, self.cache_read_errors, "Cache read errors");
        set_stat!(stats_vec, self.forced_recaches, "Forced recaches");
        set_stat!(stats_vec, self.cache_write_errors, "Cache write errors");
        set_stat!(stats_vec, self.compile_fails, "Compilation failures");
        set_lang_stat!(stats_vec, self.cache_errors, "Cache errors");
        set_stat!(
            stats_vec,
            self.non_cacheable_compilations,
            "Non-cacheable compilations"
        );
        set_stat!(
            stats_vec,
            self.requests_not_cacheable,
            "Non-cacheable calls"
        );
        set_stat!(
            stats_vec,
            self.requests_not_compile,
            "Non-compilation calls"
        );
        set_stat!(
            stats_vec,
            self.requests_unsupported_compiler,
            "Unsupported compiler calls"
        );
        set_duration_stat!(
            stats_vec,
            self.cache_write_duration,
            self.cache_writes,
            "Average cache write"
        );
        set_duration_stat!(
            stats_vec,
            self.cache_read_miss_duration,
            self.cache_misses.all(),
            "Average cache read miss"
        );
        set_duration_stat!(
            stats_vec,
            self.cache_read_hit_duration,
            self.cache_hits.all(),
            "Average cache read hit"
        );
        set_stat!(
            stats_vec,
            self.dist_errors,
            "Failed distributed compilations"
        );
        // Column widths: the longest name and longest value seen above.
        let name_width = stats_vec
            .iter()
            .map(|&(ref n, _, _)| n.len())
            .max()
            .unwrap();
        let stat_width = stats_vec
            .iter()
            .map(|&(_, ref s, _)| s.len())
            .max()
            .unwrap();
        for (name, stat, suffix_len) in stats_vec {
            println!(
                "{:<name_width$} {:>stat_width$}",
                name,
                stat,
                name_width = name_width,
                stat_width = stat_width + suffix_len
            );
        }
        // Per-server breakdown of distributed compiles, most frequent first.
        if !self.dist_compiles.is_empty() {
            println!("\nSuccessful distributed compiles");
            let mut counts: Vec<_> = self.dist_compiles.iter().collect();
            counts.sort_by(|(_, c1), (_, c2)| c1.cmp(c2).reverse());
            for (reason, count) in counts {
                println!(
                    " {:<name_width$} {:>stat_width$}",
                    reason,
                    count,
                    name_width = name_width - 2,
                    stat_width = stat_width
                );
            }
        }
        // Breakdown of why compiles were not cached, most frequent first.
        if !self.not_cached.is_empty() {
            println!("\nNon-cacheable reasons:");
            let mut counts: Vec<_> = self.not_cached.iter().collect();
            counts.sort_by(|(_, c1), (_, c2)| c1.cmp(c2).reverse());
            for (reason, count) in counts {
                println!(
                    "{:<name_width$} {:>stat_width$}",
                    reason,
                    count,
                    name_width = name_width,
                    stat_width = stat_width
                );
            }
            println!();
        }
        (name_width, stat_width)
    }
}
impl ServerInfo {
    /// Print info to stdout in a human-readable format.
    pub fn print(&self) {
        // Reuse the column widths computed while printing the stats so the
        // extra rows below line up with the stats table.
        let (name_width, stat_width) = self.stats.print();
        println!(
            "{:<name_width$} {}",
            "Cache location",
            self.cache_location,
            name_width = name_width
        );
        for &(name, val) in &[
            ("Cache size", &self.cache_size),
            ("Max cache size", &self.max_cache_size),
        ] {
            if let Some(val) = *val {
                // Scale raw byte counts to a binary prefix (KiB, MiB, ...),
                // falling back to plain "bytes" for small values.
                let (val, suffix) = match NumberPrefix::binary(val as f64) {
                    NumberPrefix::Standalone(bytes) => (bytes.to_string(), "bytes".to_string()),
                    NumberPrefix::Prefixed(prefix, n) => {
                        (format!("{:.0}", n), format!("{}B", prefix))
                    }
                };
                println!(
                    "{:<name_width$} {:>stat_width$} {}",
                    name,
                    val,
                    suffix,
                    name_width = name_width,
                    stat_width = stat_width
                );
            }
        }
    }
}
/// One frame of the wire protocol: either a chunk of a streamed body or a
/// complete message.
enum Frame<R, R1> {
    /// A body chunk; `None` is treated as a no-op by the transport sink.
    Body { chunk: Option<R1> },
    /// A complete message.
    Message { message: R },
}
/// Streaming message body fed by an in-memory channel.
struct Body<R> {
    receiver: mpsc::Receiver<Result<R>>,
}

impl<R> Body<R> {
    /// Create a connected (sender, body) pair; items sent on the sender are
    /// yielded by the body's `Stream` implementation.
    fn pair() -> (mpsc::Sender<Result<R>>, Self) {
        let (tx, rx) = mpsc::channel(0);
        (tx, Body { receiver: rx })
    }
}
impl<R> futures::Stream for Body<R> {
    type Item = Result<R>;

    /// Delegate directly to the underlying channel receiver.
    fn poll_next(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        Pin::new(&mut self.receiver).poll_next(cx)
    }
}
/// A protocol message, optionally paired with a streaming body.
enum Message<R, B> {
    /// A message carrying a body.
    WithBody(R, B),
    /// A message with no body.
    WithoutBody(R),
}

impl<R, B> Message<R, B> {
    /// Consume the message, discarding any body, and return the payload.
    fn into_inner(self) -> R {
        match self {
            Message::WithBody(r, _) | Message::WithoutBody(r) => r,
        }
    }
}
/// tokio-serde codec that encodes and decodes frames with bincode.
struct BincodeCodec;

impl<T> tokio_serde::Serializer<T> for BincodeCodec
where
    T: serde::Serialize,
{
    type Error = Error;

    /// Serialize `item` into a freshly allocated, frozen byte buffer.
    fn serialize(self: Pin<&mut Self>, item: &T) -> std::result::Result<Bytes, Self::Error> {
        let mut bytes = BytesMut::new();
        bincode::serialize_into((&mut bytes).writer(), item)?;
        Ok(bytes.freeze())
    }
}

impl<T> tokio_serde::Deserializer<T> for BincodeCodec
where
    T: serde::de::DeserializeOwned,
{
    type Error = Error;

    /// Deserialize one value from a full frame buffer.
    fn deserialize(self: Pin<&mut Self>, buf: &BytesMut) -> std::result::Result<T, Self::Error> {
        let ret = bincode::deserialize(buf)?;
        Ok(ret)
    }
}
/// Implementation of `Stream + Sink` that tokio-proto is expecting
///
/// This type is composed of a few layers:
///
/// * First there's `I`, the I/O object implementing `AsyncRead` and
///   `AsyncWrite`
/// * Next that's framed using the `length_delimited` module in tokio-io giving
///   us a `Sink` and `Stream` of `BytesMut`.
/// * Next that sink/stream is wrapped in `ReadBincode` which will cause the
///   `Stream` implementation to switch from `BytesMut` to `Request` by parsing
///   the bytes as bincode.
/// * Finally that sink/stream is wrapped in `WriteBincode` which will cause the
///   `Sink` implementation to switch from `BytesMut` to `Response` meaning that
///   all `Response` types pushed in will be converted to `BytesMut` and pushed
///   below.
struct SccacheTransport<I: AsyncRead + AsyncWrite + Unpin> {
    inner: Framed<
        futures::stream::ErrInto<
            futures::sink::SinkErrInto<
                tokio_util::codec::Framed<I, LengthDelimitedCodec>,
                Bytes,
                Error,
            >,
            Error,
        >,
        Request,
        Response,
        BincodeCodec,
    >,
}
impl<I: AsyncRead + AsyncWrite + Unpin> Stream for SccacheTransport<I> {
    type Item = Result<Message<Request, Body<()>>>;

    /// Pull the next decoded `Request` off the wire. Every request is wrapped
    /// in `Message::WithoutBody` — incoming requests never carry a body here.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        Pin::new(&mut self.inner)
            .poll_next(cx)
            .map(|r| r.map(|s| s.map(Message::WithoutBody)))
    }
}
impl<I: AsyncRead + AsyncWrite + Unpin> Sink<Frame<Response, Response>> for SccacheTransport<I> {
    type Error = Error;

    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        Pin::new(&mut self.inner).poll_ready(cx)
    }

    /// Messages and body chunks are encoded identically (both are
    /// `Response`s); an empty body chunk (`None`) is a no-op.
    fn start_send(mut self: Pin<&mut Self>, item: Frame<Response, Response>) -> Result<()> {
        match item {
            Frame::Message { message } => Pin::new(&mut self.inner).start_send(message),
            Frame::Body { chunk: Some(chunk) } => Pin::new(&mut self.inner).start_send(chunk),
            Frame::Body { chunk: None } => Ok(()),
        }
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        Pin::new(&mut self.inner).poll_close(cx)
    }
}
/// Future that resolves when a shutdown is requested or the server has been
/// inactive for `timeout_dur`.
struct ShutdownOrInactive {
    // Channel of server lifecycle messages (requests and shutdown).
    rx: mpsc::Receiver<ServerMessage>,
    // Armed inactivity timer, if any; re-armed whenever a request arrives.
    timeout: Option<Pin<Box<Sleep>>>,
    // Inactivity window; a zero duration disables the timeout entirely.
    timeout_dur: Duration,
}
impl Future for ShutdownOrInactive {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        // Drain every pending server message before consulting the timer.
        loop {
            match Pin::new(&mut self.rx).poll_next(cx) {
                Poll::Pending => break,
                // Shutdown received!
                Poll::Ready(Some(ServerMessage::Shutdown)) => return Poll::Ready(()),
                Poll::Ready(Some(ServerMessage::Request)) => {
                    // Activity seen: restart the inactivity timer. A zero
                    // duration means "never time out", so no timer is armed.
                    if self.timeout_dur != Duration::new(0, 0) {
                        self.timeout = Some(Box::pin(sleep(self.timeout_dur)));
                    }
                }
                // All services have shut down, in theory this isn't possible...
                Poll::Ready(None) => return Poll::Ready(()),
            }
        }
        // No shutdown yet: resolve when the inactivity timer (if armed) fires.
        match self.timeout {
            None => Poll::Pending,
            Some(ref mut timeout) => timeout.as_mut().poll(cx),
        }
    }
}
/// Helper future which tracks the `ActiveInfo` below. This future will resolve
/// once all instances of `ActiveInfo` have been dropped.
struct WaitUntilZero {
    // Weak reference: holding a strong `Arc` here would keep the count above
    // zero forever and the future would never resolve.
    info: std::sync::Weak<Mutex<Info>>,
}

/// Cloneable token; `WaitUntilZero` resolves once every clone is dropped.
#[derive(Clone)]
struct ActiveInfo {
    info: Arc<Mutex<Info>>,
}

/// Shared state: the waker of the task currently awaiting `WaitUntilZero`.
struct Info {
    waker: Option<Waker>,
}

impl Drop for Info {
    // Runs when the last `ActiveInfo` strong reference goes away; wakes the
    // waiting task so `WaitUntilZero::poll` observes the dead `Weak`.
    fn drop(&mut self) {
        if let Some(waker) = self.waker.as_ref() {
            waker.wake_by_ref();
        }
    }
}
impl WaitUntilZero {
    /// Create a paired (future, token); the future resolves once the token
    /// and all of its clones have been dropped.
    #[rustfmt::skip]
    fn new() -> (WaitUntilZero, ActiveInfo) {
        let info = Arc::new(Mutex::new(Info { waker: None }));
        (WaitUntilZero { info: Arc::downgrade(&info) }, ActiveInfo { info })
    }
}
impl std::future::Future for WaitUntilZero {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
        match self.info.upgrade() {
            // Upgrade fails only after every `ActiveInfo` strong reference
            // has been dropped — exactly the "zero" condition.
            None => std::task::Poll::Ready(()),
            Some(arc) => {
                let mut info = arc.lock().expect("we can't panic when holding lock");
                // Register our waker; `Info::drop` wakes it when the last
                // token disappears.
                info.waker = Some(cx.waker().clone());
                std::task::Poll::Pending
            }
        }
    }
}
#[test]
fn waits_until_zero() {
    // One live token: future must stay pending.
    let (wait, _active) = WaitUntilZero::new();
    assert_eq!(wait.now_or_never(), None);
    // A clone keeps the count non-zero even after the original is dropped.
    let (wait, active) = WaitUntilZero::new();
    let _active2 = active.clone();
    drop(active);
    assert_eq!(wait.now_or_never(), None);
    // Token dropped immediately: future resolves at once.
    let (wait, _) = WaitUntilZero::new();
    assert_eq!(wait.now_or_never(), Some(()));
    // All clones dropped: future resolves.
    let (wait, active) = WaitUntilZero::new();
    let active2 = active.clone();
    drop(active);
    drop(active2);
    assert_eq!(wait.now_or_never(), Some(()));
}
|
// Copyright 2015 Nathan Sizemore <nathanrsizemore@gmail.com>
//
// This Source Code Form is subject to the terms of the
// Mozilla Public License, v. 2.0. If a copy of the MPL was not
// distributed with this file, You can obtain one at
// http://mozilla.org/MPL/2.0/.
use std::io::{Error, ErrorKind};
use std::ops::DerefMut;
use std::sync::{Arc, Mutex};
use std::{mem, thread};
use std::net::{TcpStream, TcpListener};
use std::os::unix::io::{RawFd, AsRawFd, IntoRawFd};
use std::collections::LinkedList;
use libc;
use errno::errno;
use epoll;
use epoll::util::*;
use epoll::EpollEvent;
use libc::{c_int, c_void};
use config::Config;
use openssl::ssl::{SslStream, SslContext};
use stats;
use types::*;
use resources::ResourcePool;
use ss::nonblocking::plain::Plain;
use ss::nonblocking::secure::Secure;
use ss::{Socket, Stream, SRecv, SSend, TcpOptions, SocketOptions};
// We need to be able to access our resource pool from several methods
//
// NOTE(review): raw mutable static, written once in `begin()`; all access
// goes through `unsafe`. It points at a stack local in `begin()` — confirm
// nothing dereferences it after `begin()` returns.
static mut pool: *mut ResourcePool = 0 as *mut ResourcePool;
// Global SslContext, intentionally leaked via `Box::into_raw` in
// `setup_ssl_context()` so it lives for the rest of the process.
static mut ssl_context: *mut SslContext = 0 as *mut SslContext;
// When added to epoll, these will be the conditions of kernel notification:
//
// EPOLLET - Fd is in EdgeTriggered mode (notification on state changes)
// EPOLLIN - Data is available in kernel buffer
const EVENTS: u32 = event_type::EPOLLET | event_type::EPOLLIN;
/// Starts the epoll wait and incoming connection listener threads.
///
/// Blocks joining the listener thread, so it effectively runs for the
/// lifetime of the server.
pub fn begin(config: Config, handler: Box<EventHandler>) {
    // Master socket list
    let sockets = Arc::new(Mutex::new(LinkedList::<Stream>::new()));
    // Resource pool
    let mut rp = ResourcePool::new(config.workers);
    unsafe {
        // NOTE(review): `pool` points at the stack local `rp`; it is only
        // valid while this function is on the stack (it blocks in
        // `prox.join()` below). Confirm no worker touches it after shutdown.
        pool = &mut rp;
    }
    // Wrap our event handler into something that can be safely shared
    // between threads.
    let e_handler = Handler(Box::into_raw(handler));
    // Epoll instance
    let result = epoll::create1(0);
    if result.is_err() {
        let err = result.unwrap_err();
        error!("Unable to create epoll instance: {}", err);
        panic!()
    }
    let epfd = result.unwrap();
    // Epoll wait thread
    let epfd2 = epfd.clone();
    let streams2 = sockets.clone();
    thread::Builder::new()
        .name("Epoll Wait".to_string())
        .spawn(move || {
            event_loop(epfd2, streams2, e_handler);
        })
        .unwrap();
    // New connection thread
    let epfd3 = epfd.clone();
    let streams3 = sockets.clone();
    let prox = thread::Builder::new()
        .name("TCP Incoming Listener".to_string())
        .spawn(move || {
            listen(config, epfd3, streams3);
        })
        .unwrap();
    // Stay alive forever, or at least we hope
    let _ = prox.join();
}
/// Incoming connection listening thread
fn listen(config: Config, epfd: RawFd, streams: StreamList) {
// Setup server and listening port
let listener_result = try_setup_tcp_listener(&config);
if listener_result.is_err() {
error!("Setting up server: {}", listener_result.unwrap_err());
return;
}
// If we're using SSL, setup our context reference
if config.ssl.is_some() {
setup_ssl_context(&config);
}
// Begin listening for new connections
let listener = listener_result.unwrap();
for accept_result in listener.incoming() {
match accept_result {
Ok(tcp_stream) => handle_new_connection(tcp_stream, &config, epfd, streams.clone()),
Err(e) => error!("Accepting connection: {}", e)
}
}
drop(listener);
}
/// Binds the TCP listener and sets `SO_REUSEADDR` on it.
///
/// `SO_REUSEADDR` keeps restarts after a crash from waiting out the
/// TIME_WAIT period (~minutes) before the port can be bound again.
///
/// NOTE(review): the option is applied *after* `bind()`; to affect binding
/// it normally must be set beforehand. std's `TcpListener` offers no
/// pre-bind hook, so this is left as-is — confirm intent.
fn try_setup_tcp_listener(config: &Config) -> Result<TcpListener, Error> {
    // `?` propagates a bind failure exactly as the old
    // `is_err()`/early-return dance did, with less noise.
    let listener = TcpListener::bind((&config.addr[..], config.port))?;
    let server_fd = listener.as_raw_fd();
    unsafe {
        let optval: c_int = 1;
        // SAFETY: `server_fd` is a valid, open socket owned by `listener`,
        // and `optval` outlives the call.
        let opt_result = libc::setsockopt(server_fd,
                                          libc::SOL_SOCKET,
                                          libc::SO_REUSEADDR,
                                          &optval as *const _ as *const c_void,
                                          mem::size_of::<c_int>() as u32);
        if opt_result < 0 {
            return Err(Error::from_raw_os_error(errno().0 as i32));
        }
    }
    Ok(listener)
}
/// Initializes the global `ssl_context` pointer from the configured context.
///
/// The boxed context is intentionally leaked (`Box::into_raw`) so it stays
/// valid for the lifetime of the process.
fn setup_ssl_context(config: &Config) {
    unsafe {
        ssl_context = Box::into_raw(Box::new(config.ssl.clone().unwrap()));
    }
}
/// Accepts a freshly connected `TcpStream`: configures the raw socket, wraps
/// it in a plain or SSL stream depending on config, and registers it with
/// both the master stream list and epoll.
fn handle_new_connection(tcp_stream: TcpStream, config: &Config, epfd: RawFd, streams: StreamList) {
    // Update our total opened file descriptors
    stats::fd_opened();
    // Create and configure a new socket
    let mut socket = Socket::new(tcp_stream.into_raw_fd());
    let result = setup_new_socket(&mut socket);
    if result.is_err() {
        // Option setup failed; the helper already logged the error.
        close_fd(socket.as_raw_fd());
        return;
    }
    // Setup our stream
    let stream = match config.ssl {
        Some(_) => {
            let sock_fd = socket.as_raw_fd();
            // NOTE(review): assumes `ssl_context` was initialized by
            // `setup_ssl_context` before any connection is accepted — the
            // ordering in `listen()` appears to guarantee this; confirm.
            let ssl_result = unsafe { SslStream::accept(&(*ssl_context), socket) };
            match ssl_result {
                Ok(ssl_stream) => {
                    let secure_stream = Secure::new(ssl_stream);
                    Stream::new(Box::new(secure_stream))
                }
                Err(ssl_error) => {
                    error!("Creating SslStream: {}", ssl_error);
                    close_fd(sock_fd);
                    return;
                }
            }
        }
        None => {
            let plain_text = Plain::new(socket);
            Stream::new(Box::new(plain_text))
        }
    };
    // Add stream to our server
    let fd = stream.as_raw_fd();
    add_stream_to_master_list(stream, streams.clone());
    add_to_epoll(epfd, fd, streams.clone());
}
fn setup_new_socket(socket: &mut Socket) -> Result<(), ()> {
let result = socket.set_nonblocking();
if result.is_err() {
error!("Setting fd to nonblocking: {}", result.unwrap_err());
return Err(());
}
let result = socket.set_tcp_nodelay(true);
if result.is_err() {
error!("Setting tcp_nodelay: {}", result.unwrap_err());
return Err(());
}
let result = socket.set_tcp_keepalive(true);
if result.is_err() {
error!("Setting tcp_keepalive: {}", result.unwrap_err());
return Err(());
}
Ok(())
}
/// Event loop for handling all epoll events
fn event_loop(epfd: RawFd, streams: StreamList, handler: Handler) {
    // Buffer the kernel fills on each wait: at most 100 events per wakeup.
    let mut events = Vec::<EpollEvent>::with_capacity(100);
    unsafe {
        // NOTE(review): `set_len` exposes uninitialized `EpollEvent`s for the
        // kernel to write into. Only the first `num_events` entries are read
        // below, but this is still formally UB — consider zero-initializing.
        events.set_len(100);
    }
    loop {
        // Block indefinitely (-1 timeout) until at least one event arrives.
        match epoll::wait(epfd, &mut events[..], -1) {
            Ok(num_events) => {
                for x in 0..num_events as usize {
                    handle_epoll_event(epfd, &events[x], streams.clone(), handler.clone());
                }
            }
            Err(e) => {
                error!("Error on epoll::wait(): {}", e);
                panic!()
            }
        };
    }
}
/// Finds the stream the epoll event is associated with and parses the event type
/// to hand off to specific handlers
fn handle_epoll_event(epfd: RawFd, event: &EpollEvent, streams: StreamList, handler: Handler) {
    const READ_EVENT: u32 = event_type::EPOLLIN;
    // Locate the stream the event was for
    let mut stream;
    {
        // Mutex lock
        // Find the stream the event was for
        let mut guard = match streams.lock() {
            Ok(guard) => guard,
            Err(poisoned) => {
                warn!("StreamList Mutex was poisoned, using anyway");
                poisoned.into_inner()
            }
        };
        let list = guard.deref_mut();
        // Linear scan for the fd stored in the event's user data. `index` is
        // 1-based so `split_off(index - 1)` below splits directly in front
        // of the matching element.
        let mut found = false;
        let mut index = 1usize;
        for s in list.iter() {
            if s.as_raw_fd() == event.data as RawFd {
                found = true;
                break;
            }
            index += 1;
        }
        if !found {
            // Event for an fd we no longer track: unregister and close it.
            let fd = event.data as RawFd;
            remove_fd_from_epoll(epfd, fd);
            close_fd(fd);
            return;
        }
        // Detach the matching stream from the list; the read path re-inserts
        // it after the event has been handled.
        if index == 1 {
            stream = list.pop_front().unwrap();
        } else {
            let mut split = list.split_off(index - 1);
            stream = split.pop_front().unwrap();
            list.append(&mut split);
        }
    } // Mutex unlock
    if (event.events & READ_EVENT) > 0 {
        // Successful reads put the stream back into the master list.
        let _ = handle_read_event(epfd, &mut stream, handler).map(|_| {
            add_stream_to_master_list(stream, streams.clone());
        });
    } else {
        // Any non-read event drops the connection and notifies the handler
        // on a worker thread.
        let fd = stream.as_raw_fd();
        remove_fd_from_epoll(epfd, fd);
        close_fd(fd);
        let stream_fd = stream.as_raw_fd();
        unsafe {
            (*pool).run(move || {
                let Handler(ptr) = handler;
                (*ptr).on_stream_closed(stream_fd);
            });
        }
    }
}
/// Reads all available data on the stream.
///
/// If a complete message(s) is available, each message will be routed through the
/// resource pool.
///
/// If an error occurs during the read, the stream is dropped from the server.
fn handle_read_event(epfd: RawFd, stream: &mut Stream, handler: Handler) -> Result<(), ()> {
    match stream.recv() {
        Ok(_) => {
            let mut rx_queue = stream.drain_rx_queue();
            for payload in rx_queue.iter_mut() {
                // Check if this is a request for stats
                // (magic 6-byte frame: 0x04 0x04 followed by 4 f32 bytes).
                if payload.len() == 6 && payload[0] == 0x04 && payload[1] == 0x04 {
                    // Reinterpret bytes 2..6 as an f32 "seconds" value.
                    // NOTE(review): relies on host byte order and on
                    // unaligned f32 reads being acceptable — confirm.
                    let u8ptr: *const u8 = &payload[2] as *const _;
                    let f32ptr: *const f32 = u8ptr as *const _;
                    let sec = unsafe { *f32ptr };
                    let stream_cpy = stream.clone();
                    unsafe {
                        (*pool).run(move || {
                            let mut s = stream_cpy.clone();
                            let result = stats::as_serialized_buffer(sec);
                            if result.is_ok() {
                                let _ = s.send(&result.unwrap()[..]);
                            }
                        });
                    }
                    return Ok(());
                }
                // TODO - Refactor once better function passing traits are available in stable.
                let handler_cpy = handler.clone();
                let stream_cpy = stream.clone();
                let payload_cpy = payload.clone();
                unsafe {
                    (*pool).run(move || {
                        let Handler(ptr) = handler_cpy;
                        (*ptr).on_data_received(stream_cpy.clone(), payload_cpy.clone());
                    });
                }
            }
            Ok(())
        }
        Err(e) => {
            // Read failed: unregister from epoll, close the fd, and notify
            // the handler on a worker thread.
            remove_fd_from_epoll(epfd, stream.as_raw_fd());
            close_fd(stream.as_raw_fd());
            let stream_fd = stream.as_raw_fd();
            unsafe {
                (*pool).run(move || {
                    let Handler(ptr) = handler;
                    (*ptr).on_stream_closed(stream_fd.clone());
                });
            }
            Err(())
        }
    }
}
/// Inserts the stream back into the master list of streams
fn add_stream_to_master_list(stream: Stream, streams: StreamList) {
    // Recover from a poisoned lock rather than panicking; the list itself
    // is still usable.
    let mut guard = streams.lock().unwrap_or_else(|poisoned| {
        warn!("StreamList Mutex failed, using anyway...");
        poisoned.into_inner()
    });
    guard.push_back(stream);
    stats::conn_recv();
}
/// Adds a new fd to the epoll instance
///
/// On failure the fd is removed from the master list and closed.
fn add_to_epoll(epfd: RawFd, fd: RawFd, streams: StreamList) {
    // The fd itself doubles as the event's user data so events can be
    // matched back to streams later.
    let mut event = EpollEvent {
        data: fd as u64,
        events: EVENTS,
    };
    if let Err(e) = epoll::ctl(epfd, ctl_op::ADD, fd, &mut event) {
        error!("poll::CtrlError during add: {}", e);
        remove_fd_from_list(fd, streams);
        close_fd(fd);
    }
}
/// Removes a fd from the epoll instance
///
/// Failure is only logged: the fd may already have been closed or removed.
fn remove_fd_from_epoll(epfd: RawFd, fd: RawFd) {
    // In kernel versions before 2.6.9, the EPOLL_CTL_DEL operation required
    // a non-null pointer in event, even though this argument is ignored.
    // Since Linux 2.6.9, event can be specified as NULL when using
    // EPOLL_CTL_DEL. We'll be as backwards compatible as possible.
    let _ = epoll::ctl(epfd,
                       ctl_op::DEL,
                       fd,
                       &mut EpollEvent {
                           data: 0 as u64,
                           events: 0 as u32,
                       })
        .map_err(|e| warn!("Epoll CtrlError during del: {}", e));
}
/// Removes stream with fd from master list
fn remove_fd_from_list(fd: RawFd, streams: StreamList) {
    let mut guard = match streams.lock() {
        Ok(guard) => guard,
        Err(poisoned) => {
            warn!("StreamList Mutex was poisoned, using anyway");
            poisoned.into_inner()
        }
    };
    let list = guard.deref_mut();
    // 1-based linear scan; same detach pattern as handle_epoll_event.
    let mut found = false;
    let mut index = 1usize;
    for s in list.iter() {
        if s.as_raw_fd() == fd {
            found = true;
            break;
        }
        index += 1;
    }
    if !found {
        trace!("fd: {} not found in list", fd);
        return;
    }
    // Unlink the element by splitting the list in front of it.
    if index == 1 {
        list.pop_front();
    } else {
        let mut split = list.split_off(index - 1);
        split.pop_front();
        list.append(&mut split);
    }
    stats::conn_lost();
}
/// Closes a fd with the kernel
///
/// On success the closed-fd statistic is updated; on failure the error is
/// logged and the statistic is left untouched.
fn close_fd(fd: RawFd) {
    unsafe {
        let result = libc::close(fd);
        if result < 0 {
            // BUGFIX: `close` returns -1 on failure and sets errno; the old
            // code passed the -1 return value to `from_raw_os_error`, which
            // always logged "Unknown error -1". Use errno instead, matching
            // the handling in `try_setup_tcp_listener`.
            error!("Error closing fd: {}",
                   Error::from_raw_os_error(errno().0 as i32));
            return;
        }
    }
    stats::fd_closed();
}
Warning cleanup
// Copyright 2015 Nathan Sizemore <nathanrsizemore@gmail.com>
//
// This Source Code Form is subject to the terms of the
// Mozilla Public License, v. 2.0. If a copy of the MPL was not
// distributed with this file, You can obtain one at
// http://mozilla.org/MPL/2.0/.
use std::io::Error;
use std::ops::DerefMut;
use std::sync::{Arc, Mutex};
use std::{mem, thread};
use std::net::{TcpStream, TcpListener};
use std::os::unix::io::{RawFd, AsRawFd, IntoRawFd};
use std::collections::LinkedList;
use libc;
use errno::errno;
use epoll;
use epoll::util::*;
use epoll::EpollEvent;
use libc::{c_int, c_void};
use config::Config;
use openssl::ssl::{SslStream, SslContext};
use stats;
use types::*;
use resources::ResourcePool;
use ss::nonblocking::plain::Plain;
use ss::nonblocking::secure::Secure;
use ss::{Socket, Stream, SRecv, SSend, TcpOptions, SocketOptions};
// We need to be able to access our resource pool from several methods
//
// NOTE(review): raw mutable static, written once in `begin()`; all access
// goes through `unsafe`. It points at a stack local in `begin()` — confirm
// nothing dereferences it after `begin()` returns.
static mut pool: *mut ResourcePool = 0 as *mut ResourcePool;
// Global SslContext, intentionally leaked via `Box::into_raw` in
// `setup_ssl_context()` so it lives for the rest of the process.
static mut ssl_context: *mut SslContext = 0 as *mut SslContext;
// When added to epoll, these will be the conditions of kernel notification:
//
// EPOLLET - Fd is in EdgeTriggered mode (notification on state changes)
// EPOLLIN - Data is available in kernel buffer
const EVENTS: u32 = event_type::EPOLLET | event_type::EPOLLIN;
/// Starts the epoll wait and incoming connection listener threads.
///
/// Blocks joining the listener thread, so it effectively runs for the
/// lifetime of the server.
pub fn begin(config: Config, handler: Box<EventHandler>) {
    // Master socket list
    let sockets = Arc::new(Mutex::new(LinkedList::<Stream>::new()));
    // Resource pool
    let mut rp = ResourcePool::new(config.workers);
    unsafe {
        // NOTE(review): `pool` points at the stack local `rp`; it is only
        // valid while this function is on the stack (it blocks in
        // `prox.join()` below). Confirm no worker touches it after shutdown.
        pool = &mut rp;
    }
    // Wrap our event handler into something that can be safely shared
    // between threads.
    let e_handler = Handler(Box::into_raw(handler));
    // Epoll instance
    let result = epoll::create1(0);
    if result.is_err() {
        let err = result.unwrap_err();
        error!("Unable to create epoll instance: {}", err);
        panic!()
    }
    let epfd = result.unwrap();
    // Epoll wait thread
    let epfd2 = epfd.clone();
    let streams2 = sockets.clone();
    thread::Builder::new()
        .name("Epoll Wait".to_string())
        .spawn(move || {
            event_loop(epfd2, streams2, e_handler);
        })
        .unwrap();
    // New connection thread
    let epfd3 = epfd.clone();
    let streams3 = sockets.clone();
    let prox = thread::Builder::new()
        .name("TCP Incoming Listener".to_string())
        .spawn(move || {
            listen(config, epfd3, streams3);
        })
        .unwrap();
    // Stay alive forever, or at least we hope
    let _ = prox.join();
}
/// Incoming connection listening thread
fn listen(config: Config, epfd: RawFd, streams: StreamList) {
// Setup server and listening port
let listener_result = try_setup_tcp_listener(&config);
if listener_result.is_err() {
error!("Setting up server: {}", listener_result.unwrap_err());
return;
}
// If we're using SSL, setup our context reference
if config.ssl.is_some() {
setup_ssl_context(&config);
}
// Begin listening for new connections
let listener = listener_result.unwrap();
for accept_result in listener.incoming() {
match accept_result {
Ok(tcp_stream) => handle_new_connection(tcp_stream, &config, epfd, streams.clone()),
Err(e) => error!("Accepting connection: {}", e)
}
}
drop(listener);
}
/// Binds the TCP listener and sets `SO_REUSEADDR` on it.
///
/// `SO_REUSEADDR` keeps restarts after a crash from waiting out the
/// TIME_WAIT period (~minutes) before the port can be bound again.
///
/// NOTE(review): the option is applied *after* `bind()`; to affect binding
/// it normally must be set beforehand. std's `TcpListener` offers no
/// pre-bind hook, so this is left as-is — confirm intent.
fn try_setup_tcp_listener(config: &Config) -> Result<TcpListener, Error> {
    // `?` propagates a bind failure exactly as the old
    // `is_err()`/early-return dance did, with less noise.
    let listener = TcpListener::bind((&config.addr[..], config.port))?;
    let server_fd = listener.as_raw_fd();
    unsafe {
        let optval: c_int = 1;
        // SAFETY: `server_fd` is a valid, open socket owned by `listener`,
        // and `optval` outlives the call.
        let opt_result = libc::setsockopt(server_fd,
                                          libc::SOL_SOCKET,
                                          libc::SO_REUSEADDR,
                                          &optval as *const _ as *const c_void,
                                          mem::size_of::<c_int>() as u32);
        if opt_result < 0 {
            return Err(Error::from_raw_os_error(errno().0 as i32));
        }
    }
    Ok(listener)
}
/// Initializes the global `ssl_context` pointer from the configured context.
///
/// The boxed context is intentionally leaked (`Box::into_raw`) so it stays
/// valid for the lifetime of the process.
fn setup_ssl_context(config: &Config) {
    unsafe {
        ssl_context = Box::into_raw(Box::new(config.ssl.clone().unwrap()));
    }
}
/// Accepts a freshly connected `TcpStream`: configures the raw socket, wraps
/// it in a plain or SSL stream depending on config, and registers it with
/// both the master stream list and epoll.
fn handle_new_connection(tcp_stream: TcpStream, config: &Config, epfd: RawFd, streams: StreamList) {
    // Update our total opened file descriptors
    stats::fd_opened();
    // Create and configure a new socket
    let mut socket = Socket::new(tcp_stream.into_raw_fd());
    let result = setup_new_socket(&mut socket);
    if result.is_err() {
        // Option setup failed; the helper already logged the error.
        close_fd(socket.as_raw_fd());
        return;
    }
    // Setup our stream
    let stream = match config.ssl {
        Some(_) => {
            let sock_fd = socket.as_raw_fd();
            // NOTE(review): assumes `ssl_context` was initialized by
            // `setup_ssl_context` before any connection is accepted — the
            // ordering in `listen()` appears to guarantee this; confirm.
            let ssl_result = unsafe { SslStream::accept(&(*ssl_context), socket) };
            match ssl_result {
                Ok(ssl_stream) => {
                    let secure_stream = Secure::new(ssl_stream);
                    Stream::new(Box::new(secure_stream))
                }
                Err(ssl_error) => {
                    error!("Creating SslStream: {}", ssl_error);
                    close_fd(sock_fd);
                    return;
                }
            }
        }
        None => {
            let plain_text = Plain::new(socket);
            Stream::new(Box::new(plain_text))
        }
    };
    // Add stream to our server
    let fd = stream.as_raw_fd();
    add_stream_to_master_list(stream, streams.clone());
    add_to_epoll(epfd, fd, streams.clone());
}
fn setup_new_socket(socket: &mut Socket) -> Result<(), ()> {
let result = socket.set_nonblocking();
if result.is_err() {
error!("Setting fd to nonblocking: {}", result.unwrap_err());
return Err(());
}
let result = socket.set_tcp_nodelay(true);
if result.is_err() {
error!("Setting tcp_nodelay: {}", result.unwrap_err());
return Err(());
}
let result = socket.set_tcp_keepalive(true);
if result.is_err() {
error!("Setting tcp_keepalive: {}", result.unwrap_err());
return Err(());
}
Ok(())
}
/// Event loop for handling all epoll events
fn event_loop(epfd: RawFd, streams: StreamList, handler: Handler) {
    // Buffer the kernel fills on each wait: at most 100 events per wakeup.
    let mut events = Vec::<EpollEvent>::with_capacity(100);
    unsafe {
        // NOTE(review): `set_len` exposes uninitialized `EpollEvent`s for the
        // kernel to write into. Only the first `num_events` entries are read
        // below, but this is still formally UB — consider zero-initializing.
        events.set_len(100);
    }
    loop {
        // Block indefinitely (-1 timeout) until at least one event arrives.
        match epoll::wait(epfd, &mut events[..], -1) {
            Ok(num_events) => {
                for x in 0..num_events as usize {
                    handle_epoll_event(epfd, &events[x], streams.clone(), handler.clone());
                }
            }
            Err(e) => {
                error!("Error on epoll::wait(): {}", e);
                panic!()
            }
        };
    }
}
/// Finds the stream the epoll event is associated with and parses the event type
/// to hand off to specific handlers
fn handle_epoll_event(epfd: RawFd, event: &EpollEvent, streams: StreamList, handler: Handler) {
    const READ_EVENT: u32 = event_type::EPOLLIN;
    // Locate the stream the event was for
    let mut stream;
    {
        // Mutex lock
        // Find the stream the event was for
        let mut guard = match streams.lock() {
            Ok(guard) => guard,
            Err(poisoned) => {
                warn!("StreamList Mutex was poisoned, using anyway");
                poisoned.into_inner()
            }
        };
        let list = guard.deref_mut();
        // Linear scan for the fd stored in the event's user data. `index` is
        // 1-based so `split_off(index - 1)` below splits directly in front
        // of the matching element.
        let mut found = false;
        let mut index = 1usize;
        for s in list.iter() {
            if s.as_raw_fd() == event.data as RawFd {
                found = true;
                break;
            }
            index += 1;
        }
        if !found {
            // Event for an fd we no longer track: unregister and close it.
            let fd = event.data as RawFd;
            remove_fd_from_epoll(epfd, fd);
            close_fd(fd);
            return;
        }
        // Detach the matching stream from the list; the read path re-inserts
        // it after the event has been handled.
        if index == 1 {
            stream = list.pop_front().unwrap();
        } else {
            let mut split = list.split_off(index - 1);
            stream = split.pop_front().unwrap();
            list.append(&mut split);
        }
    } // Mutex unlock
    if (event.events & READ_EVENT) > 0 {
        // Successful reads put the stream back into the master list.
        let _ = handle_read_event(epfd, &mut stream, handler).map(|_| {
            add_stream_to_master_list(stream, streams.clone());
        });
    } else {
        // Any non-read event drops the connection and notifies the handler
        // on a worker thread.
        let fd = stream.as_raw_fd();
        remove_fd_from_epoll(epfd, fd);
        close_fd(fd);
        let stream_fd = stream.as_raw_fd();
        unsafe {
            (*pool).run(move || {
                let Handler(ptr) = handler;
                (*ptr).on_stream_closed(stream_fd);
            });
        }
    }
}
/// Reads all available data on the stream.
///
/// If a complete message(s) is available, each message will be routed through the
/// resource pool.
///
/// If an error occurs during the read, the stream is dropped from the server.
fn handle_read_event(epfd: RawFd, stream: &mut Stream, handler: Handler) -> Result<(), ()> {
    match stream.recv() {
        Ok(_) => {
            let mut rx_queue = stream.drain_rx_queue();
            for payload in rx_queue.iter_mut() {
                // Check if this is a request for stats
                // (6-byte message starting with the 0x04 0x04 marker; bytes
                // 2..6 carry a little f32 "seconds" argument).
                if payload.len() == 6 && payload[0] == 0x04 && payload[1] == 0x04 {
                    // NOTE(review): this reinterprets 4 payload bytes as f32
                    // through a raw pointer; the Vec data is not guaranteed to
                    // be 4-byte aligned at offset 2, so this read may be
                    // unaligned — confirm, or copy the bytes out instead.
                    let u8ptr: *const u8 = &payload[2] as *const _;
                    let f32ptr: *const f32 = u8ptr as *const _;
                    let sec = unsafe { *f32ptr };
                    let stream_cpy = stream.clone();
                    unsafe {
                        (*pool).run(move || {
                            let mut s = stream_cpy.clone();
                            let result = stats::as_serialized_buffer(sec);
                            if result.is_ok() {
                                let _ = s.send(&result.unwrap()[..]);
                            }
                        });
                    }
                    // NOTE(review): returning here discards any payloads still
                    // left in rx_queue after a stats request — confirm intended.
                    return Ok(());
                }
                // TODO - Refactor once better function passing traits are available in stable.
                // Each remaining payload is dispatched to the user handler on
                // the thread pool with its own copies of the stream/payload.
                let handler_cpy = handler.clone();
                let stream_cpy = stream.clone();
                let payload_cpy = payload.clone();
                unsafe {
                    (*pool).run(move || {
                        let Handler(ptr) = handler_cpy;
                        (*ptr).on_data_received(stream_cpy.clone(), payload_cpy.clone());
                    });
                }
            }
            Ok(())
        }
        Err(_) => {
            // Read failure: deregister and close the fd, then notify the
            // user handler asynchronously that the stream is gone.
            remove_fd_from_epoll(epfd, stream.as_raw_fd());
            close_fd(stream.as_raw_fd());
            let stream_fd = stream.as_raw_fd();
            unsafe {
                (*pool).run(move || {
                    let Handler(ptr) = handler;
                    (*ptr).on_stream_closed(stream_fd.clone());
                });
            }
            Err(())
        }
    }
}
/// Inserts the stream back into the master list of streams.
///
/// A poisoned mutex is recovered rather than propagated (the list data is
/// still usable). The accepted connection is reported to the stats module.
fn add_stream_to_master_list(stream: Stream, streams: StreamList) {
    let mut guard = streams.lock().unwrap_or_else(|poisoned| {
        warn!("StreamList Mutex failed, using anyway...");
        poisoned.into_inner()
    });
    guard.deref_mut().push_back(stream);
    stats::conn_recv();
}
/// Adds a new fd to the epoll instance.
///
/// On failure the stream owning the fd is removed from the master list and
/// the fd is closed, so an unpollable connection is never left dangling.
fn add_to_epoll(epfd: RawFd, fd: RawFd, streams: StreamList) {
    let mut event = EpollEvent {
        data: fd as u64,
        events: EVENTS,
    };
    if let Err(e) = epoll::ctl(epfd, ctl_op::ADD, fd, &mut event) {
        error!("poll::CtrlError during add: {}", e);
        remove_fd_from_list(fd, streams.clone());
        close_fd(fd);
    }
}
/// Removes a fd from the epoll instance. Failure is logged but not fatal.
fn remove_fd_from_epoll(epfd: RawFd, fd: RawFd) {
    // In kernel versions before 2.6.9, the EPOLL_CTL_DEL operation required
    // a non-null pointer in event, even though this argument is ignored.
    // Since Linux 2.6.9, event can be specified as NULL when using
    // EPOLL_CTL_DEL. We'll be as backwards compatible as possible and pass
    // a zeroed dummy event.
    let mut ignored = EpollEvent {
        data: 0 as u64,
        events: 0 as u32,
    };
    if let Err(e) = epoll::ctl(epfd, ctl_op::DEL, fd, &mut ignored) {
        warn!("Epoll CtrlError during del: {}", e);
    }
}
/// Removes stream with fd from master list
///
/// Locks the shared stream list (recovering from poisoning, since the data
/// is still usable), locates the stream owning `fd`, and removes it. A fd
/// that is not present is logged at trace level and ignored. On successful
/// removal the lost connection is reported to the stats module.
fn remove_fd_from_list(fd: RawFd, streams: StreamList) {
    let mut guard = match streams.lock() {
        Ok(guard) => guard,
        Err(poisoned) => {
            warn!("StreamList Mutex was poisoned, using anyway");
            poisoned.into_inner()
        }
    };
    let list = guard.deref_mut();
    // Locate the stream owning this fd (replaces the manual 1-based scan).
    let index = match list.iter().position(|s| s.as_raw_fd() == fd) {
        Some(i) => i,
        None => {
            trace!("fd: {} not found in list", fd);
            return;
        }
    };
    // LinkedList has no remove-at-index: split at the element, drop the head
    // of the tail, and stitch the remainder back on. split_off(0) covers the
    // head-of-list case as well, so no separate branch is needed.
    let mut split = list.split_off(index);
    split.pop_front();
    list.append(&mut split);
    stats::conn_lost();
}
/// Closes a fd with the kernel
fn close_fd(fd: RawFd) {
unsafe {
let result = libc::close(fd);
if result < 0 {
error!("Error closing fd: {}",
Error::from_raw_os_error(result as i32));
return;
}
}
stats::fd_closed();
}
|
// Copyright 2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
//! `IMPL` Low level signal support.
use libc::{c_void, c_uint, c_ulong};
use gobject_ffi::{self, GCallback};
use ffi::{gboolean, GQuark};
use object::{IsA, Object};
use source::CallbackGuard;
use translate::{ToGlib, ToGlibPtr};
/// Whether to propagate the signal to the default handler.
///
/// Don't inhibit default handlers without a reason, they're usually helpful.
///
/// The wrapped `bool` is handed to GLib as a `gboolean` via the `ToGlib`
/// impl below.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct Inhibit(pub bool);
#[doc(hidden)]
impl ToGlib for Inhibit {
    type GlibType = gboolean;
    // Straight conversion of the wrapped bool to GLib's gboolean.
    #[inline]
    fn to_glib(&self) -> gboolean {
        self.0.to_glib()
    }
}
/// Connects `trampoline` to `signal_name` on `receiver`; `closure` is kept
/// alive for the lifetime of the connection and freed by `destroy_closure`
/// when GLib releases the handler. Returns the (non-zero) handler id.
///
/// # Safety
///
/// `receiver` must point to a valid `GObject`, and `closure` is expected to
/// be a raw pointer to a heap-allocated `Box<Fn()>` (see `destroy_closure`).
pub unsafe fn connect(receiver: *mut gobject_ffi::GObject, signal_name: &str, trampoline: GCallback,
                      closure: *mut Box<Fn() + 'static>) -> u64 {
    let handle = gobject_ffi::g_signal_connect_data(receiver, signal_name.to_glib_none().0,
                                                    trampoline, closure as *mut _, Some(destroy_closure),
                                                    gobject_ffi::GConnectFlags::empty()) as u64;
    // A zero handle means the connection failed; treat that as a bug.
    assert!(handle > 0);
    handle
}
/// Blocks the handler `handler_id` on `instance` so it is skipped during
/// signal emissions until unblocked.
pub fn signal_handler_block<T: IsA<Object>>(instance: &T, handler_id: u64) {
    unsafe {
        gobject_ffi::g_signal_handler_block(instance.to_glib_none().0, handler_id as c_ulong);
    }
}
/// Undoes one previous `signal_handler_block` call for `handler_id`.
pub fn signal_handler_unblock<T: IsA<Object>>(instance: &T, handler_id: u64) {
    unsafe {
        gobject_ffi::g_signal_handler_unblock(instance.to_glib_none().0, handler_id as c_ulong);
    }
}
/// Stops the current emission of the signal identified by `signal_id` and
/// `detail` on `instance`.
pub fn signal_stop_emission<T: IsA<Object>>(instance: &T, signal_id: u32, detail: GQuark) {
    unsafe {
        gobject_ffi::g_signal_stop_emission(instance.to_glib_none().0, signal_id as c_uint, detail);
    }
}
/// Stops the current emission of the signal named `signal_name` on `instance`.
pub fn signal_stop_emission_by_name<T: IsA<Object>>(instance: &T, signal_name: &str) {
    unsafe {
        gobject_ffi::g_signal_stop_emission_by_name(instance.to_glib_none().0, signal_name.to_glib_none().0);
    }
}
// GClosure destroy notifier: reclaims the boxed closure registered in
// `connect` so it is dropped exactly once, when GLib releases the handler.
unsafe extern "C" fn destroy_closure(ptr: *mut c_void, _: *mut gobject_ffi::GClosure) {
    let _guard = CallbackGuard::new();
    // destroy: re-box the raw pointer and let it drop at end of scope.
    Box::<Box<Fn()>>::from_raw(ptr as *mut _);
}
Add signal::signal_handler_disconnect()
// Copyright 2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
//! `IMPL` Low level signal support.
use libc::{c_void, c_uint, c_ulong};
use gobject_ffi::{self, GCallback};
use ffi::{gboolean, GQuark};
use object::{IsA, Object};
use source::CallbackGuard;
use translate::{ToGlib, ToGlibPtr};
/// Whether to propagate the signal to the default handler.
///
/// Don't inhibit default handlers without a reason, they're usually helpful.
///
/// The wrapped `bool` is handed to GLib as a `gboolean` via the `ToGlib`
/// impl below.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct Inhibit(pub bool);
#[doc(hidden)]
impl ToGlib for Inhibit {
    type GlibType = gboolean;
    // Straight conversion of the wrapped bool to GLib's gboolean.
    #[inline]
    fn to_glib(&self) -> gboolean {
        self.0.to_glib()
    }
}
/// Connects `trampoline` to `signal_name` on `receiver`; `closure` is kept
/// alive for the lifetime of the connection and freed by `destroy_closure`
/// when GLib releases the handler. Returns the (non-zero) handler id.
///
/// # Safety
///
/// `receiver` must point to a valid `GObject`, and `closure` is expected to
/// be a raw pointer to a heap-allocated `Box<Fn()>` (see `destroy_closure`).
pub unsafe fn connect(receiver: *mut gobject_ffi::GObject, signal_name: &str, trampoline: GCallback,
                      closure: *mut Box<Fn() + 'static>) -> u64 {
    let handle = gobject_ffi::g_signal_connect_data(receiver, signal_name.to_glib_none().0,
                                                    trampoline, closure as *mut _, Some(destroy_closure),
                                                    gobject_ffi::GConnectFlags::empty()) as u64;
    // A zero handle means the connection failed; treat that as a bug.
    assert!(handle > 0);
    handle
}
/// Blocks the handler `handler_id` on `instance` so it is skipped during
/// signal emissions until unblocked.
pub fn signal_handler_block<T: IsA<Object>>(instance: &T, handler_id: u64) {
    unsafe {
        gobject_ffi::g_signal_handler_block(instance.to_glib_none().0, handler_id as c_ulong);
    }
}
/// Undoes one previous `signal_handler_block` call for `handler_id`.
pub fn signal_handler_unblock<T: IsA<Object>>(instance: &T, handler_id: u64) {
    unsafe {
        gobject_ffi::g_signal_handler_unblock(instance.to_glib_none().0, handler_id as c_ulong);
    }
}
/// Permanently disconnects the handler `handler_id` from `instance`.
pub fn signal_handler_disconnect<T: IsA<Object>>(instance: &T, handler_id: u64) {
    unsafe {
        gobject_ffi::g_signal_handler_disconnect(instance.to_glib_none().0, handler_id as c_ulong);
    }
}
/// Stops the current emission of the signal identified by `signal_id` and
/// `detail` on `instance`.
pub fn signal_stop_emission<T: IsA<Object>>(instance: &T, signal_id: u32, detail: GQuark) {
    unsafe {
        gobject_ffi::g_signal_stop_emission(instance.to_glib_none().0, signal_id as c_uint, detail);
    }
}
/// Stops the current emission of the signal named `signal_name` on `instance`.
pub fn signal_stop_emission_by_name<T: IsA<Object>>(instance: &T, signal_name: &str) {
    unsafe {
        gobject_ffi::g_signal_stop_emission_by_name(instance.to_glib_none().0, signal_name.to_glib_none().0);
    }
}
// GClosure destroy notifier: reclaims the boxed closure registered in
// `connect` so it is dropped exactly once, when GLib releases the handler.
unsafe extern "C" fn destroy_closure(ptr: *mut c_void, _: *mut gobject_ffi::GClosure) {
    let _guard = CallbackGuard::new();
    // destroy: re-box the raw pointer and let it drop at end of scope.
    Box::<Box<Fn()>>::from_raw(ptr as *mut _);
}
|
use std::io::{IoResult, IoError, IoErrorKind, TcpStream, ConnectionRefused,
ConnectionFailed, OtherIoError};
// Authentication methods offered to the SOCKS server: no authentication,
// or username/password.
enum AuthMethod<'s> {
    NoAuth,
    UPass(&'s str, &'s str)
}
/// A SOCKS5 proxy client: address of the proxy plus the authentication
/// method to use when connecting through it.
pub struct Socks5<'a> {
    socks_host: &'a str,
    socks_port: u16,
    socks_auth: AuthMethod<'a>,
}
impl<'a> Socks5<'a> {
    /// Creates a proxy descriptor for the SOCKS server at `host:port`,
    /// initially using no authentication.
    pub fn new(host: &'a str, port: u16) -> Socks5 {
        Socks5 {
            socks_host: host,
            socks_port: port,
            socks_auth: NoAuth
        }
    }
    /// Switches to username/password authentication for subsequent connects.
    pub fn login(&mut self, uname: &'a str, passwd: &'a str) {
        self.socks_auth = UPass(uname, passwd);
    }
    /// Connects to `host:port` through the SOCKS5 proxy and returns the
    /// tunneled stream on success.
    pub fn connect(&mut self, host: &str, port: u16) -> IoResult<TcpStream> {
        let mut stream = try!(TcpStream::connect(self.socks_host, self.socks_port));
        // Greeting: protocol version 5.
        try!(stream.write([0x05u8]));
        // Advertise exactly one auth method: 0x00 = none, 0x02 = user/pass.
        match self.socks_auth {
            NoAuth => { try!(stream.write([0x01u8, 0x00])); },
            UPass(..) => { try!(stream.write([0x01u8, 0x02])); }
        }
        if try!(stream.read_u8()) != 0x05 {
            return io_err(OtherIoError, "Unexpected SOCKS version number");
        }
        // The server's chosen method must match the one we advertised.
        match try!(stream.read_u8()) {
            0x00 => {
                match self.socks_auth {
                    NoAuth => { /* Continue */ },
                    _ => return io_err(OtherIoError,
                                       "Wrong authentication method from server")
                }
            }
            0x02 => {
                match self.socks_auth {
                    UPass(uname, passwd) => {
                        // Subnegotiation: version 0x01, then length-prefixed
                        // username and password.
                        try!(stream.write([0x01u8, uname.len() as u8]));
                        try!(stream.write_str(uname));
                        try!(stream.write([passwd.len() as u8]));
                        try!(stream.write_str(passwd));
                        if try!(stream.read_u8()) != 0x01 {
                            return io_err(OtherIoError,
                                          "Invalid authentication version");
                        }
                        // Status 0x00 means the credentials were accepted.
                        if try!(stream.read_u8()) != 0x00 {
                            return io_err(ConnectionRefused, "Authentication failed");
                        }
                    }
                    _ => { return io_err(OtherIoError,
                                         "Wrong authentication method from server");
                    }
                }
            }
            0xFF => { return io_err(ConnectionRefused,
                                    "Server refused authentication methods"); }
            _ => { return io_err(OtherIoError,
                                 "Wrong authentication method from server"); }
        }
        // CONNECT request: version 5, command 1 (connect), reserved 0,
        // address type 3 (domain name), then length-prefixed host and port.
        try!(stream.write([0x05u8, 0x01, 0x00, 0x03, host.len() as u8]));
        try!(stream.write_str(host));
        try!(stream.write_be_u16(port));
        if try!(stream.read_u8()) != 0x05 {
            return io_err(OtherIoError, "Invalid SOCKS version number");
        }
        // Reply code 0x00 = success; everything else maps to an error below.
        match try!(stream.read_u8()) {
            0x00 => {
                let _null = try!(stream.read_u8());
                let addrtype = try!(stream.read_u8());
                // NOTE(review): only domain-name (0x03) bind addresses are
                // handled here; IPv4 (0x01) and IPv6 (0x04) replies are
                // rejected as "Unimplemented".
                if addrtype != 0x03 {
                    return io_err(OtherIoError, "Unimplemented");
                }
                // Drain the server's bound address and port; values unused.
                let _addrlen = try!(stream.read_u8());
                let mut _addr: Vec<u8> = vec![];
                for i in range(0, _addrlen) {
                    _addr.push(try!(stream.read_u8()));
                }
                let _port = try!(stream.read_be_u16());
                Ok(stream)
            }
            0x01 => io_err(OtherIoError, "General failure"),
            0x02 => io_err(OtherIoError, "Connection not allowed by ruleset"),
            0x03 => io_err(ConnectionFailed, "Network unreachable"),
            0x04 => io_err(ConnectionFailed, "Host unreachable"),
            0x05 => io_err(ConnectionRefused, "Connection refused by destination"),
            0x06 => io_err(ConnectionFailed, "TTL expired"),
            0x07 => io_err(OtherIoError, "Protocol Error"),
            0x08 => io_err(OtherIoError, "Address type not supported"),
            _ => io_err(OtherIoError, "Unknown error")
        }
    }
}
// Helper that wraps a kind/description pair into an Err(IoError).
fn io_err<T>(kind: IoErrorKind, desc: &'static str) -> IoResult<T> {
    Err(IoError { kind: kind, desc: desc, detail: None })
}
Handle different address types from server.
use std::io::{IoResult, IoError, IoErrorKind, TcpStream, ConnectionRefused,
ConnectionFailed, OtherIoError};
// Authentication methods offered to the SOCKS server: no authentication,
// or username/password.
enum AuthMethod<'s> {
    NoAuth,
    UPass(&'s str, &'s str)
}
/// A SOCKS5 proxy client: address of the proxy plus the authentication
/// method to use when connecting through it.
pub struct Socks5<'a> {
    socks_host: &'a str,
    socks_port: u16,
    socks_auth: AuthMethod<'a>,
}
impl<'a> Socks5<'a> {
    /// Creates a proxy descriptor for the SOCKS server at `host:port`,
    /// initially using no authentication.
    pub fn new(host: &'a str, port: u16) -> Socks5 {
        Socks5 {
            socks_host: host,
            socks_port: port,
            socks_auth: NoAuth
        }
    }
    /// Switches to username/password authentication for subsequent connects.
    pub fn login(&mut self, uname: &'a str, passwd: &'a str) {
        self.socks_auth = UPass(uname, passwd);
    }
    /// Connects to `host:port` through the SOCKS5 proxy and returns the
    /// tunneled stream on success.
    pub fn connect(&mut self, host: &str, port: u16) -> IoResult<TcpStream> {
        let mut stream = try!(TcpStream::connect(self.socks_host, self.socks_port));
        // Greeting: protocol version 5.
        try!(stream.write([0x05u8]));
        // Advertise exactly one auth method: 0x00 = none, 0x02 = user/pass.
        match self.socks_auth {
            NoAuth => { try!(stream.write([0x01u8, 0x00])); },
            UPass(..) => { try!(stream.write([0x01u8, 0x02])); }
        }
        if try!(stream.read_u8()) != 0x05 {
            return io_err(OtherIoError, "Unexpected SOCKS version number");
        }
        // The server's chosen method must match the one we advertised.
        match try!(stream.read_u8()) {
            0x00 => {
                match self.socks_auth {
                    NoAuth => { /* Continue */ },
                    _ => return io_err(OtherIoError,
                                       "Wrong authentication method from server")
                }
            }
            0x02 => {
                match self.socks_auth {
                    UPass(uname, passwd) => {
                        // Subnegotiation: version 0x01, then length-prefixed
                        // username and password.
                        try!(stream.write([0x01u8, uname.len() as u8]));
                        try!(stream.write_str(uname));
                        try!(stream.write([passwd.len() as u8]));
                        try!(stream.write_str(passwd));
                        if try!(stream.read_u8()) != 0x01 {
                            return io_err(OtherIoError,
                                          "Invalid authentication version");
                        }
                        // Status 0x00 means the credentials were accepted.
                        if try!(stream.read_u8()) != 0x00 {
                            return io_err(ConnectionRefused, "Authentication failed");
                        }
                    }
                    _ => { return io_err(OtherIoError,
                                         "Wrong authentication method from server");
                    }
                }
            }
            0xFF => { return io_err(ConnectionRefused,
                                    "Server refused authentication methods"); }
            _ => { return io_err(OtherIoError,
                                 "Wrong authentication method from server"); }
        }
        // CONNECT request: version 5, command 1 (connect), reserved 0,
        // address type 3 (domain name), then length-prefixed host and port.
        try!(stream.write([0x05u8, 0x01, 0x00, 0x03, host.len() as u8]));
        try!(stream.write_str(host));
        try!(stream.write_be_u16(port));
        if try!(stream.read_u8()) != 0x05 {
            return io_err(OtherIoError, "Invalid SOCKS version number");
        }
        // Reply code 0x00 = success; everything else maps to an error below.
        match try!(stream.read_u8()) {
            0x00 => {
                let _null = try!(stream.read_u8());
                // Drain the server's bound address (all three address types
                // are accepted); the values themselves are unused.
                match try!(stream.read_u8()) {
                    0x01 => {
                        let mut _ipv4 = [0,.. 4];
                        try!(stream.read_at_least(4, &mut _ipv4));
                    }
                    0x03 => {
                        let addrlen = try!(stream.read_u8());
                        let _domain = try!(stream.read_exact(addrlen as uint));
                    }
                    0x04 => {
                        let mut _ipv6 = [0,.. 16];
                        try!(stream.read_at_least(16, &mut _ipv6));
                    }
                    _ => return io_err(OtherIoError, "Invalid address type"),
                }
                let _port = try!(stream.read_be_u16());
                Ok(stream)
            }
            0x01 => io_err(OtherIoError, "General failure"),
            0x02 => io_err(OtherIoError, "Connection not allowed by ruleset"),
            0x03 => io_err(ConnectionFailed, "Network unreachable"),
            0x04 => io_err(ConnectionFailed, "Host unreachable"),
            0x05 => io_err(ConnectionRefused, "Connection refused by destination"),
            0x06 => io_err(ConnectionFailed, "TTL expired"),
            0x07 => io_err(OtherIoError, "Protocol Error"),
            0x08 => io_err(OtherIoError, "Address type not supported"),
            _ => io_err(OtherIoError, "Unknown error")
        }
    }
}
// Helper that wraps a kind/description pair into an Err(IoError).
fn io_err<T>(kind: IoErrorKind, desc: &'static str) -> IoResult<T> {
    Err(IoError { kind: kind, desc: desc, detail: None })
}
|
use std::os::raw::{c_int, c_double, c_void};
use std::{ptr, slice};
use std::ffi::CString;
use ffi;
use base::*;
// Generates a typed `write_stream_*` method on OutStream for one sample type.
macro_rules! write_stream {
    ($name:ident, $t:ty) => (
        /// Expects a vector of `channel_count` channel `buffers`
        /// containing audio data that is written into
        /// the buffer of the output device.
        /// NOTE: This is subject of change.
        /// Passing an iterator gives more flexibility and should be more performant
        /// in most cases.
        ///
        /// Returns the number of actually written frames.
        /// If the provided buffers contain less frames
        /// than `min_frame_count`, or less buffers
        /// as `channel_count` are provided,
        /// then a `ffi::enums::SioError::Invalid` is returned.
        pub fn $name(&self, min_frame_count: u32, buffers: &Vec<Vec<$t>>) -> SioResult<u32> {
            let channel_count = self.layout().channel_count();
            // check if buffer contains frames for all channels
            if buffers.len() < channel_count as usize {
                return Err(ffi::enums::SioError::Invalid);
            }
            // check if there are at least min_frame_count frames for all channels
            if !buffers.iter().map(|c| c.len()).all(|l| l >= min_frame_count as usize) {
                return Err(ffi::enums::SioError::Invalid);
            }
            // assuming that every channel buffer has the same length
            let frame_count = buffers[0].len() as c_int;
            let mut raw_areas: *mut ffi::SoundIoChannelArea = ptr::null_mut();
            // Ask the backend for writable areas; it may grant fewer frames
            // than requested (actual_frame_count).
            let actual_frame_count = try!(self.begin_write(&mut raw_areas, &frame_count));
            let areas = unsafe { slice::from_raw_parts_mut(raw_areas, channel_count as usize) };
            // Copy samples into each channel's area, stepping area.step bytes
            // between successive frames of the same channel.
            for idx in 0..actual_frame_count as usize {
                for channel in 0..channel_count as usize {
                    let area = areas[channel];
                    let addr = (area.ptr as usize + area.step as usize * idx) as *mut $t;
                    unsafe { *addr = buffers[channel][idx] };
                }
            }
            self.end_write().map_or(Ok(actual_frame_count), |err| Err(err))
        }
    )
}
// C-ABI trampolines registered with libsoundio. Each rebuilds a temporary
// `OutStream` around the raw pointer, sets `marker` so the wrapper's Drop
// impl does not destroy the underlying stream, then forwards to the user
// callback stored in the raw stream's userdata.
extern "C" fn write_wrapper(raw_out: *mut ffi::SoundIoOutStream, min: c_int, max: c_int) {
    let mut out = OutStream::new(raw_out);
    out.marker = true;
    let callbacks_ptr = unsafe { (*out.stream).userdata as *mut Box<OutStreamCallbacks> };
    let callbacks: &mut Box<OutStreamCallbacks> = unsafe { &mut *callbacks_ptr };
    // Only invoke if a write callback was registered.
    callbacks.write.as_mut().map(|f| f(out, min as u32, max as u32));
}
extern "C" fn underflow_wrapper(raw_out: *mut ffi::SoundIoOutStream) {
    let mut out = OutStream::new(raw_out);
    out.marker = true;
    let callbacks_ptr = unsafe { (*out.stream).userdata as *mut Box<OutStreamCallbacks> };
    let callbacks: &mut Box<OutStreamCallbacks> = unsafe { &mut *callbacks_ptr };
    // Only invoke if an underflow callback was registered.
    callbacks.underflow.as_mut().map(|f| f(out));
}
extern "C" fn error_wrapper(raw_out: *mut ffi::SoundIoOutStream, error: ffi::enums::SioError) {
    let mut out = OutStream::new(raw_out);
    out.marker = true;
    let callbacks_ptr = unsafe { (*out.stream).userdata as *mut Box<OutStreamCallbacks> };
    let callbacks: &mut Box<OutStreamCallbacks> = unsafe { &mut *callbacks_ptr };
    // Only invoke if an error callback was registered.
    callbacks.error.as_mut().map(|f| f(out, error));
}
// User-registered callbacks for an output stream; boxed trait objects so
// arbitrary closures with lifetime 'a can be stored. None = not registered.
struct OutStreamCallbacks<'a> {
    write: Option<Box<FnMut(OutStream, u32, u32) + 'a>>,
    underflow: Option<Box<FnMut(OutStream) + 'a>>,
    error: Option<Box<FnMut(OutStream, ffi::enums::SioError) + 'a>>,
}
impl<'a> Default for OutStreamCallbacks<'a> {
    // All callbacks start out unregistered.
    fn default() -> Self {
        OutStreamCallbacks {
            write: None,
            underflow: None,
            error: None,
        }
    }
}
/// An audio output stream, returned from a `Device`.
pub struct OutStream<'a> {
    // Raw libsoundio stream this wrapper drives.
    stream: *mut ffi::SoundIoOutStream,
    // Owned callback table; a pointer to this box is stored in the raw
    // stream's userdata so the C trampolines can reach it.
    callbacks: Box<OutStreamCallbacks<'a>>,
    // Owned copy of the stream name; keeps the pointer handed to C alive.
    name: CString,
    // When true, Drop skips destroying the raw stream (set by trampolines).
    marker: bool,
}
impl<'a> OutStream<'a> {
pub fn new(raw_stream: *mut ffi::SoundIoOutStream) -> Self {
let callbacks = Box::new(OutStreamCallbacks::default());
OutStream {
stream: raw_stream,
callbacks: callbacks,
name: CString::new("outstream").unwrap(),
marker: false,
}
}
/// Change settings (e.g. `set_format`) **before** calling `open`.
/// After you call this function, `OutStream::software_latency` is set to
/// the correct value.
///
/// The next thing to do is call `start`.
/// If this function returns an error, the outstream is in an invalid state and
/// you must call `destroy` on it.
///
/// Possible errors:
///
/// - `ffi::enums::SioErrorInvalid`
/// - device is not an *output* device
/// - format is not valid
/// - `channel_count` is greater than 24
/// - `ffi::enums::SioError::NoMem`
/// - `ffi::enums::SioError::OpeningDevice`
/// - `ffi::enums::SioError::BackendDisconnected`
/// - `ffi::enums::SioError::SystemResources`
/// - `ffi::enums::SioError::NoSuchClient` - when JACK returns `JackNoSuchClient`
/// - `ffi::enums::SioErrorOpeningDevice`
/// - `ffi::enums::SioErrorIncompatibleBackend` - `OutStream::channel_count` is
/// greater than the number of channels the backend can handle.
/// - `ffi::enums::SioErrorIncompatibleDevice` - stream parameters requested are not
/// compatible with the chosen device.
pub fn open(&self) -> SioResult<()> {
match unsafe { ffi::soundio_outstream_open(self.stream) } {
ffi::enums::SioError::None => Ok(()),
err @ _ => Err(err),
}
}
/// After you call this function, the registered `write_callback` will be called.
///
/// This function might directly call the `write_callback`.
///
/// Possible errors:
///
/// - `ffi::enums::SioError::Streaming`
/// - `ffi::enums::SioError::NoMem`
/// - `ffi::enums::SioError::SystemResources`
/// - `ffi::enums::SioError::BackendDisconnected`
pub fn start(&self) -> SioResult<()> {
match unsafe { ffi::soundio_outstream_start(self.stream) } {
ffi::enums::SioError::None => Ok(()),
err @ _ => Err(err),
}
}
/// Registers the given callback as `write_callback` that is called as soon as you call `start`.
///
/// In this callback, you call `OutStream::write_stream_FMT` where `FMT` is one of the supported
/// format, `u16`, `f32` etc.
///
/// `frame_count_max` will always be greater than 0. Note that you
/// should write as many frames as you can; `frame_count_min` might be 0 and
/// you can still get a buffer underflow if you always write
/// `frame_count_min` frames.
///
/// For Dummy, ALSA, and PulseAudio, `frame_count_min` will be 0. For JACK
/// and CoreAudio `frame_count_min` will be equal to `frame_count_max`.
///
/// The code in the supplied function must be suitable for real-time
/// execution. That means that it cannot call functions that might block
/// for a long time. This includes all I/O functions (disk, TTY, network),
/// malloc, free, printf, pthread_mutex_lock, sleep, wait, poll, select,
/// pthread_join, pthread_cond_wait, etc.
pub fn register_write_callback<W>(&mut self, callback: W)
where W: FnMut(OutStream, u32, u32) + 'a
{
// stored box reference to callback closure
self.callbacks.write = Some(Box::new(callback));
unsafe {
// register wrapper for write_callback
(*self.stream).write_callback = Some(write_wrapper);
// store reference to callbacks struct in userdata pointer
(*self.stream).userdata =
&self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
}
}
/// Registers the given callback as `underflow_callback`.
/// This *optional* callback happens when the sound device runs out of buffered audio data to play.
/// After this occurs, the outstream waits until the buffer is full to resume playback.
/// This is called from the `OutStream::write_callback` thread context.
pub fn register_underflow_callback<U>(&mut self, callback: U)
where U: FnMut(OutStream) + 'a
{
self.callbacks.underflow = Some(Box::new(callback));
unsafe {
// register wrapper for write_callback
(*self.stream).underflow_callback = Some(underflow_wrapper);
// store reference to callbacks struct in userdata pointer
(*self.stream).userdata =
&self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
}
}
/// *Optional* callback. `err` is always `ffi::enums::SioError::ErrorStreaming`.
/// This is an unrecoverable error. The stream is in an
/// invalid state and must be destroyed, call `OutStream::destroy`.
/// If you do not supply `error_callback`, the default callback will print
/// a message to stderr and then call `abort`.
/// This is called from the `OutStream::write_callback` thread context.
pub fn register_error_callback<E>(&mut self, callback: E)
where E: FnMut(OutStream, ffi::enums::SioError) + 'a
{
self.callbacks.error = Some(Box::new(callback));
unsafe {
// register wrapper for write_callback
(*self.stream).error_callback = Some(error_wrapper);
// store reference to callbacks struct in userdata pointer
(*self.stream).userdata =
&self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
}
}
write_stream!(write_stream_i8, i8);
write_stream!(write_stream_u8, u8);
write_stream!(write_stream_i16, i16);
write_stream!(write_stream_u16, u16);
write_stream!(write_stream_i32, i32);
write_stream!(write_stream_u32, u32);
write_stream!(write_stream_f32, f32);
write_stream!(write_stream_f64, f64);
fn begin_write(&self,
areas: *mut *mut ffi::SoundIoChannelArea,
frame_count: &c_int)
-> SioResult<u32> {
let mut actual_frame_count = *frame_count as c_int;
match unsafe {
ffi::soundio_outstream_begin_write(self.stream,
areas,
&mut actual_frame_count as *mut c_int)
} {
ffi::enums::SioError::None => Ok(actual_frame_count as u32),
err @ _ => Err(err),
}
}
fn end_write(&self) -> Option<ffi::enums::SioError> {
match unsafe { ffi::soundio_outstream_end_write(self.stream) } {
ffi::enums::SioError::None => None,
err @ _ => Some(err),
}
}
/// Clears the output stream buffer.
/// This function can be called from any thread.
/// This function can be called regardless of whether the outstream is paused
/// or not.
/// Some backends do not support clearing the buffer. On these backends this
/// function will return `ffi::enums::SioError::IncompatibleBackend`.
/// Some devices do not support clearing the buffer. On these devices this
/// function might return `ffi::enums::SioError::IncompatibleDevice`.
/// Possible errors:
///
/// - `ffi::enums::SioError::Streaming`
/// - `ffi::enums::SioError::IncompatibleBackend`
/// - `ffi::enums::SioError::IncompatibleDevice`
pub fn clear_buffer(&self) -> Option<ffi::enums::SioError> {
match unsafe { ffi::soundio_outstream_clear_buffer(self.stream) } {
ffi::enums::SioError::None => None,
err @ _ => Some(err),
}
}
/// If the underlying backend and device support pausing, this pauses the
/// stream. `OutStream::write_callback` may be called a few more times if
/// the buffer is not full.
/// Pausing might put the hardware into a low power state which is ideal if your
/// software is silent for some time.
/// This function may be called from any thread context, including
/// `OutStream::write_callback`.
/// Pausing when already paused or unpausing when already unpaused has no
/// effect and returns `None`.
///
/// Possible errors:
///
/// - `ffi::enums::SioError::BackendDisconnected`
/// - `ffi::enums::SioError::Streaming`
/// - `ffi::enums::SioError::IncompatibleDevice` - device does not support
/// pausing/unpausing. This error code might not be returned even if the
/// device does not support pausing/unpausing.
/// - `ffi::enums::SioError::IncompatibleBackend` - backend does not support
/// pausing/unpausing.
/// - `ffi::enums::SioError::Invalid` - outstream not opened and started
pub fn pause(&self) -> Option<ffi::enums::SioError> {
self.stream_pause(true)
}
/// Unpauses the stream. See `pause` for more details.
///
/// Possible errors:
///
/// - `ffi::enums::SioError::BackendDisconnected`
/// - `ffi::enums::SioError::Streaming`
/// - `ffi::enums::SioError::IncompatibleDevice` - device does not support
/// pausing/unpausing. This error code might not be returned even if the
/// device does not support pausing/unpausing.
/// - `ffi::enums::SioError::IncompatibleBackend` - backend does not support
/// pausing/unpausing.
/// - `ffi::enums::SioError::Invalid` - outstream not opened and started
pub fn unpause(&self) -> Option<ffi::enums::SioError> {
self.stream_pause(false)
}
fn stream_pause(&self, pause: bool) -> Option<ffi::enums::SioError> {
let pause_c_bool = match pause {
true => 1u8,
false => 0u8,
};
match unsafe { ffi::soundio_outstream_pause(self.stream, pause_c_bool) } {
ffi::enums::SioError::None => None,
err @ _ => Some(err),
}
}
/// Obtain the total number of seconds that the next frame written after the
/// last frame written will take to become
/// audible.
/// This includes both software and hardware latency.
///
/// This function must be called only from within `OutStream::write_callback`.
///
/// Possible errors:
///
/// - `ffi::enums::SioError::Streaming`
pub fn latency(&self) -> SioResult<f64> {
let mut latency = 0.0f64;
match unsafe {
ffi::soundio_outstream_get_latency(self.stream, &mut latency as *mut c_double)
} {
ffi::enums::SioError::None => Ok(latency),
err @ _ => Err(err),
}
}
/// Returns the current `format` or a `ffi::enums::SioError::Invalid` if
/// the format is not set.
pub fn format(&self) -> SioResult<ffi::enums::SioFormat> {
match unsafe { (*self.stream).format } {
ffi::enums::SioFormat::Invalid => Err(ffi::enums::SioError::Invalid),
fmt @ _ => Ok(fmt),
}
}
/// Sets the stream format to `format`.
/// **Must** be called before `open`ing the stream.
///
/// If the device doesn't support the format
/// `ffi::enums::SioError::IncompatibleDevice` is returned.
pub fn set_format(&self, format: ffi::enums::SioFormat) -> SioResult<()> {
let dev = self.device();
if dev.supports_format(format) {
unsafe { (*self.stream).format = format };
Ok(())
} else {
Err(ffi::enums::SioError::IncompatibleDevice)
}
}
/// Returns the channel layout of the output stream.
pub fn layout(&self) -> ChannelLayout {
ChannelLayout::new(unsafe { &(*self.stream).layout })
}
/// Returns the sample rate of the output stream.
pub fn sample_rate(&self) -> u32 {
unsafe { (*self.stream).sample_rate as u32 }
}
/// Sets the stream sample rate.
/// Make sure that the device supports the given sample rate to avoid
/// sample rate conversions. A `Device` provides `supports_sample_rate` and
/// `nearest_sample_rate` methods for this purpose.
pub fn set_sample_rate(&mut self, sample_rate: u32) {
unsafe { (*self.stream).sample_rate = sample_rate as c_int }
}
/// Returns the underlying device of the output stream.
pub fn device(&self) -> Device {
let dev = Device::new(unsafe { (*self.stream).device });
dev
}
/// Sets the stream name to `name`.
/// PulseAudio uses this for the stream name.
/// JACK uses this for the client name of the client that connects when you
/// open the stream.
/// WASAPI uses this for the session display name.
/// Colons (`:`) contained in `name` will be replaced with `_`.
/// If the `name` contains a `NULL` byte, `SioError::EncodingString` is returned.
pub fn set_name<T: Into<String>>(&mut self, name: T) -> SioResult<()> {
let s = name.into().replace(":", "_");
self.name = try!(CString::new(s).map_err(|_| ffi::enums::SioError::EncodingString));
unsafe { (*self.stream).name = self.name.as_ptr() };
Ok(())
}
/// Returns the stream name or `None` if the name wasn't set.
pub fn name(&self) -> Option<String> {
let s_ptr = unsafe { (*self.stream).name };
if !s_ptr.is_null() {
match ffi::utils::ptr_to_string(s_ptr) {
Ok(s) => Some(s),
Err(_) => None,
}
} else {
None
}
}
/// Returns an `ffi::enums::SioError` if the layout is incompatible
/// with the audio output device.
/// If the layout is compatible `()` is returned.
pub fn layout_error(&self) -> SioResult<()> {
match unsafe { (*self.stream).layout_error } {
0 => Ok(()),
e @ _ => {
println!("layout error: {}", e);
Ok(())
}
}
}
/// Destroys the output stream.
/// Calls this when your application shuts down.
fn destroy(&self) {
unsafe { ffi::soundio_outstream_destroy(self.stream) }
}
}
// Owning wrappers destroy the raw stream on drop; temporary wrappers built
// inside the C trampolines set `marker` and must not.
impl<'a> Drop for OutStream<'a> {
    fn drop(&mut self) {
        // Only drop if usage `marker` is false.
        // The usage marker is set by the callback function to prevent the
        // source stream from dropping on the context switch of the callback function.
        if !self.marker {
            self.destroy()
        } else {
            // reset usage marker.
            self.marker = false
        }
    }
}
Remove redundant `let` binding
use std::os::raw::{c_int, c_double, c_void};
use std::{ptr, slice};
use std::ffi::CString;
use ffi;
use base::*;
// Generates a typed `write_stream_*` method on OutStream for one sample type.
macro_rules! write_stream {
    ($name:ident, $t:ty) => (
        /// Expects a vector of `channel_count` channel `buffers`
        /// containing audio data that is written into
        /// the buffer of the output device.
        /// NOTE: This is subject of change.
        /// Passing an iterator gives more flexibility and should be more performant
        /// in most cases.
        ///
        /// Returns the number of actually written frames.
        /// If the provided buffers contain less frames
        /// than `min_frame_count`, or less buffers
        /// as `channel_count` are provided,
        /// then a `ffi::enums::SioError::Invalid` is returned.
        pub fn $name(&self, min_frame_count: u32, buffers: &Vec<Vec<$t>>) -> SioResult<u32> {
            let channel_count = self.layout().channel_count();
            // check if buffer contains frames for all channels
            if buffers.len() < channel_count as usize {
                return Err(ffi::enums::SioError::Invalid);
            }
            // check if there are at least min_frame_count frames for all channels
            if !buffers.iter().map(|c| c.len()).all(|l| l >= min_frame_count as usize) {
                return Err(ffi::enums::SioError::Invalid);
            }
            // assuming that every channel buffer has the same length
            let frame_count = buffers[0].len() as c_int;
            let mut raw_areas: *mut ffi::SoundIoChannelArea = ptr::null_mut();
            // Ask the backend for writable areas; it may grant fewer frames
            // than requested (actual_frame_count).
            let actual_frame_count = try!(self.begin_write(&mut raw_areas, &frame_count));
            let areas = unsafe { slice::from_raw_parts_mut(raw_areas, channel_count as usize) };
            // Copy samples into each channel's area, stepping area.step bytes
            // between successive frames of the same channel.
            for idx in 0..actual_frame_count as usize {
                for channel in 0..channel_count as usize {
                    let area = areas[channel];
                    let addr = (area.ptr as usize + area.step as usize * idx) as *mut $t;
                    unsafe { *addr = buffers[channel][idx] };
                }
            }
            self.end_write().map_or(Ok(actual_frame_count), |err| Err(err))
        }
    )
}
// C-ABI trampoline registered as libsoundio's `write_callback`.
// Recovers the user closure from the stream's `userdata` pointer and
// forwards `(stream, frame_count_min, frame_count_max)` to it.
extern "C" fn write_wrapper(raw_out: *mut ffi::SoundIoOutStream, min: c_int, max: c_int) {
    let mut out = OutStream::new(raw_out);
    // Mark this temporary wrapper so its Drop does not destroy the
    // underlying stream when the callback returns.
    out.marker = true;
    // Safety: `userdata` was set by `register_write_callback` to point at
    // the owning OutStream's `callbacks` box; assumes that OutStream is
    // still alive and has not moved.
    let callbacks_ptr = unsafe { (*out.stream).userdata as *mut Box<OutStreamCallbacks> };
    let callbacks: &mut Box<OutStreamCallbacks> = unsafe { &mut *callbacks_ptr };
    callbacks.write.as_mut().map(|f| f(out, min as u32, max as u32));
}
// C-ABI trampoline registered as libsoundio's `underflow_callback`.
// Invokes the user closure (if any) with a temporary `OutStream` wrapper.
extern "C" fn underflow_wrapper(raw_out: *mut ffi::SoundIoOutStream) {
    let mut out = OutStream::new(raw_out);
    // Prevent the temporary wrapper from destroying the stream on drop.
    out.marker = true;
    // Safety: see `write_wrapper` — userdata points at the callbacks box.
    let callbacks_ptr = unsafe { (*out.stream).userdata as *mut Box<OutStreamCallbacks> };
    let callbacks: &mut Box<OutStreamCallbacks> = unsafe { &mut *callbacks_ptr };
    callbacks.underflow.as_mut().map(|f| f(out));
}
// C-ABI trampoline registered as libsoundio's `error_callback`.
// Forwards the reported `SioError` to the user closure (if any).
extern "C" fn error_wrapper(raw_out: *mut ffi::SoundIoOutStream, error: ffi::enums::SioError) {
    let mut out = OutStream::new(raw_out);
    // Prevent the temporary wrapper from destroying the stream on drop.
    out.marker = true;
    // Safety: see `write_wrapper` — userdata points at the callbacks box.
    let callbacks_ptr = unsafe { (*out.stream).userdata as *mut Box<OutStreamCallbacks> };
    let callbacks: &mut Box<OutStreamCallbacks> = unsafe { &mut *callbacks_ptr };
    callbacks.error.as_mut().map(|f| f(out, error));
}
// User-registered closures for the stream, stored behind a Box so the
// C trampolines can reach them through the raw `userdata` pointer.
struct OutStreamCallbacks<'a> {
    // Called when the device wants audio data (frame_count_min, frame_count_max).
    write: Option<Box<FnMut(OutStream, u32, u32) + 'a>>,
    // Called when the device runs out of buffered audio data.
    underflow: Option<Box<FnMut(OutStream) + 'a>>,
    // Called on unrecoverable stream errors.
    error: Option<Box<FnMut(OutStream, ffi::enums::SioError) + 'a>>,
}
impl<'a> Default for OutStreamCallbacks<'a> {
    /// Starts with no callbacks registered; each is filled in lazily by
    /// the corresponding `register_*_callback` method.
    fn default() -> Self {
        OutStreamCallbacks {
            write: None,
            underflow: None,
            error: None,
        }
    }
}
/// An audio output stream, returned from a `Device`.
pub struct OutStream<'a> {
    // Raw libsoundio handle; owned by this wrapper (see `Drop`).
    stream: *mut ffi::SoundIoOutStream,
    // Boxed so the C trampolines can address the closures via `userdata`.
    callbacks: Box<OutStreamCallbacks<'a>>,
    // Owns the stream-name bytes so the pointer handed to C in
    // `set_name` stays valid for the stream's lifetime.
    name: CString,
    // Usage marker: set by the callback trampolines so a temporary
    // wrapper does not destroy the stream on drop (see `Drop` impl).
    marker: bool,
}
impl<'a> OutStream<'a> {
/// Wraps a raw `*mut ffi::SoundIoOutStream` in an `OutStream`.
///
/// The wrapper starts with no callbacks registered, a default stream
/// name of `"outstream"`, and `marker` unset, so `Drop` will destroy
/// the raw stream unless a callback trampoline marks it as in use.
pub fn new(raw_stream: *mut ffi::SoundIoOutStream) -> Self {
    OutStream {
        stream: raw_stream,
        callbacks: Box::new(OutStreamCallbacks::default()),
        // "outstream" contains no NUL byte, so this cannot fail.
        name: CString::new("outstream").unwrap(),
        marker: false,
    }
}
/// Change settings (e.g. `set_format`) **before** calling `open`.
/// After you call this function, `OutStream::software_latency` is set to
/// the correct value.
///
/// The next thing to do is call `start`.
/// If this function returns an error, the outstream is in an invalid state and
/// you must call `destroy` on it.
///
/// Possible errors:
///
/// - `ffi::enums::SioError::Invalid`
///     - device is not an *output* device
///     - format is not valid
///     - `channel_count` is greater than 24
/// - `ffi::enums::SioError::NoMem`
/// - `ffi::enums::SioError::OpeningDevice`
/// - `ffi::enums::SioError::BackendDisconnected`
/// - `ffi::enums::SioError::SystemResources`
/// - `ffi::enums::SioError::NoSuchClient` - when JACK returns `JackNoSuchClient`
/// - `ffi::enums::SioError::IncompatibleBackend` - `OutStream::channel_count` is
/// greater than the number of channels the backend can handle.
/// - `ffi::enums::SioError::IncompatibleDevice` - stream parameters requested are not
/// compatible with the chosen device.
pub fn open(&self) -> SioResult<()> {
    match unsafe { ffi::soundio_outstream_open(self.stream) } {
        ffi::enums::SioError::None => Ok(()),
        // `err @ _` was redundant; binding the value directly is idiomatic.
        err => Err(err),
    }
}
/// After you call this function, the registered `write_callback` will be called.
///
/// This function might directly call the `write_callback`.
///
/// Possible errors:
///
/// - `ffi::enums::SioError::Streaming`
/// - `ffi::enums::SioError::NoMem`
/// - `ffi::enums::SioError::SystemResources`
/// - `ffi::enums::SioError::BackendDisconnected`
pub fn start(&self) -> SioResult<()> {
    match unsafe { ffi::soundio_outstream_start(self.stream) } {
        ffi::enums::SioError::None => Ok(()),
        // `err @ _` was redundant; bind the error directly.
        err => Err(err),
    }
}
/// Registers the given callback as `write_callback` that is called as soon as you call `start`.
///
/// In this callback, you call `OutStream::write_stream_FMT` where `FMT` is one of the supported
/// format, `u16`, `f32` etc.
///
/// `frame_count_max` will always be greater than 0. Note that you
/// should write as many frames as you can; `frame_count_min` might be 0 and
/// you can still get a buffer underflow if you always write
/// `frame_count_min` frames.
///
/// For Dummy, ALSA, and PulseAudio, `frame_count_min` will be 0. For JACK
/// and CoreAudio `frame_count_min` will be equal to `frame_count_max`.
///
/// The code in the supplied function must be suitable for real-time
/// execution. That means that it cannot call functions that might block
/// for a long time. This includes all I/O functions (disk, TTY, network),
/// malloc, free, printf, pthread_mutex_lock, sleep, wait, poll, select,
/// pthread_join, pthread_cond_wait, etc.
pub fn register_write_callback<W>(&mut self, callback: W)
    where W: FnMut(OutStream, u32, u32) + 'a
{
    // stored box reference to callback closure
    self.callbacks.write = Some(Box::new(callback));
    unsafe {
        // register wrapper for write_callback
        (*self.stream).write_callback = Some(write_wrapper);
        // store reference to callbacks struct in userdata pointer
        // NOTE(review): this stores the address of the `callbacks`
        // *field* inside this OutStream; if the OutStream value is
        // moved afterwards the pointer dangles — confirm that streams
        // are never moved after callback registration.
        (*self.stream).userdata =
            &self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
    }
}
/// Registers the given callback as `underflow_callback`.
/// This *optional* callback happens when the sound device runs out of buffered audio data to play.
/// After this occurs, the outstream waits until the buffer is full to resume playback.
/// This is called from the `OutStream::write_callback` thread context.
pub fn register_underflow_callback<U>(&mut self, callback: U)
    where U: FnMut(OutStream) + 'a
{
    self.callbacks.underflow = Some(Box::new(callback));
    unsafe {
        // register wrapper for underflow_callback
        (*self.stream).underflow_callback = Some(underflow_wrapper);
        // store reference to callbacks struct in userdata pointer
        // NOTE(review): address of a struct field — dangles if the
        // OutStream is moved after registration; confirm usage.
        (*self.stream).userdata =
            &self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
    }
}
/// *Optional* callback. `err` is always `ffi::enums::SioError::ErrorStreaming`.
/// This is an unrecoverable error. The stream is in an
/// invalid state and must be destroyed, call `OutStream::destroy`.
/// If you do not supply `error_callback`, the default callback will print
/// a message to stderr and then call `abort`.
/// This is called from the `OutStream::write_callback` thread context.
pub fn register_error_callback<E>(&mut self, callback: E)
    where E: FnMut(OutStream, ffi::enums::SioError) + 'a
{
    self.callbacks.error = Some(Box::new(callback));
    unsafe {
        // register wrapper for error_callback
        (*self.stream).error_callback = Some(error_wrapper);
        // store reference to callbacks struct in userdata pointer
        // NOTE(review): address of a struct field — dangles if the
        // OutStream is moved after registration; confirm usage.
        (*self.stream).userdata =
            &self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
    }
}
// Generate one typed write method per supported sample format; the
// shared implementation lives in the `write_stream!` macro above.
write_stream!(write_stream_i8, i8);
write_stream!(write_stream_u8, u8);
write_stream!(write_stream_i16, i16);
write_stream!(write_stream_u16, u16);
write_stream!(write_stream_i32, i32);
write_stream!(write_stream_u32, u32);
write_stream!(write_stream_f32, f32);
write_stream!(write_stream_f64, f64);
/// Asks libsoundio for a writable buffer of (at most) `frame_count`
/// frames and stores the per-channel areas in `areas`.
///
/// Returns the number of frames that may actually be written, or the
/// error reported by `soundio_outstream_begin_write`.
fn begin_write(&self,
               areas: *mut *mut ffi::SoundIoChannelArea,
               frame_count: &c_int)
               -> SioResult<u32> {
    // libsoundio updates this in place with the writable frame count.
    // (`*frame_count` is already a `c_int`; the old `as c_int` cast and
    // the explicit `as *mut c_int` coercion were redundant.)
    let mut actual_frame_count = *frame_count;
    match unsafe {
        ffi::soundio_outstream_begin_write(self.stream, areas, &mut actual_frame_count)
    } {
        ffi::enums::SioError::None => Ok(actual_frame_count as u32),
        err => Err(err),
    }
}
/// Commits the frames written since the matching `begin_write`.
/// Returns `None` on success, otherwise the reported error.
fn end_write(&self) -> Option<ffi::enums::SioError> {
    match unsafe { ffi::soundio_outstream_end_write(self.stream) } {
        ffi::enums::SioError::None => None,
        // `err @ _` was redundant; bind the error directly.
        err => Some(err),
    }
}
/// Clears the output stream buffer.
/// This function can be called from any thread.
/// This function can be called regardless of whether the outstream is paused
/// or not.
/// Some backends do not support clearing the buffer. On these backends this
/// function will return `ffi::enums::SioError::IncompatibleBackend`.
/// Some devices do not support clearing the buffer. On these devices this
/// function might return `ffi::enums::SioError::IncompatibleDevice`.
/// Possible errors:
///
/// - `ffi::enums::SioError::Streaming`
/// - `ffi::enums::SioError::IncompatibleBackend`
/// - `ffi::enums::SioError::IncompatibleDevice`
pub fn clear_buffer(&self) -> Option<ffi::enums::SioError> {
    match unsafe { ffi::soundio_outstream_clear_buffer(self.stream) } {
        ffi::enums::SioError::None => None,
        // `err @ _` was redundant; bind the error directly.
        err => Some(err),
    }
}
/// If the underlying backend and device support pausing, this pauses the
/// stream. `OutStream::write_callback` may be called a few more times if
/// the buffer is not full.
/// Pausing might put the hardware into a low power state which is ideal if your
/// software is silent for some time.
/// This function may be called from any thread context, including
/// `OutStream::write_callback`.
/// Pausing when already paused or unpausing when already unpaused has no
/// effect and returns `None`.
///
/// Possible errors:
///
/// - `ffi::enums::SioError::BackendDisconnected`
/// - `ffi::enums::SioError::Streaming`
/// - `ffi::enums::SioError::IncompatibleDevice` - device does not support
/// pausing/unpausing. This error code might not be returned even if the
/// device does not support pausing/unpausing.
/// - `ffi::enums::SioError::IncompatibleBackend` - backend does not support
/// pausing/unpausing.
/// - `ffi::enums::SioError::Invalid` - outstream not opened and started
pub fn pause(&self) -> Option<ffi::enums::SioError> {
    // Delegates to the shared pause/unpause FFI helper.
    self.stream_pause(true)
}
/// Unpauses the stream. See `pause` for more details.
///
/// Possible errors:
///
/// - `ffi::enums::SioError::BackendDisconnected`
/// - `ffi::enums::SioError::Streaming`
/// - `ffi::enums::SioError::IncompatibleDevice` - device does not support
/// pausing/unpausing. This error code might not be returned even if the
/// device does not support pausing/unpausing.
/// - `ffi::enums::SioError::IncompatibleBackend` - backend does not support
/// pausing/unpausing.
/// - `ffi::enums::SioError::Invalid` - outstream not opened and started
pub fn unpause(&self) -> Option<ffi::enums::SioError> {
    // Delegates to the shared pause/unpause FFI helper.
    self.stream_pause(false)
}
/// Shared implementation behind `pause`/`unpause`: converts the flag to
/// the `u8` the C API expects (1 = pause, 0 = unpause) and forwards it.
fn stream_pause(&self, pause: bool) -> Option<ffi::enums::SioError> {
    // `bool as u8` is guaranteed to yield 1 for true and 0 for false,
    // replacing the previous four-line match.
    let pause_c_bool = pause as u8;
    match unsafe { ffi::soundio_outstream_pause(self.stream, pause_c_bool) } {
        ffi::enums::SioError::None => None,
        err => Some(err),
    }
}
/// Obtain the total number of seconds that the next frame written after the
/// last frame written will take to become
/// audible.
/// This includes both software and hardware latency.
///
/// This function must be called only from within `OutStream::write_callback`.
///
/// Possible errors:
///
/// - `ffi::enums::SioError::Streaming`
pub fn latency(&self) -> SioResult<f64> {
    // Out-parameter filled in by libsoundio; `&mut latency` coerces to
    // `*mut c_double` at the FFI boundary without an explicit cast.
    let mut latency = 0.0f64;
    match unsafe { ffi::soundio_outstream_get_latency(self.stream, &mut latency) } {
        ffi::enums::SioError::None => Ok(latency),
        err => Err(err),
    }
}
/// Returns the current `format` or a `ffi::enums::SioError::Invalid` if
/// the format is not set.
pub fn format(&self) -> SioResult<ffi::enums::SioFormat> {
    match unsafe { (*self.stream).format } {
        ffi::enums::SioFormat::Invalid => Err(ffi::enums::SioError::Invalid),
        // `fmt @ _` was redundant; bind the format directly.
        fmt => Ok(fmt),
    }
}
/// Sets the stream format to `format`.
/// **Must** be called before `open`ing the stream.
///
/// If the device doesn't support the format
/// `ffi::enums::SioError::IncompatibleDevice` is returned.
///
/// NOTE(review): writes through the raw stream pointer despite taking
/// `&self` — interior mutability via the FFI handle.
pub fn set_format(&self, format: ffi::enums::SioFormat) -> SioResult<()> {
    let dev = self.device();
    // Validate against the device before committing the format.
    if dev.supports_format(format) {
        unsafe { (*self.stream).format = format };
        Ok(())
    } else {
        Err(ffi::enums::SioError::IncompatibleDevice)
    }
}
/// Returns the channel layout of the output stream.
pub fn layout(&self) -> ChannelLayout {
    // Borrows the layout embedded in the raw stream struct.
    ChannelLayout::new(unsafe { &(*self.stream).layout })
}
/// Returns the sample rate of the output stream.
pub fn sample_rate(&self) -> u32 {
    unsafe { (*self.stream).sample_rate as u32 }
}
/// Sets the stream sample rate.
/// Make sure that the device supports the given sample rate to avoid
/// sample rate conversions. A `Device` provides `supports_sample_rate` and
/// `nearest_sample_rate` methods for this purpose.
pub fn set_sample_rate(&mut self, sample_rate: u32) {
    unsafe { (*self.stream).sample_rate = sample_rate as c_int }
}
/// Returns the underlying device of the output stream.
pub fn device(&self) -> Device {
    Device::new(unsafe { (*self.stream).device })
}
/// Sets the stream name to `name`.
/// PulseAudio uses this for the stream name.
/// JACK uses this for the client name of the client that connects when you
/// open the stream.
/// WASAPI uses this for the session display name.
/// Colons (`:`) contained in `name` will be replaced with `_`.
/// If the `name` contains a `NULL` byte, `SioError::EncodingString` is returned.
pub fn set_name<T: Into<String>>(&mut self, name: T) -> SioResult<()> {
    let s = name.into().replace(":", "_");
    // Store the CString in `self.name` first: the struct owns the bytes,
    // keeping the pointer handed to C below valid for the stream's lifetime.
    self.name = try!(CString::new(s).map_err(|_| ffi::enums::SioError::EncodingString));
    unsafe { (*self.stream).name = self.name.as_ptr() };
    Ok(())
}
/// Returns the stream name or `None` if the name wasn't set.
///
/// Also returns `None` when the C string cannot be converted
/// (e.g. invalid UTF-8), matching the original Ok/Err handling.
pub fn name(&self) -> Option<String> {
    let s_ptr = unsafe { (*self.stream).name };
    // Guard clause instead of nested if/else.
    if s_ptr.is_null() {
        return None;
    }
    // `.ok()` collapses the manual `match Ok/Err => Some/None`.
    ffi::utils::ptr_to_string(s_ptr).ok()
}
/// Returns an `ffi::enums::SioError` if the layout is incompatible
/// with the audio output device.
/// If the layout is compatible `()` is returned.
///
/// NOTE(review): the implementation below currently only *logs* a
/// non-zero layout error and still returns `Ok(())`, which contradicts
/// the contract above — confirm whether callers rely on this lenient
/// behavior before changing it.
pub fn layout_error(&self) -> SioResult<()> {
    match unsafe { (*self.stream).layout_error } {
        // 0 means the requested channel layout was accepted as-is.
        0 => Ok(()),
        e @ _ => {
            // Non-zero: a layout fallback occurred. The raw code is
            // printed rather than converted into a `SioError`.
            println!("layout error: {}", e);
            Ok(())
        }
    }
}
/// Destroys the output stream.
/// Calls this when your application shuts down.
///
/// Frees the underlying `SoundIoOutStream`; using `self.stream` after
/// this call is undefined behavior, which is why the method is private
/// and (in the visible code) only invoked from `Drop`.
fn destroy(&self) {
    // Safety: `self.stream` is the raw handle this wrapper owns.
    unsafe { ffi::soundio_outstream_destroy(self.stream) }
}
}
impl<'a> Drop for OutStream<'a> {
    /// Destroys the raw stream unless this `OutStream` is a temporary
    /// wrapper created inside a callback trampoline.
    fn drop(&mut self) {
        // Only drop if usage `marker` is false.
        // The usage marker is set by the callback function to prevent the
        // source stream from dropping on the context switch of the callback function.
        if !self.marker {
            self.destroy()
        } else {
            // reset usage marker.
            // The stream survives this drop; the next non-callback drop
            // will then destroy it.
            self.marker = false
        }
    }
}
|
use std::os::raw::{c_int, c_double, c_void};
use ffi;
use base::*;
// C-ABI trampoline for `write_callback` in this revision.
// The type parameter `W` is unused in the body; it exists so a distinct
// monomorphized trampoline is registered per closure type
// (see `register_write_callback`, which passes `write_wrapper::<W>`).
extern "C" fn write_wrapper<W>(raw_out: *mut ffi::SoundIoOutStream, min: c_int, max: c_int)
    where W: Fn(OutStream, i32, i32)
{
    let out = OutStream::new(raw_out);
    // Safety: `userdata` was set to point at the callbacks box by the
    // register_* methods; assumes the owning OutStream is still alive.
    let callbacks_ptr = unsafe { (*out.stream).userdata as *const Box<OutStreamCallbacks> };
    let callbacks: &Box<OutStreamCallbacks> = unsafe { &*callbacks_ptr };
    callbacks.write.as_ref().map(|ref f| f(out, min as i32, max as i32));
}
// C-ABI trampoline for `underflow_callback`; `U` only serves to
// monomorphize the trampoline per registered closure type.
extern "C" fn underflow_wrapper<U>(raw_out: *mut ffi::SoundIoOutStream)
    where U: Fn(OutStream)
{
    let out = OutStream::new(raw_out);
    // Safety: see `write_wrapper` — userdata points at the callbacks box.
    let callbacks_ptr = unsafe { (*out.stream).userdata as *const Box<OutStreamCallbacks> };
    let callbacks: &Box<OutStreamCallbacks> = unsafe { &*callbacks_ptr };
    callbacks.underflow.as_ref().map(|ref f| f(out));
}
// C-ABI trampoline for `error_callback`; `E` only serves to
// monomorphize the trampoline per registered closure type.
extern "C" fn error_wrapper<E>(raw_out: *mut ffi::SoundIoOutStream, error: ffi::SioError)
    where E: Fn(OutStream, ffi::SioError)
{
    let out = OutStream::new(raw_out);
    // Safety: see `write_wrapper` — userdata points at the callbacks box.
    let callbacks_ptr = unsafe { (*out.stream).userdata as *const Box<OutStreamCallbacks> };
    let callbacks: &Box<OutStreamCallbacks> = unsafe { &*callbacks_ptr };
    callbacks.error.as_ref().map(|ref f| f(out, error));
}
struct OutStreamCallbacks<'a> {
write: Option<Box<Fn(OutStream, i32, i32) + 'a>>,
underflow: Option<Box<Fn(OutStream) + 'a>>,
error: Option<Box<Fn(OutStream, ffi::SioError) + 'a>>,
}
impl<'a> Default for OutStreamCallbacks<'a> {
fn default() -> Self {
OutStreamCallbacks {
write: None,
underflow: None,
error: None,
}
}
}
// Explicit no-op Drop: nothing needs manual cleanup here, the boxed
// closures free themselves. Kept as a placeholder in this revision.
impl<'a> Drop for OutStreamCallbacks<'a> {
    fn drop(&mut self) {}
}
pub struct OutStream<'a> {
stream: *mut ffi::SoundIoOutStream,
callbacks: Box<OutStreamCallbacks<'a>>,
}
impl<'a> OutStream<'a> {
pub fn new(raw_stream: *mut ffi::SoundIoOutStream) -> Self {
let callbacks = Box::new(OutStreamCallbacks::default());
OutStream {
stream: raw_stream,
callbacks: callbacks,
}
}
/// Opens the output stream.
/// Returns `None` on success, otherwise the error reported by
/// `soundio_outstream_open`.
pub fn open(&self) -> Option<ffi::SioError> {
    match unsafe { ffi::soundio_outstream_open(self.stream) } {
        ffi::SioError::None => None,
        // `err @ _` was redundant; bind the error directly.
        err => Some(err),
    }
}
pub fn start(&self) -> Option<ffi::SioError> {
match unsafe { ffi::soundio_outstream_start(self.stream) } {
ffi::SioError::None => None,
err @ _ => Some(err),
}
}
pub fn register_write_callback<W>(&mut self, callback: Box<W>)
where W: Fn(OutStream, i32, i32) + 'a
{
// stored box reference to callback closure
self.callbacks.write = Some(callback);
unsafe {
// register wrapper for write_callback
(*self.stream).write_callback = Some(write_wrapper::<W>);
// store reference to callbacks struct in userdata pointer
(*self.stream).userdata =
&self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
}
}
pub fn register_underflow_callback<U>(&mut self, callback: Box<U>)
where U: Fn(OutStream) + 'a
{
self.callbacks.underflow = Some(callback);
unsafe {
// register wrapper for write_callback
(*self.stream).underflow_callback = Some(underflow_wrapper::<U>);
// store reference to callbacks struct in userdata pointer
(*self.stream).userdata =
&self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
}
}
pub fn register_error_callback<E>(&mut self, callback: Box<E>)
where E: Fn(OutStream, ffi::SioError) + 'a
{
self.callbacks.error = Some(callback);
unsafe {
// register wrapper for write_callback
(*self.stream).error_callback = Some(error_wrapper::<E>);
// store reference to callbacks struct in userdata pointer
(*self.stream).userdata =
&self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
}
}
pub fn begin_write(&self,
areas: *mut *mut ffi::SoundIoChannelArea,
frame_count: *mut c_int)
-> Option<ffi::SioError> {
match unsafe { ffi::soundio_outstream_begin_write(self.stream, areas, frame_count) } {
ffi::SioError::None => None,
err @ _ => Some(err),
}
}
pub fn end_write(&self) -> Option<ffi::SioError> {
match unsafe { ffi::soundio_outstream_end_write(self.stream) } {
ffi::SioError::None => None,
err @ _ => Some(err),
}
}
pub fn clear_buffer(&self) -> Option<ffi::SioError> {
match unsafe { ffi::soundio_outstream_clear_buffer(self.stream) } {
ffi::SioError::None => None,
err @ _ => Some(err),
}
}
pub fn pause(&self, pause: bool) -> Option<ffi::SioError> {
let pause_c_bool = match pause {
true => 1u8,
false => 0u8,
};
match unsafe { ffi::soundio_outstream_pause(self.stream, pause_c_bool) } {
ffi::SioError::None => None,
err @ _ => Some(err),
}
}
pub fn get_latency(&self) -> Result<f64, ffi::SioError> {
let mut latency = 0.0f64;
match unsafe {
ffi::soundio_outstream_get_latency(self.stream, &mut latency as *mut c_double)
} {
ffi::SioError::None => Ok(latency),
err @ _ => Err(err),
}
}
pub fn current_format(&self) -> Result<ffi::SioFormat, ffi::SioError> {
match unsafe { (*self.stream).format } {
ffi::SioFormat::Invalid => Err(ffi::SioError::Invalid),
fmt @ _ => Ok(fmt),
}
}
pub fn get_layout(&self) -> ChannelLayout {
ChannelLayout::new(unsafe { &(*self.stream).layout })
}
pub fn get_sample_rate(&self) -> i32 {
unsafe { (*self.stream).sample_rate as i32 }
}
/// Returns the `Device` backing this stream.
/// `inc_ref` presumably bumps the device's reference count so the
/// returned handle outlives this call — confirm against `Device`'s
/// refcount semantics.
pub fn get_device(&self) -> Device {
    let dev = Device::new(unsafe { (*self.stream).device });
    dev.inc_ref();
    dev
}
pub fn destroy(&self) {
unsafe { ffi::soundio_outstream_destroy(self.stream) }
}
}
impl<'a> Drop for OutStream<'a> {
    /// Intentionally a no-op in this revision: the raw stream is NOT
    /// destroyed here, because multiple `OutStream` wrappers may be
    /// created over the same raw pointer (e.g. inside callbacks).
    fn drop(&mut self) {
        // TODO: call destroy manually.
        // OutStream will get dropped each time a new
        // struct is created from the same *mut pointer.
    }
}
Implement `write_stream`
This function should be called from the write callback and
writes the buffer contents to the output stream.
TODO: Implement a macro to generate this method for all supported
formats.
use std::os::raw::{c_int, c_double, c_void};
use std::ptr;
use ffi;
use base::*;
extern "C" fn write_wrapper<W>(raw_out: *mut ffi::SoundIoOutStream, min: c_int, max: c_int)
where W: Fn(OutStream, i32, i32)
{
let out = OutStream::new(raw_out);
let callbacks_ptr = unsafe { (*out.stream).userdata as *const Box<OutStreamCallbacks> };
let callbacks: &Box<OutStreamCallbacks> = unsafe { &*callbacks_ptr };
callbacks.write.as_ref().map(|ref f| f(out, min as i32, max as i32));
}
extern "C" fn underflow_wrapper<U>(raw_out: *mut ffi::SoundIoOutStream)
where U: Fn(OutStream)
{
let out = OutStream::new(raw_out);
let callbacks_ptr = unsafe { (*out.stream).userdata as *const Box<OutStreamCallbacks> };
let callbacks: &Box<OutStreamCallbacks> = unsafe { &*callbacks_ptr };
callbacks.underflow.as_ref().map(|ref f| f(out));
}
extern "C" fn error_wrapper<E>(raw_out: *mut ffi::SoundIoOutStream, error: ffi::SioError)
where E: Fn(OutStream, ffi::SioError)
{
let out = OutStream::new(raw_out);
let callbacks_ptr = unsafe { (*out.stream).userdata as *const Box<OutStreamCallbacks> };
let callbacks: &Box<OutStreamCallbacks> = unsafe { &*callbacks_ptr };
callbacks.error.as_ref().map(|ref f| f(out, error));
}
struct OutStreamCallbacks<'a> {
write: Option<Box<Fn(OutStream, i32, i32) + 'a>>,
underflow: Option<Box<Fn(OutStream) + 'a>>,
error: Option<Box<Fn(OutStream, ffi::SioError) + 'a>>,
}
impl<'a> Default for OutStreamCallbacks<'a> {
fn default() -> Self {
OutStreamCallbacks {
write: None,
underflow: None,
error: None,
}
}
}
impl<'a> Drop for OutStreamCallbacks<'a> {
fn drop(&mut self) {}
}
pub struct OutStream<'a> {
stream: *mut ffi::SoundIoOutStream,
callbacks: Box<OutStreamCallbacks<'a>>,
}
impl<'a> OutStream<'a> {
pub fn new(raw_stream: *mut ffi::SoundIoOutStream) -> Self {
let callbacks = Box::new(OutStreamCallbacks::default());
OutStream {
stream: raw_stream,
callbacks: callbacks,
}
}
pub fn open(&self) -> Option<ffi::SioError> {
match unsafe { ffi::soundio_outstream_open(self.stream) } {
ffi::SioError::None => None,
err @ _ => Some(err),
}
}
pub fn start(&self) -> Option<ffi::SioError> {
match unsafe { ffi::soundio_outstream_start(self.stream) } {
ffi::SioError::None => None,
err @ _ => Some(err),
}
}
pub fn register_write_callback<W>(&mut self, callback: Box<W>)
where W: Fn(OutStream, i32, i32) + 'a
{
// stored box reference to callback closure
self.callbacks.write = Some(callback);
unsafe {
// register wrapper for write_callback
(*self.stream).write_callback = Some(write_wrapper::<W>);
// store reference to callbacks struct in userdata pointer
(*self.stream).userdata =
&self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
}
}
pub fn register_underflow_callback<U>(&mut self, callback: Box<U>)
where U: Fn(OutStream) + 'a
{
self.callbacks.underflow = Some(callback);
unsafe {
// register wrapper for write_callback
(*self.stream).underflow_callback = Some(underflow_wrapper::<U>);
// store reference to callbacks struct in userdata pointer
(*self.stream).userdata =
&self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
}
}
pub fn register_error_callback<E>(&mut self, callback: Box<E>)
where E: Fn(OutStream, ffi::SioError) + 'a
{
self.callbacks.error = Some(callback);
unsafe {
// register wrapper for write_callback
(*self.stream).error_callback = Some(error_wrapper::<E>);
// store reference to callbacks struct in userdata pointer
(*self.stream).userdata =
&self.callbacks as *const Box<OutStreamCallbacks> as *mut c_void
}
}
/// Writes `buffers` (one `Vec<f32>` per channel) into the device buffer.
/// Returns the number of frames actually written, or
/// `ffi::SioError::Invalid` if fewer than `channel_count` buffers or
/// fewer than `min_frame_count` frames per buffer are supplied.
///
/// NOTE(review): two apparent inconsistencies in this revision —
/// `try!` is applied to `begin_write`, which (below) returns an
/// `Option`, and `&frame_count` (a shared reference) is passed where
/// `begin_write` declares `*mut c_int`. Confirm against the intended
/// signatures before relying on this snapshot.
pub fn write_stream(&self,
                    min_frame_count: i32,
                    buffers: &Vec<Vec<f32>>)
                    -> Result<i32, ffi::SioError> {
    let channel_count = self.get_layout().channel_count();
    // check if buffer contains frames for all channels
    if buffers.len() < channel_count as usize {
        return Err(ffi::SioError::Invalid);
    }
    // check if there are at least min_frame_count frames for all channels
    if !buffers.iter().map(|c| c.len()).all(|l| l >= min_frame_count as usize) {
        return Err(ffi::SioError::Invalid);
    }
    // assuming that every channel buffer has the same length
    // NOTE(review): `mut` appears unnecessary — `frame_count` is never
    // reassigned after this binding.
    let mut frame_count = buffers[0].len() as c_int;
    let mut raw_areas: *mut ffi::SoundIoChannelArea = ptr::null_mut();
    let actual_frame_count = try!(self.begin_write(&mut raw_areas, &frame_count));
    // Safety: libsoundio provides one channel area per channel after a
    // successful begin_write.
    let areas = unsafe { ::std::slice::from_raw_parts_mut(raw_areas, channel_count as usize) };
    for idx in 0..actual_frame_count as usize {
        for channel in 0..channel_count as usize {
            let area = areas[channel];
            // `step` is the byte stride between successive frames.
            let addr = (area.ptr as usize + area.step as usize * idx) as *mut f32;
            unsafe { *addr = buffers[channel][idx] }
        }
    }
    self.end_write().map_or(Ok(actual_frame_count), |err| Err(err))
}
pub fn begin_write(&self,
areas: *mut *mut ffi::SoundIoChannelArea,
frame_count: *mut c_int)
-> Option<ffi::SioError> {
match unsafe { ffi::soundio_outstream_begin_write(self.stream, areas, frame_count) } {
ffi::SioError::None => None,
err @ _ => Some(err),
}
}
pub fn end_write(&self) -> Option<ffi::SioError> {
match unsafe { ffi::soundio_outstream_end_write(self.stream) } {
ffi::SioError::None => None,
err @ _ => Some(err),
}
}
pub fn clear_buffer(&self) -> Option<ffi::SioError> {
match unsafe { ffi::soundio_outstream_clear_buffer(self.stream) } {
ffi::SioError::None => None,
err @ _ => Some(err),
}
}
pub fn pause(&self, pause: bool) -> Option<ffi::SioError> {
let pause_c_bool = match pause {
true => 1u8,
false => 0u8,
};
match unsafe { ffi::soundio_outstream_pause(self.stream, pause_c_bool) } {
ffi::SioError::None => None,
err @ _ => Some(err),
}
}
pub fn get_latency(&self) -> Result<f64, ffi::SioError> {
let mut latency = 0.0f64;
match unsafe {
ffi::soundio_outstream_get_latency(self.stream, &mut latency as *mut c_double)
} {
ffi::SioError::None => Ok(latency),
err @ _ => Err(err),
}
}
pub fn current_format(&self) -> Result<ffi::SioFormat, ffi::SioError> {
match unsafe { (*self.stream).format } {
ffi::SioFormat::Invalid => Err(ffi::SioError::Invalid),
fmt @ _ => Ok(fmt),
}
}
pub fn get_layout(&self) -> ChannelLayout {
ChannelLayout::new(unsafe { &(*self.stream).layout })
}
pub fn get_sample_rate(&self) -> i32 {
unsafe { (*self.stream).sample_rate as i32 }
}
pub fn get_device(&self) -> Device {
let dev = Device::new(unsafe { (*self.stream).device });
dev.inc_ref();
dev
}
pub fn destroy(&self) {
unsafe { ffi::soundio_outstream_destroy(self.stream) }
}
}
impl<'a> Drop for OutStream<'a> {
fn drop(&mut self) {
// TODO: call destroy manually.
// OutStream will get dropped each time a new
// struct is created from the same *mut pointer.
}
}
|
// Copyright (c) 2017-2021 Rene van der Meer
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Raspberry Pi system-related tools.
//!
//! Use [`DeviceInfo`] to identify the Raspberry Pi's model and SoC.
//!
//! [`DeviceInfo`]: struct.DeviceInfo.html
use std::error;
use std::fmt;
use std::fs;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::result;
// Peripheral base addresses per Raspberry Pi generation — presumably the
// memory-mapped I/O windows of the BCM2835 / BCM2836-7 / BCM2711 SoCs;
// confirm against the Broadcom datasheets when adding models.
const PERIPHERAL_BASE_RPI: u32 = 0x2000_0000;
const PERIPHERAL_BASE_RPI2: u32 = 0x3f00_0000;
const PERIPHERAL_BASE_RPI4: u32 = 0xfe00_0000;
// Offset of the GPIO register block from the peripheral base.
const GPIO_OFFSET: u32 = 0x20_0000;
/// Errors that can occur when trying to identify the Raspberry Pi hardware.
#[derive(Debug)]
pub enum Error {
/// Unknown model.
///
/// `DeviceInfo` was unable to identify the Raspberry Pi model based on the
/// contents of `/proc/cpuinfo`, `/sys/firmware/devicetree/base/compatible`
/// and `/sys/firmware/devicetree/base/model`.
///
/// Support for new models is usually added shortly after they are officially
/// announced and available to the public. Make sure you're using the latest
/// release of RPPAL.
///
/// You may also encounter this error if your Linux distribution
/// doesn't provide any of the common user-accessible system files
/// that are used to identify the model and SoC.
UnknownModel,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::UnknownModel => write!(f, "Unknown Raspberry Pi model"),
}
}
}
// Marker impl: `Error` uses `std::error::Error`'s default methods.
impl error::Error for Error {}
/// Result type returned from methods that can have `system::Error`s.
pub type Result<T> = result::Result<T, Error>;
/// Identifiable Raspberry Pi models.
///
/// `Model` might be extended with additional variants in a minor or
/// patch revision, and must not be exhaustively matched against.
/// Instead, add a `_` catch-all arm to match future variants.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[non_exhaustive]
pub enum Model {
    // A/B board family
    RaspberryPiA,
    RaspberryPiAPlus,
    RaspberryPiBRev1,
    RaspberryPiBRev2,
    RaspberryPiBPlus,
    RaspberryPi2B,
    RaspberryPi3APlus,
    RaspberryPi3B,
    RaspberryPi3BPlus,
    RaspberryPi4B,
    // Keyboard form factor
    RaspberryPi400,
    // Compute Modules
    RaspberryPiComputeModule,
    RaspberryPiComputeModule3,
    RaspberryPiComputeModule3Plus,
    RaspberryPiComputeModule4,
    // Zero family
    RaspberryPiZero,
    RaspberryPiZeroW,
}
impl fmt::Display for Model {
    /// Writes the marketing name of the model.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Resolve the label first, then emit it with a single write.
        let label = match *self {
            Model::RaspberryPiA => "Raspberry Pi A",
            Model::RaspberryPiAPlus => "Raspberry Pi A+",
            Model::RaspberryPiBRev1 => "Raspberry Pi B Rev 1",
            Model::RaspberryPiBRev2 => "Raspberry Pi B Rev 2",
            Model::RaspberryPiBPlus => "Raspberry Pi B+",
            Model::RaspberryPi2B => "Raspberry Pi 2 B",
            Model::RaspberryPi3B => "Raspberry Pi 3 B",
            Model::RaspberryPi3BPlus => "Raspberry Pi 3 B+",
            Model::RaspberryPi3APlus => "Raspberry Pi 3 A+",
            Model::RaspberryPi4B => "Raspberry Pi 4 B",
            Model::RaspberryPi400 => "Raspberry Pi 400",
            Model::RaspberryPiComputeModule => "Raspberry Pi Compute Module",
            Model::RaspberryPiComputeModule3 => "Raspberry Pi Compute Module 3",
            Model::RaspberryPiComputeModule3Plus => "Raspberry Pi Compute Module 3+",
            Model::RaspberryPiComputeModule4 => "Raspberry Pi Compute Module 4",
            Model::RaspberryPiZero => "Raspberry Pi Zero",
            Model::RaspberryPiZeroW => "Raspberry Pi Zero W",
        };
        f.write_str(label)
    }
}
/// Identifiable Raspberry Pi SoCs.
///
/// `SoC` might be extended with additional variants in a minor or
/// patch revision, and must not be exhaustively matched against.
/// Instead, add a `_` catch-all arm to match future variants.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[non_exhaustive]
pub enum SoC {
    // Variants are named after the Broadcom part numbers.
    Bcm2835,
    Bcm2836,
    Bcm2837A1,
    Bcm2837B0,
    Bcm2711,
}
impl fmt::Display for SoC {
    /// Writes the uppercase Broadcom part number.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Resolve the label first, then emit it with a single write.
        let label = match *self {
            SoC::Bcm2835 => "BCM2835",
            SoC::Bcm2836 => "BCM2836",
            SoC::Bcm2837A1 => "BCM2837A1",
            SoC::Bcm2837B0 => "BCM2837B0",
            SoC::Bcm2711 => "BCM2711",
        };
        f.write_str(label)
    }
}
// Identify Pi model based on /proc/cpuinfo
fn parse_proc_cpuinfo() -> Result<Model> {
let proc_cpuinfo = BufReader::new(match File::open("/proc/cpuinfo") {
Ok(file) => file,
Err(_) => return Err(Error::UnknownModel),
});
let mut hardware: String = String::new();
let mut revision: String = String::new();
for line_result in proc_cpuinfo.lines() {
if let Ok(line) = line_result {
if let Some(line_value) = line.strip_prefix("Hardware\t: ") {
hardware = String::from(line_value);
} else if let Some(line_value) = line.strip_prefix("Revision\t: ") {
revision = String::from(line_value).to_lowercase();
}
}
}
// Return an error if we don't recognize the SoC. This check is
// done to prevent accidentally identifying a non-Pi SBC as a Pi
// solely based on the revision field.
match &hardware[..] {
"BCM2708" | "BCM2835" | "BCM2709" | "BCM2836" | "BCM2710" | "BCM2837" | "BCM2837A1"
| "BCM2837B0" | "BCM2711" => {}
_ => return Err(Error::UnknownModel),
}
let model = if (revision.len() == 4) || (revision.len() == 8) {
// Older revisions are 4 characters long, or 8 if they've been over-volted
match &revision[revision.len() - 4..] {
"0007" | "0008" | "0009" | "0015" => Model::RaspberryPiA,
"beta" | "0002" | "0003" => Model::RaspberryPiBRev1,
"0004" | "0005" | "0006" | "000d" | "000e" | "000f" => Model::RaspberryPiBRev2,
"0012" => Model::RaspberryPiAPlus,
"0010" | "0013" => Model::RaspberryPiBPlus,
"0011" | "0014" => Model::RaspberryPiComputeModule,
_ => return Err(Error::UnknownModel),
}
} else if revision.len() >= 6 {
// Newer revisions consist of at least 6 characters
match &revision[..] {
"900021" => Model::RaspberryPiAPlus,
"900032" => Model::RaspberryPiBPlus,
"a01040" | "a01041" | "a21041" | "a22042" => Model::RaspberryPi2B,
"a02082" | "a22082" | "a32082" | "a52082" => Model::RaspberryPi3B,
"900092" | "900093" | "920092" | "920093" => Model::RaspberryPiZero,
"a020a0" | "a220a0" => Model::RaspberryPiComputeModule3,
"9000c1" => Model::RaspberryPiZeroW,
"a020d3" => Model::RaspberryPi3BPlus,
"9020e0" => Model::RaspberryPi3APlus,
"a02100" => Model::RaspberryPiComputeModule3Plus,
"a03111" | "a03112" | "b03111" | "b03112" | "b03114" | "c03111" | "c03112"
| "c03114" | "d03114" => Model::RaspberryPi4B,
"c03130" => Model::RaspberryPi400,
"a03140" | "b03140" => Model::RaspberryPiComputeModule4,
_ => return Err(Error::UnknownModel),
}
} else {
return Err(Error::UnknownModel);
};
Ok(model)
}
// Identify Pi model based on /sys/firmware/devicetree/base/compatible
fn parse_base_compatible() -> Result<Model> {
let base_compatible = match fs::read_to_string("/sys/firmware/devicetree/base/compatible") {
Ok(buffer) => buffer,
Err(_) => return Err(Error::UnknownModel),
};
// Based on /arch/arm/boot/dts/ and /Documentation/devicetree/bindings/arm/bcm/
for comp_id in base_compatible.split('\0') {
let model = match comp_id {
"raspberrypi,model-b-i2c0" => Model::RaspberryPiBRev1,
"raspberrypi,model-b" => Model::RaspberryPiBRev1,
"raspberrypi,model-a" => Model::RaspberryPiA,
"raspberrypi,model-b-rev2" => Model::RaspberryPiBRev2,
"raspberrypi,model-a-plus" => Model::RaspberryPiAPlus,
"raspberrypi,model-b-plus" => Model::RaspberryPiBPlus,
"raspberrypi,2-model-b" => Model::RaspberryPi2B,
"raspberrypi,compute-module" => Model::RaspberryPiComputeModule,
"raspberrypi,3-model-b" => Model::RaspberryPi3B,
"raspberrypi,model-zero" => Model::RaspberryPiZero,
"raspberrypi,3-compute-module" => Model::RaspberryPiComputeModule3,
"raspberrypi,3-compute-module-plus" => Model::RaspberryPiComputeModule3Plus,
"raspberrypi,model-zero-w" => Model::RaspberryPiZeroW,
"raspberrypi,3-model-b-plus" => Model::RaspberryPi3BPlus,
"raspberrypi,3-model-a-plus" => Model::RaspberryPi3APlus,
"raspberrypi,4-model-b" => Model::RaspberryPi4B,
"raspberrypi,400" => Model::RaspberryPi400,
"raspberrypi,4-compute-module" => Model::RaspberryPiComputeModule4,
_ => continue,
};
return Ok(model);
}
Err(Error::UnknownModel)
}
// Identify Pi model based on /sys/firmware/devicetree/base/model
fn parse_base_model() -> Result<Model> {
let mut base_model = match fs::read_to_string("/sys/firmware/devicetree/base/model") {
Ok(mut buffer) => {
if let Some(idx) = buffer.find('\0') {
buffer.truncate(idx);
buffer
} else {
buffer
}
}
Err(_) => return Err(Error::UnknownModel),
};
// Check if this is a Pi B rev 2 before we remove the revision part, assuming the
// PCB Revision numbers on https://elinux.org/RPi_HardwareHistory are correct, and
// the installed distro appends the revision to the model name.
match &base_model[..] {
"Raspberry Pi Model B Rev 2.0" => return Ok(Model::RaspberryPiBRev2),
"Raspberry Pi Model B rev2 Rev 2.0" => return Ok(Model::RaspberryPiBRev2),
_ => (),
}
if let Some(idx) = base_model.find(" Rev ") {
base_model.truncate(idx);
}
// Based on /arch/arm/boot/dts/ and /Documentation/devicetree/bindings/arm/bcm/
let model = match &base_model[..] {
"Raspberry Pi Model B (no P5)" => Model::RaspberryPiBRev1,
"Raspberry Pi Model B" => Model::RaspberryPiBRev1,
"Raspberry Pi Model A" => Model::RaspberryPiA,
"Raspberry Pi Model B rev2" => Model::RaspberryPiBRev2,
"Raspberry Pi Model A+" => Model::RaspberryPiAPlus,
"Raspberry Pi Model A Plus" => Model::RaspberryPiAPlus,
"Raspberry Pi Model B+" => Model::RaspberryPiBPlus,
"Raspberry Pi Model B Plus" => Model::RaspberryPiBPlus,
"Raspberry Pi 2 Model B" => Model::RaspberryPi2B,
"Raspberry Pi Compute Module" => Model::RaspberryPiComputeModule,
"Raspberry Pi 3 Model B" => Model::RaspberryPi3B,
"Raspberry Pi Zero" => Model::RaspberryPiZero,
"Raspberry Pi Compute Module 3" => Model::RaspberryPiComputeModule3,
"Raspberry Pi Compute Module 3 Plus" => Model::RaspberryPiComputeModule3Plus,
"Raspberry Pi Zero W" => Model::RaspberryPiZeroW,
"Raspberry Pi 3 Model B+" => Model::RaspberryPi3BPlus,
"Raspberry Pi 3 Model B Plus" => Model::RaspberryPi3BPlus,
"Raspberry Pi 3 Model A Plus" => Model::RaspberryPi3APlus,
"Raspberry Pi 4 Model B" => Model::RaspberryPi4B,
"Raspberry Pi 400" => Model::RaspberryPi400,
"Raspberry Pi Compute Module 4" => Model::RaspberryPiComputeModule4,
_ => return Err(Error::UnknownModel),
};
Ok(model)
}
/// Retrieves Raspberry Pi device information.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct DeviceInfo {
    // Identified board model.
    model: Model,
    // SoC derived from the model in `DeviceInfo::new`.
    soc: SoC,
    // Base memory address of the BCM283x peripherals for this model.
    peripheral_base: u32,
    // Offset of the GPIO section relative to `peripheral_base`.
    gpio_offset: u32,
}
impl DeviceInfo {
    /// Constructs a new `DeviceInfo`.
    ///
    /// `new` attempts to identify the Raspberry Pi's model and SoC based on
    /// the contents of `/proc/cpuinfo`, `/sys/firmware/devicetree/base/compatible`
    /// and `/sys/firmware/devicetree/base/model`.
    pub fn new() -> Result<DeviceInfo> {
        // Parse order from most-detailed to least-detailed info
        let model = parse_proc_cpuinfo()
            .or_else(|_| parse_base_compatible().or_else(|_| parse_base_model()))?;

        // Every supported model shares the same GPIO offset; only the SoC and
        // peripheral base address differ between generations.
        let (soc, peripheral_base) = match model {
            Model::RaspberryPiA
            | Model::RaspberryPiAPlus
            | Model::RaspberryPiBRev1
            | Model::RaspberryPiBRev2
            | Model::RaspberryPiBPlus
            | Model::RaspberryPiComputeModule
            | Model::RaspberryPiZero
            | Model::RaspberryPiZeroW => (SoC::Bcm2835, PERIPHERAL_BASE_RPI),
            Model::RaspberryPi2B => (SoC::Bcm2836, PERIPHERAL_BASE_RPI2),
            Model::RaspberryPi3B | Model::RaspberryPiComputeModule3 => {
                (SoC::Bcm2837A1, PERIPHERAL_BASE_RPI2)
            }
            Model::RaspberryPi3BPlus
            | Model::RaspberryPi3APlus
            | Model::RaspberryPiComputeModule3Plus => (SoC::Bcm2837B0, PERIPHERAL_BASE_RPI2),
            Model::RaspberryPi4B | Model::RaspberryPi400 | Model::RaspberryPiComputeModule4 => {
                (SoC::Bcm2711, PERIPHERAL_BASE_RPI4)
            }
        };

        Ok(DeviceInfo {
            model,
            soc,
            peripheral_base,
            gpio_offset: GPIO_OFFSET,
        })
    }

    /// Returns the Raspberry Pi's model.
    pub fn model(&self) -> Model {
        self.model
    }

    /// Returns the Raspberry Pi's SoC.
    pub fn soc(&self) -> SoC {
        self.soc
    }

    /// Returns the base memory address for the BCM283x peripherals.
    pub(crate) fn peripheral_base(&self) -> u32 {
        self.peripheral_base
    }

    /// Returns the offset from the base memory address for the GPIO section.
    pub(crate) fn gpio_offset(&self) -> u32 {
        self.gpio_offset
    }
}
// Refactor code based on clippy suggestions
// Copyright (c) 2017-2021 Rene van der Meer
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Raspberry Pi system-related tools.
//!
//! Use [`DeviceInfo`] to identify the Raspberry Pi's model and SoC.
//!
//! [`DeviceInfo`]: struct.DeviceInfo.html
use std::error;
use std::fmt;
use std::fs;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::result;
// Base memory address of the BCM283x peripheral block for each board
// generation, and the offset of the GPIO section within that block
// (see the model-to-address mapping in `DeviceInfo::new`).
const PERIPHERAL_BASE_RPI: u32 = 0x2000_0000; // BCM2835 boards
const PERIPHERAL_BASE_RPI2: u32 = 0x3f00_0000; // BCM2836/BCM2837 boards
const PERIPHERAL_BASE_RPI4: u32 = 0xfe00_0000; // BCM2711 boards
const GPIO_OFFSET: u32 = 0x20_0000;
/// Errors that can occur when trying to identify the Raspberry Pi hardware.
#[derive(Debug)]
pub enum Error {
    // Every identification function in this module reports failure with
    // this single variant.
    /// Unknown model.
    ///
    /// `DeviceInfo` was unable to identify the Raspberry Pi model based on the
    /// contents of `/proc/cpuinfo`, `/sys/firmware/devicetree/base/compatible`
    /// and `/sys/firmware/devicetree/base/model`.
    ///
    /// Support for new models is usually added shortly after they are officially
    /// announced and available to the public. Make sure you're using the latest
    /// release of RPPAL.
    ///
    /// You may also encounter this error if your Linux distribution
    /// doesn't provide any of the common user-accessible system files
    /// that are used to identify the model and SoC.
    UnknownModel,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::UnknownModel => write!(f, "Unknown Raspberry Pi model"),
}
}
}
impl error::Error for Error {}
/// Result type returned from methods that can have `system::Error`s.
pub type Result<T> = result::Result<T, Error>;
/// Identifiable Raspberry Pi models.
///
/// `Model` might be extended with additional variants in a minor or
/// patch revision, and must not be exhaustively matched against.
/// Instead, add a `_` catch-all arm to match future variants.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[non_exhaustive]
pub enum Model {
    // BCM2835-based boards (see the SoC mapping in `DeviceInfo::new`).
    RaspberryPiA,
    RaspberryPiAPlus,
    RaspberryPiBRev1,
    RaspberryPiBRev2,
    RaspberryPiBPlus,
    // BCM2836-based.
    RaspberryPi2B,
    // BCM2837-based (A1 or B0 stepping, per `DeviceInfo::new`).
    RaspberryPi3APlus,
    RaspberryPi3B,
    RaspberryPi3BPlus,
    // BCM2711-based.
    RaspberryPi4B,
    RaspberryPi400,
    // Compute Module line; SoC varies by generation.
    RaspberryPiComputeModule,
    RaspberryPiComputeModule3,
    RaspberryPiComputeModule3Plus,
    RaspberryPiComputeModule4,
    // Zero line (BCM2835).
    RaspberryPiZero,
    RaspberryPiZeroW,
}
impl fmt::Display for Model {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Resolve the human-readable model name first, then emit it once.
        let name = match self {
            Model::RaspberryPiA => "Raspberry Pi A",
            Model::RaspberryPiAPlus => "Raspberry Pi A+",
            Model::RaspberryPiBRev1 => "Raspberry Pi B Rev 1",
            Model::RaspberryPiBRev2 => "Raspberry Pi B Rev 2",
            Model::RaspberryPiBPlus => "Raspberry Pi B+",
            Model::RaspberryPi2B => "Raspberry Pi 2 B",
            Model::RaspberryPi3B => "Raspberry Pi 3 B",
            Model::RaspberryPi3BPlus => "Raspberry Pi 3 B+",
            Model::RaspberryPi3APlus => "Raspberry Pi 3 A+",
            Model::RaspberryPi4B => "Raspberry Pi 4 B",
            Model::RaspberryPi400 => "Raspberry Pi 400",
            Model::RaspberryPiComputeModule => "Raspberry Pi Compute Module",
            Model::RaspberryPiComputeModule3 => "Raspberry Pi Compute Module 3",
            Model::RaspberryPiComputeModule3Plus => "Raspberry Pi Compute Module 3+",
            Model::RaspberryPiComputeModule4 => "Raspberry Pi Compute Module 4",
            Model::RaspberryPiZero => "Raspberry Pi Zero",
            Model::RaspberryPiZeroW => "Raspberry Pi Zero W",
        };
        f.write_str(name)
    }
}
/// Identifiable Raspberry Pi SoCs.
///
/// `SoC` might be extended with additional variants in a minor or
/// patch revision, and must not be exhaustively matched against.
/// Instead, add a `_` catch-all arm to match future variants.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[non_exhaustive]
pub enum SoC {
    // First-generation boards and the Zero line (see `DeviceInfo::new`).
    Bcm2835,
    // Raspberry Pi 2 B.
    Bcm2836,
    // Raspberry Pi 3 B and Compute Module 3.
    Bcm2837A1,
    // Raspberry Pi 3 B+, 3 A+ and Compute Module 3+.
    Bcm2837B0,
    // Raspberry Pi 4 B, 400 and Compute Module 4.
    Bcm2711,
}
impl fmt::Display for SoC {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Resolve the canonical BCM designation first, then emit it once.
        let name = match self {
            SoC::Bcm2835 => "BCM2835",
            SoC::Bcm2836 => "BCM2836",
            SoC::Bcm2837A1 => "BCM2837A1",
            SoC::Bcm2837B0 => "BCM2837B0",
            SoC::Bcm2711 => "BCM2711",
        };
        f.write_str(name)
    }
}
// Identify Pi model based on /proc/cpuinfo
fn parse_proc_cpuinfo() -> Result<Model> {
let proc_cpuinfo = BufReader::new(match File::open("/proc/cpuinfo") {
Ok(file) => file,
Err(_) => return Err(Error::UnknownModel),
});
let mut hardware: String = String::new();
let mut revision: String = String::new();
for line in proc_cpuinfo.lines().flatten() {
if let Some(line_value) = line.strip_prefix("Hardware\t: ") {
hardware = String::from(line_value);
} else if let Some(line_value) = line.strip_prefix("Revision\t: ") {
revision = String::from(line_value).to_lowercase();
}
}
// Return an error if we don't recognize the SoC. This check is
// done to prevent accidentally identifying a non-Pi SBC as a Pi
// solely based on the revision field.
match &hardware[..] {
"BCM2708" | "BCM2835" | "BCM2709" | "BCM2836" | "BCM2710" | "BCM2837" | "BCM2837A1"
| "BCM2837B0" | "BCM2711" => {}
_ => return Err(Error::UnknownModel),
}
let model = if (revision.len() == 4) || (revision.len() == 8) {
// Older revisions are 4 characters long, or 8 if they've been over-volted
match &revision[revision.len() - 4..] {
"0007" | "0008" | "0009" | "0015" => Model::RaspberryPiA,
"beta" | "0002" | "0003" => Model::RaspberryPiBRev1,
"0004" | "0005" | "0006" | "000d" | "000e" | "000f" => Model::RaspberryPiBRev2,
"0012" => Model::RaspberryPiAPlus,
"0010" | "0013" => Model::RaspberryPiBPlus,
"0011" | "0014" => Model::RaspberryPiComputeModule,
_ => return Err(Error::UnknownModel),
}
} else if revision.len() >= 6 {
// Newer revisions consist of at least 6 characters
match &revision[..] {
"900021" => Model::RaspberryPiAPlus,
"900032" => Model::RaspberryPiBPlus,
"a01040" | "a01041" | "a21041" | "a22042" => Model::RaspberryPi2B,
"a02082" | "a22082" | "a32082" | "a52082" => Model::RaspberryPi3B,
"900092" | "900093" | "920092" | "920093" => Model::RaspberryPiZero,
"a020a0" | "a220a0" => Model::RaspberryPiComputeModule3,
"9000c1" => Model::RaspberryPiZeroW,
"a020d3" => Model::RaspberryPi3BPlus,
"9020e0" => Model::RaspberryPi3APlus,
"a02100" => Model::RaspberryPiComputeModule3Plus,
"a03111" | "a03112" | "b03111" | "b03112" | "b03114" | "c03111" | "c03112"
| "c03114" | "d03114" => Model::RaspberryPi4B,
"c03130" => Model::RaspberryPi400,
"a03140" | "b03140" => Model::RaspberryPiComputeModule4,
_ => return Err(Error::UnknownModel),
}
} else {
return Err(Error::UnknownModel);
};
Ok(model)
}
// Identify Pi model based on /sys/firmware/devicetree/base/compatible
fn parse_base_compatible() -> Result<Model> {
let base_compatible = match fs::read_to_string("/sys/firmware/devicetree/base/compatible") {
Ok(buffer) => buffer,
Err(_) => return Err(Error::UnknownModel),
};
// Based on /arch/arm/boot/dts/ and /Documentation/devicetree/bindings/arm/bcm/
for comp_id in base_compatible.split('\0') {
let model = match comp_id {
"raspberrypi,model-b-i2c0" => Model::RaspberryPiBRev1,
"raspberrypi,model-b" => Model::RaspberryPiBRev1,
"raspberrypi,model-a" => Model::RaspberryPiA,
"raspberrypi,model-b-rev2" => Model::RaspberryPiBRev2,
"raspberrypi,model-a-plus" => Model::RaspberryPiAPlus,
"raspberrypi,model-b-plus" => Model::RaspberryPiBPlus,
"raspberrypi,2-model-b" => Model::RaspberryPi2B,
"raspberrypi,compute-module" => Model::RaspberryPiComputeModule,
"raspberrypi,3-model-b" => Model::RaspberryPi3B,
"raspberrypi,model-zero" => Model::RaspberryPiZero,
"raspberrypi,3-compute-module" => Model::RaspberryPiComputeModule3,
"raspberrypi,3-compute-module-plus" => Model::RaspberryPiComputeModule3Plus,
"raspberrypi,model-zero-w" => Model::RaspberryPiZeroW,
"raspberrypi,3-model-b-plus" => Model::RaspberryPi3BPlus,
"raspberrypi,3-model-a-plus" => Model::RaspberryPi3APlus,
"raspberrypi,4-model-b" => Model::RaspberryPi4B,
"raspberrypi,400" => Model::RaspberryPi400,
"raspberrypi,4-compute-module" => Model::RaspberryPiComputeModule4,
_ => continue,
};
return Ok(model);
}
Err(Error::UnknownModel)
}
// Identify Pi model based on /sys/firmware/devicetree/base/model
fn parse_base_model() -> Result<Model> {
let mut base_model = match fs::read_to_string("/sys/firmware/devicetree/base/model") {
Ok(mut buffer) => {
if let Some(idx) = buffer.find('\0') {
buffer.truncate(idx);
buffer
} else {
buffer
}
}
Err(_) => return Err(Error::UnknownModel),
};
// Check if this is a Pi B rev 2 before we remove the revision part, assuming the
// PCB Revision numbers on https://elinux.org/RPi_HardwareHistory are correct, and
// the installed distro appends the revision to the model name.
match &base_model[..] {
"Raspberry Pi Model B Rev 2.0" => return Ok(Model::RaspberryPiBRev2),
"Raspberry Pi Model B rev2 Rev 2.0" => return Ok(Model::RaspberryPiBRev2),
_ => (),
}
if let Some(idx) = base_model.find(" Rev ") {
base_model.truncate(idx);
}
// Based on /arch/arm/boot/dts/ and /Documentation/devicetree/bindings/arm/bcm/
let model = match &base_model[..] {
"Raspberry Pi Model B (no P5)" => Model::RaspberryPiBRev1,
"Raspberry Pi Model B" => Model::RaspberryPiBRev1,
"Raspberry Pi Model A" => Model::RaspberryPiA,
"Raspberry Pi Model B rev2" => Model::RaspberryPiBRev2,
"Raspberry Pi Model A+" => Model::RaspberryPiAPlus,
"Raspberry Pi Model A Plus" => Model::RaspberryPiAPlus,
"Raspberry Pi Model B+" => Model::RaspberryPiBPlus,
"Raspberry Pi Model B Plus" => Model::RaspberryPiBPlus,
"Raspberry Pi 2 Model B" => Model::RaspberryPi2B,
"Raspberry Pi Compute Module" => Model::RaspberryPiComputeModule,
"Raspberry Pi 3 Model B" => Model::RaspberryPi3B,
"Raspberry Pi Zero" => Model::RaspberryPiZero,
"Raspberry Pi Compute Module 3" => Model::RaspberryPiComputeModule3,
"Raspberry Pi Compute Module 3 Plus" => Model::RaspberryPiComputeModule3Plus,
"Raspberry Pi Zero W" => Model::RaspberryPiZeroW,
"Raspberry Pi 3 Model B+" => Model::RaspberryPi3BPlus,
"Raspberry Pi 3 Model B Plus" => Model::RaspberryPi3BPlus,
"Raspberry Pi 3 Model A Plus" => Model::RaspberryPi3APlus,
"Raspberry Pi 4 Model B" => Model::RaspberryPi4B,
"Raspberry Pi 400" => Model::RaspberryPi400,
"Raspberry Pi Compute Module 4" => Model::RaspberryPiComputeModule4,
_ => return Err(Error::UnknownModel),
};
Ok(model)
}
/// Retrieves Raspberry Pi device information.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct DeviceInfo {
    // Identified board model.
    model: Model,
    // SoC derived from the model in `DeviceInfo::new`.
    soc: SoC,
    // Base memory address of the BCM283x peripherals for this model.
    peripheral_base: u32,
    // Offset of the GPIO section relative to `peripheral_base`.
    gpio_offset: u32,
}
impl DeviceInfo {
    /// Constructs a new `DeviceInfo`.
    ///
    /// `new` attempts to identify the Raspberry Pi's model and SoC based on
    /// the contents of `/proc/cpuinfo`, `/sys/firmware/devicetree/base/compatible`
    /// and `/sys/firmware/devicetree/base/model`.
    pub fn new() -> Result<DeviceInfo> {
        // Parse order from most-detailed to least-detailed info
        let model = parse_proc_cpuinfo()
            .or_else(|_| parse_base_compatible().or_else(|_| parse_base_model()))?;

        // Every supported model shares the same GPIO offset; only the SoC and
        // peripheral base address differ between generations.
        let (soc, peripheral_base) = match model {
            Model::RaspberryPiA
            | Model::RaspberryPiAPlus
            | Model::RaspberryPiBRev1
            | Model::RaspberryPiBRev2
            | Model::RaspberryPiBPlus
            | Model::RaspberryPiComputeModule
            | Model::RaspberryPiZero
            | Model::RaspberryPiZeroW => (SoC::Bcm2835, PERIPHERAL_BASE_RPI),
            Model::RaspberryPi2B => (SoC::Bcm2836, PERIPHERAL_BASE_RPI2),
            Model::RaspberryPi3B | Model::RaspberryPiComputeModule3 => {
                (SoC::Bcm2837A1, PERIPHERAL_BASE_RPI2)
            }
            Model::RaspberryPi3BPlus
            | Model::RaspberryPi3APlus
            | Model::RaspberryPiComputeModule3Plus => (SoC::Bcm2837B0, PERIPHERAL_BASE_RPI2),
            Model::RaspberryPi4B | Model::RaspberryPi400 | Model::RaspberryPiComputeModule4 => {
                (SoC::Bcm2711, PERIPHERAL_BASE_RPI4)
            }
        };

        Ok(DeviceInfo {
            model,
            soc,
            peripheral_base,
            gpio_offset: GPIO_OFFSET,
        })
    }

    /// Returns the Raspberry Pi's model.
    pub fn model(&self) -> Model {
        self.model
    }

    /// Returns the Raspberry Pi's SoC.
    pub fn soc(&self) -> SoC {
        self.soc
    }

    /// Returns the base memory address for the BCM283x peripherals.
    pub(crate) fn peripheral_base(&self) -> u32 {
        self.peripheral_base
    }

    /// Returns the offset from the base memory address for the GPIO section.
    pub(crate) fn gpio_offset(&self) -> u32 {
        self.gpio_offset
    }
}
// --------------------------------------------------------------------------
// Copyright (c) 2017-2018 Rene van der Meer
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Raspberry Pi system-related tools.
//!
//! Use [`DeviceInfo`] to identify what Raspberry Pi model and SoC the software is
//! running on. This information is used internally to calculate the correct memory
//! locations for the various BCM283x peripherals.
//!
//! [`DeviceInfo`]: struct.DeviceInfo.html
use std::fmt;
use std::fs;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::result;
// Base memory address of the BCM283x peripheral block (Pi 1 family vs
// Pi 2/3 family), and the offset of the GPIO section within that block
// (see the model-to-address mapping in `DeviceInfo::new`).
const PERIPHERAL_BASE_RPI: u32 = 0x2000_0000;
const PERIPHERAL_BASE_RPI2: u32 = 0x3f00_0000;
const GPIO_OFFSET: u32 = 0x20_0000;
// `quick_error!` expands this declaration into an error enum with
// `std::error::Error` and `Display` implementations, using the
// `description(...)` string as the error message.
quick_error! {
    /// Errors that can occur when trying to identify the Raspberry Pi hardware.
    #[derive(Debug)]
    pub enum Error {
        /// Unknown model.
        ///
        /// `DeviceInfo` was unable to identify the Raspberry Pi model based on the
        /// contents of `/proc/cpuinfo`, `/sys/firmware/devicetree/base/compatible`
        /// and `/sys/firmware/devicetree/base/model`.
        UnknownModel { description("unknown Raspberry Pi model") }
    }
}
/// Result type returned from methods that can have `system::Error`s.
pub type Result<T> = result::Result<T, Error>;
/// Identifiable Raspberry Pi models.
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum Model {
    RaspberryPiA,
    RaspberryPiAPlus,
    // Plain `RaspberryPiB` is used when the source (e.g. the devicetree
    // "raspberrypi,model-b" id in `parse_firmware_compatible`) doesn't
    // distinguish between the Rev 1 and Rev 2 boards.
    RaspberryPiB,
    RaspberryPiBRev1,
    RaspberryPiBRev2,
    RaspberryPiBPlus,
    RaspberryPi2B,
    RaspberryPi3B,
    RaspberryPi3BPlus,
    RaspberryPiComputeModule,
    RaspberryPiComputeModule3,
    RaspberryPiZero,
    RaspberryPiZeroW,
}
impl fmt::Display for Model {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Resolve the human-readable model name first, then emit it once.
        let name = match *self {
            Model::RaspberryPiA => "Raspberry Pi A",
            Model::RaspberryPiAPlus => "Raspberry Pi A+",
            Model::RaspberryPiB => "Raspberry Pi B",
            Model::RaspberryPiBRev1 => "Raspberry Pi B Rev 1",
            Model::RaspberryPiBRev2 => "Raspberry Pi B Rev 2",
            Model::RaspberryPiBPlus => "Raspberry Pi B+",
            Model::RaspberryPi2B => "Raspberry Pi 2 B",
            Model::RaspberryPi3B => "Raspberry Pi 3 B",
            Model::RaspberryPi3BPlus => "Raspberry Pi 3 B+",
            Model::RaspberryPiComputeModule => "Raspberry Pi Compute Module",
            Model::RaspberryPiComputeModule3 => "Raspberry Pi Compute Module 3",
            Model::RaspberryPiZero => "Raspberry Pi Zero",
            Model::RaspberryPiZeroW => "Raspberry Pi Zero W",
        };
        f.write_str(name)
    }
}
/// Identifiable Raspberry Pi SoCs.
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum SoC {
    // First-generation boards and the Zero line (see `DeviceInfo::new`).
    Bcm2835,
    // Raspberry Pi 2 B.
    Bcm2836,
    // Raspberry Pi 3 B and Compute Module 3.
    Bcm2837A1,
    // Raspberry Pi 3 B+.
    Bcm2837B0,
}
impl fmt::Display for SoC {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Resolve the canonical BCM designation first, then emit it once.
        let name = match *self {
            SoC::Bcm2835 => "BCM2835",
            SoC::Bcm2836 => "BCM2836",
            SoC::Bcm2837A1 => "BCM2837A1",
            SoC::Bcm2837B0 => "BCM2837B0",
        };
        f.write_str(name)
    }
}
// Identify Pi model based on /proc/cpuinfo
//
// Reads the Hardware and Revision fields, verifies the SoC string is one of
// the known Raspberry Pi SoCs, and then decodes the (old- or new-style)
// revision code into a `Model`.
fn parse_proc_cpuinfo() -> Result<Model> {
    let proc_cpuinfo = BufReader::new(match File::open("/proc/cpuinfo") {
        Err(_) => return Err(Error::UnknownModel),
        Ok(file) => file,
    });

    let mut hardware: String = String::new();
    let mut revision: String = String::new();
    for line_result in proc_cpuinfo.lines() {
        if let Ok(line) = line_result {
            if line.starts_with("Hardware\t: ") {
                hardware = String::from(&line[11..]);
            } else if line.starts_with("Revision\t: ") {
                // Revision codes are compared in lowercase below.
                revision = String::from(&line[11..]).to_lowercase();
            }
        }
    }

    // Return an error if we don't recognize the SoC. This check is
    // done to prevent accidentally identifying a non-Pi SBC as a Pi
    // solely based on the revision field.
    match &hardware[..] {
        "BCM2708" | "BCM2835" | "BCM2709" | "BCM2836" | "BCM2710" | "BCM2837" | "BCM2837A1"
        | "BCM2837B0" => {}
        _ => return Err(Error::UnknownModel),
    }

    let model = if (revision.len() == 4) || (revision.len() == 8) {
        // Older revisions are 4 characters long, or 8 if they've been over-volted
        match &revision[revision.len() - 4..] {
            "0007" | "0008" | "0009" | "0015" => Model::RaspberryPiA,
            // Fix: `revision` is lowercased above, so the previous "Beta"
            // pattern was a dead arm that could never match; the early
            // beta Model B boards are identified by "beta".
            "beta" | "0002" | "0003" => Model::RaspberryPiBRev1,
            "0004" | "0005" | "0006" | "000d" | "000e" | "000f" => Model::RaspberryPiBRev2,
            "0012" => Model::RaspberryPiAPlus,
            "0010" | "0013" => Model::RaspberryPiBPlus,
            "0011" | "0014" => Model::RaspberryPiComputeModule,
            _ => return Err(Error::UnknownModel),
        }
    } else if revision.len() >= 6 {
        // Newer revisions consist of at least 6 characters
        match &revision[revision.len() - 3..revision.len() - 1] {
            "00" => Model::RaspberryPiA,
            "01" => Model::RaspberryPiBRev2,
            "02" => Model::RaspberryPiAPlus,
            "03" => Model::RaspberryPiBPlus,
            "04" => Model::RaspberryPi2B,
            "06" => Model::RaspberryPiComputeModule,
            "08" => Model::RaspberryPi3B,
            "09" => Model::RaspberryPiZero,
            "0a" => Model::RaspberryPiComputeModule3,
            "0c" => Model::RaspberryPiZeroW,
            "0d" => Model::RaspberryPi3BPlus,
            _ => return Err(Error::UnknownModel),
        }
    } else {
        return Err(Error::UnknownModel);
    };

    Ok(model)
}
// Identify Pi model based on /sys/firmware/devicetree/base/compatible
fn parse_firmware_compatible() -> Result<Model> {
let compatible = match fs::read_to_string("/sys/firmware/devicetree/base/compatible") {
Err(_) => return Err(Error::UnknownModel),
Ok(s) => s,
};
// Based on /arch/arm/boot/dts/ and /Documentation/devicetree/bindings/arm/bcm/
for compid in compatible.split("\0") {
match compid {
"raspberrypi,model-b-i2c0" => return Ok(Model::RaspberryPiBRev1),
"raspberrypi,model-b" => return Ok(Model::RaspberryPiB),
"raspberrypi,model-a" => return Ok(Model::RaspberryPiA),
"raspberrypi,model-b-rev2" => return Ok(Model::RaspberryPiBRev2),
"raspberrypi,model-a-plus" => return Ok(Model::RaspberryPiAPlus),
"raspberrypi,model-b-plus" => return Ok(Model::RaspberryPiBPlus),
"raspberrypi,2-model-b" => return Ok(Model::RaspberryPi2B),
"raspberrypi,compute-module" => return Ok(Model::RaspberryPiComputeModule),
"raspberrypi,3-model-b" => return Ok(Model::RaspberryPi3B),
"raspberrypi,model-zero" => return Ok(Model::RaspberryPiZero),
"raspberrypi,3-compute-module" => return Ok(Model::RaspberryPiComputeModule3),
"raspberrypi,model-zero-w" => return Ok(Model::RaspberryPiZeroW),
"raspberrypi,3-model-b-plus" => return Ok(Model::RaspberryPi3BPlus),
_ => (),
}
}
Err(Error::UnknownModel)
}
// Identify Pi model based on /sys/firmware/devicetree/base/model
fn parse_firmware_model() -> Result<Model> {
let model = match fs::read_to_string("/sys/firmware/devicetree/base/model") {
Err(_) => return Err(Error::UnknownModel),
Ok(buffer) => if let Some(rev_idx) = buffer.find(" Rev ") {
// We don't want to strip rev2 here
buffer[0..rev_idx].to_owned()
} else {
buffer
},
};
// Based on /arch/arm/boot/dts/ and /Documentation/devicetree/bindings/arm/bcm/
match &model[..] {
"Raspberry Pi Model B (no P5)" => return Ok(Model::RaspberryPiBRev1),
"Raspberry Pi Model B" => return Ok(Model::RaspberryPiB),
"Raspberry Pi Model A" => return Ok(Model::RaspberryPiA),
"Raspberry Pi Model B rev2" => return Ok(Model::RaspberryPiBRev2),
"Raspberry Pi Model A+" => return Ok(Model::RaspberryPiAPlus),
"Raspberry Pi Model B+" => return Ok(Model::RaspberryPiBPlus),
"Raspberry Pi 2 Model B" => return Ok(Model::RaspberryPi2B),
"Raspberry Pi Compute Module" => return Ok(Model::RaspberryPiComputeModule),
"Raspberry Pi 3 Model B" => return Ok(Model::RaspberryPi3B),
"Raspberry Pi Zero" => return Ok(Model::RaspberryPiZero),
"Raspberry Pi Compute Module 3" => return Ok(Model::RaspberryPiComputeModule3),
"Raspberry Pi Zero W" => return Ok(Model::RaspberryPiZeroW),
"Raspberry Pi 3 Model B+" => return Ok(Model::RaspberryPi3BPlus),
_ => return Err(Error::UnknownModel),
}
}
/// Retrieves Raspberry Pi device information.
#[derive(Debug, PartialEq, Copy, Clone)]
pub struct DeviceInfo {
    // Identified board model.
    model: Model,
    // SoC derived from the model in `DeviceInfo::new`.
    soc: SoC,
    // Base memory address of the BCM283x peripherals for this model.
    peripheral_base: u32,
    // Offset of the GPIO section relative to `peripheral_base`.
    gpio_offset: u32,
}
impl DeviceInfo {
    /// Constructs a new `DeviceInfo`.
    ///
    /// `new` attempts to identify the Raspberry Pi's model and SoC based on
    /// the contents of `/proc/cpuinfo`, falling back to
    /// `/sys/firmware/devicetree/base/compatible` and
    /// `/sys/firmware/devicetree/base/model`.
    pub fn new() -> Result<DeviceInfo> {
        // Fix: use or_else() rather than or(). or()'s argument is evaluated
        // eagerly, so the previous code performed the devicetree file reads
        // on every call even when /proc/cpuinfo already identified the model.
        let model = parse_proc_cpuinfo()
            .or_else(|_| parse_firmware_compatible().or_else(|_| parse_firmware_model()))?;

        // Set SoC and memory offsets based on model
        match model {
            Model::RaspberryPiA
            | Model::RaspberryPiAPlus
            | Model::RaspberryPiB
            | Model::RaspberryPiBRev1
            | Model::RaspberryPiBRev2
            | Model::RaspberryPiBPlus
            | Model::RaspberryPiComputeModule
            | Model::RaspberryPiZero
            | Model::RaspberryPiZeroW => Ok(DeviceInfo {
                model,
                soc: SoC::Bcm2835,
                peripheral_base: PERIPHERAL_BASE_RPI,
                gpio_offset: GPIO_OFFSET,
            }),
            Model::RaspberryPi2B => Ok(DeviceInfo {
                model,
                soc: SoC::Bcm2836,
                peripheral_base: PERIPHERAL_BASE_RPI2,
                gpio_offset: GPIO_OFFSET,
            }),
            Model::RaspberryPi3B | Model::RaspberryPiComputeModule3 => Ok(DeviceInfo {
                model,
                soc: SoC::Bcm2837A1,
                peripheral_base: PERIPHERAL_BASE_RPI2,
                gpio_offset: GPIO_OFFSET,
            }),
            Model::RaspberryPi3BPlus => Ok(DeviceInfo {
                model,
                soc: SoC::Bcm2837B0,
                peripheral_base: PERIPHERAL_BASE_RPI2,
                gpio_offset: GPIO_OFFSET,
            }),
        }
    }

    /// Returns the Raspberry Pi's model.
    pub fn model(&self) -> Model {
        self.model
    }

    /// Returns the Raspberry Pi's SoC.
    pub fn soc(&self) -> SoC {
        self.soc
    }

    /// Returns the base memory address for the BCM283x peripherals.
    pub fn peripheral_base(&self) -> u32 {
        self.peripheral_base
    }

    /// Returns the offset from the base memory address for the GPIO section.
    pub fn gpio_offset(&self) -> u32 {
        self.gpio_offset
    }
}
// Identify all of the early Pi B boards as Rev1
// Copyright (c) 2017-2018 Rene van der Meer
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Raspberry Pi system-related tools.
//!
//! Use [`DeviceInfo`] to identify what Raspberry Pi model and SoC the software is
//! running on. This information is used internally to calculate the correct memory
//! locations for the various BCM283x peripherals.
//!
//! [`DeviceInfo`]: struct.DeviceInfo.html
use std::fmt;
use std::fs;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::result;
// Peripheral base address on BCM2835-based boards (A/B/A+/B+/CM/Zero).
const PERIPHERAL_BASE_RPI: u32 = 0x2000_0000;
// Peripheral base address on BCM2836/BCM2837-based boards (2B/3B/3B+/CM3).
const PERIPHERAL_BASE_RPI2: u32 = 0x3f00_0000;
// Offset of the GPIO register block within the peripheral address space.
const GPIO_OFFSET: u32 = 0x20_0000;
// The quick_error! macro generates the Error enum together with its
// Display and std::error::Error implementations.
quick_error! {
    /// Errors that can occur when trying to identify the Raspberry Pi hardware.
    #[derive(Debug)]
    pub enum Error {
        /// Unknown model.
        ///
        /// `DeviceInfo` was unable to identify the Raspberry Pi model based on the
        /// contents of `/proc/cpuinfo`, `/sys/firmware/devicetree/base/compatible`
        /// and `/sys/firmware/devicetree/base/model`.
        UnknownModel { description("unknown Raspberry Pi model") }
    }
}
/// Result type returned from methods that can have `system::Error`s.
///
/// Shorthand for `std::result::Result<T, system::Error>`.
pub type Result<T> = result::Result<T, Error>;
/// Identifiable Raspberry Pi models.
///
/// Early Model B boards are identified by revision: `RaspberryPiBRev1`
/// for the original boards, `RaspberryPiBRev2` for the later revision.
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum Model {
    RaspberryPiA,
    RaspberryPiAPlus,
    RaspberryPiBRev1,
    RaspberryPiBRev2,
    RaspberryPiBPlus,
    RaspberryPi2B,
    RaspberryPi3B,
    RaspberryPi3BPlus,
    RaspberryPiComputeModule,
    RaspberryPiComputeModule3,
    RaspberryPiZero,
    RaspberryPiZeroW,
}
impl fmt::Display for Model {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Map each variant to its human-readable marketing name.
        let name = match *self {
            Model::RaspberryPiA => "Raspberry Pi A",
            Model::RaspberryPiAPlus => "Raspberry Pi A+",
            Model::RaspberryPiBRev1 => "Raspberry Pi B Rev 1",
            Model::RaspberryPiBRev2 => "Raspberry Pi B Rev 2",
            Model::RaspberryPiBPlus => "Raspberry Pi B+",
            Model::RaspberryPi2B => "Raspberry Pi 2 B",
            Model::RaspberryPi3B => "Raspberry Pi 3 B",
            Model::RaspberryPi3BPlus => "Raspberry Pi 3 B+",
            Model::RaspberryPiComputeModule => "Raspberry Pi Compute Module",
            Model::RaspberryPiComputeModule3 => "Raspberry Pi Compute Module 3",
            Model::RaspberryPiZero => "Raspberry Pi Zero",
            Model::RaspberryPiZeroW => "Raspberry Pi Zero W",
        };
        write!(f, "{}", name)
    }
}
/// Identifiable Raspberry Pi SoCs.
///
/// The SoC determines the peripheral base address used for memory-mapped
/// I/O (see `DeviceInfo::peripheral_base`).
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum SoC {
    Bcm2835,
    Bcm2836,
    Bcm2837A1,
    Bcm2837B0,
}
impl fmt::Display for SoC {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Render the canonical Broadcom part name.
        let name = match *self {
            SoC::Bcm2835 => "BCM2835",
            SoC::Bcm2836 => "BCM2836",
            SoC::Bcm2837A1 => "BCM2837A1",
            SoC::Bcm2837B0 => "BCM2837B0",
        };
        f.write_str(name)
    }
}
// Identify Pi model based on /proc/cpuinfo
//
// Reads the Hardware and Revision fields. The SoC in the Hardware field is
// validated first so a non-Pi SBC can't be misidentified from its revision
// code alone.
fn parse_proc_cpuinfo() -> Result<Model> {
    let proc_cpuinfo = BufReader::new(match File::open("/proc/cpuinfo") {
        Err(_) => return Err(Error::UnknownModel),
        Ok(file) => file,
    });
    let mut hardware: String = String::new();
    let mut revision: String = String::new();
    for line_result in proc_cpuinfo.lines() {
        if let Ok(line) = line_result {
            if line.starts_with("Hardware\t: ") {
                hardware = String::from(&line[11..]);
            } else if line.starts_with("Revision\t: ") {
                revision = String::from(&line[11..]).to_lowercase();
            }
        }
    }
    // Return an error if we don't recognize the SoC. This check is
    // done to prevent accidentally identifying a non-Pi SBC as a Pi
    // solely based on the revision field.
    match &hardware[..] {
        "BCM2708" | "BCM2835" | "BCM2709" | "BCM2836" | "BCM2710" | "BCM2837" | "BCM2837A1"
        | "BCM2837B0" => {}
        _ => return Err(Error::UnknownModel),
    }
    let model = if (revision.len() == 4) || (revision.len() == 8) {
        // Older revisions are 4 characters long, or 8 if they've been over-volted
        match &revision[revision.len() - 4..] {
            "0007" | "0008" | "0009" | "0015" => Model::RaspberryPiA,
            // BUG FIX: the revision string is lowercased above, so the Beta
            // board has to be matched as "beta" ("Beta" could never match).
            "beta" | "0002" | "0003" => Model::RaspberryPiBRev1,
            "0004" | "0005" | "0006" | "000d" | "000e" | "000f" => Model::RaspberryPiBRev2,
            "0012" => Model::RaspberryPiAPlus,
            "0010" | "0013" => Model::RaspberryPiBPlus,
            "0011" | "0014" => Model::RaspberryPiComputeModule,
            _ => return Err(Error::UnknownModel),
        }
    } else if revision.len() >= 6 {
        // Newer revisions consist of at least 6 characters; the model is
        // encoded in two hex digits near the end of the code.
        match &revision[revision.len() - 3..revision.len() - 1] {
            "00" => Model::RaspberryPiA,
            "01" => Model::RaspberryPiBRev2,
            "02" => Model::RaspberryPiAPlus,
            "03" => Model::RaspberryPiBPlus,
            "04" => Model::RaspberryPi2B,
            "06" => Model::RaspberryPiComputeModule,
            "08" => Model::RaspberryPi3B,
            "09" => Model::RaspberryPiZero,
            "0a" => Model::RaspberryPiComputeModule3,
            "0c" => Model::RaspberryPiZeroW,
            "0d" => Model::RaspberryPi3BPlus,
            _ => return Err(Error::UnknownModel),
        }
    } else {
        return Err(Error::UnknownModel);
    };
    Ok(model)
}
// Identify Pi model based on /sys/firmware/devicetree/base/compatible
//
// The compatible file holds NUL-separated compatibility strings; the first
// one that maps to a known model wins.
fn parse_firmware_compatible() -> Result<Model> {
    let compatible = match fs::read_to_string("/sys/firmware/devicetree/base/compatible") {
        Err(_) => return Err(Error::UnknownModel),
        Ok(s) => s,
    };
    // Based on /arch/arm/boot/dts/ and /Documentation/devicetree/bindings/arm/bcm/
    for compid in compatible.split('\0') {
        match compid {
            // All early Model B boards are identified as Rev 1.
            "raspberrypi,model-b-i2c0"
            | "raspberrypi,model-b" => return Ok(Model::RaspberryPiBRev1),
            "raspberrypi,model-a" => return Ok(Model::RaspberryPiA),
            "raspberrypi,model-b-rev2" => return Ok(Model::RaspberryPiBRev2),
            "raspberrypi,model-a-plus" => return Ok(Model::RaspberryPiAPlus),
            "raspberrypi,model-b-plus" => return Ok(Model::RaspberryPiBPlus),
            "raspberrypi,2-model-b" => return Ok(Model::RaspberryPi2B),
            "raspberrypi,compute-module" => return Ok(Model::RaspberryPiComputeModule),
            "raspberrypi,3-model-b" => return Ok(Model::RaspberryPi3B),
            "raspberrypi,model-zero" => return Ok(Model::RaspberryPiZero),
            "raspberrypi,3-compute-module" => return Ok(Model::RaspberryPiComputeModule3),
            "raspberrypi,model-zero-w" => return Ok(Model::RaspberryPiZeroW),
            "raspberrypi,3-model-b-plus" => return Ok(Model::RaspberryPi3BPlus),
            _ => (),
        }
    }
    Err(Error::UnknownModel)
}
// Identify Pi model based on /sys/firmware/devicetree/base/model
fn parse_firmware_model() -> Result<Model> {
let model = match fs::read_to_string("/sys/firmware/devicetree/base/model") {
Err(_) => return Err(Error::UnknownModel),
Ok(buffer) => if let Some(rev_idx) = buffer.find(" Rev ") {
// We don't want to strip rev2 here
buffer[0..rev_idx].to_owned()
} else {
buffer
},
};
// Based on /arch/arm/boot/dts/ and /Documentation/devicetree/bindings/arm/bcm/
match &model[..] {
"Raspberry Pi Model B (no P5)" => return Ok(Model::RaspberryPiBRev1),
"Raspberry Pi Model B" => return Ok(Model::RaspberryPiBRev1),
"Raspberry Pi Model A" => return Ok(Model::RaspberryPiA),
"Raspberry Pi Model B rev2" => return Ok(Model::RaspberryPiBRev2),
"Raspberry Pi Model A+" => return Ok(Model::RaspberryPiAPlus),
"Raspberry Pi Model B+" => return Ok(Model::RaspberryPiBPlus),
"Raspberry Pi 2 Model B" => return Ok(Model::RaspberryPi2B),
"Raspberry Pi Compute Module" => return Ok(Model::RaspberryPiComputeModule),
"Raspberry Pi 3 Model B" => return Ok(Model::RaspberryPi3B),
"Raspberry Pi Zero" => return Ok(Model::RaspberryPiZero),
"Raspberry Pi Compute Module 3" => return Ok(Model::RaspberryPiComputeModule3),
"Raspberry Pi Zero W" => return Ok(Model::RaspberryPiZeroW),
"Raspberry Pi 3 Model B+" => return Ok(Model::RaspberryPi3BPlus),
_ => return Err(Error::UnknownModel),
}
}
/// Retrieves Raspberry Pi device information.
#[derive(Debug, PartialEq, Copy, Clone)]
pub struct DeviceInfo {
    // Identified board model.
    model: Model,
    // SoC the identified model is based on.
    soc: SoC,
    // Base memory address of the BCM283x peripheral block.
    peripheral_base: u32,
    // Offset of the GPIO registers relative to peripheral_base.
    gpio_offset: u32,
}
impl DeviceInfo {
    /// Constructs a new `DeviceInfo`.
    ///
    /// `new` parses the contents of `/proc/cpuinfo` to identify the Raspberry
    /// Pi's model and SoC.
    pub fn new() -> Result<DeviceInfo> {
        let model =
            parse_proc_cpuinfo().or(parse_firmware_compatible().or(parse_firmware_model()))?;
        // Derive the SoC and peripheral base address from the model. The
        // GPIO offset is identical across all supported boards.
        let (soc, peripheral_base) = match model {
            Model::RaspberryPiA
            | Model::RaspberryPiAPlus
            | Model::RaspberryPiBRev1
            | Model::RaspberryPiBRev2
            | Model::RaspberryPiBPlus
            | Model::RaspberryPiComputeModule
            | Model::RaspberryPiZero
            | Model::RaspberryPiZeroW => (SoC::Bcm2835, PERIPHERAL_BASE_RPI),
            Model::RaspberryPi2B => (SoC::Bcm2836, PERIPHERAL_BASE_RPI2),
            Model::RaspberryPi3B | Model::RaspberryPiComputeModule3 => {
                (SoC::Bcm2837A1, PERIPHERAL_BASE_RPI2)
            }
            Model::RaspberryPi3BPlus => (SoC::Bcm2837B0, PERIPHERAL_BASE_RPI2),
        };
        Ok(DeviceInfo {
            model,
            soc,
            peripheral_base,
            gpio_offset: GPIO_OFFSET,
        })
    }

    /// Returns the Raspberry Pi's model.
    pub fn model(&self) -> Model {
        self.model
    }

    /// Returns the Raspberry Pi's SoC.
    pub fn soc(&self) -> SoC {
        self.soc
    }

    /// Returns the base memory address for the BCM283x peripherals.
    pub fn peripheral_base(&self) -> u32 {
        self.peripheral_base
    }

    /// Returns the offset from the base memory address for the GPIO section.
    pub fn gpio_offset(&self) -> u32 {
        self.gpio_offset
    }
}
|
//
// Sysinfo
//
// Copyright (c) 2015 Guillaume Gomez
//
// Once https://github.com/rust-lang/rfcs/blob/master/text/1422-pub-restricted.md
// feature gets stabilized, we can move common parts in here.
#[cfg(test)]
mod tests {
    use ::{System, SystemExt};
    use ::utils;

    // Refreshing system data should yield plausible, consistent memory values.
    #[test]
    fn test_refresh_system() {
        let mut sys = System::new();
        sys.refresh_system();
        assert!(sys.get_total_memory() != 0);
        assert!(sys.get_free_memory() != 0);
        // Free can never exceed total.
        assert!(sys.get_total_memory() >= sys.get_free_memory());
        assert!(sys.get_total_swap() >= sys.get_free_swap());
    }

    // Refreshing the currently running process should always succeed.
    #[test]
    fn test_refresh_process() {
        let mut sys = System::new();
        assert!(sys.refresh_process(utils::get_current_pid()));
    }
}
Add another test
//
// Sysinfo
//
// Copyright (c) 2015 Guillaume Gomez
//
// Once https://github.com/rust-lang/rfcs/blob/master/text/1422-pub-restricted.md
// feature gets stabilized, we can move common parts in here.
#[cfg(test)]
mod tests {
    use ::{ProcessExt, System, SystemExt};
    use ::utils;

    // Refreshing system data should yield plausible, consistent memory values.
    #[test]
    fn test_refresh_system() {
        let mut sys = System::new();
        sys.refresh_system();
        assert!(sys.get_total_memory() != 0);
        assert!(sys.get_free_memory() != 0);
        // Free can never exceed total.
        assert!(sys.get_total_memory() >= sys.get_free_memory());
        assert!(sys.get_total_swap() >= sys.get_free_swap());
    }

    // Refreshing the currently running process should always succeed.
    #[test]
    fn test_refresh_process() {
        let mut sys = System::new();
        assert!(sys.refresh_process(utils::get_current_pid()));
    }

    // After a full process refresh, the current process should be
    // retrievable and report non-zero memory usage.
    #[test]
    fn test_get_process() {
        let mut sys = System::new();
        sys.refresh_processes();
        let p = sys.get_process(utils::get_current_pid()).expect("didn't find process");
        assert!(p.memory() > 0);
    }
}
|
use serde_json;
use op::Op;
// An incremental document update received as JSON: a revision number plus
// the list of edit operations to apply.
pub struct Update {
    // Revision number this update applies to.
    pub rev: u64,
    // Edit operations, in application order.
    pub ops: Vec<Op>,
}
impl Update {
    // Deserializes an `Update` from a JSON value.
    //
    // NOTE(review): every field access unwraps, so malformed input -- or a
    // missing "rev" field (see xi-editor issue #237) -- panics here.
    pub fn from_value(value: &serde_json::Value) -> Update {
        let object = value.as_object().unwrap();
        let ops = object.get("ops").unwrap().as_array().unwrap().iter().map(
            |op| Op::from_value(op)
        ).collect();
        Update {
            rev: object.get("rev").unwrap().as_u64().unwrap(),
            ops: ops,
        }
    }
}
Make the `rev` field of an Update optional
See https://github.com/google/xi-editor/issues/237 for more detail. The
protocol doc says this field should always be present, but in practice it is
sometimes missing, so it now defaults to 0 instead of panicking.
use serde_json;
use serde_json::value::to_value;
use op::Op;
// An incremental document update received as JSON: a revision number plus
// the list of edit operations to apply.
pub struct Update {
    // Revision number this update applies to.
    pub rev: u64,
    // Edit operations, in application order.
    pub ops: Vec<Op>,
}
impl Update {
    /// Deserializes an `Update` from a JSON value.
    ///
    /// The protocol documentation says "rev" is always present, but in
    /// practice it can be missing (see xi-editor issue #237); it then
    /// defaults to 0 instead of panicking.
    pub fn from_value(value: &serde_json::Value) -> Update {
        let object = value.as_object().unwrap();
        let ops = object.get("ops").unwrap().as_array().unwrap().iter().map(
            |op| Op::from_value(op)
        ).collect();
        // map_or avoids constructing a temporary serde_json::Value just to
        // supply the default. A present-but-non-integer "rev" still panics
        // on the inner unwrap, matching the previous behavior.
        let rev = object.get("rev").map_or(0, |v| v.as_u64().unwrap());
        Update {
            rev: rev,
            ops: ops,
        }
    }
}
|
use base64;
use crypto::digest::Digest;
use crypto::sha2::Sha256;
use hex::FromHex;
use pem;
use serde_json as json;
use std::{mem, thread};
use std::collections::{HashMap, HashSet};
use std::fmt::{self, Display, Formatter};
use std::net::SocketAddrV4;
use std::time::Duration;
use atomic::{Bus, Multicast, Payloads, Primary, Secondary, State, Step};
use datatype::{Config, EcuCustom, EcuManifests, Error, InstallCode, InstallOutcome,
Key, KeyType, OstreePackage, PrivateKey, RoleData, RoleMeta, RoleName,
Signature, SignatureType, TufMeta, TufSigned, Url, Util, canonicalize_json};
use http::{Client, Response};
use pacman::Credentials;
/// Uptane service to communicate with.
#[derive(Clone, Copy)]
pub enum Service {
    /// The `director` service.
    Director,
    /// The `repo` service.
    Repo,
}
impl Display for Service {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // These names also double as the per-service metadata directory
        // names (see `add_root_keys`).
        let name = match *self {
            Service::Director => "director",
            Service::Repo => "repo",
        };
        write!(f, "{}", name)
    }
}
/// Software-over-the-air updates using Uptane verification.
pub struct Uptane {
    /// URL of the Director service.
    pub director_server: Url,
    /// URL of the Repo service.
    pub repo_server: Url,
    /// Local directory holding per-service metadata (e.g. `root.json`).
    pub metadata_path: String,
    /// Whether newly verified metadata is written back under `metadata_path`.
    pub persist_metadata: bool,
    /// Serial of the primary ECU.
    pub primary_ecu: String,
    /// Key used to sign manifests and version reports.
    pub private_key: PrivateKey,
    /// Signature scheme used with `private_key`.
    pub sig_type: SignatureType,
    /// Verifier holding the Director service's keys and role metadata.
    pub director_verifier: Verifier,
    /// Verifier holding the Repo service's keys and role metadata.
    pub repo_verifier: Verifier,
    /// Multicast address passed to `Multicast::new` as the wake-up channel.
    pub atomic_wake_addr: SocketAddrV4,
    /// Multicast address passed to `Multicast::new` as the message channel.
    pub atomic_msg_addr: SocketAddrV4,
    /// Timeout for the atomic installation transaction.
    pub atomic_timeout: Duration,
}
impl Uptane {
    /// Create a new `Uptane` instance from the given config.
    ///
    /// The key id is the hex-encoded SHA-256 digest of the public key file,
    /// and root keys for both services are loaded from local metadata.
    pub fn new(config: &Config) -> Result<Self, Error> {
        let der_key = Util::read_file(&config.uptane.private_key_path)?;
        let pub_key = Util::read_file(&config.uptane.public_key_path)?;
        let mut hasher = Sha256::new();
        hasher.input(&pub_key);
        let mut uptane = Uptane {
            director_server: config.uptane.director_server.clone(),
            repo_server: config.uptane.repo_server.clone(),
            metadata_path: config.uptane.metadata_path.clone(),
            persist_metadata: true,
            primary_ecu: config.uptane.primary_ecu_serial.clone(),
            private_key: PrivateKey { keyid: hasher.result_str(), der_key: der_key },
            sig_type: SignatureType::RsaSsaPss,
            director_verifier: Verifier::default(),
            repo_verifier: Verifier::default(),
            atomic_wake_addr: *config.uptane.atomic_wake_up,
            atomic_msg_addr: *config.uptane.atomic_message,
            atomic_timeout: Duration::from_secs(config.uptane.atomic_timeout_sec),
        };
        uptane.add_root_keys(Service::Director)?;
        uptane.add_root_keys(Service::Repo)?;
        Ok(uptane)
    }

    /// Returns a URL based on the uptane service.
    fn endpoint(&self, service: Service, endpoint: &str) -> Url {
        match service {
            Service::Director => self.director_server.join(&format!("/{}", endpoint)),
            Service::Repo => self.repo_server.join(&format!("/{}", endpoint))
        }
    }

    /// Returns the respective key verifier for an uptane service.
    fn verifier(&mut self, service: Service) -> &mut Verifier {
        match service {
            Service::Director => &mut self.director_verifier,
            Service::Repo => &mut self.repo_verifier
        }
    }

    /// Add the keys from a service's local `root.json` metadata to its verifier.
    fn add_root_keys(&mut self, service: Service) -> Result<(), Error> {
        trace!("adding root keys for {}", service);
        // Metadata is laid out on disk as <metadata_path>/<service>/<role>.json.
        let json = Util::read_file(&format!("{}/{}/root.json", self.metadata_path, service))?;
        let signed = json::from_slice::<TufSigned>(&json)?;
        let data = json::from_value::<RoleData>(signed.signed)?;
        for (role, meta) in data.roles.ok_or(Error::UptaneMissingRoles)? {
            self.verifier(service).add_meta(role, meta)?;
        }
        for (id, key) in data.keys.ok_or(Error::UptaneMissingKeys)? {
            self.verifier(service).add_key(id, key)?;
        }
        Ok(())
    }

    /// GET the bytes response from the given endpoint.
    fn get(&mut self, client: &Client, service: Service, endpoint: &str) -> Result<Vec<u8>, Error> {
        let rx = client.get(self.endpoint(service, endpoint), None);
        match rx.recv().expect("couldn't GET from uptane") {
            Response::Success(data) => Ok(data.body),
            Response::Failed(data) => Err(data.into()),
            Response::Error(err) => Err(err)
        }
    }

    /// PUT bytes to endpoint.
    fn put(&mut self, client: &Client, service: Service, endpoint: &str, bytes: Vec<u8>) -> Result<(), Error> {
        let rx = client.put(self.endpoint(service, endpoint), Some(bytes));
        match rx.recv().expect("couldn't PUT bytes to uptane") {
            Response::Success(_) => Ok(()),
            Response::Failed(data) => Err(data.into()),
            Response::Error(err) => Err(err)
        }
    }

    /// Fetch the latest role metadata from the Director service.
    pub fn get_director(&mut self, client: &Client, role: RoleName) -> Result<Verified, Error> {
        self.get_metadata(client, Service::Director, role)
    }

    /// Fetch the latest role metadata from the Repo service.
    pub fn get_repo(&mut self, client: &Client, role: RoleName) -> Result<Verified, Error> {
        self.get_metadata(client, Service::Repo, role)
    }

    /// Fetch the latest role metadata from the given service.
    pub fn get_metadata(&mut self, client: &Client, service: Service, role: RoleName) -> Result<Verified, Error> {
        trace!("getting {} role from {} service", role, service);
        let json = self.get(client, service, &format!("{}.json", role))?;
        let signed = json::from_slice::<TufSigned>(&json)?;
        let mut verified = self.verifier(service).verify_signed(role, signed)?;
        // Persist newer metadata to disk as both <role>.json and a
        // version-numbered copy, and keep the raw bytes on the result.
        if verified.is_new() && self.persist_metadata {
            let dir = format!("{}/{}", self.metadata_path, service);
            Util::write_file(&format!("{}/{}.json", dir, role), &json)?;
            Util::write_file(&format!("{}/{}.{}.json", dir, verified.new_ver, role), &json)?;
            verified.json = Some(json);
        }
        Ok(verified)
    }

    /// Send a signed manifest with a list of signed objects to the Director server.
    pub fn put_manifest(&mut self, client: &Client, signed: Vec<TufSigned>) -> Result<(), Error> {
        let ecus = EcuManifests { primary_ecu_serial: self.primary_ecu.clone(), ecu_version_manifest: signed };
        let manifest = self.private_key.sign_data(json::to_value(ecus)?, self.sig_type)?;
        Ok(self.put(client, Service::Director, "manifest", json::to_vec(&manifest)?)?)
    }

    /// Start a transaction to install the verified targets to their respective ECUs.
    ///
    /// Returns the collected signed reports and whether the transaction
    /// committed (`false` on abort or timeout).
    pub fn install(&mut self, verified: Verified, treehub: Url, creds: Credentials) -> Result<(Vec<TufSigned>, bool), Error> {
        let bus = || -> Result<Box<Bus>, Error> {
            Ok(Box::new(Multicast::new(self.atomic_wake_addr, self.atomic_msg_addr)?))
        };
        // If one of the targets belongs to the primary ECU itself, spawn a
        // Secondary in a background thread to carry out that installation.
        if let Some(ref targets) = verified.data.targets {
            if let Some(pkg) = primary_target(targets, &self.primary_ecu, &treehub)? {
                let inst = Box::new(self.new_installer(pkg, creds));
                let mut ecu = Secondary::new(self.primary_ecu.clone(), bus()?, inst, self.atomic_timeout, None);
                thread::spawn(move || ecu.listen());
            }
        }
        let mut primary = Primary::new(into_payloads(verified)?, bus()?, None, self.atomic_timeout, None);
        match primary.commit() {
            Ok(()) => Ok((primary.into_signed(), true)),
            // An abort or timeout is not a hard error: return the signed
            // reports collected so far, flagged as unsuccessful.
            Err(Error::AtomicAbort(_)) |
            Err(Error::AtomicTimeout) => Ok((primary.into_signed(), false)),
            Err(err) => Err(err)
        }
    }

    /// Build the installer used for the primary ECU's own package.
    fn new_installer(&self, pkg: OstreePackage, creds: Credentials) -> PrimaryInstaller {
        PrimaryInstaller {
            pkg: pkg,
            sig_type: self.sig_type,
            priv_key: self.private_key.clone(),
            credentials: creds
        }
    }
}
/// Search the given targets for an `OstreePackage` destined for the primary
/// ECU.
///
/// Returns `Ok(None)` when no target matches the primary's serial,
/// `Ok(Some(_))` for exactly one match, and an error when a target is
/// missing its custom metadata or multiple targets match.
fn primary_target(targets: &HashMap<String, TufMeta>, primary_ecu: &str, treehub: &Url) -> Result<Option<OstreePackage>, Error> {
    let mut primaries = targets.iter()
        .map(|(refname, meta)| match meta.custom {
            Some(ref custom) if custom.ecuIdentifier == primary_ecu => {
                Ok(Some(OstreePackage::from_meta(meta.clone(), refname.clone(), "sha256", treehub)?))
            }
            Some(_) => Ok(None),
            None => Err(Error::UptaneTargets("missing custom field with ecuIdentifier".into()))
        })
        .collect::<Result<Vec<Option<_>>, _>>()?
        .into_iter()
        .filter_map(|pkg| pkg) // drop targets meant for other ECUs
        .collect::<Vec<OstreePackage>>();
    match primaries.len() {
        0 => Ok(None),
        1 => Ok(primaries.pop()),
        _ => Err(Error::UptaneTargets("multiple primary targets".into()))
    }
}
/// Convert verified metadata into per-target payloads for the atomic
/// transaction, attaching the raw signed JSON to the `Verify` step.
///
/// Returns an error when the metadata has no targets at all.
fn into_payloads(verified: Verified) -> Result<Payloads, Error> {
    let json = verified.json.unwrap_or_default();
    let targets = verified.data.targets.ok_or_else(|| Error::UptaneTargets("missing".into()))?;
    let payloads = targets.into_iter()
        .map(|(serial, _)| (serial, hashmap!{State::Verify => json.clone()}))
        .collect::<Payloads>();
    if payloads.is_empty() {
        Err(Error::UptaneTargets("no targets found".into()))
    } else {
        Ok(payloads)
    }
}
/// Define an installer for an `OstreePackage` as part of a transaction.
pub struct PrimaryInstaller {
    /// Package to install on the primary ECU.
    pkg: OstreePackage,
    /// Signature scheme used for the signed version report.
    sig_type: SignatureType,
    /// Key used to sign the version report.
    priv_key: PrivateKey,
    /// Credentials passed to the package installation.
    credentials: Credentials,
}
impl PrimaryInstaller {
    /// Sign a version report for the primary ECU that embeds the given
    /// installation outcome as custom metadata.
    fn signed(&self, outcome: InstallOutcome) -> Result<Option<TufSigned>, Error> {
        let custom = EcuCustom { operation_result: outcome.into_result(self.pkg.refName.clone()) };
        let version = OstreePackage::get_latest(&self.pkg.ecu_serial)?.into_version(Some(custom));
        Ok(Some(self.priv_key.sign_data(json::to_value(version)?, self.sig_type)?))
    }
}
impl Step for PrimaryInstaller {
    /// Run one step of the atomic transaction for the primary ECU.
    ///
    /// Only `Commit` performs the actual installation; `Abort` signs an
    /// internal-error outcome, and all earlier states are no-ops.
    fn step(&mut self, state: State, _: &[u8]) -> Result<Option<TufSigned>, Error> {
        match state {
            State::Idle | State::Ready | State::Verify | State::Prepare => Ok(None),
            State::Commit => self.signed(self.pkg.install(&self.credentials)?),
            State::Abort => self.signed(InstallOutcome::new(InstallCode::INTERNAL_ERROR, "".into(), "aborted".into()))
        }
    }
}
/// Store the keys and role data used for verifying uptane metadata.
#[derive(Default)]
pub struct Verifier {
    /// Public keys, indexed by key id.
    keys: HashMap<String, Key>,
    /// Role metadata, indexed by role name.
    roles: HashMap<RoleName, RoleMeta>,
}
impl Verifier {
    /// Register role metadata, rejecting duplicate roles and thresholds
    /// below one.
    pub fn add_meta(&mut self, role: RoleName, meta: RoleMeta) -> Result<(), Error> {
        trace!("adding role to verifier: {}", role);
        if self.roles.get(&role).is_some() {
            Err(Error::UptaneRole(format!("{} already exists", role)))
        } else if meta.threshold < 1 {
            Err(Error::UptaneThreshold(format!("{} threshold too low", role)))
        } else {
            self.roles.insert(role, meta);
            Ok(())
        }
    }

    /// Register a public key, rejecting mismatched key ids and duplicates.
    pub fn add_key(&mut self, id: String, key: Key) -> Result<(), Error> {
        trace!("adding key_id to verifier: {}", id);
        if id != key.key_id()? {
            Err(Error::TufKeyId(format!("wrong key_id: {}", id)))
        } else if self.keys.get(&id).is_some() {
            Err(Error::TufKeyId(format!("key_id already exists: {}", id)))
        } else {
            self.keys.insert(id, key);
            Ok(())
        }
    }

    /// Verify that the signed data is valid.
    pub fn verify_signed(&mut self, role: RoleName, signed: TufSigned) -> Result<Verified, Error> {
        // Check the signatures first, then compare the embedded role data
        // against the version currently known for this role.
        let current = {
            let meta = self.roles.get(&role).ok_or_else(|| Error::UptaneRole(format!("{} not found", role)))?;
            self.verify_signatures(&meta, &signed)?;
            meta.version
        };
        let data = json::from_value::<RoleData>(signed.signed)?;
        if data._type != role {
            Err(Error::UptaneRole(format!("expected `{}`, got `{}`", role, data._type)))
        } else if data.expired() {
            Err(Error::UptaneExpired)
        } else if data.version < current {
            // Rollback protection: never accept older metadata.
            Err(Error::UptaneVersion)
        } else if data.version > current {
            let meta = self.roles.get_mut(&role).expect("get_mut role");
            let old = mem::replace(&mut meta.version, data.version);
            debug!("{} version updated from {} to {}", role, old, data.version);
            Ok(Verified { role: role, data: data, json: None, new_ver: meta.version, old_ver: old })
        } else {
            Ok(Verified { role: role, data: data, json: None, new_ver: current, old_ver: current })
        }
    }

    /// Verify that a role-defined threshold of signatures successfully validate.
    pub fn verify_signatures(&self, meta: &RoleMeta, signed: &TufSigned) -> Result<(), Error> {
        let cjson = canonicalize_json(&json::to_vec(&signed.signed)?)?;
        // Collect into a HashSet so duplicated signatures only count once
        // towards the threshold.
        let valid = signed.signatures
            .iter()
            .filter(|sig| meta.keyids.contains(&sig.keyid))
            .filter(|sig| self.verify_data(&cjson, sig))
            .map(|sig| &sig.sig)
            .collect::<HashSet<_>>();
        if (valid.len() as u64) < meta.threshold {
            Err(Error::UptaneThreshold(format!("{} of {} ok", valid.len(), meta.threshold)))
        } else {
            Ok(())
        }
    }

    /// Verify that the signature matches the data.
    pub fn verify_data(&self, data: &[u8], sig: &Signature) -> bool {
        let verify = || -> Result<bool, Error> {
            let key = self.keys.get(&sig.keyid).ok_or_else(|| Error::KeyNotFound(sig.keyid.clone()))?;
            match key.keytype {
                // Ed25519 signatures and public keys are hex-encoded.
                KeyType::Ed25519 => {
                    let sig = Vec::from_hex(&sig.sig)?;
                    let key = Vec::from_hex(&key.keyval.public)?;
                    Ok(SignatureType::Ed25519.verify_msg(data, &key, &sig))
                }
                // RSA signatures are base64-encoded with a PEM public key.
                KeyType::Rsa => {
                    let sig = base64::decode(&sig.sig)?;
                    let pem = pem::parse(&key.keyval.public)?;
                    Ok(SignatureType::RsaSsaPss.verify_msg(data, &pem.contents, &sig))
                }
            }
        };
        // Any failure (missing key, decode error, bad signature) counts as
        // a failed verification rather than propagating an error.
        match verify() {
            Ok(true) => { trace!("successful verification: {}", sig.keyid); true }
            Ok(false) => { trace!("failed verification: {}", sig.keyid); false }
            Err(err) => { trace!("failed verification for {}: {}", sig.keyid, err); false }
        }
    }
}
/// Encapsulate successfully verified data with additional metadata.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct Verified {
    /// The role this metadata belongs to.
    pub role: RoleName,
    /// The deserialized role data.
    pub data: RoleData,
    /// The raw signed JSON (populated when new metadata is persisted).
    pub json: Option<Vec<u8>>,
    /// Version of this metadata.
    pub new_ver: u64,
    /// Previously known version for the role.
    pub old_ver: u64,
}
impl Verified {
    /// Returns `true` when this metadata is newer than the version that was
    /// previously known for the role.
    pub fn is_new(&self) -> bool {
        self.new_ver > self.old_ver
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use pem;
    use std::collections::HashMap;
    use std::net::Ipv4Addr;
    use datatype::{EcuManifests, EcuVersion, TufCustom, TufMeta, TufSigned};
    use http::TestClient;

    // Build an Uptane instance backed by the fixtures in tests/uptane_basic,
    // with metadata persistence disabled so tests don't write to disk.
    fn new_uptane() -> Uptane {
        let mut uptane = Uptane {
            director_server: "http://localhost:8001".parse().unwrap(),
            repo_server: "http://localhost:8002".parse().unwrap(),
            metadata_path: "tests/uptane_basic".into(),
            persist_metadata: false,
            primary_ecu: "test-primary-serial".into(),
            private_key: PrivateKey {
                keyid: "e453c713367595e1a9e5c1de8b2c039fe4178094bdaf2d52b1993fdd1a76ee26".into(),
                der_key: pem::parse("-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDdC9QttkMbF5qB\n2plVU2hhG2sieXS2CVc3E8rm/oYGc9EHnlPMcAuaBtn9jaBo37PVYO+VFInzMu9f\nVMLm7d/hQxv4PjTBpkXvw1Ad0Tqhg/R8Lc4SXPxWxlVhg0ahLn3kDFQeEkrTNW7k\nxpAxWiE8V09ETcPwyNhPfcWeiBePwh8ySJ10IzqHt2kXwVbmL4F/mMX07KBYWIcA\n52TQLs2VhZLIaUBv9ZBxymAvogGz28clx7tHOJ8LZ/daiMzmtv5UbXPdt+q55rLJ\nZ1TuG0CuRqhTOllXnIvAYRQr6WBaLkGGbezQO86MDHBsV5TsG6JHPorrr6ogo+Lf\npuH6dcnHAgMBAAECggEBAMC/fs45fzyRkXYn4srHh14d5YbTN9VAQd/SD3zrdn0L\n4rrs8Y90KHmv/cgeBkFMx+iJtYBev4fk41xScf2icTVhKnOF8sTls1hGDIdjmeeb\nQ8ZAvs++a39TRMJaEW2dN8NyiKsMMlkH3+H3z2ZpfE+8pm8eDHza9dwjBP6fF0SP\nV1XPd2OSrJlvrgBrAU/8WWXYSYK+5F28QtJKsTuiwQylIHyJkd8cgZhgYXlUVvTj\nnHFJblpAT0qphji7p8G4Ejg+LNxu/ZD+D3wQ6iIPgKFVdC4uXmPwlf1LeYqXW0+g\ngTmHY7a/y66yn1H4A5gyfx2EffFMQu0Sl1RqzDVYYjECgYEA9Hy2QsP3pxW27yLs\nCu5e8pp3vZpdkNA71+7v2BVvaoaATnsSBOzo3elgRYsN0On4ObtfQXB3eC9poNuK\nzWxj8bkPbVOCpSpq//sUSqkh/XCmAhDl78BkgmWDb4EFEgcAT2xPBTHkb70jVAXB\nE1HBwsBcXhdxzRt8IYiBG+68d/8CgYEA53SJYpJ809lfpAG0CU986FFD7Fi/SvcX\n21TVMn1LpHuH7MZ2QuehS0SWevvspkIUm5uT3PrhTxdohAInNEzsdeHhTU11utIO\nrKnrtgZXKsBG4idsHu5ZQzp4n3CBEpfPFbOtP/UEKI/IGaJWGXVgG4J6LWmQ9LK9\nilNTaOUQ7jkCgYB+YP0B9DTPLN1cLgwf9mokNA7TdrkJA2r7yuo2I5ZtVUt7xghh\nfWk+VMXMDP4+UMNcbGvn8s/+01thqDrOx0m+iO/djn6JDC01Vz98/IKydImLpdqG\nHUiXUwwnFmVdlTrm01DhmZHA5N8fLr5IU0m6dx8IEExmPt/ioaJDoxvPVwKBgC+8\n1H01M3PKWLSN+WEWOO/9muHLaCEBF7WQKKzSNODG7cEDKe8gsR7CFbtl7GhaJr/1\ndajVQdU7Qb5AZ2+dEgQ6Q2rbOBYBLy+jmE8hvaa+o6APe3hhtp1sGObhoG2CTB7w\nwSH42hO3nBDVb6auk9T4s1Rcep5No1Q9XW28GSLZAoGATFlXg1hqNKLO8xXq1Uzi\nkDrN6Ep/wq80hLltYPu3AXQn714DVwNa3qLP04dAYXbs9IaQotAYVVGf6N1IepLM\nfQU6Q9fp9FtQJdU+Mjj2WMJVWbL0ihcU8VZV5TviNvtvR1rkToxSLia7eh39AY5G\nvkgeMZm7SwqZ9c/ZFnjJDqc=\n-----END PRIVATE KEY-----").unwrap().contents
            },
            sig_type: SignatureType::RsaSsaPss,
            director_verifier: Verifier::default(),
            repo_verifier: Verifier::default(),
            atomic_wake_addr: SocketAddrV4::new(Ipv4Addr::new(232,0,0,101), 23211),
            atomic_msg_addr: SocketAddrV4::new(Ipv4Addr::new(232,0,0,102), 23212),
            atomic_timeout: Duration::from_secs(60),
        };
        uptane.add_root_keys(Service::Director).expect("add director root keys");
        uptane
    }

    // Collect the custom metadata of each target, keyed by file name.
    fn extract_custom(targets: HashMap<String, TufMeta>) -> HashMap<String, TufCustom> {
        let mut out = HashMap::new();
        for (file, meta) in targets {
            let _ = meta.custom.map(|c| out.insert(file, c));
        }
        out
    }

    // The manifest fixture should deserialize into a single ECU version report.
    #[test]
    fn test_read_manifest() {
        let bytes = Util::read_file("tests/uptane_basic/director/manifest.json").expect("couldn't read manifest.json");
        let signed = json::from_slice::<TufSigned>(&bytes).expect("couldn't load manifest");
        let mut ecus = json::from_value::<EcuManifests>(signed.signed).expect("couldn't load signed manifest");
        assert_eq!(ecus.primary_ecu_serial, "<primary_ecu_serial>");
        assert_eq!(ecus.ecu_version_manifest.len(), 1);
        let ver0 = ecus.ecu_version_manifest.pop().unwrap();
        let ecu0 = json::from_value::<EcuVersion>(ver0.signed).expect("couldn't load first manifest");
        assert_eq!(ecu0.installed_image.filepath, "<ostree_branch>-<ostree_commit>");
    }

    // Fetching targets should verify them and expose length, hash, and
    // the custom ecuIdentifier field.
    #[test]
    fn test_get_targets() {
        let mut uptane = new_uptane();
        let client = TestClient::from_paths(&["tests/uptane_basic/director/targets.json"]);
        let verified = uptane.get_director(&client, RoleName::Targets).expect("get targets");
        assert!(verified.is_new());
        let targets = verified.data.targets.expect("missing targets");
        targets.get("/file.img").map(|meta| {
            assert_eq!(meta.length, 1337);
            let hash = meta.hashes.get("sha256").expect("sha256 hash");
            assert_eq!(hash, "dd250ea90b872a4a9f439027ac49d853c753426f71f61ae44c2f360a16179fb9");
        }).expect("get /file.img");
        let custom = extract_custom(targets);
        let image = custom.get("/file.img").expect("get /file.img custom");
        assert_eq!(image.ecuIdentifier, "some-ecu-id");
    }

    // Snapshot metadata should reference the targets.json role file.
    #[test]
    fn test_get_snapshot() {
        let mut uptane = new_uptane();
        let client = TestClient::from_paths(&["tests/uptane_basic/director/snapshot.json"]);
        let verified = uptane.get_director(&client, RoleName::Snapshot).expect("couldn't get snapshot");
        let metadata = verified.data.meta.as_ref().expect("missing meta");
        assert!(verified.is_new());
        let meta = metadata.get("targets.json").expect("no targets.json metadata");
        assert_eq!(meta.length, 741);
        let hash = meta.hashes.get("sha256").expect("couldn't get sha256 hash");
        assert_eq!(hash, "b10b36997574e6898dda4cfeb61c5f286d84dfa4be807950f14996cd476e6305");
    }

    // Timestamp metadata should reference the snapshot.json role file.
    #[test]
    fn test_get_timestamp() {
        let mut uptane = new_uptane();
        let client = TestClient::from_paths(&["tests/uptane_basic/director/timestamp.json"]);
        let verified = uptane.get_director(&client, RoleName::Timestamp).expect("couldn't get timestamp");
        let metadata = verified.data.meta.as_ref().expect("missing meta");
        assert!(verified.is_new());
        let meta = metadata.get("snapshot.json").expect("no snapshot.json metadata");
        assert_eq!(meta.length, 784);
    }
}
Fix Secondary ecuIdentifier in Payloads
use base64;
use crypto::digest::Digest;
use crypto::sha2::Sha256;
use hex::FromHex;
use pem;
use serde_json as json;
use std::{mem, thread};
use std::collections::{HashMap, HashSet};
use std::fmt::{self, Display, Formatter};
use std::net::SocketAddrV4;
use std::time::Duration;
use atomic::{Bus, Multicast, Payloads, Primary, Secondary, State, Step};
use datatype::{Config, EcuCustom, EcuManifests, Error, InstallCode, InstallOutcome,
Key, KeyType, OstreePackage, PrivateKey, RoleData, RoleMeta, RoleName,
Signature, SignatureType, TufMeta, TufSigned, Url, Util, canonicalize_json};
use http::{Client, Response};
use pacman::Credentials;
/// Uptane service to communicate with.
#[derive(Clone, Copy)]
pub enum Service {
    /// The `director` service.
    Director,
    /// The `repo` service.
    Repo,
}
impl Display for Service {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // These names also double as the per-service metadata directory
        // names (see `add_root_keys`).
        let name = match *self {
            Service::Director => "director",
            Service::Repo => "repo",
        };
        write!(f, "{}", name)
    }
}
/// Software-over-the-air updates using Uptane verification.
pub struct Uptane {
    /// URL of the Director service.
    pub director_server: Url,
    /// URL of the Repo service.
    pub repo_server: Url,
    /// Local directory holding per-service metadata (e.g. `root.json`).
    pub metadata_path: String,
    /// Whether newly verified metadata is written back under `metadata_path`.
    pub persist_metadata: bool,
    /// Serial of the primary ECU.
    pub primary_ecu: String,
    /// Key used to sign manifests and version reports.
    pub private_key: PrivateKey,
    /// Signature scheme used with `private_key`.
    pub sig_type: SignatureType,
    /// Verifier holding the Director service's keys and role metadata.
    pub director_verifier: Verifier,
    /// Verifier holding the Repo service's keys and role metadata.
    pub repo_verifier: Verifier,
    /// Multicast address passed to `Multicast::new` as the wake-up channel.
    pub atomic_wake_addr: SocketAddrV4,
    /// Multicast address passed to `Multicast::new` as the message channel.
    pub atomic_msg_addr: SocketAddrV4,
    /// Timeout for the atomic installation transaction.
    pub atomic_timeout: Duration,
}
impl Uptane {
    /// Create a client from `config`: load the DER private key, derive the
    /// key id as the SHA-256 digest of the public key bytes, then pin the
    /// trusted `root.json` keys for both the Director and Repo services.
    pub fn new(config: &Config) -> Result<Self, Error> {
        let der_key = Util::read_file(&config.uptane.private_key_path)?;
        let pub_key = Util::read_file(&config.uptane.public_key_path)?;
        let mut hasher = Sha256::new();
        hasher.input(&pub_key);
        let mut uptane = Uptane {
            director_server: config.uptane.director_server.clone(),
            repo_server: config.uptane.repo_server.clone(),
            metadata_path: config.uptane.metadata_path.clone(),
            persist_metadata: true,
            primary_ecu: config.uptane.primary_ecu_serial.clone(),
            private_key: PrivateKey { keyid: hasher.result_str(), der_key: der_key },
            sig_type: SignatureType::RsaSsaPss,
            director_verifier: Verifier::default(),
            repo_verifier: Verifier::default(),
            atomic_wake_addr: *config.uptane.atomic_wake_up,
            atomic_msg_addr: *config.uptane.atomic_message,
            atomic_timeout: Duration::from_secs(config.uptane.atomic_timeout_sec),
        };
        uptane.add_root_keys(Service::Director)?;
        uptane.add_root_keys(Service::Repo)?;
        Ok(uptane)
    }
    /// Returns a URL based on the uptane service.
    fn endpoint(&self, service: Service, endpoint: &str) -> Url {
        match service {
            Service::Director => self.director_server.join(&format!("/{}", endpoint)),
            Service::Repo => self.repo_server.join(&format!("/{}", endpoint))
        }
    }
    /// Returns the respective key verifier for an uptane service.
    fn verifier(&mut self, service: Service) -> &mut Verifier {
        match service {
            Service::Director => &mut self.director_verifier,
            Service::Repo => &mut self.repo_verifier
        }
    }
    /// Add the keys from a service's local `root.json` metadata to its verifier.
    ///
    /// Errors with `UptaneMissingRoles`/`UptaneMissingKeys` when `root.json`
    /// lacks the respective sections.
    fn add_root_keys(&mut self, service: Service) -> Result<(), Error> {
        trace!("adding root keys for {}", service);
        // `Display for Service` yields the per-service sub-directory name.
        let json = Util::read_file(&format!("{}/{}/root.json", self.metadata_path, service))?;
        let signed = json::from_slice::<TufSigned>(&json)?;
        let data = json::from_value::<RoleData>(signed.signed)?;
        for (role, meta) in data.roles.ok_or(Error::UptaneMissingRoles)? {
            self.verifier(service).add_meta(role, meta)?;
        }
        for (id, key) in data.keys.ok_or(Error::UptaneMissingKeys)? {
            self.verifier(service).add_key(id, key)?;
        }
        Ok(())
    }
    /// GET the bytes response from the given endpoint.
    fn get(&mut self, client: &Client, service: Service, endpoint: &str) -> Result<Vec<u8>, Error> {
        let rx = client.get(self.endpoint(service, endpoint), None);
        // A dropped response channel is a programming error, hence `expect`.
        match rx.recv().expect("couldn't GET from uptane") {
            Response::Success(data) => Ok(data.body),
            Response::Failed(data) => Err(data.into()),
            Response::Error(err) => Err(err)
        }
    }
    /// PUT bytes to endpoint.
    fn put(&mut self, client: &Client, service: Service, endpoint: &str, bytes: Vec<u8>) -> Result<(), Error> {
        let rx = client.put(self.endpoint(service, endpoint), Some(bytes));
        match rx.recv().expect("couldn't PUT bytes to uptane") {
            Response::Success(_) => Ok(()),
            Response::Failed(data) => Err(data.into()),
            Response::Error(err) => Err(err)
        }
    }
    /// Fetch the latest role metadata from the Director service.
    pub fn get_director(&mut self, client: &Client, role: RoleName) -> Result<Verified, Error> {
        self.get_metadata(client, Service::Director, role)
    }
    /// Fetch the latest role metadata from the Repo service.
    pub fn get_repo(&mut self, client: &Client, role: RoleName) -> Result<Verified, Error> {
        self.get_metadata(client, Service::Repo, role)
    }
    /// Fetch the latest role metadata from the given service.
    ///
    /// On a version bump (and with `persist_metadata` set) the raw JSON is
    /// written twice: once as the "latest" file and once under its version
    /// number, and the bytes are attached to the returned `Verified`.
    pub fn get_metadata(&mut self, client: &Client, service: Service, role: RoleName) -> Result<Verified, Error> {
        trace!("getting {} role from {} service", role, service);
        let json = self.get(client, service, &format!("{}.json", role))?;
        let signed = json::from_slice::<TufSigned>(&json)?;
        let mut verified = self.verifier(service).verify_signed(role, signed)?;
        if verified.is_new() && self.persist_metadata {
            let dir = format!("{}/{}", self.metadata_path, service);
            Util::write_file(&format!("{}/{}.json", dir, role), &json)?;
            Util::write_file(&format!("{}/{}.{}.json", dir, verified.new_ver, role), &json)?;
            verified.json = Some(json);
        }
        Ok(verified)
    }
    /// Send a signed manifest with a list of signed objects to the Director server.
    pub fn put_manifest(&mut self, client: &Client, signed: Vec<TufSigned>) -> Result<(), Error> {
        let ecus = EcuManifests { primary_ecu_serial: self.primary_ecu.clone(), ecu_version_manifest: signed };
        let manifest = self.private_key.sign_data(json::to_value(ecus)?, self.sig_type)?;
        Ok(self.put(client, Service::Director, "manifest", json::to_vec(&manifest)?)?)
    }
    /// Start a transaction to install the verified targets to their respective ECUs.
    ///
    /// If one of the targets is addressed to the primary ECU, a `Secondary`
    /// listener is spawned for it so the primary participates in the same
    /// atomic transaction as the real secondaries. An abort or timeout is
    /// reported as `Ok((reports, false))` rather than an error.
    pub fn install(&mut self, verified: Verified, treehub: Url, creds: Credentials) -> Result<(Vec<TufSigned>, bool), Error> {
        // Each participant gets its own multicast bus instance.
        let bus = || -> Result<Box<Bus>, Error> {
            Ok(Box::new(Multicast::new(self.atomic_wake_addr, self.atomic_msg_addr)?))
        };
        if let Some(ref targets) = verified.data.targets {
            if let Some(pkg) = primary_target(targets, &self.primary_ecu, &treehub)? {
                let inst = Box::new(self.new_installer(pkg, creds));
                let mut ecu = Secondary::new(self.primary_ecu.clone(), bus()?, inst, self.atomic_timeout, None);
                thread::spawn(move || ecu.listen());
            }
        }
        let mut primary = Primary::new(into_payloads(verified)?, bus()?, None, self.atomic_timeout, None);
        match primary.commit() {
            Ok(()) => Ok((primary.into_signed(), true)),
            Err(Error::AtomicAbort(_)) |
            Err(Error::AtomicTimeout) => Ok((primary.into_signed(), false)),
            Err(err) => Err(err)
        }
    }
    // Build the installer used when the primary ECU is itself a target.
    fn new_installer(&self, pkg: OstreePackage, creds: Credentials) -> PrimaryInstaller {
        PrimaryInstaller {
            pkg: pkg,
            sig_type: self.sig_type,
            priv_key: self.private_key.clone(),
            credentials: creds
        }
    }
}
/// Find the (unique) target destined for the primary ECU, if any.
///
/// A target belongs to the primary when its `custom.ecuIdentifier` matches
/// `primary_ecu`. Returns `Ok(None)` when no target matches and an error
/// when more than one does, since the primary installs exactly one package.
fn primary_target(targets: &HashMap<String, TufMeta>, primary_ecu: &str, treehub: &Url) -> Result<Option<OstreePackage>, Error> {
    // A single `filter_map` replaces the previous `map` followed by an
    // identity `filter_map` (which only stripped the `None`s back out).
    let mut primaries = targets.iter()
        .filter_map(|(refname, meta)| match meta.custom {
            Some(ref custom) if custom.ecuIdentifier == primary_ecu => {
                Some(OstreePackage::from_meta(meta.clone(), refname.clone(), "sha256", treehub))
            }
            _ => None
        })
        .collect::<Vec<Result<_, _>>>();
    match primaries.len() {
        0 => Ok(None),
        // `pop` avoids the `into_iter().nth(0)` dance; length is exactly 1.
        1 => Ok(Some(primaries.pop().expect("primary package")?)),
        _ => Err(Error::UptaneTargets("multiple primary targets".into()))
    }
}
fn into_payloads(verified: Verified) -> Result<Payloads, Error> {
let json = verified.json.unwrap_or(Vec::new());
let targets = verified.data.targets.ok_or_else(|| Error::UptaneTargets("missing".into()))?;
let payloads = targets.into_iter()
.map(|(_, meta)| {
if let Some(custom) = meta.custom {
Ok((custom.ecuIdentifier, hashmap!{State::Verify => json.clone()}))
} else {
Err(Error::UptaneTargets("missing custom field with ecuIdentifier".into()))
}
})
.collect::<Result<Payloads, Error>>()?;
if payloads.len() == 0 {
Err(Error::UptaneTargets("no targets found".into()))
} else {
Ok(payloads)
}
}
/// Define an installer for an `OstreePackage` as part of a transaction.
pub struct PrimaryInstaller {
    // The package to install on the primary ECU.
    pkg: OstreePackage,
    // Signature scheme used for the version report.
    sig_type: SignatureType,
    // Key that signs the version report after install/abort.
    priv_key: PrivateKey,
    // Credentials passed through to the ostree installation.
    credentials: Credentials,
}
impl PrimaryInstaller {
    /// Wrap an installation outcome in a signed ECU version report.
    ///
    /// NOTE(review): reads the latest installed package for this ECU back
    /// from the system — assumes `OstreePackage::get_latest` reflects the
    /// post-install state; confirm against its implementation.
    fn signed(&self, outcome: InstallOutcome) -> Result<Option<TufSigned>, Error> {
        let custom = EcuCustom { operation_result: outcome.into_result(self.pkg.refName.clone()) };
        let version = OstreePackage::get_latest(&self.pkg.ecu_serial)?.into_version(Some(custom));
        Ok(Some(self.priv_key.sign_data(json::to_value(version)?, self.sig_type)?))
    }
}
impl Step for PrimaryInstaller {
    /// Advance the primary's installer through one transaction state.
    ///
    /// Only `Commit` (perform the install) and `Abort` (report an internal
    /// error) produce a signed report; earlier states need no work here.
    fn step(&mut self, state: State, _: &[u8]) -> Result<Option<TufSigned>, Error> {
        match state {
            State::Idle | State::Ready | State::Verify | State::Prepare => Ok(None),
            State::Commit => self.signed(self.pkg.install(&self.credentials)?),
            State::Abort => self.signed(InstallOutcome::new(InstallCode::INTERNAL_ERROR, "".into(), "aborted".into()))
        }
    }
}
/// Store the keys and role data used for verifying uptane metadata.
#[derive(Default)]
pub struct Verifier {
    // Trusted public keys, indexed by key id.
    keys: HashMap<String, Key>,
    // Per-role signing policy (key ids, threshold) and last-seen version.
    roles: HashMap<RoleName, RoleMeta>,
}
impl Verifier {
    /// Register the signing policy for a role.
    ///
    /// Rejects duplicate roles and thresholds below one (a zero threshold
    /// would make any unsigned metadata "valid").
    pub fn add_meta(&mut self, role: RoleName, meta: RoleMeta) -> Result<(), Error> {
        trace!("adding role to verifier: {}", role);
        if self.roles.get(&role).is_some() {
            Err(Error::UptaneRole(format!("{} already exists", role)))
        } else if meta.threshold < 1 {
            Err(Error::UptaneThreshold(format!("{} threshold too low", role)))
        } else {
            self.roles.insert(role, meta);
            Ok(())
        }
    }
    /// Register a trusted public key under its key id.
    pub fn add_key(&mut self, id: String, key: Key) -> Result<(), Error> {
        trace!("adding key_id to verifier: {}", id);
        // Reject ids that don't match the key material they claim to name.
        if id != key.key_id()? {
            Err(Error::TufKeyId(format!("wrong key_id: {}", id)))
        } else if self.keys.get(&id).is_some() {
            Err(Error::TufKeyId(format!("key_id already exists: {}", id)))
        } else {
            self.keys.insert(id, key);
            Ok(())
        }
    }
    /// Verify that the signed data is valid.
    ///
    /// Checks, in order: signature threshold, role type, expiry, then version
    /// monotonicity. On a version increase the stored role version is bumped
    /// and the returned `Verified` carries both old and new versions.
    pub fn verify_signed(&mut self, role: RoleName, signed: TufSigned) -> Result<Verified, Error> {
        // Scope the immutable borrow of `self.roles` so `get_mut` below compiles.
        let current = {
            let meta = self.roles.get(&role).ok_or_else(|| Error::UptaneRole(format!("{} not found", role)))?;
            self.verify_signatures(&meta, &signed)?;
            meta.version
        };
        let data = json::from_value::<RoleData>(signed.signed)?;
        if data._type != role {
            Err(Error::UptaneRole(format!("expected `{}`, got `{}`", role, data._type)))
        } else if data.expired() {
            Err(Error::UptaneExpired)
        } else if data.version < current {
            // Rollback protection: never accept older metadata than last seen.
            Err(Error::UptaneVersion)
        } else if data.version > current {
            let meta = self.roles.get_mut(&role).expect("get_mut role");
            let old = mem::replace(&mut meta.version, data.version);
            debug!("{} version updated from {} to {}", role, old, data.version);
            Ok(Verified { role: role, data: data, json: None, new_ver: meta.version, old_ver: old })
        } else {
            Ok(Verified { role: role, data: data, json: None, new_ver: current, old_ver: current })
        }
    }
    /// Verify that a role-defined threshold of signatures successfully validate.
    pub fn verify_signatures(&self, meta: &RoleMeta, signed: &TufSigned) -> Result<(), Error> {
        // Signatures are made over the canonical JSON form of `signed`.
        let cjson = canonicalize_json(&json::to_vec(&signed.signed)?)?;
        // Collect into a set so duplicated signatures count only once
        // towards the threshold.
        let valid = signed.signatures
            .iter()
            .filter(|sig| meta.keyids.contains(&sig.keyid))
            .filter(|sig| self.verify_data(&cjson, sig))
            .map(|sig| &sig.sig)
            .collect::<HashSet<_>>();
        if (valid.len() as u64) < meta.threshold {
            Err(Error::UptaneThreshold(format!("{} of {} ok", valid.len(), meta.threshold)))
        } else {
            Ok(())
        }
    }
    /// Verify that the signature matches the data.
    ///
    /// Never errors: unknown key ids, undecodable material, and failed
    /// cryptographic checks all simply return `false` (traced for debugging).
    pub fn verify_data(&self, data: &[u8], sig: &Signature) -> bool {
        let verify = || -> Result<bool, Error> {
            let key = self.keys.get(&sig.keyid).ok_or_else(|| Error::KeyNotFound(sig.keyid.clone()))?;
            match key.keytype {
                // Ed25519: signature and public key are hex-encoded.
                KeyType::Ed25519 => {
                    let sig = Vec::from_hex(&sig.sig)?;
                    let key = Vec::from_hex(&key.keyval.public)?;
                    Ok(SignatureType::Ed25519.verify_msg(data, &key, &sig))
                }
                // RSA: signature is base64, public key is a PEM block.
                KeyType::Rsa => {
                    let sig = base64::decode(&sig.sig)?;
                    let pem = pem::parse(&key.keyval.public)?;
                    Ok(SignatureType::RsaSsaPss.verify_msg(data, &pem.contents, &sig))
                }
            }
        };
        match verify() {
            Ok(true) => { trace!("successful verification: {}", sig.keyid); true }
            Ok(false) => { trace!("failed verification: {}", sig.keyid); false }
            Err(err) => { trace!("failed verification for {}: {}", sig.keyid, err); false }
        }
    }
}
/// Encapsulate successfully verified data with additional metadata.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct Verified {
    pub role: RoleName,
    // The deserialized role payload.
    pub data: RoleData,
    // Raw metadata bytes; populated only when the metadata was new.
    pub json: Option<Vec<u8>>,
    // Version after verification and the version seen before it.
    pub new_ver: u64,
    pub old_ver: u64,
}
impl Verified {
    /// True when verification bumped the role version, i.e. the fetched
    /// metadata was newer than what was previously seen.
    pub fn is_new(&self) -> bool {
        self.new_ver > self.old_ver
    }
}
#[cfg(test)]
mod tests {
use super::*;
use pem;
use std::collections::HashMap;
use std::net::Ipv4Addr;
use datatype::{EcuManifests, EcuVersion, TufCustom, TufMeta, TufSigned};
use http::TestClient;
    /// Build an `Uptane` fixture wired to local test servers, with metadata
    /// persistence disabled and the Director root keys pre-loaded from the
    /// `tests/uptane_basic` fixtures.
    fn new_uptane() -> Uptane {
        let mut uptane = Uptane {
            director_server: "http://localhost:8001".parse().unwrap(),
            repo_server: "http://localhost:8002".parse().unwrap(),
            metadata_path: "tests/uptane_basic".into(),
            persist_metadata: false,
            primary_ecu: "test-primary-serial".into(),
            private_key: PrivateKey {
                keyid: "e453c713367595e1a9e5c1de8b2c039fe4178094bdaf2d52b1993fdd1a76ee26".into(),
                // Test-only RSA key; never use outside the test suite.
                der_key: pem::parse("-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDdC9QttkMbF5qB\n2plVU2hhG2sieXS2CVc3E8rm/oYGc9EHnlPMcAuaBtn9jaBo37PVYO+VFInzMu9f\nVMLm7d/hQxv4PjTBpkXvw1Ad0Tqhg/R8Lc4SXPxWxlVhg0ahLn3kDFQeEkrTNW7k\nxpAxWiE8V09ETcPwyNhPfcWeiBePwh8ySJ10IzqHt2kXwVbmL4F/mMX07KBYWIcA\n52TQLs2VhZLIaUBv9ZBxymAvogGz28clx7tHOJ8LZ/daiMzmtv5UbXPdt+q55rLJ\nZ1TuG0CuRqhTOllXnIvAYRQr6WBaLkGGbezQO86MDHBsV5TsG6JHPorrr6ogo+Lf\npuH6dcnHAgMBAAECggEBAMC/fs45fzyRkXYn4srHh14d5YbTN9VAQd/SD3zrdn0L\n4rrs8Y90KHmv/cgeBkFMx+iJtYBev4fk41xScf2icTVhKnOF8sTls1hGDIdjmeeb\nQ8ZAvs++a39TRMJaEW2dN8NyiKsMMlkH3+H3z2ZpfE+8pm8eDHza9dwjBP6fF0SP\nV1XPd2OSrJlvrgBrAU/8WWXYSYK+5F28QtJKsTuiwQylIHyJkd8cgZhgYXlUVvTj\nnHFJblpAT0qphji7p8G4Ejg+LNxu/ZD+D3wQ6iIPgKFVdC4uXmPwlf1LeYqXW0+g\ngTmHY7a/y66yn1H4A5gyfx2EffFMQu0Sl1RqzDVYYjECgYEA9Hy2QsP3pxW27yLs\nCu5e8pp3vZpdkNA71+7v2BVvaoaATnsSBOzo3elgRYsN0On4ObtfQXB3eC9poNuK\nzWxj8bkPbVOCpSpq//sUSqkh/XCmAhDl78BkgmWDb4EFEgcAT2xPBTHkb70jVAXB\nE1HBwsBcXhdxzRt8IYiBG+68d/8CgYEA53SJYpJ809lfpAG0CU986FFD7Fi/SvcX\n21TVMn1LpHuH7MZ2QuehS0SWevvspkIUm5uT3PrhTxdohAInNEzsdeHhTU11utIO\nrKnrtgZXKsBG4idsHu5ZQzp4n3CBEpfPFbOtP/UEKI/IGaJWGXVgG4J6LWmQ9LK9\nilNTaOUQ7jkCgYB+YP0B9DTPLN1cLgwf9mokNA7TdrkJA2r7yuo2I5ZtVUt7xghh\nfWk+VMXMDP4+UMNcbGvn8s/+01thqDrOx0m+iO/djn6JDC01Vz98/IKydImLpdqG\nHUiXUwwnFmVdlTrm01DhmZHA5N8fLr5IU0m6dx8IEExmPt/ioaJDoxvPVwKBgC+8\n1H01M3PKWLSN+WEWOO/9muHLaCEBF7WQKKzSNODG7cEDKe8gsR7CFbtl7GhaJr/1\ndajVQdU7Qb5AZ2+dEgQ6Q2rbOBYBLy+jmE8hvaa+o6APe3hhtp1sGObhoG2CTB7w\nwSH42hO3nBDVb6auk9T4s1Rcep5No1Q9XW28GSLZAoGATFlXg1hqNKLO8xXq1Uzi\nkDrN6Ep/wq80hLltYPu3AXQn714DVwNa3qLP04dAYXbs9IaQotAYVVGf6N1IepLM\nfQU6Q9fp9FtQJdU+Mjj2WMJVWbL0ihcU8VZV5TviNvtvR1rkToxSLia7eh39AY5G\nvkgeMZm7SwqZ9c/ZFnjJDqc=\n-----END PRIVATE KEY-----").unwrap().contents
            },
            sig_type: SignatureType::RsaSsaPss,
            director_verifier: Verifier::default(),
            repo_verifier: Verifier::default(),
            atomic_wake_addr: SocketAddrV4::new(Ipv4Addr::new(232,0,0,101), 23211),
            atomic_msg_addr: SocketAddrV4::new(Ipv4Addr::new(232,0,0,102), 23212),
            atomic_timeout: Duration::from_secs(60),
        };
        uptane.add_root_keys(Service::Director).expect("add director root keys");
        uptane
    }
fn extract_custom(targets: HashMap<String, TufMeta>) -> HashMap<String, TufCustom> {
let mut out = HashMap::new();
for (file, meta) in targets {
let _ = meta.custom.map(|c| out.insert(file, c));
}
out
}
#[test]
fn test_read_manifest() {
let bytes = Util::read_file("tests/uptane_basic/director/manifest.json").expect("couldn't read manifest.json");
let signed = json::from_slice::<TufSigned>(&bytes).expect("couldn't load manifest");
let mut ecus = json::from_value::<EcuManifests>(signed.signed).expect("couldn't load signed manifest");
assert_eq!(ecus.primary_ecu_serial, "<primary_ecu_serial>");
assert_eq!(ecus.ecu_version_manifest.len(), 1);
let ver0 = ecus.ecu_version_manifest.pop().unwrap();
let ecu0 = json::from_value::<EcuVersion>(ver0.signed).expect("couldn't load first manifest");
assert_eq!(ecu0.installed_image.filepath, "<ostree_branch>-<ostree_commit>");
}
#[test]
fn test_get_targets() {
let mut uptane = new_uptane();
let client = TestClient::from_paths(&["tests/uptane_basic/director/targets.json"]);
let verified = uptane.get_director(&client, RoleName::Targets).expect("get targets");
assert!(verified.is_new());
let targets = verified.data.targets.expect("missing targets");
targets.get("/file.img").map(|meta| {
assert_eq!(meta.length, 1337);
let hash = meta.hashes.get("sha256").expect("sha256 hash");
assert_eq!(hash, "dd250ea90b872a4a9f439027ac49d853c753426f71f61ae44c2f360a16179fb9");
}).expect("get /file.img");
let custom = extract_custom(targets);
let image = custom.get("/file.img").expect("get /file.img custom");
assert_eq!(image.ecuIdentifier, "some-ecu-id");
}
#[test]
fn test_get_snapshot() {
let mut uptane = new_uptane();
let client = TestClient::from_paths(&["tests/uptane_basic/director/snapshot.json"]);
let verified = uptane.get_director(&client, RoleName::Snapshot).expect("couldn't get snapshot");
let metadata = verified.data.meta.as_ref().expect("missing meta");
assert!(verified.is_new());
let meta = metadata.get("targets.json").expect("no targets.json metadata");
assert_eq!(meta.length, 741);
let hash = meta.hashes.get("sha256").expect("couldn't get sha256 hash");
assert_eq!(hash, "b10b36997574e6898dda4cfeb61c5f286d84dfa4be807950f14996cd476e6305");
}
#[test]
fn test_get_timestamp() {
let mut uptane = new_uptane();
let client = TestClient::from_paths(&["tests/uptane_basic/director/timestamp.json"]);
let verified = uptane.get_director(&client, RoleName::Timestamp).expect("couldn't get timestamp");
let metadata = verified.data.meta.as_ref().expect("missing meta");
assert!(verified.is_new());
let meta = metadata.get("snapshot.json").expect("no snapshot.json metadata");
assert_eq!(meta.length, 784);
}
}
|
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::UnknownUnit;
use crate::approxeq::ApproxEq;
use crate::approxord::{max, min};
use crate::length::Length;
use crate::num::*;
use crate::point::{point2, point3, Point2D, Point3D};
use crate::scale::Scale;
use crate::size::{size2, size3, Size2D, Size3D};
use crate::transform2d::Transform2D;
use crate::transform3d::Transform3D;
use crate::trig::Trig;
use crate::Angle;
use core::cmp::{Eq, PartialEq};
use core::fmt;
use core::hash::Hash;
use core::iter::Sum;
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};
#[cfg(feature = "mint")]
use mint;
use num_traits::{Float, NumCast, Signed};
#[cfg(feature = "serde")]
use serde;
/// A 2d Vector tagged with a unit.
#[repr(C)]
pub struct Vector2D<T, U> {
    /// The `x` (traditionally, horizontal) coordinate.
    pub x: T,
    /// The `y` (traditionally, vertical) coordinate.
    pub y: T,
    /// Zero-sized marker binding the vector to its unit `U`.
    #[doc(hidden)]
    pub _unit: PhantomData<U>,
}
mint_vec!(Vector2D[x, y] = Vector2);
// Manual impl: `Copy` needs only `T: Copy` — the unit tag `U` is phantom.
impl<T: Copy, U> Copy for Vector2D<T, U> {}
impl<T: Clone, U> Clone for Vector2D<T, U> {
fn clone(&self) -> Self {
Vector2D {
x: self.x.clone(),
y: self.y.clone(),
_unit: PhantomData,
}
}
}
#[cfg(feature = "serde")]
impl<'de, T, U> serde::Deserialize<'de> for Vector2D<T, U>
where
    T: serde::Deserialize<'de>,
{
    // Deserializes from an `(x, y)` tuple, the format `Serialize` emits.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let (x, y) = serde::Deserialize::deserialize(deserializer)?;
        Ok(Vector2D {
            x,
            y,
            _unit: PhantomData,
        })
    }
}
#[cfg(feature = "serde")]
impl<T, U> serde::Serialize for Vector2D<T, U>
where
    T: serde::Serialize,
{
    // Serializes as a plain `(x, y)` tuple; the unit tag is not encoded.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        (&self.x, &self.y).serialize(serializer)
    }
}
// Marker on top of `PartialEq`; valid whenever the scalar type is `Eq`.
impl<T: Eq, U> Eq for Vector2D<T, U> {}
impl<T: PartialEq, U> PartialEq for Vector2D<T, U> {
    // Component-wise comparison; the phantom unit carries no data.
    fn eq(&self, other: &Self) -> bool {
        self.x == other.x && self.y == other.y
    }
}
impl<T: Hash, U> Hash for Vector2D<T, U> {
    // Hash both components in a fixed order so equal vectors hash equally.
    fn hash<H: core::hash::Hasher>(&self, h: &mut H) {
        self.x.hash(h);
        self.y.hash(h);
    }
}
// Trait version of the inherent `Vector2D::zero`, for generic contexts.
impl<T: Zero, U> Zero for Vector2D<T, U> {
    /// Constructor, setting all components to zero.
    #[inline]
    fn zero() -> Self {
        Vector2D::new(Zero::zero(), Zero::zero())
    }
}
impl<T: fmt::Debug, U> fmt::Debug for Vector2D<T, U> {
    // Formats as an anonymous tuple, e.g. `(1, 2)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("").field(&self.x).field(&self.y).finish()
    }
}
impl<T: Default, U> Default for Vector2D<T, U> {
    /// A vector with both components set to `T::default()`.
    fn default() -> Self {
        Vector2D::new(Default::default(), Default::default())
    }
}
impl<T, U> Vector2D<T, U> {
    /// Constructor, setting all components to zero.
    #[inline]
    pub fn zero() -> Self
    where
        T: Zero,
    {
        Vector2D::new(Zero::zero(), Zero::zero())
    }
    /// Constructor, setting all components to one.
    #[inline]
    pub fn one() -> Self
    where
        T: One,
    {
        Vector2D::new(One::one(), One::one())
    }
    /// Constructor taking scalar values directly.
    #[inline]
    pub const fn new(x: T, y: T) -> Self {
        Vector2D {
            x,
            y,
            _unit: PhantomData,
        }
    }
    /// Constructor setting all components to the same value.
    #[inline]
    pub fn splat(v: T) -> Self
    where
        T: Clone,
    {
        // Clone once for `x`; the original value moves into `y`.
        Vector2D {
            x: v.clone(),
            y: v,
            _unit: PhantomData,
        }
    }
    /// Constructor taking angle and length.
    pub fn from_angle_and_length(angle: Angle<T>, length: T) -> Self
    where
        T: Trig + Mul<Output = T> + Copy,
    {
        vec2(length * angle.radians.cos(), length * angle.radians.sin())
    }
    /// Constructor taking properly typed `Length`s instead of scalar values.
    #[inline]
    pub fn from_lengths(x: Length<T, U>, y: Length<T, U>) -> Self {
        vec2(x.0, y.0)
    }
    /// Tag a unit-less value with units.
    #[inline]
    pub fn from_untyped(p: Vector2D<T, UnknownUnit>) -> Self {
        vec2(p.x, p.y)
    }
    /// Computes the vector with absolute values of each component.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use std::{i32, f32};
    /// # use euclid::vec2;
    /// enum U {}
    ///
    /// assert_eq!(vec2::<_, U>(-1, 2).abs(), vec2(1, 2));
    ///
    /// let vec = vec2::<_, U>(f32::NAN, -f32::MAX).abs();
    /// assert!(vec.x.is_nan());
    /// assert_eq!(vec.y, f32::MAX);
    /// ```
    ///
    /// # Panics
    ///
    /// The behavior for each component follows the scalar type's implementation of
    /// `num_traits::Signed::abs`.
    pub fn abs(self) -> Self
    where
        T: Signed,
    {
        vec2(self.x.abs(), self.y.abs())
    }
    /// Dot product.
    #[inline]
    pub fn dot(self, other: Self) -> T
    where
        T: Add<Output = T> + Mul<Output = T>,
    {
        self.x * other.x + self.y * other.y
    }
    /// Returns the norm of the cross product [self.x, self.y, 0] x [other.x, other.y, 0].
    #[inline]
    pub fn cross(self, other: Self) -> T
    where
        T: Sub<Output = T> + Mul<Output = T>,
    {
        self.x * other.y - self.y * other.x
    }
    /// Returns the component-wise multiplication of the two vectors.
    #[inline]
    pub fn component_mul(self, other: Self) -> Self
    where
        T: Mul<Output = T>,
    {
        vec2(self.x * other.x, self.y * other.y)
    }
}
impl<T: Copy, U> Vector2D<T, U> {
    /// Create a 3d vector from this one, using the specified z value.
    #[inline]
    pub fn extend(self, z: T) -> Vector3D<T, U> {
        vec3(self.x, self.y, z)
    }
    /// Cast this vector into a point.
    ///
    /// Equivalent to adding this vector to the origin.
    #[inline]
    pub fn to_point(self) -> Point2D<T, U> {
        Point2D {
            x: self.x,
            y: self.y,
            _unit: PhantomData,
        }
    }
    /// Swap x and y.
    #[inline]
    pub fn yx(self) -> Self {
        vec2(self.y, self.x)
    }
    /// Cast this vector into a size.
    #[inline]
    pub fn to_size(self) -> Size2D<T, U> {
        size2(self.x, self.y)
    }
    /// Drop the units, preserving only the numeric value.
    #[inline]
    pub fn to_untyped(self) -> Vector2D<T, UnknownUnit> {
        vec2(self.x, self.y)
    }
    /// Cast the unit.
    // Same numeric values; only the phantom unit tag changes.
    #[inline]
    pub fn cast_unit<V>(self) -> Vector2D<T, V> {
        vec2(self.x, self.y)
    }
    /// Cast into an array with x and y.
    #[inline]
    pub fn to_array(self) -> [T; 2] {
        [self.x, self.y]
    }
    /// Cast into a tuple with x and y.
    #[inline]
    pub fn to_tuple(self) -> (T, T) {
        (self.x, self.y)
    }
    /// Convert into a 3d vector with `z` coordinate equals to `T::zero()`.
    #[inline]
    pub fn to_3d(self) -> Vector3D<T, U>
    where
        T: Zero,
    {
        vec3(self.x, self.y, Zero::zero())
    }
    /// Rounds each component to the nearest integer value.
    ///
    /// This behavior is preserved for negative values (unlike the basic cast).
    ///
    /// ```rust
    /// # use euclid::vec2;
    /// enum Mm {}
    ///
    /// assert_eq!(vec2::<_, Mm>(-0.1, -0.8).round(), vec2::<_, Mm>(0.0, -1.0))
    /// ```
    #[inline]
    #[must_use]
    pub fn round(self) -> Self
    where
        T: Round,
    {
        vec2(self.x.round(), self.y.round())
    }
    /// Rounds each component to the smallest integer equal or greater than the original value.
    ///
    /// This behavior is preserved for negative values (unlike the basic cast).
    ///
    /// ```rust
    /// # use euclid::vec2;
    /// enum Mm {}
    ///
    /// assert_eq!(vec2::<_, Mm>(-0.1, -0.8).ceil(), vec2::<_, Mm>(0.0, 0.0))
    /// ```
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self
    where
        T: Ceil,
    {
        vec2(self.x.ceil(), self.y.ceil())
    }
    /// Rounds each component to the biggest integer equal or lower than the original value.
    ///
    /// This behavior is preserved for negative values (unlike the basic cast).
    ///
    /// ```rust
    /// # use euclid::vec2;
    /// enum Mm {}
    ///
    /// assert_eq!(vec2::<_, Mm>(-0.1, -0.8).floor(), vec2::<_, Mm>(-1.0, -1.0))
    /// ```
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self
    where
        T: Floor,
    {
        vec2(self.x.floor(), self.y.floor())
    }
    /// Returns the signed angle between this vector and the x axis.
    /// Positive values counted counterclockwise, where 0 is `+x` axis, `PI/2`
    /// is `+y` axis.
    ///
    /// The returned angle is between -PI and PI.
    pub fn angle_from_x_axis(self) -> Angle<T>
    where
        T: Trig,
    {
        Angle::radians(Trig::fast_atan2(self.y, self.x))
    }
    /// Creates translation by this vector in vector units.
    #[inline]
    pub fn to_transform(self) -> Transform2D<T, U, U>
    where
        T: Zero + One,
    {
        Transform2D::translation(self.x, self.y)
    }
}
impl<T, U> Vector2D<T, U>
where
    T: Copy + Mul<T, Output = T> + Add<T, Output = T>,
{
    /// Returns the vector's length squared.
    #[inline]
    pub fn square_length(self) -> T {
        self.x * self.x + self.y * self.y
    }
    /// Returns this vector projected onto another one.
    ///
    /// Projecting onto a nil vector will cause a division by zero.
    #[inline]
    pub fn project_onto_vector(self, onto: Self) -> Self
    where
        T: Sub<T, Output = T> + Div<T, Output = T>,
    {
        onto * (self.dot(onto) / onto.square_length())
    }
    /// Returns the signed angle between this vector and another vector.
    ///
    /// The returned angle is between -PI and PI.
    pub fn angle_to(self, other: Self) -> Angle<T>
    where
        T: Sub<Output = T> + Trig,
    {
        // atan2(cross, dot) yields the signed angle from `self` to `other`.
        Angle::radians(Trig::fast_atan2(self.cross(other), self.dot(other)))
    }
}
impl<T: Float, U> Vector2D<T, U> {
    /// Returns the vector length.
    #[inline]
    pub fn length(self) -> T {
        self.square_length().sqrt()
    }
    /// Returns the vector with length of one unit.
    ///
    /// NOTE(review): dividing by a zero length produces NaN components;
    /// use `try_normalize` when that case can occur.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        self / self.length()
    }
    /// Returns the vector with length of one unit.
    ///
    /// Unlike [`Vector2D::normalize`](#method.normalize), this returns None in the case that the
    /// length of the vector is zero.
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let len = self.length();
        if len == T::zero() {
            None
        } else {
            Some(self / len)
        }
    }
    /// Return the normalized vector even if the length is larger than the max value of Float.
    #[inline]
    #[must_use]
    pub fn robust_normalize(self) -> Self {
        let length = self.length();
        if length.is_infinite() {
            // Pre-scale so the squared length no longer overflows to infinity.
            let scaled = self / T::max_value();
            scaled / scaled.length()
        } else {
            self / length
        }
    }
    /// Return this vector capped to a maximum length.
    #[inline]
    pub fn with_max_length(self, max_length: T) -> Self {
        // Compare squared lengths to avoid a sqrt in the common no-op case.
        let square_length = self.square_length();
        if square_length > max_length * max_length {
            return self * (max_length / square_length.sqrt());
        }
        self
    }
    /// Return this vector with a minimum length applied.
    #[inline]
    pub fn with_min_length(self, min_length: T) -> Self {
        let square_length = self.square_length();
        if square_length < min_length * min_length {
            return self * (min_length / square_length.sqrt());
        }
        self
    }
    /// Return this vector with minimum and maximum lengths applied.
    #[inline]
    pub fn clamp_length(self, min: T, max: T) -> Self {
        debug_assert!(min <= max);
        self.with_min_length(min).with_max_length(max)
    }
}
impl<T, U> Vector2D<T, U>
where
    T: Copy + One + Add<Output = T> + Sub<Output = T> + Mul<Output = T>,
{
    /// Linearly interpolate each component between this vector and another vector.
    ///
    /// `t` outside `[0, 1]` extrapolates, as the example shows.
    ///
    /// # Example
    ///
    /// ```rust
    /// use euclid::vec2;
    /// use euclid::default::Vector2D;
    ///
    /// let from: Vector2D<_> = vec2(0.0, 10.0);
    /// let to: Vector2D<_> = vec2(8.0, -4.0);
    ///
    /// assert_eq!(from.lerp(to, -1.0), vec2(-8.0, 24.0));
    /// assert_eq!(from.lerp(to, 0.0), vec2( 0.0, 10.0));
    /// assert_eq!(from.lerp(to, 0.5), vec2( 4.0, 3.0));
    /// assert_eq!(from.lerp(to, 1.0), vec2( 8.0, -4.0));
    /// assert_eq!(from.lerp(to, 2.0), vec2(16.0, -18.0));
    /// ```
    #[inline]
    pub fn lerp(self, other: Self, t: T) -> Self {
        let one_t = T::one() - t;
        self * one_t + other * t
    }
    /// Returns a reflection vector using an incident ray and a surface normal.
    #[inline]
    pub fn reflect(self, normal: Self) -> Self {
        let two = T::one() + T::one();
        self - normal * two * self.dot(normal)
    }
}
impl<T: PartialOrd, U> Vector2D<T, U> {
    /// Returns the vector each component of which are minimum of this vector and another.
    #[inline]
    pub fn min(self, other: Self) -> Self {
        vec2(min(self.x, other.x), min(self.y, other.y))
    }
    /// Returns the vector each component of which are maximum of this vector and another.
    #[inline]
    pub fn max(self, other: Self) -> Self {
        vec2(max(self.x, other.x), max(self.y, other.y))
    }
    /// Returns the vector each component of which is clamped by corresponding
    /// components of `start` and `end`.
    ///
    /// Shortcut for `self.max(start).min(end)`.
    #[inline]
    pub fn clamp(self, start: Self, end: Self) -> Self
    where
        T: Copy,
    {
        self.max(start).min(end)
    }
    /// Returns vector with results of "greater than" operation on each component.
    #[inline]
    pub fn greater_than(self, other: Self) -> BoolVector2D {
        BoolVector2D {
            x: self.x > other.x,
            y: self.y > other.y,
        }
    }
    /// Returns vector with results of "lower than" operation on each component.
    #[inline]
    pub fn lower_than(self, other: Self) -> BoolVector2D {
        BoolVector2D {
            x: self.x < other.x,
            y: self.y < other.y,
        }
    }
}
impl<T: PartialEq, U> Vector2D<T, U> {
    /// Returns vector with results of "equal" operation on each component.
    #[inline]
    pub fn equal(self, other: Self) -> BoolVector2D {
        BoolVector2D {
            x: self.x == other.x,
            y: self.y == other.y,
        }
    }
    /// Returns vector with results of "not equal" operation on each component.
    #[inline]
    pub fn not_equal(self, other: Self) -> BoolVector2D {
        BoolVector2D {
            x: self.x != other.x,
            y: self.y != other.y,
        }
    }
}
impl<T: NumCast + Copy, U> Vector2D<T, U> {
    /// Cast from one numeric representation to another, preserving the units.
    ///
    /// When casting from floating vector to integer coordinates, the decimals are truncated
    /// as one would expect from a simple cast, but this behavior does not always make sense
    /// geometrically. Consider using `round()`, `ceil()` or `floor()` before casting.
    ///
    /// # Panics
    ///
    /// Panics if any component fails to cast; see `try_cast` for the
    /// fallible version.
    #[inline]
    pub fn cast<NewT: NumCast>(self) -> Vector2D<NewT, U> {
        self.try_cast().unwrap()
    }
    /// Fallible cast from one numeric representation to another, preserving the units.
    ///
    /// When casting from floating vector to integer coordinates, the decimals are truncated
    /// as one would expect from a simple cast, but this behavior does not always make sense
    /// geometrically. Consider using `round()`, `ceil()` or `floor()` before casting.
    pub fn try_cast<NewT: NumCast>(self) -> Option<Vector2D<NewT, U>> {
        // Both components must cast successfully or the whole cast fails.
        match (NumCast::from(self.x), NumCast::from(self.y)) {
            (Some(x), Some(y)) => Some(Vector2D::new(x, y)),
            _ => None,
        }
    }
    // Convenience functions for common casts.
    /// Cast into an `f32` vector.
    #[inline]
    pub fn to_f32(self) -> Vector2D<f32, U> {
        self.cast()
    }
    /// Cast into an `f64` vector.
    #[inline]
    pub fn to_f64(self) -> Vector2D<f64, U> {
        self.cast()
    }
    /// Cast into an `usize` vector, truncating decimals if any.
    ///
    /// When casting from floating vector vectors, it is worth considering whether
    /// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
    /// the desired conversion behavior.
    #[inline]
    pub fn to_usize(self) -> Vector2D<usize, U> {
        self.cast()
    }
    /// Cast into an `u32` vector, truncating decimals if any.
    ///
    /// When casting from floating vector vectors, it is worth considering whether
    /// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
    /// the desired conversion behavior.
    #[inline]
    pub fn to_u32(self) -> Vector2D<u32, U> {
        self.cast()
    }
    /// Cast into an i32 vector, truncating decimals if any.
    ///
    /// When casting from floating vector vectors, it is worth considering whether
    /// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
    /// the desired conversion behavior.
    #[inline]
    pub fn to_i32(self) -> Vector2D<i32, U> {
        self.cast()
    }
    /// Cast into an i64 vector, truncating decimals if any.
    ///
    /// When casting from floating vector vectors, it is worth considering whether
    /// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
    /// the desired conversion behavior.
    #[inline]
    pub fn to_i64(self) -> Vector2D<i64, U> {
        self.cast()
    }
}
impl<T: Neg, U> Neg for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn neg(self) -> Self::Output {
vec2(-self.x, -self.y)
}
}
impl<T: Add, U> Add for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn add(self, other: Self) -> Self::Output {
Vector2D::new(self.x + other.x, self.y + other.y)
}
}
impl<T: Add + Copy, U> Add<&Self> for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn add(self, other: &Self) -> Self::Output {
Vector2D::new(self.x + other.x, self.y + other.y)
}
}
impl<T: Add<Output = T> + Zero, U> Sum for Vector2D<T, U> {
fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(Self::zero(), Add::add)
}
}
impl<'a, T: 'a + Add<Output = T> + Copy + Zero, U: 'a> Sum<&'a Self> for Vector2D<T, U> {
fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(Self::zero(), Add::add)
}
}
impl<T: Copy + Add<T, Output = T>, U> AddAssign for Vector2D<T, U> {
#[inline]
fn add_assign(&mut self, other: Self) {
*self = *self + other
}
}
impl<T: Sub, U> Sub for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn sub(self, other: Self) -> Self::Output {
vec2(self.x - other.x, self.y - other.y)
}
}
impl<T: Copy + Sub<T, Output = T>, U> SubAssign<Vector2D<T, U>> for Vector2D<T, U> {
#[inline]
fn sub_assign(&mut self, other: Self) {
*self = *self - other
}
}
impl<T: Copy + Mul, U> Mul<T> for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn mul(self, scale: T) -> Self::Output {
vec2(self.x * scale, self.y * scale)
}
}
impl<T: Copy + Mul<T, Output = T>, U> MulAssign<T> for Vector2D<T, U> {
#[inline]
fn mul_assign(&mut self, scale: T) {
*self = *self * scale
}
}
impl<T: Copy + Mul, U1, U2> Mul<Scale<T, U1, U2>> for Vector2D<T, U1> {
type Output = Vector2D<T::Output, U2>;
#[inline]
fn mul(self, scale: Scale<T, U1, U2>) -> Self::Output {
vec2(self.x * scale.0, self.y * scale.0)
}
}
impl<T: Copy + MulAssign, U> MulAssign<Scale<T, U, U>> for Vector2D<T, U> {
#[inline]
fn mul_assign(&mut self, scale: Scale<T, U, U>) {
self.x *= scale.0;
self.y *= scale.0;
}
}
impl<T: Copy + Div, U> Div<T> for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn div(self, scale: T) -> Self::Output {
vec2(self.x / scale, self.y / scale)
}
}
impl<T: Copy + Div<T, Output = T>, U> DivAssign<T> for Vector2D<T, U> {
#[inline]
fn div_assign(&mut self, scale: T) {
*self = *self / scale
}
}
impl<T: Copy + Div, U1, U2> Div<Scale<T, U1, U2>> for Vector2D<T, U2> {
type Output = Vector2D<T::Output, U1>;
#[inline]
fn div(self, scale: Scale<T, U1, U2>) -> Self::Output {
vec2(self.x / scale.0, self.y / scale.0)
}
}
impl<T: Copy + DivAssign, U> DivAssign<Scale<T, U, U>> for Vector2D<T, U> {
#[inline]
fn div_assign(&mut self, scale: Scale<T, U, U>) {
self.x /= scale.0;
self.y /= scale.0;
}
}
impl<T: Round, U> Round for Vector2D<T, U> {
/// See [`Vector2D::round()`](#method.round)
#[inline]
fn round(self) -> Self {
self.round()
}
}
impl<T: Ceil, U> Ceil for Vector2D<T, U> {
/// See [`Vector2D::ceil()`](#method.ceil)
#[inline]
fn ceil(self) -> Self {
self.ceil()
}
}
impl<T: Floor, U> Floor for Vector2D<T, U> {
/// See [`Vector2D::floor()`](#method.floor)
#[inline]
fn floor(self) -> Self {
self.floor()
}
}
impl<T: ApproxEq<T>, U> ApproxEq<Vector2D<T, U>> for Vector2D<T, U> {
#[inline]
fn approx_epsilon() -> Self {
vec2(T::approx_epsilon(), T::approx_epsilon())
}
#[inline]
fn approx_eq_eps(&self, other: &Self, eps: &Self) -> bool {
self.x.approx_eq_eps(&other.x, &eps.x) && self.y.approx_eq_eps(&other.y, &eps.y)
}
}
impl<T, U> Into<[T; 2]> for Vector2D<T, U> {
fn into(self) -> [T; 2] {
[self.x, self.y]
}
}
impl<T, U> From<[T; 2]> for Vector2D<T, U> {
fn from([x, y]: [T; 2]) -> Self {
vec2(x, y)
}
}
impl<T, U> Into<(T, T)> for Vector2D<T, U> {
fn into(self) -> (T, T) {
(self.x, self.y)
}
}
impl<T, U> From<(T, T)> for Vector2D<T, U> {
fn from(tuple: (T, T)) -> Self {
vec2(tuple.0, tuple.1)
}
}
impl<T, U> From<Size2D<T, U>> for Vector2D<T, U> {
fn from(size: Size2D<T, U>) -> Self {
vec2(size.width, size.height)
}
}
/// A 3d Vector tagged with a unit.
#[repr(C)]
pub struct Vector3D<T, U> {
    /// The `x` (traditionally, horizontal) coordinate.
    pub x: T,
    /// The `y` (traditionally, vertical) coordinate.
    pub y: T,
    /// The `z` (traditionally, depth) coordinate.
    pub z: T,
    /// Zero-sized marker tying the vector to its unit `U` at the type level;
    /// it occupies no space at runtime.
    #[doc(hidden)]
    pub _unit: PhantomData<U>,
}
// Generates conversions between `Vector3D` and `mint::Vector3` — macro
// defined elsewhere in this crate; presumably gated on the `mint` feature
// (confirm against the macro definition).
mint_vec!(Vector3D[x, y, z] = Vector3);
impl<T: Copy, U> Copy for Vector3D<T, U> {}
impl<T: Clone, U> Clone for Vector3D<T, U> {
fn clone(&self) -> Self {
Vector3D {
x: self.x.clone(),
y: self.y.clone(),
z: self.z.clone(),
_unit: PhantomData,
}
}
}
#[cfg(feature = "serde")]
impl<'de, T, U> serde::Deserialize<'de> for Vector3D<T, U>
where
    T: serde::Deserialize<'de>,
{
    /// Deserializes from a plain `(x, y, z)` tuple, mirroring `Serialize`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let (x, y, z): (T, T, T) = serde::Deserialize::deserialize(deserializer)?;
        Ok(Vector3D::new(x, y, z))
    }
}

#[cfg(feature = "serde")]
impl<T, U> serde::Serialize for Vector3D<T, U>
where
    T: serde::Serialize,
{
    /// Serializes as a plain `(x, y, z)` tuple; the unit marker is omitted.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let coords = (&self.x, &self.y, &self.z);
        coords.serialize(serializer)
    }
}
impl<T: Eq, U> Eq for Vector3D<T, U> {}

impl<T: PartialEq, U> PartialEq for Vector3D<T, U> {
    /// Coordinate-wise equality; the unit marker carries no data.
    fn eq(&self, other: &Self) -> bool {
        self.x.eq(&other.x) && self.y.eq(&other.y) && self.z.eq(&other.z)
    }
}

impl<T: Hash, U> Hash for Vector3D<T, U> {
    /// Hashes the coordinates in `x`, `y`, `z` order (a tuple's `Hash` impl
    /// feeds each field to the hasher sequentially, matching the field-by-field
    /// form).
    fn hash<H: core::hash::Hasher>(&self, h: &mut H) {
        (&self.x, &self.y, &self.z).hash(h);
    }
}
impl<T: Zero, U> Zero for Vector3D<T, U> {
    /// Constructor, setting all components to zero.
    #[inline]
    fn zero() -> Self {
        vec3(T::zero(), T::zero(), T::zero())
    }
}

impl<T: fmt::Debug, U> fmt::Debug for Vector3D<T, U> {
    /// Formats as an anonymous tuple `(x, y, z)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_tuple("");
        builder.field(&self.x);
        builder.field(&self.y);
        builder.field(&self.z);
        builder.finish()
    }
}

impl<T: Default, U> Default for Vector3D<T, U> {
    /// Each coordinate takes `T`'s default value.
    fn default() -> Self {
        vec3(T::default(), T::default(), T::default())
    }
}
impl<T, U> Vector3D<T, U> {
/// Constructor, setting all components to zero.
#[inline]
pub fn zero() -> Self
where
T: Zero,
{
vec3(Zero::zero(), Zero::zero(), Zero::zero())
}
/// Constructor, setting all components to one.
#[inline]
pub fn one() -> Self
where
T: One,
{
vec3(One::one(), One::one(), One::one())
}
/// Constructor taking scalar values directly.
#[inline]
pub const fn new(x: T, y: T, z: T) -> Self {
Vector3D {
x,
y,
z,
_unit: PhantomData,
}
}
/// Constructor setting all components to the same value.
#[inline]
pub fn splat(v: T) -> Self
where
T: Clone,
{
Vector3D {
x: v.clone(),
y: v.clone(),
z: v,
_unit: PhantomData,
}
}
/// Constructor taking properly Lengths instead of scalar values.
#[inline]
pub fn from_lengths(x: Length<T, U>, y: Length<T, U>, z: Length<T, U>) -> Vector3D<T, U> {
vec3(x.0, y.0, z.0)
}
/// Tag a unitless value with units.
#[inline]
pub fn from_untyped(p: Vector3D<T, UnknownUnit>) -> Self {
vec3(p.x, p.y, p.z)
}
/// Computes the vector with absolute values of each component.
///
/// # Example
///
/// ```rust
/// # use std::{i32, f32};
/// # use euclid::vec3;
/// enum U {}
///
/// assert_eq!(vec3::<_, U>(-1, 0, 2).abs(), vec3(1, 0, 2));
///
/// let vec = vec3::<_, U>(f32::NAN, 0.0, -f32::MAX).abs();
/// assert!(vec.x.is_nan());
/// assert_eq!(vec.y, 0.0);
/// assert_eq!(vec.z, f32::MAX);
/// ```
///
/// # Panics
///
/// The behavior for each component follows the scalar type's implementation of
/// `num_traits::Signed::abs`.
pub fn abs(self) -> Self
where
T: Signed,
{
vec3(self.x.abs(), self.y.abs(), self.z.abs())
}
/// Dot product.
#[inline]
pub fn dot(self, other: Self) -> T
where
T: Add<Output = T> + Mul<Output = T>,
{
self.x * other.x + self.y * other.y + self.z * other.z
}
}
impl<T: Copy, U> Vector3D<T, U> {
    /// Cross product.
    #[inline]
    pub fn cross(self, other: Self) -> Self
    where
        T: Sub<Output = T> + Mul<Output = T>,
    {
        // Each component is the 2x2 determinant of the other two axes.
        vec3(
            self.y * other.z - self.z * other.y,
            self.z * other.x - self.x * other.z,
            self.x * other.y - self.y * other.x,
        )
    }

    /// Returns the component-wise multiplication of the two vectors.
    #[inline]
    pub fn component_mul(self, other: Self) -> Self
    where
        T: Mul<Output = T>,
    {
        vec3(self.x * other.x, self.y * other.y, self.z * other.z)
    }

    /// Cast this vector into a point.
    ///
    /// Equivalent to adding this vector to the origin.
    #[inline]
    pub fn to_point(self) -> Point3D<T, U> {
        point3(self.x, self.y, self.z)
    }

    /// Returns a 2d vector using this vector's x and y coordinates.
    #[inline]
    pub fn xy(self) -> Vector2D<T, U> {
        vec2(self.x, self.y)
    }

    /// Returns a 2d vector using this vector's x and z coordinates.
    #[inline]
    pub fn xz(self) -> Vector2D<T, U> {
        vec2(self.x, self.z)
    }

    /// Returns a 2d vector using this vector's y and z coordinates.
    #[inline]
    pub fn yz(self) -> Vector2D<T, U> {
        vec2(self.y, self.z)
    }

    /// Cast into an array with x, y and z.
    #[inline]
    pub fn to_array(self) -> [T; 3] {
        [self.x, self.y, self.z]
    }

    /// Cast into an array with x, y, z and 0.
    #[inline]
    pub fn to_array_4d(self) -> [T; 4]
    where
        T: Zero,
    {
        [self.x, self.y, self.z, Zero::zero()]
    }

    /// Cast into a tuple with x, y and z.
    #[inline]
    pub fn to_tuple(self) -> (T, T, T) {
        (self.x, self.y, self.z)
    }

    /// Cast into a tuple with x, y, z and 0.
    #[inline]
    pub fn to_tuple_4d(self) -> (T, T, T, T)
    where
        T: Zero,
    {
        (self.x, self.y, self.z, Zero::zero())
    }

    /// Drop the units, preserving only the numeric value.
    #[inline]
    pub fn to_untyped(self) -> Vector3D<T, UnknownUnit> {
        vec3(self.x, self.y, self.z)
    }

    /// Cast the unit.
    #[inline]
    pub fn cast_unit<V>(self) -> Vector3D<T, V> {
        vec3(self.x, self.y, self.z)
    }

    /// Convert into a 2d vector, dropping the z coordinate.
    #[inline]
    pub fn to_2d(self) -> Vector2D<T, U> {
        self.xy()
    }

    /// Rounds each component to the nearest integer value.
    ///
    /// This behavior is preserved for negative values (unlike the basic cast).
    ///
    /// ```rust
    /// # use euclid::vec3;
    /// enum Mm {}
    ///
    /// assert_eq!(vec3::<_, Mm>(-0.1, -0.8, 0.4).round(), vec3::<_, Mm>(0.0, -1.0, 0.0))
    /// ```
    #[inline]
    #[must_use]
    pub fn round(self) -> Self
    where
        T: Round,
    {
        vec3(self.x.round(), self.y.round(), self.z.round())
    }

    /// Rounds each component to the smallest integer equal or greater than the original value.
    ///
    /// This behavior is preserved for negative values (unlike the basic cast).
    ///
    /// ```rust
    /// # use euclid::vec3;
    /// enum Mm {}
    ///
    /// assert_eq!(vec3::<_, Mm>(-0.1, -0.8, 0.4).ceil(), vec3::<_, Mm>(0.0, 0.0, 1.0))
    /// ```
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self
    where
        T: Ceil,
    {
        vec3(self.x.ceil(), self.y.ceil(), self.z.ceil())
    }

    /// Rounds each component to the biggest integer equal or lower than the original value.
    ///
    /// This behavior is preserved for negative values (unlike the basic cast).
    ///
    /// ```rust
    /// # use euclid::vec3;
    /// enum Mm {}
    ///
    /// assert_eq!(vec3::<_, Mm>(-0.1, -0.8, 0.4).floor(), vec3::<_, Mm>(-1.0, -1.0, 0.0))
    /// ```
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self
    where
        T: Floor,
    {
        vec3(self.x.floor(), self.y.floor(), self.z.floor())
    }

    /// Creates translation by this vector in vector units.
    #[inline]
    pub fn to_transform(self) -> Transform3D<T, U, U>
    where
        T: Zero + One,
    {
        Transform3D::translation(self.x, self.y, self.z)
    }
}
impl<T, U> Vector3D<T, U>
where
    T: Copy + Mul<T, Output = T> + Add<T, Output = T>,
{
    /// Returns the vector's length squared.
    #[inline]
    pub fn square_length(self) -> T {
        // Sum of squared components, left-associated like `a + b + c`.
        let xx = self.x * self.x;
        let yy = self.y * self.y;
        let zz = self.z * self.z;
        xx + yy + zz
    }

    /// Returns this vector projected onto another one.
    ///
    /// Projecting onto a nil vector will cause a division by zero.
    #[inline]
    pub fn project_onto_vector(self, onto: Self) -> Self
    where
        T: Sub<T, Output = T> + Div<T, Output = T>,
    {
        let factor = self.dot(onto) / onto.square_length();
        onto * factor
    }
}
impl<T: Float, U> Vector3D<T, U> {
    /// Returns the positive angle between this vector and another vector.
    ///
    /// The returned angle is between 0 and PI.
    pub fn angle_to(self, other: Self) -> Angle<T>
    where
        T: Trig,
    {
        // atan2(|a x b|, a . b) gives the unsigned angle between the vectors.
        Angle::radians(Trig::fast_atan2(
            self.cross(other).length(),
            self.dot(other),
        ))
    }

    /// Returns the vector length.
    #[inline]
    pub fn length(self) -> T {
        self.square_length().sqrt()
    }

    /// Returns the vector with length of one unit.
    ///
    /// A zero-length vector yields NaN components (0.0 / 0.0); use
    /// [`try_normalize`](#method.try_normalize) when the vector may be nil.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        self / self.length()
    }

    /// Returns the vector with length of one unit.
    ///
    /// Unlike [`Vector3D::normalize`](#method.normalize), this returns None in the case that the
    /// length of the vector is zero.
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let len = self.length();
        if len == T::zero() {
            None
        } else {
            Some(self / len)
        }
    }

    /// Return the normalized vector even if the length is larger than the max value of Float.
    #[inline]
    #[must_use]
    pub fn robust_normalize(self) -> Self {
        let length = self.length();
        if length.is_infinite() {
            // Length overflowed: pre-scale by 1/MAX so the second pass is finite.
            let scaled = self / T::max_value();
            scaled / scaled.length()
        } else {
            self / length
        }
    }

    /// Return this vector capped to a maximum length.
    #[inline]
    pub fn with_max_length(self, max_length: T) -> Self {
        // Compare squared lengths so the common no-op path avoids a sqrt.
        let square_length = self.square_length();
        if square_length > max_length * max_length {
            return self * (max_length / square_length.sqrt());
        }
        self
    }

    /// Return this vector with a minimum length applied.
    #[inline]
    pub fn with_min_length(self, min_length: T) -> Self {
        let square_length = self.square_length();
        if square_length < min_length * min_length {
            return self * (min_length / square_length.sqrt());
        }
        self
    }

    /// Return this vector with minimum and maximum lengths applied.
    #[inline]
    pub fn clamp_length(self, min: T, max: T) -> Self {
        debug_assert!(min <= max);
        self.with_min_length(min).with_max_length(max)
    }
}
impl<T, U> Vector3D<T, U>
where
    T: Copy + One + Add<Output = T> + Sub<Output = T> + Mul<Output = T>,
{
    /// Linearly interpolate each component between this vector and another vector.
    ///
    /// # Example
    ///
    /// ```rust
    /// use euclid::vec3;
    /// use euclid::default::Vector3D;
    ///
    /// let from: Vector3D<_> = vec3(0.0, 10.0, -1.0);
    /// let to: Vector3D<_> = vec3(8.0, -4.0, 0.0);
    ///
    /// assert_eq!(from.lerp(to, -1.0), vec3(-8.0, 24.0, -2.0));
    /// assert_eq!(from.lerp(to, 0.0), vec3( 0.0, 10.0, -1.0));
    /// assert_eq!(from.lerp(to, 0.5), vec3( 4.0, 3.0, -0.5));
    /// assert_eq!(from.lerp(to, 1.0), vec3( 8.0, -4.0, 0.0));
    /// assert_eq!(from.lerp(to, 2.0), vec3(16.0, -18.0, 1.0));
    /// ```
    #[inline]
    pub fn lerp(self, other: Self, t: T) -> Self {
        // self * (1 - t) + other * t, computed with the same operation order
        // as the classic two-step form.
        self * (T::one() - t) + other * t
    }

    /// Returns a reflection vector using an incident ray and a surface normal.
    #[inline]
    pub fn reflect(self, normal: Self) -> Self {
        let double = T::one() + T::one();
        self - normal * double * self.dot(normal)
    }
}
impl<T: PartialOrd, U> Vector3D<T, U> {
/// Returns the vector each component of which are minimum of this vector and another.
#[inline]
pub fn min(self, other: Self) -> Self {
vec3(
min(self.x, other.x),
min(self.y, other.y),
min(self.z, other.z),
)
}
/// Returns the vector each component of which are maximum of this vector and another.
#[inline]
pub fn max(self, other: Self) -> Self {
vec3(
max(self.x, other.x),
max(self.y, other.y),
max(self.z, other.z),
)
}
/// Returns the vector each component of which is clamped by corresponding
/// components of `start` and `end`.
///
/// Shortcut for `self.max(start).min(end)`.
#[inline]
pub fn clamp(self, start: Self, end: Self) -> Self
where
T: Copy,
{
self.max(start).min(end)
}
/// Returns vector with results of "greater than" operation on each component.
#[inline]
pub fn greater_than(self, other: Self) -> BoolVector3D {
BoolVector3D {
x: self.x > other.x,
y: self.y > other.y,
z: self.z > other.z,
}
}
/// Returns vector with results of "lower than" operation on each component.
#[inline]
pub fn lower_than(self, other: Self) -> BoolVector3D {
BoolVector3D {
x: self.x < other.x,
y: self.y < other.y,
z: self.z < other.z,
}
}
}
impl<T: PartialEq, U> Vector3D<T, U> {
/// Returns vector with results of "equal" operation on each component.
#[inline]
pub fn equal(self, other: Self) -> BoolVector3D {
BoolVector3D {
x: self.x == other.x,
y: self.y == other.y,
z: self.z == other.z,
}
}
/// Returns vector with results of "not equal" operation on each component.
#[inline]
pub fn not_equal(self, other: Self) -> BoolVector3D {
BoolVector3D {
x: self.x != other.x,
y: self.y != other.y,
z: self.z != other.z,
}
}
}
impl<T: NumCast + Copy, U> Vector3D<T, U> {
    /// Cast from one numeric representation to another, preserving the units.
    ///
    /// When casting from floating vector to integer coordinates, the decimals are truncated
    /// as one would expect from a simple cast, but this behavior does not always make sense
    /// geometrically. Consider using `round()`, `ceil()` or `floor()` before casting.
    #[inline]
    pub fn cast<NewT: NumCast>(self) -> Vector3D<NewT, U> {
        // Delegates to the fallible cast and panics on failure.
        self.try_cast::<NewT>().unwrap()
    }

    /// Fallible cast from one numeric representation to another, preserving the units.
    ///
    /// When casting from floating vector to integer coordinates, the decimals are truncated
    /// as one would expect from a simple cast, but this behavior does not always make sense
    /// geometrically. Consider using `round()`, `ceil()` or `floor()` before casting.
    pub fn try_cast<NewT: NumCast>(self) -> Option<Vector3D<NewT, U>> {
        // `?` short-circuits to `None` as soon as any component fails to convert.
        let x = NumCast::from(self.x)?;
        let y = NumCast::from(self.y)?;
        let z = NumCast::from(self.z)?;
        Some(vec3(x, y, z))
    }

    // Convenience functions for common casts.

    /// Cast into an `f32` vector.
    #[inline]
    pub fn to_f32(self) -> Vector3D<f32, U> {
        self.cast::<f32>()
    }

    /// Cast into an `f64` vector.
    #[inline]
    pub fn to_f64(self) -> Vector3D<f64, U> {
        self.cast::<f64>()
    }

    /// Cast into an `usize` vector, truncating decimals if any.
    ///
    /// When casting from floating vector vectors, it is worth considering whether
    /// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
    /// the desired conversion behavior.
    #[inline]
    pub fn to_usize(self) -> Vector3D<usize, U> {
        self.cast::<usize>()
    }

    /// Cast into an `u32` vector, truncating decimals if any.
    ///
    /// When casting from floating vector vectors, it is worth considering whether
    /// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
    /// the desired conversion behavior.
    #[inline]
    pub fn to_u32(self) -> Vector3D<u32, U> {
        self.cast::<u32>()
    }

    /// Cast into an `i32` vector, truncating decimals if any.
    ///
    /// When casting from floating vector vectors, it is worth considering whether
    /// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
    /// the desired conversion behavior.
    #[inline]
    pub fn to_i32(self) -> Vector3D<i32, U> {
        self.cast::<i32>()
    }

    /// Cast into an `i64` vector, truncating decimals if any.
    ///
    /// When casting from floating vector vectors, it is worth considering whether
    /// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
    /// the desired conversion behavior.
    #[inline]
    pub fn to_i64(self) -> Vector3D<i64, U> {
        self.cast::<i64>()
    }
}
impl<T: Neg, U> Neg for Vector3D<T, U> {
type Output = Vector3D<T::Output, U>;
#[inline]
fn neg(self) -> Self::Output {
vec3(-self.x, -self.y, -self.z)
}
}
impl<T: Add, U> Add for Vector3D<T, U> {
type Output = Vector3D<T::Output, U>;
#[inline]
fn add(self, other: Self) -> Self::Output {
vec3(self.x + other.x, self.y + other.y, self.z + other.z)
}
}
impl<'a, T: 'a + Add + Copy, U: 'a> Add<&Self> for Vector3D<T, U> {
type Output = Vector3D<T::Output, U>;
#[inline]
fn add(self, other: &Self) -> Self::Output {
vec3(self.x + other.x, self.y + other.y, self.z + other.z)
}
}
impl<T: Add<Output = T> + Zero, U> Sum for Vector3D<T, U> {
fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(Self::zero(), Add::add)
}
}
impl<'a, T: 'a + Add<Output = T> + Copy + Zero, U: 'a> Sum<&'a Self> for Vector3D<T, U> {
fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(Self::zero(), Add::add)
}
}
impl<T: Copy + Add<T, Output = T>, U> AddAssign for Vector3D<T, U> {
#[inline]
fn add_assign(&mut self, other: Self) {
*self = *self + other
}
}
impl<T: Sub, U> Sub for Vector3D<T, U> {
type Output = Vector3D<T::Output, U>;
#[inline]
fn sub(self, other: Self) -> Self::Output {
vec3(self.x - other.x, self.y - other.y, self.z - other.z)
}
}
impl<T: Copy + Sub<T, Output = T>, U> SubAssign<Vector3D<T, U>> for Vector3D<T, U> {
#[inline]
fn sub_assign(&mut self, other: Self) {
*self = *self - other
}
}
impl<T: Copy + Mul, U> Mul<T> for Vector3D<T, U> {
type Output = Vector3D<T::Output, U>;
#[inline]
fn mul(self, scale: T) -> Self::Output {
vec3(
self.x * scale,
self.y * scale,
self.z * scale,
)
}
}
impl<T: Copy + Mul<T, Output = T>, U> MulAssign<T> for Vector3D<T, U> {
#[inline]
fn mul_assign(&mut self, scale: T) {
*self = *self * scale
}
}
impl<T: Copy + Mul, U1, U2> Mul<Scale<T, U1, U2>> for Vector3D<T, U1> {
type Output = Vector3D<T::Output, U2>;
#[inline]
fn mul(self, scale: Scale<T, U1, U2>) -> Self::Output {
vec3(
self.x * scale.0,
self.y * scale.0,
self.z * scale.0,
)
}
}
impl<T: Copy + MulAssign, U> MulAssign<Scale<T, U, U>> for Vector3D<T, U> {
#[inline]
fn mul_assign(&mut self, scale: Scale<T, U, U>) {
self.x *= scale.0;
self.y *= scale.0;
self.z *= scale.0;
}
}
impl<T: Copy + Div, U> Div<T> for Vector3D<T, U> {
type Output = Vector3D<T::Output, U>;
#[inline]
fn div(self, scale: T) -> Self::Output {
vec3(
self.x / scale,
self.y / scale,
self.z / scale,
)
}
}
impl<T: Copy + Div<T, Output = T>, U> DivAssign<T> for Vector3D<T, U> {
#[inline]
fn div_assign(&mut self, scale: T) {
*self = *self / scale
}
}
impl<T: Copy + Div, U1, U2> Div<Scale<T, U1, U2>> for Vector3D<T, U2> {
type Output = Vector3D<T::Output, U1>;
#[inline]
fn div(self, scale: Scale<T, U1, U2>) -> Self::Output {
vec3(
self.x / scale.0,
self.y / scale.0,
self.z / scale.0,
)
}
}
impl<T: Copy + DivAssign, U> DivAssign<Scale<T, U, U>> for Vector3D<T, U> {
#[inline]
fn div_assign(&mut self, scale: Scale<T, U, U>) {
self.x /= scale.0;
self.y /= scale.0;
self.z /= scale.0;
}
}
impl<T: Round, U> Round for Vector3D<T, U> {
/// See [`Vector3D::round()`](#method.round)
#[inline]
fn round(self) -> Self {
self.round()
}
}
impl<T: Ceil, U> Ceil for Vector3D<T, U> {
/// See [`Vector3D::ceil()`](#method.ceil)
#[inline]
fn ceil(self) -> Self {
self.ceil()
}
}
impl<T: Floor, U> Floor for Vector3D<T, U> {
/// See [`Vector3D::floor()`](#method.floor)
#[inline]
fn floor(self) -> Self {
self.floor()
}
}
impl<T: ApproxEq<T>, U> ApproxEq<Vector3D<T, U>> for Vector3D<T, U> {
#[inline]
fn approx_epsilon() -> Self {
vec3(
T::approx_epsilon(),
T::approx_epsilon(),
T::approx_epsilon(),
)
}
#[inline]
fn approx_eq_eps(&self, other: &Self, eps: &Self) -> bool {
self.x.approx_eq_eps(&other.x, &eps.x)
&& self.y.approx_eq_eps(&other.y, &eps.y)
&& self.z.approx_eq_eps(&other.z, &eps.z)
}
}
impl<T, U> Into<[T; 3]> for Vector3D<T, U> {
fn into(self) -> [T; 3] {
[self.x, self.y, self.z]
}
}
impl<T, U> From<[T; 3]> for Vector3D<T, U> {
fn from([x, y, z]: [T; 3]) -> Self {
vec3(x, y, z)
}
}
impl<T, U> Into<(T, T, T)> for Vector3D<T, U> {
fn into(self) -> (T, T, T) {
(self.x, self.y, self.z)
}
}
impl<T, U> From<(T, T, T)> for Vector3D<T, U> {
fn from(tuple: (T, T, T)) -> Self {
vec3(tuple.0, tuple.1, tuple.2)
}
}
/// A 2d vector of booleans, useful for component-wise logic operations.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct BoolVector2D {
    /// The `x` component.
    pub x: bool,
    /// The `y` component.
    pub y: bool,
}

/// A 3d vector of booleans, useful for component-wise logic operations.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct BoolVector3D {
    /// The `x` component.
    pub x: bool,
    /// The `y` component.
    pub y: bool,
    /// The `z` component.
    pub z: bool,
}
impl BoolVector2D {
/// Returns `true` if all components are `true` and `false` otherwise.
#[inline]
pub fn all(self) -> bool {
self.x && self.y
}
/// Returns `true` if any component are `true` and `false` otherwise.
#[inline]
pub fn any(self) -> bool {
self.x || self.y
}
/// Returns `true` if all components are `false` and `false` otherwise. Negation of `any()`.
#[inline]
pub fn none(self) -> bool {
!self.any()
}
/// Returns new vector with by-component AND operation applied.
#[inline]
pub fn and(self, other: Self) -> Self {
BoolVector2D {
x: self.x && other.x,
y: self.y && other.y,
}
}
/// Returns new vector with by-component OR operation applied.
#[inline]
pub fn or(self, other: Self) -> Self {
BoolVector2D {
x: self.x || other.x,
y: self.y || other.y,
}
}
/// Returns new vector with results of negation operation on each component.
#[inline]
pub fn not(self) -> Self {
BoolVector2D {
x: !self.x,
y: !self.y,
}
}
/// Returns point, each component of which or from `a`, or from `b` depending on truly value
/// of corresponding vector component. `true` selects value from `a` and `false` from `b`.
#[inline]
pub fn select_point<T, U>(self, a: Point2D<T, U>, b: Point2D<T, U>) -> Point2D<T, U> {
point2(
if self.x { a.x } else { b.x },
if self.y { a.y } else { b.y },
)
}
/// Returns vector, each component of which or from `a`, or from `b` depending on truly value
/// of corresponding vector component. `true` selects value from `a` and `false` from `b`.
#[inline]
pub fn select_vector<T, U>(self, a: Vector2D<T, U>, b: Vector2D<T, U>) -> Vector2D<T, U> {
vec2(
if self.x { a.x } else { b.x },
if self.y { a.y } else { b.y },
)
}
/// Returns size, each component of which or from `a`, or from `b` depending on truly value
/// of corresponding vector component. `true` selects value from `a` and `false` from `b`.
#[inline]
pub fn select_size<T, U>(self, a: Size2D<T, U>, b: Size2D<T, U>) -> Size2D<T, U> {
size2(
if self.x { a.width } else { b.width },
if self.y { a.height } else { b.height },
)
}
}
impl BoolVector3D {
/// Returns `true` if all components are `true` and `false` otherwise.
#[inline]
pub fn all(self) -> bool {
self.x && self.y && self.z
}
/// Returns `true` if any component are `true` and `false` otherwise.
#[inline]
pub fn any(self) -> bool {
self.x || self.y || self.z
}
/// Returns `true` if all components are `false` and `false` otherwise. Negation of `any()`.
#[inline]
pub fn none(self) -> bool {
!self.any()
}
/// Returns new vector with by-component AND operation applied.
#[inline]
pub fn and(self, other: Self) -> Self {
BoolVector3D {
x: self.x && other.x,
y: self.y && other.y,
z: self.z && other.z,
}
}
/// Returns new vector with by-component OR operation applied.
#[inline]
pub fn or(self, other: Self) -> Self {
BoolVector3D {
x: self.x || other.x,
y: self.y || other.y,
z: self.z || other.z,
}
}
/// Returns new vector with results of negation operation on each component.
#[inline]
pub fn not(self) -> Self {
BoolVector3D {
x: !self.x,
y: !self.y,
z: !self.z,
}
}
/// Returns point, each component of which or from `a`, or from `b` depending on truly value
/// of corresponding vector component. `true` selects value from `a` and `false` from `b`.
#[inline]
pub fn select_point<T, U>(self, a: Point3D<T, U>, b: Point3D<T, U>) -> Point3D<T, U> {
point3(
if self.x { a.x } else { b.x },
if self.y { a.y } else { b.y },
if self.z { a.z } else { b.z },
)
}
/// Returns vector, each component of which or from `a`, or from `b` depending on truly value
/// of corresponding vector component. `true` selects value from `a` and `false` from `b`.
#[inline]
pub fn select_vector<T, U>(self, a: Vector3D<T, U>, b: Vector3D<T, U>) -> Vector3D<T, U> {
vec3(
if self.x { a.x } else { b.x },
if self.y { a.y } else { b.y },
if self.z { a.z } else { b.z },
)
}
/// Returns size, each component of which or from `a`, or from `b` depending on truly value
/// of corresponding vector component. `true` selects value from `a` and `false` from `b`.
#[inline]
#[must_use]
pub fn select_size<T, U>(self, a: Size3D<T, U>, b: Size3D<T, U>) -> Size3D<T, U> {
size3(
if self.x { a.width } else { b.width },
if self.y { a.height } else { b.height },
if self.z { a.depth } else { b.depth },
)
}
/// Returns a 2d vector using this vector's x and y coordinates.
#[inline]
pub fn xy(self) -> BoolVector2D {
BoolVector2D {
x: self.x,
y: self.y,
}
}
/// Returns a 2d vector using this vector's x and z coordinates.
#[inline]
pub fn xz(self) -> BoolVector2D {
BoolVector2D {
x: self.x,
y: self.z,
}
}
/// Returns a 2d vector using this vector's y and z coordinates.
#[inline]
pub fn yz(self) -> BoolVector2D {
BoolVector2D {
x: self.y,
y: self.z,
}
}
}
/// Convenience constructor.
#[inline]
pub fn vec2<T, U>(x: T, y: T) -> Vector2D<T, U> {
Vector2D {
x,
y,
_unit: PhantomData,
}
}
/// Convenience constructor.
#[inline]
pub fn vec3<T, U>(x: T, y: T, z: T) -> Vector3D<T, U> {
Vector3D {
x,
y,
z,
_unit: PhantomData,
}
}
/// Shorthand for `BoolVector2D { x, y }`.
///
/// Convenience constructor mirroring [`vec2`].
#[inline]
pub fn bvec2(x: bool, y: bool) -> BoolVector2D {
    BoolVector2D { x, y }
}

/// Shorthand for `BoolVector3D { x, y, z }`.
///
/// Convenience constructor mirroring [`vec3`].
#[inline]
pub fn bvec3(x: bool, y: bool, z: bool) -> BoolVector3D {
    BoolVector3D { x, y, z }
}
#[cfg(test)]
mod vector2d {
use crate::scale::Scale;
use crate::{default, vec2};
#[cfg(feature = "mint")]
use mint;
type Vec2 = default::Vector2D<f32>;
#[test]
pub fn test_scalar_mul() {
let p1: Vec2 = vec2(3.0, 5.0);
let result = p1 * 5.0;
assert_eq!(result, Vec2::new(15.0, 25.0));
}
#[test]
pub fn test_dot() {
let p1: Vec2 = vec2(2.0, 7.0);
let p2: Vec2 = vec2(13.0, 11.0);
assert_eq!(p1.dot(p2), 103.0);
}
#[test]
pub fn test_cross() {
let p1: Vec2 = vec2(4.0, 7.0);
let p2: Vec2 = vec2(13.0, 8.0);
let r = p1.cross(p2);
assert_eq!(r, -59.0);
}
#[test]
pub fn test_normalize() {
use std::f32;
let p0: Vec2 = Vec2::zero();
let p1: Vec2 = vec2(4.0, 0.0);
let p2: Vec2 = vec2(3.0, -4.0);
assert!(p0.normalize().x.is_nan() && p0.normalize().y.is_nan());
assert_eq!(p1.normalize(), vec2(1.0, 0.0));
assert_eq!(p2.normalize(), vec2(0.6, -0.8));
let p3: Vec2 = vec2(::std::f32::MAX, ::std::f32::MAX);
assert_ne!(
p3.normalize(),
vec2(1.0 / 2.0f32.sqrt(), 1.0 / 2.0f32.sqrt())
);
assert_eq!(
p3.robust_normalize(),
vec2(1.0 / 2.0f32.sqrt(), 1.0 / 2.0f32.sqrt())
);
let p4: Vec2 = Vec2::zero();
assert!(p4.try_normalize().is_none());
let p5: Vec2 = Vec2::new(f32::MIN_POSITIVE, f32::MIN_POSITIVE);
assert!(p5.try_normalize().is_none());
let p6: Vec2 = vec2(4.0, 0.0);
let p7: Vec2 = vec2(3.0, -4.0);
assert_eq!(p6.try_normalize().unwrap(), vec2(1.0, 0.0));
assert_eq!(p7.try_normalize().unwrap(), vec2(0.6, -0.8));
}
#[test]
pub fn test_min() {
let p1: Vec2 = vec2(1.0, 3.0);
let p2: Vec2 = vec2(2.0, 2.0);
let result = p1.min(p2);
assert_eq!(result, vec2(1.0, 2.0));
}
#[test]
pub fn test_max() {
let p1: Vec2 = vec2(1.0, 3.0);
let p2: Vec2 = vec2(2.0, 2.0);
let result = p1.max(p2);
assert_eq!(result, vec2(2.0, 3.0));
}
#[test]
pub fn test_angle_from_x_axis() {
use crate::approxeq::ApproxEq;
use core::f32::consts::FRAC_PI_2;
let right: Vec2 = vec2(10.0, 0.0);
let down: Vec2 = vec2(0.0, 4.0);
let up: Vec2 = vec2(0.0, -1.0);
assert!(right.angle_from_x_axis().get().approx_eq(&0.0));
assert!(down.angle_from_x_axis().get().approx_eq(&FRAC_PI_2));
assert!(up.angle_from_x_axis().get().approx_eq(&-FRAC_PI_2));
}
#[test]
pub fn test_angle_to() {
use crate::approxeq::ApproxEq;
use core::f32::consts::FRAC_PI_2;
let right: Vec2 = vec2(10.0, 0.0);
let right2: Vec2 = vec2(1.0, 0.0);
let up: Vec2 = vec2(0.0, -1.0);
let up_left: Vec2 = vec2(-1.0, -1.0);
assert!(right.angle_to(right2).get().approx_eq(&0.0));
assert!(right.angle_to(up).get().approx_eq(&-FRAC_PI_2));
assert!(up.angle_to(right).get().approx_eq(&FRAC_PI_2));
assert!(up_left
.angle_to(up)
.get()
.approx_eq_eps(&(0.5 * FRAC_PI_2), &0.0005));
}
#[test]
pub fn test_with_max_length() {
use crate::approxeq::ApproxEq;
let v1: Vec2 = vec2(0.5, 0.5);
let v2: Vec2 = vec2(1.0, 0.0);
let v3: Vec2 = vec2(0.1, 0.2);
let v4: Vec2 = vec2(2.0, -2.0);
let v5: Vec2 = vec2(1.0, 2.0);
let v6: Vec2 = vec2(-1.0, 3.0);
assert_eq!(v1.with_max_length(1.0), v1);
assert_eq!(v2.with_max_length(1.0), v2);
assert_eq!(v3.with_max_length(1.0), v3);
assert_eq!(v4.with_max_length(10.0), v4);
assert_eq!(v5.with_max_length(10.0), v5);
assert_eq!(v6.with_max_length(10.0), v6);
let v4_clamped = v4.with_max_length(1.0);
assert!(v4_clamped.length().approx_eq(&1.0));
assert!(v4_clamped.normalize().approx_eq(&v4.normalize()));
let v5_clamped = v5.with_max_length(1.5);
assert!(v5_clamped.length().approx_eq(&1.5));
assert!(v5_clamped.normalize().approx_eq(&v5.normalize()));
let v6_clamped = v6.with_max_length(2.5);
assert!(v6_clamped.length().approx_eq(&2.5));
assert!(v6_clamped.normalize().approx_eq(&v6.normalize()));
}
#[test]
pub fn test_project_onto_vector() {
use crate::approxeq::ApproxEq;
let v1: Vec2 = vec2(1.0, 2.0);
let x: Vec2 = vec2(1.0, 0.0);
let y: Vec2 = vec2(0.0, 1.0);
assert!(v1.project_onto_vector(x).approx_eq(&vec2(1.0, 0.0)));
assert!(v1.project_onto_vector(y).approx_eq(&vec2(0.0, 2.0)));
assert!(v1.project_onto_vector(-x).approx_eq(&vec2(1.0, 0.0)));
assert!(v1.project_onto_vector(x * 10.0).approx_eq(&vec2(1.0, 0.0)));
assert!(v1.project_onto_vector(v1 * 2.0).approx_eq(&v1));
assert!(v1.project_onto_vector(-v1).approx_eq(&v1));
}
#[cfg(feature = "mint")]
#[test]
pub fn test_mint() {
let v1 = Vec2::new(1.0, 3.0);
let vm: mint::Vector2<_> = v1.into();
let v2 = Vec2::from(vm);
assert_eq!(v1, v2);
}
// Uninhabited unit tags used to exercise typed (unit-carrying) arithmetic.
pub enum Mm {}
pub enum Cm {}
// Convenience aliases for vectors tagged with the test units above.
pub type Vector2DMm<T> = super::Vector2D<T, Mm>;
pub type Vector2DCm<T> = super::Vector2D<T, Cm>;
#[test]
pub fn test_add() {
    // Addition works both by value and with a borrowed right-hand side.
    let a = Vector2DMm::new(1.0, 2.0);
    let b = Vector2DMm::new(3.0, 4.0);
    let expected = vec2(4.0, 6.0);
    assert_eq!(a + b, expected);
    assert_eq!(a + &b, expected);
}
#[test]
pub fn test_sum() {
let vecs = [
Vector2DMm::new(1.0, 2.0),
Vector2DMm::new(3.0, 4.0),
Vector2DMm::new(5.0, 6.0)
];
let sum = Vector2DMm::new(9.0, 12.0);
assert_eq!(vecs.iter().sum::<Vector2DMm<_>>(), sum);
assert_eq!(vecs.into_iter().sum::<Vector2DMm<_>>(), sum);
}
#[test]
pub fn test_add_assign() {
    // `+=` mutates the vector in place through the AddAssign impl.
    let mut acc = Vector2DMm::new(1.0, 2.0);
    acc += vec2(3.0, 4.0);
    assert_eq!(acc, vec2(4.0, 6.0));
}
#[test]
// Renamed from `test_tpyed_scalar_mul` (typo) to match the vector3d
// counterpart `test_typed_scalar_mul`.
pub fn test_typed_scalar_mul() {
    // Multiplying by a `Scale<f32, Mm, Cm>` converts the unit tag from
    // millimeters to centimeters while scaling each component.
    let p1 = Vector2DMm::new(1.0, 2.0);
    let cm_per_mm = Scale::<f32, Mm, Cm>::new(0.1);
    let result: Vector2DCm<f32> = p1 * cm_per_mm;
    assert_eq!(result, vec2(0.1, 0.2));
}
#[test]
pub fn test_swizzling() {
    // `yx()` swaps the two components.
    let v: default::Vector2D<i32> = vec2(1, 2);
    assert_eq!(v.yx(), vec2(2, 1));
}
#[test]
pub fn test_reflect() {
use crate::approxeq::ApproxEq;
// Reflect `a` about surfaces with normals n1 (straight up in a y-down
// convention) and n2 (normalized diagonal). Expected results follow
// r = a - 2 * (a . n) * n.
let a: Vec2 = vec2(1.0, 3.0);
let n1: Vec2 = vec2(0.0, -1.0);
let n2: Vec2 = vec2(1.0, -1.0).normalize();
assert!(a.reflect(n1).approx_eq(&vec2(1.0, -3.0)));
assert!(a.reflect(n2).approx_eq(&vec2(3.0, 1.0)));
}
}
#[cfg(test)]
mod vector3d {
use crate::scale::Scale;
use crate::{default, vec2, vec3};
#[cfg(feature = "mint")]
use mint;
type Vec3 = default::Vector3D<f32>;
#[test]
pub fn test_add() {
let p1 = Vec3::new(1.0, 2.0, 3.0);
let p2 = Vec3::new(4.0, 5.0, 6.0);
assert_eq!(p1 + p2, vec3(5.0, 7.0, 9.0));
assert_eq!(p1 + &p2, vec3(5.0, 7.0, 9.0));
}
#[test]
pub fn test_sum() {
let vecs = [
Vec3::new(1.0, 2.0, 3.0),
Vec3::new(4.0, 5.0, 6.0),
Vec3::new(7.0, 8.0, 9.0)
];
let sum = Vec3::new(12.0, 15.0, 18.0);
assert_eq!(vecs.iter().sum::<Vec3>(), sum);
assert_eq!(vecs.into_iter().sum::<Vec3>(), sum);
}
#[test]
pub fn test_dot() {
let p1: Vec3 = vec3(7.0, 21.0, 32.0);
let p2: Vec3 = vec3(43.0, 5.0, 16.0);
assert_eq!(p1.dot(p2), 918.0);
}
#[test]
pub fn test_cross() {
let p1: Vec3 = vec3(4.0, 7.0, 9.0);
let p2: Vec3 = vec3(13.0, 8.0, 3.0);
let p3 = p1.cross(p2);
assert_eq!(p3, vec3(-51.0, 105.0, -59.0));
}
#[test]
pub fn test_normalize() {
use std::f32;
let p0: Vec3 = Vec3::zero();
let p1: Vec3 = vec3(0.0, -6.0, 0.0);
let p2: Vec3 = vec3(1.0, 2.0, -2.0);
assert!(
p0.normalize().x.is_nan() && p0.normalize().y.is_nan() && p0.normalize().z.is_nan()
);
assert_eq!(p1.normalize(), vec3(0.0, -1.0, 0.0));
assert_eq!(p2.normalize(), vec3(1.0 / 3.0, 2.0 / 3.0, -2.0 / 3.0));
let p3: Vec3 = vec3(::std::f32::MAX, ::std::f32::MAX, 0.0);
assert_ne!(
p3.normalize(),
vec3(1.0 / 2.0f32.sqrt(), 1.0 / 2.0f32.sqrt(), 0.0)
);
assert_eq!(
p3.robust_normalize(),
vec3(1.0 / 2.0f32.sqrt(), 1.0 / 2.0f32.sqrt(), 0.0)
);
let p4: Vec3 = Vec3::zero();
assert!(p4.try_normalize().is_none());
let p5: Vec3 = Vec3::new(f32::MIN_POSITIVE, f32::MIN_POSITIVE, f32::MIN_POSITIVE);
assert!(p5.try_normalize().is_none());
let p6: Vec3 = vec3(4.0, 0.0, 3.0);
let p7: Vec3 = vec3(3.0, -4.0, 0.0);
assert_eq!(p6.try_normalize().unwrap(), vec3(0.8, 0.0, 0.6));
assert_eq!(p7.try_normalize().unwrap(), vec3(0.6, -0.8, 0.0));
}
#[test]
pub fn test_min() {
let p1: Vec3 = vec3(1.0, 3.0, 5.0);
let p2: Vec3 = vec3(2.0, 2.0, -1.0);
let result = p1.min(p2);
assert_eq!(result, vec3(1.0, 2.0, -1.0));
}
#[test]
pub fn test_max() {
let p1: Vec3 = vec3(1.0, 3.0, 5.0);
let p2: Vec3 = vec3(2.0, 2.0, -1.0);
let result = p1.max(p2);
assert_eq!(result, vec3(2.0, 3.0, 5.0));
}
#[test]
pub fn test_clamp() {
let p1: Vec3 = vec3(1.0, -1.0, 5.0);
let p2: Vec3 = vec3(2.0, 5.0, 10.0);
let p3: Vec3 = vec3(-1.0, 2.0, 20.0);
let result = p3.clamp(p1, p2);
assert_eq!(result, vec3(1.0, 2.0, 10.0));
}
#[test]
pub fn test_typed_scalar_mul() {
enum Mm {}
enum Cm {}
let p1 = super::Vector3D::<f32, Mm>::new(1.0, 2.0, 3.0);
let cm_per_mm = Scale::<f32, Mm, Cm>::new(0.1);
let result: super::Vector3D<f32, Cm> = p1 * cm_per_mm;
assert_eq!(result, vec3(0.1, 0.2, 0.3));
}
#[test]
pub fn test_swizzling() {
let p: Vec3 = vec3(1.0, 2.0, 3.0);
assert_eq!(p.xy(), vec2(1.0, 2.0));
assert_eq!(p.xz(), vec2(1.0, 3.0));
assert_eq!(p.yz(), vec2(2.0, 3.0));
}
#[cfg(feature = "mint")]
#[test]
pub fn test_mint() {
let v1 = Vec3::new(1.0, 3.0, 5.0);
let vm: mint::Vector3<_> = v1.into();
let v2 = Vec3::from(vm);
assert_eq!(v1, v2);
}
#[test]
pub fn test_reflect() {
use crate::approxeq::ApproxEq;
let a: Vec3 = vec3(1.0, 3.0, 2.0);
let n1: Vec3 = vec3(0.0, -1.0, 0.0);
let n2: Vec3 = vec3(0.0, 1.0, 1.0).normalize();
assert!(a.reflect(n1).approx_eq(&vec3(1.0, -3.0, 2.0)));
assert!(a.reflect(n2).approx_eq(&vec3(1.0, -2.0, -3.0)));
}
#[test]
pub fn test_angle_to() {
use crate::approxeq::ApproxEq;
use core::f32::consts::FRAC_PI_2;
let right: Vec3 = vec3(10.0, 0.0, 0.0);
let right2: Vec3 = vec3(1.0, 0.0, 0.0);
let up: Vec3 = vec3(0.0, -1.0, 0.0);
let up_left: Vec3 = vec3(-1.0, -1.0, 0.0);
assert!(right.angle_to(right2).get().approx_eq(&0.0));
assert!(right.angle_to(up).get().approx_eq(&FRAC_PI_2));
assert!(up.angle_to(right).get().approx_eq(&FRAC_PI_2));
assert!(up_left
.angle_to(up)
.get()
.approx_eq_eps(&(0.5 * FRAC_PI_2), &0.0005));
}
#[test]
pub fn test_with_max_length() {
use crate::approxeq::ApproxEq;
let v1: Vec3 = vec3(0.5, 0.5, 0.0);
let v2: Vec3 = vec3(1.0, 0.0, 0.0);
let v3: Vec3 = vec3(0.1, 0.2, 0.3);
let v4: Vec3 = vec3(2.0, -2.0, 2.0);
let v5: Vec3 = vec3(1.0, 2.0, -3.0);
let v6: Vec3 = vec3(-1.0, 3.0, 2.0);
assert_eq!(v1.with_max_length(1.0), v1);
assert_eq!(v2.with_max_length(1.0), v2);
assert_eq!(v3.with_max_length(1.0), v3);
assert_eq!(v4.with_max_length(10.0), v4);
assert_eq!(v5.with_max_length(10.0), v5);
assert_eq!(v6.with_max_length(10.0), v6);
let v4_clamped = v4.with_max_length(1.0);
assert!(v4_clamped.length().approx_eq(&1.0));
assert!(v4_clamped.normalize().approx_eq(&v4.normalize()));
let v5_clamped = v5.with_max_length(1.5);
assert!(v5_clamped.length().approx_eq(&1.5));
assert!(v5_clamped.normalize().approx_eq(&v5.normalize()));
let v6_clamped = v6.with_max_length(2.5);
assert!(v6_clamped.length().approx_eq(&2.5));
assert!(v6_clamped.normalize().approx_eq(&v6.normalize()));
}
#[test]
pub fn test_project_onto_vector() {
use crate::approxeq::ApproxEq;
let v1: Vec3 = vec3(1.0, 2.0, 3.0);
let x: Vec3 = vec3(1.0, 0.0, 0.0);
let y: Vec3 = vec3(0.0, 1.0, 0.0);
let z: Vec3 = vec3(0.0, 0.0, 1.0);
assert!(v1.project_onto_vector(x).approx_eq(&vec3(1.0, 0.0, 0.0)));
assert!(v1.project_onto_vector(y).approx_eq(&vec3(0.0, 2.0, 0.0)));
assert!(v1.project_onto_vector(z).approx_eq(&vec3(0.0, 0.0, 3.0)));
assert!(v1.project_onto_vector(-x).approx_eq(&vec3(1.0, 0.0, 0.0)));
assert!(v1
.project_onto_vector(x * 10.0)
.approx_eq(&vec3(1.0, 0.0, 0.0)));
assert!(v1.project_onto_vector(v1 * 2.0).approx_eq(&v1));
assert!(v1.project_onto_vector(-v1).approx_eq(&v1));
}
}
#[cfg(test)]
mod bool_vector {
use super::*;
use crate::default;
type Vec2 = default::Vector2D<f32>;
type Vec3 = default::Vector3D<f32>;
#[test]
fn test_bvec2() {
assert_eq!(
Vec2::new(1.0, 2.0).greater_than(Vec2::new(2.0, 1.0)),
bvec2(false, true),
);
assert_eq!(
Vec2::new(1.0, 2.0).lower_than(Vec2::new(2.0, 1.0)),
bvec2(true, false),
);
assert_eq!(
Vec2::new(1.0, 2.0).equal(Vec2::new(1.0, 3.0)),
bvec2(true, false),
);
assert_eq!(
Vec2::new(1.0, 2.0).not_equal(Vec2::new(1.0, 3.0)),
bvec2(false, true),
);
assert!(bvec2(true, true).any());
assert!(bvec2(false, true).any());
assert!(bvec2(true, false).any());
assert!(!bvec2(false, false).any());
assert!(bvec2(false, false).none());
assert!(bvec2(true, true).all());
assert!(!bvec2(false, true).all());
assert!(!bvec2(true, false).all());
assert!(!bvec2(false, false).all());
assert_eq!(bvec2(true, false).not(), bvec2(false, true));
assert_eq!(
bvec2(true, false).and(bvec2(true, true)),
bvec2(true, false)
);
assert_eq!(bvec2(true, false).or(bvec2(true, true)), bvec2(true, true));
assert_eq!(
bvec2(true, false).select_vector(Vec2::new(1.0, 2.0), Vec2::new(3.0, 4.0)),
Vec2::new(1.0, 4.0),
);
}
#[test]
fn test_bvec3() {
assert_eq!(
Vec3::new(1.0, 2.0, 3.0).greater_than(Vec3::new(3.0, 2.0, 1.0)),
bvec3(false, false, true),
);
assert_eq!(
Vec3::new(1.0, 2.0, 3.0).lower_than(Vec3::new(3.0, 2.0, 1.0)),
bvec3(true, false, false),
);
assert_eq!(
Vec3::new(1.0, 2.0, 3.0).equal(Vec3::new(3.0, 2.0, 1.0)),
bvec3(false, true, false),
);
assert_eq!(
Vec3::new(1.0, 2.0, 3.0).not_equal(Vec3::new(3.0, 2.0, 1.0)),
bvec3(true, false, true),
);
assert!(bvec3(true, true, false).any());
assert!(bvec3(false, true, false).any());
assert!(bvec3(true, false, false).any());
assert!(!bvec3(false, false, false).any());
assert!(bvec3(false, false, false).none());
assert!(bvec3(true, true, true).all());
assert!(!bvec3(false, true, false).all());
assert!(!bvec3(true, false, false).all());
assert!(!bvec3(false, false, false).all());
assert_eq!(bvec3(true, false, true).not(), bvec3(false, true, false));
assert_eq!(
bvec3(true, false, true).and(bvec3(true, true, false)),
bvec3(true, false, false)
);
assert_eq!(
bvec3(true, false, false).or(bvec3(true, true, false)),
bvec3(true, true, false)
);
assert_eq!(
bvec3(true, false, true)
.select_vector(Vec3::new(1.0, 2.0, 3.0), Vec3::new(4.0, 5.0, 6.0)),
Vec3::new(1.0, 5.0, 3.0),
);
}
}
Auto merge of #477 - rsaarelm:const-vec, r=nical
Make vec shorthand constructors const fns
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::UnknownUnit;
use crate::approxeq::ApproxEq;
use crate::approxord::{max, min};
use crate::length::Length;
use crate::num::*;
use crate::point::{point2, point3, Point2D, Point3D};
use crate::scale::Scale;
use crate::size::{size2, size3, Size2D, Size3D};
use crate::transform2d::Transform2D;
use crate::transform3d::Transform3D;
use crate::trig::Trig;
use crate::Angle;
use core::cmp::{Eq, PartialEq};
use core::fmt;
use core::hash::Hash;
use core::iter::Sum;
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};
#[cfg(feature = "mint")]
use mint;
use num_traits::{Float, NumCast, Signed};
#[cfg(feature = "serde")]
use serde;
/// A 2d Vector tagged with a unit.
#[repr(C)]
pub struct Vector2D<T, U> {
/// The `x` (traditionally, horizontal) coordinate.
pub x: T,
/// The `y` (traditionally, vertical) coordinate.
pub y: T,
// Zero-sized marker tying the vector to its unit `U` without storing data.
#[doc(hidden)]
pub _unit: PhantomData<U>,
}
mint_vec!(Vector2D[x, y] = Vector2);
impl<T: Copy, U> Copy for Vector2D<T, U> {}
impl<T: Clone, U> Clone for Vector2D<T, U> {
fn clone(&self) -> Self {
Vector2D {
x: self.x.clone(),
y: self.y.clone(),
_unit: PhantomData,
}
}
}
#[cfg(feature = "serde")]
impl<'de, T, U> serde::Deserialize<'de> for Vector2D<T, U>
where
T: serde::Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let (x, y) = serde::Deserialize::deserialize(deserializer)?;
Ok(Vector2D {
x,
y,
_unit: PhantomData,
})
}
}
#[cfg(feature = "serde")]
impl<T, U> serde::Serialize for Vector2D<T, U>
where
T: serde::Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
(&self.x, &self.y).serialize(serializer)
}
}
impl<T: Eq, U> Eq for Vector2D<T, U> {}
impl<T: PartialEq, U> PartialEq for Vector2D<T, U> {
fn eq(&self, other: &Self) -> bool {
self.x == other.x && self.y == other.y
}
}
impl<T: Hash, U> Hash for Vector2D<T, U> {
fn hash<H: core::hash::Hasher>(&self, h: &mut H) {
self.x.hash(h);
self.y.hash(h);
}
}
impl<T: Zero, U> Zero for Vector2D<T, U> {
/// Constructor, setting all components to zero.
#[inline]
fn zero() -> Self {
Vector2D::new(Zero::zero(), Zero::zero())
}
}
impl<T: fmt::Debug, U> fmt::Debug for Vector2D<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("").field(&self.x).field(&self.y).finish()
}
}
impl<T: Default, U> Default for Vector2D<T, U> {
fn default() -> Self {
Vector2D::new(Default::default(), Default::default())
}
}
impl<T, U> Vector2D<T, U> {
/// Constructor, setting all components to zero.
#[inline]
pub fn zero() -> Self
where
T: Zero,
{
Vector2D::new(Zero::zero(), Zero::zero())
}
/// Constructor, setting all components to one.
#[inline]
pub fn one() -> Self
where
T: One,
{
Vector2D::new(One::one(), One::one())
}
/// Constructor taking scalar values directly.
#[inline]
pub const fn new(x: T, y: T) -> Self {
Vector2D {
x,
y,
_unit: PhantomData,
}
}
/// Constructor setting all components to the same value.
#[inline]
pub fn splat(v: T) -> Self
where
T: Clone,
{
Vector2D {
x: v.clone(),
y: v,
_unit: PhantomData,
}
}
/// Constructor taking angle and length
pub fn from_angle_and_length(angle: Angle<T>, length: T) -> Self
where
T: Trig + Mul<Output = T> + Copy,
{
vec2(length * angle.radians.cos(), length * angle.radians.sin())
}
/// Constructor taking properly Lengths instead of scalar values.
#[inline]
pub fn from_lengths(x: Length<T, U>, y: Length<T, U>) -> Self {
vec2(x.0, y.0)
}
/// Tag a unit-less value with units.
#[inline]
pub fn from_untyped(p: Vector2D<T, UnknownUnit>) -> Self {
vec2(p.x, p.y)
}
/// Computes the vector with absolute values of each component.
///
/// # Example
///
/// ```rust
/// # use std::{i32, f32};
/// # use euclid::vec2;
/// enum U {}
///
/// assert_eq!(vec2::<_, U>(-1, 2).abs(), vec2(1, 2));
///
/// let vec = vec2::<_, U>(f32::NAN, -f32::MAX).abs();
/// assert!(vec.x.is_nan());
/// assert_eq!(vec.y, f32::MAX);
/// ```
///
/// # Panics
///
/// The behavior for each component follows the scalar type's implementation of
/// `num_traits::Signed::abs`.
pub fn abs(self) -> Self
where
T: Signed,
{
vec2(self.x.abs(), self.y.abs())
}
/// Dot product.
#[inline]
pub fn dot(self, other: Self) -> T
where
T: Add<Output = T> + Mul<Output = T>,
{
self.x * other.x + self.y * other.y
}
/// Returns the norm of the cross product [self.x, self.y, 0] x [other.x, other.y, 0].
#[inline]
pub fn cross(self, other: Self) -> T
where
T: Sub<Output = T> + Mul<Output = T>,
{
self.x * other.y - self.y * other.x
}
/// Returns the component-wise multiplication of the two vectors.
#[inline]
pub fn component_mul(self, other: Self) -> Self
where
T: Mul<Output = T>,
{
vec2(self.x * other.x, self.y * other.y)
}
}
impl<T: Copy, U> Vector2D<T, U> {
/// Create a 3d vector from this one, using the specified z value.
#[inline]
pub fn extend(self, z: T) -> Vector3D<T, U> {
vec3(self.x, self.y, z)
}
/// Cast this vector into a point.
///
/// Equivalent to adding this vector to the origin.
#[inline]
pub fn to_point(self) -> Point2D<T, U> {
Point2D {
x: self.x,
y: self.y,
_unit: PhantomData,
}
}
/// Swap x and y.
#[inline]
pub fn yx(self) -> Self {
vec2(self.y, self.x)
}
/// Cast this vector into a size.
#[inline]
pub fn to_size(self) -> Size2D<T, U> {
size2(self.x, self.y)
}
/// Drop the units, preserving only the numeric value.
#[inline]
pub fn to_untyped(self) -> Vector2D<T, UnknownUnit> {
vec2(self.x, self.y)
}
/// Cast the unit.
#[inline]
pub fn cast_unit<V>(self) -> Vector2D<T, V> {
vec2(self.x, self.y)
}
/// Cast into an array with x and y.
#[inline]
pub fn to_array(self) -> [T; 2] {
[self.x, self.y]
}
/// Cast into a tuple with x and y.
#[inline]
pub fn to_tuple(self) -> (T, T) {
(self.x, self.y)
}
/// Convert into a 3d vector with `z` coordinate equals to `T::zero()`.
#[inline]
pub fn to_3d(self) -> Vector3D<T, U>
where
T: Zero,
{
vec3(self.x, self.y, Zero::zero())
}
/// Rounds each component to the nearest integer value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
///
/// ```rust
/// # use euclid::vec2;
/// enum Mm {}
///
/// assert_eq!(vec2::<_, Mm>(-0.1, -0.8).round(), vec2::<_, Mm>(0.0, -1.0))
/// ```
#[inline]
#[must_use]
pub fn round(self) -> Self
where
T: Round,
{
vec2(self.x.round(), self.y.round())
}
/// Rounds each component to the smallest integer equal or greater than the original value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
///
/// ```rust
/// # use euclid::vec2;
/// enum Mm {}
///
/// assert_eq!(vec2::<_, Mm>(-0.1, -0.8).ceil(), vec2::<_, Mm>(0.0, 0.0))
/// ```
#[inline]
#[must_use]
pub fn ceil(self) -> Self
where
T: Ceil,
{
vec2(self.x.ceil(), self.y.ceil())
}
/// Rounds each component to the biggest integer equal or lower than the original value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
///
/// ```rust
/// # use euclid::vec2;
/// enum Mm {}
///
/// assert_eq!(vec2::<_, Mm>(-0.1, -0.8).floor(), vec2::<_, Mm>(-1.0, -1.0))
/// ```
#[inline]
#[must_use]
pub fn floor(self) -> Self
where
T: Floor,
{
vec2(self.x.floor(), self.y.floor())
}
/// Returns the signed angle between this vector and the x axis.
/// Positive values counted counterclockwise, where 0 is `+x` axis, `PI/2`
/// is `+y` axis.
///
/// The returned angle is between -PI and PI.
pub fn angle_from_x_axis(self) -> Angle<T>
where
T: Trig,
{
Angle::radians(Trig::fast_atan2(self.y, self.x))
}
/// Creates translation by this vector in vector units.
#[inline]
pub fn to_transform(self) -> Transform2D<T, U, U>
where
T: Zero + One,
{
Transform2D::translation(self.x, self.y)
}
}
impl<T, U> Vector2D<T, U>
where
T: Copy + Mul<T, Output = T> + Add<T, Output = T>,
{
/// Returns the vector's length squared.
///
/// Cheaper than computing the length (no square root); prefer it when
/// only comparing magnitudes.
#[inline]
pub fn square_length(self) -> T {
self.x * self.x + self.y * self.y
}
/// Returns this vector projected onto another one.
///
/// Projecting onto a nil vector will cause a division by zero.
#[inline]
pub fn project_onto_vector(self, onto: Self) -> Self
where
T: Sub<T, Output = T> + Div<T, Output = T>,
{
onto * (self.dot(onto) / onto.square_length())
}
/// Returns the signed angle between this vector and another vector.
///
/// The returned angle is between -PI and PI.
pub fn angle_to(self, other: Self) -> Angle<T>
where
T: Sub<Output = T> + Trig,
{
// atan2(cross, dot) yields the signed angle without requiring either
// vector to be normalized.
Angle::radians(Trig::fast_atan2(self.cross(other), self.dot(other)))
}
}
impl<T: Float, U> Vector2D<T, U> {
/// Returns the vector length.
#[inline]
pub fn length(self) -> T {
self.square_length().sqrt()
}
/// Returns the vector with length of one unit.
///
/// Normalizing a zero-length vector divides by zero and yields NaN
/// components; use [`try_normalize`](#method.try_normalize) to handle
/// that case explicitly.
#[inline]
#[must_use]
pub fn normalize(self) -> Self {
self / self.length()
}
/// Returns the vector with length of one unit.
///
/// Unlike [`Vector2D::normalize`](#method.normalize), this returns None in the case that the
/// length of the vector is zero.
///
/// Note: a vector whose squared length underflows to zero (components
/// near the smallest positive float) also reports a zero length here
/// and returns None.
#[inline]
#[must_use]
pub fn try_normalize(self) -> Option<Self> {
let len = self.length();
if len == T::zero() {
None
} else {
Some(self / len)
}
}
/// Return the normalized vector even if the length is larger than the max value of Float.
///
/// When the length overflows to infinity, the vector is pre-scaled by
/// `1 / T::max_value()` and the scaled copy is normalized instead.
#[inline]
#[must_use]
pub fn robust_normalize(self) -> Self {
let length = self.length();
if length.is_infinite() {
let scaled = self / T::max_value();
scaled / scaled.length()
} else {
self / length
}
}
/// Return this vector capped to a maximum length.
///
/// Compares squared lengths so the common (already short enough) path
/// avoids a square root.
#[inline]
pub fn with_max_length(self, max_length: T) -> Self {
let square_length = self.square_length();
if square_length > max_length * max_length {
return self * (max_length / square_length.sqrt());
}
self
}
/// Return this vector with a minimum length applied.
///
/// NOTE(review): scaling a zero-length vector up to a positive
/// `min_length` divides by zero and produces NaN components — confirm
/// callers never pass a nil vector here.
#[inline]
pub fn with_min_length(self, min_length: T) -> Self {
let square_length = self.square_length();
if square_length < min_length * min_length {
return self * (min_length / square_length.sqrt());
}
self
}
/// Return this vector with minimum and maximum lengths applied.
#[inline]
pub fn clamp_length(self, min: T, max: T) -> Self {
debug_assert!(min <= max);
self.with_min_length(min).with_max_length(max)
}
}
impl<T, U> Vector2D<T, U>
where
T: Copy + One + Add<Output = T> + Sub<Output = T> + Mul<Output = T>,
{
/// Linearly interpolate each component between this vector and another vector.
///
/// # Example
///
/// ```rust
/// use euclid::vec2;
/// use euclid::default::Vector2D;
///
/// let from: Vector2D<_> = vec2(0.0, 10.0);
/// let to: Vector2D<_> = vec2(8.0, -4.0);
///
/// assert_eq!(from.lerp(to, -1.0), vec2(-8.0, 24.0));
/// assert_eq!(from.lerp(to, 0.0), vec2( 0.0, 10.0));
/// assert_eq!(from.lerp(to, 0.5), vec2( 4.0, 3.0));
/// assert_eq!(from.lerp(to, 1.0), vec2( 8.0, -4.0));
/// assert_eq!(from.lerp(to, 2.0), vec2(16.0, -18.0));
/// ```
#[inline]
pub fn lerp(self, other: Self, t: T) -> Self {
// self * (1 - t) + other * t; a `t` outside [0, 1] extrapolates
// (see the doc example).
let one_t = T::one() - t;
self * one_t + other * t
}
/// Returns a reflection vector using an incident ray and a surface normal.
///
/// Computes `self - 2 * dot(self, normal) * normal`. NOTE(review): the
/// formula assumes `normal` has unit length — confirm callers normalize.
#[inline]
pub fn reflect(self, normal: Self) -> Self {
let two = T::one() + T::one();
self - normal * two * self.dot(normal)
}
}
impl<T: PartialOrd, U> Vector2D<T, U> {
/// Returns the vector each component of which are minimum of this vector and another.
#[inline]
pub fn min(self, other: Self) -> Self {
vec2(min(self.x, other.x), min(self.y, other.y))
}
/// Returns the vector each component of which are maximum of this vector and another.
#[inline]
pub fn max(self, other: Self) -> Self {
vec2(max(self.x, other.x), max(self.y, other.y))
}
/// Returns the vector each component of which is clamped by corresponding
/// components of `start` and `end`.
///
/// Shortcut for `self.max(start).min(end)`.
#[inline]
pub fn clamp(self, start: Self, end: Self) -> Self
where
T: Copy,
{
self.max(start).min(end)
}
/// Returns vector with results of "greater than" operation on each component.
#[inline]
pub fn greater_than(self, other: Self) -> BoolVector2D {
BoolVector2D {
x: self.x > other.x,
y: self.y > other.y,
}
}
/// Returns vector with results of "lower than" operation on each component.
#[inline]
pub fn lower_than(self, other: Self) -> BoolVector2D {
BoolVector2D {
x: self.x < other.x,
y: self.y < other.y,
}
}
}
impl<T: PartialEq, U> Vector2D<T, U> {
/// Returns vector with results of "equal" operation on each component.
#[inline]
pub fn equal(self, other: Self) -> BoolVector2D {
BoolVector2D {
x: self.x == other.x,
y: self.y == other.y,
}
}
/// Returns vector with results of "not equal" operation on each component.
#[inline]
pub fn not_equal(self, other: Self) -> BoolVector2D {
BoolVector2D {
x: self.x != other.x,
y: self.y != other.y,
}
}
}
impl<T: NumCast + Copy, U> Vector2D<T, U> {
/// Cast from one numeric representation to another, preserving the units.
///
/// When casting from floating vector to integer coordinates, the decimals are truncated
/// as one would expect from a simple cast, but this behavior does not always make sense
/// geometrically. Consider using `round()`, `ceil()` or `floor()` before casting.
#[inline]
pub fn cast<NewT: NumCast>(self) -> Vector2D<NewT, U> {
self.try_cast().unwrap()
}
/// Fallible cast from one numeric representation to another, preserving the units.
///
/// When casting from floating vector to integer coordinates, the decimals are truncated
/// as one would expect from a simple cast, but this behavior does not always make sense
/// geometrically. Consider using `round()`, `ceil()` or `floor()` before casting.
pub fn try_cast<NewT: NumCast>(self) -> Option<Vector2D<NewT, U>> {
match (NumCast::from(self.x), NumCast::from(self.y)) {
(Some(x), Some(y)) => Some(Vector2D::new(x, y)),
_ => None,
}
}
// Convenience functions for common casts.
/// Cast into an `f32` vector.
#[inline]
pub fn to_f32(self) -> Vector2D<f32, U> {
self.cast()
}
/// Cast into an `f64` vector.
#[inline]
pub fn to_f64(self) -> Vector2D<f64, U> {
self.cast()
}
/// Cast into an `usize` vector, truncating decimals if any.
///
/// When casting from floating vector vectors, it is worth considering whether
/// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
/// the desired conversion behavior.
#[inline]
pub fn to_usize(self) -> Vector2D<usize, U> {
self.cast()
}
/// Cast into an `u32` vector, truncating decimals if any.
///
/// When casting from floating vector vectors, it is worth considering whether
/// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
/// the desired conversion behavior.
#[inline]
pub fn to_u32(self) -> Vector2D<u32, U> {
self.cast()
}
/// Cast into an i32 vector, truncating decimals if any.
///
/// When casting from floating vector vectors, it is worth considering whether
/// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
/// the desired conversion behavior.
#[inline]
pub fn to_i32(self) -> Vector2D<i32, U> {
self.cast()
}
/// Cast into an i64 vector, truncating decimals if any.
///
/// When casting from floating vector vectors, it is worth considering whether
/// to `round()`, `ceil()` or `floor()` before the cast in order to obtain
/// the desired conversion behavior.
#[inline]
pub fn to_i64(self) -> Vector2D<i64, U> {
self.cast()
}
}
impl<T: Neg, U> Neg for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn neg(self) -> Self::Output {
vec2(-self.x, -self.y)
}
}
impl<T: Add, U> Add for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn add(self, other: Self) -> Self::Output {
Vector2D::new(self.x + other.x, self.y + other.y)
}
}
impl<T: Add + Copy, U> Add<&Self> for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn add(self, other: &Self) -> Self::Output {
Vector2D::new(self.x + other.x, self.y + other.y)
}
}
impl<T: Add<Output = T> + Zero, U> Sum for Vector2D<T, U> {
fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(Self::zero(), Add::add)
}
}
impl<'a, T: 'a + Add<Output = T> + Copy + Zero, U: 'a> Sum<&'a Self> for Vector2D<T, U> {
fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(Self::zero(), Add::add)
}
}
impl<T: Copy + Add<T, Output = T>, U> AddAssign for Vector2D<T, U> {
#[inline]
fn add_assign(&mut self, other: Self) {
*self = *self + other
}
}
impl<T: Sub, U> Sub for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn sub(self, other: Self) -> Self::Output {
vec2(self.x - other.x, self.y - other.y)
}
}
impl<T: Copy + Sub<T, Output = T>, U> SubAssign<Vector2D<T, U>> for Vector2D<T, U> {
#[inline]
fn sub_assign(&mut self, other: Self) {
*self = *self - other
}
}
impl<T: Copy + Mul, U> Mul<T> for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn mul(self, scale: T) -> Self::Output {
vec2(self.x * scale, self.y * scale)
}
}
impl<T: Copy + Mul<T, Output = T>, U> MulAssign<T> for Vector2D<T, U> {
#[inline]
fn mul_assign(&mut self, scale: T) {
*self = *self * scale
}
}
impl<T: Copy + Mul, U1, U2> Mul<Scale<T, U1, U2>> for Vector2D<T, U1> {
type Output = Vector2D<T::Output, U2>;
#[inline]
fn mul(self, scale: Scale<T, U1, U2>) -> Self::Output {
vec2(self.x * scale.0, self.y * scale.0)
}
}
impl<T: Copy + MulAssign, U> MulAssign<Scale<T, U, U>> for Vector2D<T, U> {
#[inline]
fn mul_assign(&mut self, scale: Scale<T, U, U>) {
self.x *= scale.0;
self.y *= scale.0;
}
}
impl<T: Copy + Div, U> Div<T> for Vector2D<T, U> {
type Output = Vector2D<T::Output, U>;
#[inline]
fn div(self, scale: T) -> Self::Output {
vec2(self.x / scale, self.y / scale)
}
}
impl<T: Copy + Div<T, Output = T>, U> DivAssign<T> for Vector2D<T, U> {
#[inline]
fn div_assign(&mut self, scale: T) {
*self = *self / scale
}
}
impl<T: Copy + Div, U1, U2> Div<Scale<T, U1, U2>> for Vector2D<T, U2> {
type Output = Vector2D<T::Output, U1>;
#[inline]
fn div(self, scale: Scale<T, U1, U2>) -> Self::Output {
vec2(self.x / scale.0, self.y / scale.0)
}
}
impl<T: Copy + DivAssign, U> DivAssign<Scale<T, U, U>> for Vector2D<T, U> {
#[inline]
fn div_assign(&mut self, scale: Scale<T, U, U>) {
self.x /= scale.0;
self.y /= scale.0;
}
}
impl<T: Round, U> Round for Vector2D<T, U> {
/// See [`Vector2D::round()`](#method.round)
///
/// Not infinite recursion: Rust method resolution prefers the inherent
/// `Vector2D::round` over this trait method, so the call below forwards
/// to the inherent implementation.
#[inline]
fn round(self) -> Self {
self.round()
}
}
impl<T: Ceil, U> Ceil for Vector2D<T, U> {
/// See [`Vector2D::ceil()`](#method.ceil)
///
/// Forwards to the inherent `Vector2D::ceil` (inherent methods win in
/// method resolution).
#[inline]
fn ceil(self) -> Self {
self.ceil()
}
}
impl<T: Floor, U> Floor for Vector2D<T, U> {
/// See [`Vector2D::floor()`](#method.floor)
///
/// Forwards to the inherent `Vector2D::floor` (inherent methods win in
/// method resolution).
#[inline]
fn floor(self) -> Self {
self.floor()
}
}
impl<T: ApproxEq<T>, U> ApproxEq<Vector2D<T, U>> for Vector2D<T, U> {
#[inline]
fn approx_epsilon() -> Self {
vec2(T::approx_epsilon(), T::approx_epsilon())
}
#[inline]
fn approx_eq_eps(&self, other: &Self, eps: &Self) -> bool {
self.x.approx_eq_eps(&other.x, &eps.x) && self.y.approx_eq_eps(&other.y, &eps.y)
}
}
// NOTE(review): clippy's `from_over_into` prefers writing this as
// `impl From<Vector2D<T, U>> for [T; 2]` (which derives `Into` for free);
// kept as-is here to avoid churning a public impl.
impl<T, U> Into<[T; 2]> for Vector2D<T, U> {
fn into(self) -> [T; 2] {
[self.x, self.y]
}
}
impl<T, U> From<[T; 2]> for Vector2D<T, U> {
fn from([x, y]: [T; 2]) -> Self {
vec2(x, y)
}
}
impl<T, U> Into<(T, T)> for Vector2D<T, U> {
fn into(self) -> (T, T) {
(self.x, self.y)
}
}
impl<T, U> From<(T, T)> for Vector2D<T, U> {
fn from(tuple: (T, T)) -> Self {
vec2(tuple.0, tuple.1)
}
}
// A size reinterpreted as a displacement: width becomes x, height becomes y.
impl<T, U> From<Size2D<T, U>> for Vector2D<T, U> {
fn from(size: Size2D<T, U>) -> Self {
vec2(size.width, size.height)
}
}
/// A 3d Vector tagged with a unit.
#[repr(C)]
pub struct Vector3D<T, U> {
/// The `x` (traditionally, horizontal) coordinate.
pub x: T,
/// The `y` (traditionally, vertical) coordinate.
pub y: T,
/// The `z` (traditionally, depth) coordinate.
pub z: T,
#[doc(hidden)]
pub _unit: PhantomData<U>,
}
mint_vec!(Vector3D[x, y, z] = Vector3);
impl<T: Copy, U> Copy for Vector3D<T, U> {}
impl<T: Clone, U> Clone for Vector3D<T, U> {
fn clone(&self) -> Self {
Vector3D {
x: self.x.clone(),
y: self.y.clone(),
z: self.z.clone(),
_unit: PhantomData,
}
}
}
#[cfg(feature = "serde")]
impl<'de, T, U> serde::Deserialize<'de> for Vector3D<T, U>
where
T: serde::Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let (x, y, z) = serde::Deserialize::deserialize(deserializer)?;
Ok(Vector3D {
x,
y,
z,
_unit: PhantomData,
})
}
}
#[cfg(feature = "serde")]
impl<T, U> serde::Serialize for Vector3D<T, U>
where
T: serde::Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
(&self.x, &self.y, &self.z).serialize(serializer)
}
}
impl<T: Eq, U> Eq for Vector3D<T, U> {}
impl<T: PartialEq, U> PartialEq for Vector3D<T, U> {
fn eq(&self, other: &Self) -> bool {
self.x == other.x && self.y == other.y && self.z == other.z
}
}
impl<T: Hash, U> Hash for Vector3D<T, U> {
fn hash<H: core::hash::Hasher>(&self, h: &mut H) {
self.x.hash(h);
self.y.hash(h);
self.z.hash(h);
}
}
impl<T: Zero, U> Zero for Vector3D<T, U> {
/// Constructor, setting all components to zero.
#[inline]
fn zero() -> Self {
vec3(Zero::zero(), Zero::zero(), Zero::zero())
}
}
impl<T: fmt::Debug, U> fmt::Debug for Vector3D<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("")
.field(&self.x)
.field(&self.y)
.field(&self.z)
.finish()
}
}
impl<T: Default, U> Default for Vector3D<T, U> {
fn default() -> Self {
Vector3D::new(Default::default(), Default::default(), Default::default())
}
}
impl<T, U> Vector3D<T, U> {
    /// Constructor, setting all components to zero.
    #[inline]
    pub fn zero() -> Self
    where
        T: Zero,
    {
        vec3(Zero::zero(), Zero::zero(), Zero::zero())
    }

    /// Constructor, setting all components to one.
    #[inline]
    pub fn one() -> Self
    where
        T: One,
    {
        vec3(One::one(), One::one(), One::one())
    }

    /// Constructor taking scalar values directly.
    ///
    /// `const`, so vectors can be built in constant contexts.
    #[inline]
    pub const fn new(x: T, y: T, z: T) -> Self {
        Vector3D {
            x,
            y,
            z,
            _unit: PhantomData,
        }
    }

    /// Constructor setting all components to the same value.
    #[inline]
    pub fn splat(v: T) -> Self
    where
        T: Clone,
    {
        Vector3D {
            x: v.clone(),
            y: v.clone(),
            // The last component takes ownership of `v`, saving one clone.
            z: v,
            _unit: PhantomData,
        }
    }

    /// Constructor taking properly typed `Length`s instead of scalar values.
    #[inline]
    pub fn from_lengths(x: Length<T, U>, y: Length<T, U>, z: Length<T, U>) -> Vector3D<T, U> {
        vec3(x.0, y.0, z.0)
    }

    /// Tag a unitless value with units.
    #[inline]
    pub fn from_untyped(p: Vector3D<T, UnknownUnit>) -> Self {
        vec3(p.x, p.y, p.z)
    }

    /// Computes the vector with absolute values of each component.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use std::{i32, f32};
    /// # use euclid::vec3;
    /// enum U {}
    ///
    /// assert_eq!(vec3::<_, U>(-1, 0, 2).abs(), vec3(1, 0, 2));
    ///
    /// let vec = vec3::<_, U>(f32::NAN, 0.0, -f32::MAX).abs();
    /// assert!(vec.x.is_nan());
    /// assert_eq!(vec.y, 0.0);
    /// assert_eq!(vec.z, f32::MAX);
    /// ```
    ///
    /// # Panics
    ///
    /// The behavior for each component follows the scalar type's implementation of
    /// `num_traits::Signed::abs`.
    pub fn abs(self) -> Self
    where
        T: Signed,
    {
        vec3(self.x.abs(), self.y.abs(), self.z.abs())
    }

    /// Dot product: `x*x' + y*y' + z*z'`.
    #[inline]
    pub fn dot(self, other: Self) -> T
    where
        T: Add<Output = T> + Mul<Output = T>,
    {
        self.x * other.x + self.y * other.y + self.z * other.z
    }
}
impl<T: Copy, U> Vector3D<T, U> {
    /// Cross product, following the right-hand rule.
    #[inline]
    pub fn cross(self, other: Self) -> Self
    where
        T: Sub<Output = T> + Mul<Output = T>,
    {
        vec3(
            self.y * other.z - self.z * other.y,
            self.z * other.x - self.x * other.z,
            self.x * other.y - self.y * other.x,
        )
    }

    /// Returns the component-wise multiplication of the two vectors.
    #[inline]
    pub fn component_mul(self, other: Self) -> Self
    where
        T: Mul<Output = T>,
    {
        vec3(self.x * other.x, self.y * other.y, self.z * other.z)
    }

    /// Cast this vector into a point.
    ///
    /// Equivalent to adding this vector to the origin.
    #[inline]
    pub fn to_point(self) -> Point3D<T, U> {
        point3(self.x, self.y, self.z)
    }

    /// Returns a 2d vector using this vector's x and y coordinates
    #[inline]
    pub fn xy(self) -> Vector2D<T, U> {
        vec2(self.x, self.y)
    }

    /// Returns a 2d vector using this vector's x and z coordinates
    #[inline]
    pub fn xz(self) -> Vector2D<T, U> {
        vec2(self.x, self.z)
    }

    /// Returns a 2d vector using this vector's y and z coordinates
    #[inline]
    pub fn yz(self) -> Vector2D<T, U> {
        vec2(self.y, self.z)
    }

    /// Cast into an array with x, y and z.
    #[inline]
    pub fn to_array(self) -> [T; 3] {
        [self.x, self.y, self.z]
    }

    /// Cast into an array with x, y, z and 0.
    #[inline]
    pub fn to_array_4d(self) -> [T; 4]
    where
        T: Zero,
    {
        [self.x, self.y, self.z, Zero::zero()]
    }

    /// Cast into a tuple with x, y and z.
    #[inline]
    pub fn to_tuple(self) -> (T, T, T) {
        (self.x, self.y, self.z)
    }

    /// Cast into a tuple with x, y, z and 0.
    #[inline]
    pub fn to_tuple_4d(self) -> (T, T, T, T)
    where
        T: Zero,
    {
        (self.x, self.y, self.z, Zero::zero())
    }

    /// Drop the units, preserving only the numeric value.
    #[inline]
    pub fn to_untyped(self) -> Vector3D<T, UnknownUnit> {
        vec3(self.x, self.y, self.z)
    }

    /// Cast the unit.
    #[inline]
    pub fn cast_unit<V>(self) -> Vector3D<T, V> {
        vec3(self.x, self.y, self.z)
    }

    /// Convert into a 2d vector, dropping the z component.
    #[inline]
    pub fn to_2d(self) -> Vector2D<T, U> {
        self.xy()
    }

    /// Rounds each component to the nearest integer value.
    ///
    /// This behavior is preserved for negative values (unlike the basic cast).
    ///
    /// ```rust
    /// # use euclid::vec3;
    /// enum Mm {}
    ///
    /// assert_eq!(vec3::<_, Mm>(-0.1, -0.8, 0.4).round(), vec3::<_, Mm>(0.0, -1.0, 0.0))
    /// ```
    #[inline]
    #[must_use]
    pub fn round(self) -> Self
    where
        T: Round,
    {
        vec3(self.x.round(), self.y.round(), self.z.round())
    }

    /// Rounds each component to the smallest integer equal or greater than the original value.
    ///
    /// This behavior is preserved for negative values (unlike the basic cast).
    ///
    /// ```rust
    /// # use euclid::vec3;
    /// enum Mm {}
    ///
    /// assert_eq!(vec3::<_, Mm>(-0.1, -0.8, 0.4).ceil(), vec3::<_, Mm>(0.0, 0.0, 1.0))
    /// ```
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self
    where
        T: Ceil,
    {
        vec3(self.x.ceil(), self.y.ceil(), self.z.ceil())
    }

    /// Rounds each component to the biggest integer equal or lower than the original value.
    ///
    /// This behavior is preserved for negative values (unlike the basic cast).
    ///
    /// ```rust
    /// # use euclid::vec3;
    /// enum Mm {}
    ///
    /// assert_eq!(vec3::<_, Mm>(-0.1, -0.8, 0.4).floor(), vec3::<_, Mm>(-1.0, -1.0, 0.0))
    /// ```
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self
    where
        T: Floor,
    {
        vec3(self.x.floor(), self.y.floor(), self.z.floor())
    }

    /// Creates translation by this vector in vector units
    #[inline]
    pub fn to_transform(self) -> Transform3D<T, U, U>
    where
        T: Zero + One,
    {
        Transform3D::translation(self.x, self.y, self.z)
    }
}
impl<T, U> Vector3D<T, U>
where
    T: Copy + Mul<T, Output = T> + Add<T, Output = T>,
{
    /// Returns the vector's length squared.
    ///
    /// Cheaper than `length()` since it avoids a square root; prefer it when
    /// only comparing magnitudes.
    #[inline]
    pub fn square_length(self) -> T {
        self.x * self.x + self.y * self.y + self.z * self.z
    }

    /// Returns this vector projected onto another one.
    ///
    /// Projecting onto a nil vector will cause a division by zero.
    // NOTE(review): the `Sub` bound below appears unused by this body (only
    // `dot`, `square_length`, scalar `Div` and vector-by-scalar `Mul` are
    // exercised) — confirm before relaxing it.
    #[inline]
    pub fn project_onto_vector(self, onto: Self) -> Self
    where
        T: Sub<T, Output = T> + Div<T, Output = T>,
    {
        onto * (self.dot(onto) / onto.square_length())
    }
}
impl<T: Float, U> Vector3D<T, U> {
    /// Returns the positive angle between this vector and another vector.
    ///
    /// The returned angle is between 0 and PI.
    pub fn angle_to(self, other: Self) -> Angle<T>
    where
        T: Trig,
    {
        // atan2(|a × b|, a · b) is numerically stabler than acos of the
        // normalized dot product.
        Angle::radians(Trig::fast_atan2(
            self.cross(other).length(),
            self.dot(other),
        ))
    }

    /// Returns the vector length.
    #[inline]
    pub fn length(self) -> T {
        self.square_length().sqrt()
    }

    /// Returns the vector with length of one unit.
    ///
    /// Normalizing a zero vector divides by zero and yields NaN components;
    /// see `try_normalize` for a checked variant.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        self / self.length()
    }

    /// Returns the vector with length of one unit.
    ///
    /// Unlike [`Vector3D::normalize`](#method.normalize), this returns None in the case that the
    /// length of the vector is zero.
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let len = self.length();
        if len == T::zero() {
            None
        } else {
            Some(self / len)
        }
    }

    /// Return the normalized vector even if the length is larger than the max value of Float.
    #[inline]
    #[must_use]
    pub fn robust_normalize(self) -> Self {
        let length = self.length();
        if length.is_infinite() {
            // Pre-scale to avoid the overflow, then normalize the result.
            let scaled = self / T::max_value();
            scaled / scaled.length()
        } else {
            self / length
        }
    }

    /// Return this vector capped to a maximum length.
    #[inline]
    pub fn with_max_length(self, max_length: T) -> Self {
        // Compare squared lengths so the sqrt is only paid when clamping.
        let square_length = self.square_length();
        if square_length > max_length * max_length {
            return self * (max_length / square_length.sqrt());
        }

        self
    }

    /// Return this vector with a minimum length applied.
    #[inline]
    pub fn with_min_length(self, min_length: T) -> Self {
        let square_length = self.square_length();
        if square_length < min_length * min_length {
            return self * (min_length / square_length.sqrt());
        }

        self
    }

    /// Return this vector with minimum and maximum lengths applied.
    #[inline]
    pub fn clamp_length(self, min: T, max: T) -> Self {
        debug_assert!(min <= max);
        self.with_min_length(min).with_max_length(max)
    }
}
impl<T, U> Vector3D<T, U>
where
    T: Copy + One + Add<Output = T> + Sub<Output = T> + Mul<Output = T>,
{
    /// Linearly interpolate each component between this vector and another vector.
    ///
    /// `t == 0` yields `self`, `t == 1` yields `other`; values outside
    /// `[0, 1]` extrapolate.
    ///
    /// # Example
    ///
    /// ```rust
    /// use euclid::vec3;
    /// use euclid::default::Vector3D;
    ///
    /// let from: Vector3D<_> = vec3(0.0, 10.0, -1.0);
    /// let to:   Vector3D<_> = vec3(8.0, -4.0,  0.0);
    ///
    /// assert_eq!(from.lerp(to, 0.0), vec3(0.0, 10.0, -1.0));
    /// assert_eq!(from.lerp(to, 0.5), vec3(4.0,  3.0, -0.5));
    /// assert_eq!(from.lerp(to, 1.0), vec3(8.0, -4.0,  0.0));
    /// ```
    #[inline]
    pub fn lerp(self, other: Self, t: T) -> Self {
        self * (T::one() - t) + other * t
    }

    /// Returns a reflection vector using an incident ray and a surface normal.
    ///
    /// `normal` is expected to be a unit vector.
    #[inline]
    pub fn reflect(self, normal: Self) -> Self {
        let two = T::one() + T::one();
        let d = self.dot(normal);
        self - normal * two * d
    }
}
impl<T: PartialOrd, U> Vector3D<T, U> {
    /// Returns the vector each component of which is the minimum of this
    /// vector and another.
    // Delegates to the crate-level `min` helper; NaN handling follows that
    // helper's comparison semantics.
    #[inline]
    pub fn min(self, other: Self) -> Self {
        vec3(
            min(self.x, other.x),
            min(self.y, other.y),
            min(self.z, other.z),
        )
    }

    /// Returns the vector each component of which is the maximum of this
    /// vector and another.
    #[inline]
    pub fn max(self, other: Self) -> Self {
        vec3(
            max(self.x, other.x),
            max(self.y, other.y),
            max(self.z, other.z),
        )
    }

    /// Returns the vector each component of which is clamped by corresponding
    /// components of `start` and `end`.
    ///
    /// Shortcut for `self.max(start).min(end)`.
    #[inline]
    pub fn clamp(self, start: Self, end: Self) -> Self
    where
        T: Copy,
    {
        self.max(start).min(end)
    }

    /// Returns vector with results of "greater than" operation on each component.
    #[inline]
    pub fn greater_than(self, other: Self) -> BoolVector3D {
        BoolVector3D {
            x: self.x > other.x,
            y: self.y > other.y,
            z: self.z > other.z,
        }
    }

    /// Returns vector with results of "lower than" operation on each component.
    #[inline]
    pub fn lower_than(self, other: Self) -> BoolVector3D {
        BoolVector3D {
            x: self.x < other.x,
            y: self.y < other.y,
            z: self.z < other.z,
        }
    }
}
impl<T: PartialEq, U> Vector3D<T, U> {
/// Returns vector with results of "equal" operation on each component.
#[inline]
pub fn equal(self, other: Self) -> BoolVector3D {
BoolVector3D {
x: self.x == other.x,
y: self.y == other.y,
z: self.z == other.z,
}
}
/// Returns vector with results of "not equal" operation on each component.
#[inline]
pub fn not_equal(self, other: Self) -> BoolVector3D {
BoolVector3D {
x: self.x != other.x,
y: self.y != other.y,
z: self.z != other.z,
}
}
}
impl<T: NumCast + Copy, U> Vector3D<T, U> {
    /// Cast from one numeric representation to another, preserving the units.
    ///
    /// Panics if any component is not representable in `NewT`; see
    /// [`try_cast`](#method.try_cast) for the fallible variant.
    ///
    /// Float-to-integer casts truncate decimals, which is not always the
    /// desired geometric behavior — consider `round()`, `ceil()` or `floor()`
    /// before casting.
    #[inline]
    pub fn cast<NewT: NumCast>(self) -> Vector3D<NewT, U> {
        self.try_cast().unwrap()
    }

    /// Fallible cast from one numeric representation to another, preserving
    /// the units. Returns `None` if any component is not representable.
    ///
    /// Float-to-integer casts truncate decimals; consider `round()`, `ceil()`
    /// or `floor()` before casting.
    pub fn try_cast<NewT: NumCast>(self) -> Option<Vector3D<NewT, U>> {
        Some(vec3(
            NumCast::from(self.x)?,
            NumCast::from(self.y)?,
            NumCast::from(self.z)?,
        ))
    }

    // Convenience shorthands for frequently used target types.

    /// Cast into an `f32` vector.
    #[inline]
    pub fn to_f32(self) -> Vector3D<f32, U> {
        self.cast()
    }

    /// Cast into an `f64` vector.
    #[inline]
    pub fn to_f64(self) -> Vector3D<f64, U> {
        self.cast()
    }

    /// Cast into an `usize` vector, truncating decimals if any.
    ///
    /// When casting from floating vectors, consider `round()`, `ceil()` or
    /// `floor()` first to obtain the desired conversion behavior.
    #[inline]
    pub fn to_usize(self) -> Vector3D<usize, U> {
        self.cast()
    }

    /// Cast into an `u32` vector, truncating decimals if any.
    ///
    /// When casting from floating vectors, consider `round()`, `ceil()` or
    /// `floor()` first to obtain the desired conversion behavior.
    #[inline]
    pub fn to_u32(self) -> Vector3D<u32, U> {
        self.cast()
    }

    /// Cast into an `i32` vector, truncating decimals if any.
    ///
    /// When casting from floating vectors, consider `round()`, `ceil()` or
    /// `floor()` first to obtain the desired conversion behavior.
    #[inline]
    pub fn to_i32(self) -> Vector3D<i32, U> {
        self.cast()
    }

    /// Cast into an `i64` vector, truncating decimals if any.
    ///
    /// When casting from floating vectors, consider `round()`, `ceil()` or
    /// `floor()` first to obtain the desired conversion behavior.
    #[inline]
    pub fn to_i64(self) -> Vector3D<i64, U> {
        self.cast()
    }
}
impl<T: Neg, U> Neg for Vector3D<T, U> {
type Output = Vector3D<T::Output, U>;
#[inline]
fn neg(self) -> Self::Output {
vec3(-self.x, -self.y, -self.z)
}
}
// Component-wise vector addition.
impl<T: Add, U> Add for Vector3D<T, U> {
    type Output = Vector3D<T::Output, U>;

    #[inline]
    fn add(self, other: Self) -> Self::Output {
        vec3(self.x + other.x, self.y + other.y, self.z + other.z)
    }
}

// Vector + &Vector, primarily so `Sum<&Self>` below can fold over references.
impl<'a, T: 'a + Add + Copy, U: 'a> Add<&Self> for Vector3D<T, U> {
    type Output = Vector3D<T::Output, U>;

    #[inline]
    fn add(self, other: &Self) -> Self::Output {
        vec3(self.x + other.x, self.y + other.y, self.z + other.z)
    }
}

// Summing an iterator of vectors; the empty sum is the zero vector.
impl<T: Add<Output = T> + Zero, U> Sum for Vector3D<T, U> {
    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
        iter.fold(Self::zero(), Add::add)
    }
}

// Summing an iterator of vector references.
impl<'a, T: 'a + Add<Output = T> + Copy + Zero, U: 'a> Sum<&'a Self> for Vector3D<T, U> {
    fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
        iter.fold(Self::zero(), Add::add)
    }
}

// `+=`, delegating to `Add` above.
impl<T: Copy + Add<T, Output = T>, U> AddAssign for Vector3D<T, U> {
    #[inline]
    fn add_assign(&mut self, other: Self) {
        *self = *self + other
    }
}

// Component-wise vector subtraction.
impl<T: Sub, U> Sub for Vector3D<T, U> {
    type Output = Vector3D<T::Output, U>;

    #[inline]
    fn sub(self, other: Self) -> Self::Output {
        vec3(self.x - other.x, self.y - other.y, self.z - other.z)
    }
}

// `-=`, delegating to `Sub` above.
impl<T: Copy + Sub<T, Output = T>, U> SubAssign<Vector3D<T, U>> for Vector3D<T, U> {
    #[inline]
    fn sub_assign(&mut self, other: Self) {
        *self = *self - other
    }
}

// Vector * scalar: scales every component, keeping the unit.
impl<T: Copy + Mul, U> Mul<T> for Vector3D<T, U> {
    type Output = Vector3D<T::Output, U>;

    #[inline]
    fn mul(self, scale: T) -> Self::Output {
        vec3(
            self.x * scale,
            self.y * scale,
            self.z * scale,
        )
    }
}

// `*= scalar`, delegating to `Mul<T>` above.
impl<T: Copy + Mul<T, Output = T>, U> MulAssign<T> for Vector3D<T, U> {
    #[inline]
    fn mul_assign(&mut self, scale: T) {
        *self = *self * scale
    }
}

// Vector * Scale: converts the unit from `U1` to `U2` while scaling.
impl<T: Copy + Mul, U1, U2> Mul<Scale<T, U1, U2>> for Vector3D<T, U1> {
    type Output = Vector3D<T::Output, U2>;

    #[inline]
    fn mul(self, scale: Scale<T, U1, U2>) -> Self::Output {
        vec3(
            self.x * scale.0,
            self.y * scale.0,
            self.z * scale.0,
        )
    }
}

// `*= Scale` is only allowed for a unit-preserving scale (`U -> U`).
impl<T: Copy + MulAssign, U> MulAssign<Scale<T, U, U>> for Vector3D<T, U> {
    #[inline]
    fn mul_assign(&mut self, scale: Scale<T, U, U>) {
        self.x *= scale.0;
        self.y *= scale.0;
        self.z *= scale.0;
    }
}

// Vector / scalar: divides every component, keeping the unit.
impl<T: Copy + Div, U> Div<T> for Vector3D<T, U> {
    type Output = Vector3D<T::Output, U>;

    #[inline]
    fn div(self, scale: T) -> Self::Output {
        vec3(
            self.x / scale,
            self.y / scale,
            self.z / scale,
        )
    }
}

// `/= scalar`, delegating to `Div<T>` above.
impl<T: Copy + Div<T, Output = T>, U> DivAssign<T> for Vector3D<T, U> {
    #[inline]
    fn div_assign(&mut self, scale: T) {
        *self = *self / scale
    }
}

// Vector / Scale: converts the unit back from `U2` to `U1`.
impl<T: Copy + Div, U1, U2> Div<Scale<T, U1, U2>> for Vector3D<T, U2> {
    type Output = Vector3D<T::Output, U1>;

    #[inline]
    fn div(self, scale: Scale<T, U1, U2>) -> Self::Output {
        vec3(
            self.x / scale.0,
            self.y / scale.0,
            self.z / scale.0,
        )
    }
}

// `/= Scale` is only allowed for a unit-preserving scale (`U -> U`).
impl<T: Copy + DivAssign, U> DivAssign<Scale<T, U, U>> for Vector3D<T, U> {
    #[inline]
    fn div_assign(&mut self, scale: Scale<T, U, U>) {
        self.x /= scale.0;
        self.y /= scale.0;
        self.z /= scale.0;
    }
}
impl<T: Round, U> Round for Vector3D<T, U> {
    /// See [`Vector3D::round()`](#method.round)
    #[inline]
    fn round(self) -> Self {
        // Not infinite recursion: inherent methods take precedence over trait
        // methods in Rust's method resolution, so this invokes the inherent
        // `Vector3D::round` defined earlier in this file.
        self.round()
    }
}

impl<T: Ceil, U> Ceil for Vector3D<T, U> {
    /// See [`Vector3D::ceil()`](#method.ceil)
    #[inline]
    fn ceil(self) -> Self {
        // Resolves to the inherent `Vector3D::ceil` (inherent methods win).
        self.ceil()
    }
}

impl<T: Floor, U> Floor for Vector3D<T, U> {
    /// See [`Vector3D::floor()`](#method.floor)
    #[inline]
    fn floor(self) -> Self {
        // Resolves to the inherent `Vector3D::floor` (inherent methods win).
        self.floor()
    }
}
impl<T: ApproxEq<T>, U> ApproxEq<Vector3D<T, U>> for Vector3D<T, U> {
    /// Default epsilon: the scalar epsilon replicated in every component.
    #[inline]
    fn approx_epsilon() -> Self {
        vec3(
            T::approx_epsilon(),
            T::approx_epsilon(),
            T::approx_epsilon(),
        )
    }

    /// True when every component is within the matching component of `eps`.
    #[inline]
    fn approx_eq_eps(&self, other: &Self, eps: &Self) -> bool {
        self.x.approx_eq_eps(&other.x, &eps.x)
            && self.y.approx_eq_eps(&other.y, &eps.y)
            && self.z.approx_eq_eps(&other.z, &eps.z)
    }
}
// Implemented as `From` (rather than a manual `Into`, per clippy's
// `from_over_into`): the standard blanket impl still provides
// `Into<[T; 3]> for Vector3D<T, U>`, so existing `.into()` call sites
// keep working.
impl<T, U> From<Vector3D<T, U>> for [T; 3] {
    /// Converts the vector into a plain `[x, y, z]` array, dropping the unit.
    fn from(v: Vector3D<T, U>) -> Self {
        [v.x, v.y, v.z]
    }
}

impl<T, U> From<[T; 3]> for Vector3D<T, U> {
    /// Builds a vector from an `[x, y, z]` array.
    fn from([x, y, z]: [T; 3]) -> Self {
        vec3(x, y, z)
    }
}
// Implemented as `From` (rather than a manual `Into`, per clippy's
// `from_over_into`): the standard blanket impl still provides
// `Into<(T, T, T)> for Vector3D<T, U>`, so existing `.into()` call sites
// keep working.
impl<T, U> From<Vector3D<T, U>> for (T, T, T) {
    /// Converts the vector into an `(x, y, z)` tuple, dropping the unit.
    fn from(v: Vector3D<T, U>) -> Self {
        (v.x, v.y, v.z)
    }
}

impl<T, U> From<(T, T, T)> for Vector3D<T, U> {
    /// Builds a vector from an `(x, y, z)` tuple.
    fn from(tuple: (T, T, T)) -> Self {
        vec3(tuple.0, tuple.1, tuple.2)
    }
}
/// A 2d vector of booleans, useful for component-wise logic operations.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct BoolVector2D {
    // Flag associated with the x component.
    pub x: bool,
    // Flag associated with the y component.
    pub y: bool,
}

/// A 3d vector of booleans, useful for component-wise logic operations.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct BoolVector3D {
    // Flag associated with the x component.
    pub x: bool,
    // Flag associated with the y component.
    pub y: bool,
    // Flag associated with the z component.
    pub z: bool,
}
impl BoolVector2D {
    /// Returns `true` only when both components are `true`.
    #[inline]
    pub fn all(self) -> bool {
        self.x && self.y
    }

    /// Returns `true` when at least one component is `true`.
    #[inline]
    pub fn any(self) -> bool {
        self.x || self.y
    }

    /// Returns `true` when both components are `false`; the negation of `any()`.
    #[inline]
    pub fn none(self) -> bool {
        !self.any()
    }

    /// Component-wise logical AND.
    #[inline]
    pub fn and(self, other: Self) -> Self {
        Self {
            x: self.x && other.x,
            y: self.y && other.y,
        }
    }

    /// Component-wise logical OR.
    #[inline]
    pub fn or(self, other: Self) -> Self {
        Self {
            x: self.x || other.x,
            y: self.y || other.y,
        }
    }

    /// Component-wise logical negation.
    #[inline]
    pub fn not(self) -> Self {
        Self {
            x: !self.x,
            y: !self.y,
        }
    }

    /// Builds a point picking each component from `a` where the corresponding
    /// flag is `true`, and from `b` where it is `false`.
    #[inline]
    pub fn select_point<T, U>(self, a: Point2D<T, U>, b: Point2D<T, U>) -> Point2D<T, U> {
        let x = if self.x { a.x } else { b.x };
        let y = if self.y { a.y } else { b.y };
        point2(x, y)
    }

    /// Builds a vector picking each component from `a` where the corresponding
    /// flag is `true`, and from `b` where it is `false`.
    #[inline]
    pub fn select_vector<T, U>(self, a: Vector2D<T, U>, b: Vector2D<T, U>) -> Vector2D<T, U> {
        let x = if self.x { a.x } else { b.x };
        let y = if self.y { a.y } else { b.y };
        vec2(x, y)
    }

    /// Builds a size picking each component from `a` where the corresponding
    /// flag is `true`, and from `b` where it is `false`.
    #[inline]
    pub fn select_size<T, U>(self, a: Size2D<T, U>, b: Size2D<T, U>) -> Size2D<T, U> {
        let width = if self.x { a.width } else { b.width };
        let height = if self.y { a.height } else { b.height };
        size2(width, height)
    }
}
impl BoolVector3D {
/// Returns `true` if all components are `true` and `false` otherwise.
#[inline]
pub fn all(self) -> bool {
self.x && self.y && self.z
}
/// Returns `true` if any component are `true` and `false` otherwise.
#[inline]
pub fn any(self) -> bool {
self.x || self.y || self.z
}
/// Returns `true` if all components are `false` and `false` otherwise. Negation of `any()`.
#[inline]
pub fn none(self) -> bool {
!self.any()
}
/// Returns new vector with by-component AND operation applied.
#[inline]
pub fn and(self, other: Self) -> Self {
BoolVector3D {
x: self.x && other.x,
y: self.y && other.y,
z: self.z && other.z,
}
}
/// Returns new vector with by-component OR operation applied.
#[inline]
pub fn or(self, other: Self) -> Self {
BoolVector3D {
x: self.x || other.x,
y: self.y || other.y,
z: self.z || other.z,
}
}
/// Returns new vector with results of negation operation on each component.
#[inline]
pub fn not(self) -> Self {
BoolVector3D {
x: !self.x,
y: !self.y,
z: !self.z,
}
}
/// Returns point, each component of which or from `a`, or from `b` depending on truly value
/// of corresponding vector component. `true` selects value from `a` and `false` from `b`.
#[inline]
pub fn select_point<T, U>(self, a: Point3D<T, U>, b: Point3D<T, U>) -> Point3D<T, U> {
point3(
if self.x { a.x } else { b.x },
if self.y { a.y } else { b.y },
if self.z { a.z } else { b.z },
)
}
/// Returns vector, each component of which or from `a`, or from `b` depending on truly value
/// of corresponding vector component. `true` selects value from `a` and `false` from `b`.
#[inline]
pub fn select_vector<T, U>(self, a: Vector3D<T, U>, b: Vector3D<T, U>) -> Vector3D<T, U> {
vec3(
if self.x { a.x } else { b.x },
if self.y { a.y } else { b.y },
if self.z { a.z } else { b.z },
)
}
/// Returns size, each component of which or from `a`, or from `b` depending on truly value
/// of corresponding vector component. `true` selects value from `a` and `false` from `b`.
#[inline]
#[must_use]
pub fn select_size<T, U>(self, a: Size3D<T, U>, b: Size3D<T, U>) -> Size3D<T, U> {
size3(
if self.x { a.width } else { b.width },
if self.y { a.height } else { b.height },
if self.z { a.depth } else { b.depth },
)
}
/// Returns a 2d vector using this vector's x and y coordinates.
#[inline]
pub fn xy(self) -> BoolVector2D {
BoolVector2D {
x: self.x,
y: self.y,
}
}
/// Returns a 2d vector using this vector's x and z coordinates.
#[inline]
pub fn xz(self) -> BoolVector2D {
BoolVector2D {
x: self.x,
y: self.z,
}
}
/// Returns a 2d vector using this vector's y and z coordinates.
#[inline]
pub fn yz(self) -> BoolVector2D {
BoolVector2D {
x: self.y,
y: self.z,
}
}
}
/// Convenience constructor for a 2d vector.
///
/// `const`, so it can be used in constant expressions.
#[inline]
pub const fn vec2<T, U>(x: T, y: T) -> Vector2D<T, U> {
    Vector2D {
        x,
        y,
        _unit: PhantomData,
    }
}

/// Convenience constructor for a 3d vector.
///
/// `const`, so it can be used in constant expressions.
#[inline]
pub const fn vec3<T, U>(x: T, y: T, z: T) -> Vector3D<T, U> {
    Vector3D {
        x,
        y,
        z,
        _unit: PhantomData,
    }
}
/// Shorthand for `BoolVector2D { x, y }`.
#[inline]
pub const fn bvec2(x: bool, y: bool) -> BoolVector2D {
    BoolVector2D { x, y }
}

/// Shorthand for `BoolVector3D { x, y, z }`.
#[inline]
pub const fn bvec3(x: bool, y: bool, z: bool) -> BoolVector3D {
    BoolVector3D { x, y, z }
}
#[cfg(test)]
mod vector2d {
    use crate::scale::Scale;
    use crate::{default, vec2};

    #[cfg(feature = "mint")]
    use mint;

    type Vec2 = default::Vector2D<f32>;

    #[test]
    pub fn test_scalar_mul() {
        let p1: Vec2 = vec2(3.0, 5.0);

        let result = p1 * 5.0;

        assert_eq!(result, Vec2::new(15.0, 25.0));
    }

    #[test]
    pub fn test_dot() {
        let p1: Vec2 = vec2(2.0, 7.0);
        let p2: Vec2 = vec2(13.0, 11.0);
        assert_eq!(p1.dot(p2), 103.0);
    }

    #[test]
    pub fn test_cross() {
        let p1: Vec2 = vec2(4.0, 7.0);
        let p2: Vec2 = vec2(13.0, 8.0);
        let r = p1.cross(p2);
        assert_eq!(r, -59.0);
    }

    #[test]
    pub fn test_normalize() {
        use std::f32;

        let p0: Vec2 = Vec2::zero();
        let p1: Vec2 = vec2(4.0, 0.0);
        let p2: Vec2 = vec2(3.0, -4.0);
        // Normalizing the zero vector divides by zero and yields NaN.
        assert!(p0.normalize().x.is_nan() && p0.normalize().y.is_nan());
        assert_eq!(p1.normalize(), vec2(1.0, 0.0));
        assert_eq!(p2.normalize(), vec2(0.6, -0.8));

        // `normalize` overflows for huge vectors; `robust_normalize` doesn't.
        let p3: Vec2 = vec2(::std::f32::MAX, ::std::f32::MAX);
        assert_ne!(
            p3.normalize(),
            vec2(1.0 / 2.0f32.sqrt(), 1.0 / 2.0f32.sqrt())
        );
        assert_eq!(
            p3.robust_normalize(),
            vec2(1.0 / 2.0f32.sqrt(), 1.0 / 2.0f32.sqrt())
        );

        let p4: Vec2 = Vec2::zero();
        assert!(p4.try_normalize().is_none());
        let p5: Vec2 = Vec2::new(f32::MIN_POSITIVE, f32::MIN_POSITIVE);
        assert!(p5.try_normalize().is_none());

        let p6: Vec2 = vec2(4.0, 0.0);
        let p7: Vec2 = vec2(3.0, -4.0);
        assert_eq!(p6.try_normalize().unwrap(), vec2(1.0, 0.0));
        assert_eq!(p7.try_normalize().unwrap(), vec2(0.6, -0.8));
    }

    #[test]
    pub fn test_min() {
        let p1: Vec2 = vec2(1.0, 3.0);
        let p2: Vec2 = vec2(2.0, 2.0);

        let result = p1.min(p2);

        assert_eq!(result, vec2(1.0, 2.0));
    }

    #[test]
    pub fn test_max() {
        let p1: Vec2 = vec2(1.0, 3.0);
        let p2: Vec2 = vec2(2.0, 2.0);

        let result = p1.max(p2);

        assert_eq!(result, vec2(2.0, 3.0));
    }

    #[test]
    pub fn test_angle_from_x_axis() {
        use crate::approxeq::ApproxEq;
        use core::f32::consts::FRAC_PI_2;

        let right: Vec2 = vec2(10.0, 0.0);
        let down: Vec2 = vec2(0.0, 4.0);
        let up: Vec2 = vec2(0.0, -1.0);

        assert!(right.angle_from_x_axis().get().approx_eq(&0.0));
        assert!(down.angle_from_x_axis().get().approx_eq(&FRAC_PI_2));
        assert!(up.angle_from_x_axis().get().approx_eq(&-FRAC_PI_2));
    }

    #[test]
    pub fn test_angle_to() {
        use crate::approxeq::ApproxEq;
        use core::f32::consts::FRAC_PI_2;

        let right: Vec2 = vec2(10.0, 0.0);
        let right2: Vec2 = vec2(1.0, 0.0);
        let up: Vec2 = vec2(0.0, -1.0);
        let up_left: Vec2 = vec2(-1.0, -1.0);

        assert!(right.angle_to(right2).get().approx_eq(&0.0));
        assert!(right.angle_to(up).get().approx_eq(&-FRAC_PI_2));
        assert!(up.angle_to(right).get().approx_eq(&FRAC_PI_2));
        assert!(up_left
            .angle_to(up)
            .get()
            .approx_eq_eps(&(0.5 * FRAC_PI_2), &0.0005));
    }

    #[test]
    pub fn test_with_max_length() {
        use crate::approxeq::ApproxEq;

        let v1: Vec2 = vec2(0.5, 0.5);
        let v2: Vec2 = vec2(1.0, 0.0);
        let v3: Vec2 = vec2(0.1, 0.2);
        let v4: Vec2 = vec2(2.0, -2.0);
        let v5: Vec2 = vec2(1.0, 2.0);
        let v6: Vec2 = vec2(-1.0, 3.0);

        // Vectors already within the limit are returned unchanged.
        assert_eq!(v1.with_max_length(1.0), v1);
        assert_eq!(v2.with_max_length(1.0), v2);
        assert_eq!(v3.with_max_length(1.0), v3);
        assert_eq!(v4.with_max_length(10.0), v4);
        assert_eq!(v5.with_max_length(10.0), v5);
        assert_eq!(v6.with_max_length(10.0), v6);

        // Clamped vectors keep their direction but take the max length.
        let v4_clamped = v4.with_max_length(1.0);
        assert!(v4_clamped.length().approx_eq(&1.0));
        assert!(v4_clamped.normalize().approx_eq(&v4.normalize()));

        let v5_clamped = v5.with_max_length(1.5);
        assert!(v5_clamped.length().approx_eq(&1.5));
        assert!(v5_clamped.normalize().approx_eq(&v5.normalize()));

        let v6_clamped = v6.with_max_length(2.5);
        assert!(v6_clamped.length().approx_eq(&2.5));
        assert!(v6_clamped.normalize().approx_eq(&v6.normalize()));
    }

    #[test]
    pub fn test_project_onto_vector() {
        use crate::approxeq::ApproxEq;

        let v1: Vec2 = vec2(1.0, 2.0);
        let x: Vec2 = vec2(1.0, 0.0);
        let y: Vec2 = vec2(0.0, 1.0);

        assert!(v1.project_onto_vector(x).approx_eq(&vec2(1.0, 0.0)));
        assert!(v1.project_onto_vector(y).approx_eq(&vec2(0.0, 2.0)));
        assert!(v1.project_onto_vector(-x).approx_eq(&vec2(1.0, 0.0)));
        assert!(v1.project_onto_vector(x * 10.0).approx_eq(&vec2(1.0, 0.0)));
        assert!(v1.project_onto_vector(v1 * 2.0).approx_eq(&v1));
        assert!(v1.project_onto_vector(-v1).approx_eq(&v1));
    }

    #[cfg(feature = "mint")]
    #[test]
    pub fn test_mint() {
        let v1 = Vec2::new(1.0, 3.0);
        let vm: mint::Vector2<_> = v1.into();
        let v2 = Vec2::from(vm);

        assert_eq!(v1, v2);
    }

    pub enum Mm {}
    pub enum Cm {}

    pub type Vector2DMm<T> = super::Vector2D<T, Mm>;
    pub type Vector2DCm<T> = super::Vector2D<T, Cm>;

    #[test]
    pub fn test_add() {
        let p1 = Vector2DMm::new(1.0, 2.0);
        let p2 = Vector2DMm::new(3.0, 4.0);

        assert_eq!(p1 + p2, vec2(4.0, 6.0));
        assert_eq!(p1 + &p2, vec2(4.0, 6.0));
    }

    #[test]
    pub fn test_sum() {
        let vecs = [
            Vector2DMm::new(1.0, 2.0),
            Vector2DMm::new(3.0, 4.0),
            Vector2DMm::new(5.0, 6.0)
        ];
        let sum = Vector2DMm::new(9.0, 12.0);
        assert_eq!(vecs.iter().sum::<Vector2DMm<_>>(), sum);
        assert_eq!(vecs.into_iter().sum::<Vector2DMm<_>>(), sum);
    }

    #[test]
    pub fn test_add_assign() {
        let mut p1 = Vector2DMm::new(1.0, 2.0);
        p1 += vec2(3.0, 4.0);

        assert_eq!(p1, vec2(4.0, 6.0));
    }

    // Renamed from `test_tpyed_scalar_mul` (typo fix).
    #[test]
    pub fn test_typed_scalar_mul() {
        let p1 = Vector2DMm::new(1.0, 2.0);
        let cm_per_mm = Scale::<f32, Mm, Cm>::new(0.1);

        let result: Vector2DCm<f32> = p1 * cm_per_mm;

        assert_eq!(result, vec2(0.1, 0.2));
    }

    #[test]
    pub fn test_swizzling() {
        let p: default::Vector2D<i32> = vec2(1, 2);
        assert_eq!(p.yx(), vec2(2, 1));
    }

    #[test]
    pub fn test_reflect() {
        use crate::approxeq::ApproxEq;
        let a: Vec2 = vec2(1.0, 3.0);
        let n1: Vec2 = vec2(0.0, -1.0);
        let n2: Vec2 = vec2(1.0, -1.0).normalize();

        assert!(a.reflect(n1).approx_eq(&vec2(1.0, -3.0)));
        assert!(a.reflect(n2).approx_eq(&vec2(3.0, 1.0)));
    }
}
// Unit tests for `Vector3D`; mirrors the `vector2d` test module above.
#[cfg(test)]
mod vector3d {
    use crate::scale::Scale;
    use crate::{default, vec2, vec3};

    #[cfg(feature = "mint")]
    use mint;

    type Vec3 = default::Vector3D<f32>;

    #[test]
    pub fn test_add() {
        let p1 = Vec3::new(1.0, 2.0, 3.0);
        let p2 = Vec3::new(4.0, 5.0, 6.0);

        assert_eq!(p1 + p2, vec3(5.0, 7.0, 9.0));
        assert_eq!(p1 + &p2, vec3(5.0, 7.0, 9.0));
    }

    #[test]
    pub fn test_sum() {
        let vecs = [
            Vec3::new(1.0, 2.0, 3.0),
            Vec3::new(4.0, 5.0, 6.0),
            Vec3::new(7.0, 8.0, 9.0)
        ];
        let sum = Vec3::new(12.0, 15.0, 18.0);
        // Exercises both `Sum` impls (by value and by reference).
        assert_eq!(vecs.iter().sum::<Vec3>(), sum);
        assert_eq!(vecs.into_iter().sum::<Vec3>(), sum);
    }

    #[test]
    pub fn test_dot() {
        let p1: Vec3 = vec3(7.0, 21.0, 32.0);
        let p2: Vec3 = vec3(43.0, 5.0, 16.0);
        assert_eq!(p1.dot(p2), 918.0);
    }

    #[test]
    pub fn test_cross() {
        let p1: Vec3 = vec3(4.0, 7.0, 9.0);
        let p2: Vec3 = vec3(13.0, 8.0, 3.0);
        let p3 = p1.cross(p2);
        assert_eq!(p3, vec3(-51.0, 105.0, -59.0));
    }

    #[test]
    pub fn test_normalize() {
        use std::f32;

        let p0: Vec3 = Vec3::zero();
        let p1: Vec3 = vec3(0.0, -6.0, 0.0);
        let p2: Vec3 = vec3(1.0, 2.0, -2.0);
        // Normalizing the zero vector divides by zero and yields NaN.
        assert!(
            p0.normalize().x.is_nan() && p0.normalize().y.is_nan() && p0.normalize().z.is_nan()
        );
        assert_eq!(p1.normalize(), vec3(0.0, -1.0, 0.0));
        assert_eq!(p2.normalize(), vec3(1.0 / 3.0, 2.0 / 3.0, -2.0 / 3.0));

        // `normalize` overflows for huge vectors; `robust_normalize` doesn't.
        let p3: Vec3 = vec3(::std::f32::MAX, ::std::f32::MAX, 0.0);
        assert_ne!(
            p3.normalize(),
            vec3(1.0 / 2.0f32.sqrt(), 1.0 / 2.0f32.sqrt(), 0.0)
        );
        assert_eq!(
            p3.robust_normalize(),
            vec3(1.0 / 2.0f32.sqrt(), 1.0 / 2.0f32.sqrt(), 0.0)
        );

        let p4: Vec3 = Vec3::zero();
        assert!(p4.try_normalize().is_none());
        let p5: Vec3 = Vec3::new(f32::MIN_POSITIVE, f32::MIN_POSITIVE, f32::MIN_POSITIVE);
        assert!(p5.try_normalize().is_none());

        let p6: Vec3 = vec3(4.0, 0.0, 3.0);
        let p7: Vec3 = vec3(3.0, -4.0, 0.0);
        assert_eq!(p6.try_normalize().unwrap(), vec3(0.8, 0.0, 0.6));
        assert_eq!(p7.try_normalize().unwrap(), vec3(0.6, -0.8, 0.0));
    }

    #[test]
    pub fn test_min() {
        let p1: Vec3 = vec3(1.0, 3.0, 5.0);
        let p2: Vec3 = vec3(2.0, 2.0, -1.0);

        let result = p1.min(p2);

        assert_eq!(result, vec3(1.0, 2.0, -1.0));
    }

    #[test]
    pub fn test_max() {
        let p1: Vec3 = vec3(1.0, 3.0, 5.0);
        let p2: Vec3 = vec3(2.0, 2.0, -1.0);

        let result = p1.max(p2);

        assert_eq!(result, vec3(2.0, 3.0, 5.0));
    }

    #[test]
    pub fn test_clamp() {
        let p1: Vec3 = vec3(1.0, -1.0, 5.0);
        let p2: Vec3 = vec3(2.0, 5.0, 10.0);
        let p3: Vec3 = vec3(-1.0, 2.0, 20.0);

        let result = p3.clamp(p1, p2);

        assert_eq!(result, vec3(1.0, 2.0, 10.0));
    }

    #[test]
    pub fn test_typed_scalar_mul() {
        enum Mm {}
        enum Cm {}

        let p1 = super::Vector3D::<f32, Mm>::new(1.0, 2.0, 3.0);
        let cm_per_mm = Scale::<f32, Mm, Cm>::new(0.1);

        let result: super::Vector3D<f32, Cm> = p1 * cm_per_mm;

        assert_eq!(result, vec3(0.1, 0.2, 0.3));
    }

    #[test]
    pub fn test_swizzling() {
        let p: Vec3 = vec3(1.0, 2.0, 3.0);
        assert_eq!(p.xy(), vec2(1.0, 2.0));
        assert_eq!(p.xz(), vec2(1.0, 3.0));
        assert_eq!(p.yz(), vec2(2.0, 3.0));
    }

    #[cfg(feature = "mint")]
    #[test]
    pub fn test_mint() {
        let v1 = Vec3::new(1.0, 3.0, 5.0);
        let vm: mint::Vector3<_> = v1.into();
        let v2 = Vec3::from(vm);

        assert_eq!(v1, v2);
    }

    #[test]
    pub fn test_reflect() {
        use crate::approxeq::ApproxEq;
        let a: Vec3 = vec3(1.0, 3.0, 2.0);
        let n1: Vec3 = vec3(0.0, -1.0, 0.0);
        let n2: Vec3 = vec3(0.0, 1.0, 1.0).normalize();

        assert!(a.reflect(n1).approx_eq(&vec3(1.0, -3.0, 2.0)));
        assert!(a.reflect(n2).approx_eq(&vec3(1.0, -2.0, -3.0)));
    }

    #[test]
    pub fn test_angle_to() {
        use crate::approxeq::ApproxEq;
        use core::f32::consts::FRAC_PI_2;

        let right: Vec3 = vec3(10.0, 0.0, 0.0);
        let right2: Vec3 = vec3(1.0, 0.0, 0.0);
        let up: Vec3 = vec3(0.0, -1.0, 0.0);
        let up_left: Vec3 = vec3(-1.0, -1.0, 0.0);

        assert!(right.angle_to(right2).get().approx_eq(&0.0));
        assert!(right.angle_to(up).get().approx_eq(&FRAC_PI_2));
        assert!(up.angle_to(right).get().approx_eq(&FRAC_PI_2));
        assert!(up_left
            .angle_to(up)
            .get()
            .approx_eq_eps(&(0.5 * FRAC_PI_2), &0.0005));
    }

    #[test]
    pub fn test_with_max_length() {
        use crate::approxeq::ApproxEq;

        let v1: Vec3 = vec3(0.5, 0.5, 0.0);
        let v2: Vec3 = vec3(1.0, 0.0, 0.0);
        let v3: Vec3 = vec3(0.1, 0.2, 0.3);
        let v4: Vec3 = vec3(2.0, -2.0, 2.0);
        let v5: Vec3 = vec3(1.0, 2.0, -3.0);
        let v6: Vec3 = vec3(-1.0, 3.0, 2.0);

        // Vectors already within the limit are returned unchanged.
        assert_eq!(v1.with_max_length(1.0), v1);
        assert_eq!(v2.with_max_length(1.0), v2);
        assert_eq!(v3.with_max_length(1.0), v3);
        assert_eq!(v4.with_max_length(10.0), v4);
        assert_eq!(v5.with_max_length(10.0), v5);
        assert_eq!(v6.with_max_length(10.0), v6);

        // Clamped vectors keep their direction but take the max length.
        let v4_clamped = v4.with_max_length(1.0);
        assert!(v4_clamped.length().approx_eq(&1.0));
        assert!(v4_clamped.normalize().approx_eq(&v4.normalize()));

        let v5_clamped = v5.with_max_length(1.5);
        assert!(v5_clamped.length().approx_eq(&1.5));
        assert!(v5_clamped.normalize().approx_eq(&v5.normalize()));

        let v6_clamped = v6.with_max_length(2.5);
        assert!(v6_clamped.length().approx_eq(&2.5));
        assert!(v6_clamped.normalize().approx_eq(&v6.normalize()));
    }

    #[test]
    pub fn test_project_onto_vector() {
        use crate::approxeq::ApproxEq;

        let v1: Vec3 = vec3(1.0, 2.0, 3.0);
        let x: Vec3 = vec3(1.0, 0.0, 0.0);
        let y: Vec3 = vec3(0.0, 1.0, 0.0);
        let z: Vec3 = vec3(0.0, 0.0, 1.0);

        assert!(v1.project_onto_vector(x).approx_eq(&vec3(1.0, 0.0, 0.0)));
        assert!(v1.project_onto_vector(y).approx_eq(&vec3(0.0, 2.0, 0.0)));
        assert!(v1.project_onto_vector(z).approx_eq(&vec3(0.0, 0.0, 3.0)));
        assert!(v1.project_onto_vector(-x).approx_eq(&vec3(1.0, 0.0, 0.0)));
        assert!(v1
            .project_onto_vector(x * 10.0)
            .approx_eq(&vec3(1.0, 0.0, 0.0)));
        assert!(v1.project_onto_vector(v1 * 2.0).approx_eq(&v1));
        assert!(v1.project_onto_vector(-v1).approx_eq(&v1));
    }
}
#[cfg(test)]
mod bool_vector {
    //! Tests for the componentwise comparison helpers that yield boolean
    //! vectors (`bvec2`/`bvec3`) and the reductions/selection on them.
    use super::*;
    use crate::default;
    type Vec2 = default::Vector2D<f32>;
    type Vec3 = default::Vector3D<f32>;

    #[test]
    fn test_bvec2() {
        // Componentwise comparisons produce a 2-lane boolean vector.
        assert_eq!(
            Vec2::new(1.0, 2.0).greater_than(Vec2::new(2.0, 1.0)),
            bvec2(false, true),
        );
        assert_eq!(
            Vec2::new(1.0, 2.0).lower_than(Vec2::new(2.0, 1.0)),
            bvec2(true, false),
        );
        assert_eq!(
            Vec2::new(1.0, 2.0).equal(Vec2::new(1.0, 3.0)),
            bvec2(true, false),
        );
        assert_eq!(
            Vec2::new(1.0, 2.0).not_equal(Vec2::new(1.0, 3.0)),
            bvec2(false, true),
        );

        // Reductions: any / none / all.
        assert!(bvec2(true, true).any());
        assert!(bvec2(false, true).any());
        assert!(bvec2(true, false).any());
        assert!(!bvec2(false, false).any());
        assert!(bvec2(false, false).none());

        assert!(bvec2(true, true).all());
        assert!(!bvec2(false, true).all());
        assert!(!bvec2(true, false).all());
        assert!(!bvec2(false, false).all());

        // Lanewise logic ops and lane selection between two vectors.
        assert_eq!(bvec2(true, false).not(), bvec2(false, true));
        assert_eq!(
            bvec2(true, false).and(bvec2(true, true)),
            bvec2(true, false)
        );
        assert_eq!(bvec2(true, false).or(bvec2(true, true)), bvec2(true, true));

        assert_eq!(
            bvec2(true, false).select_vector(Vec2::new(1.0, 2.0), Vec2::new(3.0, 4.0)),
            Vec2::new(1.0, 4.0),
        );
    }

    #[test]
    fn test_bvec3() {
        // Same coverage as test_bvec2, for the 3-lane variant.
        assert_eq!(
            Vec3::new(1.0, 2.0, 3.0).greater_than(Vec3::new(3.0, 2.0, 1.0)),
            bvec3(false, false, true),
        );
        assert_eq!(
            Vec3::new(1.0, 2.0, 3.0).lower_than(Vec3::new(3.0, 2.0, 1.0)),
            bvec3(true, false, false),
        );
        assert_eq!(
            Vec3::new(1.0, 2.0, 3.0).equal(Vec3::new(3.0, 2.0, 1.0)),
            bvec3(false, true, false),
        );
        assert_eq!(
            Vec3::new(1.0, 2.0, 3.0).not_equal(Vec3::new(3.0, 2.0, 1.0)),
            bvec3(true, false, true),
        );

        assert!(bvec3(true, true, false).any());
        assert!(bvec3(false, true, false).any());
        assert!(bvec3(true, false, false).any());
        assert!(!bvec3(false, false, false).any());
        assert!(bvec3(false, false, false).none());

        assert!(bvec3(true, true, true).all());
        assert!(!bvec3(false, true, false).all());
        assert!(!bvec3(true, false, false).all());
        assert!(!bvec3(false, false, false).all());

        assert_eq!(bvec3(true, false, true).not(), bvec3(false, true, false));
        assert_eq!(
            bvec3(true, false, true).and(bvec3(true, true, false)),
            bvec3(true, false, false)
        );
        assert_eq!(
            bvec3(true, false, false).or(bvec3(true, true, false)),
            bvec3(true, true, false)
        );

        assert_eq!(
            bvec3(true, false, true)
                .select_vector(Vec3::new(1.0, 2.0, 3.0), Vec3::new(4.0, 5.0, 6.0)),
            Vec3::new(1.0, 5.0, 3.0),
        );
    }
}
|
use typenum;
use typenum::uint::UInt;
use typenum::consts::*;
use typenum::operator_aliases::Mod;
use typenum::type_operators::Same;
use nodrop::NoDrop;
use num;
use num::Float;
use std::ops::{Deref, DerefMut, Add, Sub, Mul, Div, Neg, Rem,
AddAssign, SubAssign, MulAssign, DivAssign, Index, IndexMut};
use std::marker::PhantomData;
use std::slice::{Iter as SliceIter, IterMut as SliceIterMut};
use std::mem;
use std::ptr;
use std::fmt::{Debug, Formatter, Result as FmtResult};
use std::iter::FromIterator;
/// A fixed-size vector whose elements are allocated on the stack.
///
/// ```rust
/// # use rowcol::prelude::*;
///
/// let arr = Vector::<i32, U5>::new([1, 2, 3, 4, 5]);
/// assert_eq!(*arr, [1, 2, 3, 4, 5]);
/// ```
pub struct Vector<T, N: ArrayLen<T>>(N::Array);

impl<T, N: ArrayLen<T>> Vector<T, N> {
    /// Creates a vector from an array.
    #[inline]
    pub fn new(array: N::Array) -> Self {
        Vector(array)
    }

    /// Creates a vector by calling `f(i)` for each index in `0..N`.
    #[inline]
    pub fn generate<F>(f: F) -> Self where F: FnMut(usize) -> T {
        (0..N::to_usize()).map(f).collect()
    }

    /// Returns the inner array.
    #[inline]
    pub fn into_inner(self) -> N::Array {
        self.0
    }

    /// Returns a slice of entire vector.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        self.0.as_ref()
    }

    /// Returns a mutable slice of entire vector.
    #[inline]
    pub fn as_slice_mut(&mut self) -> &mut [T] {
        self.0.as_mut()
    }

    /// Returns an iterator over this vector.
    #[inline]
    pub fn iter(&self) -> SliceIter<T> {
        self.as_slice().iter()
    }

    /// Returns an mutable iterator over this vector.
    #[inline]
    pub fn iter_mut(&mut self) -> SliceIterMut<T> {
        self.as_slice_mut().iter_mut()
    }

    /// Splits this vector into chunks of `I`-length vectors. `N % I` must be zero.
    ///
    /// ```rust
    /// # use rowcol::prelude::*;
    /// let v = Vector::<i32, U4>::new([1, 2, 3, 4]);
    /// let mut it = v.into_chunks::<U2>();
    /// assert_eq!(it.next(), Some(Vector::new([1, 2])));
    /// assert_eq!(it.next(), Some(Vector::new([3, 4])));
    /// assert_eq!(it.next(), None);
    /// ```
    #[inline]
    pub fn into_chunks<I>(self) -> Chunks<T, N, I>
        where
        N: Rem<I>,
        // Divisibility is enforced at compile time.
        Mod<N, I>: Same<U0>
    {
        Chunks::new(self.into_iter())
    }

    /// Returns a reference to the element at `i` without bounds checking.
    ///
    /// # Safety
    /// `i` must be less than `self.len()` (checked only in debug builds).
    #[inline]
    pub unsafe fn get_unchecked(&self, i: usize) -> &T {
        debug_assert!(i < self.len());
        self.as_slice().get_unchecked(i)
    }

    /// Returns a mutable reference to the element at `i` without bounds checking.
    ///
    /// # Safety
    /// `i` must be less than `self.len()` (checked only in debug builds).
    #[inline]
    pub unsafe fn get_unchecked_mut(&mut self, i: usize) -> &mut T {
        debug_assert!(i < self.len());
        self.as_slice_mut().get_unchecked_mut(i)
    }

    /// Returns the length `N`, known at compile time.
    #[inline]
    pub fn len(&self) -> usize {
        N::to_usize()
    }

    /// Always `false`: `ArrayLen` is only implemented for lengths 1..=32 in
    /// this file, so a `Vector` can never have zero elements.
    #[inline]
    pub fn is_empty(&self) -> bool {
        false
    }
}

impl<T, N> Debug for Vector<T, N>
    where T: Debug, N: ArrayLen<T>
{
    // Formats as `Vector[elem, elem, ...]`.
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        try!(f.write_str("Vector"));
        f.debug_list().entries(self.iter()).finish()
    }
}
impl<T, N> Clone for Vector<T, N>
    where
    T: Clone,
    N: ArrayLen<T>
{
    // Element-wise clone; not derivable because `N::Array` is an associated
    // type with no `Clone` bound of its own.
    #[inline]
    fn clone(&self) -> Self {
        self.iter().cloned().collect()
    }
}

impl<T, N> Copy for Vector<T, N>
    where
    T: Copy,
    N::Array: Copy,
    N: ArrayLen<T>
{
}

impl<T, N> Default for Vector<T, N>
    where
    T: Default,
    N: ArrayLen<T>,
{
    /// A vector with every element set to `T::default()`.
    #[inline]
    fn default() -> Self {
        Vector::generate(|_| T::default())
    }
}

// Deref to the underlying fixed-size array, so `*v` yields `[T; N]`.
impl<T, N> Deref for Vector<T, N>
    where
    N: ArrayLen<T>
{
    type Target = N::Array;

    #[inline]
    fn deref(&self) -> &N::Array {
        &self.0
    }
}

impl<T, N> DerefMut for Vector<T, N>
    where N: ArrayLen<T>
{
    #[inline]
    fn deref_mut(&mut self) -> &mut N::Array {
        &mut self.0
    }
}

impl<T, N> AsRef<[T]> for Vector<T, N>
    where N: ArrayLen<T>
{
    #[inline]
    fn as_ref(&self) -> &[T] {
        self.as_slice()
    }
}

impl<T, N> AsMut<[T]> for Vector<T, N>
    where N: ArrayLen<T>
{
    #[inline]
    fn as_mut(&mut self) -> &mut [T] {
        self.as_slice_mut()
    }
}

// Heterogeneous equality: `Vector<T, N> == Vector<U, N>` whenever
// `T: PartialEq<U>`. Lengths always match because both share `N`.
impl<T, U, N> PartialEq<Vector<U, N>> for Vector<T, N>
    where
    T: PartialEq<U>,
    N: ArrayLen<T> + ArrayLen<U>,
{
    #[inline]
    fn eq(&self, other: &Vector<U, N>) -> bool {
        self.as_slice() == other.as_slice()
    }
}

// NOTE(review): this only requires `T: PartialEq`, so e.g. `Vector<f32, N>`
// advertises `Eq` even though `f32` is not (NaN != NaN). The bound should
// probably be `T: Eq` — confirm no caller relies on the looser bound.
impl<T, N> Eq for Vector<T, N>
    where
    T: PartialEq,
    N: ArrayLen<T>,
{
}
impl<T, N> num::Zero for Vector<T, N> where T: num::Zero + Clone, N: ArrayLen<T> {
    /// The additive identity: a vector of `T::zero()` in every lane.
    #[inline]
    fn zero() -> Self {
        Vector::generate(|_| T::zero())
    }

    #[inline]
    fn is_zero(&self) -> bool {
        self.iter().all(num::Zero::is_zero)
    }
}

impl<T, N> num::Bounded for Vector<T, N> where T: num::Bounded, N: ArrayLen<T> {
    #[inline]
    fn min_value() -> Self {
        Vector::generate(|_| T::min_value())
    }

    #[inline]
    fn max_value() -> Self {
        Vector::generate(|_| T::max_value())
    }
}

// Runtime index: panics on out-of-range, like slice indexing.
impl<T, N> Index<usize> for Vector<T, N> where N: ArrayLen<T> {
    type Output = T;

    #[inline]
    fn index(&self, idx: usize) -> &T {
        &self.as_slice()[idx]
    }
}

impl<T, N> IndexMut<usize> for Vector<T, N> where N: ArrayLen<T> {
    #[inline]
    fn index_mut(&mut self, idx: usize) -> &mut T {
        &mut self.as_slice_mut()[idx]
    }
}

// Type-level index (e.g. `v[U1::new()]`): the `Same<Less>` bound rejects
// out-of-range indices at compile time, so no runtime check is needed.
impl<T, N, IU, IB> Index<UInt<IU, IB>> for Vector<T, N>
    where
    N: ArrayLen<T>,
    IU: typenum::Unsigned,
    IB: typenum::Bit,
    UInt<IU, IB>: typenum::Cmp<N>,
    typenum::Compare<UInt<IU, IB>, N>: Same<typenum::Less>,
{
    type Output = T;

    #[inline]
    fn index(&self, _: UInt<IU, IB>) -> &T {
        // SAFETY: the `Same<typenum::Less>` bound guarantees index < N.
        unsafe {
            self.get_unchecked(<UInt<IU, IB> as typenum::Unsigned>::to_usize())
        }
    }
}

// `U0` does not match the `UInt<IU, IB>` pattern above, so index zero gets
// its own impl.
impl<T, N> Index<U0> for Vector<T, N>
    where N: ArrayLen<T>,
{
    type Output = T;

    #[inline]
    fn index(&self, _: U0) -> &T {
        // SAFETY: `ArrayLen` is only implemented for N >= 1 in this file.
        unsafe {
            self.get_unchecked(0)
        }
    }
}

impl<T, N, IU, IB> IndexMut<UInt<IU, IB>> for Vector<T, N>
    where
    N: ArrayLen<T>,
    IU: typenum::Unsigned,
    IB: typenum::Bit,
    UInt<IU, IB>: typenum::Cmp<N>,
    typenum::Compare<UInt<IU, IB>, N>: Same<typenum::Less>,
{
    #[inline]
    fn index_mut(&mut self, _: UInt<IU, IB>) -> &mut T {
        // SAFETY: the `Same<typenum::Less>` bound guarantees index < N.
        unsafe {
            self.get_unchecked_mut(<UInt<IU, IB> as typenum::Unsigned>::to_usize())
        }
    }
}

impl<T, N> IndexMut<U0> for Vector<T, N>
    where N: ArrayLen<T>,
{
    #[inline]
    fn index_mut(&mut self, _: U0) -> &mut T {
        // SAFETY: `ArrayLen` is only implemented for N >= 1 in this file.
        unsafe {
            self.get_unchecked_mut(0)
        }
    }
}
// Generates elementwise `Add`/`Sub` impls for every combination of owned and
// borrowed operands (T op T, T op &T, &T op T, &T op &T). The output element
// type follows the element-level operator, so `Vector<T> + Vector<U>` works
// whenever `T + U` does.
macro_rules! impl_vector_arith {
    (T T : $op_trait:ident, $op_fn:ident) => {
        impl<T, U, N> $op_trait<Vector<U, N>> for Vector<T, N>
            where
            T: $op_trait<U> ,
            N: ArrayLen<T> + ArrayLen<U> + ArrayLen<<T as $op_trait<U>>::Output>,
        {
            type Output = Vector<<T as $op_trait<U>>::Output, N>;

            fn $op_fn(self, other: Vector<U, N>) -> Self::Output {
                self.into_iter().zip(other).map(|(a, b)| $op_trait::$op_fn(a, b)).collect()
            }
        }
    };

    (T &T : $op_trait:ident, $op_fn:ident) => {
        impl<'a, T, U, N> $op_trait<&'a Vector<U, N>> for Vector<T, N>
            where
            T: $op_trait<&'a U>,
            N: ArrayLen<T> + ArrayLen<U> + ArrayLen<<T as $op_trait<&'a U>>::Output>,
        {
            type Output = Vector<<T as $op_trait<&'a U>>::Output, N>;

            fn $op_fn(self, other: &'a Vector<U, N>) -> Self::Output {
                self.into_iter().zip(other.iter()).map(|(a, b)| $op_trait::$op_fn(a, b)).collect()
            }
        }
    };

    (&T T : $op_trait:ident, $op_fn:ident) => {
        impl<'a, T, U, N> $op_trait<Vector<U, N>> for &'a Vector<T, N>
            where
            &'a T: $op_trait<U>,
            N: ArrayLen<T> + ArrayLen<U> + ArrayLen<<&'a T as $op_trait<U>>::Output>,
        {
            type Output = Vector<<&'a T as $op_trait<U>>::Output, N>;

            fn $op_fn(self, other: Vector<U, N>) -> Self::Output {
                self.iter().zip(other).map(|(a, b)| $op_trait::$op_fn(a, b)).collect()
            }
        }
    };

    (&T &T : $op_trait:ident, $op_fn:ident) => {
        impl<'a, 'b, T, U, N> $op_trait<&'a Vector<U, N>> for &'b Vector<T, N>
            where
            &'b T: $op_trait<&'a U>,
            N: ArrayLen<T> + ArrayLen<U> + ArrayLen<<&'b T as $op_trait<&'a U>>::Output>,
        {
            type Output = Vector<<&'b T as $op_trait<&'a U>>::Output, N>;

            fn $op_fn(self, other: &'a Vector<U, N>) -> Self::Output {
                self.iter().zip(other.iter()).map(|(a, b)| $op_trait::$op_fn(a, b)).collect()
            }
        }
    };
}

impl_vector_arith!(T T: Add, add);
impl_vector_arith!(T T: Sub, sub);
impl_vector_arith!(T &T: Add, add);
impl_vector_arith!(T &T: Sub, sub);
impl_vector_arith!(&T T: Add, add);
impl_vector_arith!(&T T: Sub, sub);
impl_vector_arith!(&T &T: Add, add);
impl_vector_arith!(&T &T: Sub, sub);
// In-place elementwise addition.
impl<T, U, N> AddAssign<Vector<U, N>> for Vector<T, N>
    where
    T: AddAssign<U>,
    N: ArrayLen<T> + ArrayLen<U>,
{
    #[inline]
    fn add_assign(&mut self, rhs: Vector<U, N>) {
        for (a, b) in self.iter_mut().zip(rhs) {
            *a += b;
        }
    }
}

// In-place elementwise subtraction.
impl<T, U, N> SubAssign<Vector<U, N>> for Vector<T, N>
    where
    T: SubAssign<U>,
    N: ArrayLen<T> + ArrayLen<U>,
{
    #[inline]
    fn sub_assign(&mut self, rhs: Vector<U, N>) {
        for (a, b) in self.iter_mut().zip(rhs) {
            *a -= b;
        }
    }
}

// Scalar multiplication: `vector * scalar`. The scalar is cloned once per
// element, hence the `U: Clone` bound.
impl<T, U, N> Mul<U> for Vector<T, N>
    where
    T: Mul<U>,
    U: Clone,
    N: ArrayLen<T> + ArrayLen<<T as Mul<U>>::Output>,
{
    type Output = Vector<<T as Mul<U>>::Output, N>;

    #[inline]
    fn mul(self, rhs: U) -> Self::Output {
        self.into_iter().map(|e| e * rhs.clone()).collect()
    }
}

impl<T, U, N> MulAssign<U> for Vector<T, N>
    where
    T: MulAssign<U>,
    U: Clone,
    N: ArrayLen<T>,
{
    fn mul_assign(&mut self, rhs: U) {
        for a in self.iter_mut() {
            *a *= rhs.clone();
        }
    }
}

// Scalar division: `vector / scalar`.
impl<T, U, N> Div<U> for Vector<T, N>
    where
    T: Div<U>,
    U: Clone,
    N: ArrayLen<T> + ArrayLen<<T as Div<U>>::Output>,
{
    type Output = Vector<<T as Div<U>>::Output, N>;

    #[inline]
    fn div(self, rhs: U) -> Self::Output {
        self.into_iter().map(|e| e / rhs.clone()).collect()
    }
}

impl<T, U, N> DivAssign<U> for Vector<T, N>
    where
    T: DivAssign<U>,
    U: Clone,
    N: ArrayLen<T>,
{
    #[inline]
    fn div_assign(&mut self, rhs: U) {
        for a in self.iter_mut() {
            *a /= rhs.clone();
        }
    }
}

// Elementwise negation.
impl<T, N> Neg for Vector<T, N>
    where T: Neg, N: ArrayLen<T> + ArrayLen<<T as Neg>::Output>
{
    type Output = Vector<<T as Neg>::Output, N>;

    #[inline]
    fn neg(self) -> Self::Output {
        self.into_iter().map(|e| -e).collect()
    }
}
#[test]
fn test_vector_arith() {
    let a = Vector::<i32, U3>::new([1, 2, 3]);
    let b = Vector::new([4, 5, 6]);
    let a_plus_b = Vector::new([5, 7, 9]);
    let a_minus_b = Vector::new([-3, -3, -3]);

    // All four owned/borrowed operand combinations behave identically.
    assert_eq!(a + b, a_plus_b);
    assert_eq!(&a + b, a_plus_b);
    assert_eq!(a + &b, a_plus_b);
    assert_eq!(&a + &b, a_plus_b);
    assert_eq!(a - b, a_minus_b);
    assert_eq!(&a - b, a_minus_b);
    assert_eq!(a - &b, a_minus_b);
    assert_eq!(&a - &b, a_minus_b);

    // Scalar ops (note integer division in `b / 2`).
    assert_eq!(a * 2, Vector::new([2, 4, 6]));
    assert_eq!(b / 2, Vector::new([2, 2, 3]));

    // Compound-assignment variants.
    let mut a = a;
    a *= 4;
    assert_eq!(a, Vector::new([4, 8, 12]));
    a /= 2;
    assert_eq!(a, Vector::new([2, 4, 6]));
    a -= Vector::new([1, 1, 1]);
    assert_eq!(a, Vector::new([1, 3, 5]));
    a += Vector::new([1, 2, 3]);
    assert_eq!(a, Vector::new([2, 5, 8]));
}
impl<T, N> Vector<T, N>
    where
    T: Mul<T, Output = T> + Add<T, Output = T> + num::Zero + Clone,
    N: ArrayLen<T>,
{
    /// Returns the dot product of this vector and the other.
    ///
    /// Accumulates `self[i] * other[i]` left to right, starting from
    /// `T::zero()`.
    pub fn dot(&self, other: &Vector<T, N>) -> T {
        let mut acc = T::zero();
        for (a, b) in self.iter().zip(other.iter()) {
            acc = acc + a.clone() * b.clone();
        }
        acc
    }
}
#[test]
fn test_dot() {
    let v = Vector::<i32, U3>::new([1, 2, 3]);
    let v2 = Vector::<i32, U3>::new([4, 5, 6]);
    // 1*4 + 2*5 + 3*6 = 32
    assert_eq!(v.dot(&v2), 32);
}

impl<T> Vector<T, U3>
    where
    T: Sub<T, Output = T> + Mul<T, Output = T> + Clone,
{
    /// Returns the cross product of this vector and the other.
    /// Only defined for 3-element vectors.
    pub fn cross(&self, other: &Vector<T, U3>) -> Vector<T, U3> {
        // Shorthand for type-level-indexed access plus a clone; each element
        // is used twice below, so it must be cloned out.
        macro_rules! idx {
            ($mat:ident, $i:ident) => {
                $mat[$i::new()].clone()
            }
        }

        Vector::new([
            idx!(self,U1)*idx!(other,U2) - idx!(self,U2)*idx!(other,U1),
            idx!(self,U2)*idx!(other,U0) - idx!(self,U0)*idx!(other,U2),
            idx!(self,U0)*idx!(other,U1) - idx!(self,U1)*idx!(other,U0),
        ])
    }
}

#[test]
fn test_cross() {
    let v = Vector::<i32, U3>::new([1, 2, 3]);
    let v2 = Vector::<i32, U3>::new([4, 5, 6]);
    assert_eq!(v.cross(&v2), Vector::new([-3, 6, -3]));
}
impl<T, N> Vector<T, N>
    where
    T: Float + Clone,
    N: ArrayLen<T>,
{
    /// Returns the Euclidean (L2) norm: `sqrt(sum(x_i^2))`.
    pub fn norm(&self) -> T {
        let mut sum_sq = T::zero();
        for x in self.iter() {
            sum_sq = sum_sq + x.clone().powi(2);
        }
        sum_sq.sqrt()
    }

    /// Returns `self / self.norm()`.
    ///
    /// Note: a zero vector divides by a zero norm, producing non-finite
    /// components.
    #[inline]
    pub fn normalized(&self) -> Vector<T, N> {
        self.clone() / self.norm()
    }
}
#[test]
fn test_norm() {
    // 3-4-5 right triangle: results are exact in f32.
    let v = Vector::<f32, U2>::new([3.0, 4.0]);
    assert_eq!(v.norm(), 5.0);
    assert_eq!(v.normalized(), Vector::new([0.6, 0.8]));
}
impl<T, N> FromIterator<T> for Vector<T, N> where N: ArrayLen<T> {
fn from_iter<I>(iter: I) -> Self
where I: IntoIterator<Item = T>
{
let mut it = iter.into_iter();
let arr = unsafe {
// FIXME: do not drop uninitialized
let mut arr = mem::uninitialized::<N::Array>();
for i in 0..N::to_usize() {
let item = it.next()
.unwrap_or_else(|| panic!("Vector<_, U{0}> can only be created with exactly {0} elements.",
N::to_usize()));
ptr::write(arr.as_mut().get_unchecked_mut(i), item);
}
// making this `assert_eq` slows down matrix multiplication by 7x!
debug_assert_eq!(it.count(), 0, "Vector<_, U{0}> can only be created with exactly {0} elements.", N::to_usize());
arr
};
Vector::new(arr)
}
}
impl<T, N> IntoIterator for Vector<T, N> where N: ArrayLen<T> {
    type Item = T;
    type IntoIter = IntoIter<T, N>;

    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        IntoIter {
            // `NoDrop` suppresses the array's own destructor: elements are
            // moved out one-by-one with `ptr::read`, and letting the array
            // drop as well would double-drop them.
            arr: NoDrop::new(self.into_inner()),
            next: 0,
            back: N::to_usize(),
        }
    }
}

/// By-value iterator over a `Vector`'s elements.
///
/// Invariant: exactly the elements in `next..back` are still live inside
/// `arr`; everything outside that range has been moved out via `ptr::read`.
pub struct IntoIter<T, N> where N: ArrayLen<T> {
    arr: NoDrop<N::Array>,
    // Index of the next element to yield from the front.
    next: usize,
    // One past the last element still available from the back.
    back: usize,
}

impl<T, N> Drop for IntoIter<T, N> where N: ArrayLen<T> {
    fn drop(&mut self) {
        // Drop only the elements that were never yielded (`next..back`).
        for i in self.next..self.back {
            mem::drop(unsafe { ptr::read(self.arr.as_ref().get_unchecked(i)) });
        }
    }
}

impl<T, N> Iterator for IntoIter<T, N> where N: ArrayLen<T> {
    type Item = T;

    fn next(&mut self) -> Option<T> {
        debug_assert!(self.back <= N::to_usize());
        if self.next < self.back {
            let i = self.next;
            self.next += 1;
            // SAFETY: `i` was inside `next..back`, so the slot is live, and
            // advancing `next` marks it as moved-out.
            Some(unsafe {
                ptr::read(self.arr.as_ref().get_unchecked(i))
            })
        } else {
            None
        }
    }

    // Exact size is known from the live range, enabling precise `collect`
    // preallocation.
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.len();
        (len, Some(len))
    }

    #[inline]
    fn count(self) -> usize {
        self.len()
    }
}

impl<T, N> DoubleEndedIterator for IntoIter<T, N> where N: ArrayLen<T> {
    fn next_back(&mut self) -> Option<Self::Item> {
        debug_assert!(self.back <= N::to_usize());
        if self.back > 0 {
            self.back -= 1;
            // SAFETY: after the decrement, `back` indexes the last live slot,
            // and shrinking the range marks it as moved-out.
            Some(unsafe {
                ptr::read(self.arr.as_ref().get_unchecked(self.back))
            })
        } else {
            None
        }
    }
}

#[test]
fn test_vec_intoiter_next_back() {
    let v = Vector::<i32, U3>::new([1, 2, 3]);
    assert!(v.into_iter().rev().eq(vec![3, 2, 1]));
}

impl<T, N> ExactSizeIterator for IntoIter<T, N> where N: ArrayLen<T> {
    #[inline]
    fn len(&self) -> usize {
        self.back - self.next
    }
}
/// Iterator over consecutive `I`-length sub-vectors of an `N`-length vector,
/// returned by `Vector::into_chunks`.
pub struct Chunks<T, N, I> where N: ArrayLen<T> {
    it: IntoIter<T, N>,
    // Marker for the chunk-length type parameter; no `I` value is stored.
    _i: PhantomData<I>,
}

impl<T, N, I> Chunks<T, N, I> where N: ArrayLen<T> {
    fn new(it: IntoIter<T, N>) -> Self {
        Chunks {
            it: it,
            _i: PhantomData,
        }
    }
}

impl<T, N, I> Iterator for Chunks<T, N, I>
    where
    N: ArrayLen<T> + Rem<I>,
    I: ArrayLen<T>,
    // `N % I == 0` is enforced at the type level, so every chunk is full.
    Mod<N, I>: Same<U0>,
{
    type Item = Vector<T, I>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.it.len() == 0 { // TODO: use is_empty() when it is stabilized
            None
        } else {
            // `collect` panics if fewer than `I` items remain, which cannot
            // happen because I divides N.
            Some((&mut self.it).take(I::to_usize()).collect())
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.it.len() / I::to_usize();
        (len, Some(len))
    }
}

impl<T, N, I> DoubleEndedIterator for Chunks<T, N, I>
    where
    N: ArrayLen<T> + Rem<I>,
    I: ArrayLen<T>,
    Mod<N, I>: Same<U0>,
{
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.it.len() == 0 {
            None
        } else {
            // Take `I` items off the back (reversed), then reverse again so
            // the chunk itself comes out in forward order.
            Some((&mut self.it).rev().take(I::to_usize()).collect::<Vector<T, I>>().into_iter()
                 .rev().collect())
        }
    }
}

impl<T, N, I> ExactSizeIterator for Chunks<T, N, I>
    where
    N: ArrayLen<T> + Rem<I>,
    I: ArrayLen<T>,
    Mod<N, I>: Same<U0>,
{
}

#[test]
fn test_vector_chunks() {
    let arr: Vector<i32, U6> = Vector::new([1, 2, 3, 4, 5, 6]);

    // Forward iteration yields full chunks in order.
    let mut it = arr.into_chunks::<U2>();
    let a: [i32; 2] = it.next().unwrap().into_inner();
    assert_eq!(a, [1, 2]);
    let a: [i32; 2] = it.next().unwrap().into_inner();
    assert_eq!(a, [3, 4]);
    assert_eq!(it.len(), 1);

    // Reverse iteration yields the chunks back-to-front, but each chunk is
    // still in forward order.
    let mut it = arr.into_chunks::<U2>().rev();
    assert_eq!(it.next(), Some(Vector::new([5, 6])));
    assert_eq!(it.next(), Some(Vector::new([3, 4])));
    assert_eq!(it.next(), Some(Vector::new([1, 2])));
    assert_eq!(it.next(), None);
}
/// Maps a type-level length `N` to the concrete array type `[T; N]`.
///
/// Implemented below for lengths 1 through 32 (the range for which the
/// standard library provides array trait impls in this Rust era).
pub trait ArrayLen<T>: typenum::Unsigned {
    type Array: AsRef<[T]> + AsMut<[T]>;
}

macro_rules! impl_arraylen {
    ($tn:ident, $len:expr) => {
        impl<T> ArrayLen<T> for $tn {
            type Array = [T; $len];
        }
    }
}

impl_arraylen!(U1, 1);
impl_arraylen!(U2, 2);
impl_arraylen!(U3, 3);
impl_arraylen!(U4, 4);
impl_arraylen!(U5, 5);
impl_arraylen!(U6, 6);
impl_arraylen!(U7, 7);
impl_arraylen!(U8, 8);
impl_arraylen!(U9, 9);
impl_arraylen!(U10, 10);
impl_arraylen!(U11, 11);
impl_arraylen!(U12, 12);
impl_arraylen!(U13, 13);
impl_arraylen!(U14, 14);
impl_arraylen!(U15, 15);
impl_arraylen!(U16, 16);
impl_arraylen!(U17, 17);
impl_arraylen!(U18, 18);
impl_arraylen!(U19, 19);
impl_arraylen!(U20, 20);
impl_arraylen!(U21, 21);
impl_arraylen!(U22, 22);
impl_arraylen!(U23, 23);
impl_arraylen!(U24, 24);
impl_arraylen!(U25, 25);
impl_arraylen!(U26, 26);
impl_arraylen!(U27, 27);
impl_arraylen!(U28, 28);
impl_arraylen!(U29, 29);
impl_arraylen!(U30, 30);
impl_arraylen!(U31, 31);
impl_arraylen!(U32, 32);

#[test]
fn test_array() {
    use std::ops::Sub;

    // Lengths can be computed with typenum arithmetic (U8 - U3 = U5).
    // rustc bug (broken MIR) https://github.com/rust-lang/rust/issues/28828
    // use typenum::Diff;
    // let a: Vector<i32, Diff<U8, U3>> = Default::default();
    let a: Vector<i32, <U8 as Sub<U3>>::Output> = Default::default();
    assert_eq!(a.len(), 5);
    let _: [i32; 5] = a.0;
}
Avoid dropping uninitialized
use typenum;
use typenum::uint::UInt;
use typenum::consts::*;
use typenum::operator_aliases::Mod;
use typenum::type_operators::Same;
use nodrop::NoDrop;
use num;
use num::Float;
use std::ops::{Deref, DerefMut, Add, Sub, Mul, Div, Neg, Rem,
AddAssign, SubAssign, MulAssign, DivAssign, Index, IndexMut};
use std::marker::PhantomData;
use std::slice::{Iter as SliceIter, IterMut as SliceIterMut};
use std::mem;
use std::ptr;
use std::fmt::{Debug, Formatter, Result as FmtResult};
use std::iter::FromIterator;
/// A fixed-size vector whose elements are allocated on the stack.
///
/// ```rust
/// # use rowcol::prelude::*;
///
/// let arr = Vector::<i32, U5>::new([1, 2, 3, 4, 5]);
/// assert_eq!(*arr, [1, 2, 3, 4, 5]);
/// ```
pub struct Vector<T, N: ArrayLen<T>>(N::Array);

impl<T, N: ArrayLen<T>> Vector<T, N> {
    /// Creates a vector from an array.
    #[inline]
    pub fn new(array: N::Array) -> Self {
        Vector(array)
    }

    /// Creates a vector by calling `f(i)` for each index in `0..N`.
    #[inline]
    pub fn generate<F>(f: F) -> Self where F: FnMut(usize) -> T {
        (0..N::to_usize()).map(f).collect()
    }

    /// Returns the inner array.
    #[inline]
    pub fn into_inner(self) -> N::Array {
        self.0
    }

    /// Returns a slice of entire vector.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        self.0.as_ref()
    }

    /// Returns a mutable slice of entire vector.
    #[inline]
    pub fn as_slice_mut(&mut self) -> &mut [T] {
        self.0.as_mut()
    }

    /// Returns an iterator over this vector.
    #[inline]
    pub fn iter(&self) -> SliceIter<T> {
        self.as_slice().iter()
    }

    /// Returns an mutable iterator over this vector.
    #[inline]
    pub fn iter_mut(&mut self) -> SliceIterMut<T> {
        self.as_slice_mut().iter_mut()
    }

    /// Splits this vector into chunks of `I`-length vectors. `N % I` must be zero.
    ///
    /// ```rust
    /// # use rowcol::prelude::*;
    /// let v = Vector::<i32, U4>::new([1, 2, 3, 4]);
    /// let mut it = v.into_chunks::<U2>();
    /// assert_eq!(it.next(), Some(Vector::new([1, 2])));
    /// assert_eq!(it.next(), Some(Vector::new([3, 4])));
    /// assert_eq!(it.next(), None);
    /// ```
    #[inline]
    pub fn into_chunks<I>(self) -> Chunks<T, N, I>
        where
        N: Rem<I>,
        // Divisibility is enforced at compile time.
        Mod<N, I>: Same<U0>
    {
        Chunks::new(self.into_iter())
    }

    /// Returns a reference to the element at `i` without bounds checking.
    ///
    /// # Safety
    /// `i` must be less than `self.len()` (checked only in debug builds).
    #[inline]
    pub unsafe fn get_unchecked(&self, i: usize) -> &T {
        debug_assert!(i < self.len());
        self.as_slice().get_unchecked(i)
    }

    /// Returns a mutable reference to the element at `i` without bounds checking.
    ///
    /// # Safety
    /// `i` must be less than `self.len()` (checked only in debug builds).
    #[inline]
    pub unsafe fn get_unchecked_mut(&mut self, i: usize) -> &mut T {
        debug_assert!(i < self.len());
        self.as_slice_mut().get_unchecked_mut(i)
    }

    /// Returns the length `N`, known at compile time.
    #[inline]
    pub fn len(&self) -> usize {
        N::to_usize()
    }

    /// Always `false`: `ArrayLen` is only implemented for lengths 1..=32 in
    /// this file, so a `Vector` can never have zero elements.
    #[inline]
    pub fn is_empty(&self) -> bool {
        false
    }
}

impl<T, N> Debug for Vector<T, N>
    where T: Debug, N: ArrayLen<T>
{
    // Formats as `Vector[elem, elem, ...]`.
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        try!(f.write_str("Vector"));
        f.debug_list().entries(self.iter()).finish()
    }
}
impl<T, N> Clone for Vector<T, N>
    where
    T: Clone,
    N: ArrayLen<T>
{
    // Element-wise clone; not derivable because `N::Array` is an associated
    // type with no `Clone` bound of its own.
    #[inline]
    fn clone(&self) -> Self {
        self.iter().cloned().collect()
    }
}

impl<T, N> Copy for Vector<T, N>
    where
    T: Copy,
    N::Array: Copy,
    N: ArrayLen<T>
{
}

impl<T, N> Default for Vector<T, N>
    where
    T: Default,
    N: ArrayLen<T>,
{
    /// A vector with every element set to `T::default()`.
    #[inline]
    fn default() -> Self {
        Vector::generate(|_| T::default())
    }
}

// Deref to the underlying fixed-size array, so `*v` yields `[T; N]`.
impl<T, N> Deref for Vector<T, N>
    where
    N: ArrayLen<T>
{
    type Target = N::Array;

    #[inline]
    fn deref(&self) -> &N::Array {
        &self.0
    }
}

impl<T, N> DerefMut for Vector<T, N>
    where N: ArrayLen<T>
{
    #[inline]
    fn deref_mut(&mut self) -> &mut N::Array {
        &mut self.0
    }
}

impl<T, N> AsRef<[T]> for Vector<T, N>
    where N: ArrayLen<T>
{
    #[inline]
    fn as_ref(&self) -> &[T] {
        self.as_slice()
    }
}

impl<T, N> AsMut<[T]> for Vector<T, N>
    where N: ArrayLen<T>
{
    #[inline]
    fn as_mut(&mut self) -> &mut [T] {
        self.as_slice_mut()
    }
}

// Heterogeneous equality: `Vector<T, N> == Vector<U, N>` whenever
// `T: PartialEq<U>`. Lengths always match because both share `N`.
impl<T, U, N> PartialEq<Vector<U, N>> for Vector<T, N>
    where
    T: PartialEq<U>,
    N: ArrayLen<T> + ArrayLen<U>,
{
    #[inline]
    fn eq(&self, other: &Vector<U, N>) -> bool {
        self.as_slice() == other.as_slice()
    }
}

// NOTE(review): this only requires `T: PartialEq`, so e.g. `Vector<f32, N>`
// advertises `Eq` even though `f32` is not (NaN != NaN). The bound should
// probably be `T: Eq` — confirm no caller relies on the looser bound.
impl<T, N> Eq for Vector<T, N>
    where
    T: PartialEq,
    N: ArrayLen<T>,
{
}

impl<T, N> num::Zero for Vector<T, N> where T: num::Zero + Clone, N: ArrayLen<T> {
    /// The additive identity: a vector of `T::zero()` in every lane.
    #[inline]
    fn zero() -> Self {
        Vector::generate(|_| T::zero())
    }

    #[inline]
    fn is_zero(&self) -> bool {
        self.iter().all(num::Zero::is_zero)
    }
}

impl<T, N> num::Bounded for Vector<T, N> where T: num::Bounded, N: ArrayLen<T> {
    #[inline]
    fn min_value() -> Self {
        Vector::generate(|_| T::min_value())
    }

    #[inline]
    fn max_value() -> Self {
        Vector::generate(|_| T::max_value())
    }
}

// Runtime index: panics on out-of-range, like slice indexing.
impl<T, N> Index<usize> for Vector<T, N> where N: ArrayLen<T> {
    type Output = T;

    #[inline]
    fn index(&self, idx: usize) -> &T {
        &self.as_slice()[idx]
    }
}

impl<T, N> IndexMut<usize> for Vector<T, N> where N: ArrayLen<T> {
    #[inline]
    fn index_mut(&mut self, idx: usize) -> &mut T {
        &mut self.as_slice_mut()[idx]
    }
}

// Type-level index (e.g. `v[U1::new()]`): the `Same<Less>` bound rejects
// out-of-range indices at compile time, so no runtime check is needed.
impl<T, N, IU, IB> Index<UInt<IU, IB>> for Vector<T, N>
    where
    N: ArrayLen<T>,
    IU: typenum::Unsigned,
    IB: typenum::Bit,
    UInt<IU, IB>: typenum::Cmp<N>,
    typenum::Compare<UInt<IU, IB>, N>: Same<typenum::Less>,
{
    type Output = T;

    #[inline]
    fn index(&self, _: UInt<IU, IB>) -> &T {
        // SAFETY: the `Same<typenum::Less>` bound guarantees index < N.
        unsafe {
            self.get_unchecked(<UInt<IU, IB> as typenum::Unsigned>::to_usize())
        }
    }
}

// `U0` does not match the `UInt<IU, IB>` pattern above, so index zero gets
// its own impl.
impl<T, N> Index<U0> for Vector<T, N>
    where N: ArrayLen<T>,
{
    type Output = T;

    #[inline]
    fn index(&self, _: U0) -> &T {
        // SAFETY: `ArrayLen` is only implemented for N >= 1 in this file.
        unsafe {
            self.get_unchecked(0)
        }
    }
}

impl<T, N, IU, IB> IndexMut<UInt<IU, IB>> for Vector<T, N>
    where
    N: ArrayLen<T>,
    IU: typenum::Unsigned,
    IB: typenum::Bit,
    UInt<IU, IB>: typenum::Cmp<N>,
    typenum::Compare<UInt<IU, IB>, N>: Same<typenum::Less>,
{
    #[inline]
    fn index_mut(&mut self, _: UInt<IU, IB>) -> &mut T {
        // SAFETY: the `Same<typenum::Less>` bound guarantees index < N.
        unsafe {
            self.get_unchecked_mut(<UInt<IU, IB> as typenum::Unsigned>::to_usize())
        }
    }
}

impl<T, N> IndexMut<U0> for Vector<T, N>
    where N: ArrayLen<T>,
{
    #[inline]
    fn index_mut(&mut self, _: U0) -> &mut T {
        // SAFETY: `ArrayLen` is only implemented for N >= 1 in this file.
        unsafe {
            self.get_unchecked_mut(0)
        }
    }
}
// Generates elementwise `Add`/`Sub` impls for every combination of owned and
// borrowed operands (T op T, T op &T, &T op T, &T op &T). The output element
// type follows the element-level operator, so `Vector<T> + Vector<U>` works
// whenever `T + U` does.
macro_rules! impl_vector_arith {
    (T T : $op_trait:ident, $op_fn:ident) => {
        impl<T, U, N> $op_trait<Vector<U, N>> for Vector<T, N>
            where
            T: $op_trait<U> ,
            N: ArrayLen<T> + ArrayLen<U> + ArrayLen<<T as $op_trait<U>>::Output>,
        {
            type Output = Vector<<T as $op_trait<U>>::Output, N>;

            fn $op_fn(self, other: Vector<U, N>) -> Self::Output {
                self.into_iter().zip(other).map(|(a, b)| $op_trait::$op_fn(a, b)).collect()
            }
        }
    };

    (T &T : $op_trait:ident, $op_fn:ident) => {
        impl<'a, T, U, N> $op_trait<&'a Vector<U, N>> for Vector<T, N>
            where
            T: $op_trait<&'a U>,
            N: ArrayLen<T> + ArrayLen<U> + ArrayLen<<T as $op_trait<&'a U>>::Output>,
        {
            type Output = Vector<<T as $op_trait<&'a U>>::Output, N>;

            fn $op_fn(self, other: &'a Vector<U, N>) -> Self::Output {
                self.into_iter().zip(other.iter()).map(|(a, b)| $op_trait::$op_fn(a, b)).collect()
            }
        }
    };

    (&T T : $op_trait:ident, $op_fn:ident) => {
        impl<'a, T, U, N> $op_trait<Vector<U, N>> for &'a Vector<T, N>
            where
            &'a T: $op_trait<U>,
            N: ArrayLen<T> + ArrayLen<U> + ArrayLen<<&'a T as $op_trait<U>>::Output>,
        {
            type Output = Vector<<&'a T as $op_trait<U>>::Output, N>;

            fn $op_fn(self, other: Vector<U, N>) -> Self::Output {
                self.iter().zip(other).map(|(a, b)| $op_trait::$op_fn(a, b)).collect()
            }
        }
    };

    (&T &T : $op_trait:ident, $op_fn:ident) => {
        impl<'a, 'b, T, U, N> $op_trait<&'a Vector<U, N>> for &'b Vector<T, N>
            where
            &'b T: $op_trait<&'a U>,
            N: ArrayLen<T> + ArrayLen<U> + ArrayLen<<&'b T as $op_trait<&'a U>>::Output>,
        {
            type Output = Vector<<&'b T as $op_trait<&'a U>>::Output, N>;

            fn $op_fn(self, other: &'a Vector<U, N>) -> Self::Output {
                self.iter().zip(other.iter()).map(|(a, b)| $op_trait::$op_fn(a, b)).collect()
            }
        }
    };
}

impl_vector_arith!(T T: Add, add);
impl_vector_arith!(T T: Sub, sub);
impl_vector_arith!(T &T: Add, add);
impl_vector_arith!(T &T: Sub, sub);
impl_vector_arith!(&T T: Add, add);
impl_vector_arith!(&T T: Sub, sub);
impl_vector_arith!(&T &T: Add, add);
impl_vector_arith!(&T &T: Sub, sub);
// In-place elementwise addition.
impl<T, U, N> AddAssign<Vector<U, N>> for Vector<T, N>
    where
    T: AddAssign<U>,
    N: ArrayLen<T> + ArrayLen<U>,
{
    #[inline]
    fn add_assign(&mut self, rhs: Vector<U, N>) {
        for (a, b) in self.iter_mut().zip(rhs) {
            *a += b;
        }
    }
}

// In-place elementwise subtraction.
impl<T, U, N> SubAssign<Vector<U, N>> for Vector<T, N>
    where
    T: SubAssign<U>,
    N: ArrayLen<T> + ArrayLen<U>,
{
    #[inline]
    fn sub_assign(&mut self, rhs: Vector<U, N>) {
        for (a, b) in self.iter_mut().zip(rhs) {
            *a -= b;
        }
    }
}

// Scalar multiplication: `vector * scalar`. The scalar is cloned once per
// element, hence the `U: Clone` bound.
impl<T, U, N> Mul<U> for Vector<T, N>
    where
    T: Mul<U>,
    U: Clone,
    N: ArrayLen<T> + ArrayLen<<T as Mul<U>>::Output>,
{
    type Output = Vector<<T as Mul<U>>::Output, N>;

    #[inline]
    fn mul(self, rhs: U) -> Self::Output {
        self.into_iter().map(|e| e * rhs.clone()).collect()
    }
}

impl<T, U, N> MulAssign<U> for Vector<T, N>
    where
    T: MulAssign<U>,
    U: Clone,
    N: ArrayLen<T>,
{
    fn mul_assign(&mut self, rhs: U) {
        for a in self.iter_mut() {
            *a *= rhs.clone();
        }
    }
}

// Scalar division: `vector / scalar`.
impl<T, U, N> Div<U> for Vector<T, N>
    where
    T: Div<U>,
    U: Clone,
    N: ArrayLen<T> + ArrayLen<<T as Div<U>>::Output>,
{
    type Output = Vector<<T as Div<U>>::Output, N>;

    #[inline]
    fn div(self, rhs: U) -> Self::Output {
        self.into_iter().map(|e| e / rhs.clone()).collect()
    }
}

impl<T, U, N> DivAssign<U> for Vector<T, N>
    where
    T: DivAssign<U>,
    U: Clone,
    N: ArrayLen<T>,
{
    #[inline]
    fn div_assign(&mut self, rhs: U) {
        for a in self.iter_mut() {
            *a /= rhs.clone();
        }
    }
}

// Elementwise negation.
impl<T, N> Neg for Vector<T, N>
    where T: Neg, N: ArrayLen<T> + ArrayLen<<T as Neg>::Output>
{
    type Output = Vector<<T as Neg>::Output, N>;

    #[inline]
    fn neg(self) -> Self::Output {
        self.into_iter().map(|e| -e).collect()
    }
}

#[test]
fn test_vector_arith() {
    let a = Vector::<i32, U3>::new([1, 2, 3]);
    let b = Vector::new([4, 5, 6]);
    let a_plus_b = Vector::new([5, 7, 9]);
    let a_minus_b = Vector::new([-3, -3, -3]);

    // All four owned/borrowed operand combinations behave identically.
    assert_eq!(a + b, a_plus_b);
    assert_eq!(&a + b, a_plus_b);
    assert_eq!(a + &b, a_plus_b);
    assert_eq!(&a + &b, a_plus_b);
    assert_eq!(a - b, a_minus_b);
    assert_eq!(&a - b, a_minus_b);
    assert_eq!(a - &b, a_minus_b);
    assert_eq!(&a - &b, a_minus_b);

    // Scalar ops (note integer division in `b / 2`).
    assert_eq!(a * 2, Vector::new([2, 4, 6]));
    assert_eq!(b / 2, Vector::new([2, 2, 3]));

    // Compound-assignment variants.
    let mut a = a;
    a *= 4;
    assert_eq!(a, Vector::new([4, 8, 12]));
    a /= 2;
    assert_eq!(a, Vector::new([2, 4, 6]));
    a -= Vector::new([1, 1, 1]);
    assert_eq!(a, Vector::new([1, 3, 5]));
    a += Vector::new([1, 2, 3]);
    assert_eq!(a, Vector::new([2, 5, 8]));
}
impl<T, N> Vector<T, N>
    where
    T: Mul<T, Output = T> + Add<T, Output = T> + num::Zero + Clone,
    N: ArrayLen<T>,
{
    /// Returns the dot product of this vector and the other.
    ///
    /// Accumulates `self[i] * other[i]` left to right, starting from
    /// `T::zero()`.
    pub fn dot(&self, other: &Vector<T, N>) -> T {
        self.iter().cloned().zip(other.iter().cloned()).map(|(a, b)| a * b)
            .fold(T::zero(), Add::add)
    }
}

#[test]
fn test_dot() {
    let v = Vector::<i32, U3>::new([1, 2, 3]);
    let v2 = Vector::<i32, U3>::new([4, 5, 6]);
    // 1*4 + 2*5 + 3*6 = 32
    assert_eq!(v.dot(&v2), 32);
}

impl<T> Vector<T, U3>
    where
    T: Sub<T, Output = T> + Mul<T, Output = T> + Clone,
{
    /// Returns the cross product of this vector and the other.
    /// Only defined for 3-element vectors.
    pub fn cross(&self, other: &Vector<T, U3>) -> Vector<T, U3> {
        // Shorthand for type-level-indexed access plus a clone; each element
        // is used twice below, so it must be cloned out.
        macro_rules! idx {
            ($mat:ident, $i:ident) => {
                $mat[$i::new()].clone()
            }
        }

        Vector::new([
            idx!(self,U1)*idx!(other,U2) - idx!(self,U2)*idx!(other,U1),
            idx!(self,U2)*idx!(other,U0) - idx!(self,U0)*idx!(other,U2),
            idx!(self,U0)*idx!(other,U1) - idx!(self,U1)*idx!(other,U0),
        ])
    }
}

#[test]
fn test_cross() {
    let v = Vector::<i32, U3>::new([1, 2, 3]);
    let v2 = Vector::<i32, U3>::new([4, 5, 6]);
    assert_eq!(v.cross(&v2), Vector::new([-3, 6, -3]));
}

impl<T, N> Vector<T, N>
    where
    T: Float + Clone,
    N: ArrayLen<T>,
{
    /// Returns the Euclidean (L2) norm: `sqrt(sum(x_i^2))`.
    pub fn norm(&self) -> T {
        let prod = self.iter().cloned().map(|x| x.powi(2)).fold(T::zero(), Add::add);
        prod.sqrt()
    }

    /// Returns `self / self.norm()`.
    ///
    /// Note: a zero vector divides by a zero norm, producing non-finite
    /// components.
    #[inline]
    pub fn normalized(&self) -> Vector<T, N> {
        self.clone() / self.norm()
    }
}

#[test]
fn test_norm() {
    // 3-4-5 right triangle: results are exact in f32.
    let v = Vector::<f32, U2>::new([3.0, 4.0]);
    assert_eq!(v.norm(), 5.0);
    assert_eq!(v.normalized(), Vector::new([0.6, 0.8]));
}
impl<T, N> FromIterator<T> for Vector<T, N> where N: ArrayLen<T> {
    /// Builds a `Vector` from an iterator that yields exactly `N` items.
    ///
    /// # Panics
    /// Panics if the iterator yields fewer than `N` items; debug builds also
    /// panic if it yields more.
    fn from_iter<I>(iter: I) -> Self
        where I: IntoIterator<Item = T>
    {
        let mut it = iter.into_iter();
        let arr = unsafe {
            // `NoDrop` suppresses the array's destructor so that a panic
            // from `it.next()` mid-fill cannot drop uninitialized memory;
            // elements already written merely leak on that path, which is
            // safe.
            // NOTE: somehow `NoDrop` drops performance - it should be optimized out
            let mut arr = NoDrop::new(mem::uninitialized::<N::Array>());
            for i in 0..N::to_usize() {
                let item = it.next()
                    .unwrap_or_else(|| panic!("Vector<_, U{0}> can only be created with exactly {0} elements.",
                                              N::to_usize()));
                // SAFETY: `i < N` by the loop bound; `ptr::write` does not
                // drop the (uninitialized) previous contents.
                ptr::write(arr.as_mut().get_unchecked_mut(i), item);
            }
            // making this `assert_eq` slows down matrix multiplication by 7x!
            debug_assert_eq!(it.count(), 0, "Vector<_, U{0}> can only be created with exactly {0} elements.", N::to_usize());
            // Every slot is initialized now; safe to unwrap the guard.
            arr.into_inner()
        };
        Vector::new(arr)
    }
}
impl<T, N> IntoIterator for Vector<T, N> where N: ArrayLen<T> {
    type Item = T;
    type IntoIter = IntoIter<T, N>;
    /// Converts the vector into a by-value iterator over its elements.
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        IntoIter {
            // `NoDrop` stops the array from dropping its elements; ownership
            // tracking is taken over by `IntoIter` and its `Drop` impl.
            arr: NoDrop::new(self.into_inner()),
            next: 0,
            back: N::to_usize(),
        }
    }
}
/// By-value iterator over a `Vector`'s elements.
pub struct IntoIter<T, N> where N: ArrayLen<T> {
    arr: NoDrop<N::Array>,
    next: usize, // index of the next element to yield from the front
    back: usize, // one past the next element to yield from the back
}
impl<T, N> Drop for IntoIter<T, N> where N: ArrayLen<T> {
    fn drop(&mut self) {
        // Elements in `next..back` were never yielded and still need to be
        // dropped; everything outside that range was already moved out via
        // `ptr::read` in `next`/`next_back`.
        for i in self.next..self.back {
            mem::drop(unsafe { ptr::read(self.arr.as_ref().get_unchecked(i)) });
        }
    }
}
impl<T, N> Iterator for IntoIter<T, N> where N: ArrayLen<T> {
    type Item = T;
    fn next(&mut self) -> Option<T> {
        debug_assert!(self.back <= N::to_usize());
        // Yield from the front until the front cursor meets the back cursor.
        if self.next < self.back {
            let i = self.next;
            self.next += 1;
            Some(unsafe {
                // Moves the element out; `Drop` skips indices below `next`.
                ptr::read(self.arr.as_ref().get_unchecked(i))
            })
        } else {
            None
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact hint: the remaining element count is always known.
        let len = self.len();
        (len, Some(len))
    }
    #[inline]
    fn count(self) -> usize {
        self.len()
    }
}
impl<T, N> DoubleEndedIterator for IntoIter<T, N> where N: ArrayLen<T> {
    fn next_back(&mut self) -> Option<Self::Item> {
        debug_assert!(self.back <= N::to_usize());
        // BUG FIX: this previously checked `self.back > 0`, which ignores how
        // far the front cursor (`self.next`) has advanced. Mixing `next()` and
        // `next_back()` could then yield — and later drop — the same element
        // twice. The iterator is exhausted once the two cursors meet.
        if self.back > self.next {
            self.back -= 1;
            Some(unsafe {
                // `next..back` holds not-yet-yielded elements; decrementing
                // `back` first makes index `back` ours to read exactly once.
                ptr::read(self.arr.as_ref().get_unchecked(self.back))
            })
        } else {
            None
        }
    }
}
#[test]
fn test_vec_intoiter_next_back() {
    // Reversing the by-value iterator yields the elements back-to-front.
    let it = Vector::<i32, U3>::new([1, 2, 3]).into_iter().rev();
    assert!(it.eq(vec![3, 2, 1]));
}
impl<T, N> ExactSizeIterator for IntoIter<T, N> where N: ArrayLen<T> {
    /// Number of elements not yet yielded from either end.
    #[inline]
    fn len(&self) -> usize {
        self.back - self.next
    }
}
/// Iterator adaptor yielding a `Vector`'s elements in `I`-sized chunks.
pub struct Chunks<T, N, I> where N: ArrayLen<T> {
    it: IntoIter<T, N>,
    // `I` only appears in the trait impls' associated types, so it must be
    // carried here as a phantom parameter.
    _i: PhantomData<I>,
}
impl<T, N, I> Chunks<T, N, I> where N: ArrayLen<T> {
    /// Wraps an element iterator so it can be consumed in `I`-sized chunks.
    fn new(inner: IntoIter<T, N>) -> Self {
        Chunks {
            it: inner,
            _i: PhantomData,
        }
    }
}
impl<T, N, I> Iterator for Chunks<T, N, I>
where
    N: ArrayLen<T> + Rem<I>,
    I: ArrayLen<T>,
    Mod<N, I>: Same<U0>,
{
    type Item = Vector<T, I>;
    /// Collects the next `I` elements into a fixed-size chunk vector.
    fn next(&mut self) -> Option<Self::Item> {
        // TODO: use is_empty() when it is stabilized
        if self.it.len() > 0 {
            Some((&mut self.it).take(I::to_usize()).collect())
        } else {
            None
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // `N` divides evenly by `I` (enforced by the `Mod` bound), so the
        // chunk count is exact.
        let chunks_left = self.it.len() / I::to_usize();
        (chunks_left, Some(chunks_left))
    }
}
impl<T, N, I> DoubleEndedIterator for Chunks<T, N, I>
where
    N: ArrayLen<T> + Rem<I>,
    I: ArrayLen<T>,
    Mod<N, I>: Same<U0>,
{
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.it.len() == 0 {
            None
        } else {
            // Take `I` elements from the back (they come out in reverse order),
            // then reverse again so the chunk itself reads forward.
            Some((&mut self.it).rev().take(I::to_usize()).collect::<Vector<T, I>>().into_iter()
                 .rev().collect())
        }
    }
}
impl<T, N, I> ExactSizeIterator for Chunks<T, N, I>
where
    N: ArrayLen<T> + Rem<I>,
    I: ArrayLen<T>,
    Mod<N, I>: Same<U0>,
{
    // `size_hint` above is exact, so the default `len` implementation is
    // sufficient here.
}
#[test]
fn test_vector_chunks() {
    let arr: Vector<i32, U6> = Vector::new([1, 2, 3, 4, 5, 6]);
    // Forward iteration over 2-element chunks.
    let mut it = arr.into_chunks::<U2>();
    let a: [i32; 2] = it.next().unwrap().into_inner();
    assert_eq!(a, [1, 2]);
    let a: [i32; 2] = it.next().unwrap().into_inner();
    assert_eq!(a, [3, 4]);
    assert_eq!(it.len(), 1);
    // Reverse iteration yields chunks back-to-front, each chunk still in
    // forward order.
    let mut it = arr.into_chunks::<U2>().rev();
    assert_eq!(it.next(), Some(Vector::new([5, 6])));
    assert_eq!(it.next(), Some(Vector::new([3, 4])));
    assert_eq!(it.next(), Some(Vector::new([1, 2])));
    assert_eq!(it.next(), None);
}
/// Maps a typenum unsigned integer to the concrete `[T; len]` array type of
/// that length.
pub trait ArrayLen<T>: typenum::Unsigned {
    type Array: AsRef<[T]> + AsMut<[T]>;
}
/// Implements `ArrayLen` for one typenum type / array-length pair.
macro_rules! impl_arraylen {
    ($tn:ident, $len:expr) => {
        impl<T> ArrayLen<T> for $tn {
            type Array = [T; $len];
        }
    }
}
// Vector lengths 1 through 32 are supported.
impl_arraylen!(U1, 1);
impl_arraylen!(U2, 2);
impl_arraylen!(U3, 3);
impl_arraylen!(U4, 4);
impl_arraylen!(U5, 5);
impl_arraylen!(U6, 6);
impl_arraylen!(U7, 7);
impl_arraylen!(U8, 8);
impl_arraylen!(U9, 9);
impl_arraylen!(U10, 10);
impl_arraylen!(U11, 11);
impl_arraylen!(U12, 12);
impl_arraylen!(U13, 13);
impl_arraylen!(U14, 14);
impl_arraylen!(U15, 15);
impl_arraylen!(U16, 16);
impl_arraylen!(U17, 17);
impl_arraylen!(U18, 18);
impl_arraylen!(U19, 19);
impl_arraylen!(U20, 20);
impl_arraylen!(U21, 21);
impl_arraylen!(U22, 22);
impl_arraylen!(U23, 23);
impl_arraylen!(U24, 24);
impl_arraylen!(U25, 25);
impl_arraylen!(U26, 26);
impl_arraylen!(U27, 27);
impl_arraylen!(U28, 28);
impl_arraylen!(U29, 29);
impl_arraylen!(U30, 30);
impl_arraylen!(U31, 31);
impl_arraylen!(U32, 32);
#[test]
fn test_array() {
    use std::ops::Sub;
    // Length may be a type-level expression, e.g. U8 - U3 = U5.
    // rustc bug (broken MIR) https://github.com/rust-lang/rust/issues/28828
    // use typenum::Diff;
    // let a: Vector<i32, Diff<U8, U3>> = Default::default();
    let a: Vector<i32, <U8 as Sub<U3>>::Output> = Default::default();
    assert_eq!(a.len(), 5);
    let _: [i32; 5] = a.0;
}
|
use std::rc::Rc;
use xcb;
use cairo::{self, XCBSurface};
use cairo_sys;
use text::Text;
/// Finds the visual type whose id matches `screen`'s root visual, searching
/// every root's allowed depths.
///
/// # Panics
/// Panics if no matching visual type exists.
fn get_root_visual_type(conn: &xcb::Connection, screen: &xcb::Screen) -> xcb::Visualtype {
    for root in conn.get_setup().roots() {
        for allowed_depth in root.allowed_depths() {
            for visual in allowed_depth.visuals() {
                if visual.visual_id() == screen.root_visual() {
                    return visual;
                }
            }
        }
    }
    panic!("No visual type found");
}
// TODO: impl Drop?
/// An X11 window (created over XCB) with an attached cairo surface.
pub struct Window {
    conn: Rc<xcb::Connection>,
    screen_idx: usize, // index into the connection setup's screen list
    surface: cairo::Surface,
}
impl Window {
    /// Creates and maps a screen-wide, 100px-tall window on `screen_idx`
    /// and attaches a cairo XCB surface to it.
    ///
    /// # Panics
    /// Panics if `screen_idx` does not name an existing screen.
    pub fn new(conn: Rc<xcb::Connection>, screen_idx: usize) -> Window {
        let id = conn.generate_id();
        // CLEANUP: the unsafe block below previously went through a redundant
        // inner `let surface = ...; surface` binding; the block expression is
        // itself the value of the outer `surface`.
        let surface = {
            let screen = conn.get_setup()
                .roots()
                .nth(screen_idx)
                .expect("invalid screen");
            let values = [(xcb::CW_BACK_PIXEL, screen.black_pixel()),
                          (xcb::CW_EVENT_MASK, xcb::EVENT_MASK_EXPOSURE)];
            let (width, height) = (screen.width_in_pixels(), 100);
            xcb::create_window(&conn,
                               xcb::COPY_FROM_PARENT as u8,
                               id,
                               screen.root(),
                               0,
                               0,
                               width,
                               height,
                               10,
                               xcb::WINDOW_CLASS_INPUT_OUTPUT as u16,
                               screen.root_visual(),
                               &values);
            unsafe {
                // Bridge the xcb connection/visual into cairo's FFI types.
                let cairo_conn = cairo::XCBConnection::from_raw_none(conn.get_raw_conn() as
                                                                    *mut cairo_sys::xcb_connection_t);
                let visual =
                    cairo::XCBVisualType::from_raw_none(&mut get_root_visual_type(&conn, &screen).base as
                                                        *mut xcb::ffi::xcb_visualtype_t as
                                                        *mut cairo_sys::xcb_visualtype_t);
                let drawable = cairo::XCBDrawable(id);
                cairo::Surface::create(&cairo_conn, &drawable, &visual, width as i32, height as i32)
                // TODO: Update surface width/height when window size changes.
            }
        };
        xcb::map_window(&conn, id);
        conn.flush();
        Window {
            conn,
            screen_idx,
            surface,
        }
    }
    /// Looks up this window's screen from the connection setup.
    fn screen<'a>(&'a self) -> xcb::Screen<'a> {
        self.conn
            .get_setup()
            .roots()
            .nth(self.screen_idx)
            .expect("Invalid screen")
    }
    /// Redraws the given text blocks and flushes the connection.
    pub fn expose(&self, texts: Vec<Text>) {
        self.render_text_blocks(texts);
        self.conn.flush();
    }
    fn render_text_blocks(&self, texts: Vec<Text>) {
        // Layout each block of text. After this, we can query the width of each
        // block, which will allow us to do more complex layout below.
        let mut layouts: Vec<_> = texts.into_iter().map(|t| t.layout(&self.surface)).collect();
        // Calculate how much free space we have after laying out all the
        // non-stretch blocks. Split the remaining space (if any) between the
        // stretch blocks. If there isn't enough space for the non-stretch blocks
        // do nothing and allow it to overflow.
        {
            let mut width = 0.0;
            let mut stretched = Vec::new();
            for layout in layouts.iter_mut() {
                if !layout.stretch() {
                    width += layout.width();
                } else {
                    stretched.push(layout);
                }
            }
            let remaining_width = self.screen().width_in_pixels() as f64 - width;
            let remaining_width = if remaining_width < 0.0 {
                0.0
            } else {
                remaining_width
            };
            // NOTE(review): when `stretched` is empty this divides by zero,
            // but the resulting value is never used (loop body doesn't run).
            let width_per_stretched = remaining_width / (stretched.len() as f64);
            for layout in stretched.iter_mut() {
                layout.set_width(width_per_stretched);
            }
        }
        // TODO: Set the height of the window and the height of each text block(?)
        // to the height of the largest bit of text.
        // Finally, just render each block of text in turn.
        let mut x = 0.0;
        for layout in &layouts {
            layout.render(x, 0.0);
            x += layout.width();
        }
    }
}
Remove unnecessary block.
use std::rc::Rc;
use xcb;
use cairo::{self, XCBSurface};
use cairo_sys;
use text::Text;
/// Finds the visual type whose id matches `screen`'s root visual, searching
/// every root's allowed depths.
///
/// # Panics
/// Panics if no matching visual type exists.
fn get_root_visual_type(conn: &xcb::Connection, screen: &xcb::Screen) -> xcb::Visualtype {
    for root in conn.get_setup().roots() {
        for allowed_depth in root.allowed_depths() {
            for visual in allowed_depth.visuals() {
                if visual.visual_id() == screen.root_visual() {
                    return visual;
                }
            }
        }
    }
    panic!("No visual type found");
}
// TODO: impl Drop?
/// An X11 window (created over XCB) with an attached cairo surface.
pub struct Window {
    conn: Rc<xcb::Connection>,
    screen_idx: usize, // index into the connection setup's screen list
    surface: cairo::Surface,
}
impl Window {
    /// Creates and maps a screen-wide, 100px-tall window on `screen_idx`
    /// and attaches a cairo XCB surface to it.
    ///
    /// # Panics
    /// Panics if `screen_idx` does not name an existing screen.
    pub fn new(conn: Rc<xcb::Connection>, screen_idx: usize) -> Window {
        let id = conn.generate_id();
        let surface = {
            let screen = conn.get_setup()
                .roots()
                .nth(screen_idx)
                .expect("invalid screen");
            let values = [(xcb::CW_BACK_PIXEL, screen.black_pixel()),
                          (xcb::CW_EVENT_MASK, xcb::EVENT_MASK_EXPOSURE)];
            let (width, height) = (screen.width_in_pixels(), 100);
            xcb::create_window(&conn,
                               xcb::COPY_FROM_PARENT as u8,
                               id,
                               screen.root(),
                               0,
                               0,
                               width,
                               height,
                               10,
                               xcb::WINDOW_CLASS_INPUT_OUTPUT as u16,
                               screen.root_visual(),
                               &values);
            unsafe {
                // Bridge the xcb connection/visual into cairo's FFI types.
                let cairo_conn = cairo::XCBConnection::from_raw_none(conn.get_raw_conn() as
                                                                    *mut cairo_sys::xcb_connection_t);
                let visual =
                    cairo::XCBVisualType::from_raw_none(&mut get_root_visual_type(&conn, &screen).base as
                                                        *mut xcb::ffi::xcb_visualtype_t as
                                                        *mut cairo_sys::xcb_visualtype_t);
                let drawable = cairo::XCBDrawable(id);
                cairo::Surface::create(&cairo_conn, &drawable, &visual, width as i32, height as i32)
                // TODO: Update surface width/height when window size changes.
            }
        };
        xcb::map_window(&conn, id);
        conn.flush();
        Window {
            conn,
            screen_idx,
            surface,
        }
    }
    /// Looks up this window's screen from the connection setup.
    fn screen<'a>(&'a self) -> xcb::Screen<'a> {
        self.conn
            .get_setup()
            .roots()
            .nth(self.screen_idx)
            .expect("Invalid screen")
    }
    /// Redraws the given text blocks and flushes the connection.
    pub fn expose(&self, texts: Vec<Text>) {
        self.render_text_blocks(texts);
        self.conn.flush();
    }
    fn render_text_blocks(&self, texts: Vec<Text>) {
        // Layout each block of text. After this, we can query the width of each
        // block, which will allow us to do more complex layout below.
        let mut layouts: Vec<_> = texts.into_iter().map(|t| t.layout(&self.surface)).collect();
        // Calculate how much free space we have after laying out all the
        // non-stretch blocks. Split the remaining space (if any) between the
        // stretch blocks. If there isn't enough space for the non-stretch blocks
        // do nothing and allow it to overflow.
        {
            let mut width = 0.0;
            let mut stretched = Vec::new();
            for layout in layouts.iter_mut() {
                if !layout.stretch() {
                    width += layout.width();
                } else {
                    stretched.push(layout);
                }
            }
            let remaining_width = self.screen().width_in_pixels() as f64 - width;
            let remaining_width = if remaining_width < 0.0 {
                0.0
            } else {
                remaining_width
            };
            // NOTE(review): when `stretched` is empty this divides by zero,
            // but the resulting value is never used (loop body doesn't run).
            let width_per_stretched = remaining_width / (stretched.len() as f64);
            for layout in stretched.iter_mut() {
                layout.set_width(width_per_stretched);
            }
        }
        // TODO: Set the height of the window and the height of each text block(?)
        // to the height of the largest bit of text.
        // Finally, just render each block of text in turn.
        let mut x = 0.0;
        for layout in &layouts {
            layout.render(x, 0.0);
            x += layout.width();
        }
    }
}
|
use compression;
use types::ZipFile;
use spec;
use writer_spec;
use crc32;
use std::default::Default;
use std::io;
use std::io::{IoResult, IoError};
use std::mem;
use time;
use flate2;
use flate2::FlateWriter;
use flate2::writer::DeflateEncoder;
/// Inner writer state: closed, writing raw (stored) bytes, or deflating.
enum GenericZipWriter<W>
{
    Closed,
    Storer(W),
    Deflater(DeflateEncoder<W>),
}
/// Generator for ZIP files.
///
/// ```
/// fn doit() -> std::io::IoResult<()>
/// {
///     // For this example we write to a buffer, but normally you should use a File
///     let mut buf = [0u8, ..65536];
///     let w = std::io::BufWriter::new(&mut buf);
///     let mut zip = zip::ZipWriter::new(w);
///
///     try!(zip.start_file("hello_world.txt", zip::compression::Stored));
///     try!(zip.write(b"Hello, World!"));
///
///     // Optionally finish the zip. (this is also done on drop)
///     try!(zip.finish());
///
///     Ok(())
/// }
///
/// println!("Result: {}", doit());
/// ```
pub struct ZipWriter<W>
{
    inner: GenericZipWriter<W>,  // underlying (possibly compressing) writer
    files: Vec<ZipFile>,         // metadata for every file written so far
    stats: ZipWriterStats,       // running CRC/size stats for the current file
}
/// Running statistics for the file currently being written.
#[deriving(Default)]
struct ZipWriterStats
{
    crc32: u32,         // CRC-32 of the data written so far
    start: u64,         // stream position where this file's data began
    bytes_written: u64, // uncompressed byte count
}
/// Builds the error returned for operations on an already-closed writer.
fn writer_closed_error<T>() -> IoResult<T>
{
    Err(IoError { kind: io::Closed, desc: "This writer has been closed", detail: None })
}
impl<W: Writer+Seek> Writer for ZipWriter<W>
{
    /// Writes file data, folding it into the running CRC and size stats.
    ///
    /// Returns an error unless `start_file` has been called at least once.
    fn write(&mut self, buf: &[u8]) -> IoResult<()>
    {
        if self.files.len() == 0 { return Err(IoError { kind: io::OtherIoError, desc: "No file has been started", detail: None, }) }
        self.stats.update(buf);
        match self.inner
        {
            Storer(ref mut w) => w.write(buf),
            Deflater(ref mut w) => w.write(buf),
            Closed => writer_closed_error(),
        }
    }
}
impl ZipWriterStats
{
    /// Folds `buf` into the running CRC-32 and uncompressed byte count.
    fn update(&mut self, buf: &[u8])
    {
        self.crc32 = crc32::update(self.crc32, buf);
        self.bytes_written += buf.len() as u64;
    }
}
impl<W: Writer+Seek> ZipWriter<W>
{
    /// Initializes the ZipWriter.
    ///
    /// Before writing to this object, the start_file command should be called.
    pub fn new(inner: W) -> ZipWriter<W>
    {
        ZipWriter
        {
            inner: Storer(inner),
            files: Vec::new(),
            stats: Default::default(),
        }
    }
    /// Start a new file for with the requested compression method.
    pub fn start_file(&mut self, name: &str, compression: compression::CompressionMethod) -> IoResult<()>
    {
        try!(self.finish_file());
        {
            let writer = self.inner.get_plain();
            let header_start = try!(writer.tell());
            // CRC and sizes are unknown at this point; the header is rewritten
            // later by `finish_file` via `update_local_file_header`.
            let mut file = ZipFile
            {
                encrypted: false,
                compression_method: compression,
                last_modified_time: time::now(),
                crc32: 0,
                compressed_size: 0,
                uncompressed_size: 0,
                file_name: String::from_str(name),
                file_comment: String::new(),
                header_start: header_start,
                data_start: 0,
            };
            try!(writer_spec::write_local_file_header(writer, &file));
            let header_end = try!(writer.tell());
            self.stats.start = header_end;
            file.data_start = header_end;
            // Reset running stats for the new file.
            self.stats.bytes_written = 0;
            self.stats.crc32 = 0;
            self.files.push(file);
        }
        try!(self.inner.switch_to(compression));
        Ok(())
    }
    /// Completes the current file: records its CRC/sizes, rewrites its local
    /// header, and seeks back to the end of the stream. No-op when no file
    /// has been started.
    fn finish_file(&mut self) -> IoResult<()>
    {
        try!(self.inner.switch_to(compression::Stored));
        let writer = self.inner.get_plain();
        let file = match self.files.last_mut()
        {
            None => return Ok(()),
            Some(f) => f,
        };
        file.crc32 = self.stats.crc32;
        file.uncompressed_size = self.stats.bytes_written;
        file.compressed_size = try!(writer.tell()) - self.stats.start;
        try!(writer_spec::update_local_file_header(writer, file));
        try!(writer.seek(0, io::SeekEnd));
        Ok(())
    }
    /// Finish the last file and write all other zip-structures
    ///
    /// This will return the writer, but one should normally not append any data to the end of the file.
    /// Note that the zipfile will also be finished on drop.
    pub fn finish(mut self) -> IoResult<W>
    {
        try!(self.finalize());
        let inner = mem::replace(&mut self.inner, Closed);
        Ok(inner.unwrap())
    }
    /// Writes the central directory and the end-of-central-directory record.
    fn finalize(&mut self) -> IoResult<()>
    {
        try!(self.finish_file());
        {
            let writer = self.inner.get_plain();
            let central_start = try!(writer.tell());
            for file in self.files.iter()
            {
                try!(writer_spec::write_central_directory_header(writer, file));
            }
            let central_size = try!(writer.tell()) - central_start;
            let footer = spec::CentralDirectoryEnd
            {
                disk_number: 0,
                disk_with_central_directory: 0,
                number_of_files_on_this_disk: self.files.len() as u16,
                number_of_files: self.files.len() as u16,
                central_directory_size: central_size as u32,
                central_directory_offset: central_start as u32,
                zip_file_comment: b"zip-rs".to_vec(),
            };
            try!(footer.write(writer));
        }
        Ok(())
    }
}
#[unsafe_destructor]
impl<W: Writer+Seek> Drop for ZipWriter<W>
{
    /// Finalizes the archive if `finish` was never called. Errors are only
    /// logged, since `drop` has no way to return them.
    fn drop(&mut self)
    {
        if !self.inner.is_closed()
        {
            match self.finalize()
            {
                Ok(_) => {},
                Err(e) => warn!("ZipWriter drop failed: {}", e),
            }
        }
    }
}
impl<W: Writer+Seek> GenericZipWriter<W>
{
    /// Moves the inner writer into the state required by `compression`,
    /// finishing any in-progress deflate stream first.
    fn switch_to(&mut self, compression: compression::CompressionMethod) -> IoResult<()>
    {
        let bare = match mem::replace(self, Closed)
        {
            Storer(w) => w,
            Deflater(w) => try!(w.finish()),
            Closed => return writer_closed_error(),
        };
        *self = match compression
        {
            compression::Stored => Storer(bare),
            compression::Deflated => Deflater(bare.deflate_encode(flate2::Default)),
            _ => return Err(IoError { kind: io::OtherIoError, desc: "Unsupported compression requested", detail: None }),
        };
        Ok(())
    }
    fn is_closed(&self) -> bool
    {
        match *self
        {
            Closed => true,
            _ => false,
        }
    }
    /// Returns the raw writer; callers must `switch_to(Stored)` beforehand.
    fn get_plain(&mut self) -> &mut W
    {
        match *self
        {
            Storer(ref mut w) => w,
            // FIX: `fail!` was renamed to `panic!` (Rust RFC 221); use the
            // current name, consistent with the rest of the codebase.
            _ => panic!("Should have switched to stored beforehand"),
        }
    }
    /// Consumes the wrapper, returning the raw writer.
    fn unwrap(self) -> W
    {
        match self
        {
            Storer(w) => w,
            _ => panic!("Should have switched to stored beforehand"),
        }
    }
}
Replace the deprecated `fail!` macro with `panic!`.
use compression;
use types::ZipFile;
use spec;
use writer_spec;
use crc32;
use std::default::Default;
use std::io;
use std::io::{IoResult, IoError};
use std::mem;
use time;
use flate2;
use flate2::FlateWriter;
use flate2::writer::DeflateEncoder;
/// Inner writer state: closed, writing raw (stored) bytes, or deflating.
enum GenericZipWriter<W>
{
    Closed,
    Storer(W),
    Deflater(DeflateEncoder<W>),
}
/// Generator for ZIP files.
///
/// ```
/// fn doit() -> std::io::IoResult<()>
/// {
///     // For this example we write to a buffer, but normally you should use a File
///     let mut buf = [0u8, ..65536];
///     let w = std::io::BufWriter::new(&mut buf);
///     let mut zip = zip::ZipWriter::new(w);
///
///     try!(zip.start_file("hello_world.txt", zip::compression::Stored));
///     try!(zip.write(b"Hello, World!"));
///
///     // Optionally finish the zip. (this is also done on drop)
///     try!(zip.finish());
///
///     Ok(())
/// }
///
/// println!("Result: {}", doit());
/// ```
pub struct ZipWriter<W>
{
    inner: GenericZipWriter<W>,  // underlying (possibly compressing) writer
    files: Vec<ZipFile>,         // metadata for every file written so far
    stats: ZipWriterStats,       // running CRC/size stats for the current file
}
/// Running statistics for the file currently being written.
#[deriving(Default)]
struct ZipWriterStats
{
    crc32: u32,         // CRC-32 of the data written so far
    start: u64,         // stream position where this file's data began
    bytes_written: u64, // uncompressed byte count
}
/// Builds the error returned for operations on an already-closed writer.
fn writer_closed_error<T>() -> IoResult<T>
{
    Err(IoError { kind: io::Closed, desc: "This writer has been closed", detail: None })
}
impl<W: Writer+Seek> Writer for ZipWriter<W>
{
    /// Writes file data, folding it into the running CRC and size stats.
    ///
    /// Returns an error unless `start_file` has been called at least once.
    fn write(&mut self, buf: &[u8]) -> IoResult<()>
    {
        if self.files.len() == 0 { return Err(IoError { kind: io::OtherIoError, desc: "No file has been started", detail: None, }) }
        self.stats.update(buf);
        match self.inner
        {
            Storer(ref mut w) => w.write(buf),
            Deflater(ref mut w) => w.write(buf),
            Closed => writer_closed_error(),
        }
    }
}
impl ZipWriterStats
{
    /// Folds `buf` into the running CRC-32 and uncompressed byte count.
    fn update(&mut self, buf: &[u8])
    {
        self.crc32 = crc32::update(self.crc32, buf);
        self.bytes_written += buf.len() as u64;
    }
}
impl<W: Writer+Seek> ZipWriter<W>
{
    /// Initializes the ZipWriter.
    ///
    /// Before writing to this object, the start_file command should be called.
    pub fn new(inner: W) -> ZipWriter<W>
    {
        ZipWriter
        {
            inner: Storer(inner),
            files: Vec::new(),
            stats: Default::default(),
        }
    }
    /// Start a new file for with the requested compression method.
    pub fn start_file(&mut self, name: &str, compression: compression::CompressionMethod) -> IoResult<()>
    {
        try!(self.finish_file());
        {
            let writer = self.inner.get_plain();
            let header_start = try!(writer.tell());
            // CRC and sizes are unknown at this point; the header is rewritten
            // later by `finish_file` via `update_local_file_header`.
            let mut file = ZipFile
            {
                encrypted: false,
                compression_method: compression,
                last_modified_time: time::now(),
                crc32: 0,
                compressed_size: 0,
                uncompressed_size: 0,
                file_name: String::from_str(name),
                file_comment: String::new(),
                header_start: header_start,
                data_start: 0,
            };
            try!(writer_spec::write_local_file_header(writer, &file));
            let header_end = try!(writer.tell());
            self.stats.start = header_end;
            file.data_start = header_end;
            // Reset running stats for the new file.
            self.stats.bytes_written = 0;
            self.stats.crc32 = 0;
            self.files.push(file);
        }
        try!(self.inner.switch_to(compression));
        Ok(())
    }
    /// Completes the current file: records its CRC/sizes, rewrites its local
    /// header, and seeks back to the end of the stream. No-op when no file
    /// has been started.
    fn finish_file(&mut self) -> IoResult<()>
    {
        try!(self.inner.switch_to(compression::Stored));
        let writer = self.inner.get_plain();
        let file = match self.files.last_mut()
        {
            None => return Ok(()),
            Some(f) => f,
        };
        file.crc32 = self.stats.crc32;
        file.uncompressed_size = self.stats.bytes_written;
        file.compressed_size = try!(writer.tell()) - self.stats.start;
        try!(writer_spec::update_local_file_header(writer, file));
        try!(writer.seek(0, io::SeekEnd));
        Ok(())
    }
    /// Finish the last file and write all other zip-structures
    ///
    /// This will return the writer, but one should normally not append any data to the end of the file.
    /// Note that the zipfile will also be finished on drop.
    pub fn finish(mut self) -> IoResult<W>
    {
        try!(self.finalize());
        let inner = mem::replace(&mut self.inner, Closed);
        Ok(inner.unwrap())
    }
    /// Writes the central directory and the end-of-central-directory record.
    fn finalize(&mut self) -> IoResult<()>
    {
        try!(self.finish_file());
        {
            let writer = self.inner.get_plain();
            let central_start = try!(writer.tell());
            for file in self.files.iter()
            {
                try!(writer_spec::write_central_directory_header(writer, file));
            }
            let central_size = try!(writer.tell()) - central_start;
            let footer = spec::CentralDirectoryEnd
            {
                disk_number: 0,
                disk_with_central_directory: 0,
                number_of_files_on_this_disk: self.files.len() as u16,
                number_of_files: self.files.len() as u16,
                central_directory_size: central_size as u32,
                central_directory_offset: central_start as u32,
                zip_file_comment: b"zip-rs".to_vec(),
            };
            try!(footer.write(writer));
        }
        Ok(())
    }
}
#[unsafe_destructor]
impl<W: Writer+Seek> Drop for ZipWriter<W>
{
    /// Finalizes the archive if `finish` was never called. Errors are only
    /// logged, since `drop` has no way to return them.
    fn drop(&mut self)
    {
        if !self.inner.is_closed()
        {
            match self.finalize()
            {
                Ok(_) => {},
                Err(e) => warn!("ZipWriter drop failed: {}", e),
            }
        }
    }
}
impl<W: Writer+Seek> GenericZipWriter<W>
{
    /// Moves the inner writer into the state required by `compression`,
    /// finishing any in-progress deflate stream first.
    fn switch_to(&mut self, compression: compression::CompressionMethod) -> IoResult<()>
    {
        let bare = match mem::replace(self, Closed)
        {
            Storer(w) => w,
            Deflater(w) => try!(w.finish()),
            Closed => return writer_closed_error(),
        };
        *self = match compression
        {
            compression::Stored => Storer(bare),
            compression::Deflated => Deflater(bare.deflate_encode(flate2::Default)),
            _ => return Err(IoError { kind: io::OtherIoError, desc: "Unsupported compression requested", detail: None }),
        };
        Ok(())
    }
    fn is_closed(&self) -> bool
    {
        match *self
        {
            Closed => true,
            _ => false,
        }
    }
    /// Returns the raw writer; callers must `switch_to(Stored)` beforehand.
    fn get_plain(&mut self) -> &mut W
    {
        match *self
        {
            Storer(ref mut w) => w,
            _ => panic!("Should have switched to stored beforehand"),
        }
    }
    /// Consumes the wrapper, returning the raw writer.
    fn unwrap(self) -> W
    {
        match self
        {
            Storer(w) => w,
            _ => panic!("Should have switched to stored beforehand"),
        }
    }
}
|
//! Formats a DOM structure to a Writer
//!
//! ### Example
//! ```
//! use document::Package;
//! use document::writer::format_document;
//!
//! let package = Package::new();
//! let doc = package.as_document();
//!
//! let hello = doc.create_element("hello");
//! hello.set_attribute_value("planet", "Earth");
//! doc.root().append_child(hello);
//!
//! let mut output = std::io::stdio::stdout();
//! format_document(&doc, &mut output).ok().expect("unable to output XML");
//! ```
//!
//! ### Known issues
//!
//! Output is not escaped in any way,
//! it's very easy to create malformed XML!
//!
//! ### Potential options to support
//!
//! - Space before `/>`
//! - Single vs double quotes
//! - Fixed ordering of attributes
use std::num::Int;
use std::collections::HashMap;
use std::slice;
use std::io::IoResult;
use self::Content::*;
use super::QName;
use super::dom4;
use super::dom4::ChildOfElement::*;
use super::dom4::ChildOfRoot::*;
// TODO: Duplicating the String seems inefficient...
/// Prefix/namespace bookkeeping for one element scope.
struct PrefixScope<'d> {
    ns_to_prefix: HashMap<&'d str, String>,
    prefix_to_ns: HashMap<String, &'d str>,
    // Prefixes first defined by this scope; these are emitted as `xmlns:`
    // attributes on the element that opened the scope.
    defined_prefixes: Vec<(String, &'d str)>,
}
impl<'d> PrefixScope<'d> {
    fn new() -> PrefixScope<'d> {
        PrefixScope {
            ns_to_prefix: HashMap::new(),
            prefix_to_ns: HashMap::new(),
            defined_prefixes: Vec::new(),
        }
    }
    /// True if `prefix` is already mapped to some namespace in this scope.
    fn has_prefix(&self, prefix: &str) -> bool {
        self.prefix_to_ns.contains_key(prefix)
    }
    /// True if `namespace_uri` already has some prefix in this scope.
    fn has_namespace_uri(&self, namespace_uri: &str) -> bool {
        self.ns_to_prefix.contains_key(namespace_uri)
    }
    /// True if `prefix` is mapped specifically to `namespace_uri` here.
    fn prefix_is(&self, prefix: &str, namespace_uri: &str) -> bool {
        match self.prefix_to_ns.get(prefix) {
            Some(ns) => *ns == namespace_uri,
            _ => false,
        }
    }
    /// The prefix mapped to `namespace_uri` in this scope, if any.
    fn prefix_for(&self, namespace_uri: &str) -> Option<&str> {
        self.ns_to_prefix.get(namespace_uri).map(|p| p.as_slice())
    }
    /// Records the bidirectional prefix <-> namespace mapping.
    fn add_mapping(&mut self, prefix: &str, namespace_uri: &'d str) {
        let prefix = String::from_str(prefix);
        self.prefix_to_ns.insert(prefix.clone(), namespace_uri);
        self.ns_to_prefix.insert(namespace_uri, prefix);
    }
    /// Marks a prefix as newly defined here (must be written as `xmlns:`).
    fn define_prefix(&mut self, prefix: String, namespace_uri: &'d str) {
        self.defined_prefixes.push((prefix, namespace_uri));
    }
}
/// Stack of prefix scopes tracking namespace mappings during the tree walk.
struct PrefixMapping<'d> {
    scopes: Vec<PrefixScope<'d>>,
    generated_prefix_count: uint, // counter for auto-generated `autonsN` prefixes
}
impl<'d> PrefixMapping<'d> {
    fn new() -> PrefixMapping<'d> {
        PrefixMapping {
            scopes: vec![PrefixScope::new()],
            generated_prefix_count: 0,
        }
    }
    /// Opens a new scope; called when starting an element.
    fn push_scope(&mut self) {
        self.scopes.push(PrefixScope::new());
    }
    /// Closes the innermost scope; called when an element ends.
    fn pop_scope(&mut self) {
        self.scopes.pop();
    }
    /// Prefixes first defined by the innermost scope (need `xmlns:` output).
    fn prefixes_in_current_scope(&self) -> slice::Items<(String, &'d str)> {
        self.scopes.last().unwrap().defined_prefixes.iter()
    }
    /// Registers prefixes needed by `element` and its attributes: preferred
    /// prefixes first, then auto-generated ones for any namespace still
    /// lacking a prefix.
    fn populate_scope(&mut self, element: &dom4::Element<'d>, attributes: &[dom4::Attribute<'d>]) {
        if let Some(prefix) = element.preferred_prefix() {
            let name = element.name();
            if let Some(uri) = name.namespace_uri {
                self.set_prefix(prefix, uri);
            }
        }
        for attribute in attributes.iter() {
            if let Some(prefix) = attribute.preferred_prefix() {
                let name = attribute.name();
                if let Some(uri) = name.namespace_uri {
                    self.set_prefix(prefix, uri);
                }
            }
        }
        let name = element.name();
        if let Some(uri) = name.namespace_uri {
            self.generate_prefix(uri);
        }
        for attribute in attributes.iter() {
            let name = attribute.name();
            if let Some(uri) = name.namespace_uri {
                self.generate_prefix(uri);
            }
        }
    }
    /// Tries to bind `prefix` to `namespace_uri` in the current scope,
    /// avoiding redefinitions already provided by parent scopes.
    fn set_prefix(&mut self, prefix: &str, namespace_uri: &'d str) {
        // Split off the innermost scope so we can consult parents while
        // mutating it.
        let idx_of_last = self.scopes.len().saturating_sub(1);
        let (parents, current_scope) = self.scopes.split_at_mut(idx_of_last);
        let current_scope = &mut current_scope[0];
        // If we're already using this prefix, we can't redefine it.
        if current_scope.has_prefix(prefix) {
            return;
        }
        // We are definitely going to use this prefix, claim it
        current_scope.add_mapping(prefix, namespace_uri);
        for parent_scope in parents.iter().rev() {
            if parent_scope.prefix_is(prefix, namespace_uri) {
                // A parent defines it as the URI we want.
                // Prevent redefining it
                return;
            }
        }
        // Defined by us, must be added to the element
        current_scope.define_prefix(String::from_str(prefix), namespace_uri);
    }
    /// Ensures `namespace_uri` has *some* prefix, inheriting a parent's if
    /// possible and otherwise inventing an `autonsN` one.
    fn generate_prefix(&mut self, namespace_uri: &'d str) {
        let idx_of_last = self.scopes.len().saturating_sub(1);
        let (parents, current_scope) = self.scopes.split_at_mut(idx_of_last);
        let current_scope = &mut current_scope[0];
        if current_scope.has_namespace_uri(namespace_uri) {
            // We already map this namespace to *some* prefix
            return;
        }
        // Check if the parent already defined a prefix for this ns
        for parent_scope in parents.iter().rev() {
            if let Some(prefix) = parent_scope.prefix_for(namespace_uri) {
                // A parent happens to have a prefix for this URI.
                // Prevent redefining it
                current_scope.add_mapping(prefix.as_slice(), namespace_uri);
                return;
            }
        }
        // Keep bumping the counter until we find an unused generated prefix.
        loop {
            let prefix = format!("autons{}", self.generated_prefix_count);
            self.generated_prefix_count += 1;
            if ! current_scope.has_prefix(prefix.as_slice()) {
                current_scope.add_mapping(prefix.as_slice(), namespace_uri);
                current_scope.define_prefix(prefix, namespace_uri);
                break;
            }
        }
    }
    /// Resolves the prefix for `namespace_uri`, innermost scope first.
    ///
    /// # Panics
    /// Panics if no scope maps the namespace (callers must have populated
    /// the scope beforehand).
    fn prefix(&self, namespace_uri: &str) -> &str {
        for scope in self.scopes.iter().rev() {
            if let Some(prefix) = scope.prefix_for(namespace_uri) {
                return prefix;
            }
        }
        panic!("No namespace prefix available for {}", namespace_uri);
    }
}
/// Work items for the iterative (stack-based, non-recursive) tree walk.
enum Content<'d> {
    Element(dom4::Element<'d>),
    ElementEnd(dom4::Element<'d>), // emitted after the element's children
    Text(dom4::Text<'d>),
    Comment(dom4::Comment<'d>),
    ProcessingInstruction(dom4::ProcessingInstruction<'d>),
}
/// Writes a qualified name, prepending `prefix:` when the name has a
/// namespace URI.
fn format_qname<'d, W>(q: QName<'d>, mapping: &mut PrefixMapping<'d>, writer: &mut W) -> IoResult<()>
    where W: Writer
{
    if let Some(namespace_uri) = q.namespace_uri {
        let prefix = mapping.prefix(namespace_uri);
        try!(writer.write_str(prefix));
        try!(writer.write_str(":"));
    }
    writer.write_str(q.local_part)
}
/// Writes an element's start tag (name, attributes, new `xmlns:`
/// declarations) and pushes its children plus an `ElementEnd` marker onto
/// `todo`. Self-closes (`/>`) when the element has no children.
///
/// Note: per the module docs, attribute values are not escaped.
fn format_element<'d, W>(element: dom4::Element<'d>,
                         todo: &mut Vec<Content<'d>>,
                         mapping: &mut PrefixMapping<'d>,
                         writer: &mut W)
                         -> IoResult<()>
    where W: Writer
{
    let attrs = element.attributes();
    mapping.populate_scope(&element, attrs.as_slice());
    try!(writer.write_str("<"));
    try!(format_qname(element.name(), mapping, writer));
    for attr in attrs.iter() {
        try!(writer.write_str(" "));
        try!(format_qname(attr.name(), mapping, writer));
        try!(write!(writer, "='{}'", attr.value()));
    }
    for &(ref prefix, ref ns_uri) in mapping.prefixes_in_current_scope() {
        try!(writer.write_str(" xmlns:"));
        try!(writer.write_str(prefix.as_slice()));
        try!(write!(writer, "='{}'", ns_uri));
    }
    let mut children = element.children();
    if children.is_empty() {
        writer.write_str("/>")
    } else {
        try!(writer.write_str(">"));
        todo.push(ElementEnd(element));
        // `todo` is a stack, so children go on reversed to come off in
        // document order.
        children.reverse();
        let x = children.into_iter().map(|c| match c {
            ElementCOE(element) => Element(element),
            TextCOE(t) => Text(t),
            CommentCOE(c) => Comment(c),
            ProcessingInstructionCOE(p) => ProcessingInstruction(p),
        });
        todo.extend(x);
        Ok(())
    }
}
/// Writes an element's closing tag (`</name>`).
fn format_element_end<'d, W>(element: dom4::Element<'d>,
                             mapping: &mut PrefixMapping<'d>,
                             writer: &mut W)
                             -> IoResult<()>
    where W: Writer
{
    try!(writer.write_str("</"));
    try!(format_qname(element.name(), mapping, writer));
    writer.write_str(">")
}
/// Writes an XML comment (`<!--...-->`); the text is not escaped.
fn format_comment<W>(comment: dom4::Comment, writer: &mut W) -> IoResult<()>
    where W: Writer
{
    write!(writer, "<!--{}-->", comment.text())
}
/// Writes a processing instruction, omitting the value part when absent.
fn format_processing_instruction<W>(pi: dom4::ProcessingInstruction, writer: &mut W) -> IoResult<()>
    where W: Writer
{
    match pi.value() {
        None => write!(writer, "<?{}?>", pi.target()),
        Some(v) => write!(writer, "<?{} {}?>", pi.target(), v),
    }
}
/// Dispatches one work item to the matching formatter. Element starts open
/// a new prefix scope; the matching `ElementEnd` closes it again.
fn format_one<'d, W>(content: Content<'d>,
                     todo: &mut Vec<Content<'d>>,
                     mapping: &mut PrefixMapping<'d>,
                     writer: &mut W)
                     -> IoResult<()>
    where W: Writer
{
    match content {
        Element(e) => {
            mapping.push_scope();
            format_element(e, todo, mapping, writer)
        },
        ElementEnd(e) => {
            // Pop the scope even if formatting failed, then report the result.
            let r = format_element_end(e, mapping, writer);
            mapping.pop_scope();
            r
        },
        Text(t) => writer.write_str(t.text().as_slice()),
        Comment(c) => format_comment(c, writer),
        ProcessingInstruction(p) => format_processing_instruction(p, writer),
    }
}
/// Formats `element` and its whole subtree using an explicit work stack
/// instead of recursion.
fn format_body<W>(element: dom4::Element, writer: &mut W) -> IoResult<()>
    where W: Writer
{
    let mut mapping = PrefixMapping::new();
    let mut todo = vec![Element(element)];
    while let Some(content) = todo.pop() {
        try!(format_one(content, &mut todo, &mut mapping, writer));
    }
    Ok(())
}
/// Formats a document into a Writer
fn_doc_marker
pub fn format_document<'d, W>(doc: &'d dom4::Document<'d>, writer: &mut W) -> IoResult<()>
    where W: Writer
{
    // Always emit the XML declaration first.
    try!(writer.write_str("<?xml version='1.0'?>"));
    for child in doc.root().children().into_iter() {
        try!(match child {
            ElementCOR(e) => format_body(e, writer),
            CommentCOR(c) => format_comment(c, writer),
            ProcessingInstructionCOR(p) => format_processing_instruction(p, writer),
        })
    }
    Ok(())
}
#[cfg(test)]
mod test {
    // End-to-end tests: build a small DOM in memory, serialize it, and
    // compare the resulting string against the exact expected XML.
    use std::io::MemWriter;
    use super::super::{Package,QName};
    use super::super::dom4;
    use super::format_document;

    // Compares two values via `as_slice()` so String and &str mix freely.
    macro_rules! assert_str_eq(
        ($l:expr, $r:expr) => (assert_eq!($l.as_slice(), $r.as_slice()));
    )

    // Serializes the document to an in-memory buffer and returns the text.
    fn format_xml<'d>(doc: &'d dom4::Document<'d>) -> String {
        let mut w = MemWriter::new();
        format_document(doc, &mut w).ok().expect("Not formatted");
        String::from_utf8(w.into_inner()).ok().expect("Not a string")
    }

    #[test]
    fn top_element() {
        let p = Package::new();
        let d = p.as_document();
        let e = d.create_element("hello");
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello/>");
    }

    #[test]
    fn element_with_namespace() {
        let p = Package::new();
        let d = p.as_document();
        let name = QName::with_namespace_uri(Some("namespace"), "local-part");
        let e = d.create_element(name);
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><autons0:local-part xmlns:autons0='namespace'/>");
    }

    #[test]
    fn element_with_preferred_namespace_prefix() {
        let p = Package::new();
        let d = p.as_document();
        let name = QName::with_namespace_uri(Some("namespace"), "local-part");
        let e = d.create_element(name);
        e.set_preferred_prefix(Some("prefix"));
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><prefix:local-part xmlns:prefix='namespace'/>");
    }

    #[test]
    fn element_with_attributes() {
        let p = Package::new();
        let d = p.as_document();
        let e = d.create_element("hello");
        e.set_attribute_value("a", "b");
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello a='b'/>");
    }

    #[test]
    fn attribute_with_namespace() {
        let p = Package::new();
        let d = p.as_document();
        let e = d.create_element("hello");
        let name = QName::with_namespace_uri(Some("namespace"), "a");
        e.set_attribute_value(name, "b");
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello autons0:a='b' xmlns:autons0='namespace'/>");
    }

    #[test]
    fn attribute_with_preferred_namespace_prefix() {
        let p = Package::new();
        let d = p.as_document();
        let e = d.create_element("hello");
        let name = QName::with_namespace_uri(Some("namespace"), "a");
        let a = e.set_attribute_value(name, "b");
        a.set_preferred_prefix(Some("p"));
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello p:a='b' xmlns:p='namespace'/>");
    }

    #[test]
    fn attributes_with_conflicting_preferred_namespace_prefixes() {
        // Both attributes want "p"; only the first gets it, the second
        // falls back to a generated prefix.
        let p = Package::new();
        let d = p.as_document();
        let e = d.create_element("hello");
        let name = QName::with_namespace_uri(Some("namespace1"), "a1");
        let a = e.set_attribute_value(name, "b1");
        a.set_preferred_prefix(Some("p"));
        let name = QName::with_namespace_uri(Some("namespace2"), "a2");
        let a = e.set_attribute_value(name, "b2");
        a.set_preferred_prefix(Some("p"));
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello p:a1='b1' autons0:a2='b2' xmlns:p='namespace1' xmlns:autons0='namespace2'/>");
    }

    #[test]
    fn nested_element() {
        let p = Package::new();
        let d = p.as_document();
        let hello = d.create_element("hello");
        let world = d.create_element("world");
        hello.append_child(world);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello><world/></hello>");
    }

    #[test]
    fn nested_element_with_namespaces() {
        let p = Package::new();
        let d = p.as_document();
        let outer_name = QName::with_namespace_uri(Some("outer"), "hello");
        let inner_name = QName::with_namespace_uri(Some("inner"), "world");
        let hello = d.create_element(outer_name);
        let world = d.create_element(inner_name);
        hello.append_child(world);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><autons0:hello xmlns:autons0='outer'><autons1:world xmlns:autons1='inner'/></autons0:hello>");
    }

    #[test]
    fn nested_element_with_namespaces_with_reused_namespaces() {
        // The inner element reuses the prefix declared on the outer one.
        let p = Package::new();
        let d = p.as_document();
        let outer_name = QName::with_namespace_uri(Some("ns"), "hello");
        let inner_name = QName::with_namespace_uri(Some("ns"), "world");
        let hello = d.create_element(outer_name);
        let world = d.create_element(inner_name);
        hello.append_child(world);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><autons0:hello xmlns:autons0='ns'><autons0:world/></autons0:hello>");
    }

    #[test]
    fn nested_element_with_with_conflicting_preferred_namespace_prefixes() {
        // The same prefix may be redefined in a nested scope for a
        // different namespace URI.
        let p = Package::new();
        let d = p.as_document();
        let outer_name = QName::with_namespace_uri(Some("outer"), "hello");
        let inner_name = QName::with_namespace_uri(Some("inner"), "world");
        let hello = d.create_element(outer_name);
        let world = d.create_element(inner_name);
        hello.set_preferred_prefix(Some("p"));
        world.set_preferred_prefix(Some("p"));
        hello.append_child(world);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><p:hello xmlns:p='outer'><p:world xmlns:p='inner'/></p:hello>");
    }

    #[test]
    fn nested_text() {
        let p = Package::new();
        let d = p.as_document();
        let hello = d.create_element("hello");
        let text = d.create_text("A fine day to you!");
        hello.append_child(text);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello>A fine day to you!</hello>");
    }

    #[test]
    fn nested_comment() {
        let p = Package::new();
        let d = p.as_document();
        let hello = d.create_element("hello");
        let comment = d.create_comment(" Fill this in ");
        hello.append_child(comment);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello><!-- Fill this in --></hello>");
    }

    #[test]
    fn nested_processing_instruction_without_value() {
        let p = Package::new();
        let d = p.as_document();
        let hello = d.create_element("hello");
        let pi = d.create_processing_instruction("display", None);
        hello.append_child(pi);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello><?display?></hello>");
    }

    #[test]
    fn nested_processing_instruction_with_value() {
        let p = Package::new();
        let d = p.as_document();
        let hello = d.create_element("hello");
        let pi = d.create_processing_instruction("display", Some("screen"));
        hello.append_child(pi);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello><?display screen?></hello>");
    }

    #[test]
    fn top_level_comment() {
        let p = Package::new();
        let d = p.as_document();
        let comment = d.create_comment(" Fill this in ");
        d.root().append_child(comment);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><!-- Fill this in -->");
    }

    #[test]
    fn top_level_processing_instruction() {
        let p = Package::new();
        let d = p.as_document();
        let pi = d.create_processing_instruction("display", None);
        d.root().append_child(pi);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><?display?>");
    }
}
Multiple preferred prefixes can refer to the same namespace
//! Formats a DOM structure to a Writer
//!
//! ### Example
//! ```
//! use document::Package;
//! use document::writer::format_document;
//!
//! let package = Package::new();
//! let doc = package.as_document();
//!
//! let hello = doc.create_element("hello");
//! hello.set_attribute_value("planet", "Earth");
//! doc.root().append_child(hello);
//!
//! let mut output = std::io::stdio::stdout();
//! format_document(&doc, &mut output).ok().expect("unable to output XML");
//! ```
//!
//! ### Known issues
//!
//! Output is not escaped in any way,
//! it's very easy to create malformed XML!
//!
//! ### Potential options to support
//!
//! - Space before `/>`
//! - Single vs double quotes
//! - Fixed ordering of attributes
use std::num::Int;
use std::collections::HashMap;
use std::slice;
use std::io::IoResult;
use self::Content::*;
use super::QName;
use super::dom4;
use super::dom4::ChildOfElement::*;
use super::dom4::ChildOfRoot::*;
// TODO: Duplicating the String seems inefficient...
/// Namespace-prefix bookkeeping for a single nesting level (one element)
/// during formatting.
struct PrefixScope<'d> {
    /// Maps a namespace URI to the prefix chosen for it in this scope.
    ns_to_prefix: HashMap<&'d str, String>,
    /// Reverse direction: which namespace URI a prefix is bound to.
    prefix_to_ns: HashMap<String, &'d str>,
    /// Prefixes this scope introduced itself; these are emitted as
    /// `xmlns:prefix='uri'` attributes on the element.
    defined_prefixes: Vec<(String, &'d str)>,
}
impl<'d> PrefixScope<'d> {
    /// Creates an empty scope with no mappings at all.
    fn new() -> PrefixScope<'d> {
        PrefixScope {
            ns_to_prefix: HashMap::new(),
            prefix_to_ns: HashMap::new(),
            defined_prefixes: Vec::new(),
        }
    }

    /// True when `prefix` is already bound to some namespace here.
    fn has_prefix(&self, prefix: &str) -> bool {
        self.prefix_to_ns.contains_key(prefix)
    }

    /// True when some prefix is already bound to `namespace_uri` here.
    fn has_namespace_uri(&self, namespace_uri: &str) -> bool {
        self.ns_to_prefix.contains_key(namespace_uri)
    }

    /// True when `prefix` is bound here, and bound to exactly `namespace_uri`.
    fn prefix_is(&self, prefix: &str, namespace_uri: &str) -> bool {
        if let Some(bound_uri) = self.prefix_to_ns.get(prefix) {
            *bound_uri == namespace_uri
        } else {
            false
        }
    }

    /// The prefix bound to `namespace_uri` in this scope, if any.
    fn prefix_for(&self, namespace_uri: &str) -> Option<&str> {
        self.ns_to_prefix.get(namespace_uri).map(|p| p.as_slice())
    }

    /// Records the `prefix` <-> `namespace_uri` association in both
    /// lookup directions.
    fn add_mapping(&mut self, prefix: &str, namespace_uri: &'d str) {
        let owned_prefix = String::from_str(prefix);
        self.ns_to_prefix.insert(namespace_uri, owned_prefix.clone());
        self.prefix_to_ns.insert(owned_prefix, namespace_uri);
    }

    /// Marks `prefix` as introduced by this scope, so that it will be
    /// written out as an `xmlns:` declaration.
    fn define_prefix(&mut self, prefix: String, namespace_uri: &'d str) {
        self.defined_prefixes.push((prefix, namespace_uri));
    }
}
/// A stack of `PrefixScope`s — one per element currently being formatted —
/// plus the counter used to invent `autons{N}` prefixes.
struct PrefixMapping<'d> {
    // Innermost scope is the last element of the vector.
    scopes: Vec<PrefixScope<'d>>,
    // Monotonic counter; never reset, so generated prefixes stay unique.
    generated_prefix_count: uint,
}
impl<'d> PrefixMapping<'d> {
    /// Starts with a single root scope and no generated prefixes.
    fn new() -> PrefixMapping<'d> {
        PrefixMapping {
            scopes: vec![PrefixScope::new()],
            generated_prefix_count: 0,
        }
    }

    /// Enters a new element: mappings added now are local to it.
    fn push_scope(&mut self) {
        self.scopes.push(PrefixScope::new());
    }

    /// Leaves the current element, discarding its local mappings.
    fn pop_scope(&mut self) {
        self.scopes.pop();
    }

    /// The `(prefix, namespace URI)` pairs that the current element itself
    /// introduced and must therefore declare via `xmlns:` attributes.
    fn prefixes_in_current_scope(&self) -> slice::Items<(String, &'d str)> {
        self.scopes.last().unwrap().defined_prefixes.iter()
    }

    /// Ensures every namespace used by `element` and its `attributes` has
    /// a prefix. Preferred prefixes are claimed first (element before
    /// attributes), then `autons{N}` prefixes are generated for any
    /// namespace still unmapped.
    fn populate_scope(&mut self, element: &dom4::Element<'d>, attributes: &[dom4::Attribute<'d>]) {
        if let Some(prefix) = element.preferred_prefix() {
            let name = element.name();
            if let Some(uri) = name.namespace_uri {
                self.set_prefix(prefix, uri);
            }
        }
        for attribute in attributes.iter() {
            if let Some(prefix) = attribute.preferred_prefix() {
                let name = attribute.name();
                if let Some(uri) = name.namespace_uri {
                    self.set_prefix(prefix, uri);
                }
            }
        }
        let name = element.name();
        if let Some(uri) = name.namespace_uri {
            self.generate_prefix(uri);
        }
        for attribute in attributes.iter() {
            let name = attribute.name();
            if let Some(uri) = name.namespace_uri {
                self.generate_prefix(uri);
            }
        }
    }

    /// Tries to bind `prefix` to `namespace_uri` in the current scope.
    /// First binding of a prefix wins; later conflicting requests are
    /// silently ignored (the namespace then gets a generated prefix).
    fn set_prefix(&mut self, prefix: &str, namespace_uri: &'d str) {
        // split_at_mut lets us look at parent scopes while mutating the
        // innermost one.
        let idx_of_last = self.scopes.len().saturating_sub(1);
        let (parents, current_scope) = self.scopes.split_at_mut(idx_of_last);
        let current_scope = &mut current_scope[0];

        // If we're already using this prefix, we can't redefine it.
        if current_scope.has_prefix(prefix) {
            return;
        }

        // We are definitely going to use this prefix, claim it
        current_scope.add_mapping(prefix, namespace_uri);

        for parent_scope in parents.iter().rev() {
            if parent_scope.prefix_is(prefix, namespace_uri) {
                // A parent defines it as the URI we want.
                // Prevent redefining it
                return;
            }
        }

        // Defined by us, must be added to the element
        current_scope.define_prefix(String::from_str(prefix), namespace_uri);
    }

    /// Makes sure `namespace_uri` has *some* prefix, inventing an
    /// `autons{N}` one if neither the current scope nor any parent
    /// already maps it.
    fn generate_prefix(&mut self, namespace_uri: &'d str) {
        let idx_of_last = self.scopes.len().saturating_sub(1);
        let (parents, current_scope) = self.scopes.split_at_mut(idx_of_last);
        let current_scope = &mut current_scope[0];

        if current_scope.has_namespace_uri(namespace_uri) {
            // We already map this namespace to *some* prefix
            return;
        }

        // Check if the parent already defined a prefix for this ns
        for parent_scope in parents.iter().rev() {
            if let Some(prefix) = parent_scope.prefix_for(namespace_uri) {
                // A parent happens to have a prefix for this URI.
                // Prevent redefining it
                current_scope.add_mapping(prefix.as_slice(), namespace_uri);
                return;
            }
        }

        // Invent prefixes until one is free; the counter is global so
        // each generated name is used at most once.
        loop {
            let prefix = format!("autons{}", self.generated_prefix_count);
            self.generated_prefix_count += 1;

            if ! current_scope.has_prefix(prefix.as_slice()) {
                current_scope.add_mapping(prefix.as_slice(), namespace_uri);
                current_scope.define_prefix(prefix, namespace_uri);
                break;
            }
        }
    }

    /// Resolves the prefix to print for `namespace_uri`: the preferred
    /// prefix if it is bound to that URI in the current scope, otherwise
    /// the innermost scope that maps the URI.
    ///
    /// Panics if no scope maps the URI — `populate_scope` must have run.
    fn prefix<'a : 'c, 'b : 'c, 'c>(&'a self, preferred_prefix: Option<&'b str>, namespace_uri: &str) -> &'c str {
        if let Some(prefix) = preferred_prefix {
            let scope = self.scopes.last().unwrap();
            if scope.prefix_is(prefix, namespace_uri) {
                return prefix;
            }
        }

        for scope in self.scopes.iter().rev() {
            if let Some(prefix) = scope.prefix_for(namespace_uri) {
                return prefix;
            }
        }

        panic!("No namespace prefix available for {}", namespace_uri);
    }
}
/// A unit of work on the iterative formatter's stack.
enum Content<'d> {
    /// Open an element (and schedule its children and closing tag).
    Element(dom4::Element<'d>),
    /// Write an element's closing tag and pop its namespace scope.
    ElementEnd(dom4::Element<'d>),
    /// Write a text node verbatim.
    Text(dom4::Text<'d>),
    /// Write a comment node.
    Comment(dom4::Comment<'d>),
    /// Write a processing-instruction node.
    ProcessingInstruction(dom4::ProcessingInstruction<'d>),
}
/// Writes a name, resolving and prepending `prefix:` when the name lives
/// in a namespace.
fn format_qname<'d, W>(q: QName<'d>,
                       mapping: &mut PrefixMapping<'d>,
                       preferred_prefix: Option<&str>,
                       writer: &mut W)
                       -> IoResult<()>
    where W: Writer
{
    match q.namespace_uri {
        Some(namespace_uri) => {
            let prefix = mapping.prefix(preferred_prefix, namespace_uri);
            try!(writer.write_str(prefix));
            try!(writer.write_str(":"));
        },
        None => {},
    }
    writer.write_str(q.local_part)
}
/// Writes an element's opening tag (name, attributes, and any `xmlns:`
/// declarations this element introduces) and schedules its children and
/// closing tag on `todo`. Self-closes when there are no children.
///
/// Note: attribute values are written unescaped (see module docs).
fn format_element<'d, W>(element: dom4::Element<'d>,
                         todo: &mut Vec<Content<'d>>,
                         mapping: &mut PrefixMapping<'d>,
                         writer: &mut W)
                         -> IoResult<()>
    where W: Writer
{
    let attrs = element.attributes();

    // Claim/generate all prefixes before printing anything, so every
    // qname below can be resolved.
    mapping.populate_scope(&element, attrs.as_slice());

    try!(writer.write_str("<"));
    try!(format_qname(element.name(), mapping, element.preferred_prefix(), writer));

    for attr in attrs.iter() {
        try!(writer.write_str(" "));
        try!(format_qname(attr.name(), mapping, attr.preferred_prefix(), writer));
        try!(write!(writer, "='{}'", attr.value()));
    }

    // Only prefixes introduced by *this* scope are declared here.
    for &(ref prefix, ref ns_uri) in mapping.prefixes_in_current_scope() {
        try!(writer.write_str(" xmlns:"));
        try!(writer.write_str(prefix.as_slice()));
        try!(write!(writer, "='{}'", ns_uri));
    }

    let mut children = element.children();
    if children.is_empty() {
        writer.write_str("/>")
    } else {
        try!(writer.write_str(">"));

        // The closing tag is pushed first, then the children reversed,
        // so popping from `todo` yields children in document order and
        // the ElementEnd last.
        todo.push(ElementEnd(element));
        children.reverse();
        let x = children.into_iter().map(|c| match c {
            ElementCOE(element) => Element(element),
            TextCOE(t) => Text(t),
            CommentCOE(c) => Comment(c),
            ProcessingInstructionCOE(p) => ProcessingInstruction(p),
        });
        todo.extend(x);
        Ok(())
    }
}
/// Writes the closing tag (`</name>`) for an element.
fn format_element_end<'d, W>(element: dom4::Element<'d>,
                             mapping: &mut PrefixMapping<'d>,
                             writer: &mut W)
                             -> IoResult<()>
    where W: Writer
{
    let name = element.name();
    let preferred = element.preferred_prefix();

    try!(writer.write_str("</"));
    try!(format_qname(name, mapping, preferred, writer));
    writer.write_str(">")
}
/// Writes a comment node as `<!--text-->` (text is not escaped).
fn format_comment<W>(comment: dom4::Comment, writer: &mut W) -> IoResult<()>
    where W: Writer
{
    let text = comment.text();
    write!(writer, "<!--{}-->", text)
}
/// Writes a processing instruction: `<?target?>` when it has no value,
/// `<?target value?>` when it does.
fn format_processing_instruction<W>(pi: dom4::ProcessingInstruction, writer: &mut W) -> IoResult<()>
    where W: Writer
{
    match pi.value() {
        Some(v) => write!(writer, "<?{} {}?>", pi.target(), v),
        None => write!(writer, "<?{}?>", pi.target()),
    }
}
/// Processes a single item popped from the work stack, dispatching on its
/// kind. Namespace scopes are pushed when an element opens and popped when
/// its matching `ElementEnd` is processed.
fn format_one<'d, W>(content: Content<'d>,
                     todo: &mut Vec<Content<'d>>,
                     mapping: &mut PrefixMapping<'d>,
                     writer: &mut W)
                     -> IoResult<()>
    where W: Writer
{
    match content {
        Element(e) => {
            // New scope before formatting, so the element's own xmlns
            // declarations live in it; popped by the ElementEnd arm.
            mapping.push_scope();
            format_element(e, todo, mapping, writer)
        },
        ElementEnd(e) => {
            // Write the closing tag *before* popping: its qname may use a
            // prefix defined in this scope.
            let r = format_element_end(e, mapping, writer);
            mapping.pop_scope();
            r
        },
        Text(t)   => writer.write_str(t.text().as_slice()),
        Comment(c) => format_comment(c, writer),
        ProcessingInstruction(p) => format_processing_instruction(p, writer),
    }
}
/// Formats a top-level element and its entire subtree.
///
/// Traversal is depth-first but driven by an explicit stack rather than
/// recursion; `format_one` pushes children back onto `todo`.
fn format_body<W>(element: dom4::Element, writer: &mut W) -> IoResult<()>
    where W: Writer
{
    let mut mapping = PrefixMapping::new();
    let mut todo = vec![Element(element)];

    loop {
        match todo.pop() {
            Some(content) => try!(format_one(content, &mut todo, &mut mapping, writer)),
            None => return Ok(()),
        }
    }
}
/// Formats a document into a Writer
pub fn format_document<'d, W>(doc: &'d dom4::Document<'d>, writer: &mut W) -> IoResult<()>
where W: Writer
{
try!(writer.write_str("<?xml version='1.0'?>"));
for child in doc.root().children().into_iter() {
try!(match child {
ElementCOR(e) => format_body(e, writer),
CommentCOR(c) => format_comment(c, writer),
ProcessingInstructionCOR(p) => format_processing_instruction(p, writer),
})
}
Ok(())
}
#[cfg(test)]
mod test {
    // End-to-end tests: build a small DOM in memory, serialize it, and
    // compare the resulting string against the exact expected XML.
    use std::io::MemWriter;
    use super::super::{Package,QName};
    use super::super::dom4;
    use super::format_document;

    // Compares two values via `as_slice()` so String and &str mix freely.
    macro_rules! assert_str_eq(
        ($l:expr, $r:expr) => (assert_eq!($l.as_slice(), $r.as_slice()));
    )

    // Serializes the document to an in-memory buffer and returns the text.
    fn format_xml<'d>(doc: &'d dom4::Document<'d>) -> String {
        let mut w = MemWriter::new();
        format_document(doc, &mut w).ok().expect("Not formatted");
        String::from_utf8(w.into_inner()).ok().expect("Not a string")
    }

    #[test]
    fn top_element() {
        let p = Package::new();
        let d = p.as_document();
        let e = d.create_element("hello");
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello/>");
    }

    #[test]
    fn element_with_namespace() {
        let p = Package::new();
        let d = p.as_document();
        let name = QName::with_namespace_uri(Some("namespace"), "local-part");
        let e = d.create_element(name);
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><autons0:local-part xmlns:autons0='namespace'/>");
    }

    #[test]
    fn element_with_preferred_namespace_prefix() {
        let p = Package::new();
        let d = p.as_document();
        let name = QName::with_namespace_uri(Some("namespace"), "local-part");
        let e = d.create_element(name);
        e.set_preferred_prefix(Some("prefix"));
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><prefix:local-part xmlns:prefix='namespace'/>");
    }

    #[test]
    fn element_with_attributes() {
        let p = Package::new();
        let d = p.as_document();
        let e = d.create_element("hello");
        e.set_attribute_value("a", "b");
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello a='b'/>");
    }

    #[test]
    fn attribute_with_namespace() {
        let p = Package::new();
        let d = p.as_document();
        let e = d.create_element("hello");
        let name = QName::with_namespace_uri(Some("namespace"), "a");
        e.set_attribute_value(name, "b");
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello autons0:a='b' xmlns:autons0='namespace'/>");
    }

    #[test]
    fn attribute_with_preferred_namespace_prefix() {
        let p = Package::new();
        let d = p.as_document();
        let e = d.create_element("hello");
        let name = QName::with_namespace_uri(Some("namespace"), "a");
        let a = e.set_attribute_value(name, "b");
        a.set_preferred_prefix(Some("p"));
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello p:a='b' xmlns:p='namespace'/>");
    }

    #[test]
    fn attributes_with_conflicting_preferred_namespace_prefixes() {
        // Both attributes want "p"; only the first gets it, the second
        // falls back to a generated prefix.
        let p = Package::new();
        let d = p.as_document();
        let e = d.create_element("hello");
        let name = QName::with_namespace_uri(Some("namespace1"), "a1");
        let a = e.set_attribute_value(name, "b1");
        a.set_preferred_prefix(Some("p"));
        let name = QName::with_namespace_uri(Some("namespace2"), "a2");
        let a = e.set_attribute_value(name, "b2");
        a.set_preferred_prefix(Some("p"));
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello p:a1='b1' autons0:a2='b2' xmlns:p='namespace1' xmlns:autons0='namespace2'/>");
    }

    #[test]
    fn attributes_with_different_preferred_namespace_prefixes_for_same_namespace() {
        // Two distinct prefixes may both map to the same namespace URI.
        let p = Package::new();
        let d = p.as_document();
        let e = d.create_element("hello");
        let name = QName::with_namespace_uri(Some("namespace"), "a1");
        let a = e.set_attribute_value(name, "b1");
        a.set_preferred_prefix(Some("p1"));
        let name = QName::with_namespace_uri(Some("namespace"), "a2");
        let a = e.set_attribute_value(name, "b2");
        a.set_preferred_prefix(Some("p2"));
        d.root().append_child(e);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello p1:a1='b1' p2:a2='b2' xmlns:p1='namespace' xmlns:p2='namespace'/>");
    }

    #[test]
    fn nested_element() {
        let p = Package::new();
        let d = p.as_document();
        let hello = d.create_element("hello");
        let world = d.create_element("world");
        hello.append_child(world);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello><world/></hello>");
    }

    #[test]
    fn nested_element_with_namespaces() {
        let p = Package::new();
        let d = p.as_document();
        let outer_name = QName::with_namespace_uri(Some("outer"), "hello");
        let inner_name = QName::with_namespace_uri(Some("inner"), "world");
        let hello = d.create_element(outer_name);
        let world = d.create_element(inner_name);
        hello.append_child(world);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><autons0:hello xmlns:autons0='outer'><autons1:world xmlns:autons1='inner'/></autons0:hello>");
    }

    #[test]
    fn nested_element_with_namespaces_with_reused_namespaces() {
        // The inner element reuses the prefix declared on the outer one.
        let p = Package::new();
        let d = p.as_document();
        let outer_name = QName::with_namespace_uri(Some("ns"), "hello");
        let inner_name = QName::with_namespace_uri(Some("ns"), "world");
        let hello = d.create_element(outer_name);
        let world = d.create_element(inner_name);
        hello.append_child(world);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><autons0:hello xmlns:autons0='ns'><autons0:world/></autons0:hello>");
    }

    #[test]
    fn nested_element_with_with_conflicting_preferred_namespace_prefixes() {
        // The same prefix may be redefined in a nested scope for a
        // different namespace URI.
        let p = Package::new();
        let d = p.as_document();
        let outer_name = QName::with_namespace_uri(Some("outer"), "hello");
        let inner_name = QName::with_namespace_uri(Some("inner"), "world");
        let hello = d.create_element(outer_name);
        let world = d.create_element(inner_name);
        hello.set_preferred_prefix(Some("p"));
        world.set_preferred_prefix(Some("p"));
        hello.append_child(world);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><p:hello xmlns:p='outer'><p:world xmlns:p='inner'/></p:hello>");
    }

    #[test]
    fn nested_text() {
        let p = Package::new();
        let d = p.as_document();
        let hello = d.create_element("hello");
        let text = d.create_text("A fine day to you!");
        hello.append_child(text);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello>A fine day to you!</hello>");
    }

    #[test]
    fn nested_comment() {
        let p = Package::new();
        let d = p.as_document();
        let hello = d.create_element("hello");
        let comment = d.create_comment(" Fill this in ");
        hello.append_child(comment);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello><!-- Fill this in --></hello>");
    }

    #[test]
    fn nested_processing_instruction_without_value() {
        let p = Package::new();
        let d = p.as_document();
        let hello = d.create_element("hello");
        let pi = d.create_processing_instruction("display", None);
        hello.append_child(pi);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello><?display?></hello>");
    }

    #[test]
    fn nested_processing_instruction_with_value() {
        let p = Package::new();
        let d = p.as_document();
        let hello = d.create_element("hello");
        let pi = d.create_processing_instruction("display", Some("screen"));
        hello.append_child(pi);
        d.root().append_child(hello);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><hello><?display screen?></hello>");
    }

    #[test]
    fn top_level_comment() {
        let p = Package::new();
        let d = p.as_document();
        let comment = d.create_comment(" Fill this in ");
        d.root().append_child(comment);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><!-- Fill this in -->");
    }

    #[test]
    fn top_level_processing_instruction() {
        let p = Package::new();
        let d = p.as_document();
        let pi = d.create_processing_instruction("display", None);
        d.root().append_child(pi);
        let xml = format_xml(&d);
        assert_str_eq!(xml, "<?xml version='1.0'?><?display?>");
    }
}
|
//! Write las files.
use std::path::Path;
use Result;
use file::File;
use header::Header;
use point::Point;
/// A las writer.
///
/// This wrapper conforms to the more standard structure of requiring a filename on create, not on
/// close.
///
/// I recognize that it's pretty messy to have both this and `File`, and TODO I need to clean
/// things up.
#[derive(Debug)]
pub struct Writer<P: AsRef<Path>> {
    /// When true, sensible header offsets are computed at write time.
    auto_offsets: bool,
    /// Accumulates points in memory until `close` writes them out.
    file: File,
    /// Header fields configured via the builder methods.
    header: Header,
    /// Destination path; nothing is written here until `close`.
    path: P,
}
impl<P: AsRef<Path>> Writer<P> {
    /// Creates a new writer that will write las data to the given path.
    ///
    /// This won't actually write anything until the writer is closed.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::writer::Writer;
    /// let writer = Writer::from_path("temp.las");
    /// ```
    pub fn from_path(path: P) -> Writer<P> {
        // Does not touch the filesystem; the path is only used in `close`.
        Writer {
            auto_offsets: false,
            file: File::new(),
            header: Header::new(),
            path: path,
        }
    }

    /// Sets the scale factors on a writer.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::writer::Writer;
    /// let writer = Writer::from_path("temp.las").scale_factors(0.01, 0.01, 0.01);
    /// ```
    pub fn scale_factors(mut self,
                         x_scale_factor: f64,
                         y_scale_factor: f64,
                         z_scale_factor: f64)
                         -> Writer<P> {
        self.header.x_scale_factor = x_scale_factor;
        self.header.y_scale_factor = y_scale_factor;
        self.header.z_scale_factor = z_scale_factor;
        self
    }

    /// Sets the offset values for a file.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::writer::Writer;
    /// let writer = Writer::from_path("temp.las").offsets(1000.0, 2000.0, 100.0);
    /// ```
    pub fn offsets(mut self, x_offset: f64, y_offset: f64, z_offset: f64) -> Writer<P> {
        self.header.x_offset = x_offset;
        self.header.y_offset = y_offset;
        self.header.z_offset = z_offset;
        self
    }

    /// Enables auto-offsetting.
    ///
    /// If auto-offsetting is enabled, this file will set the header offset values to sensible
    /// values before writing anything. This is usually easier than calculating the offsets
    /// yourself.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::writer::Writer;
    /// let writer = Writer::from_path("temp.las").auto_offsets(true);
    /// ```
    pub fn auto_offsets(mut self, enable: bool) -> Writer<P> {
        self.auto_offsets = enable;
        self
    }

    /// Writes a point to this writer.
    ///
    /// Note that this point won't actually be written until the writer is closed.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::writer::Writer;
    /// use las::point::Point;
    /// let mut writer = Writer::from_path("temp.las");
    /// writer.write_point(Point::new());
    /// ```
    pub fn write_point(&mut self, point: Point) {
        self.file.add_point(point)
    }

    /// Closes this writer and actually writes data out to disc.
    ///
    /// Since we need to calculate some stats on the points for the header, we delay writing until
    /// the very last minute. If you don't want to hold all those points in memory, we'll need to
    /// come up with some other way to do that.
    ///
    /// NOTE(review): despite the original claim that this "consumes the writer", the signature is
    /// `&mut self`, so the writer remains usable and a second `close` would write the file again —
    /// consider taking `self` by value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fs::remove_file;
    /// use las::writer::Writer;
    /// use las::point::Point;
    /// let mut writer = Writer::from_path("temp.las");
    /// writer.write_point(Point::new());
    /// writer.close().unwrap();
    /// remove_file("temp.las").unwrap();
    /// ```
    pub fn close(&mut self) -> Result<()> {
        self.file.set_header(self.header);
        self.file.to_path(&self.path, self.auto_offsets)
    }
}
Provide more builders on a writer
//! Write las files.
use std::path::Path;
use Result;
use file::File;
use header::{Header, PointFormat};
use point::Point;
/// A las writer.
///
/// This wrapper conforms to the more standard structure of requiring a filename on create, not on
/// close.
///
/// I recognize that it's pretty messy to have both this and `File`, and TODO I need to clean
/// things up.
#[derive(Debug)]
pub struct Writer<P: AsRef<Path>> {
    /// When true, sensible header offsets are computed at write time.
    auto_offsets: bool,
    /// Accumulates points in memory until `close` writes them out.
    file: File,
    /// Header fields configured via the builder methods.
    header: Header,
    /// Destination path; nothing is written here until `close`.
    path: P,
}
impl<P: AsRef<Path>> Writer<P> {
    /// Creates a new writer that will write las data to the given path.
    ///
    /// This won't actually write anything until the writer is closed.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::writer::Writer;
    /// let writer = Writer::from_path("temp.las");
    /// ```
    pub fn from_path(path: P) -> Writer<P> {
        // Does not touch the filesystem; the path is only used in `close`.
        Writer {
            auto_offsets: false,
            file: File::new(),
            header: Header::new(),
            path: path,
        }
    }

    /// Sets the scale factors on a writer.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::writer::Writer;
    /// let writer = Writer::from_path("temp.las").scale_factors(0.01, 0.01, 0.01);
    /// ```
    pub fn scale_factors(mut self,
                         x_scale_factor: f64,
                         y_scale_factor: f64,
                         z_scale_factor: f64)
                         -> Writer<P> {
        self.header.x_scale_factor = x_scale_factor;
        self.header.y_scale_factor = y_scale_factor;
        self.header.z_scale_factor = z_scale_factor;
        self
    }

    /// Sets the offset values for a file.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::writer::Writer;
    /// let writer = Writer::from_path("temp.las").offsets(1000.0, 2000.0, 100.0);
    /// ```
    pub fn offsets(mut self, x_offset: f64, y_offset: f64, z_offset: f64) -> Writer<P> {
        self.header.x_offset = x_offset;
        self.header.y_offset = y_offset;
        self.header.z_offset = z_offset;
        self
    }

    /// Enables auto-offsetting.
    ///
    /// If auto-offsetting is enabled, this file will set the header offset values to sensible
    /// values before writing anything. This is usually easier than calculating the offsets
    /// yourself.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::writer::Writer;
    /// let writer = Writer::from_path("temp.las").auto_offsets(true);
    /// ```
    pub fn auto_offsets(mut self, enable: bool) -> Writer<P> {
        self.auto_offsets = enable;
        self
    }

    /// Sets the las version for this writer.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::writer::Writer;
    /// let writer = Writer::from_path("temp.las").version(1, 2);
    /// ```
    pub fn version(mut self, major: u8, minor: u8) -> Writer<P> {
        self.header.version_major = major;
        self.header.version_minor = minor;
        self
    }

    /// Sets the point format for this writer.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::PointFormat;
    /// use las::writer::Writer;
    /// let writer = Writer::from_path("temp.las").point_format(PointFormat(1));
    /// ```
    pub fn point_format(mut self, point_format: PointFormat) -> Writer<P> {
        self.header.point_data_format = point_format;
        self
    }

    /// Writes a point to this writer.
    ///
    /// Note that this point won't actually be written until the writer is closed.
    ///
    /// # Examples
    ///
    /// ```
    /// use las::writer::Writer;
    /// use las::point::Point;
    /// let mut writer = Writer::from_path("temp.las");
    /// writer.write_point(Point::new());
    /// ```
    pub fn write_point(&mut self, point: Point) {
        self.file.add_point(point)
    }

    /// Closes this writer and actually writes data out to disc.
    ///
    /// Since we need to calculate some stats on the points for the header, we delay writing until
    /// the very last minute. If you don't want to hold all those points in memory, we'll need to
    /// come up with some other way to do that.
    ///
    /// NOTE(review): despite the original claim that this "consumes the writer", the signature is
    /// `&mut self`, so the writer remains usable and a second `close` would write the file again —
    /// consider taking `self` by value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fs::remove_file;
    /// use las::writer::Writer;
    /// use las::point::Point;
    /// let mut writer = Writer::from_path("temp.las");
    /// writer.write_point(Point::new());
    /// writer.close().unwrap();
    /// remove_file("temp.las").unwrap();
    /// ```
    pub fn close(&mut self) -> Result<()> {
        self.file.set_header(self.header);
        self.file.to_path(&self.path, self.auto_offsets)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs::remove_file;
    use {PointFormat, File};

    // Round-trips every builder option through an actual file on disk and
    // verifies the header that is read back.
    #[test]
    fn builder() {
        let mut writer = Writer::from_path("builder.las")
                             .scale_factors(1.0, 2.0, 3.0)
                             .offsets(4.0, 5.0, 6.0)
                             .version(1, 2)
                             .point_format(PointFormat(1));
        writer.close().unwrap();

        let file = File::from_path("builder.las").unwrap();
        let header = file.header();
        assert_eq!(1.0, header.x_scale_factor);
        assert_eq!(2.0, header.y_scale_factor);
        assert_eq!(3.0, header.z_scale_factor);
        assert_eq!(4.0, header.x_offset);
        assert_eq!(5.0, header.y_offset);
        assert_eq!(6.0, header.z_offset);
        assert_eq!(1, header.version_major);
        assert_eq!(2, header.version_minor);
        assert_eq!(PointFormat(1), header.point_data_format);
        remove_file("builder.las").unwrap();
    }
}
|
#![deny(missing_doc)]
//! Example for Rust-Event using expression based design.
//!
//! The idea is to use combinators of events to describe more complex events.
//! An 'Action' is a variant of an event that spans across time.
//! You program the actions like a state machine, controlling how they interact with the world.
//!
//! Assume you have a complete list of the actions.
//! Any event you can construct from these actions has a corresponding `Cursor`.
//! The cursor keeps track of the combinatorial state.
//!
//! This design is useful in environments where all actions can be broken down
//! into simple interactions while needing complex combinations of those actions.
extern crate debug;
/// Describes an event.
pub enum Event<A> {
    /// An event where some action is performed.
    Action(A),
    /// An event where nothing happens for a span of time
    /// (units presumably seconds — TODO confirm with callers).
    Wait(f64),
    /// An event where sub events are happening sequentially.
    Sequence(Vec<Event<A>>),
    /// An event where sub events are repeated sequentially forever.
    Repeat(Vec<Event<A>>),
    /// An event where any sub event might happen.
    WhenAny(Vec<Event<A>>),
    /// An event where all sub events happen.
    WhenAll(Vec<Event<A>>),
}
/// Keeps track of an event.
pub enum Cursor<'a, A, S> {
    /// Keeps track of an event where you have a state of an action.
    State(&'a A, S),
    /// Keeps track of an event where you wait and do nothing.
    /// Fields appear to be (duration, elapsed) — TODO confirm.
    WaitCursor(f64, f64),
    /// Keeps track of an event where sub events happens sequentially.
    /// Fields: events, current index, and two time accumulators plus the
    /// cursor of the current sub event (see `Cursor::update`).
    SequenceCursor(&'a Vec<Event<A>>, uint, f64, f64, Box<Cursor<'a, A, S>>),
    /// Keeps track of an event where sub events are repeated sequentially.
    RepeatCursor(&'a Vec<Event<A>>, uint, Box<Cursor<'a, A, S>>),
    /// Keeps track of an event where any sub event might happen.
    WhenAnyCursor(&'a Vec<Event<A>>, Vec<Cursor<'a, A, S>>),
    /// Keeps track of an event where all sub events must happen.
    /// `None` entries presumably mark sub events that already finished —
    /// TODO confirm against the update logic.
    WhenAllCursor(&'a Vec<Event<A>>, Vec<Option<Cursor<'a, A, S>>>),
}
/// Implemented by all actions.
pub trait StartState<S> {
/// Creates a state from action, which tracks the state.
fn start_state(&self) -> S;
}
impl<A: StartState<S>, S> Event<A> {
/// Creates a cursor structure from an event structure.
///
/// The cursor structure keeps track of the state.
/// You can define your own actions and use the combinations
/// to create more complex states.
///
/// NOTE(review): `Sequence` and `Repeat` index their first sub event
/// directly, so constructing a cursor from an empty `Sequence`/`Repeat`
/// fails here — confirm callers never build empty ones.
pub fn to_cursor<'a>(&'a self) -> Cursor<'a, A, S> {
match *self {
Action(ref action)
// The action supplies its own initial state.
=> State(action, action.start_state()),
Wait(dt)
// Nothing waited yet.
=> WaitCursor(dt, 0.0),
Sequence(ref seq)
// Start at the first sub event; the box holds its cursor.
=> SequenceCursor(seq, 0, 0.0, 0.0, box seq.get(0).to_cursor()),
Repeat(ref rep)
=> RepeatCursor(rep, 0, box rep.get(0).to_cursor()),
WhenAny(ref any)
// One cursor per sub event.
=> WhenAnyCursor(any, any.iter().map(|ev| ev.to_cursor()).collect()),
WhenAll(ref all)
// One cursor per sub event; `None` marks a finished one.
=> WhenAllCursor(all, all.iter().map(|ev| Some(ev.to_cursor())).collect()),
}
}
}
impl<'a, A: StartState<S>, S> Cursor<'a, A, S> {
    /// Updates the cursor that tracks an event.
    ///
    /// Returns `None` if the event did not terminate,
    /// or `Some(dt)` telling how much of the update time is left over.
    pub fn update(
        &mut self,
        dt: f64,
        f: |action: &'a A, state: &S| -> Option<S>
    ) -> Option<f64> {
        match *self {
            State(action, ref mut state) => {
                // Call the function that updates the state.
                match f(action, state) {
                    Some(new_state) => { *state = new_state; None },
                    // Actions are considered instant, so when one
                    // terminates the full update time is left over.
                    None => Some(dt),
                }
            },
            // BUG FIX: the wait duration used to be bound to the name `dt`,
            // shadowing the update-time parameter, so the timer never
            // advanced by the real elapsed time and every wait finished
            // immediately.
            WaitCursor(wait_t, ref mut t) => {
                if *t + dt >= wait_t {
                    // Compute the left-over time before clamping the timer.
                    let remaining_dt = *t + dt - wait_t;
                    *t = wait_t;
                    Some(remaining_dt)
                } else {
                    *t += dt;
                    None
                }
            },
            SequenceCursor(seq, ref mut i, _, _, ref mut cursor) => {
                // Feed the update time through the sub events in order;
                // each terminating sub event passes its left-over time to
                // the next one. The two ignored `f64` fields were
                // bookkeeping for the old (incorrect) time accounting and
                // are no longer needed.
                let cur = cursor;
                let mut dt = dt;
                while *i < seq.len() {
                    match cur.update(dt, |action, state| f(action, state)) {
                        // The current sub event did not terminate yet.
                        None => { return None; }
                        // It terminated; continue with the left-over time.
                        Some(new_dt) => { dt = new_dt; }
                    }
                    *i += 1;
                    // End of the sequence: report the left-over time.
                    if *i >= seq.len() { return Some(dt); }
                    // Reuse the same box for the next sub event's cursor
                    // to avoid an allocation.
                    **cur = seq.get(*i).to_cursor();
                }
                None
            },
            _ => unimplemented!(),
        }
    }
}
/////////////////////////////////////////////////////////////////////////////////
/// Some test actions.
pub enum TestActions {
/// Increment accumulator.
Inc,
/// Decrement accumulator.
Dec,
}
// The test actions carry no state of their own, so the state type is `()`.
impl StartState<()> for TestActions {
fn start_state(&self) {}
}
// Drives one `update` call against the cursor and returns the new
// accumulator value. Returning `None` from the closure marks the
// action as terminated.
fn exec(mut acc: u32, dt: f64, cursor: &mut Cursor<TestActions, ()>) -> u32 {
cursor.update(dt, |action, _| {
println!("{:?}", action);
match *action {
Inc => { acc += 1; None },
Dec => { acc -= 1; None },
}
});
acc
}
fn print_2() {
// Prints 2.
let a: u32 = 0;
let seq = Sequence(vec![Action(Inc), Action(Inc)]);
let mut cursor = seq.to_cursor();
let a = exec(a, 0.0, &mut cursor);
println!("{}", a);
}
fn wait_sec() {
// Prints 1 after waiting one second.
// (Only a single `Inc` follows the wait; the previous "Prints 2."
// comment was a copy-paste error.)
let a: u32 = 0;
let seq = Sequence(vec![Wait(1.0), Action(Inc)]);
let mut cursor = seq.to_cursor();
let a = exec(a, 1.0, &mut cursor);
println!("{}", a);
}
// Runs both example scenarios.
fn main() {
print_2();
wait_sec();
}
Fixed sequence algorithm
Closes https://github.com/PistonDevelopers/rust-event/issues/50
#![deny(missing_doc)]
//! Example for Rust-Event using expression based design.
//!
//! The idea is to use combinators of events to describe more complex events.
//! An 'Action' is a variant of an event that spans across time.
//! You program the actions like a state machine, controlling how they interact with the world.
//!
//! Assume you have a complete list of the actions.
//! Any event you can construct from these actions has a corresponding `Cursor`.
//! The cursor keeps track of the combinatorial state.
//!
//! This design is useful in environments where all actions can be broken down
//! into simple interactions while needing complex combinations of those actions.
extern crate debug;
/// Describes an event.
pub enum Event<A> {
/// An event where some action is performed.
Action(A),
/// An event where you wait and do nothing for the given amount of time.
Wait(f64),
/// An event where sub events are happening sequentially.
Sequence(Vec<Event<A>>),
/// An event where sub events are repeated sequentially forever.
Repeat(Vec<Event<A>>),
/// An event where any sub event might happen.
WhenAny(Vec<Event<A>>),
/// An event where all sub events happen.
WhenAll(Vec<Event<A>>),
}
/// Keeps track of an event.
pub enum Cursor<'a, A, S> {
/// Keeps track of an event where you have a state of an action.
State(&'a A, S),
/// Keeps track of an event where you wait and do nothing.
/// Fields: (total time to wait, time waited so far).
WaitCursor(f64, f64),
/// Keeps track of an event where sub events happens sequentially.
/// Fields: (sub events, current index, cursor of the current sub event).
SequenceCursor(&'a Vec<Event<A>>, uint, Box<Cursor<'a, A, S>>),
/// Keeps track of an event where sub events are repeated sequentially.
RepeatCursor(&'a Vec<Event<A>>, uint, Box<Cursor<'a, A, S>>),
/// Keeps track of an event where any sub event might happen.
WhenAnyCursor(&'a Vec<Event<A>>, Vec<Cursor<'a, A, S>>),
/// Keeps track of an event where all sub events must happen.
/// `None` marks a sub event that has already terminated.
WhenAllCursor(&'a Vec<Event<A>>, Vec<Option<Cursor<'a, A, S>>>),
}
/// Implemented by all actions.
pub trait StartState<S> {
/// Creates a state from action, which tracks the state.
fn start_state(&self) -> S;
}
impl<A: StartState<S>, S> Event<A> {
/// Creates a cursor structure from an event structure.
///
/// The cursor structure keeps track of the state.
/// You can define your own actions and use the combinations
/// to create more complex states.
///
/// NOTE(review): `Sequence` and `Repeat` index their first sub event
/// directly, so constructing a cursor from an empty `Sequence`/`Repeat`
/// fails here — confirm callers never build empty ones.
pub fn to_cursor<'a>(&'a self) -> Cursor<'a, A, S> {
match *self {
Action(ref action)
// The action supplies its own initial state.
=> State(action, action.start_state()),
Wait(dt)
// Nothing waited yet.
=> WaitCursor(dt, 0.0),
Sequence(ref seq)
// Start at the first sub event; the box holds its cursor.
=> SequenceCursor(seq, 0, box seq.get(0).to_cursor()),
Repeat(ref rep)
=> RepeatCursor(rep, 0, box rep.get(0).to_cursor()),
WhenAny(ref any)
// One cursor per sub event.
=> WhenAnyCursor(any, any.iter().map(|ev| ev.to_cursor()).collect()),
WhenAll(ref all)
// One cursor per sub event; `None` marks a finished one.
=> WhenAllCursor(all, all.iter().map(|ev| Some(ev.to_cursor())).collect()),
}
}
}
impl<'a, A: StartState<S>, S> Cursor<'a, A, S> {
    /// Updates the cursor that tracks an event.
    ///
    /// Returns `None` if the event did not terminate,
    /// or `Some(dt)` that tells how much time is left of the update time.
    pub fn update(
        &mut self,
        dt: f64,
        f: |action: &'a A, state: &S| -> Option<S>
    ) -> Option<f64> {
        match *self {
            State(action, ref mut state) => {
                // Call the function that updates the state.
                match f(action, state) {
                    Some(new_state) => {
                        *state = new_state;
                        None
                    },
                    // Actions are considered instant,
                    // so there is always a full 'dt' left.
                    None => Some(dt),
                }
            },
            WaitCursor(wait_t, ref mut t) => {
                if *t + dt >= wait_t {
                    // BUG FIX: compute the left-over time *before* clamping
                    // the timer. The old code assigned `*t = wait_t;` first
                    // and then returned `Some(*t + dt - wait_t)`, which
                    // always equals `Some(dt)` — overstating the remaining
                    // time whenever part of 'dt' was spent finishing the
                    // wait (e.g. Sequence(Wait(0.5), Wait(0.6)) at dt = 1.0
                    // would finish both waits).
                    let remaining_dt = *t + dt - wait_t;
                    *t = wait_t;
                    Some(remaining_dt)
                } else {
                    *t += dt;
                    None
                }
            },
            SequenceCursor(
                seq,
                ref mut i,
                ref mut cursor
            ) => {
                // Feed the update time through the sub events in order;
                // each terminating sub event passes its left-over time on.
                let cur = cursor;
                let mut dt = dt;
                while *i < seq.len() {
                    match cur.update(dt, |action, state| f(action, state)) {
                        // The current sub event did not terminate yet.
                        None => { break }
                        Some(new_dt) => { dt = new_dt; }
                    }
                    *i += 1;
                    // If end of sequence,
                    // return the 'dt' that is left.
                    if *i >= seq.len() { return Some(dt); }
                    // Create a new cursor for next event.
                    // Use the same pointer to avoid allocation.
                    **cur = seq.get(*i).to_cursor();
                }
                None
            },
            _ => unimplemented!(),
        }
    }
}
/////////////////////////////////////////////////////////////////////////////////
/// Some test actions.
pub enum TestActions {
/// Increment accumulator.
Inc,
/// Decrement accumulator.
Dec,
}
// The test actions carry no state of their own, so the state type is `()`.
impl StartState<()> for TestActions {
fn start_state(&self) {}
}
// Drives one `update` call against the cursor and returns the new
// accumulator value. Returning `None` from the closure marks the
// action as terminated.
fn exec(mut acc: u32, dt: f64, cursor: &mut Cursor<TestActions, ()>) -> u32 {
cursor.update(dt, |action, _| {
match *action {
Inc => { acc += 1; None },
Dec => { acc -= 1; None },
}
});
acc
}
// Each action that terminates immediately
// consumes a time of 0.0 seconds.
// This makes it possible to execute one action
// after another without delay or waiting for next update.
fn print_2() {
let a: u32 = 0;
let seq = Sequence(vec![Action(Inc), Action(Inc)]);
let mut cursor = seq.to_cursor();
let a = exec(a, 0.0, &mut cursor);
assert_eq!(a, 2);
}
// If you wait exactly the required amount of time, the following
// action executes. This behavior makes it easy to predict
// when an action will run.
fn wait_sec() {
let a: u32 = 0;
let seq = Sequence(vec![Wait(1.0), Action(Inc)]);
let mut cursor = seq.to_cursor();
let a = exec(a, 1.0, &mut cursor);
assert_eq!(a, 1);
}
// When we execute half the time and then the other half,
// then the action should be executed.
fn wait_half_sec() {
let a: u32 = 0;
let seq = Sequence(vec![Wait(1.0), Action(Inc)]);
let mut cursor = seq.to_cursor();
let a = exec(a, 0.5, &mut cursor);
assert_eq!(a, 0);
let a = exec(a, 0.5, &mut cursor);
assert_eq!(a, 1);
}
// A sequence of wait events is the same as one wait event.
fn wait_two_waits() {
let a: u32 = 0;
let seq = Sequence(vec![Wait(0.5), Wait(0.5), Action(Inc)]);
let mut cursor = seq.to_cursor();
let a = exec(a, 1.0, &mut cursor);
assert_eq!(a, 1);
}
// Runs all example scenarios; each asserts its expected result.
fn main() {
print_2();
wait_sec();
wait_half_sec();
wait_two_waits();
}
|
// Copyright © 2015-2017 winapi-rs developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
//! ApiSet Contract for api-ms-win-core-libraryloader-l1
use ctypes::c_int;
use shared::basetsd::LONG_PTR;
use shared::minwindef::{
BOOL, DWORD, FARPROC, HGLOBAL, HINSTANCE, HMODULE, HRSRC, LPVOID, UINT, WORD
};
use um::winnt::{HANDLE, LANGID, LPCSTR, LPCWSTR, LPSTR, LPWSTR, PCWSTR, PVOID};
// Flags for the GetModuleHandleEx* `dwFlags` parameter.
pub const GET_MODULE_HANDLE_EX_FLAG_PIN: DWORD = 0x00000001;
pub const GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT: DWORD = 0x00000002;
pub const GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS: DWORD = 0x00000004;
// Flags for the LoadLibraryEx* `dwFlags` parameter.
pub const DONT_RESOLVE_DLL_REFERENCES: DWORD = 0x00000001;
pub const LOAD_LIBRARY_AS_DATAFILE: DWORD = 0x00000002;
pub const LOAD_WITH_ALTERED_SEARCH_PATH: DWORD = 0x00000008;
pub const LOAD_IGNORE_CODE_AUTHZ_LEVEL: DWORD = 0x00000010;
pub const LOAD_LIBRARY_AS_IMAGE_RESOURCE: DWORD = 0x00000020;
pub const LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE: DWORD = 0x00000040;
pub const LOAD_LIBRARY_REQUIRE_SIGNED_TARGET: DWORD = 0x00000080;
pub const LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR: DWORD = 0x00000100;
pub const LOAD_LIBRARY_SEARCH_APPLICATION_DIR: DWORD = 0x00000200;
pub const LOAD_LIBRARY_SEARCH_USER_DIRS: DWORD = 0x00000400;
pub const LOAD_LIBRARY_SEARCH_SYSTEM32: DWORD = 0x00000800;
pub const LOAD_LIBRARY_SEARCH_DEFAULT_DIRS: DWORD = 0x00001000;
pub const LOAD_LIBRARY_SAFE_CURRENT_DIRS: DWORD = 0x00002000;
pub const LOAD_LIBRARY_SEARCH_SYSTEM32_NO_FORWARDER: DWORD = 0x00004000;
pub const LOAD_LIBRARY_OS_INTEGRITY_CONTINUITY: DWORD = 0x00008000;
// Callback types for the EnumResourceLanguages/Names/Types functions.
FN!{stdcall ENUMRESLANGPROCA(
    hModule: HMODULE,
    lpType: LPCSTR,
    lpName: LPCSTR,
    wLanguage: WORD,
    lParam: LONG_PTR,
) -> BOOL}
FN!{stdcall ENUMRESLANGPROCW(
    hModule: HMODULE,
    lpType: LPCWSTR,
    lpName: LPCWSTR,
    wLanguage: WORD,
    lParam: LONG_PTR,
) -> BOOL}
FN!{stdcall ENUMRESNAMEPROCA(
    hModule: HMODULE,
    lpType: LPCSTR,
    lpName: LPSTR,
    lParam: LONG_PTR,
) -> BOOL}
FN!{stdcall ENUMRESNAMEPROCW(
    hModule: HMODULE,
    lpType: LPCWSTR,
    lpName: LPWSTR,
    lParam: LONG_PTR,
) -> BOOL}
FN!{stdcall ENUMRESTYPEPROCA(
    hModule: HMODULE,
    lpType: LPSTR,
    lParam: LONG_PTR,
) -> BOOL}
FN!{stdcall ENUMRESTYPEPROCW(
    hModule: HMODULE,
    lpType: LPWSTR,
    lParam: LONG_PTR,
) -> BOOL}
// Core module/resource functions of the api-ms-win-core-libraryloader-l1
// API set.
extern "system" {
    pub fn DisableThreadLibraryCalls(
        hLibModule: HMODULE,
    ) -> BOOL;
    pub fn FindResourceExW(
        hModule: HMODULE,
        lpName: LPCWSTR,
        lpType: LPCWSTR,
        wLanguage: WORD,
    ) -> HRSRC;
    pub fn FindStringOrdinal(
        dwFindStringOrdinalFlags: DWORD,
        lpStringSource: LPCWSTR,
        cchSource: c_int,
        lpStringValue: LPCWSTR,
        cchValue: c_int,
        bIgnoreCase: BOOL,
    ) -> c_int;
    pub fn FreeLibrary(
        hLibModule: HMODULE,
    ) -> BOOL;
    pub fn FreeLibraryAndExitThread(
        hLibModule: HMODULE,
        dwExitCode: DWORD,
    );
    pub fn FreeResource(
        hResData: HGLOBAL,
    ) -> BOOL;
    pub fn GetModuleFileNameA(
        hModule: HMODULE,
        lpFilename: LPSTR,
        nSize: DWORD,
    ) -> DWORD;
    pub fn GetModuleFileNameW(
        hModule: HMODULE,
        lpFilename: LPWSTR,
        nSize: DWORD,
    ) -> DWORD;
    pub fn GetModuleHandleA(
        lpModuleName: LPCSTR,
    ) -> HMODULE;
    pub fn GetModuleHandleW(
        lpModuleName: LPCWSTR,
    ) -> HMODULE;
    pub fn GetModuleHandleExA(
        dwFlags: DWORD,
        lpModuleName: LPCSTR,
        phModule: *mut HMODULE,
    ) -> BOOL;
    pub fn GetModuleHandleExW(
        dwFlags: DWORD,
        lpModuleName: LPCWSTR,
        phModule: *mut HMODULE,
    ) -> BOOL;
    pub fn GetProcAddress(
        hModule: HMODULE,
        lpProcName: LPCSTR,
    ) -> FARPROC;
    pub fn LoadLibraryExA(
        lpLibFileName: LPCSTR,
        hFile: HANDLE,
        dwFlags: DWORD,
    ) -> HMODULE;
    pub fn LoadLibraryExW(
        lpLibFileName: LPCWSTR,
        hFile: HANDLE,
        dwFlags: DWORD,
    ) -> HMODULE;
    pub fn LoadResource(
        hModule: HMODULE,
        hResInfo: HRSRC,
    ) -> HGLOBAL;
    pub fn LoadStringA(
        hInstance: HINSTANCE,
        uID: UINT,
        lpBuffer: LPSTR,
        cchBufferMax: c_int,
    ) -> c_int;
    pub fn LoadStringW(
        hInstance: HINSTANCE,
        uID: UINT,
        lpBuffer: LPWSTR,
        cchBufferMax: c_int,
    ) -> c_int;
    pub fn LockResource(
        hResData: HGLOBAL,
    ) -> LPVOID;
    pub fn SizeofResource(
        hModule: HMODULE,
        hResInfo: HRSRC,
    ) -> DWORD;
}
// Opaque cookie returned by AddDllDirectory and consumed by
// RemoveDllDirectory.
pub type DLL_DIRECTORY_COOKIE = PVOID;
pub type PDLL_DIRECTORY_COOKIE = *mut PVOID;
extern "system" {
    pub fn AddDllDirectory(
        NewDirectory: PCWSTR,
    ) -> DLL_DIRECTORY_COOKIE;
    pub fn RemoveDllDirectory(
        Cookie: DLL_DIRECTORY_COOKIE,
    ) -> BOOL;
    pub fn SetDefaultDllDirectories(
        DirectoryFlags: DWORD,
    ) -> BOOL;
    pub fn EnumResourceLanguagesExA(
        hModule: HMODULE,
        lpType: LPCSTR,
        lpName: LPCSTR,
        lpEnumFunc: ENUMRESLANGPROCA,
        lParam: LONG_PTR,
        dwFlags: DWORD,
        LangId: LANGID,
    ) -> BOOL;
    pub fn EnumResourceLanguagesExW(
        hModule: HMODULE,
        lpType: LPCWSTR,
        lpName: LPCWSTR,
        lpEnumFunc: ENUMRESLANGPROCW,
        lParam: LONG_PTR,
        dwFlags: DWORD,
        LangId: LANGID,
    ) -> BOOL;
    pub fn EnumResourceNamesExA(
        hModule: HMODULE,
        lpType: LPCSTR,
        lpEnumFunc: ENUMRESNAMEPROCA,
        lParam: LONG_PTR,
        dwFlags: DWORD,
        LangId: LANGID,
    ) -> BOOL;
    pub fn EnumResourceNamesExW(
        hModule: HMODULE,
        lpType: LPCWSTR,
        lpEnumFunc: ENUMRESNAMEPROCW,
        lParam: LONG_PTR,
        dwFlags: DWORD,
        LangId: LANGID,
    ) -> BOOL;
    pub fn EnumResourceTypesExA(
        hModule: HMODULE,
        lpEnumFunc: ENUMRESTYPEPROCA,
        lParam: LONG_PTR,
        dwFlags: DWORD,
        LangId: LANGID,
    ) -> BOOL;
    pub fn EnumResourceTypesExW(
        hModule: HMODULE,
        lpEnumFunc: ENUMRESTYPEPROCW,
        lParam: LONG_PTR,
        dwFlags: DWORD,
        LangId: LANGID,
    ) -> BOOL;
    pub fn FindResourceW(
        hModule: HMODULE,
        lpName: LPCWSTR,
        lpType: LPCWSTR,
    ) -> HRSRC;
    pub fn LoadLibraryA(
        lpFileName: LPCSTR,
    ) -> HMODULE;
    pub fn LoadLibraryW(
        lpFileName: LPCWSTR,
    ) -> HMODULE;
    pub fn EnumResourceNamesW(
        hModule: HMODULE,
        lpType: LPCWSTR,
        lpEnumFunc: ENUMRESNAMEPROCW,
        lParam: LONG_PTR,
    ) -> BOOL;
}
Add missing constants and remove all spaces (#502)
// Copyright © 2015-2017 winapi-rs developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
//! ApiSet Contract for api-ms-win-core-libraryloader-l1
use ctypes::c_int;
use shared::basetsd::LONG_PTR;
use shared::minwindef::{
BOOL, DWORD, FARPROC, HGLOBAL, HINSTANCE, HMODULE, HRSRC, LPVOID, UINT, WORD
};
use um::winnt::{HANDLE, LANGID, LPCSTR, LPCWSTR, LPSTR, LPWSTR, PCWSTR, PVOID};
// Flags for the GetModuleHandleEx* `dwFlags` parameter.
pub const GET_MODULE_HANDLE_EX_FLAG_PIN: DWORD = 0x00000001;
pub const GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT: DWORD = 0x00000002;
pub const GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS: DWORD = 0x00000004;
// Flags for the LoadLibraryEx* `dwFlags` parameter.
pub const DONT_RESOLVE_DLL_REFERENCES: DWORD = 0x00000001;
pub const LOAD_LIBRARY_AS_DATAFILE: DWORD = 0x00000002;
pub const LOAD_WITH_ALTERED_SEARCH_PATH: DWORD = 0x00000008;
pub const LOAD_IGNORE_CODE_AUTHZ_LEVEL: DWORD = 0x00000010;
pub const LOAD_LIBRARY_AS_IMAGE_RESOURCE: DWORD = 0x00000020;
pub const LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE: DWORD = 0x00000040;
pub const LOAD_LIBRARY_REQUIRE_SIGNED_TARGET: DWORD = 0x00000080;
pub const LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR: DWORD = 0x00000100;
pub const LOAD_LIBRARY_SEARCH_APPLICATION_DIR: DWORD = 0x00000200;
pub const LOAD_LIBRARY_SEARCH_USER_DIRS: DWORD = 0x00000400;
pub const LOAD_LIBRARY_SEARCH_SYSTEM32: DWORD = 0x00000800;
pub const LOAD_LIBRARY_SEARCH_DEFAULT_DIRS: DWORD = 0x00001000;
pub const LOAD_LIBRARY_SAFE_CURRENT_DIRS: DWORD = 0x00002000;
pub const LOAD_LIBRARY_SEARCH_SYSTEM32_NO_FORWARDER: DWORD = 0x00004000;
pub const LOAD_LIBRARY_OS_INTEGRITY_CONTINUITY: DWORD = 0x00008000;
// Callback types for the EnumResourceLanguages/Names/Types functions.
FN!{stdcall ENUMRESLANGPROCA(
hModule: HMODULE,
lpType: LPCSTR,
lpName: LPCSTR,
wLanguage: WORD,
lParam: LONG_PTR,
) -> BOOL}
FN!{stdcall ENUMRESLANGPROCW(
hModule: HMODULE,
lpType: LPCWSTR,
lpName: LPCWSTR,
wLanguage: WORD,
lParam: LONG_PTR,
) -> BOOL}
FN!{stdcall ENUMRESNAMEPROCA(
hModule: HMODULE,
lpType: LPCSTR,
lpName: LPSTR,
lParam: LONG_PTR,
) -> BOOL}
FN!{stdcall ENUMRESNAMEPROCW(
hModule: HMODULE,
lpType: LPCWSTR,
lpName: LPWSTR,
lParam: LONG_PTR,
) -> BOOL}
FN!{stdcall ENUMRESTYPEPROCA(
hModule: HMODULE,
lpType: LPSTR,
lParam: LONG_PTR,
) -> BOOL}
FN!{stdcall ENUMRESTYPEPROCW(
hModule: HMODULE,
lpType: LPWSTR,
lParam: LONG_PTR,
) -> BOOL}
// Core module/resource functions of the api-ms-win-core-libraryloader-l1
// API set.
extern "system" {
pub fn DisableThreadLibraryCalls(
hLibModule: HMODULE,
) -> BOOL;
pub fn FindResourceExW(
hModule: HMODULE,
lpName: LPCWSTR,
lpType: LPCWSTR,
wLanguage: WORD,
) -> HRSRC;
pub fn FindStringOrdinal(
dwFindStringOrdinalFlags: DWORD,
lpStringSource: LPCWSTR,
cchSource: c_int,
lpStringValue: LPCWSTR,
cchValue: c_int,
bIgnoreCase: BOOL,
) -> c_int;
pub fn FreeLibrary(
hLibModule: HMODULE,
) -> BOOL;
pub fn FreeLibraryAndExitThread(
hLibModule: HMODULE,
dwExitCode: DWORD,
);
pub fn FreeResource(
hResData: HGLOBAL,
) -> BOOL;
pub fn GetModuleFileNameA(
hModule: HMODULE,
lpFilename: LPSTR,
nSize: DWORD,
) -> DWORD;
pub fn GetModuleFileNameW(
hModule: HMODULE,
lpFilename: LPWSTR,
nSize: DWORD,
) -> DWORD;
pub fn GetModuleHandleA(
lpModuleName: LPCSTR,
) -> HMODULE;
pub fn GetModuleHandleW(
lpModuleName: LPCWSTR,
) -> HMODULE;
pub fn GetModuleHandleExA(
dwFlags: DWORD,
lpModuleName: LPCSTR,
phModule: *mut HMODULE,
) -> BOOL;
pub fn GetModuleHandleExW(
dwFlags: DWORD,
lpModuleName: LPCWSTR,
phModule: *mut HMODULE,
) -> BOOL;
pub fn GetProcAddress(
hModule: HMODULE,
lpProcName: LPCSTR,
) -> FARPROC;
pub fn LoadLibraryExA(
lpLibFileName: LPCSTR,
hFile: HANDLE,
dwFlags: DWORD,
) -> HMODULE;
pub fn LoadLibraryExW(
lpLibFileName: LPCWSTR,
hFile: HANDLE,
dwFlags: DWORD,
) -> HMODULE;
pub fn LoadResource(
hModule: HMODULE,
hResInfo: HRSRC,
) -> HGLOBAL;
pub fn LoadStringA(
hInstance: HINSTANCE,
uID: UINT,
lpBuffer: LPSTR,
cchBufferMax: c_int,
) -> c_int;
pub fn LoadStringW(
hInstance: HINSTANCE,
uID: UINT,
lpBuffer: LPWSTR,
cchBufferMax: c_int,
) -> c_int;
pub fn LockResource(
hResData: HGLOBAL,
) -> LPVOID;
pub fn SizeofResource(
hModule: HMODULE,
hResInfo: HRSRC,
) -> DWORD;
}
// Opaque cookie returned by AddDllDirectory and consumed by
// RemoveDllDirectory.
pub type DLL_DIRECTORY_COOKIE = PVOID;
pub type PDLL_DIRECTORY_COOKIE = *mut PVOID;
extern "system" {
pub fn AddDllDirectory(
NewDirectory: PCWSTR,
) -> DLL_DIRECTORY_COOKIE;
pub fn RemoveDllDirectory(
Cookie: DLL_DIRECTORY_COOKIE,
) -> BOOL;
pub fn SetDefaultDllDirectories(
DirectoryFlags: DWORD,
) -> BOOL;
pub fn EnumResourceLanguagesExA(
hModule: HMODULE,
lpType: LPCSTR,
lpName: LPCSTR,
lpEnumFunc: ENUMRESLANGPROCA,
lParam: LONG_PTR,
dwFlags: DWORD,
LangId: LANGID,
) -> BOOL;
pub fn EnumResourceLanguagesExW(
hModule: HMODULE,
lpType: LPCWSTR,
lpName: LPCWSTR,
lpEnumFunc: ENUMRESLANGPROCW,
lParam: LONG_PTR,
dwFlags: DWORD,
LangId: LANGID,
) -> BOOL;
pub fn EnumResourceNamesExA(
hModule: HMODULE,
lpType: LPCSTR,
lpEnumFunc: ENUMRESNAMEPROCA,
lParam: LONG_PTR,
dwFlags: DWORD,
LangId: LANGID,
) -> BOOL;
pub fn EnumResourceNamesExW(
hModule: HMODULE,
lpType: LPCWSTR,
lpEnumFunc: ENUMRESNAMEPROCW,
lParam: LONG_PTR,
dwFlags: DWORD,
LangId: LANGID,
) -> BOOL;
pub fn EnumResourceTypesExA(
hModule: HMODULE,
lpEnumFunc: ENUMRESTYPEPROCA,
lParam: LONG_PTR,
dwFlags: DWORD,
LangId: LANGID,
) -> BOOL;
pub fn EnumResourceTypesExW(
hModule: HMODULE,
lpEnumFunc: ENUMRESTYPEPROCW,
lParam: LONG_PTR,
dwFlags: DWORD,
LangId: LANGID,
) -> BOOL;
pub fn FindResourceW(
hModule: HMODULE,
lpName: LPCWSTR,
lpType: LPCWSTR,
) -> HRSRC;
pub fn LoadLibraryA(
lpFileName: LPCSTR,
) -> HMODULE;
pub fn LoadLibraryW(
lpFileName: LPCWSTR,
) -> HMODULE;
pub fn EnumResourceNamesW(
hModule: HMODULE,
lpType: LPCWSTR,
lpEnumFunc: ENUMRESNAMEPROCW,
lParam: LONG_PTR,
) -> BOOL;
}
|
use dox::mem;
// POSIX primitive type aliases shared by Linux-like targets.
pub type sa_family_t = u16;
pub type pthread_key_t = ::c_uint;
pub type speed_t = ::c_uint;
pub type tcflag_t = ::c_uint;
pub type loff_t = ::c_longlong;
pub type clockid_t = ::c_int;
pub type key_t = ::c_int;
pub type id_t = ::c_uint;
// Uninhabited type: the `timezone` argument of gettimeofday(2) is
// obsolete and is only ever passed by pointer, so no values exist.
pub enum timezone {}
s! {
// Generic socket address header; concrete sockaddr_* types share this
// leading `sa_family` field.
pub struct sockaddr {
pub sa_family: sa_family_t,
pub sa_data: [::c_char; 14],
}
pub struct sockaddr_in {
pub sin_family: sa_family_t,
pub sin_port: ::in_port_t,
pub sin_addr: ::in_addr,
pub sin_zero: [u8; 8],
}
pub struct sockaddr_in6 {
pub sin6_family: sa_family_t,
pub sin6_port: ::in_port_t,
pub sin6_flowinfo: u32,
pub sin6_addr: ::in6_addr,
pub sin6_scope_id: u32,
}
pub struct sockaddr_un {
pub sun_family: sa_family_t,
pub sun_path: [::c_char; 108]
}
// Large enough (128 bytes total) to hold any concrete socket address.
pub struct sockaddr_storage {
pub ss_family: sa_family_t,
__ss_align: ::size_t,
// Padding sized so family + align + pad = 128 bytes on each
// pointer width.
#[cfg(target_pointer_width = "32")]
__ss_pad2: [u8; 128 - 2 * 4],
#[cfg(target_pointer_width = "64")]
__ss_pad2: [u8; 128 - 2 * 8],
}
// NOTE(review): field order of `ai_addr`/`ai_canonname` differs between
// glibc-like targets and Android, hence the cfg-gated duplicate field.
pub struct addrinfo {
pub ai_flags: ::c_int,
pub ai_family: ::c_int,
pub ai_socktype: ::c_int,
pub ai_protocol: ::c_int,
pub ai_addrlen: socklen_t,
#[cfg(any(target_os = "linux",
target_os = "emscripten",
target_os = "fuchsia"))]
pub ai_addr: *mut ::sockaddr,
pub ai_canonname: *mut c_char,
#[cfg(target_os = "android")]
pub ai_addr: *mut ::sockaddr,
pub ai_next: *mut addrinfo,
}
pub struct sockaddr_nl {
pub nl_family: ::sa_family_t,
nl_pad: ::c_ushort,
pub nl_pid: u32,
pub nl_groups: u32
}
pub struct sockaddr_ll {
pub sll_family: ::c_ushort,
pub sll_protocol: ::c_ushort,
pub sll_ifindex: ::c_int,
pub sll_hatype: ::c_ushort,
pub sll_pkttype: ::c_uchar,
pub sll_halen: ::c_uchar,
pub sll_addr: [::c_uchar; 8]
}
// One bit per file descriptor, packed into C unsigned longs.
pub struct fd_set {
fds_bits: [::c_ulong; FD_SETSIZE / ULONG_SIZE],
}
pub struct tm {
pub tm_sec: ::c_int,
pub tm_min: ::c_int,
pub tm_hour: ::c_int,
pub tm_mday: ::c_int,
pub tm_mon: ::c_int,
pub tm_year: ::c_int,
pub tm_wday: ::c_int,
pub tm_yday: ::c_int,
pub tm_isdst: ::c_int,
pub tm_gmtoff: ::c_long,
pub tm_zone: *const ::c_char,
}
// musl exposes the sporadic-server scheduling fields; glibc does not.
pub struct sched_param {
pub sched_priority: ::c_int,
#[cfg(any(target_env = "musl"))]
pub sched_ss_low_priority: ::c_int,
#[cfg(any(target_env = "musl"))]
pub sched_ss_repl_period: ::timespec,
#[cfg(any(target_env = "musl"))]
pub sched_ss_init_budget: ::timespec,
#[cfg(any(target_env = "musl"))]
pub sched_ss_max_repl: ::c_int,
}
pub struct Dl_info {
pub dli_fname: *const ::c_char,
pub dli_fbase: *mut ::c_void,
pub dli_sname: *const ::c_char,
pub dli_saddr: *mut ::c_void,
}
// NOTE(review): packed on x86/x86_64 — presumably to match the kernel's
// struct layout on those targets; confirm before changing.
#[cfg_attr(any(all(target_arch = "x86",
not(target_env = "musl"),
not(target_os = "android")),
target_arch = "x86_64"),
repr(packed))]
pub struct epoll_event {
pub events: ::uint32_t,
pub u64: ::uint64_t,
}
pub struct utsname {
pub sysname: [::c_char; 65],
pub nodename: [::c_char; 65],
pub release: [::c_char; 65],
pub version: [::c_char; 65],
pub machine: [::c_char; 65],
pub domainname: [::c_char; 65]
}
pub struct lconv {
pub decimal_point: *mut ::c_char,
pub thousands_sep: *mut ::c_char,
pub grouping: *mut ::c_char,
pub int_curr_symbol: *mut ::c_char,
pub currency_symbol: *mut ::c_char,
pub mon_decimal_point: *mut ::c_char,
pub mon_thousands_sep: *mut ::c_char,
pub mon_grouping: *mut ::c_char,
pub positive_sign: *mut ::c_char,
pub negative_sign: *mut ::c_char,
pub int_frac_digits: ::c_char,
pub frac_digits: ::c_char,
pub p_cs_precedes: ::c_char,
pub p_sep_by_space: ::c_char,
pub n_cs_precedes: ::c_char,
pub n_sep_by_space: ::c_char,
pub p_sign_posn: ::c_char,
pub n_sign_posn: ::c_char,
pub int_p_cs_precedes: ::c_char,
pub int_p_sep_by_space: ::c_char,
pub int_n_cs_precedes: ::c_char,
pub int_n_sep_by_space: ::c_char,
pub int_p_sign_posn: ::c_char,
pub int_n_sign_posn: ::c_char,
}
pub struct sigevent {
pub sigev_value: ::sigval,
pub sigev_signo: ::c_int,
pub sigev_notify: ::c_int,
// Actually a union. We only expose sigev_notify_thread_id because it's
// the most useful member
pub sigev_notify_thread_id: ::c_int,
// Padding so the struct matches the C definition's size.
#[cfg(target_pointer_width = "64")]
__unused1: [::c_int; 11],
#[cfg(target_pointer_width = "32")]
__unused1: [::c_int; 12]
}
}
// intentionally not public, only used for fd_set
// Bit width of a C `unsigned long`, derived from the pointer width;
// used to size the fd_set bit array above.
cfg_if! {
if #[cfg(target_pointer_width = "32")] {
const ULONG_SIZE: usize = 32;
} else if #[cfg(target_pointer_width = "64")] {
const ULONG_SIZE: usize = 64;
} else {
// Unknown target_pointer_width
}
}
// stdlib.h / stdio.h constants.
pub const EXIT_FAILURE: ::c_int = 1;
pub const EXIT_SUCCESS: ::c_int = 0;
pub const RAND_MAX: ::c_int = 2147483647;
pub const EOF: ::c_int = -1;
pub const SEEK_SET: ::c_int = 0;
pub const SEEK_CUR: ::c_int = 1;
pub const SEEK_END: ::c_int = 2;
// setvbuf(3) buffering modes.
pub const _IOFBF: ::c_int = 0;
pub const _IONBF: ::c_int = 2;
pub const _IOLBF: ::c_int = 1;
// fcntl(2) commands.
pub const F_DUPFD: ::c_int = 0;
pub const F_GETFD: ::c_int = 1;
pub const F_SETFD: ::c_int = 2;
pub const F_GETFL: ::c_int = 3;
pub const F_SETFL: ::c_int = 4;
// Linux-specific fcntls
pub const F_SETLEASE: ::c_int = 1024;
pub const F_GETLEASE: ::c_int = 1025;
pub const F_NOTIFY: ::c_int = 1026;
pub const F_DUPFD_CLOEXEC: ::c_int = 1030;
pub const F_SETPIPE_SZ: ::c_int = 1031;
pub const F_GETPIPE_SZ: ::c_int = 1032;
// TODO(#235): Include file sealing fcntls once we have a way to verify them.
pub const SIGTRAP: ::c_int = 5;
pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0;
pub const PTHREAD_CREATE_DETACHED: ::c_int = 1;
// clockid_t values for clock_gettime(2) and friends.
pub const CLOCK_REALTIME: clockid_t = 0;
pub const CLOCK_MONOTONIC: clockid_t = 1;
pub const CLOCK_PROCESS_CPUTIME_ID: clockid_t = 2;
pub const CLOCK_THREAD_CPUTIME_ID: clockid_t = 3;
pub const CLOCK_MONOTONIC_RAW: clockid_t = 4;
pub const CLOCK_REALTIME_COARSE: clockid_t = 5;
pub const CLOCK_MONOTONIC_COARSE: clockid_t = 6;
pub const CLOCK_BOOTTIME: clockid_t = 7;
pub const CLOCK_REALTIME_ALARM: clockid_t = 8;
pub const CLOCK_BOOTTIME_ALARM: clockid_t = 9;
// TODO(#247) Someday our Travis shall have glibc 2.21 (released in Sep
// 2014.) See also musl/mod.rs
// pub const CLOCK_SGI_CYCLE: clockid_t = 10;
// pub const CLOCK_TAI: clockid_t = 11;
pub const TIMER_ABSTIME: ::c_int = 1;
// Resource identifiers for getrlimit(2)/setrlimit(2).
pub const RLIMIT_CPU: ::c_int = 0;
pub const RLIMIT_FSIZE: ::c_int = 1;
pub const RLIMIT_DATA: ::c_int = 2;
pub const RLIMIT_STACK: ::c_int = 3;
pub const RLIMIT_CORE: ::c_int = 4;
pub const RLIMIT_LOCKS: ::c_int = 10;
pub const RLIMIT_SIGPENDING: ::c_int = 11;
pub const RLIMIT_MSGQUEUE: ::c_int = 12;
pub const RLIMIT_NICE: ::c_int = 13;
pub const RLIMIT_RTPRIO: ::c_int = 14;
pub const RUSAGE_SELF: ::c_int = 0;
// open(2) access modes and flags.
pub const O_RDONLY: ::c_int = 0;
pub const O_WRONLY: ::c_int = 1;
pub const O_RDWR: ::c_int = 2;
pub const O_TMPFILE: ::c_int = 0o20000000 | O_DIRECTORY;
pub const SOCK_CLOEXEC: ::c_int = O_CLOEXEC;
// File type and permission bits for st_mode.
pub const S_IFIFO: ::mode_t = 4096;
pub const S_IFCHR: ::mode_t = 8192;
pub const S_IFBLK: ::mode_t = 24576;
pub const S_IFDIR: ::mode_t = 16384;
pub const S_IFREG: ::mode_t = 32768;
pub const S_IFLNK: ::mode_t = 40960;
pub const S_IFSOCK: ::mode_t = 49152;
pub const S_IFMT: ::mode_t = 61440;
pub const S_IRWXU: ::mode_t = 448;
pub const S_IXUSR: ::mode_t = 64;
pub const S_IWUSR: ::mode_t = 128;
pub const S_IRUSR: ::mode_t = 256;
pub const S_IRWXG: ::mode_t = 56;
pub const S_IXGRP: ::mode_t = 8;
pub const S_IWGRP: ::mode_t = 16;
pub const S_IRGRP: ::mode_t = 32;
pub const S_IRWXO: ::mode_t = 7;
pub const S_IXOTH: ::mode_t = 1;
pub const S_IWOTH: ::mode_t = 2;
pub const S_IROTH: ::mode_t = 4;
// access(2) mode bits.
pub const F_OK: ::c_int = 0;
pub const R_OK: ::c_int = 4;
pub const W_OK: ::c_int = 2;
pub const X_OK: ::c_int = 1;
pub const STDIN_FILENO: ::c_int = 0;
pub const STDOUT_FILENO: ::c_int = 1;
pub const STDERR_FILENO: ::c_int = 2;
// Standard signal numbers.
pub const SIGHUP: ::c_int = 1;
pub const SIGINT: ::c_int = 2;
pub const SIGQUIT: ::c_int = 3;
pub const SIGILL: ::c_int = 4;
pub const SIGABRT: ::c_int = 6;
pub const SIGFPE: ::c_int = 8;
pub const SIGKILL: ::c_int = 9;
pub const SIGSEGV: ::c_int = 11;
pub const SIGPIPE: ::c_int = 13;
pub const SIGALRM: ::c_int = 14;
pub const SIGTERM: ::c_int = 15;
// mmap(2)/mprotect(2) protection bits.
pub const PROT_NONE: ::c_int = 0;
pub const PROT_READ: ::c_int = 1;
pub const PROT_WRITE: ::c_int = 2;
pub const PROT_EXEC: ::c_int = 4;
// Locale categories for setlocale(3) and the newlocale(3) masks.
pub const LC_CTYPE: ::c_int = 0;
pub const LC_NUMERIC: ::c_int = 1;
pub const LC_TIME: ::c_int = 2;
pub const LC_COLLATE: ::c_int = 3;
pub const LC_MONETARY: ::c_int = 4;
pub const LC_MESSAGES: ::c_int = 5;
pub const LC_ALL: ::c_int = 6;
pub const LC_CTYPE_MASK: ::c_int = (1 << LC_CTYPE);
pub const LC_NUMERIC_MASK: ::c_int = (1 << LC_NUMERIC);
pub const LC_TIME_MASK: ::c_int = (1 << LC_TIME);
pub const LC_COLLATE_MASK: ::c_int = (1 << LC_COLLATE);
pub const LC_MONETARY_MASK: ::c_int = (1 << LC_MONETARY);
pub const LC_MESSAGES_MASK: ::c_int = (1 << LC_MESSAGES);
// LC_ALL_MASK defined per platform
// mmap(2) flags.
pub const MAP_FILE: ::c_int = 0x0000;
pub const MAP_SHARED: ::c_int = 0x0001;
pub const MAP_PRIVATE: ::c_int = 0x0002;
pub const MAP_FIXED: ::c_int = 0x0010;
pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void;
// MS_ flags for msync(2)
pub const MS_ASYNC: ::c_int = 0x0001;
pub const MS_INVALIDATE: ::c_int = 0x0002;
pub const MS_SYNC: ::c_int = 0x0004;
// MS_ flags for mount(2)
pub const MS_RDONLY: ::c_ulong = 0x01;
pub const MS_NOSUID: ::c_ulong = 0x02;
pub const MS_NODEV: ::c_ulong = 0x04;
pub const MS_NOEXEC: ::c_ulong = 0x08;
pub const MS_SYNCHRONOUS: ::c_ulong = 0x10;
pub const MS_REMOUNT: ::c_ulong = 0x20;
pub const MS_MANDLOCK: ::c_ulong = 0x40;
pub const MS_DIRSYNC: ::c_ulong = 0x80;
pub const MS_NOATIME: ::c_ulong = 0x0400;
pub const MS_NODIRATIME: ::c_ulong = 0x0800;
pub const MS_BIND: ::c_ulong = 0x1000;
pub const MS_MOVE: ::c_ulong = 0x2000;
pub const MS_REC: ::c_ulong = 0x4000;
pub const MS_SILENT: ::c_ulong = 0x8000;
pub const MS_POSIXACL: ::c_ulong = 0x010000;
pub const MS_UNBINDABLE: ::c_ulong = 0x020000;
pub const MS_PRIVATE: ::c_ulong = 0x040000;
pub const MS_SLAVE: ::c_ulong = 0x080000;
pub const MS_SHARED: ::c_ulong = 0x100000;
pub const MS_RELATIME: ::c_ulong = 0x200000;
pub const MS_KERNMOUNT: ::c_ulong = 0x400000;
pub const MS_I_VERSION: ::c_ulong = 0x800000;
pub const MS_STRICTATIME: ::c_ulong = 0x1000000;
pub const MS_ACTIVE: ::c_ulong = 0x40000000;
pub const MS_NOUSER: ::c_ulong = 0x80000000;
pub const MS_MGC_VAL: ::c_ulong = 0xc0ed0000;
pub const MS_MGC_MSK: ::c_ulong = 0xffff0000;
pub const MS_RMT_MASK: ::c_ulong = 0x800051;
// errno values.
pub const EPERM: ::c_int = 1;
pub const ENOENT: ::c_int = 2;
pub const ESRCH: ::c_int = 3;
pub const EINTR: ::c_int = 4;
pub const EIO: ::c_int = 5;
pub const ENXIO: ::c_int = 6;
pub const E2BIG: ::c_int = 7;
pub const ENOEXEC: ::c_int = 8;
pub const EBADF: ::c_int = 9;
pub const ECHILD: ::c_int = 10;
pub const EAGAIN: ::c_int = 11;
pub const ENOMEM: ::c_int = 12;
pub const EACCES: ::c_int = 13;
pub const EFAULT: ::c_int = 14;
pub const ENOTBLK: ::c_int = 15;
pub const EBUSY: ::c_int = 16;
pub const EEXIST: ::c_int = 17;
pub const EXDEV: ::c_int = 18;
pub const ENODEV: ::c_int = 19;
pub const ENOTDIR: ::c_int = 20;
pub const EISDIR: ::c_int = 21;
pub const EINVAL: ::c_int = 22;
pub const ENFILE: ::c_int = 23;
pub const EMFILE: ::c_int = 24;
pub const ENOTTY: ::c_int = 25;
pub const ETXTBSY: ::c_int = 26;
pub const EFBIG: ::c_int = 27;
pub const ENOSPC: ::c_int = 28;
pub const ESPIPE: ::c_int = 29;
pub const EROFS: ::c_int = 30;
pub const EMLINK: ::c_int = 31;
pub const EPIPE: ::c_int = 32;
pub const EDOM: ::c_int = 33;
pub const ERANGE: ::c_int = 34;
pub const EWOULDBLOCK: ::c_int = EAGAIN;
pub const SCM_RIGHTS: ::c_int = 0x01;
pub const SCM_CREDENTIALS: ::c_int = 0x02;
pub const IPPROTO_RAW: ::c_int = 255;
pub const PROT_GROWSDOWN: ::c_int = 0x1000000;
pub const PROT_GROWSUP: ::c_int = 0x2000000;
pub const MAP_TYPE: ::c_int = 0x000f;
pub const MADV_NORMAL: ::c_int = 0;
pub const MADV_RANDOM: ::c_int = 1;
pub const MADV_SEQUENTIAL: ::c_int = 2;
pub const MADV_WILLNEED: ::c_int = 3;
pub const MADV_DONTNEED: ::c_int = 4;
pub const MADV_REMOVE: ::c_int = 9;
pub const MADV_DONTFORK: ::c_int = 10;
pub const MADV_DOFORK: ::c_int = 11;
pub const MADV_MERGEABLE: ::c_int = 12;
pub const MADV_UNMERGEABLE: ::c_int = 13;
pub const MADV_HWPOISON: ::c_int = 100;
pub const IFF_UP: ::c_int = 0x1;
pub const IFF_BROADCAST: ::c_int = 0x2;
pub const IFF_DEBUG: ::c_int = 0x4;
pub const IFF_LOOPBACK: ::c_int = 0x8;
pub const IFF_POINTOPOINT: ::c_int = 0x10;
pub const IFF_NOTRAILERS: ::c_int = 0x20;
pub const IFF_RUNNING: ::c_int = 0x40;
pub const IFF_NOARP: ::c_int = 0x80;
pub const IFF_PROMISC: ::c_int = 0x100;
pub const IFF_ALLMULTI: ::c_int = 0x200;
pub const IFF_MASTER: ::c_int = 0x400;
pub const IFF_SLAVE: ::c_int = 0x800;
pub const IFF_MULTICAST: ::c_int = 0x1000;
pub const IFF_PORTSEL: ::c_int = 0x2000;
pub const IFF_AUTOMEDIA: ::c_int = 0x4000;
pub const IFF_DYNAMIC: ::c_int = 0x8000;
pub const SOL_IP: ::c_int = 0;
pub const SOL_TCP: ::c_int = 6;
pub const SOL_IPV6: ::c_int = 41;
pub const SOL_ICMPV6: ::c_int = 58;
pub const SOL_RAW: ::c_int = 255;
pub const SOL_DECNET: ::c_int = 261;
pub const SOL_X25: ::c_int = 262;
pub const SOL_PACKET: ::c_int = 263;
pub const SOL_ATM: ::c_int = 264;
pub const SOL_AAL: ::c_int = 265;
pub const SOL_IRDA: ::c_int = 266;
pub const SOL_NETBEUI: ::c_int = 267;
pub const SOL_LLC: ::c_int = 268;
pub const SOL_DCCP: ::c_int = 269;
pub const SOL_NETLINK: ::c_int = 270;
pub const SOL_TIPC: ::c_int = 271;
pub const AF_UNSPEC: ::c_int = 0;
pub const AF_UNIX: ::c_int = 1;
pub const AF_LOCAL: ::c_int = 1;
pub const AF_INET: ::c_int = 2;
pub const AF_AX25: ::c_int = 3;
pub const AF_IPX: ::c_int = 4;
pub const AF_APPLETALK: ::c_int = 5;
pub const AF_NETROM: ::c_int = 6;
pub const AF_BRIDGE: ::c_int = 7;
pub const AF_ATMPVC: ::c_int = 8;
pub const AF_X25: ::c_int = 9;
pub const AF_INET6: ::c_int = 10;
pub const AF_ROSE: ::c_int = 11;
pub const AF_DECnet: ::c_int = 12;
pub const AF_NETBEUI: ::c_int = 13;
pub const AF_SECURITY: ::c_int = 14;
pub const AF_KEY: ::c_int = 15;
pub const AF_NETLINK: ::c_int = 16;
pub const AF_ROUTE: ::c_int = AF_NETLINK;
pub const AF_PACKET: ::c_int = 17;
pub const AF_ASH: ::c_int = 18;
pub const AF_ECONET: ::c_int = 19;
pub const AF_ATMSVC: ::c_int = 20;
pub const AF_RDS: ::c_int = 21;
pub const AF_SNA: ::c_int = 22;
pub const AF_IRDA: ::c_int = 23;
pub const AF_PPPOX: ::c_int = 24;
pub const AF_WANPIPE: ::c_int = 25;
pub const AF_LLC: ::c_int = 26;
pub const AF_CAN: ::c_int = 29;
pub const AF_TIPC: ::c_int = 30;
pub const AF_BLUETOOTH: ::c_int = 31;
pub const AF_IUCV: ::c_int = 32;
pub const AF_RXRPC: ::c_int = 33;
pub const AF_ISDN: ::c_int = 34;
pub const AF_PHONET: ::c_int = 35;
pub const AF_IEEE802154: ::c_int = 36;
pub const AF_CAIF: ::c_int = 37;
pub const AF_ALG: ::c_int = 38;
pub const PF_UNSPEC: ::c_int = AF_UNSPEC;
pub const PF_UNIX: ::c_int = AF_UNIX;
pub const PF_LOCAL: ::c_int = AF_LOCAL;
pub const PF_INET: ::c_int = AF_INET;
pub const PF_AX25: ::c_int = AF_AX25;
pub const PF_IPX: ::c_int = AF_IPX;
pub const PF_APPLETALK: ::c_int = AF_APPLETALK;
pub const PF_NETROM: ::c_int = AF_NETROM;
pub const PF_BRIDGE: ::c_int = AF_BRIDGE;
pub const PF_ATMPVC: ::c_int = AF_ATMPVC;
pub const PF_X25: ::c_int = AF_X25;
pub const PF_INET6: ::c_int = AF_INET6;
pub const PF_ROSE: ::c_int = AF_ROSE;
pub const PF_DECnet: ::c_int = AF_DECnet;
pub const PF_NETBEUI: ::c_int = AF_NETBEUI;
pub const PF_SECURITY: ::c_int = AF_SECURITY;
pub const PF_KEY: ::c_int = AF_KEY;
pub const PF_NETLINK: ::c_int = AF_NETLINK;
pub const PF_ROUTE: ::c_int = AF_ROUTE;
pub const PF_PACKET: ::c_int = AF_PACKET;
pub const PF_ASH: ::c_int = AF_ASH;
pub const PF_ECONET: ::c_int = AF_ECONET;
pub const PF_ATMSVC: ::c_int = AF_ATMSVC;
pub const PF_RDS: ::c_int = AF_RDS;
pub const PF_SNA: ::c_int = AF_SNA;
pub const PF_IRDA: ::c_int = AF_IRDA;
pub const PF_PPPOX: ::c_int = AF_PPPOX;
pub const PF_WANPIPE: ::c_int = AF_WANPIPE;
pub const PF_LLC: ::c_int = AF_LLC;
pub const PF_CAN: ::c_int = AF_CAN;
pub const PF_TIPC: ::c_int = AF_TIPC;
pub const PF_BLUETOOTH: ::c_int = AF_BLUETOOTH;
pub const PF_IUCV: ::c_int = AF_IUCV;
pub const PF_RXRPC: ::c_int = AF_RXRPC;
pub const PF_ISDN: ::c_int = AF_ISDN;
pub const PF_PHONET: ::c_int = AF_PHONET;
pub const PF_IEEE802154: ::c_int = AF_IEEE802154;
pub const PF_CAIF: ::c_int = AF_CAIF;
pub const PF_ALG: ::c_int = AF_ALG;
pub const SOMAXCONN: ::c_int = 128;
pub const MSG_OOB: ::c_int = 1;
pub const MSG_PEEK: ::c_int = 2;
pub const MSG_DONTROUTE: ::c_int = 4;
pub const MSG_CTRUNC: ::c_int = 8;
pub const MSG_TRUNC: ::c_int = 0x20;
pub const MSG_DONTWAIT: ::c_int = 0x40;
pub const MSG_EOR: ::c_int = 0x80;
pub const MSG_WAITALL: ::c_int = 0x100;
pub const MSG_FIN: ::c_int = 0x200;
pub const MSG_SYN: ::c_int = 0x400;
pub const MSG_CONFIRM: ::c_int = 0x800;
pub const MSG_RST: ::c_int = 0x1000;
pub const MSG_ERRQUEUE: ::c_int = 0x2000;
pub const MSG_NOSIGNAL: ::c_int = 0x4000;
pub const MSG_MORE: ::c_int = 0x8000;
pub const MSG_WAITFORONE: ::c_int = 0x10000;
pub const MSG_FASTOPEN: ::c_int = 0x20000000;
pub const MSG_CMSG_CLOEXEC: ::c_int = 0x40000000;
pub const SOCK_RAW: ::c_int = 3;
pub const IPPROTO_ICMP: ::c_int = 1;
pub const IPPROTO_ICMPV6: ::c_int = 58;
pub const IPPROTO_TCP: ::c_int = 6;
pub const IPPROTO_IP: ::c_int = 0;
pub const IPPROTO_IPV6: ::c_int = 41;
pub const IP_MULTICAST_TTL: ::c_int = 33;
pub const IP_MULTICAST_LOOP: ::c_int = 34;
pub const IP_TTL: ::c_int = 2;
pub const IP_HDRINCL: ::c_int = 3;
pub const IP_ADD_MEMBERSHIP: ::c_int = 35;
pub const IP_DROP_MEMBERSHIP: ::c_int = 36;
pub const IP_TRANSPARENT: ::c_int = 19;
pub const IPV6_ADD_MEMBERSHIP: ::c_int = 20;
pub const IPV6_DROP_MEMBERSHIP: ::c_int = 21;
pub const TCP_NODELAY: ::c_int = 1;
pub const TCP_MAXSEG: ::c_int = 2;
pub const TCP_CORK: ::c_int = 3;
pub const TCP_KEEPIDLE: ::c_int = 4;
pub const TCP_KEEPINTVL: ::c_int = 5;
pub const TCP_KEEPCNT: ::c_int = 6;
pub const TCP_SYNCNT: ::c_int = 7;
pub const TCP_LINGER2: ::c_int = 8;
pub const TCP_DEFER_ACCEPT: ::c_int = 9;
pub const TCP_WINDOW_CLAMP: ::c_int = 10;
pub const TCP_INFO: ::c_int = 11;
pub const TCP_QUICKACK: ::c_int = 12;
pub const TCP_CONGESTION: ::c_int = 13;
pub const IPV6_MULTICAST_LOOP: ::c_int = 19;
pub const IPV6_V6ONLY: ::c_int = 26;
pub const SO_DEBUG: ::c_int = 1;
pub const SHUT_RD: ::c_int = 0;
pub const SHUT_WR: ::c_int = 1;
pub const SHUT_RDWR: ::c_int = 2;
pub const LOCK_SH: ::c_int = 1;
pub const LOCK_EX: ::c_int = 2;
pub const LOCK_NB: ::c_int = 4;
pub const LOCK_UN: ::c_int = 8;
pub const SS_ONSTACK: ::c_int = 1;
pub const SS_DISABLE: ::c_int = 2;
pub const PATH_MAX: ::c_int = 4096;
pub const FD_SETSIZE: usize = 1024;
pub const EPOLLIN: ::c_int = 0x1;
pub const EPOLLPRI: ::c_int = 0x2;
pub const EPOLLOUT: ::c_int = 0x4;
pub const EPOLLRDNORM: ::c_int = 0x40;
pub const EPOLLRDBAND: ::c_int = 0x80;
pub const EPOLLWRNORM: ::c_int = 0x100;
pub const EPOLLWRBAND: ::c_int = 0x200;
pub const EPOLLMSG: ::c_int = 0x400;
pub const EPOLLERR: ::c_int = 0x8;
pub const EPOLLHUP: ::c_int = 0x10;
pub const EPOLLET: ::c_int = 0x80000000;
pub const EPOLL_CTL_ADD: ::c_int = 1;
pub const EPOLL_CTL_MOD: ::c_int = 3;
pub const EPOLL_CTL_DEL: ::c_int = 2;
pub const MNT_DETACH: ::c_int = 0x2;
pub const MNT_EXPIRE: ::c_int = 0x4;
pub const Q_GETFMT: ::c_int = 0x800004;
pub const Q_GETINFO: ::c_int = 0x800005;
pub const Q_SETINFO: ::c_int = 0x800006;
pub const QIF_BLIMITS: ::uint32_t = 1;
pub const QIF_SPACE: ::uint32_t = 2;
pub const QIF_ILIMITS: ::uint32_t = 4;
pub const QIF_INODES: ::uint32_t = 8;
pub const QIF_BTIME: ::uint32_t = 16;
pub const QIF_ITIME: ::uint32_t = 32;
pub const QIF_LIMITS: ::uint32_t = 5;
pub const QIF_USAGE: ::uint32_t = 10;
pub const QIF_TIMES: ::uint32_t = 48;
pub const QIF_ALL: ::uint32_t = 63;
pub const MNT_FORCE: ::c_int = 0x1;
pub const Q_SYNC: ::c_int = 0x800001;
pub const Q_QUOTAON: ::c_int = 0x800002;
pub const Q_QUOTAOFF: ::c_int = 0x800003;
pub const Q_GETQUOTA: ::c_int = 0x800007;
pub const Q_SETQUOTA: ::c_int = 0x800008;
pub const TCIOFF: ::c_int = 2;
pub const TCION: ::c_int = 3;
pub const TCOOFF: ::c_int = 0;
pub const TCOON: ::c_int = 1;
pub const TCIFLUSH: ::c_int = 0;
pub const TCOFLUSH: ::c_int = 1;
pub const TCIOFLUSH: ::c_int = 2;
pub const NL0: ::c_int = 0x00000000;
pub const NL1: ::c_int = 0x00000100;
pub const TAB0: ::c_int = 0x00000000;
pub const CR0: ::c_int = 0x00000000;
pub const FF0: ::c_int = 0x00000000;
pub const BS0: ::c_int = 0x00000000;
pub const VT0: ::c_int = 0x00000000;
pub const VERASE: usize = 2;
pub const VKILL: usize = 3;
pub const VINTR: usize = 0;
pub const VQUIT: usize = 1;
pub const VLNEXT: usize = 15;
pub const IGNBRK: ::tcflag_t = 0x00000001;
pub const BRKINT: ::tcflag_t = 0x00000002;
pub const IGNPAR: ::tcflag_t = 0x00000004;
pub const PARMRK: ::tcflag_t = 0x00000008;
pub const INPCK: ::tcflag_t = 0x00000010;
pub const ISTRIP: ::tcflag_t = 0x00000020;
pub const INLCR: ::tcflag_t = 0x00000040;
pub const IGNCR: ::tcflag_t = 0x00000080;
pub const ICRNL: ::tcflag_t = 0x00000100;
pub const IXANY: ::tcflag_t = 0x00000800;
pub const IMAXBEL: ::tcflag_t = 0x00002000;
pub const OPOST: ::tcflag_t = 0x1;
pub const CS5: ::tcflag_t = 0x00000000;
pub const CRTSCTS: ::tcflag_t = 0x80000000;
pub const ECHO: ::tcflag_t = 0x00000008;
pub const CLONE_VM: ::c_int = 0x100;
pub const CLONE_FS: ::c_int = 0x200;
pub const CLONE_FILES: ::c_int = 0x400;
pub const CLONE_SIGHAND: ::c_int = 0x800;
pub const CLONE_PTRACE: ::c_int = 0x2000;
pub const CLONE_VFORK: ::c_int = 0x4000;
pub const CLONE_PARENT: ::c_int = 0x8000;
pub const CLONE_THREAD: ::c_int = 0x10000;
pub const CLONE_NEWNS: ::c_int = 0x20000;
pub const CLONE_SYSVSEM: ::c_int = 0x40000;
pub const CLONE_SETTLS: ::c_int = 0x80000;
pub const CLONE_PARENT_SETTID: ::c_int = 0x100000;
pub const CLONE_CHILD_CLEARTID: ::c_int = 0x200000;
pub const CLONE_DETACHED: ::c_int = 0x400000;
pub const CLONE_UNTRACED: ::c_int = 0x800000;
pub const CLONE_CHILD_SETTID: ::c_int = 0x01000000;
pub const CLONE_NEWUTS: ::c_int = 0x04000000;
pub const CLONE_NEWIPC: ::c_int = 0x08000000;
pub const CLONE_NEWUSER: ::c_int = 0x10000000;
pub const CLONE_NEWPID: ::c_int = 0x20000000;
pub const CLONE_NEWNET: ::c_int = 0x40000000;
pub const CLONE_IO: ::c_int = 0x80000000;
pub const WNOHANG: ::c_int = 0x00000001;
pub const WUNTRACED: ::c_int = 0x00000002;
pub const WSTOPPED: ::c_int = WUNTRACED;
pub const WEXITED: ::c_int = 0x00000004;
pub const WCONTINUED: ::c_int = 0x00000008;
pub const WNOWAIT: ::c_int = 0x01000000;
pub const __WNOTHREAD: ::c_int = 0x20000000;
pub const __WALL: ::c_int = 0x40000000;
pub const __WCLONE: ::c_int = 0x80000000;
pub const SPLICE_F_MOVE: ::c_uint = 0x01;
pub const SPLICE_F_NONBLOCK: ::c_uint = 0x02;
pub const SPLICE_F_MORE: ::c_uint = 0x04;
pub const SPLICE_F_GIFT: ::c_uint = 0x08;
pub const RTLD_LOCAL: ::c_int = 0;
pub const POSIX_FADV_NORMAL: ::c_int = 0;
pub const POSIX_FADV_RANDOM: ::c_int = 1;
pub const POSIX_FADV_SEQUENTIAL: ::c_int = 2;
pub const POSIX_FADV_WILLNEED: ::c_int = 3;
pub const AT_FDCWD: ::c_int = -100;
pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x100;
pub const LOG_CRON: ::c_int = 9 << 3;
pub const LOG_AUTHPRIV: ::c_int = 10 << 3;
pub const LOG_FTP: ::c_int = 11 << 3;
pub const LOG_PERROR: ::c_int = 0x20;
pub const PIPE_BUF: usize = 4096;
pub const SI_LOAD_SHIFT: ::c_uint = 16;
pub const SIGEV_SIGNAL: ::c_int = 0;
pub const SIGEV_NONE: ::c_int = 1;
pub const SIGEV_THREAD: ::c_int = 2;
pub const P_ALL: idtype_t = 0;
pub const P_PID: idtype_t = 1;
pub const P_PGID: idtype_t = 2;
// Reimplementations of C macros that have no linkable symbol in libc.
// The `f!` macro expands each item into an `#[inline]` unsafe-callable fn.
f! {
    // C macro FD_CLR: clear `fd`'s bit in the descriptor set.
    pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () {
        let fd = fd as usize;
        // Number of bits in one element of `fds_bits` (platform word size).
        let size = mem::size_of_val(&(*set).fds_bits[0]) * 8;
        (*set).fds_bits[fd / size] &= !(1 << (fd % size));
        return
    }

    // C macro FD_ISSET: test whether `fd`'s bit is set.
    pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool {
        let fd = fd as usize;
        let size = mem::size_of_val(&(*set).fds_bits[0]) * 8;
        return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0
    }

    // C macro FD_SET: set `fd`'s bit in the descriptor set.
    pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () {
        let fd = fd as usize;
        let size = mem::size_of_val(&(*set).fds_bits[0]) * 8;
        (*set).fds_bits[fd / size] |= 1 << (fd % size);
        return
    }

    // C macro FD_ZERO: clear every bit in the descriptor set.
    pub fn FD_ZERO(set: *mut fd_set) -> () {
        for slot in (*set).fds_bits.iter_mut() {
            *slot = 0;
        }
    }

    // wait(2) status decoding, mirroring glibc's <bits/waitstatus.h> bit
    // layout: low 7 bits = termination signal (0x7f = stopped sentinel),
    // bit 7 = core-dump flag, bits 8..15 = exit code or stop signal.

    // True if the child is currently stopped (low byte == 0x7f).
    pub fn WIFSTOPPED(status: ::c_int) -> bool {
        (status & 0xff) == 0x7f
    }

    // Signal that stopped the child (valid only when WIFSTOPPED).
    pub fn WSTOPSIG(status: ::c_int) -> ::c_int {
        (status >> 8) & 0xff
    }

    // True if the child was resumed by SIGCONT (special 0xffff encoding).
    pub fn WIFCONTINUED(status: ::c_int) -> bool {
        status == 0xffff
    }

    // True if the child was killed by a signal. The `+1`/`as i8` trick
    // rejects both 0 (normal exit) and 0x7f (stopped), which would wrap
    // to 1 and -128 respectively — only real signal numbers pass `>= 2`.
    pub fn WIFSIGNALED(status: ::c_int) -> bool {
        ((status & 0x7f) + 1) as i8 >= 2
    }

    // Signal that terminated the child (valid only when WIFSIGNALED).
    pub fn WTERMSIG(status: ::c_int) -> ::c_int {
        status & 0x7f
    }

    // True if the child exited normally (no signal bits set).
    pub fn WIFEXITED(status: ::c_int) -> bool {
        (status & 0x7f) == 0
    }

    // Exit code passed to exit(3) (valid only when WIFEXITED).
    pub fn WEXITSTATUS(status: ::c_int) -> ::c_int {
        (status >> 8) & 0xff
    }

    // True if the terminating signal produced a core dump (bit 7).
    pub fn WCOREDUMP(status: ::c_int) -> bool {
        (status & 0x80) != 0
    }
}
// Foreign functions available on Linux-like (non-BSD) unix platforms.
// Signatures must match the C declarations exactly; bodies live in the
// platform's libc.
extern {
    // --- reentrant passwd lookups -------------------------------------
    pub fn getpwnam_r(name: *const ::c_char,
                      pwd: *mut passwd,
                      buf: *mut ::c_char,
                      buflen: ::size_t,
                      result: *mut *mut passwd) -> ::c_int;
    pub fn getpwuid_r(uid: ::uid_t,
                      pwd: *mut passwd,
                      buf: *mut ::c_char,
                      buflen: ::size_t,
                      result: *mut *mut passwd) -> ::c_int;
    pub fn fdatasync(fd: ::c_int) -> ::c_int;
    pub fn mincore(addr: *mut ::c_void, len: ::size_t,
                   vec: *mut ::c_uchar) -> ::c_int;
    // --- POSIX clocks -------------------------------------------------
    pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
    pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
    pub fn clock_nanosleep(clk_id: clockid_t,
                           flags: ::c_int,
                           rqtp: *const ::timespec,
                           rmtp: *mut ::timespec) -> ::c_int;
    pub fn clock_settime(clk_id: clockid_t, tp: *const ::timespec) -> ::c_int;
    pub fn prctl(option: ::c_int, ...) -> ::c_int;
    // --- pthread attribute queries ------------------------------------
    pub fn pthread_getattr_np(native: ::pthread_t,
                              attr: *mut ::pthread_attr_t) -> ::c_int;
    pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t,
                                     guardsize: *mut ::size_t) -> ::c_int;
    pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t,
                                 stackaddr: *mut *mut ::c_void,
                                 stacksize: *mut ::size_t) -> ::c_int;
    pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void;
    pub fn setgroups(ngroups: ::size_t,
                     ptr: *const ::gid_t) -> ::c_int;
    pub fn initgroups(user: *const ::c_char, group: ::gid_t) -> ::c_int;
    // --- scheduling ---------------------------------------------------
    pub fn sched_setscheduler(pid: ::pid_t,
                              policy: ::c_int,
                              param: *const sched_param) -> ::c_int;
    pub fn sched_getscheduler(pid: ::pid_t) -> ::c_int;
    pub fn sched_get_priority_max(policy: ::c_int) -> ::c_int;
    pub fn sched_get_priority_min(policy: ::c_int) -> ::c_int;
    // --- epoll --------------------------------------------------------
    pub fn epoll_create(size: ::c_int) -> ::c_int;
    pub fn epoll_create1(flags: ::c_int) -> ::c_int;
    pub fn epoll_ctl(epfd: ::c_int,
                     op: ::c_int,
                     fd: ::c_int,
                     event: *mut epoll_event) -> ::c_int;
    pub fn epoll_wait(epfd: ::c_int,
                      events: *mut epoll_event,
                      maxevents: ::c_int,
                      timeout: ::c_int) -> ::c_int;
    pub fn pipe2(fds: *mut ::c_int, flags: ::c_int) -> ::c_int;
    // --- filesystem mounting ------------------------------------------
    pub fn mount(src: *const ::c_char,
                 target: *const ::c_char,
                 fstype: *const ::c_char,
                 flags: ::c_ulong,
                 data: *const ::c_void) -> ::c_int;
    pub fn umount(target: *const ::c_char) -> ::c_int;
    pub fn umount2(target: *const ::c_char, flags: ::c_int) -> ::c_int;
    pub fn clone(cb: extern fn(*mut ::c_void) -> ::c_int,
                 child_stack: *mut ::c_void,
                 flags: ::c_int,
                 arg: *mut ::c_void, ...) -> ::c_int;
    pub fn statfs(path: *const ::c_char, buf: *mut statfs) -> ::c_int;
    pub fn fstatfs(fd: ::c_int, buf: *mut statfs) -> ::c_int;
    pub fn memrchr(cx: *const ::c_void,
                   c: ::c_int,
                   n: ::size_t) -> *mut ::c_void;
    pub fn syscall(num: ::c_long, ...) -> ::c_long;
    // --- zero-copy I/O (sendfile/splice family) -----------------------
    pub fn sendfile(out_fd: ::c_int,
                    in_fd: ::c_int,
                    offset: *mut off_t,
                    count: ::size_t) -> ::ssize_t;
    pub fn splice(fd_in: ::c_int,
                  off_in: *mut ::loff_t,
                  fd_out: ::c_int,
                  off_out: *mut ::loff_t,
                  len: ::size_t,
                  flags: ::c_uint) -> ::ssize_t;
    pub fn tee(fd_in: ::c_int,
               fd_out: ::c_int,
               len: ::size_t,
               flags: ::c_uint) -> ::ssize_t;
    pub fn vmsplice(fd: ::c_int,
                    iov: *const ::iovec,
                    nr_segs: ::size_t,
                    flags: ::c_uint) -> ::ssize_t;
    pub fn posix_fadvise(fd: ::c_int, offset: ::off_t, len: ::off_t,
                         advise: ::c_int) -> ::c_int;
    pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int;
    pub fn utimensat(dirfd: ::c_int, path: *const ::c_char,
                     times: *const ::timespec, flag: ::c_int) -> ::c_int;
    // --- locale objects -----------------------------------------------
    pub fn duplocale(base: ::locale_t) -> ::locale_t;
    pub fn freelocale(loc: ::locale_t);
    pub fn newlocale(mask: ::c_int,
                     locale: *const ::c_char,
                     base: ::locale_t) -> ::locale_t;
    pub fn uselocale(loc: ::locale_t) -> ::locale_t;
    // --- LFS (large file, 64-bit off_t) variants ----------------------
    pub fn creat64(path: *const c_char, mode: mode_t) -> ::c_int;
    pub fn fstat64(fildes: ::c_int, buf: *mut stat64) -> ::c_int;
    pub fn ftruncate64(fd: ::c_int, length: off64_t) -> ::c_int;
    pub fn getrlimit64(resource: ::c_int, rlim: *mut rlimit64) -> ::c_int;
    pub fn lseek64(fd: ::c_int, offset: off64_t, whence: ::c_int) -> off64_t;
    pub fn lstat64(path: *const c_char, buf: *mut stat64) -> ::c_int;
    pub fn mmap64(addr: *mut ::c_void,
                  len: ::size_t,
                  prot: ::c_int,
                  flags: ::c_int,
                  fd: ::c_int,
                  offset: off64_t)
                  -> *mut ::c_void;
    pub fn open64(path: *const c_char, oflag: ::c_int, ...) -> ::c_int;
    pub fn pread64(fd: ::c_int, buf: *mut ::c_void, count: ::size_t,
                   offset: off64_t) -> ::ssize_t;
    pub fn pwrite64(fd: ::c_int, buf: *const ::c_void, count: ::size_t,
                    offset: off64_t) -> ::ssize_t;
    pub fn readdir64_r(dirp: *mut ::DIR, entry: *mut ::dirent64,
                       result: *mut *mut ::dirent64) -> ::c_int;
    pub fn setrlimit64(resource: ::c_int, rlim: *const rlimit64) -> ::c_int;
    pub fn stat64(path: *const c_char, buf: *mut stat64) -> ::c_int;
    pub fn eventfd(init: ::c_uint, flags: ::c_int) -> ::c_int;
    pub fn sysinfo (info: *mut ::sysinfo) -> ::c_int;
    pub fn fdopendir(fd: ::c_int) -> *mut ::DIR;
    pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char,
                   mode: ::mode_t, dev: dev_t) -> ::c_int;
    pub fn ppoll(fds: *mut ::pollfd,
                 nfds: nfds_t,
                 timeout: *const ::timespec,
                 sigmask: *const sigset_t) -> ::c_int;
    // --- condvar attributes -------------------------------------------
    pub fn pthread_condattr_getclock(attr: *const pthread_condattr_t,
                                     clock_id: *mut clockid_t) -> ::c_int;
    pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t,
                                     clock_id: clockid_t) -> ::c_int;
    pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t,
                                       pshared: ::c_int) -> ::c_int;
    pub fn pthread_condattr_getpshared(attr: *const pthread_condattr_t,
                                       pshared: *mut ::c_int) -> ::c_int;
    // --- CPU affinity and namespaces ----------------------------------
    pub fn sched_getaffinity(pid: ::pid_t,
                             cpusetsize: ::size_t,
                             cpuset: *mut cpu_set_t) -> ::c_int;
    pub fn sched_setaffinity(pid: ::pid_t,
                             cpusetsize: ::size_t,
                             cpuset: *const cpu_set_t) -> ::c_int;
    pub fn unshare(flags: ::c_int) -> ::c_int;
    pub fn setns(fd: ::c_int, nstype: ::c_int) -> ::c_int;
    pub fn sem_timedwait(sem: *mut sem_t,
                         abstime: *const ::timespec) -> ::c_int;
    pub fn accept4(fd: ::c_int, addr: *mut ::sockaddr, len: *mut ::socklen_t,
                   flg: ::c_int) -> ::c_int;
    // --- mutex attributes ---------------------------------------------
    pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t,
                                   abstime: *const ::timespec) -> ::c_int;
    pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t,
                                        pshared: ::c_int) -> ::c_int;
    pub fn pthread_mutexattr_getpshared(attr: *const pthread_mutexattr_t,
                                        pshared: *mut ::c_int) -> ::c_int;
    pub fn ptsname_r(fd: ::c_int,
                     buf: *mut ::c_char,
                     buflen: ::size_t) -> ::c_int;
    pub fn clearenv() -> ::c_int;
    pub fn waitid(idtype: idtype_t, id: id_t, infop: *mut ::siginfo_t,
                  options: ::c_int) -> ::c_int;
}
// Pull in the OS-specific submodule. Linux, emscripten and fuchsia share
// the `linux` definitions; Android (bionic libc) differs enough to need
// its own module. Items re-exported here override/extend this file.
cfg_if! {
    if #[cfg(any(target_os = "linux",
                 target_os = "emscripten",
                 target_os = "fuchsia"))] {
        mod linux;
        pub use self::linux::*;
    } else if #[cfg(target_os = "android")] {
        mod android;
        pub use self::android::*;
    } else {
        // Unknown target_os
    }
}
// add truncate64 on linux
use dox::mem;

// C typedefs shared by the Linux-like platforms.
pub type sa_family_t = u16;        // socket address family tag
pub type pthread_key_t = ::c_uint; // thread-specific storage key
pub type speed_t = ::c_uint;       // terminal baud-rate value
pub type tcflag_t = ::c_uint;      // termios mode-flag word
pub type loff_t = ::c_longlong;    // 64-bit file offset ("long offset")
pub type clockid_t = ::c_int;      // POSIX clock identifier
pub type key_t = ::c_int;          // System V IPC key
pub type id_t = ::c_uint;          // generic pid/uid/gid holder for waitid(2)

// Opaque type: only ever used behind a pointer, so it has no fields.
pub enum timezone {}
// C struct definitions. The `s!` macro derives the standard trait impls;
// field order and padding must match the platform ABI exactly.
s! {
    // Generic socket address header; concrete families reinterpret it.
    pub struct sockaddr {
        pub sa_family: sa_family_t,
        pub sa_data: [::c_char; 14],
    }

    // IPv4 socket address.
    pub struct sockaddr_in {
        pub sin_family: sa_family_t,
        pub sin_port: ::in_port_t,
        pub sin_addr: ::in_addr,
        pub sin_zero: [u8; 8],
    }

    // IPv6 socket address.
    pub struct sockaddr_in6 {
        pub sin6_family: sa_family_t,
        pub sin6_port: ::in_port_t,
        pub sin6_flowinfo: u32,
        pub sin6_addr: ::in6_addr,
        pub sin6_scope_id: u32,
    }

    // Unix-domain socket address; `sun_path` is a NUL-terminated path.
    pub struct sockaddr_un {
        pub sun_family: sa_family_t,
        pub sun_path: [::c_char; 108]
    }

    // Large enough (128 bytes) and aligned enough to hold any sockaddr_*.
    pub struct sockaddr_storage {
        pub ss_family: sa_family_t,
        __ss_align: ::size_t,
        #[cfg(target_pointer_width = "32")]
        __ss_pad2: [u8; 128 - 2 * 4],
        #[cfg(target_pointer_width = "64")]
        __ss_pad2: [u8; 128 - 2 * 8],
    }

    // getaddrinfo(3) result node. NOTE: glibc and bionic order the
    // `ai_addr`/`ai_canonname` fields differently, hence the cfgs.
    pub struct addrinfo {
        pub ai_flags: ::c_int,
        pub ai_family: ::c_int,
        pub ai_socktype: ::c_int,
        pub ai_protocol: ::c_int,
        pub ai_addrlen: socklen_t,

        #[cfg(any(target_os = "linux",
                  target_os = "emscripten",
                  target_os = "fuchsia"))]
        pub ai_addr: *mut ::sockaddr,

        pub ai_canonname: *mut c_char,

        #[cfg(target_os = "android")]
        pub ai_addr: *mut ::sockaddr,

        pub ai_next: *mut addrinfo,
    }

    // Netlink socket address.
    pub struct sockaddr_nl {
        pub nl_family: ::sa_family_t,
        nl_pad: ::c_ushort,
        pub nl_pid: u32,
        pub nl_groups: u32
    }

    // Link-layer (AF_PACKET) socket address.
    pub struct sockaddr_ll {
        pub sll_family: ::c_ushort,
        pub sll_protocol: ::c_ushort,
        pub sll_ifindex: ::c_int,
        pub sll_hatype: ::c_ushort,
        pub sll_pkttype: ::c_uchar,
        pub sll_halen: ::c_uchar,
        pub sll_addr: [::c_uchar; 8]
    }

    // select(2) descriptor bitmap; manipulated via FD_SET/FD_CLR/etc.
    pub struct fd_set {
        fds_bits: [::c_ulong; FD_SETSIZE / ULONG_SIZE],
    }

    // Broken-down calendar time (includes the BSD/GNU gmtoff/zone fields).
    pub struct tm {
        pub tm_sec: ::c_int,
        pub tm_min: ::c_int,
        pub tm_hour: ::c_int,
        pub tm_mday: ::c_int,
        pub tm_mon: ::c_int,
        pub tm_year: ::c_int,
        pub tm_wday: ::c_int,
        pub tm_yday: ::c_int,
        pub tm_isdst: ::c_int,
        pub tm_gmtoff: ::c_long,
        pub tm_zone: *const ::c_char,
    }

    // Scheduling parameters; musl exposes the sporadic-server fields.
    pub struct sched_param {
        pub sched_priority: ::c_int,
        #[cfg(any(target_env = "musl"))]
        pub sched_ss_low_priority: ::c_int,
        #[cfg(any(target_env = "musl"))]
        pub sched_ss_repl_period: ::timespec,
        #[cfg(any(target_env = "musl"))]
        pub sched_ss_init_budget: ::timespec,
        #[cfg(any(target_env = "musl"))]
        pub sched_ss_max_repl: ::c_int,
    }

    // dladdr(3) result.
    pub struct Dl_info {
        pub dli_fname: *const ::c_char,
        pub dli_fbase: *mut ::c_void,
        pub dli_sname: *const ::c_char,
        pub dli_saddr: *mut ::c_void,
    }

    // The kernel packs this struct on x86-64 (and x86 with glibc); other
    // ABIs use natural alignment, hence the cfg_attr.
    #[cfg_attr(any(all(target_arch = "x86",
                       not(target_env = "musl"),
                       not(target_os = "android")),
                   target_arch = "x86_64"),
               repr(packed))]
    pub struct epoll_event {
        pub events: ::uint32_t,
        pub u64: ::uint64_t,
    }

    // uname(2) result buffers.
    pub struct utsname {
        pub sysname: [::c_char; 65],
        pub nodename: [::c_char; 65],
        pub release: [::c_char; 65],
        pub version: [::c_char; 65],
        pub machine: [::c_char; 65],
        pub domainname: [::c_char; 65]
    }

    // localeconv(3) result: numeric/monetary formatting rules.
    pub struct lconv {
        pub decimal_point: *mut ::c_char,
        pub thousands_sep: *mut ::c_char,
        pub grouping: *mut ::c_char,
        pub int_curr_symbol: *mut ::c_char,
        pub currency_symbol: *mut ::c_char,
        pub mon_decimal_point: *mut ::c_char,
        pub mon_thousands_sep: *mut ::c_char,
        pub mon_grouping: *mut ::c_char,
        pub positive_sign: *mut ::c_char,
        pub negative_sign: *mut ::c_char,
        pub int_frac_digits: ::c_char,
        pub frac_digits: ::c_char,
        pub p_cs_precedes: ::c_char,
        pub p_sep_by_space: ::c_char,
        pub n_cs_precedes: ::c_char,
        pub n_sep_by_space: ::c_char,
        pub p_sign_posn: ::c_char,
        pub n_sign_posn: ::c_char,
        pub int_p_cs_precedes: ::c_char,
        pub int_p_sep_by_space: ::c_char,
        pub int_n_cs_precedes: ::c_char,
        pub int_n_sep_by_space: ::c_char,
        pub int_p_sign_posn: ::c_char,
        pub int_n_sign_posn: ::c_char,
    }

    // Asynchronous notification descriptor (timers, AIO, mqueues).
    pub struct sigevent {
        pub sigev_value: ::sigval,
        pub sigev_signo: ::c_int,
        pub sigev_notify: ::c_int,
        // Actually a union. We only expose sigev_notify_thread_id because it's
        // the most useful member
        pub sigev_notify_thread_id: ::c_int,
        #[cfg(target_pointer_width = "64")]
        __unused1: [::c_int; 11],
        #[cfg(target_pointer_width = "32")]
        __unused1: [::c_int; 12]
    }
}
// intentionally not public, only used for fd_set
// Bit width of `c_ulong` on this target; sizes the `fds_bits` array so
// that `fd_set` holds exactly FD_SETSIZE bits.
cfg_if! {
    if #[cfg(target_pointer_width = "32")] {
        const ULONG_SIZE: usize = 32;
    } else if #[cfg(target_pointer_width = "64")] {
        const ULONG_SIZE: usize = 64;
    } else {
        // Unknown target_pointer_width
    }
}
pub const EXIT_FAILURE: ::c_int = 1;
pub const EXIT_SUCCESS: ::c_int = 0;
pub const RAND_MAX: ::c_int = 2147483647;
pub const EOF: ::c_int = -1;
pub const SEEK_SET: ::c_int = 0;
pub const SEEK_CUR: ::c_int = 1;
pub const SEEK_END: ::c_int = 2;
pub const _IOFBF: ::c_int = 0;
pub const _IONBF: ::c_int = 2;
pub const _IOLBF: ::c_int = 1;
pub const F_DUPFD: ::c_int = 0;
pub const F_GETFD: ::c_int = 1;
pub const F_SETFD: ::c_int = 2;
pub const F_GETFL: ::c_int = 3;
pub const F_SETFL: ::c_int = 4;
// Linux-specific fcntls
pub const F_SETLEASE: ::c_int = 1024;
pub const F_GETLEASE: ::c_int = 1025;
pub const F_NOTIFY: ::c_int = 1026;
pub const F_DUPFD_CLOEXEC: ::c_int = 1030;
pub const F_SETPIPE_SZ: ::c_int = 1031;
pub const F_GETPIPE_SZ: ::c_int = 1032;
// TODO(#235): Include file sealing fcntls once we have a way to verify them.
pub const SIGTRAP: ::c_int = 5;
pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0;
pub const PTHREAD_CREATE_DETACHED: ::c_int = 1;
pub const CLOCK_REALTIME: clockid_t = 0;
pub const CLOCK_MONOTONIC: clockid_t = 1;
pub const CLOCK_PROCESS_CPUTIME_ID: clockid_t = 2;
pub const CLOCK_THREAD_CPUTIME_ID: clockid_t = 3;
pub const CLOCK_MONOTONIC_RAW: clockid_t = 4;
pub const CLOCK_REALTIME_COARSE: clockid_t = 5;
pub const CLOCK_MONOTONIC_COARSE: clockid_t = 6;
pub const CLOCK_BOOTTIME: clockid_t = 7;
pub const CLOCK_REALTIME_ALARM: clockid_t = 8;
pub const CLOCK_BOOTTIME_ALARM: clockid_t = 9;
// TODO(#247) Someday our Travis shall have glibc 2.21 (released in Sep
// 2014.) See also musl/mod.rs
// pub const CLOCK_SGI_CYCLE: clockid_t = 10;
// pub const CLOCK_TAI: clockid_t = 11;
pub const TIMER_ABSTIME: ::c_int = 1;
pub const RLIMIT_CPU: ::c_int = 0;
pub const RLIMIT_FSIZE: ::c_int = 1;
pub const RLIMIT_DATA: ::c_int = 2;
pub const RLIMIT_STACK: ::c_int = 3;
pub const RLIMIT_CORE: ::c_int = 4;
pub const RLIMIT_LOCKS: ::c_int = 10;
pub const RLIMIT_SIGPENDING: ::c_int = 11;
pub const RLIMIT_MSGQUEUE: ::c_int = 12;
pub const RLIMIT_NICE: ::c_int = 13;
pub const RLIMIT_RTPRIO: ::c_int = 14;
pub const RUSAGE_SELF: ::c_int = 0;
pub const O_RDONLY: ::c_int = 0;
pub const O_WRONLY: ::c_int = 1;
pub const O_RDWR: ::c_int = 2;
pub const O_TMPFILE: ::c_int = 0o20000000 | O_DIRECTORY;
pub const SOCK_CLOEXEC: ::c_int = O_CLOEXEC;
pub const S_IFIFO: ::mode_t = 4096;
pub const S_IFCHR: ::mode_t = 8192;
pub const S_IFBLK: ::mode_t = 24576;
pub const S_IFDIR: ::mode_t = 16384;
pub const S_IFREG: ::mode_t = 32768;
pub const S_IFLNK: ::mode_t = 40960;
pub const S_IFSOCK: ::mode_t = 49152;
pub const S_IFMT: ::mode_t = 61440;
pub const S_IRWXU: ::mode_t = 448;
pub const S_IXUSR: ::mode_t = 64;
// File-permission bits for st_mode / chmod(2) (user/group/other read, write, execute).
pub const S_IWUSR: ::mode_t = 128;
pub const S_IRUSR: ::mode_t = 256;
pub const S_IRWXG: ::mode_t = 56;
pub const S_IXGRP: ::mode_t = 8;
pub const S_IWGRP: ::mode_t = 16;
pub const S_IRGRP: ::mode_t = 32;
pub const S_IRWXO: ::mode_t = 7;
pub const S_IXOTH: ::mode_t = 1;
pub const S_IWOTH: ::mode_t = 2;
pub const S_IROTH: ::mode_t = 4;
// Accessibility-check modes for access(2)/faccessat(2).
pub const F_OK: ::c_int = 0;
pub const R_OK: ::c_int = 4;
pub const W_OK: ::c_int = 2;
pub const X_OK: ::c_int = 1;
// Standard I/O file descriptor numbers.
pub const STDIN_FILENO: ::c_int = 0;
pub const STDOUT_FILENO: ::c_int = 1;
pub const STDERR_FILENO: ::c_int = 2;
// Signal numbers (the subset shared by these platforms).
pub const SIGHUP: ::c_int = 1;
pub const SIGINT: ::c_int = 2;
pub const SIGQUIT: ::c_int = 3;
pub const SIGILL: ::c_int = 4;
pub const SIGABRT: ::c_int = 6;
pub const SIGFPE: ::c_int = 8;
pub const SIGKILL: ::c_int = 9;
pub const SIGSEGV: ::c_int = 11;
pub const SIGPIPE: ::c_int = 13;
pub const SIGALRM: ::c_int = 14;
pub const SIGTERM: ::c_int = 15;
// Page-protection flags for mmap(2)/mprotect(2).
pub const PROT_NONE: ::c_int = 0;
pub const PROT_READ: ::c_int = 1;
pub const PROT_WRITE: ::c_int = 2;
pub const PROT_EXEC: ::c_int = 4;
// Locale categories for setlocale(3).
pub const LC_CTYPE: ::c_int = 0;
pub const LC_NUMERIC: ::c_int = 1;
pub const LC_TIME: ::c_int = 2;
pub const LC_COLLATE: ::c_int = 3;
pub const LC_MONETARY: ::c_int = 4;
pub const LC_MESSAGES: ::c_int = 5;
pub const LC_ALL: ::c_int = 6;
// Category bit masks for newlocale(3): one bit per LC_* category.
pub const LC_CTYPE_MASK: ::c_int = (1 << LC_CTYPE);
pub const LC_NUMERIC_MASK: ::c_int = (1 << LC_NUMERIC);
pub const LC_TIME_MASK: ::c_int = (1 << LC_TIME);
pub const LC_COLLATE_MASK: ::c_int = (1 << LC_COLLATE);
pub const LC_MONETARY_MASK: ::c_int = (1 << LC_MONETARY);
pub const LC_MESSAGES_MASK: ::c_int = (1 << LC_MESSAGES);
// LC_ALL_MASK defined per platform
// Mapping flags for mmap(2); MAP_FAILED is the all-ones error sentinel.
pub const MAP_FILE: ::c_int = 0x0000;
pub const MAP_SHARED: ::c_int = 0x0001;
pub const MAP_PRIVATE: ::c_int = 0x0002;
pub const MAP_FIXED: ::c_int = 0x0010;
pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void;
// MS_ flags for msync(2)
pub const MS_ASYNC: ::c_int = 0x0001;
pub const MS_INVALIDATE: ::c_int = 0x0002;
pub const MS_SYNC: ::c_int = 0x0004;
// MS_ flags for mount(2)
pub const MS_RDONLY: ::c_ulong = 0x01;
pub const MS_NOSUID: ::c_ulong = 0x02;
pub const MS_NODEV: ::c_ulong = 0x04;
pub const MS_NOEXEC: ::c_ulong = 0x08;
pub const MS_SYNCHRONOUS: ::c_ulong = 0x10;
pub const MS_REMOUNT: ::c_ulong = 0x20;
pub const MS_MANDLOCK: ::c_ulong = 0x40;
pub const MS_DIRSYNC: ::c_ulong = 0x80;
pub const MS_NOATIME: ::c_ulong = 0x0400;
pub const MS_NODIRATIME: ::c_ulong = 0x0800;
pub const MS_BIND: ::c_ulong = 0x1000;
pub const MS_MOVE: ::c_ulong = 0x2000;
pub const MS_REC: ::c_ulong = 0x4000;
pub const MS_SILENT: ::c_ulong = 0x8000;
pub const MS_POSIXACL: ::c_ulong = 0x010000;
pub const MS_UNBINDABLE: ::c_ulong = 0x020000;
pub const MS_PRIVATE: ::c_ulong = 0x040000;
pub const MS_SLAVE: ::c_ulong = 0x080000;
pub const MS_SHARED: ::c_ulong = 0x100000;
pub const MS_RELATIME: ::c_ulong = 0x200000;
pub const MS_KERNMOUNT: ::c_ulong = 0x400000;
pub const MS_I_VERSION: ::c_ulong = 0x800000;
pub const MS_STRICTATIME: ::c_ulong = 0x1000000;
pub const MS_ACTIVE: ::c_ulong = 0x40000000;
pub const MS_NOUSER: ::c_ulong = 0x80000000;
// Mount "magic" value/mask recognized by older kernels.
pub const MS_MGC_VAL: ::c_ulong = 0xc0ed0000;
pub const MS_MGC_MSK: ::c_ulong = 0xffff0000;
pub const MS_RMT_MASK: ::c_ulong = 0x800051;
// errno values shared across Linux architectures (see errno(3)).
pub const EPERM: ::c_int = 1;
pub const ENOENT: ::c_int = 2;
pub const ESRCH: ::c_int = 3;
pub const EINTR: ::c_int = 4;
pub const EIO: ::c_int = 5;
pub const ENXIO: ::c_int = 6;
pub const E2BIG: ::c_int = 7;
pub const ENOEXEC: ::c_int = 8;
pub const EBADF: ::c_int = 9;
pub const ECHILD: ::c_int = 10;
pub const EAGAIN: ::c_int = 11;
pub const ENOMEM: ::c_int = 12;
pub const EACCES: ::c_int = 13;
pub const EFAULT: ::c_int = 14;
pub const ENOTBLK: ::c_int = 15;
pub const EBUSY: ::c_int = 16;
pub const EEXIST: ::c_int = 17;
pub const EXDEV: ::c_int = 18;
pub const ENODEV: ::c_int = 19;
pub const ENOTDIR: ::c_int = 20;
pub const EISDIR: ::c_int = 21;
pub const EINVAL: ::c_int = 22;
pub const ENFILE: ::c_int = 23;
pub const EMFILE: ::c_int = 24;
pub const ENOTTY: ::c_int = 25;
pub const ETXTBSY: ::c_int = 26;
pub const EFBIG: ::c_int = 27;
pub const ENOSPC: ::c_int = 28;
pub const ESPIPE: ::c_int = 29;
pub const EROFS: ::c_int = 30;
pub const EMLINK: ::c_int = 31;
pub const EPIPE: ::c_int = 32;
pub const EDOM: ::c_int = 33;
pub const ERANGE: ::c_int = 34;
// On Linux EWOULDBLOCK is an alias for EAGAIN.
pub const EWOULDBLOCK: ::c_int = EAGAIN;
// Ancillary-data (control message) types for sendmsg/recvmsg(2).
pub const SCM_RIGHTS: ::c_int = 0x01;
pub const SCM_CREDENTIALS: ::c_int = 0x02;
pub const IPPROTO_RAW: ::c_int = 255;
// Extra mprotect(2) bits: extend protection to the start/end of a
// growsdown/growsup mapping.
pub const PROT_GROWSDOWN: ::c_int = 0x1000000;
pub const PROT_GROWSUP: ::c_int = 0x2000000;
// Mask selecting the mapping-type bits of the mmap(2) flags.
pub const MAP_TYPE: ::c_int = 0x000f;
// Advice values for madvise(2).
pub const MADV_NORMAL: ::c_int = 0;
pub const MADV_RANDOM: ::c_int = 1;
pub const MADV_SEQUENTIAL: ::c_int = 2;
pub const MADV_WILLNEED: ::c_int = 3;
pub const MADV_DONTNEED: ::c_int = 4;
pub const MADV_REMOVE: ::c_int = 9;
pub const MADV_DONTFORK: ::c_int = 10;
pub const MADV_DOFORK: ::c_int = 11;
pub const MADV_MERGEABLE: ::c_int = 12;
pub const MADV_UNMERGEABLE: ::c_int = 13;
pub const MADV_HWPOISON: ::c_int = 100;
// Network-interface flags (ifreq.ifr_flags, netdevice(7)).
pub const IFF_UP: ::c_int = 0x1;
pub const IFF_BROADCAST: ::c_int = 0x2;
pub const IFF_DEBUG: ::c_int = 0x4;
pub const IFF_LOOPBACK: ::c_int = 0x8;
pub const IFF_POINTOPOINT: ::c_int = 0x10;
pub const IFF_NOTRAILERS: ::c_int = 0x20;
pub const IFF_RUNNING: ::c_int = 0x40;
pub const IFF_NOARP: ::c_int = 0x80;
pub const IFF_PROMISC: ::c_int = 0x100;
pub const IFF_ALLMULTI: ::c_int = 0x200;
pub const IFF_MASTER: ::c_int = 0x400;
pub const IFF_SLAVE: ::c_int = 0x800;
pub const IFF_MULTICAST: ::c_int = 0x1000;
pub const IFF_PORTSEL: ::c_int = 0x2000;
pub const IFF_AUTOMEDIA: ::c_int = 0x4000;
pub const IFF_DYNAMIC: ::c_int = 0x8000;
// Socket-option levels for getsockopt/setsockopt(2).
pub const SOL_IP: ::c_int = 0;
pub const SOL_TCP: ::c_int = 6;
pub const SOL_IPV6: ::c_int = 41;
pub const SOL_ICMPV6: ::c_int = 58;
pub const SOL_RAW: ::c_int = 255;
pub const SOL_DECNET: ::c_int = 261;
pub const SOL_X25: ::c_int = 262;
pub const SOL_PACKET: ::c_int = 263;
pub const SOL_ATM: ::c_int = 264;
pub const SOL_AAL: ::c_int = 265;
pub const SOL_IRDA: ::c_int = 266;
pub const SOL_NETBEUI: ::c_int = 267;
pub const SOL_LLC: ::c_int = 268;
pub const SOL_DCCP: ::c_int = 269;
pub const SOL_NETLINK: ::c_int = 270;
pub const SOL_TIPC: ::c_int = 271;
// Address families for socket(2).
pub const AF_UNSPEC: ::c_int = 0;
pub const AF_UNIX: ::c_int = 1;
pub const AF_LOCAL: ::c_int = 1;
pub const AF_INET: ::c_int = 2;
pub const AF_AX25: ::c_int = 3;
pub const AF_IPX: ::c_int = 4;
pub const AF_APPLETALK: ::c_int = 5;
pub const AF_NETROM: ::c_int = 6;
pub const AF_BRIDGE: ::c_int = 7;
pub const AF_ATMPVC: ::c_int = 8;
pub const AF_X25: ::c_int = 9;
pub const AF_INET6: ::c_int = 10;
pub const AF_ROSE: ::c_int = 11;
pub const AF_DECnet: ::c_int = 12;
pub const AF_NETBEUI: ::c_int = 13;
pub const AF_SECURITY: ::c_int = 14;
pub const AF_KEY: ::c_int = 15;
pub const AF_NETLINK: ::c_int = 16;
pub const AF_ROUTE: ::c_int = AF_NETLINK;
pub const AF_PACKET: ::c_int = 17;
pub const AF_ASH: ::c_int = 18;
pub const AF_ECONET: ::c_int = 19;
pub const AF_ATMSVC: ::c_int = 20;
pub const AF_RDS: ::c_int = 21;
pub const AF_SNA: ::c_int = 22;
pub const AF_IRDA: ::c_int = 23;
pub const AF_PPPOX: ::c_int = 24;
pub const AF_WANPIPE: ::c_int = 25;
pub const AF_LLC: ::c_int = 26;
pub const AF_CAN: ::c_int = 29;
pub const AF_TIPC: ::c_int = 30;
pub const AF_BLUETOOTH: ::c_int = 31;
pub const AF_IUCV: ::c_int = 32;
pub const AF_RXRPC: ::c_int = 33;
pub const AF_ISDN: ::c_int = 34;
pub const AF_PHONET: ::c_int = 35;
pub const AF_IEEE802154: ::c_int = 36;
pub const AF_CAIF: ::c_int = 37;
pub const AF_ALG: ::c_int = 38;
// Protocol families: on Linux each PF_* is simply its AF_* counterpart.
pub const PF_UNSPEC: ::c_int = AF_UNSPEC;
pub const PF_UNIX: ::c_int = AF_UNIX;
pub const PF_LOCAL: ::c_int = AF_LOCAL;
pub const PF_INET: ::c_int = AF_INET;
pub const PF_AX25: ::c_int = AF_AX25;
pub const PF_IPX: ::c_int = AF_IPX;
pub const PF_APPLETALK: ::c_int = AF_APPLETALK;
pub const PF_NETROM: ::c_int = AF_NETROM;
pub const PF_BRIDGE: ::c_int = AF_BRIDGE;
pub const PF_ATMPVC: ::c_int = AF_ATMPVC;
pub const PF_X25: ::c_int = AF_X25;
pub const PF_INET6: ::c_int = AF_INET6;
pub const PF_ROSE: ::c_int = AF_ROSE;
pub const PF_DECnet: ::c_int = AF_DECnet;
pub const PF_NETBEUI: ::c_int = AF_NETBEUI;
pub const PF_SECURITY: ::c_int = AF_SECURITY;
pub const PF_KEY: ::c_int = AF_KEY;
pub const PF_NETLINK: ::c_int = AF_NETLINK;
pub const PF_ROUTE: ::c_int = AF_ROUTE;
pub const PF_PACKET: ::c_int = AF_PACKET;
pub const PF_ASH: ::c_int = AF_ASH;
pub const PF_ECONET: ::c_int = AF_ECONET;
pub const PF_ATMSVC: ::c_int = AF_ATMSVC;
pub const PF_RDS: ::c_int = AF_RDS;
pub const PF_SNA: ::c_int = AF_SNA;
pub const PF_IRDA: ::c_int = AF_IRDA;
pub const PF_PPPOX: ::c_int = AF_PPPOX;
pub const PF_WANPIPE: ::c_int = AF_WANPIPE;
pub const PF_LLC: ::c_int = AF_LLC;
pub const PF_CAN: ::c_int = AF_CAN;
pub const PF_TIPC: ::c_int = AF_TIPC;
pub const PF_BLUETOOTH: ::c_int = AF_BLUETOOTH;
pub const PF_IUCV: ::c_int = AF_IUCV;
pub const PF_RXRPC: ::c_int = AF_RXRPC;
pub const PF_ISDN: ::c_int = AF_ISDN;
pub const PF_PHONET: ::c_int = AF_PHONET;
pub const PF_IEEE802154: ::c_int = AF_IEEE802154;
pub const PF_CAIF: ::c_int = AF_CAIF;
pub const PF_ALG: ::c_int = AF_ALG;
// Default maximum backlog for listen(2).
pub const SOMAXCONN: ::c_int = 128;
// Flags for send(2)/recv(2) and friends.
pub const MSG_OOB: ::c_int = 1;
pub const MSG_PEEK: ::c_int = 2;
pub const MSG_DONTROUTE: ::c_int = 4;
pub const MSG_CTRUNC: ::c_int = 8;
pub const MSG_TRUNC: ::c_int = 0x20;
pub const MSG_DONTWAIT: ::c_int = 0x40;
pub const MSG_EOR: ::c_int = 0x80;
pub const MSG_WAITALL: ::c_int = 0x100;
pub const MSG_FIN: ::c_int = 0x200;
pub const MSG_SYN: ::c_int = 0x400;
pub const MSG_CONFIRM: ::c_int = 0x800;
pub const MSG_RST: ::c_int = 0x1000;
pub const MSG_ERRQUEUE: ::c_int = 0x2000;
pub const MSG_NOSIGNAL: ::c_int = 0x4000;
pub const MSG_MORE: ::c_int = 0x8000;
pub const MSG_WAITFORONE: ::c_int = 0x10000;
pub const MSG_FASTOPEN: ::c_int = 0x20000000;
pub const MSG_CMSG_CLOEXEC: ::c_int = 0x40000000;
pub const SOCK_RAW: ::c_int = 3;
// IP protocol numbers for socket(2).
pub const IPPROTO_ICMP: ::c_int = 1;
pub const IPPROTO_ICMPV6: ::c_int = 58;
pub const IPPROTO_TCP: ::c_int = 6;
pub const IPPROTO_IP: ::c_int = 0;
pub const IPPROTO_IPV6: ::c_int = 41;
// IPv4-level (IPPROTO_IP) socket options.
pub const IP_MULTICAST_TTL: ::c_int = 33;
pub const IP_MULTICAST_LOOP: ::c_int = 34;
pub const IP_TTL: ::c_int = 2;
pub const IP_HDRINCL: ::c_int = 3;
pub const IP_ADD_MEMBERSHIP: ::c_int = 35;
pub const IP_DROP_MEMBERSHIP: ::c_int = 36;
pub const IP_TRANSPARENT: ::c_int = 19;
// IPv6-level (IPPROTO_IPV6) multicast membership options.
pub const IPV6_ADD_MEMBERSHIP: ::c_int = 20;
pub const IPV6_DROP_MEMBERSHIP: ::c_int = 21;
// TCP-level (IPPROTO_TCP) socket options.
pub const TCP_NODELAY: ::c_int = 1;
pub const TCP_MAXSEG: ::c_int = 2;
pub const TCP_CORK: ::c_int = 3;
pub const TCP_KEEPIDLE: ::c_int = 4;
pub const TCP_KEEPINTVL: ::c_int = 5;
pub const TCP_KEEPCNT: ::c_int = 6;
pub const TCP_SYNCNT: ::c_int = 7;
pub const TCP_LINGER2: ::c_int = 8;
pub const TCP_DEFER_ACCEPT: ::c_int = 9;
pub const TCP_WINDOW_CLAMP: ::c_int = 10;
pub const TCP_INFO: ::c_int = 11;
pub const TCP_QUICKACK: ::c_int = 12;
pub const TCP_CONGESTION: ::c_int = 13;
pub const IPV6_MULTICAST_LOOP: ::c_int = 19;
pub const IPV6_V6ONLY: ::c_int = 26;
pub const SO_DEBUG: ::c_int = 1;
// `how` values for shutdown(2).
pub const SHUT_RD: ::c_int = 0;
pub const SHUT_WR: ::c_int = 1;
pub const SHUT_RDWR: ::c_int = 2;
// Operations for flock(2).
pub const LOCK_SH: ::c_int = 1;
pub const LOCK_EX: ::c_int = 2;
pub const LOCK_NB: ::c_int = 4;
pub const LOCK_UN: ::c_int = 8;
// sigaltstack(2) ss_flags values.
pub const SS_ONSTACK: ::c_int = 1;
pub const SS_DISABLE: ::c_int = 2;
pub const PATH_MAX: ::c_int = 4096;
// Number of descriptors an fd_set can track for select(2).
pub const FD_SETSIZE: usize = 1024;
// epoll event bits (epoll_ctl(2)/epoll_wait(2)).
pub const EPOLLIN: ::c_int = 0x1;
pub const EPOLLPRI: ::c_int = 0x2;
pub const EPOLLOUT: ::c_int = 0x4;
pub const EPOLLRDNORM: ::c_int = 0x40;
pub const EPOLLRDBAND: ::c_int = 0x80;
pub const EPOLLWRNORM: ::c_int = 0x100;
pub const EPOLLWRBAND: ::c_int = 0x200;
pub const EPOLLMSG: ::c_int = 0x400;
pub const EPOLLERR: ::c_int = 0x8;
pub const EPOLLHUP: ::c_int = 0x10;
pub const EPOLLET: ::c_int = 0x80000000;
// epoll_ctl(2) operations.
pub const EPOLL_CTL_ADD: ::c_int = 1;
pub const EPOLL_CTL_MOD: ::c_int = 3;
pub const EPOLL_CTL_DEL: ::c_int = 2;
// Flags for umount2(2).
pub const MNT_DETACH: ::c_int = 0x2;
pub const MNT_EXPIRE: ::c_int = 0x4;
// quotactl(2) commands and dqinfo/dqblk validity bits.
pub const Q_GETFMT: ::c_int = 0x800004;
pub const Q_GETINFO: ::c_int = 0x800005;
pub const Q_SETINFO: ::c_int = 0x800006;
pub const QIF_BLIMITS: ::uint32_t = 1;
pub const QIF_SPACE: ::uint32_t = 2;
pub const QIF_ILIMITS: ::uint32_t = 4;
pub const QIF_INODES: ::uint32_t = 8;
pub const QIF_BTIME: ::uint32_t = 16;
pub const QIF_ITIME: ::uint32_t = 32;
// Composite QIF_ masks (combinations of the single bits above).
pub const QIF_LIMITS: ::uint32_t = 5;
pub const QIF_USAGE: ::uint32_t = 10;
pub const QIF_TIMES: ::uint32_t = 48;
pub const QIF_ALL: ::uint32_t = 63;
pub const MNT_FORCE: ::c_int = 0x1;
pub const Q_SYNC: ::c_int = 0x800001;
pub const Q_QUOTAON: ::c_int = 0x800002;
pub const Q_QUOTAOFF: ::c_int = 0x800003;
pub const Q_GETQUOTA: ::c_int = 0x800007;
pub const Q_SETQUOTA: ::c_int = 0x800008;
// tcflow(3) actions.
pub const TCIOFF: ::c_int = 2;
pub const TCION: ::c_int = 3;
pub const TCOOFF: ::c_int = 0;
pub const TCOON: ::c_int = 1;
// tcflush(3) queue selectors.
pub const TCIFLUSH: ::c_int = 0;
pub const TCOFLUSH: ::c_int = 1;
pub const TCIOFLUSH: ::c_int = 2;
// termios output delay bits.
pub const NL0: ::c_int = 0x00000000;
pub const NL1: ::c_int = 0x00000100;
pub const TAB0: ::c_int = 0x00000000;
pub const CR0: ::c_int = 0x00000000;
pub const FF0: ::c_int = 0x00000000;
pub const BS0: ::c_int = 0x00000000;
pub const VT0: ::c_int = 0x00000000;
// Indices into termios.c_cc (special control characters).
pub const VERASE: usize = 2;
pub const VKILL: usize = 3;
pub const VINTR: usize = 0;
pub const VQUIT: usize = 1;
pub const VLNEXT: usize = 15;
// termios input-mode (c_iflag) bits.
pub const IGNBRK: ::tcflag_t = 0x00000001;
pub const BRKINT: ::tcflag_t = 0x00000002;
pub const IGNPAR: ::tcflag_t = 0x00000004;
pub const PARMRK: ::tcflag_t = 0x00000008;
pub const INPCK: ::tcflag_t = 0x00000010;
pub const ISTRIP: ::tcflag_t = 0x00000020;
pub const INLCR: ::tcflag_t = 0x00000040;
pub const IGNCR: ::tcflag_t = 0x00000080;
pub const ICRNL: ::tcflag_t = 0x00000100;
pub const IXANY: ::tcflag_t = 0x00000800;
pub const IMAXBEL: ::tcflag_t = 0x00002000;
// termios output-mode (c_oflag), control-mode (c_cflag) and
// local-mode (c_lflag) bits.
pub const OPOST: ::tcflag_t = 0x1;
pub const CS5: ::tcflag_t = 0x00000000;
pub const CRTSCTS: ::tcflag_t = 0x80000000;
pub const ECHO: ::tcflag_t = 0x00000008;
// Flags for clone(2).
pub const CLONE_VM: ::c_int = 0x100;
pub const CLONE_FS: ::c_int = 0x200;
pub const CLONE_FILES: ::c_int = 0x400;
pub const CLONE_SIGHAND: ::c_int = 0x800;
pub const CLONE_PTRACE: ::c_int = 0x2000;
pub const CLONE_VFORK: ::c_int = 0x4000;
pub const CLONE_PARENT: ::c_int = 0x8000;
pub const CLONE_THREAD: ::c_int = 0x10000;
pub const CLONE_NEWNS: ::c_int = 0x20000;
pub const CLONE_SYSVSEM: ::c_int = 0x40000;
pub const CLONE_SETTLS: ::c_int = 0x80000;
pub const CLONE_PARENT_SETTID: ::c_int = 0x100000;
pub const CLONE_CHILD_CLEARTID: ::c_int = 0x200000;
pub const CLONE_DETACHED: ::c_int = 0x400000;
pub const CLONE_UNTRACED: ::c_int = 0x800000;
pub const CLONE_CHILD_SETTID: ::c_int = 0x01000000;
pub const CLONE_NEWUTS: ::c_int = 0x04000000;
pub const CLONE_NEWIPC: ::c_int = 0x08000000;
pub const CLONE_NEWUSER: ::c_int = 0x10000000;
pub const CLONE_NEWPID: ::c_int = 0x20000000;
pub const CLONE_NEWNET: ::c_int = 0x40000000;
pub const CLONE_IO: ::c_int = 0x80000000;
// Option bits for waitpid(2)/waitid(2).
pub const WNOHANG: ::c_int = 0x00000001;
pub const WUNTRACED: ::c_int = 0x00000002;
pub const WSTOPPED: ::c_int = WUNTRACED;
pub const WEXITED: ::c_int = 0x00000004;
pub const WCONTINUED: ::c_int = 0x00000008;
pub const WNOWAIT: ::c_int = 0x01000000;
pub const __WNOTHREAD: ::c_int = 0x20000000;
pub const __WALL: ::c_int = 0x40000000;
pub const __WCLONE: ::c_int = 0x80000000;
// Flags for splice(2)/tee(2)/vmsplice(2).
pub const SPLICE_F_MOVE: ::c_uint = 0x01;
pub const SPLICE_F_NONBLOCK: ::c_uint = 0x02;
pub const SPLICE_F_MORE: ::c_uint = 0x04;
pub const SPLICE_F_GIFT: ::c_uint = 0x08;
pub const RTLD_LOCAL: ::c_int = 0;
// Advice values for posix_fadvise(2).
pub const POSIX_FADV_NORMAL: ::c_int = 0;
pub const POSIX_FADV_RANDOM: ::c_int = 1;
pub const POSIX_FADV_SEQUENTIAL: ::c_int = 2;
pub const POSIX_FADV_WILLNEED: ::c_int = 3;
// *at(2) family: special dirfd value and flags.
pub const AT_FDCWD: ::c_int = -100;
pub const AT_SYMLINK_NOFOLLOW: ::c_int = 0x100;
// syslog(3) facilities (facility = code << 3) and options.
pub const LOG_CRON: ::c_int = 9 << 3;
pub const LOG_AUTHPRIV: ::c_int = 10 << 3;
pub const LOG_FTP: ::c_int = 11 << 3;
pub const LOG_PERROR: ::c_int = 0x20;
pub const PIPE_BUF: usize = 4096;
// Fixed-point shift for sysinfo(2) load averages.
pub const SI_LOAD_SHIFT: ::c_uint = 16;
// sigevent notification methods.
pub const SIGEV_SIGNAL: ::c_int = 0;
pub const SIGEV_NONE: ::c_int = 1;
pub const SIGEV_THREAD: ::c_int = 2;
// idtype values for waitid(2).
pub const P_ALL: idtype_t = 0;
pub const P_PID: idtype_t = 1;
pub const P_PGID: idtype_t = 2;
f! {
    // Clear descriptor `fd`'s bit in `set`. An fd_set is an array of
    // integer words (`fds_bits`); `size` is the bit width of one word,
    // so `fd / size` selects the word and `fd % size` the bit in it.
    pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () {
        let fd = fd as usize;
        let size = mem::size_of_val(&(*set).fds_bits[0]) * 8;
        (*set).fds_bits[fd / size] &= !(1 << (fd % size));
        return
    }

    // Test whether descriptor `fd`'s bit is set in `set`.
    pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool {
        let fd = fd as usize;
        let size = mem::size_of_val(&(*set).fds_bits[0]) * 8;
        return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0
    }

    // Set descriptor `fd`'s bit in `set`.
    pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () {
        let fd = fd as usize;
        let size = mem::size_of_val(&(*set).fds_bits[0]) * 8;
        (*set).fds_bits[fd / size] |= 1 << (fd % size);
        return
    }

    // Clear every bit in `set`.
    pub fn FD_ZERO(set: *mut fd_set) -> () {
        for slot in (*set).fds_bits.iter_mut() {
            *slot = 0;
        }
    }

    // wait(2) status decoding, mirroring the C WIF*/W* macros.

    // Child is currently stopped: low byte is exactly 0x7f.
    pub fn WIFSTOPPED(status: ::c_int) -> bool {
        (status & 0xff) == 0x7f
    }

    // Signal that stopped the child (valid only if WIFSTOPPED).
    pub fn WSTOPSIG(status: ::c_int) -> ::c_int {
        (status >> 8) & 0xff
    }

    // Child was resumed by SIGCONT: the whole status is the 0xffff sentinel.
    pub fn WIFCONTINUED(status: ::c_int) -> bool {
        status == 0xffff
    }

    // Child terminated by a signal: low 7 bits are neither 0 (normal exit)
    // nor 0x7f (stopped). The `as i8` cast makes 0x7f + 1 = 0x80 wrap to
    // -128, which fails the `>= 2` test, while 1..=0x7e map to 2..=0x7f.
    pub fn WIFSIGNALED(status: ::c_int) -> bool {
        ((status & 0x7f) + 1) as i8 >= 2
    }

    // Signal that terminated the child (valid only if WIFSIGNALED).
    pub fn WTERMSIG(status: ::c_int) -> ::c_int {
        status & 0x7f
    }

    // Child exited normally: low 7 bits are zero.
    pub fn WIFEXITED(status: ::c_int) -> bool {
        (status & 0x7f) == 0
    }

    // Exit code passed to _exit (valid only if WIFEXITED).
    pub fn WEXITSTATUS(status: ::c_int) -> ::c_int {
        (status >> 8) & 0xff
    }

    // Child produced a core dump (bit 0x80; valid only if WIFSIGNALED).
    pub fn WCOREDUMP(status: ::c_int) -> bool {
        (status & 0x80) != 0
    }
}
// FFI bindings to libc functions shared by the Linux-like platforms.
// Signatures must match the C declarations exactly; see the man page of
// the same name for each function's semantics.
extern {
    // Reentrant password-database lookups (getpwnam_r(3)/getpwuid_r(3)).
    pub fn getpwnam_r(name: *const ::c_char,
                      pwd: *mut passwd,
                      buf: *mut ::c_char,
                      buflen: ::size_t,
                      result: *mut *mut passwd) -> ::c_int;
    pub fn getpwuid_r(uid: ::uid_t,
                      pwd: *mut passwd,
                      buf: *mut ::c_char,
                      buflen: ::size_t,
                      result: *mut *mut passwd) -> ::c_int;
    pub fn fdatasync(fd: ::c_int) -> ::c_int;
    pub fn mincore(addr: *mut ::c_void, len: ::size_t,
                   vec: *mut ::c_uchar) -> ::c_int;
    // POSIX clock API (clock_gettime(2) and friends).
    pub fn clock_getres(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
    pub fn clock_gettime(clk_id: clockid_t, tp: *mut ::timespec) -> ::c_int;
    pub fn clock_nanosleep(clk_id: clockid_t,
                           flags: ::c_int,
                           rqtp: *const ::timespec,
                           rmtp: *mut ::timespec) -> ::c_int;
    pub fn clock_settime(clk_id: clockid_t, tp: *const ::timespec) -> ::c_int;
    pub fn prctl(option: ::c_int, ...) -> ::c_int;
    // Non-portable pthread attribute queries (glibc/bionic extensions).
    pub fn pthread_getattr_np(native: ::pthread_t,
                              attr: *mut ::pthread_attr_t) -> ::c_int;
    pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t,
                                     guardsize: *mut ::size_t) -> ::c_int;
    pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t,
                                 stackaddr: *mut *mut ::c_void,
                                 stacksize: *mut ::size_t) -> ::c_int;
    pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void;
    // Supplementary group management.
    pub fn setgroups(ngroups: ::size_t,
                     ptr: *const ::gid_t) -> ::c_int;
    pub fn initgroups(user: *const ::c_char, group: ::gid_t) -> ::c_int;
    // Scheduler policy/priority control (sched_setscheduler(2) etc.).
    pub fn sched_setscheduler(pid: ::pid_t,
                              policy: ::c_int,
                              param: *const sched_param) -> ::c_int;
    pub fn sched_getscheduler(pid: ::pid_t) -> ::c_int;
    pub fn sched_get_priority_max(policy: ::c_int) -> ::c_int;
    pub fn sched_get_priority_min(policy: ::c_int) -> ::c_int;
    // epoll event-notification interface (epoll(7)).
    pub fn epoll_create(size: ::c_int) -> ::c_int;
    pub fn epoll_create1(flags: ::c_int) -> ::c_int;
    pub fn epoll_ctl(epfd: ::c_int,
                     op: ::c_int,
                     fd: ::c_int,
                     event: *mut epoll_event) -> ::c_int;
    pub fn epoll_wait(epfd: ::c_int,
                      events: *mut epoll_event,
                      maxevents: ::c_int,
                      timeout: ::c_int) -> ::c_int;
    pub fn pipe2(fds: *mut ::c_int, flags: ::c_int) -> ::c_int;
    // Filesystem mounting (mount(2)/umount(2)/umount2(2)).
    pub fn mount(src: *const ::c_char,
                 target: *const ::c_char,
                 fstype: *const ::c_char,
                 flags: ::c_ulong,
                 data: *const ::c_void) -> ::c_int;
    pub fn umount(target: *const ::c_char) -> ::c_int;
    pub fn umount2(target: *const ::c_char, flags: ::c_int) -> ::c_int;
    pub fn clone(cb: extern fn(*mut ::c_void) -> ::c_int,
                 child_stack: *mut ::c_void,
                 flags: ::c_int,
                 arg: *mut ::c_void, ...) -> ::c_int;
    pub fn statfs(path: *const ::c_char, buf: *mut statfs) -> ::c_int;
    pub fn fstatfs(fd: ::c_int, buf: *mut statfs) -> ::c_int;
    pub fn memrchr(cx: *const ::c_void,
                   c: ::c_int,
                   n: ::size_t) -> *mut ::c_void;
    pub fn syscall(num: ::c_long, ...) -> ::c_long;
    // Zero-copy data movement between descriptors (sendfile(2), splice(2),
    // tee(2), vmsplice(2)).
    pub fn sendfile(out_fd: ::c_int,
                    in_fd: ::c_int,
                    offset: *mut off_t,
                    count: ::size_t) -> ::ssize_t;
    pub fn splice(fd_in: ::c_int,
                  off_in: *mut ::loff_t,
                  fd_out: ::c_int,
                  off_out: *mut ::loff_t,
                  len: ::size_t,
                  flags: ::c_uint) -> ::ssize_t;
    pub fn tee(fd_in: ::c_int,
               fd_out: ::c_int,
               len: ::size_t,
               flags: ::c_uint) -> ::ssize_t;
    pub fn vmsplice(fd: ::c_int,
                    iov: *const ::iovec,
                    nr_segs: ::size_t,
                    flags: ::c_uint) -> ::ssize_t;
    pub fn posix_fadvise(fd: ::c_int, offset: ::off_t, len: ::off_t,
                         advise: ::c_int) -> ::c_int;
    // Nanosecond-precision timestamp updates.
    pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int;
    pub fn utimensat(dirfd: ::c_int, path: *const ::c_char,
                     times: *const ::timespec, flag: ::c_int) -> ::c_int;
    // Per-thread locale objects (newlocale(3)/uselocale(3)).
    pub fn duplocale(base: ::locale_t) -> ::locale_t;
    pub fn freelocale(loc: ::locale_t);
    pub fn newlocale(mask: ::c_int,
                     locale: *const ::c_char,
                     base: ::locale_t) -> ::locale_t;
    pub fn uselocale(loc: ::locale_t) -> ::locale_t;
    // Explicit large-file (64-bit off_t) variants of the file APIs.
    pub fn creat64(path: *const c_char, mode: mode_t) -> ::c_int;
    pub fn fstat64(fildes: ::c_int, buf: *mut stat64) -> ::c_int;
    pub fn ftruncate64(fd: ::c_int, length: off64_t) -> ::c_int;
    pub fn getrlimit64(resource: ::c_int, rlim: *mut rlimit64) -> ::c_int;
    pub fn lseek64(fd: ::c_int, offset: off64_t, whence: ::c_int) -> off64_t;
    pub fn lstat64(path: *const c_char, buf: *mut stat64) -> ::c_int;
    pub fn mmap64(addr: *mut ::c_void,
                  len: ::size_t,
                  prot: ::c_int,
                  flags: ::c_int,
                  fd: ::c_int,
                  offset: off64_t)
                  -> *mut ::c_void;
    pub fn open64(path: *const c_char, oflag: ::c_int, ...) -> ::c_int;
    pub fn pread64(fd: ::c_int, buf: *mut ::c_void, count: ::size_t,
                   offset: off64_t) -> ::ssize_t;
    pub fn pwrite64(fd: ::c_int, buf: *const ::c_void, count: ::size_t,
                    offset: off64_t) -> ::ssize_t;
    pub fn readdir64_r(dirp: *mut ::DIR, entry: *mut ::dirent64,
                       result: *mut *mut ::dirent64) -> ::c_int;
    pub fn setrlimit64(resource: ::c_int, rlim: *const rlimit64) -> ::c_int;
    pub fn stat64(path: *const c_char, buf: *mut stat64) -> ::c_int;
    pub fn truncate64(path: *const c_char, length: off64_t) -> ::c_int;
    pub fn eventfd(init: ::c_uint, flags: ::c_int) -> ::c_int;
    pub fn sysinfo (info: *mut ::sysinfo) -> ::c_int;
    pub fn fdopendir(fd: ::c_int) -> *mut ::DIR;
    pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char,
                   mode: ::mode_t, dev: dev_t) -> ::c_int;
    pub fn ppoll(fds: *mut ::pollfd,
                 nfds: nfds_t,
                 timeout: *const ::timespec,
                 sigmask: *const sigset_t) -> ::c_int;
    // Condition-variable attribute accessors (clock and process sharing).
    pub fn pthread_condattr_getclock(attr: *const pthread_condattr_t,
                                     clock_id: *mut clockid_t) -> ::c_int;
    pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t,
                                     clock_id: clockid_t) -> ::c_int;
    pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t,
                                       pshared: ::c_int) -> ::c_int;
    pub fn pthread_condattr_getpshared(attr: *const pthread_condattr_t,
                                       pshared: *mut ::c_int) -> ::c_int;
    // CPU affinity control (sched_setaffinity(2)).
    pub fn sched_getaffinity(pid: ::pid_t,
                             cpusetsize: ::size_t,
                             cpuset: *mut cpu_set_t) -> ::c_int;
    pub fn sched_setaffinity(pid: ::pid_t,
                             cpusetsize: ::size_t,
                             cpuset: *const cpu_set_t) -> ::c_int;
    // Namespace manipulation (unshare(2)/setns(2)).
    pub fn unshare(flags: ::c_int) -> ::c_int;
    pub fn setns(fd: ::c_int, nstype: ::c_int) -> ::c_int;
    pub fn sem_timedwait(sem: *mut sem_t,
                         abstime: *const ::timespec) -> ::c_int;
    pub fn accept4(fd: ::c_int, addr: *mut ::sockaddr, len: *mut ::socklen_t,
                   flg: ::c_int) -> ::c_int;
    // Mutex timed locking and process-shared attribute accessors.
    pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t,
                                   abstime: *const ::timespec) -> ::c_int;
    pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t,
                                        pshared: ::c_int) -> ::c_int;
    pub fn pthread_mutexattr_getpshared(attr: *const pthread_mutexattr_t,
                                        pshared: *mut ::c_int) -> ::c_int;
    pub fn ptsname_r(fd: ::c_int,
                     buf: *mut ::c_char,
                     buflen: ::size_t) -> ::c_int;
    pub fn clearenv() -> ::c_int;
    pub fn waitid(idtype: idtype_t, id: id_t, infop: *mut ::siginfo_t,
                  options: ::c_int) -> ::c_int;
}
// Pull in the platform-specific submodule that completes these common
// definitions: `linux` for Linux-like targets, `android` for bionic.
cfg_if! {
    if #[cfg(any(target_os = "linux",
                 target_os = "emscripten",
                 target_os = "fuchsia"))] {
        mod linux;
        pub use self::linux::*;
    } else if #[cfg(target_os = "android")] {
        mod android;
        pub use self::android::*;
    } else {
        // Unknown target_os
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.